id (string, 1–8 chars) | text (string, 6–1.05M chars) | dataset_id (string, 1 distinct value)
---|---|---|
3484448 | <gh_stars>1-10
# Generated by Django 2.2.6 on 2019-12-10 08:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Exam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('total_duration', models.DurationField(default=30)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('total_duration', models.DurationField(default=30)),
],
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='QuestionGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('exam', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.Exam')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.Question')),
],
),
migrations.AddField(
model_name='question',
name='topic',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.Topic'),
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('is_right_answer', models.BooleanField(default=False)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.Question')),
],
),
]
| StarcoderdataPython |
5133696 | from yacs.config import CfgNode as CN
__all__ = ['get_default_cfg']
_C = CN()
_C.OUTPUT_DIR = "results"
_C.DATASET = CN()
_C.DATASET.PATH = ''
_C.MODEL = CN()
_C.TRAIN = CN()
_C.TRAIN.BASE_LEARNING_RATE = 0.05
_C.TRAIN.LEARNING_DECAY_RATE = 0.1
_C.TRAIN.LEARNING_DECAY_STEPS = [1000, 2000]
_C.TRAIN.TRAIN_EPOCHS = 3000
_C.TRAIN.SNAPSHOT_FREQ = 100
_C.TRAIN.REPORT_FREQ = 30
def get_default_cfg():
return _C.clone()
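if __name__ == "__main__":
    # Minimal usage sketch (illustrative values only): take a private copy of the
    # defaults, override a couple of training options, then freeze the copy. The
    # module-level _C stays untouched because get_default_cfg() returns a clone.
    cfg = get_default_cfg()
    cfg.TRAIN.TRAIN_EPOCHS = 100
    cfg.DATASET.PATH = "/tmp/example-dataset"  # hypothetical path
    cfg.freeze()
    print(cfg.TRAIN.TRAIN_EPOCHS, _C.TRAIN.TRAIN_EPOCHS)  # 100 3000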
| StarcoderdataPython |
32989 | <filename>packages/validate_and_forward/lambda_handler.py<gh_stars>1-10
import json
import os
import traceback
from datetime import datetime
from uuid import uuid4
import boto3
from aws.ssm import get_ssm_params
from database import Jobs
from gp_file_parser.parser import parse_gp_extract_file_s3
from jobs.statuses import InputFolderType, InvalidErrorType, JobStatus, ParseStatus
from lr_logging import error, get_cloudlogbase_config, success
from lr_logging.exceptions import InvalidFilename, InvalidGPExtract, InvalidStructure
from mesh import AWSMESHMailbox, get_mesh_mailboxes
from pynamodb.exceptions import PutError, PynamoDBConnectionError, QueryError
from spine_aws_common.lambda_application import LambdaApplication
class ValidateAndForward(LambdaApplication):
"""
Lambda:
Responsible for creating a Job, tracking it in DynamoDB and then sending a given input file off to DPS.
If the input file is invalid, it is rejected.
Trigger:
extracts_inbound inputs bucket
"""
def __init__(self):
super().__init__(additional_log_config=get_cloudlogbase_config())
self.job_id = None
self.s3 = boto3.client("s3")
self.mesh_params = get_ssm_params(
self.system_config["MESH_SSM_PREFIX"], self.system_config["AWS_REGION"]
)
def initialise(self):
pass
def start(self):
try:
self.job_id = str(uuid4())
self.input_bucket = self.event["Records"][0]["s3"]["bucket"]["name"]
self.input_file = self.event["Records"][0]["s3"]["object"]["key"]
except KeyError as e:
self.response = error(
"validate_and_forward Lambda tried to access missing key",
self.log_object.internal_id,
error=traceback.format_exc(),
)
raise e
self.create_initial_job()
self.filename, file = self.get_file_contents()
# get gp_file extract stats for the job's table entry
# Also handles rejection if the file can't be parsed or validation fails
try:
practice_code, num_of_records = self.parse_gp_extract(
self.input_bucket, self.input_file
)
self.log_object.write_log(
"LRSDI02",
log_row_dict={
"input_file": self.input_file,
"job_id": self.job_id,
},
)
except (InvalidStructure, InvalidGPExtract, InvalidFilename) as exc:
self.update_job_status(
JobStatus.REJECTED.value, ParseStatus.PARSE_FAILED.value
)
message = json.dumps(self.process_invalid_message(exc))
self.handle_rejection(InputFolderType.REJECTED, message)
self.log_object.write_log(
"LRSDW01",
log_row_dict={
"input_file": self.input_file,
"job_id": self.job_id,
"reason": exc,
},
)
self.response = success(
message="Lambda application stopped gp extract input file rejected, rejected file handled",
internal_id=self.log_object.internal_id,
job_id=self.job_id,
file=self.input_file,
reason=exc,
)
return self.response
self.update_job_info(practice_code, num_of_records)
self.send_mesh_file(self.filename, file)
self.update_job_status(JobStatus.SENT_TO_DPS.value)
self.response = success(
message="validate_and_forward file sent",
internal_id=self.log_object.internal_id,
job=self.job_id,
file=self.input_file,
)
def create_initial_job(self):
"""Creates an initial Job item in DynamoDb."""
try:
job_item = Jobs(
self.job_id,
PracticeCode=ParseStatus.NOT_PARSED.value,
FileName=self.input_file,
Timestamp=datetime.now(),
StatusId=JobStatus.PENDING.value,
)
job_item.save()
self.log_object.write_log(
"LRSDI01",
log_row_dict={
"job_id": self.job_id,
"input_file": self.input_file,
},
)
except (PynamoDBConnectionError, PutError) as e:
self.log_object.write_log(
"LRSDC01",
log_row_dict={
"job_id": self.job_id,
"input_file": self.input_file,
"error": e,
},
)
self.response = error(
message="validate_and_forward failed to create a job item before processing the gp_extract file",
internal_id=self.log_object.internal_id,
job_id=self.job_id,
file=self.input_file,
)
raise e
def parse_gp_extract(self, input_bucket: str, input_file: str) -> tuple[str, int]:
"""Handler to process an uploaded S3 object containing a GP flat
file extract
Returns:
extract file data: practice_code & num_of_records
"""
self.upload_date = datetime.now()
input_file_dict = parse_gp_extract_file_s3(
self.s3,
input_bucket,
input_file,
self.upload_date,
)
practice_code = input_file_dict["practice_code"]
num_of_records = len(input_file_dict["records"])
return practice_code, num_of_records
def get_file_contents(self):
filename = os.path.basename(str(self.input_file))
file = (
self.s3.get_object(
Bucket=self.input_bucket,
Key=self.input_file,
)["Body"]
.read()
.decode("utf-8")
)
return filename, file
def send_mesh_file(self, filename: str, file):
listrec_mesh_id, dps_mesh_id = get_mesh_mailboxes(
json.loads(self.mesh_params["mesh_mappings"]),
self.mesh_params["listrec_dps_workflow"],
)
mesh = AWSMESHMailbox(listrec_mesh_id, self.log_object)
mesh.send_message(dps_mesh_id, filename, file, overwrite=True)
self.log_object.write_log(
"LRSDI04",
log_row_dict={
"job_id": self.job_id,
"input_file": self.input_file,
"dps_mesh_id": dps_mesh_id,
},
)
def update_job_info(self, practice_code: str, num_of_records: int):
"""Creates Job items in DynamoDb.
Args:
practice_code (str): GP practice code of GP extract.
num_of_records (int): Number of records in GP extract.
"""
try:
job = Jobs.IdIndex.query(self.job_id)
for j in job:
j.PracticeCode = practice_code
j.TotalRecords = num_of_records
j.save()
self.log_object.write_log(
"LRSDI03",
log_row_dict={
"upload_filename": self.input_file,
"job_id": self.job_id,
},
)
except (PynamoDBConnectionError, PutError, QueryError) as e:
self.log_object.write_log(
"LRSDC02",
log_row_dict={
"job_id": self.job_id,
"input_file": self.input_file,
"error": e,
},
)
self.response = error(
message="validate_and_forward failed to update job before sending to DPS",
internal_id=self.log_object.internal_id,
job_id=self.job_id,
file=self.input_file,
)
raise e
def update_job_status(self, status_code: str, practice_code=None):
"""Updates Job Status in DynamoDB"""
try:
job = Jobs.IdIndex.query(self.job_id)
if practice_code is None:
for j in job:
j.StatusId = status_code
j.save()
else:
for j in job:
j.StatusId = status_code
j.PracticeCode = practice_code
j.save()
self.log_object.write_log(
"LRSDI05",
log_row_dict={
"upload_filename": self.input_file,
"job_id": self.job_id,
"status_code": status_code,
},
)
except (PynamoDBConnectionError, PutError, QueryError) as e:
self.log_object.write_log(
"LRSDC03",
log_row_dict={
"job_id": self.job_id,
"input_file": self.input_file,
"error": e,
},
)
self.response = error(
message="validate_and_forward failed to update job after sending to DPS",
internal_id=self.log_object.internal_id,
job_id=self.job_id,
file=self.input_file,
)
raise e
def cleanup_files(self, bucket: str, key: str):
"""Cleanup file that have already been processed e.g. sent DPS file from s3 or rejected"""
try:
self.s3.delete_object(Bucket=bucket, Key=key)
self.log_object.write_log(
"LRSDI03",
log_row_dict={"key": key, "bucket": bucket},
)
except Exception as e:
self.log_object.write_log(
"LRSDC02",
log_row_dict={"key": key, "bucket": bucket},
)
raise e
def handle_rejection(self, prefix: InputFolderType, error_message: str = None):
"""Handles a rejected GP extract file,
stores input file and log in output bucket
Args:
prefix (str): S3 folder prefix for where to place the handled file
error_message (str): message to handle.
"""
rejection_output_bucket = self.system_config["REJECTION_BUCKET"]
key = f"{prefix.value}{self.filename}"
self.s3.copy_object(
Bucket=rejection_output_bucket,
Key=key,
CopySource={"Bucket": self.input_bucket, "Key": self.input_file},
)
if error_message:
log_filename = f"{self.filename}-RejectedFile-{self.job_id}.json"
log_key = f"{InputFolderType.REJECTED.value}logs/{log_filename}"
self.s3.put_object(
Body=error_message, Bucket=rejection_output_bucket, Key=log_key
)
self.cleanup_files(self.input_bucket, self.input_file)
def process_invalid_message(self, exception: Exception) -> dict:
"""Create a formatted rejection log dictionary based on the raised
exception, used to store log files
Args:
exception (Exception): exception raised
Returns:
dict: dictionary of failed file information
"""
rejection_log = {"file": self.input_file, "upload_date": str(self.upload_date)}
if isinstance(exception, InvalidStructure):
error = {
"error_type": InvalidErrorType.STRUCTURE.value,
"message": [exception.args[0]],
}
elif isinstance(exception, InvalidGPExtract):
msg = exception.args[0]
error = {
"error_type": InvalidErrorType.RECORDS.value,
"total_records": msg["total_records"],
"total_invalid_records": len(msg["invalid_records"]),
"message": msg["invalid_records"],
}
elif isinstance(exception, InvalidFilename):
msg = exception.args[0]["message"]
error = {"error_type": InvalidErrorType.FILENAME.value, "message": msg}
rejection_log.update(error)
return rejection_log
| StarcoderdataPython |
6516856 | <reponame>hitotsunorb1/Zope
##############################################################################
#
# Copyright (c) 2004, 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Adapter test fixtures
"""
from zope.component import adapter
from zope.interface import Interface
from zope.interface import implementer
class IAdaptable(Interface):
"""This is a Zope interface.
"""
def method():
"""This method will be adapted
"""
class IAdapted(Interface):
"""The interface we adapt to.
"""
def adaptedMethod():
"""A method to adapt.
"""
class IOrigin(Interface):
"""Something we'll adapt"""
class IDestination(Interface):
"""The result of an adaption"""
def method():
"""Do something"""
@implementer(IAdaptable)
class Adaptable(object):
def method(self):
return "The method"
@implementer(IAdapted)
@adapter(IAdaptable)
class Adapter(object):
def __init__(self, context):
self.context = context
def adaptedMethod(self):
return "Adapted: %s" % self.context.method()
@implementer(IOrigin)
class Origin(object):
pass
@implementer(IDestination)
class OriginalAdapter(object):
def __init__(self, context):
self.context = context
def method(self):
return "Original"
@implementer(IDestination)
class OverrideAdapter(object):
def __init__(self, context):
self.context = context
def method(self):
return "Overridden"
| StarcoderdataPython |
6632999 | <filename>scrapeops_python_logger/utils/error_handling.py
import functools
from scrapeops_python_logger.exceptions import ScrapeOpsAPIResponseError
def exception_handler(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ScrapeOpsAPIResponseError as e:
pass
except Exception as e:
pass
return wrapper
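if __name__ == "__main__":
    # Minimal usage sketch: "fetch_stats" is a hypothetical function used purely to
    # illustrate the decorator and is not part of the ScrapeOps API. The wrapped
    # callable swallows the raised exception and falls through to return None.
    @exception_handler
    def fetch_stats(endpoint):
        raise ValueError("simulated failure for %s" % endpoint)
    assert fetch_stats("https://example.invalid") is None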
| StarcoderdataPython |
12807607 | from .naming import frozendict
class TypeCheck(object):
"""
Encapsulate the results of a type check pass.
"""
class Error(Exception):
pass
@staticmethod
def success():
return TypeCheck(True, "")
@staticmethod
def failure(msg):
return TypeCheck(False, msg)
def __init__(self, success, message):
self._success = success
self._message = message
def message(self):
return self._message
def ok(self):
return self._success
def __repr__(self):
if self.ok():
return 'TypeCheck(OK)'
else:
return 'TypeCheck(FAILED): %s' % self._message
class TypeFactoryType(type):
_TYPE_FACTORIES = {}
def __new__(mcs, name, parents, attributes):
"""Args:
mcs(metaclass): the class object to create an instance of. Since this is actually
creating an instance of a type factory class, it's really a metaclass.
name (str): the name of the type to create.
parents (list(class)): the superclasses.
attributes (map(string, value)): the attribute dictionary for the new class.
"""
if 'PROVIDES' not in attributes:
return type.__new__(mcs, name, parents, attributes)
else:
provides = attributes['PROVIDES']
new_type = type.__new__(mcs, name, parents, attributes)
TypeFactoryType._TYPE_FACTORIES[provides] = new_type
return new_type
TypeFactoryClass = TypeFactoryType('TypeFactoryClass', (object,), {})
class TypeFactory(TypeFactoryClass):
@staticmethod
def get_factory(type_name):
assert type_name in TypeFactoryType._TYPE_FACTORIES, (
'Unknown type: %s, Existing factories: %s' % (
type_name, TypeFactoryType._TYPE_FACTORIES.keys()))
return TypeFactoryType._TYPE_FACTORIES[type_name]
@staticmethod
def create(type_dict, *type_parameters, **kwargs):
"""
Implemented by the TypeFactory to produce a new type.
Should return:
reified type
(with usable type.__name__)
"""
raise NotImplementedError("create unimplemented for: %s" % repr(type_parameters))
@staticmethod
def new(type_dict, type_factory, *type_parameters, **kwargs):
"""
Create a fully reified type from a type schema.
"""
type_tuple = (type_factory,) + type_parameters
if type_tuple not in type_dict:
factory = TypeFactory.get_factory(type_factory)
reified_type = factory.create(type_dict, *type_parameters, **kwargs)
type_dict[type_tuple] = reified_type
return type_dict[type_tuple]
@staticmethod
def wrapper(factory):
assert issubclass(factory, TypeFactory)
def wrapper_function(*type_parameters):
return TypeFactory.new({}, factory.PROVIDES, *tuple(
[typ.serialize_type() for typ in type_parameters]))
return wrapper_function
@staticmethod
def load(type_tuple, into=None):
"""
Determine all types touched by loading the type and deposit them into
the particular namespace.
"""
type_dict = {}
TypeFactory.new(type_dict, *type_tuple)
deposit = into if (into is not None and isinstance(into, dict)) else {}
for reified_type in type_dict.values():
deposit[reified_type.__name__] = reified_type
return deposit
@staticmethod
def load_json(json_list, into=None):
"""
Determine all types touched by loading the type and deposit them into
the particular namespace.
"""
def l2t(obj):
if isinstance(obj, list):
return tuple(l2t(L) for L in obj)
elif isinstance(obj, dict):
return frozendict(obj)
else:
return obj
return TypeFactory.load(l2t(json_list), into=into)
@staticmethod
def load_file(filename, into=None):
import json
with open(filename) as fp:
return TypeFactory.load_json(json.load(fp), into=into)
class TypeMetaclass(type):
def __instancecheck__(cls, other):
if not hasattr(other, 'type_parameters'):
return False
if not hasattr(other, '__class__'):
return False
if cls.__name__ != other.__class__.__name__:
return False
return cls.type_factory() == other.type_factory() and (
cls.type_parameters() == other.type_parameters())
def __new__(mcls, name, parents, attributes):
"""Creates a new Type object (an instance of TypeMetaclass).
Args:
name (str): the name of the new type.
parents (list(str)): a list of superclasses.
attributes (dict): a map from name to value for "parameters" defining
the new type.
"""
return type.__new__(mcls, name, parents, attributes)
class Type(object):
@classmethod
def type_factory(cls):
""" Return the name of the factory that produced this class. """
raise NotImplementedError
@classmethod
def type_parameters(cls):
""" Return the type parameters used to produce this class. """
raise NotImplementedError
@classmethod
def serialize_type(cls):
return (cls.type_factory(),) + cls.type_parameters()
@classmethod
def dump(cls, fp):
import json
json.dump(cls.serialize_type(), fp)
def check(self):
"""
Returns a TypeCheck object explaining whether or not a particular
instance of this object typechecks.
"""
raise NotImplementedError
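if __name__ == '__main__':
    # Minimal illustration of the TypeCheck result object defined above: success()
    # and failure() build the two possible outcomes, ok() reports them, and the
    # repr carries the failure message.
    good = TypeCheck.success()
    bad = TypeCheck.failure("expected an Integer, got a String")
    assert good.ok() and not bad.ok()
    print(repr(bad))  # TypeCheck(FAILED): expected an Integer, got a String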
| StarcoderdataPython |
5128329 | <filename>flybirds/core/driver/screen.py
# -*- coding: utf-8 -*-
"""
screen proxy
"""
from flybirds.core.global_context import GlobalContext
def screen_shot(path):
GlobalContext.screen.screen_shot(path)
def screen_link_to_behave(scenario, step_index, tag=None):
GlobalContext.screen.screen_link_to_behave(scenario, step_index, tag)
| StarcoderdataPython |
6423649 | # ABC002a
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
print(max(tuple(map(int, input().split()))))
| StarcoderdataPython |
185167 | <gh_stars>1-10
"""Module to store EnforceTyping exceptions."""
class EnforcedTypingError(TypeError):
"""Class to raise errors relating to static typing decorator."""
| StarcoderdataPython |
6646847 | <gh_stars>0
"""
Checks database validity.
Checks if word.word and word.bad_variant are equal (case insensitive)
Checks if there is only one uppercase letter in word.word and word.bad_variant
"""
import unittest
from models import Session, Word
class WordsTest(unittest.TestCase):
def test_word_and_bad_variant_equality(self):
session = Session()
for word in session.query(Word).all():
self.assertEqual(word.word.lower(), word.bad_variant.lower(),
msg=f"WORD ID: {word.id}")
session.close()
def test_only_one_stress_in_word(self):
session = Session()
for word in session.query(Word).all():
self.assertEqual(1, sum([char.isupper() for char in word.word]),
msg=f"WORD ID: {word.id}")
self.assertEqual(
1,
sum([char.isupper() for char in word.bad_variant]),
msg=f"WORD ID: {word.id}",
)
session.close()
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
5127881 | import os
import sys
import json
import webbrowser
import requests
from urllib import parse
GET_INFO = False
url = "https://api.bilibili.com/x/v2/dm/subtitle/draft/save"
bvid = "BV1qf4y1B7D3"
oid = "245340306"
if GET_INFO:
webbrowser.open_new_tab(f"http://api.bilibili.com/x/web-interface/view?bvid={bvid}")
sys.exit(0)
config = os.path.join(__file__,'..','..','config.json')
with open(config,'r') as f:
config = json.load(f)
csrf = ""
cookie = config.get('bilibili_cookie','')
for line in cookie.split(";"):
line = line.strip()
if line.startswith("bili_jct="):
csrf = line[len("bili_jct=") :]
with open('test/test.bcc','r') as f:
subtitle = f.read()
lang = "en-US"
payload = f'lan={lang}&submit=true&csrf={csrf}&sign=false&bvid={bvid}&type=1&oid={oid}&{parse.urlencode({"data":subtitle})}'
headers = {
'Cookie': 'bfe_id=6f285c892d9d3c1f8f020adad8bed553; bili_jct=9f0d2f125fb02375a2938581cb08a373; sid=kq8c5ukb; DedeUserID=12895307; DedeUserID__ckMd5=4786945f2e41f323; SESSDATA=90c4a3a0%2C1611422137%2C5c6a6*71',
'Content-Type': 'application/x-www-form-urlencoded'
}
response = requests.request("POST", url, headers=headers, data = payload)
print(response.text)
| StarcoderdataPython |
1818389 | <filename>website/webQuery.py<gh_stars>0
from flask import Flask, url_for, render_template, request, redirect
import sys
sys.path.append('../')
import retrival
sys.path.append('website')
import time
import urllib
app = Flask(__name__)
links = []
@app.route('/<nextQ>', methods=['POST', 'GET'])
def homeRedirect(nextQ):
error = None
if request.method == 'GET':
pastTime = time.time()
text = nextQ
linkObj = retrival.Retrival()
linkObj.searchRequest(str(text))
links = linkObj.flink
for link in links:
links[links.index(link)] = urllib.parse.unquote(link)
dym = linkObj.dym
if dym != [None]:
dym = ' '.join(dym)
t = str(time.time() - pastTime)
if links != []:
return render_template('index.html', name=links, timming=t)
elif links == []:
if dym == [None] or dym == []:
return render_template('index.html', error='Search Not Found!', timming=t)
elif dym != [None]:
return render_template('index.html', instead=dym, timming=t)
error = 'Invalid'
elif request.method == 'POST':
return redirect("", code=303)
return render_template('index.html', error=error)
@app.route('/', methods=['POST', 'GET'])
def home():
error = None
if request.method == 'POST':
if request.form['query']:
pastTime = time.time()
text = request.form['query']
linkObj = retrival.Retrival()
linkObj.searchRequest(str(text))
links = linkObj.flink
dym = linkObj.dym
if dym != [None]:
dym = ' '.join(dym)
for link in links:
links[links.index(link)] = urllib.parse.unquote(link)
t = str(time.time() - pastTime)
if links != []:
return render_template('index.html', name=links, timming=t)
elif links == []:
if dym == [None] or dym == []:
return render_template('index.html', error='Search Not Found!', timming=t)
elif dym != [None]:
return render_template('index.html', instead=dym, timming=t)
error = 'Invalid'
return render_template('index.html', error=error)
if __name__ == '__main__':
    app.run()
| StarcoderdataPython |
3287882 | #!/usr/bin/env python3
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from cartopy.io.img_tiles import OSM
imagery = OSM()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=imagery.crs)
ax.set_extent([-119.0, -116, 32, 34], crs=ccrs.PlateCarree())
ax.gridlines()
# #states_provinces = cfeature.NaturalEarthFeature(
# category='cultural',
# name='admin_1_states_provinces_lines',
# scale='10m',
# facecolor='none')
# ax.add_feature(states_provinces, edgecolor='gray')
# ax.add_feature(cfeature.BORDERS, linestyle=":")
#ax.add_feature(cfeature.STATES.with_scale('10m'))
land_10m = cfeature.NaturalEarthFeature('physical', 'land', '10m',
edgecolor='black',
facecolor='grey'
#facecolor=cfeature.COLORS['land']
)
coast_10m = cfeature.NaturalEarthFeature('physical', 'coastline', '10m',
edgecolor='black',
facecolor='white')
#ocean_10m = cfeature.NaturalEarthFeature('physical', 'ocean', '10m',
# edgecolor='black',
# facecolor='blue')
ax.add_feature(land_10m)
ax.add_feature(coast_10m)
#ax.add_feature(ocean_10m)
# ax = plt.axes(projection=ccrs.PlateCarree())
# ax.set_extent([-119.12, -116.2, 32.5, 33.9], crs=ccrs.PlateCarree())
#ax.add_image(imagery, 5)
#ax.coastlines('10m')
plt.show()
| StarcoderdataPython |
9792128 | <filename>snakeai/agent/test.py
class test():
def __init__(self):
self.flag = True
| StarcoderdataPython |
6684018 | <filename>mapper.py
from utils import get_env_vars
import yaml
import json
import os
class Mapper:
def __init__(self, env_vars, log) -> None:
self.log = log
self.mapping_file = env_vars['MAPPING_FILE']
self.mapping_folder = env_vars['MAPPING_FOLDER']
self.load()
def load(self):
'''
Load the mapping files with the mappings
'''
self.mapping_files = []
dir_path = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(dir_path, self.mapping_file)) as file:
self.mappings = yaml.load(file, Loader=yaml.FullLoader)
if self.mapping_folder[0] != '/':
self.mapping_folder = os.path.join(dir_path, self.mapping_folder)
if os.path.exists(self.mapping_folder):
for mapping_file in os.listdir(self.mapping_folder):
with open(os.path.join(self.mapping_folder, mapping_file)) as file:
self.mapping_files.append(
{"file": mapping_file, "content": json.load(file)})
def apply_mapping(self, data):
'''
Extend the object depending on the configured mappings
'''
for mapping in self.mappings:
if all(k in data for k in mapping['from']):
mapping_file = next(
x for x in self.mapping_files if x['file'] == mapping['file'])
if mapping_file is None:
self.log.info(f"Mapping file: '{mapping['file']}' not found")
continue
mapping_found = False
for map_item in mapping_file['content']:
if(all(data[k] == map_item[k] for k in mapping['from'])):
data[mapping['to']] = map_item[mapping['to']]
self.log.info(f"Mapping, {mapping}, Map_item: {map_item}")
mapping_found = True
break
if not mapping_found:
self.log.info(f"No mapping found for Mapping: {mapping}")
data[mapping['to']] = 'NO_MAPPING_FOUND'
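# A minimal sketch of the assumed configuration shape (illustrative only; the real
# MAPPING_FILE / MAPPING_FOLDER contents are not shown here):
#
#   mapping.yaml:
#     - from: [country, city]
#       to: region
#       file: regions.json
#
#   <MAPPING_FOLDER>/regions.json:
#     [{"country": "NL", "city": "Amsterdam", "region": "EU-West"}]
#
# With those files loaded, apply_mapping({"country": "NL", "city": "Amsterdam"})
# extends the dict with "region": "EU-West", or with "NO_MAPPING_FOUND" when no
# entry in regions.json matches every key listed under "from".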
| StarcoderdataPython |
9695459 | ##########################################################################
# MediPy - Copyright (C) Universite de Strasbourg
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
import logging
import operator
import numpy
from vtk import vtkDataArray, vtkImageData, vtkImageExport, vtkImageImport, vtkLookupTable
import vtk.util.numpy_support
from vtk.util import vtkConstants
import medipy.base
from medipy.vtk import vtkColorTransferFunctionWithAlpha, vtkEnhancedLookupTable
def array_to_vtk_image(array, copy_data, data_type="scalar"):
""" Create an ``vtkImage`` matching the contents and type of given array. If
``copy_data`` is ``True``, then the data of the will be copied. Otherwise the
data will be shared, and the array **must not** be destroyed before the
``vtkImage``. ``data_type`` specifies how the array should be interpreted :
either as a n-array of scalars (``data_type="scalar"``) or as an n-1 array
of vectors (``data_type="vector"``).
"""
if data_type not in ["scalar", "vector"] :
raise medipy.base.Exception("Unknown data_type: {0}".format(repr(data_type)))
if data_type == "scalar" :
ndim = array.ndim
elif data_type == "vector" :
ndim = array.ndim-1
if ndim > 3 :
raise medipy.base.Exception(
"Cannot convert a {0} array of dimension {1}".format(data_type,
array.ndim))
importer = vtkImageImport()
if numpy.iscomplexobj(array) :
# Get the first element of the array
element = array.flat.next()
scalar_type = vtk.util.numpy_support.get_vtk_array_type(element.real.dtype)
else :
scalar_type = vtk.util.numpy_support.get_vtk_array_type(array.dtype)
importer.SetDataScalarType(scalar_type)
if data_type == "scalar" :
number_of_components = 1
elif data_type == "vector" :
number_of_components = array.shape[ndim]
if numpy.iscomplexobj(array) :
number_of_components *= 2
importer.SetNumberOfScalarComponents(number_of_components)
extent = 6*[0]
extent[1] = array.shape[ndim-1]-1
if ndim >= 2 :
extent[3] = array.shape[ndim-2]-1
if ndim >= 3 :
extent[5] = array.shape[ndim-3]-1
importer.SetDataExtent(extent)
importer.SetWholeExtent(extent)
size = array.itemsize*reduce(operator.mul, array.shape, 1)
if copy_data :
importer.CopyImportVoidPointer(array, size)
else :
importer.SetImportVoidPointer(array, size)
importer.Update()
return importer.GetOutput()
def vtk_image_to_array(vtk_image) :
""" Create an ``numpy.ndarray`` matching the contents and type of given image.
If the number of scalars components in the image is greater than 1, then
the ndarray will be 4D, otherwise it will be 3D.
"""
exporter = vtkImageExport()
exporter.SetInput(vtk_image)
# Create the destination array
extent = vtk_image.GetWholeExtent()
shape = [extent[5]-extent[4]+1,
extent[3]-extent[2]+1,
extent[1]-extent[0]+1]
if vtk_image.GetNumberOfScalarComponents() > 1:
shape += [vtk_image.GetNumberOfScalarComponents()]
dtype = vtk.util.numpy_support.get_numpy_array_type(vtk_image.GetScalarType())
array = numpy.zeros(shape, dtype=dtype)
exporter.Export(array)
return array
def vtk_image_to_medipy_image(vtk_image, medipy_image):
""" Create an ``medipy.base.Image`` matching the contents and type of given image.
"""
if medipy_image is None :
medipy_image = medipy.base.Image(
(0,0,0),
vtk.util.numpy_support.get_numpy_array_type(vtk_image.GetScalarType()))
medipy_image.data = vtk_image_to_array(vtk_image)
if vtk_image.GetNumberOfScalarComponents() > 1 :
medipy_image.data_type = "vector"
else :
medipy_image.data_type = "scalar"
medipy_image.origin = [x for x in reversed(vtk_image.GetOrigin())]
medipy_image.spacing = [x for x in reversed(vtk_image.GetSpacing())]
# VTK images are not oriented, assume identity
medipy_image.direction = numpy.identity(3)
return medipy_image
def build_vtk_colormap(colormap, vtk_colormap=None) :
""" Build either a ``vtkLookupTable`` or a ``vtkColorTransferFunctionWithAlpha``
from the given colormap. The colormap is specified as a
custom table -- which must respect the formats of the dictionaries
defined in :func:`build_lookup_table` and
:func:`build_color_transfer_function_with_alpha`. If ``vtk_colormap``
is ``None``, a new vtk object is created, otherwise ``vtk_colormap``
is used as a container.
"""
if type(colormap) not in [list, tuple] :
colormap_type = str(type(colormap))
raise medipy.base.Exception("Cannot process colormap of type %s"%(colormap_type))
if type(colormap[0][0]) in [list, tuple] :
# Stage colormap
vtk_colormap = build_color_transfer_function_with_alpha(colormap, vtk_colormap)
else :
# "Regular" colormap
vtk_colormap = build_lookup_table(colormap, vtk_colormap)
return vtk_colormap
def build_lookup_table(colormap, vtk_colormap=None):
""" Build a ``vtkLookupTable`` from a colormap, given as an array of colors.
"""
if vtk_colormap is None :
vtk_colormap = vtkEnhancedLookupTable()
vtk_colormap.SetRampToLinear()
# Allocate a new colormap to avoid numerous ModifiedEvents to be fired by
# the original colormap
new_colormap = vtkEnhancedLookupTable()
new_colormap.DeepCopy(vtk_colormap)
new_colormap.Allocate(len(colormap), len(colormap))
for i in range(len(colormap)) :
new_colormap.SetTableValue(i, colormap[i])
vtk_colormap.DeepCopy(new_colormap)
vtk_colormap.Modified()
return vtk_colormap
def build_color_transfer_function_with_alpha(colormap, vtk_colormap=None):
""" Build a ``vtkColorTransferFunctionWithAlpha`` from an array of (range, color)
"""
if vtk_colormap is None :
vtk_colormap = vtkColorTransferFunctionWithAlpha()
vtk_colormap.RemoveAllPoints()
special_points = []
for extent, (r,g,b,a) in colormap:
if extent[0] == extent[1]:
special_points.append((extent, (r,g,b,a)))
else:
try :
vtk_colormap.AddRGBASegment(extent[0], r, g, b, a,
extent[1], r, g, b, a)
except :
logging.debug("%s %s"%(extent, (r,g,b,a)))
for extent, color in special_points:
vtk_colormap.AddRGBAPoint(extent[0], r, g, b, a)
return vtk_colormap
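if __name__ == "__main__":
    # Minimal round-trip sketch (illustrative, assuming the Python 2 / VTK
    # environment this module targets): build a small 3D volume, convert it to a
    # vtkImage and back, and check that shape and dtype survive the conversion.
    _volume = numpy.arange(2 * 3 * 4, dtype=numpy.float32).reshape(2, 3, 4)
    _vtk_image = array_to_vtk_image(_volume, copy_data=True, data_type="scalar")
    _restored = vtk_image_to_array(_vtk_image)
    assert _restored.shape == _volume.shape and _restored.dtype == _volume.dtype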
| StarcoderdataPython |
8132809 | <filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/scripts/xsslint/xsslint/django_linter.py
"""
Classes for Django Template Linting.
"""
import re
from xsslint.utils import Expression, StringLines
from xsslint.reporting import ExpressionRuleViolation
class TransExpression(Expression):
"""
The expression handling trans tag
"""
def __init__(self, ruleset, results, *args, **kwargs):
super().__init__(*args, **kwargs)
self.string_lines = StringLines(kwargs['template'])
self.ruleset = ruleset
self.results = results
def validate_expression(self, template_file, expressions=None):
"""
Validates trans tag expression for missing escaping filter
Arguments:
template_file: The content of the Django template.
results: Violations to be generated.
Returns:
None
"""
trans_expr = self.expression_inner
# extracting translation string message
trans_var_name_used, trans_expr_msg = self.process_translation_string(trans_expr)
if not trans_var_name_used or not trans_expr_msg:
return
# Checking if trans tag has interpolated variables eg {} in translations string.
# and testing for possible interpolate_html tag used for it.
if self.check_string_interpolation(trans_expr_msg,
trans_var_name_used,
expressions,
template_file):
return
escape_expr_start_pos, escape_expr_end_pos = self.find_filter_tag(template_file)
if not escape_expr_start_pos or not escape_expr_end_pos:
return
self.process_escape_filter_tag(template_file=template_file,
escape_expr_start_pos=escape_expr_start_pos,
escape_expr_end_pos=escape_expr_end_pos,
trans_var_name_used=trans_var_name_used)
def process_translation_string(self, trans_expr):
"""
Process translation string into string and variable name used
Arguments:
trans_expr: Translation expression inside {% %}
Returns:
None
"""
quote = re.search(r"""\s*['"].*['"]\s*""", trans_expr, re.I)
if not quote:
_add_violations(self.results,
self.ruleset.django_trans_escape_filter_parse_error,
self)
return None, None
trans_expr_msg = trans_expr[quote.start():quote.end()].strip()
if _check_is_string_has_html(trans_expr_msg):
_add_violations(self.results,
self.ruleset.django_html_interpolation_missing,
self)
return None, None
pos = trans_expr.find('as', quote.end())
if pos == -1:
_add_violations(self.results, self.ruleset.django_trans_missing_escape, self)
return None, None
trans_var_name_used = trans_expr[pos + len('as'):].strip()
return trans_var_name_used, trans_expr_msg
def check_string_interpolation(self, trans_expr_msg, trans_var_name_used, expressions, template_file):
"""
Checks whether the translation string uses interpolation variables, e.g. {variable},
without using the interpolate_html tag to escape them
Arguments:
trans_expr_msg: Translation string in quotes
trans_var_name_used: Translation variable used
expressions: List of expressions found during django file processing
template_file: django template file
Returns:
True: In case it finds interpolated variables
False: No interpolation variables found
"""
if _check_is_string_has_variables(trans_expr_msg):
interpolate_tag, html_interpolated = _is_html_interpolated(trans_var_name_used,
expressions)
if not html_interpolated:
_add_violations(self.results, self.ruleset.django_html_interpolation_missing, self)
if interpolate_tag:
interpolate_tag.validate_expression(template_file, expressions)
return True
return
def find_filter_tag(self, template_file):
"""
Finds if there is force_filter tag applied
Arguments:
template_file: django template file
Returns:
(None, None): In case there is a violations
(start, end): Found filter tag start and end position
"""
trans_expr_lineno = self.string_lines.index_to_line_number(self.start_index)
escape_expr_start_pos = template_file.find('{{', self.end_index)
if escape_expr_start_pos == -1:
_add_violations(self.results,
self.ruleset.django_trans_missing_escape,
self)
return None, None
# {{ found but should be on the same line as trans tag
trans_expr_filter_lineno = self.string_lines.index_to_line_number(escape_expr_start_pos)
if trans_expr_filter_lineno != trans_expr_lineno:
_add_violations(self.results,
self.ruleset.django_trans_missing_escape,
self)
return None, None
escape_expr_end_pos = template_file.find('}}', escape_expr_start_pos)
# couldn't find matching }}
if escape_expr_end_pos == -1:
_add_violations(self.results,
self.ruleset.django_trans_missing_escape,
self)
return None, None
# }} should be also on the same line
trans_expr_filter_lineno = self.string_lines.index_to_line_number(escape_expr_end_pos)
if trans_expr_filter_lineno != trans_expr_lineno:
_add_violations(self.results,
self.ruleset.django_trans_missing_escape,
self)
return None, None
return escape_expr_start_pos, escape_expr_end_pos
def process_escape_filter_tag(self, **kwargs):
"""
Checks if the escape filter and process it for violations
Arguments:
kwargs: Having force_filter expression start, end, trans expression variable
used and templates
Returns:
None: If found any violations
"""
template_file = kwargs['template_file']
escape_expr_start_pos = kwargs['escape_expr_start_pos']
escape_expr_end_pos = kwargs['escape_expr_end_pos']
trans_var_name_used = kwargs['trans_var_name_used']
escape_expr = template_file[escape_expr_start_pos + len('{{'):
escape_expr_end_pos].strip(' ')
# check escape expression has the right variable and its escaped properly
# with force_escape filter
if '|' not in escape_expr or len(escape_expr.split('|')) != 2:
_add_violations(self.results,
self.ruleset.django_trans_invalid_escape_filter,
self)
return
escape_expr_var_used, escape_filter = \
escape_expr.split('|')[0].strip(' '), escape_expr.split('|')[1].strip(' ')
if trans_var_name_used != escape_expr_var_used:
_add_violations(self.results,
self.ruleset.django_trans_escape_variable_mismatch,
self)
return
if escape_filter != 'force_escape':
_add_violations(self.results,
self.ruleset.django_trans_invalid_escape_filter,
self)
return
class BlockTransExpression(Expression):
"""
The expression handling blocktrans tag
"""
def __init__(self, ruleset, results, *args, **kwargs):
super().__init__(*args, **kwargs)
self.string_lines = StringLines(kwargs['template'])
self.ruleset = ruleset
self.results = results
def validate_expression(self, template_file, expressions=None):
"""
Validates blocktrans tag expression for missing escaping filter
Arguments:
template_file: The content of the Django template.
results: Violations to be generated.
Returns:
None
"""
if not self._process_block(template_file, expressions):
return
filter_start_pos = template_file.rfind('{%', 0, self.start_index)
if filter_start_pos == -1:
_add_violations(self.results,
self.ruleset.django_blocktrans_missing_escape_filter,
self)
return
filter_end_pos = template_file.find('%}', filter_start_pos)
if filter_end_pos > self.start_index:
_add_violations(self.results,
self.ruleset.django_blocktrans_escape_filter_parse_error,
self)
return
escape_filter = template_file[filter_start_pos:filter_end_pos + 2]
if len(escape_filter) < len('{%filter force_escape%}'):
_add_violations(self.results,
self.ruleset.django_blocktrans_missing_escape_filter,
self)
return
escape_filter = escape_filter[2:-2].strip()
escape_filter = escape_filter.split(' ')
if len(escape_filter) != 2:
_add_violations(self.results,
self.ruleset.django_blocktrans_missing_escape_filter,
self)
return
if escape_filter[0] != 'filter' or escape_filter[1] != 'force_escape':
_add_violations(self.results,
self.ruleset.django_blocktrans_missing_escape_filter,
self)
return
def _process_block(self, template_file, expressions):
"""
Process blocktrans..endblocktrans block
Arguments:
template_file: The content of the Django template.
Returns:
None
"""
blocktrans_string = self._extract_translation_msg(template_file)
# if no string extracted might have hit a parse error just return
if not blocktrans_string:
return
if _check_is_string_has_html(blocktrans_string):
_add_violations(self.results, self.ruleset.django_html_interpolation_missing, self)
return
# Checking if blocktrans tag has interpolated variables eg {}
# in translations string. Would be tested for
# possible html interpolation done somewhere else.
if _check_is_string_has_variables(blocktrans_string):
blocktrans_expr = self.expression_inner
pos = blocktrans_expr.find('asvar')
if pos == -1:
_add_violations(self.results, self.ruleset.django_html_interpolation_missing, self)
return
trans_var_name_used = blocktrans_expr[pos + len('asvar'):].strip()
# check for interpolate_html expression for the variable in trans expression
interpolate_tag, html_interpolated = _is_html_interpolated(trans_var_name_used,
expressions)
if not html_interpolated:
_add_violations(self.results, self.ruleset.django_html_interpolation_missing, self)
if interpolate_tag:
interpolate_tag.validate_expression(template_file, expressions)
return
return True
def _extract_translation_msg(self, template_file):
endblocktrans = re.compile(r'{%\s*endblocktrans.*?%}').search(template_file,
self.end_index)
if not endblocktrans.start():
_add_violations(self.results,
self.ruleset.django_blocktrans_parse_error,
self)
return
return template_file[self.end_index + 2: endblocktrans.start()].strip(' ')
class HtmlInterpolateExpression(Expression):
"""
The expression handling the interpolate_html tag
"""
def __init__(self, ruleset, results, *args, **kwargs):
super().__init__(*args, **kwargs)
self.string_lines = StringLines(kwargs['template'])
self.ruleset = ruleset
self.results = results
self.validated = False
self.interpolated_string_var = None
trans_expr = self.expression_inner
# extracting interpolated variable string name
expr_list = trans_expr.split(' ')
if len(expr_list) < 2:
_add_violations(self.results,
self.ruleset.django_html_interpolation_invalid_tag,
self)
return
self.interpolated_string_var = expr_list[1]
def validate_expression(self, template_file, expressions=None):
"""
Validates interpolate_html tag expression for missing safe filter for html tags
Arguments:
template_file: The content of the Django template.
results: Violations to be generated.
Returns:
None
"""
# if the expression is already validated, we would not be processing it again
if not self.interpolated_string_var or self.validated:
return
self.validated = True
trans_expr = self.expression_inner
html_tags = re.finditer(r"""\s*['"]</?[a-zA-Z0-9 =\-'_"]+.*?>['"]""",
trans_expr, re.I)
for html_tag in html_tags:
tag_end = html_tag.end()
escape_filter = trans_expr[tag_end:tag_end + len('|safe')]
if escape_filter != '|safe':
_add_violations(self.results,
self.ruleset.django_html_interpolation_missing_safe_filter,
self)
return
return True
def _check_is_string_has_html(trans_expr):
html_tags = re.search(r"""</?[a-zA-Z0-9 =\-'_":]+>""", trans_expr, re.I)
if html_tags:
return True
def _check_is_string_has_variables(trans_expr):
var_tags = re.search(r"""(?<!{){(?!{)[a-zA-Z0-9 =\-'_":]+(?<!})}(?!})""", trans_expr, re.I)
if var_tags:
return True
def _is_html_interpolated(trans_var_name_used, expressions):
html_interpolated = False
interpolate_tag_expr = None
for expr in expressions:
if isinstance(expr, HtmlInterpolateExpression):
if expr.interpolated_string_var == trans_var_name_used:
html_interpolated = True
interpolate_tag_expr = expr
return interpolate_tag_expr, html_interpolated
def _add_violations(results, rule_violation, self):
results.violations.append(ExpressionRuleViolation(
rule_violation, self
))
| StarcoderdataPython |
296744 | <reponame>chars32/edx_python
# Write a function that takes a two-dimensional list (list of lists) of numbers as an argument and returns a list
# which includes the sum of each row. You can assume that the number of columns in each row is the same.
def sum_of_two_lists_row(list2d):
final_list = []
for list_numbers in list2d:
sum_list = 0
for number in list_numbers:
sum_list += number
final_list.append(sum_list)
return final_list
print(sum_of_two_lists_row([[1, 2], [3, 4]]))
| StarcoderdataPython |
1926137 | <gh_stars>0
# Copyright (c) 2018 Ansible, Inc.
# All Rights Reserved.
from ansiblelint import AnsibleLintRule
class TaskHasNameRule(AnsibleLintRule):
id = '502'
shortdesc = 'All tasks should be named'
description = 'All tasks should have a distinct name for readability ' + \
'and for --start-at-task to work'
tags = ['task']
_nameless_tasks = ['meta', 'debug', 'include_role', 'import_role',
'include_tasks', 'import_tasks']
def matchtask(self, file, task):
return (not task.get('name') and
task["action"]["__ansible_module__"] not in self._nameless_tasks)
| StarcoderdataPython |
259076 | <reponame>muglyon/https-github.com-muglyon-DCOP-Decentralised-Control-of-Intelligent-Devices
#! python3
# monitoring_area.py - Modelisation of a room
import operator
import abc
import constants as c
from random import randint
class MonitoringArea(object):
__metaclass__ = abc.ABCMeta
def __init__(self, id_monitored_area):
self.id = id_monitored_area
self.front_neighbor = None
self.right_neighbor = None
self.left_neighbor = None
self.current_v = 0
self.previous_v = 0
self.tau = randint(c.MIN_TAU_VALUE, c.INFINITY)
def get_neighbors_id_sorted(self):
"""
Get all neighbors id of the agent sorted by degree (decreasing)
:return: neighbors id list sorted by degree
:rtype: list
"""
return self.get_neighbors_id_sorted_except(-1)
def get_neighbors_id_sorted_except(self, agent_id):
"""
Get all neighbors id EXCEPT <agent_id>
:param agent_id: id of the agent to ignore
:type agent_id: integer
:return: neighbors id list sorted by degree
:rtype: list
"""
neighbors = {}
if self.left_neighbor is not None and self.left_neighbor.id != int(agent_id):
neighbors[str(self.left_neighbor.id)] = self.left_neighbor.get_degree()
if self.right_neighbor is not None and self.right_neighbor.id != int(agent_id):
neighbors[str(self.right_neighbor.id)] = self.right_neighbor.get_degree()
if self.front_neighbor is not None and self.front_neighbor.id != int(agent_id):
neighbors[str(self.front_neighbor.id)] = self.front_neighbor.get_degree()
neighbors = sorted(neighbors.items(), key=lambda x: x[1], reverse=True)
return [int(x) for x, _ in neighbors]
def get_degree(self):
count = 0
if self.left_neighbor is not None:
count += 1
if self.right_neighbor is not None:
count += 1
if self.front_neighbor is not None:
count += 1
return count
def to_string_neighbors(self):
"""
To String for Neighbors
:return: neighbors in string format
:rtype: string
"""
string = "monitored_area " + str(self.id) + " : \n"
if self.left_neighbor is not None:
string += " | LeftNeighbor : " + str(self.left_neighbor.id) + "\n"
if self.right_neighbor is not None:
string += " | RightNeighbor : " + str(self.right_neighbor.id) + "\n"
if self.front_neighbor is not None:
string += " | FrontNeighbor : " + str(self.front_neighbor.id) + "\n"
return string
def to_json_format(self):
data = {"id": self.id, "tau": self.tau, "devices": []}
for device in self.device_list:
data["devices"].append(device.to_json_format())
return data
@abc.abstractmethod
def attach_observer(self, observer):
return
@abc.abstractmethod
def add_or_update_device(self):
return
@abc.abstractmethod
def pop_or_reprogram_devices(self):
return
@abc.abstractmethod
def increment_time(self, minutes):
return
@abc.abstractmethod
def has_no_devices(self):
return
@abc.abstractmethod
def set_device_in_critic(self):
return
| StarcoderdataPython |
220233 | <filename>src/pycompiler/interpreter.py
#!/usr/bin/env python
##
# <NAME>
# dave at drogers dot us
# This software is for instructive purposes. Use at your own risk - not meant to be robust at all.
# Feel free to use anything, credit is appreciated if warranted.
##
import sys, os, StringIO
parent_dir = os.path.abspath( os.path.join(__file__, '../..') )
if not parent_dir in sys.path:
sys.path.append(parent_dir)
__all__ =['PlhInterpreter']
from globals import *
from util import *
from translator import *
from vm import *
from scanner import *
class PlhInterpreter:
def __init__(self, vm=None, translator=None, scanner=None, outputdir=None):
if not outputdir:
self.outputdir = os.path.join(os.getcwd(), 'interpreter_files')
else:
self.outputdir = outputdir
if not os.path.exists(self.outputdir):
os.mkdir(self.outputdir)
self.translator = translator
self.vm = vm
self.trans = translator
self.scanner = scanner
self.tokfile = os.path.join(self.outputdir, 'tokfile')
self.codefile = os.path.join(self.outputdir, 'codefile')
self.datafile = os.path.join(self.outputdir, 'datafile')
self.tr_outfile = os.path.join(self.outputdir, 'tr_outfile')
self.vm_outfile = os.path.join(self.outputdir, 'vm_outfile')
def run_file(self, srcfile, interactive=True):
"""Run srcfile through the scanner, parser, and vm
@param srcfile: a PL/H source code file
@param interactive: if False, srcfile is run and any stdout is returned
but stdin is not available via keyboard
@return: string containing stdout from running program if interactive False
"""
self.scanner = Scanner(srcfile=srcfile, tokfile=self.tokfile)
self.scanner.scan()
self.trans = PlhTranslator(tokensource=self.tokfile,
codefile=self.codefile,
datafile=self.datafile,
outfile=self.tr_outfile)
self.trans.parse()
self.vm = VM(outfile=self.vm_outfile,
codefile=self.codefile,
datafile=self.datafile)
if interactive:
self.vm.execute()
else:
sys.stdout.flush()
old_stdout = sys.stdout
retstring = ''
try:
sys.stdout = StringIO.StringIO()
self.vm.execute()
sys.stdout.flush()
retstring = sys.stdout.getvalue()
finally:
sys.stdout = old_stdout
return retstring
if __name__ == '__main__':
outputdir = os.path.join(tempdir, 'interp_main_files')
plh = PlhInterpreter(outputdir=outputdir)
srcfile = os.path.join(outputdir, 'interp_main.plh')
src = \
"""declare x(5);
numtosort=5;
i=1;
:in:
get x(i);
i=i+1;
if i<=numtosort then goto in;
i=1;
:nexti:
j=i+1;
:nextj:
if x(j)<x(i) then
do;
temp=x(i);
x(i)=x(j);
x(j)=temp;
end;
j=j+1;
if j<=numtosort then goto nextj;
put x(i);
i=i+1;
if i<=numtosort-1 then goto nexti;
put x(numtosort);
stop;
"""
open(srcfile, 'w').write(src)
plh.run_file(srcfile)
print plh.trans.symbol_table_str()
| StarcoderdataPython |
8090771 | <filename>python-client/swagger_client/models/com_github_appscode_stash_apis_stash_v1alpha1_recovery_spec.py
# coding: utf-8
"""
stash-server
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_backend import ComGithubAppscodeStashApisStashV1alpha1Backend # noqa: F401,E501
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_local_spec import ComGithubAppscodeStashApisStashV1alpha1LocalSpec # noqa: F401,E501
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_local_typed_reference import ComGithubAppscodeStashApisStashV1alpha1LocalTypedReference # noqa: F401,E501
from swagger_client.models.io_k8s_api_core_v1_local_object_reference import IoK8sApiCoreV1LocalObjectReference # noqa: F401,E501
class ComGithubAppscodeStashApisStashV1alpha1RecoverySpec(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'backend': 'ComGithubAppscodeStashApisStashV1alpha1Backend',
'image_pull_secrets': 'list[IoK8sApiCoreV1LocalObjectReference]',
'node_name': 'str',
'paths': 'list[str]',
'pod_ordinal': 'str',
'recovered_volumes': 'list[ComGithubAppscodeStashApisStashV1alpha1LocalSpec]',
'workload': 'ComGithubAppscodeStashApisStashV1alpha1LocalTypedReference'
}
attribute_map = {
'backend': 'backend',
'image_pull_secrets': 'imagePullSecrets',
'node_name': 'nodeName',
'paths': 'paths',
'pod_ordinal': 'podOrdinal',
'recovered_volumes': 'recoveredVolumes',
'workload': 'workload'
}
def __init__(self, backend=None, image_pull_secrets=None, node_name=None, paths=None, pod_ordinal=None, recovered_volumes=None, workload=None): # noqa: E501
"""ComGithubAppscodeStashApisStashV1alpha1RecoverySpec - a model defined in Swagger""" # noqa: E501
self._backend = None
self._image_pull_secrets = None
self._node_name = None
self._paths = None
self._pod_ordinal = None
self._recovered_volumes = None
self._workload = None
self.discriminator = None
if backend is not None:
self.backend = backend
if image_pull_secrets is not None:
self.image_pull_secrets = image_pull_secrets
if node_name is not None:
self.node_name = node_name
if paths is not None:
self.paths = paths
if pod_ordinal is not None:
self.pod_ordinal = pod_ordinal
if recovered_volumes is not None:
self.recovered_volumes = recovered_volumes
if workload is not None:
self.workload = workload
@property
def backend(self):
"""Gets the backend of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:return: The backend of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:rtype: ComGithubAppscodeStashApisStashV1alpha1Backend
"""
return self._backend
@backend.setter
def backend(self, backend):
"""Sets the backend of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec.
:param backend: The backend of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:type: ComGithubAppscodeStashApisStashV1alpha1Backend
"""
self._backend = backend
@property
def image_pull_secrets(self):
"""Gets the image_pull_secrets of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:return: The image_pull_secrets of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:rtype: list[IoK8sApiCoreV1LocalObjectReference]
"""
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, image_pull_secrets):
"""Sets the image_pull_secrets of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec.
:param image_pull_secrets: The image_pull_secrets of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:type: list[IoK8sApiCoreV1LocalObjectReference]
"""
self._image_pull_secrets = image_pull_secrets
@property
def node_name(self):
"""Gets the node_name of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:return: The node_name of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:rtype: str
"""
return self._node_name
@node_name.setter
def node_name(self, node_name):
"""Sets the node_name of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec.
:param node_name: The node_name of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:type: str
"""
self._node_name = node_name
@property
def paths(self):
"""Gets the paths of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:return: The paths of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:rtype: list[str]
"""
return self._paths
@paths.setter
def paths(self, paths):
"""Sets the paths of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec.
:param paths: The paths of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:type: list[str]
"""
self._paths = paths
@property
def pod_ordinal(self):
"""Gets the pod_ordinal of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:return: The pod_ordinal of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:rtype: str
"""
return self._pod_ordinal
@pod_ordinal.setter
def pod_ordinal(self, pod_ordinal):
"""Sets the pod_ordinal of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec.
:param pod_ordinal: The pod_ordinal of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:type: str
"""
self._pod_ordinal = pod_ordinal
@property
def recovered_volumes(self):
"""Gets the recovered_volumes of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:return: The recovered_volumes of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:rtype: list[ComGithubAppscodeStashApisStashV1alpha1LocalSpec]
"""
return self._recovered_volumes
@recovered_volumes.setter
def recovered_volumes(self, recovered_volumes):
"""Sets the recovered_volumes of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec.
:param recovered_volumes: The recovered_volumes of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:type: list[ComGithubAppscodeStashApisStashV1alpha1LocalSpec]
"""
self._recovered_volumes = recovered_volumes
@property
def workload(self):
"""Gets the workload of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:return: The workload of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:rtype: ComGithubAppscodeStashApisStashV1alpha1LocalTypedReference
"""
return self._workload
@workload.setter
def workload(self, workload):
"""Sets the workload of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec.
:param workload: The workload of this ComGithubAppscodeStashApisStashV1alpha1RecoverySpec. # noqa: E501
:type: ComGithubAppscodeStashApisStashV1alpha1LocalTypedReference
"""
self._workload = workload
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ComGithubAppscodeStashApisStashV1alpha1RecoverySpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| StarcoderdataPython |
9668258 | <reponame>GeorgeVelikov/Surffee<filename>surveys/views/annotation/operation_add_one.py<gh_stars>0
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from django.views.generic import UpdateView
from ...models.survey import Survey, Choice
from ...models.annotation import Annotation, Classification, Word
from ..helper import check_existing_word_dominates_new_word, check_overwrite_existing_word, create_new_classification, \
delete_overlay_word_classifications
class AddOne(UpdateView):
def get(self, request, *args, **kwargs):
if not request.user.pk:
raise PermissionDenied("You are not logged in")
self.object = None
survey = Survey.objects.get(pk=self.kwargs.get('survey_id'))
annotation = Annotation.objects.get(pk=self.kwargs.get('annotation_id'))
choice = Choice.objects.get(pk=self.kwargs.get('choice_id'))
if request.user.pk != survey.creator.pk:
raise PermissionDenied("You do not own the survey")
if request.user.pk != annotation.creator.pk:
raise PermissionDenied("You do not own the annotation")
if request.user.pk != choice.question.survey.creator.pk:
raise PermissionDenied("You do not own the choice")
classification_name = self.kwargs.get('class')
word_text = self.kwargs.get('word_text')
leftover_word = choice.choice_text
word_count_track = 0
while leftover_word.find(word_text) >= 0:
word_start = leftover_word.find(word_text) + word_count_track
word_end = word_start + len(word_text)
classification_annotation = Classification.objects.filter(name=classification_name,
annotation=annotation)
if classification_annotation.exists():
check_overwrite_existing_word(choice, classification_annotation, word_text)
check_existing_word_dominates_new_word(choice, classification_annotation, annotation, word_text, survey.id)
classification = Classification.objects.get(name=classification_name,
annotation=annotation)
else:
classification = create_new_classification(classification_name, annotation)
classification.save()
delete_overlay_word_classifications(choice, word_start, word_end)
word = Word.objects.create(text=word_text,
start=word_start,
end=word_end,
choice=choice,
classification=classification)
leftover_word = choice.choice_text[word_end::]
word_count_track += (word_end - word_count_track)
word.save()
return redirect('/surveys/'+str(survey.id)+'/annotate/'+str(annotation.id))
| StarcoderdataPython |
3340539 | <gh_stars>0
version = "3.28"
| StarcoderdataPython |
3214514 | import argparse
import pandas as pd
from tqdm import tqdm
from PIL import Image
import numpy as np
from contextualized_topic_models.datasets.dataset import CTMDataset
from sklearn.metrics.pairwise import cosine_similarity
from utils import load_model
from sentence_transformers import SentenceTransformer, util
import pickle
import scipy
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
parser = argparse.ArgumentParser()
parser.add_argument("--text_model", type=str, required=True)
parser.add_argument("--image_model", type=str, required=True)
parser.add_argument("--tp", type=str, required=True)
args = parser.parse_args()
text_list = pd.read_csv(open("./Data2021/MediaEvalNewsImagesBatch04articles.tsv", "r"), sep="\t")
image_list = pd.read_csv(open("./Data2021/MediaEvalNewsImagesBatch04images.tsv", "r"), sep="\t")
texts = []
text_ids = []
for _, r in tqdm(text_list.iterrows(), desc="Loading texts"):
if not pd.isnull(r.text):
texts.append(r.title + ". " + r.text)
text_ids.append(r.articleID)
images = []
image_ids = []
for _, r in tqdm(image_list.iterrows(), desc="loading images"):
if not pd.isnull(r.imgFile) and os.path.exists(os.path.join("./Data2021/images", r.imgFile)):
img = Image.open(os.path.join("./Data2021/images", r.imgFile))
img = img.convert("RGB")
images.append(img)
image_ids.append(r.imgFile)
tp = pickle.load(open(args.tp, "rb"))
ctm = load_model(args.text_model, len(tp.vocab))
vctm = load_model(args.image_model, len(tp.vocab))
testing_dataset = tp.transform(text_for_contextual=texts)
img_model = SentenceTransformer('clip-ViT-B-32')
img_emb = img_model.encode(images, batch_size=128, convert_to_tensor=True, show_progress_bar=True)
img_emb = np.array(img_emb.cpu())
image_test_bow_embeddings = scipy.sparse.csr_matrix(np.zeros((len(img_emb), 1)))
image_testing_dataset = CTMDataset(X_contextual = img_emb, X_bow=image_test_bow_embeddings ,idx2token = testing_dataset.idx2token)
test_topic_dist = ctm.get_doc_topic_distribution(testing_dataset, n_samples=20)
v_test_topic_dist = vctm.get_doc_topic_distribution(image_testing_dataset, n_samples=20)
dist_sim = cosine_similarity(test_topic_dist, v_test_topic_dist)
model_type, n_topics = os.path.basename(args.text_model).split("_")[:2]
with open(model_type+"_"+n_topics+"_submission.csv", "w") as out:
for doc in tqdm(range(len(texts)), desc="Searching images"):
ind_sims = sorted([(s,i) for i, s in enumerate(dist_sim[doc])], reverse=True)
ind_sims = [i[1] for i in ind_sims]
img_ids = [image_ids[i] for i in ind_sims]
print(str(int(text_ids[doc]))+"\t"+"\t".join(img_ids), file=out)
| StarcoderdataPython |
1804103 | <reponame>AndreaVoltan/MyKratos7.0
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics
# Import applications
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
# Import base class file
import structural_mechanics_solver
def CreateSolver(main_model_part, custom_settings):
return FormfindingMechanicalSolver(main_model_part, custom_settings)
class FormfindingMechanicalSolver(structural_mechanics_solver.MechanicalSolver):
"""The structural mechanics formfinding solver.
This class creates the mechanical solver for formdinding.
Public member variables:
formfinding_settings -- settings for the formfinding solver.
See structural_mechanics_solver.py for more information.
"""
def __init__(self, main_model_part, custom_settings):
# Set defaults and validate custom settings.
self.formfinding_settings = KratosMultiphysics.Parameters("""
{
"print_formfinding_iterations": false
}
""")
self.validate_and_transfer_matching_settings(custom_settings, self.formfinding_settings)
# Validate the remaining settings in the base class.
# Construct the base solver.
super(FormfindingMechanicalSolver, self).__init__(main_model_part, custom_settings)
self.print_on_rank_zero("::[FormfindingMechanicalSolver]:: ", "Construction finished")
def _create_solution_scheme(self):
return KratosMultiphysics.ResidualBasedIncrementalUpdateStaticScheme()
def _create_mechanical_solution_strategy(self):
computing_model_part = self.GetComputingModelPart()
mechanical_scheme = self.get_solution_scheme()
linear_solver = self.get_linear_solver()
mechanical_convergence_criterion = self.get_convergence_criterion()
builder_and_solver = self.get_builder_and_solver()
return StructuralMechanicsApplication.FormfindingUpdatedReferenceStrategy(
computing_model_part,
mechanical_scheme,
linear_solver,
mechanical_convergence_criterion,
builder_and_solver,
self.settings["max_iteration"].GetInt(),
self.settings["compute_reactions"].GetBool(),
self.settings["reform_dofs_at_each_step"].GetBool(),
self.settings["move_mesh_flag"].GetBool(),
self.formfinding_settings["print_formfinding_iterations"].GetBool(),
self.settings["line_search"].GetBool())
| StarcoderdataPython |
5045887 | <reponame>yuchun1214/Working-Shift-Arrangement-System<gh_stars>1-10
import json
import os
import csv
from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
import datetime
from . import utils
def get_client_ip(request):
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[-1].strip()
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def set_cookie(response, key, value, days_expire = 7):
if days_expire is None:
max_age = 365 * 24 * 60 * 60 #one year
else:
max_age = days_expire * 24 * 60 * 60
expires = datetime.datetime.strftime(datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT")
response.set_cookie(key, value, max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN, secure=settings.SESSION_COOKIE_SECURE or None)
def checkIfLogin(request):
username = request.COOKIES.get("username")
ip = get_client_ip(request)
token = request.COOKIES.get("token")
if username == None or token == None:
return False
expectedToken = utils.md5Hash(ip + username)
return token == expectedToken
def loginPage(request):
if(checkIfLogin(request)):
return HttpResponseRedirect('/overview?year=2018')
print("======%s=====" % request.COOKIES)
template = loader.get_template('Shift/login.html')
return HttpResponse(template.render({},request))
@csrf_exempt
def login(request):
data = json.loads(request.body)
if utils.checkLogin(data):
# must set the cookie value
# In the next version, I'll set the cookie feature just because I'm lazy
response = HttpResponse('Ok')
set_cookie(response, "username", data["username"],1)
ip = get_client_ip(request)
token = utils.md5Hash(ip + data["username"])
set_cookie(response, "token", token, 1)
return response
else:
return HttpResponse('Not Ok')
def overview(request):
# check the query of year is legal
if(not checkIfLogin(request)):
return HttpResponseRedirect("/index/")
rules = utils.getRule(request.GET)
if rules != None:
months = utils.getCollection(request.GET['year'])
rules['monthAttr'] = months
template = loader.get_template('Shift/overview.html')
return HttpResponse(template.render(rules, request))
else:
raise Http404("Overview Page Not Found")
def shift(request):
# step 1 : check year and month is legal
if not checkIfLogin(request):
return HttpResponseRedirect("/index/")
result = utils.checkYearMonthLegal(request.GET)
    if result is not None:
        quality = utils.uploadQuality(result['year'], result['month'])
        shift = utils.getShift('shift' + result['year'] + result['month'])
shift['year'] = result['year']
shift['mon'] = request.GET['month']
shift['quality'] = quality
template = loader.get_template('Shift/shift.html')
return HttpResponse(template.render(shift, request))
else:
raise Http404("Shift Page Not Found")
@csrf_exempt
def postShift(request):
# check post mode
year = request.GET['year']
month = request.GET['month']
mode = request.GET['mode']
try:
        data = json.loads(request.body)
except:
data = {}
if mode == 'computing':
# step 1 : generate the current month calendar(csv)
utils.generateTheCalendarCSV(data,year,month)
# step 2 : generate next month calendar(csv)
utils.generateNextMonthCSV(year, month)
# step 3 : generate holiday(csv)
utils.generateHolidayCSV(data, year, month)
# step 4 : calculate the shift
utils.executeProgram(year, month)
# step 5 : update database
utils.updateDataBase(year,month)
return shift(request)
elif mode == 'saving':
utils.saveShift(data, year, month)
return HttpResponse('Ok')
elif mode == 'clear':
utils.clearSchedule(year, month)
return HttpResponse('Ok')
def saveShift(request):
year = request.GET['year']
month = request.GET['month']
    data = json.loads(request.body)
utils.saveShift(data, year, month);
# save data to the database
return HttpResponse('Ok')
# Create your views here.
| StarcoderdataPython |
8017189 | <gh_stars>0
# FILE: vscp.py
#
# This file is part of the VSCP (https://www.vscp.org)
#
# The MIT License (MIT)
#
# Copyright (c) 2000-2017 <NAME>, <NAME>is AB <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import struct
import socket
import sys
import datetime
from ctypes import *
VSCP_DEFAULT_UDP_PORT = 33333
VSCP_DEFAULT_TCP_PORT = 9598
VSCP_ANNNOUNCE_MULTICAST_PORT = 9598
VSCP_MULTICAST_IPV4_ADDRESS_STR = "172.16.31.10"
VSCP_DEFAULT_MULTICAST_PORT = 44444
VSCP_DEFAULT_MULTICAST_TTL = 1
VSCP_ADDRESS_SEGMENT_CONTROLLER = 0x00
VSCP_ADDRESS_NEW_NODE = 0xff
#VSCP levels
VSCP_LEVEL1 = 0
VSCP_LEVEL2 = 1
# VSCP priority
VSCP_PRIORITY_0 = 0x00
VSCP_PRIORITY_1 = 0x20
VSCP_PRIORITY_2 = 0x40
VSCP_PRIORITY_3 = 0x60
VSCP_PRIORITY_4 = 0x80
VSCP_PRIORITY_5 = 0xA0
VSCP_PRIORITY_6 = 0xC0
VSCP_PRIORITY_7 = 0xE0
VSCP_PRIORITY_HIGH = 0x00
VSCP_PRIORITY_LOW = 0xE0
VSCP_PRIORITY_MEDIUM = 0xC0
VSCP_PRIORITY_NORMAL = 0x60
VSCP_HEADER_PRIORITY_MASK = 0xE0
VSCP_HEADER_HARD_CODED = 0x10 # If set node nickname is hardcoded
VSCP_HEADER_NO_CRC = 0x08 # Don't calculate CRC
VSCP_NO_CRC_CALC = 0x08 # If set no CRC is calculated
VSCP_MASK_PRIORITY = 0xE0
VSCP_MASK_HARDCODED = 0x10
VSCP_MASK_NOCRCCALC = 0x08
VSCP_LEVEL1_MAXDATA = 8
VSCP_LEVEL2_MAXDATA = (512 - 25)
VSCP_NOCRC_CALC_DUMMY_CRC = 0xAA55 # If no CRC cal bit is set the CRC value
# should be set to this value for the CRC
# calculation to be skipped.
VSCP_CAN_ID_HARD_CODED = 0x02000000 # Hard coded bit in CAN frame id
# GUID byte positions
VSCP_GUID_MSB = 0
VSCP_GUID_LSB = 15
# VSCP event structure
class vscpEvent(Structure):
_fields_ = [("crc", c_ushort),
("obid", c_ulong),
("timestamp", c_ulong),
("head", c_ushort),
("year", c_ushort),
("month", c_ubyte),
("day", c_ubyte),
("hour", c_ubyte),
("minute", c_ubyte),
("second", c_ubyte),
("vscpclass", c_ushort),
("vscptype", c_ushort),
("guid", c_ubyte * 16),
("sizeData", c_ushort),
("pdata", c_void_p)]
# VSCP event ex structure
class vscpEventEx(Structure):
_fields_ = [("crc", c_ushort),
("obid", c_ulong),
("timestamp", c_ulong),
("head", c_ushort),
("year", c_ushort),
("month", c_ubyte),
("day", c_ubyte),
("hour", c_ubyte),
("minute", c_ubyte),
("second", c_ubyte),
("vscpclass", c_ushort),
("vscptype", c_ushort),
("guid", c_ubyte * 16),
("sizeData", c_ushort),
("data", c_ubyte * VSCP_LEVEL2_MAXDATA)]
# Event filter
class vscpEventFilter(Structure):
_fields_ = [("filter_priority", c_ubyte),
("mask_priority", c_ubyte),
("filter_class", c_ubyte),
("mask_class", c_ubyte),
("filter_type", c_ubyte),
("mask_type", c_ubyte),
("filter_guid", c_ubyte * 16),
("mask_guid", c_ubyte * 16) ]
# Transmission statistics structure
class VSCPStatistics(Structure):
_fields_ = [("cntReceiveFrames", c_long),
("cntTransmitFrames", c_long),
("cntReceiveData", c_long),
("cntTransmitData", c_long),
("cntOverruns", c_long),
("x", c_long),
("y", c_long),
("z", c_long) ]
class VSCPStatus(Structure):
_fields_ = [("channel_status", c_ulong),
("lasterrorcode", c_ulong),
("lasterrorsubcode", c_ulong)]
class VSCPChannelInfo(Structure):
_fields_ = [("channelType", c_ubyte),
("channel", c_ushort),
("guid", c_ubyte * 16)]
# VSCP Encryption types
VSCP_ENCRYPTION_NONE = 0
VSCP_ENCRYPTION_AES128 = 1
VSCP_ENCRYPTION_AES192 = 2
VSCP_ENCRYPTION_AES256 = 3
# VSCP Encryption tokens
VSCP_ENCRYPTION_TOKEN_0 = ""
VSCP_ENCRYPTION_TOKEN_1 = "AES128"
VSCP_ENCRYPTION_TOKEN_2 = "AES192"
VSCP_ENCRYPTION_TOKEN_3 = "AES256"
# Packet frame format type = 0
# without byte0 and CRC
# total frame size is 1 + 34 + 2 + data-length
VSCP_MULTICAST_PACKET0_HEADER_LENGTH = 35
# Multicast packet ordinals
VSCP_MULTICAST_PACKET0_POS_PKTTYPE = 0
VSCP_MULTICAST_PACKET0_POS_HEAD = 1
VSCP_MULTICAST_PACKET0_POS_HEAD_MSB = 1
VSCP_MULTICAST_PACKET0_POS_HEAD_LSB = 2
VSCP_MULTICAST_PACKET0_POS_TIMESTAMP = 3
VSCP_MULTICAST_PACKET0_POS_YEAR = 7
VSCP_MULTICAST_PACKET0_POS_YEAR_MSB = 7
VSCP_MULTICAST_PACKET0_POS_YEAR_LSB = 8
VSCP_MULTICAST_PACKET0_POS_MONTH = 9
VSCP_MULTICAST_PACKET0_POS_DAY = 10
VSCP_MULTICAST_PACKET0_POS_HOUR = 11
VSCP_MULTICAST_PACKET0_POS_MINUTE = 12
VSCP_MULTICAST_PACKET0_POS_SECOND = 13
VSCP_MULTICAST_PACKET0_POS_VSCP_CLASS = 14
VSCP_MULTICAST_PACKET0_POS_VSCP_CLASS_MSB = 14
VSCP_MULTICAST_PACKET0_POS_VSCP_CLASS_LSB = 15
VSCP_MULTICAST_PACKET0_POS_VSCP_TYPE = 16
VSCP_MULTICAST_PACKET0_POS_VSCP_TYPE_MSB = 16
VSCP_MULTICAST_PACKET0_POS_VSCP_TYPE_LSB = 17
VSCP_MULTICAST_PACKET0_POS_VSCP_GUID = 18
VSCP_MULTICAST_PACKET0_POS_VSCP_SIZE = 34
VSCP_MULTICAST_PACKET0_POS_VSCP_SIZE_MSB = 34
VSCP_MULTICAST_PACKET0_POS_VSCP_SIZE_LSB = 35
VSCP_MULTICAST_PACKET0_POS_VSCP_DATA = 36
# Two byte CRC follow here and if the frame is encrypted
# the initialization vector follows.
# VSCP multicast packet types
VSCP_MULTICAST_TYPE_EVENT = 0
# Multicast proxy CLASS=1026, TYPE=3 https://www.vscp.org/docs/vscpspec/doku.php?id=class2.information#type_3_0x0003_level_ii_proxy_node_heartbeat
VSCP_MULTICAST_PROXY_HEARTBEAT_DATA_SIZE = 192
VSCP_MULTICAST_PROXY_HEARTBEAT_POS_REALGUID = 0 # The real GUID for the node
VSCP_MULTICAST_PROXY_HEARTBEAT_POS_IFGUID = 32 # GUID for interface node is on
VSCP_MULTICAST_PROXY_HEARTBEAT_POS_IFLEVEL = 48 # 0=Level I node, 1=Level II node
VSCP_MULTICAST_PROXY_HEARTBEAT_POS_NODENAME = 64 # Name of node
VSCP_MULTICAST_PROXY_HEARTBEAT_POS_IFNAME = 128 # Name of interface
# Default key for VSCP Server
# Change if other key is used
VSCP_DEFAULT_KEY16 = '<KEY>'
VSCP_DEFAULT_KEY24 = '<KEY>'
VSCP_DEFAULT_KEY32 = '<KEY>'
# Bootloaders
VSCP_BOOTLOADER_VSCP = 0x00 # VSCP boot loader algorithm
VSCP_BOOTLOADER_PIC1 = 0x01 # PIC algorithm 0
VSCP_BOOTLOADER_AVR1 = 0x10 # AVR algorithm 0
VSCP_BOOTLOADER_LPC1 = 0x20 # NXP/Philips LPC algorithm 0
VSCP_BOOTLOADER_ST = 0x30 # ST STR algorithm 0
VSCP_BOOTLOADER_FREESCALE = 0x40 # Freescale Kinetics algorithm 0
VSCP_BOOTLOADER_NONE = 0xff
# * * * Data Coding for VSCP packets * * *
# Data format masks
VSCP_MASK_DATACODING_TYPE = 0xE0 # Bits 5,6,7
VSCP_MASK_DATACODING_UNIT = 0x18 # Bits 3,4
VSCP_MASK_DATACODING_INDEX = 0x07 # Bits 0,1,2
# These bits are coded in the three MSB bytes of the first data byte
# in a packet and tells the type of the data that follows.
VSCP_DATACODING_BIT = 0x00
VSCP_DATACODING_BYTE = 0x20
VSCP_DATACODING_STRING = 0x40
VSCP_DATACODING_INTEGER = 0x60
VSCP_DATACODING_NORMALIZED = 0x80
VSCP_DATACODING_SINGLE = 0xA0 # single precision float
VSCP_DATACODING_RESERVED1 = 0xC0
VSCP_DATACODING_RESERVED2 = 0xE0
# These bits are coded in the four least significant bits of the first data byte
# in a packet and tells how the following data should be interpreted. For a flow sensor
# the default format can be litres/minute. Other formats such as m3/second can be defined
# by the node if it which. However it must always be able to report in the default format.
VSCP_DATACODING_INTERPRETION_DEFAULT = 0
# CRC8 Constants
VSCP_CRC8_POLYNOMIAL = 0x18
VSCP_CRC8_REMINDER = 0x00
# CRC16 Constants
VSCP_CRC16_POLYNOMIAL = 0x1021
VSCP_CRC16_REMINDER = 0xFFFF
# CRC32 Constants
VSCP_CRC32_POLYNOMIAL = 0x04C11DB7
VSCP_CRC32_REMINDER = 0xFFFFFFFF
# Node data - the required registers are fetched from this
# structure
class vscpMyNode(Structure):
_fields_ = [ ("guid", c_ubyte * 16),
("nicknameID", c_ubyte ) ]
# * * * Standard VSCP registers * * *
# Register defines above 0x7f
VSCP_STD_REGISTER_ALARM_STATUS = 0x80
VSCP_STD_REGISTER_MAJOR_VERSION = 0x81
VSCP_STD_REGISTER_MINOR_VERSION = 0x82
VSCP_STD_REGISTER_SUB_VERSION = 0x83
# 0x84 - 0x88
VSCP_STD_REGISTER_USER_ID = 0x84
# 0x89 - 0x8C
VSCP_STD_REGISTER_USER_MANDEV_ID = 0x89
# 0x8D -0x90
VSCP_STD_REGISTER_USER_MANSUBDEV_ID = 0x8D
# Nickname
VSCP_STD_REGISTER_NICKNAME_ID = 0x91
# Selected register page
VSCP_STD_REGISTER_PAGE_SELECT_MSB = 0x92
VSCP_STD_REGISTER_PAGE_SELECT_LSB = 0x93
# Firmware version
VSCP_STD_REGISTER_FIRMWARE_MAJOR = 0x94
VSCP_STD_REGISTER_FIRMWARE_MINOR = 0x95
VSCP_STD_REGISTER_FIRMWARE_SUBMINOR = 0x96
VSCP_STD_REGISTER_BOOT_LOADER = 0x97
VSCP_STD_REGISTER_BUFFER_SIZE = 0x98
VSCP_STD_REGISTER_PAGES_COUNT = 0x99
# 0xd0 - 0xdf GUID
VSCP_STD_REGISTER_GUID = 0xD0
# 0xe0 - 0xff MDF
VSCP_STD_REGISTER_DEVICE_URL = 0xE0
# Level I Decision Matrix
VSCP_LEVEL1_DM_ROW_SIZE = 8
VSCP_LEVEL1_DM_OFFSET_OADDR = 0
VSCP_LEVEL1_DM_OFFSET_FLAGS = 1
VSCP_LEVEL1_DM_OFFSET_CLASS_MASK = 2
VSCP_LEVEL1_DM_OFFSET_CLASS_FILTER = 3
VSCP_LEVEL1_DM_OFFSET_TYPE_MASK = 4
VSCP_LEVEL1_DM_OFFSET_TYPE_FILTER = 5
VSCP_LEVEL1_DM_OFFSET_ACTION = 6
VSCP_LEVEL1_DM_OFFSET_ACTION_PARAM = 7
# Bits for VSCP server 64/16-bit capability code
# used by CLASS1.PROTOCOL, HIGH END SERVER RESPONSE
# and low end 16-bits for
# CLASS2.PROTOCOL, HIGH END SERVER HEART BEAT
VSCP_SERVER_CAPABILITY_TCPIP = (1<<15)
VSCP_SERVER_CAPABILITY_UDP = (1<<14)
VSCP_SERVER_CAPABILITY_MULTICAST_ANNOUNCE = (1<<13)
VSCP_SERVER_CAPABILITY_RAWETH = (1<<12)
VSCP_SERVER_CAPABILITY_WEB = (1<<11)
VSCP_SERVER_CAPABILITY_WEBSOCKET = (1<<10)
VSCP_SERVER_CAPABILITY_REST = (1<<9)
VSCP_SERVER_CAPABILITY_MULTICAST_CHANNEL = (1<<8)
VSCP_SERVER_CAPABILITY_RESERVED = (1<<7)
VSCP_SERVER_CAPABILITY_IP6 = (1<<6)
VSCP_SERVER_CAPABILITY_IP4 = (1<<5)
VSCP_SERVER_CAPABILITY_SSL = (1<<4)
VSCP_SERVER_CAPABILITY_TWO_CONNECTIONS = (1<<3)
VSCP_SERVER_CAPABILITY_AES256 = (1<<2)
VSCP_SERVER_CAPABILITY_AES192 = (1<<1)
VSCP_SERVER_CAPABILITY_AES128 = 1
# Offsets into the data of the capabilities event
# VSCP_CLASS2_PROTOCOL, Type=20/VSCP2_TYPE_PROTOCOL_HIGH_END_SERVER_CAPS
VSCP_CAPABILITY_OFFSET_CAP_ARRAY = 0
VSCP_CAPABILITY_OFFSET_GUID = 8
VSCP_CAPABILITY_OFFSET_IP_ADDR = 24
VSCP_CAPABILITY_OFFSET_SRV_NAME = 40
VSCP_CAPABILITY_OFFSET_NON_STD_PORTS = 104
# Error Codes
VSCP_ERROR_SUCCESS = 0 # All is OK
VSCP_ERROR_ERROR = -1 # Error
VSCP_ERROR_CHANNEL = 7 # Invalid channel
VSCP_ERROR_FIFO_EMPTY = 8 # FIFO is empty
VSCP_ERROR_FIFO_FULL = 9 # FIFI is full
VSCP_ERROR_FIFO_SIZE = 10 # FIFO size error
VSCP_ERROR_FIFO_WAIT = 11
VSCP_ERROR_GENERIC = 12 # Generic error
VSCP_ERROR_HARDWARE = 13 # Hardware error
VSCP_ERROR_INIT_FAIL = 14 # Initialization failed
VSCP_ERROR_INIT_MISSING = 15
VSCP_ERROR_INIT_READY = 16
VSCP_ERROR_NOT_SUPPORTED = 17 # Not supported
VSCP_ERROR_OVERRUN = 18 # Overrun
VSCP_ERROR_RCV_EMPTY = 19 # Receive buffer empty
VSCP_ERROR_REGISTER = 20 # Register value error
VSCP_ERROR_TRM_FULL = 21 # Transmit buffer full
VSCP_ERROR_LIBRARY = 28 # Unable to load library
VSCP_ERROR_PROCADDRESS = 29 # Unable get library proc. address
VSCP_ERROR_ONLY_ONE_INSTANCE = 30 # Only one instance allowed
VSCP_ERROR_SUB_DRIVER = 31 # Problem with sub driver call
VSCP_ERROR_TIMEOUT = 32 # Time-out
VSCP_ERROR_NOT_OPEN = 33 # The device is not open.
VSCP_ERROR_PARAMETER = 34 # A parameter is invalid.
VSCP_ERROR_MEMORY = 35 # Memory exhausted.
VSCP_ERROR_INTERNAL = 36 # Some kind of internal program error
VSCP_ERROR_COMMUNICATION = 37 # Some kind of communication error
VSCP_ERROR_USER = 38 # Login error user name
VSCP_ERROR_PASSWORD = 39 # Login error password
VSCP_ERROR_CONNECTION = 40 # Could not connect
VSCP_ERROR_INVALID_HANDLE = 41 # The handle is not valid
VSCP_ERROR_OPERATION_FAILED = 42 # Operation failed for some reason
VSCP_ERROR_BUFFER_TO_SMALL = 43 # Supplied buffer is to small to fit content
VSCP_ERROR_UNKNOWN_ITEM = 44 # Requested item (remote variable) is unknown
VSCP_ERROR_ALREADY_DEFINED = 45 # The name is already in use.
#
# Template for VSCP XML event data
#
# data: datetime,head,obid,datetime,timestamp,class,type,guid,sizedata,data,note
#
#<event>
# <head>3</head>
# <obid>1234</obid>
# <datetime>2017-01-13T10:16:02</datetime>
# <timestamp>50817</timestamp>
# <class>10</class>
# <type>6</type>
# <guid>00:00:00:00:00:00:00:00:00:00:00:00:00:01:00:02</guid>
# <sizedata>7</sizedata>
# <data>0x48,0x34,0x35,0x2E,0x34,0x36,0x34</data>
# <note></note>
#</event>
#
VSCP_XML_EVENT_TEMPLATE = "<event>\n"\
"<head>%d</head>\n"\
"<obid>%lu</obid>\n"\
"<datetime>%s</datetime>\n"\
"<timestamp>%lu</timestamp>\n"\
"<class>%d</class>\n"\
"<type>%d</type>\n"\
"<guid>%s</guid>\n"\
"<sizedata>%d</sizedata>\n"\
"<data>%s</data>\n"\
"<note>%s</note>\n"\
"</event>"
#
#
# Template for VSCP JSON event data
# data: datetime,head,obid,datetime,timestamp,class,type,guid,data,note
#
#
# "head": 2,
# "obid"; 123,
# "datetime": "2017-01-13T10:16:02",
# "timestamp":50817,
# "class": 10,
# "type": 8,
# "guid": "00:00:00:00:00:00:00:00:00:00:00:00:00:01:00:02",
# "data": [1,2,3,4,5,6,7],
# "note": "This is some text"
#
#
VSCP_JSON_EVENT_TEMPLATE = "{\n"\
"\"head\": %d,\n"\
"\"obid\": %lu,\n"\
"\"datetime\": \"%s\",\n"\
"\"timestamp\": %lu,\n"\
"\"class\": %d,\n"\
"\"type\": %d,\n"\
"\"guid\": \"%s\",\n"\
"\"data\": [%s],\n"\
"\"note\": \"%s\"\n"\
"}"
#
#
# Template for VSCP HTML event data
#
# data: datetime,class,type,data-count,data,guid,head,timestamp,obid,note
#
#<h2>VSCP Event</h2>
#<p>
# Time: 2017-01-13T10:16:02 <br>
#</p>
#<p>
# Class: 10 <br>
# Type: 6 <br>
#</p>
#<p>
# Data count: 6<br>
# Data: 1,2,3,4,5,6,7<br>
#</p>
#<p>
# From GUID: 00:00:00:00:00:00:00:00:00:00:00:00:00:01:00:02<br>
#</p>
#<p>
# Head: 6 <br>
# DateTime: 2013-11-02T12:34:22Z
# Timestamp: 1234 <br>
# obid: 1234 <br>
# note: This is a note <br>
#</p>
#
VSCP_HTML_EVENT_TEMPLATE = "<h2>VSCP Event</h2> "\
"<p>"\
"Class: %d <br>"\
"Type: %d <br>"\
"</p>"\
"<p>"\
"Data count: %d<br>"\
"Data: %s<br>"\
"</p>"\
"<p>"\
"From GUID: %s<br>"\
"</p>"\
"<p>"\
"Head: %d <br>"\
"<p>"\
"DateTime: %s <br>"\
"</p>"\
"Timestamp: %lu <br>"\
"obid: %lu <br>"\
"note: %s <br>"\
"</p>"
# Set packet type part of multicast packet type
def SET_VSCP_MULTICAST_TYPE( type, encryption ) :
return ( ( type << 4 ) | encryption )
# Get packet type part of multicast packet type
def GET_VSCP_MULTICAST_PACKET_TYPE( type) :
return ( ( type >> 4 ) & 0x0f )
# Get encryption part if multicast packet type
def GET_VSCP_MULTICAST_PACKET_ENCRYPTION( type ) :
return ( ( type ) & 0x0f )
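# Illustrative round-trip (values chosen for the example only): packing an
# event-type/encryption pair into a packet-type byte and unpacking it again.
#
#   b = SET_VSCP_MULTICAST_TYPE(VSCP_MULTICAST_TYPE_EVENT, VSCP_ENCRYPTION_AES128)
#   GET_VSCP_MULTICAST_PACKET_TYPE(b)        # -> 0 (event)
#   GET_VSCP_MULTICAST_PACKET_ENCRYPTION(b)  # -> 1 (AES128)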
# Get data coding type
def VSCP_DATACODING_TYPE( b ) :
return ( VSCP_MASK_DATACODING_TYPE & b )
# Get data coding unit
def VSCP_DATACODING_UNIT( b ) :
return ( ( VSCP_MASK_DATACODING_UNIT & b ) >> 3 )
# Get data coding sensor index
def VSCP_DATACODING_INDEX( b ) :
return ( VSCP_MASK_DATACODING_INDEX & b )
| StarcoderdataPython |
3341763 | <gh_stars>10-100
"""Хромосомы и гены параметров модели."""
from poptimizer.evolve.evolve import Evolution
from poptimizer.evolve.forecaster import get_forecasts
| StarcoderdataPython |
8096695 | <filename>atemon/__init__.py
"""
Various packages provided by Atemon Technology Consultants.
Website: http://www.atemon.com
Git Hub: https://github.com/atemon
Twitter: https://twitter.com/atemonastery
Author: <NAME> <EMAIL>
Copyright 2016 Atemon Technology Consultants LLP
Each package may have different opensource licence(s), Please refer to LICENSE.txt
in each package/or top of the file.
This file is part of SMS library and distributed under the MIT license (MIT).
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of the Atemon Technology Consultants LLP shall
not be used in advertising or otherwise to promote the sale, use or other dealings in this
Software without prior written authorization from the Atemon Technology Consultants LLP.
"""
| StarcoderdataPython |
12843335 | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- <NAME>
For COPYING and LICENSE details, please refer to the LICENSE file
"""
"""
module to compile the required python extensions
This is for development purposes only! Later on
it might be integrated into the standard setup.py
"""
# http://docs.cython.org/src/tutorial/cython_tutorial.html
# from distutils.core import setup
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(
ext_modules=cythonize(
["./pycmbs/geostatistic/variogram_base.pyx"]),
# this is needed to get proper information on numpy headers
include_dirs=[numpy.get_include()]
)
# run as ... to build extension
# $ python setup_extensions.py build_ext --inplace
| StarcoderdataPython |
1632203 | <gh_stars>0
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .models import *
import cloudinary
import cloudinary.uploader
import cloudinary.api
from django.http import JsonResponse
from rest_framework import status
from django.http import Http404
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializer import ProfileSerializer,ProjectSerializer
from .permissions import IsAdminOrReadOnly
@login_required(login_url="/accounts/login/")
def index(request): # Home page
project = Project.objects.all()
# get the latest project from the database
latest_project = project.first()
# get project rating
rating = Rating.objects.filter(project_id=latest_project).first()
# print(latest_project.id)
return render(
request, "index.html", {"projects": project, "project_home": latest_project, "rating": rating}
)
# single project page
def project_details(request,id):
project = Project.objects.get(id=id)
# get project rating
rating = Rating.objects.filter(project=project)
return render(request, "project.html", {"project": project, "rating": rating})
@login_required(login_url="/accounts/login/")
def profile(request): # view profile
current_user = request.user
profile = Profile.objects.filter(user_id=current_user.id).first() # get profile
project = Project.objects.filter(user_id=current_user.id).all() # get all projects
return render(request, "profile.html", {"profile": profile, "images": project})
@login_required(login_url="/accounts/login/")
def update_profile(request):
if request.method == "POST":
current_user = request.user
first_name = request.POST["first_name"]
last_name = request.POST["last_name"]
username = request.POST["username"]
email = request.POST["email"]
bio = request.POST["bio"]
contact = request.POST["contact"]
profile_image = request.FILES["profile_pic"]
profile_image = cloudinary.uploader.upload(profile_image)
profile_url = profile_image["url"]
user = User.objects.get(id=current_user.id)
# check if user exists in profile table and if not create a new profile
if Profile.objects.filter(user_id=current_user.id).exists():
profile = Profile.objects.get(user_id=current_user.id)
profile.profile_photo = profile_url
profile.bio = bio
profile.contact = contact
profile.save()
else:
profile = Profile(
user_id=current_user.id,
profile_photo=profile_url,
bio=bio,
contact=contact,
)
profile.save_profile()
user.first_name = first_name
user.last_name = last_name
user.username = username
user.email = email
user.save()
return redirect("/profile/", {"success": "Profile Updated Successfully"})
# return render(request, 'profile.html', {'success': 'Profile Updated Successfully'})
else:
return render(request, "profile.html", {"danger": "Profile Update Failed"})
# save project
@login_required(login_url="/accounts/login/")
def save_project(request):
if request.method == "POST":
current_user = request.user
title = request.POST["title"]
location = request.POST["location"]
description = request.POST["description"]
url = request.POST["url"]
image = request.FILES["image"]
# crop image to square
image = cloudinary.uploader.upload(image, crop="limit", width=500, height=500)
# image = cloudinary.uploader.upload(image)
image_url = image["url"]
project = Project(
user_id=current_user.id,
title=title,
location=location,
description=description,
url=url,
image=image_url,
)
project.save_project()
return redirect("/profile/", {"success": "Project Saved Successfully"})
else:
return render(request, "profile.html", {"danger": "Project Save Failed"})
# delete project
@login_required(login_url="/accounts/login/")
def delete_project(request, id):
project = Project.objects.get(id=id)
project.delete_project()
return redirect("/profile/", {"success": "Project Deleted Successfully"})
# rate_project
@login_required(login_url="/accounts/login/")
def rate_project(request, id):
if request.method == "POST":
project = Project.objects.get(id=id)
current_user = request.user
design_rate=request.POST["design"]
usability_rate=request.POST["usability"]
content_rate=request.POST["content"]
Rating.objects.create(
project=project,
user=current_user,
design_rate=design_rate,
usability_rate=usability_rate,
content_rate=content_rate,
avg_rate=round((float(design_rate)+float(usability_rate)+float(content_rate))/3,2),
)
# get the avarage rate of the project for the three rates
avg_rating= (int(design_rate)+int(usability_rate)+int(content_rate))/3
# update the project with the new rate
project.rate=avg_rating
project.update_project()
return render(request, "project.html", {"success": "Project Rated Successfully", "project": project, "rating": Rating.objects.filter(project=project)})
else:
project = Project.objects.get(id=id)
return render(request, "project.html", {"danger": "Project Rating Failed", "project": project})
# search projects
def search_project(request):
if 'search_term' in request.GET and request.GET["search_term"]:
search_term = request.GET.get("search_term")
searched_projects = Project.objects.filter(title__icontains=search_term)
message = f"Search For: {search_term}"
return render(request, "search.html", {"message": message, "projects": searched_projects})
else:
message = "You haven't searched for any term"
return render(request, "search.html", {"message": message})
# rest api ====================================
class ProfileList(APIView): # get all profiles
permission_classes = (IsAdminOrReadOnly,)
def get(self, request, format=None):
all_profiles = Profile.objects.all()
serializers = ProfileSerializer(all_profiles, many=True)
return Response(serializers.data)
# def post(self, request, format=None):
# serializers = MerchSerializer(data=request.data)
class ProjectList(APIView): # get all projects
permission_classes = (IsAdminOrReadOnly,)
def get(self, request, format=None):
all_projects = Project.objects.all()
serializers = ProjectSerializer(all_projects, many=True)
return Response(serializers.data) | StarcoderdataPython |
35750 | <reponame>GPXenergy/gpx_server_api<filename>smart_meter/apps.py
from django.apps import AppConfig
class SmartMeterConfig(AppConfig):
name = 'smart_meter'
| StarcoderdataPython |
9600190 | <gh_stars>0
#!/usr/bin/python3.7
__doc__ = """Small module to process data and take appropriate action
from programs that produce output on a line-by-line basis."""
import subprocess, sys, time, re, os, shlex, tempfile
ansi_re = re.compile(r"\x1b\[[0-9;]*[mGKHF]")
def filter_ansi(string_):
return ansi_re.sub("", string_)
def tail_command_old(command):
stdout = tempfile.mktemp()
stderr = tempfile.mktemp()
command = "/bin/sh -c '%s 1> %s 2> %s &'" % (command, stdout, stderr)
result = subprocess.run(command, shell=True)
stdout_f = open(stdout, 'r')
stderr_f = open(stderr, 'r')
while True:
line = stdout_f.readline()
if line:
yield line
else:
time.sleep(0.01)
def tail_command(command):
command = shlex.split(command)
result = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
count = 0
while True:
line = result.stdout.readline().decode()
if line:
yield line
else:
count += 1
            if result.poll() is None:
                if count % 10:
                    time.sleep(0.01)
                else:
                    # every 10th empty read, double-check the process still exists
                    if os.system("ps -p %s > /dev/null" % result.pid):
                        return
            else:
                return
class GotEOF(Exception):
"Exception raised when EOF of file is found, by empty readline"
pass
def tail(filename):
return tail_f(filename, follow=False)
def tail_f(filename, follow=True):
file = open(filename, 'r')
try:
for line in _tail_f(file, follow=follow, signal_wait=True):
yield line
except GotEOF:
for line in _tail_f(file, follow=follow):
yield line
def _tail_f(file, follow=True, signal_wait=False):
line = ''
while True:
line = file.readline()
if line:
yield line
else:
if not follow: return
if signal_wait:
raise GotEOF()
time.sleep(0.1)
def get_tail_of_tail_f(filename, follow=True, tail_size=10):
file = open(filename, 'r')
lines = []
try:
for line in _tail_f(file, follow=follow, signal_wait=True):
lines.append(line)
except GotEOF:
for line in lines[len(lines) - tail_size:]:
yield line
# If it's a huuuge file with lots of lines, free up that memory as this
# function may run "forever".
del lines
for line in _tail_f(file, follow=follow):
yield line
class Listener:
"""Instances of this 'listen' to each line produced by tail commands."""
def __init__(self):
self.lines = []
def add(self, line):
        self.lines.append((time.time(), line))
if __name__ == '__main__':
#for line in tail_command("cat %s" % sys.argv[0]):
# print(line)
for line in tail_command("ls --color -l"):
print(filter_ansi(line))
for line in tail_command("./output_ansi.py"):
print(filter_ansi(line))
for line in get_tail_of_tail_f("/var/log/syslog"):
print(line, end='')
| StarcoderdataPython |
8189860 | """
Provides the ``SSHSession`` dictionary, which maps host
to :py:class:`paramiko.transport.Transport`.
"""
from __future__ import absolute_import
from .log import ArcError
SSHSession = {}
def ssh_connect(host, user, pkey, window_size = (2 << 15) - 1):
"""
Creates a :py:class:`paramiko.Transport` object and adds it to
``SSHSession``.
:param str host: remote host
:param str user: username at remote host
:param str pkey: path to private RSA key
:param int window_size: TCP window size
:note: if command execution times out and output is truncated, it is likely that the TCP window is too small
"""
from paramiko.transport import Transport
from paramiko import RSAKey
global SSHSession
try:
SSHSession[host] = Transport((host, 22))
SSHSession[host].window_size = window_size
pkey = RSAKey.from_private_key_file(pkey, '')
SSHSession[host].connect(username = user, pkey = pkey)
SSHSession[host].__del__ = SSHSession[host].close
except Exception as e:
raise ArcError('Failed to connect to host %s:\n%s' % (host, str(e)), 'common.ssh')
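# Illustrative usage sketch (host name, user and key path are placeholders, and
# the open_session/exec_command calls assume the standard paramiko.Transport
# API rather than anything defined in this module):
#
#   ssh_connect('remote.example.org', 'griduser', '/home/griduser/.ssh/id_rsa')
#   chan = SSHSession['remote.example.org'].open_session()
#   chan.exec_command('hostname')
#   print(chan.recv(1024))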
| StarcoderdataPython |
6516569 | <reponame>Kaufi-Jonas/VaRA-Tool-Suite
"""Plot module for util functionality."""
import typing as tp
from pathlib import Path
import pandas as pd
from matplotlib.axes import Axes
from varats.mapping.commit_map import CommitMap
from varats.utils.git_util import FullCommitHash, ShortCommitHash
def find_missing_revisions(
data: tp.Iterable[tp.Tuple[tp.Any, pd.Series]], git_path: Path,
cmap: CommitMap, should_insert_revision: tp.Callable[[tp.Any, tp.Any],
tp.Tuple[bool, float]],
to_commit_hash: tp.Callable[[tp.Any], ShortCommitHash],
are_neighbours: tp.Callable[[ShortCommitHash, ShortCommitHash], bool]
) -> tp.Set[FullCommitHash]:
"""Calculate a set of revisions that could be missing because the changes
between certain points are to steep."""
new_revs: tp.Set[FullCommitHash] = set()
_, last_row = next(data)
for _, row in data:
should_insert, gradient = should_insert_revision(last_row, row)
if should_insert:
lhs_cm = to_commit_hash(last_row)
rhs_cm = to_commit_hash(row)
if are_neighbours(lhs_cm, rhs_cm):
print(
"Found steep gradient between neighbours " +
f"{lhs_cm} - {rhs_cm}: {round(gradient, 5)}"
)
print(f"Investigate: git -C {git_path} diff {lhs_cm} {rhs_cm}")
else:
print(
"Unusual gradient between " +
f"{lhs_cm} - {rhs_cm}: {round(gradient, 5)}"
)
new_rev_id = round(
(cmap.short_time_id(lhs_cm) + cmap.short_time_id(rhs_cm)) /
2.0
)
new_rev = cmap.c_hash(new_rev_id)
print(f"-> Adding {new_rev} as new revision to the sample set")
new_revs.add(new_rev)
last_row = row
return new_revs
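# Illustrative call sketch (the data-frame column names and the helper
# callables below are assumptions made for the example, not part of this
# module): the caller supplies the steepness criterion and the mapping from a
# row back to a commit hash.
#
#   def _steep(last_row, row):
#       gradient = abs(row["value"] - last_row["value"])
#       return gradient > 0.1, gradient
#
#   new_revs = find_missing_revisions(
#       df.iterrows(), repo_path, commit_map, _steep,
#       lambda row: ShortCommitHash(row["revision"]),
#       lambda lhs, rhs: False)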
def pad_axes(
ax: Axes,
pad_x: tp.Optional[float] = None,
pad_y: tp.Optional[float] = None
) -> None:
"""Add some padding to the axis limits."""
if pad_x:
x_min, x_max = ax.get_xlim()
padding_x = (x_max - x_min) * pad_x
ax.set_xlim(x_min - padding_x, x_max + padding_x)
if pad_y:
y_min, y_max = ax.get_ylim()
padding_y = (y_max - y_min) * pad_y
ax.set_ylim(y_min - padding_y, y_max + padding_y)
def align_yaxis(ax1: Axes, value1: float, ax2: Axes, value2: float) -> None:
"""
Adjust ax2 ylimit so that value2 in ax2 is aligned to value1 in ax1.
See https://stackoverflow.com/a/26456731
"""
_, y_ax1 = ax1.transData.transform((0, value1))
_, y_ax2 = ax2.transData.transform((0, value2))
adjust_yaxis(ax2, (y_ax1 - y_ax2) / 2, value2)
adjust_yaxis(ax1, (y_ax2 - y_ax1) / 2, value1)
def adjust_yaxis(ax: Axes, ydif: float, value: float) -> None:
"""
Shift axis ax by ydiff, maintaining point value at the same location.
See https://stackoverflow.com/a/26456731
"""
inv = ax.transData.inverted()
_, delta_y = inv.transform((0, 0)) - inv.transform((0, ydif))
miny, maxy = ax.get_ylim()
miny, maxy = miny - value, maxy - value
if -miny > maxy or (-miny == maxy and delta_y > 0):
nminy = miny
nmaxy = miny * (maxy + delta_y) / (miny + delta_y)
else:
nmaxy = maxy
nminy = maxy * (miny + delta_y) / (maxy + delta_y)
ax.set_ylim(nminy + value, nmaxy + value)
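# Illustrative usage sketch (data values are made up): align the zero lines of
# a twinx pair so both series share a baseline, then pad the x-axis slightly.
#
#   import matplotlib.pyplot as plt
#   fig, ax1 = plt.subplots()
#   ax2 = ax1.twinx()
#   ax1.plot([0, 1, 2], [-1.0, 0.5, 2.0])
#   ax2.plot([0, 1, 2], [10.0, -3.0, 4.0], color="red")
#   align_yaxis(ax1, 0, ax2, 0)
#   pad_axes(ax1, pad_x=0.05)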
| StarcoderdataPython |
3324107 | <gh_stars>0
from itertools import groupby
import datetime
import os.path
import bisect
import iso8601
from flask import url_for
MIN_YEAR = 2006
ICONS = {
"member": "bill-introduced.png",
"committee": "committee-discussion.png",
"house": "house.png",
"president": "signed-by-president.png",
"unknown": "bill-introduced.png",
}
def get_location(event):
if event.get('type') in ['bill-signed', 'bill-act-commenced', 'bill-enacted']:
return {
'name': 'Office of the President',
'class': 'president',
}
if event.get('house'):
return {
'name': event['house']['name'],
'class': event['house']['short_name'],
}
if event.get('committee'):
if 'house' in event['committee']:
return {
'name': event['committee']['house']['name'],
'class': event['committee']['house']['short_name'],
}
return {
'name': event['committee']['name'],
'url': url_for('committee_detail', committee_id=event['committee']['id']),
'class': '',
}
return {'name': 'Unknown', 'class': ''}
def get_agent(event, bill):
info = None
if event.get('type') in ['bill-signed', 'bill-act-commenced', 'bill-enacted']:
info = {
'name': '<NAME>',
'type': 'president',
}
elif event.get('type') == 'bill-introduced':
info = {
'name': bill['introduced_by'] or (bill.get('place_of_introduction') or {}).get('name'),
'type': 'member',
}
elif event.get('member'):
info = {
'name': event['member']['name'],
'type': 'member',
'url': url_for('member', member_id=event['member']['id'])
}
elif event.get('committee'):
info = {
'name': event['committee']['name'],
'type': 'committee',
'url': url_for('committee_detail', committee_id=event['committee']['id'])
}
elif event.get('house'):
info = {
'name': event['house']['name'],
'type': 'house',
}
else:
info = {'name': 'Unknown', 'type': 'unknown'}
info['icon'] = ICONS[info['type']]
return info
def bill_history(bill):
""" Work out the history of a bill and return a description of it. """
history = []
events = bill.get('events', [])
    # sort on names rather than the raw dicts, which are not orderable in Python 3
    events.sort(key=lambda e: [
        iso8601.parse_date(e['date']),
        get_location(e)['name'],
        get_agent(e, bill)['name']])
for location, location_events in groupby(events, get_location):
location_history = []
for agent, agent_events in groupby(location_events, lambda e: get_agent(e, bill)):
info = {'events': list(agent_events)}
info.update(agent)
location_history.append(info)
info = {'events': location_history}
info.update(location)
history.append(info)
history = hansard_linking(history)
return history
def match_title(event_title):
"""
Match bill title against the following possible titles
"""
bill_titles = [
"Bill passed by National Assembly",
"Bill passed by both Houses",
"Bill revived on this date",
"The NCOP rescinded",
"Bill remitted",
]
for title in bill_titles:
if title in event_title:
return True
return False
def match_dates(hansard_date, event_date):
hansard_iso_date = iso8601.parse_date(hansard_date)
event_iso_date = iso8601.parse_date(event_date)
if hansard_iso_date.date() == event_iso_date.date():
return True
return False
def hansard_linking(bill_history):
"""
We need to link certain bill events to hansards
Hansrds will always be linked to a house (NA or NCOP)
The Date of the bill event and the hansard will be the same.
Bill Titles we are looking for:
* Bill passed by National Assembly
* Bill passed by both Houses
* Bill revived on this date
* The NCOP rescinded
* Bill remitted
If the event(bill_passed etc) is matched, a new dict is created with the matching hansard id.
The Hansard event is not modified.
"""
for class_history in bill_history:
for event_history in class_history["events"]:
if event_history["type"] == "house":
for event in event_history["events"]:
if event["type"] == "plenary":
for bill_event in event_history["events"]:
if match_title(bill_event["title"]) and match_dates(
event["date"], bill_event["date"]
):
bill_event["hansard"] = {"id": event["id"]}
return bill_history
def count_parliamentary_days(date_from, date_to):
""" Count the number of parliamentary days between two dates, inclusive.
"""
i = bisect.bisect(PARLIAMENTARY_DAYS, date_from)
j = bisect.bisect(PARLIAMENTARY_DAYS, date_to)
return j - i + 1
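# Illustrative sketch (dates invented for the example; both endpoints are
# assumed to be sitting days, in which case the inclusive count below holds):
# with PARLIAMENTARY_DAYS containing 2020-02-04, 2020-02-06 and 2020-03-10,
#
#   count_parliamentary_days(datetime.date(2020, 2, 4),
#                            datetime.date(2020, 2, 6))   # -> 2
#   count_parliamentary_days(datetime.date(2020, 2, 4),
#                            datetime.date(2020, 3, 10))  # -> 3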
def load_parliamentary_days():
""" Load the dates when parliament sat from data/parliament-sitting-days.txt
This file can be updated from a spreadsheet using bin/load_parliamentary_days.py
"""
with open(os.path.join(os.path.dirname(__file__), "../data/parliament-sitting-days.txt"), "r") as f:
lines = f.readlines()
dates = [datetime.date(*(int(x) for x in d.split("-"))) for d in lines]
return sorted(dates)
PARLIAMENTARY_DAYS = load_parliamentary_days()
| StarcoderdataPython |
11392261 | #! /usr/bin/env python
import argparse
import sys
import requests
import simplejson
import dict2xml
from bs4 import BeautifulSoup
class Json2xml(object):
# -------------------------------
##
# @Synopsis This class could read a json file
# from the filesystem or get a file from across
# the Internet, and convert that json object to
# xml
#
# @Param data : Data to be fed into the system.
#
# @Returns Null
# ---------------------------------
def __init__(self, data: str) -> None:
self.data = data
# -------------------------------
##
# @Synopsis Read JSON from a file in
# the system
# ---------------------------------
@classmethod
def fromjsonfile(cls, filename: str):
try:
json_data = open(filename)
data = simplejson.load(json_data)
json_data.close()
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
data = []
return cls(data)
# -------------------------------
##
# @Synopsis Fetches the JSON
# data from an URL Source.
#
# ---------------------------------
@classmethod
def fromurl(cls, url: str):
response = requests.get(url)
if response.status_code == 200:
return cls(response.json())
else:
raise Exception("Bad URl, Can't get JSON response")
# -------------------------------
##
# @Synopsis This method actually
# converts the json data that is converted
# into dict into XML
#
# @Returns XML
# ---------------------------------
def json2xml(self):
if self.data:
xmldata = dict2xml.dict2xml(self.data)
xml = BeautifulSoup(xmldata, "html.parser")
return xml
def main(argv=None):
parser = argparse.ArgumentParser(description='Utility to convert json to valid xml.')
parser.add_argument('--url', dest='url', action='store')
parser.add_argument('--file', dest='file', action='store')
args = parser.parse_args()
if args.url:
url = args.url
data = Json2xml.fromurl(url)
print(Json2xml.json2xml(data))
if args.file:
file = args.file
data = Json2xml.fromjsonfile(file)
print(Json2xml.json2xml(data))
if __name__ == "__main__":
main(sys.argv)
| StarcoderdataPython |
1969169 | <filename>experiments/rpi/dojo/dojo_display7.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import atexit
import time
import RPi.GPIO as GPIO
import spi
# make sure the cleanup function is called when the script exits
atexit.register(GPIO.cleanup)
# use logical (BCM) pin numbering
GPIO.setmode(GPIO.BCM)
DISPLAY = [17, 4, 9, 11, 7, 27, 22, 10]
SPI_CLK = 18
SPI_MISO = 23
SPI_MOSI = 24
SPI_CS = 25
conversor_ad = spi.Mcp3008(SPI_CLK, SPI_MISO, SPI_MOSI, SPI_CS)
CANAL_POTENCIOMETRO = 1
for led in DISPLAY[:6]:
GPIO.setup(led, GPIO.OUT)
GPIO.output(led, 0)
while True:
for led in DISPLAY[:6]:
GPIO.output(led, 1)
atraso = conversor_ad.read(CANAL_POTENCIOMETRO)/1000.0
time.sleep(atraso)
GPIO.output(led, 0)
| StarcoderdataPython |
1788722 | from . import connection
class KafkaProducerPipeline(object):
"""
Publish serialize item to configured topic
"""
def __init__(self, producer):
self.producer = producer
self.topic = None
def open_spider(self, spider):
if not hasattr(spider, 'produce_item_topic'):
            raise ValueError('produce_item_topic name is not provided')
self.topic = spider.produce_item_topic
def process_item(self, item, spider):
"""
This method has overridden for pipeline to process the item
:param item:
:param spider:
:return:
"""
"""
send(self, topic, value=None, key=None, headers=None, partition=None, timestamp_ms=None):
"""
self.producer.send(topic=self.topic, value=item)
return item
@classmethod
def from_settings(cls, settings):
"""
This
:param settings: the current scrapy spider settings
:return: KafkaProducerPipeline instance
"""
producer = connection.producer_from_settings({})
return cls(producer)
@classmethod
def from_crawler(cls, crawler):
return cls.from_settings(crawler.settings)
def close_spider(self, spider):
if self.producer:
self.producer.close()
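# Illustrative configuration sketch (the module path and topic name are
# assumptions, not taken from this project): the pipeline must be enabled in
# the scrapy settings, and every spider must declare produce_item_topic.
#
#   # settings.py
#   ITEM_PIPELINES = {"myproject.pipelines.KafkaProducerPipeline": 300}
#
#   # spider
#   class MySpider(scrapy.Spider):
#       name = "my_spider"
#       produce_item_topic = "scraped-items"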
| StarcoderdataPython |
4991482 | from .. import hook, bar, manager
import base
from pythonwifi.iwlibs import Wireless, Iwstats
class Wlan(base._TextBox):
"""
Displays Wifi ssid and quality.
"""
defaults = manager.Defaults(
("font", "Arial", "Font"),
("fontsize", None, "Pixel size. Calculated if None."),
("padding", None, "Padding. Calculated if None."),
("background", "000000", "Background colour"),
("foreground", "ffffff", "Foreground colour")
)
def __init__(self, interface="wlan0", width=bar.CALCULATED, **config):
"""
- interface: Wlan interface name.
- width: A fixed width, or bar.CALCULATED to calculate the width
automatically (which is recommended).
"""
self.interface = interface
base._TextBox.__init__(self, " ", width, **config)
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
self.timeout_add(1, self.update)
def update(self):
interface = Wireless(self.interface)
stats = Iwstats(self.interface)
quality = stats.qual.quality
essid = interface.getEssid()
text = "{} {}/70".format(essid, quality)
if self.text != text:
self.text = text
self.bar.draw()
return True
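# Illustrative usage sketch (screen/bar wiring is abbreviated and the interface
# name is an assumption): the widget is placed in a bar like any built-in one.
#
#   from libqtile import bar
#   from libqtile.config import Screen
#   screens = [Screen(bottom=bar.Bar([Wlan(interface="wlan0")], 24))]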
| StarcoderdataPython |
import torch
from torch.nn.modules.loss import _Loss
from algos.deepobfuscator import DeepObfuscator
from utils.metrics import MetricLoader
class EntropyLoss(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super(EntropyLoss, self).__init__(size_average, reduce, reduction)
# input is probability distribution of output classes
def forward(self, input):
if (input < 0).any() or (input > 1).any():
print(input)
raise Exception('Entropy Loss takes probabilities 0<=input<=1')
input = input + 1e-16 # for numerical stability while taking log
H = torch.mean(torch.sum(input * torch.log(input), dim=1))
return H
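# Minimal behaviour sketch (illustrative only): sum(p * log p) is the negative
# entropy, so minimising this loss drives the adversary's predictions towards
# the uniform, maximum-entropy distribution.
#
#   probs = torch.tensor([[0.5, 0.5], [0.9, 0.1]])
#   EntropyLoss()(probs)   # ~ -0.51; approaches -0.69 as rows become uniform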
class maxentropy(DeepObfuscator):
""" The only difference between maxentropy and Deepobfuscator
is the loss function for the proxy adversary and the label is the private attribute instead of reconstruction.
"""
def __init__(self, config, utils) -> None:
super(maxentropy, self).__init__(config, utils)
self.update_loss()
def update_loss(self):
self.loss = EntropyLoss().forward
def get_adv_loss(self):
# Since it is L1, it has to be minimized
return self.adv_loss | StarcoderdataPython |
169991 | from django.contrib.auth.models import User, Group
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from rest_framework.parsers import MultiPartParser, FormParser, FileUploadParser, JSONParser
from rest_framework import generics, permissions, status, views
from rest_framework.response import Response
from oauth2_provider.contrib.rest_framework import TokenHasReadWriteScope, TokenHasScope
from api import custompermission
from accounts.api.serializers import AccountProfileSerializer, FollowStoriesSerializer, FollowUserSerializer, SignupSerializer, GroupSerializer
from fanfics.api.serializers import UserFanficSerializer, SocialSerializer, FanficSerializer, UserSerializer
from accounts.models import AccountProfile, FollowUser, FollowStories, Social
from fanfics.models import Fanfic
class UserFanficDetailView(generics.RetrieveAPIView):
"""
Retrieve an user formatted
"""
queryset = User.objects.all()
serializer_class = UserFanficSerializer
lookup_field = 'username'
permission_classes = (
permissions.AllowAny,
)
class UserDetailView(generics.RetrieveUpdateDestroyAPIView):
"""
Retrieve an user
"""
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = (
permissions.IsAuthenticated,
custompermission.IsUserOrReadonly,
)
def put(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
class AccountProfileDetailView(generics.RetrieveUpdateDestroyAPIView):
"""
Retrieve and update a profile account
"""
queryset = AccountProfile.objects.all()
serializer_class = AccountProfileSerializer
permission_classes = (
permissions.AllowAny,
custompermission.IsCurrentUserOrReadonly,
)
lookup_field = ('user__username')
parser_classes = (MultiPartParser, FormParser, JSONParser,)
class SocialListApiView(generics.ListCreateAPIView):
"""
Retrieve a social account
"""
serializer_class = SocialSerializer
pagination_class = None
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
)
def get_queryset(self):
account = self.kwargs['account']
if account:
return Social.objects.filter(account=account)
else:
return Social.objects.all()
class SocialDestroyApiView(generics.DestroyAPIView):
"""
Destroy a social account
"""
queryset = Social.objects.all()
serializer_class = SocialSerializer
permission_classes = (
permissions.IsAuthenticated,
)
class GroupListView(generics.ListAPIView):
permission_classes = [permissions.IsAuthenticated, TokenHasScope]
required_scopes = ['groups']
queryset = Group.objects.all()
serializer_class = GroupSerializer
class SignupView(generics.CreateAPIView):
queryset = User.objects.all()
serializer_class = SignupSerializer
permission_classes = (custompermission.IsAuthenticatedOrCreate,)
def liked_fanfic(request):
fanfic_id = request.data.get('id')
user_id = request.data.get('user')
if fanfic_id and user_id:
try:
fanfic = Fanfic.objects.get(id=int(fanfic_id))
if fanfic:
                # add() writes the relation directly; no reassignment or save() needed
                fanfic.users_like.add(user_id)
return Response({'status': 'ok'}, status=status.HTTP_201_CREATED)
except:
return Response({'status': 'nok'}, status=status.HTTP_400_BAD_REQUEST)
class FavoritedFanficView(views.APIView):
"""
Favorite fanfic
"""
# serializer_class = FanficSerializer()
authentication_classes = ()
permission_classes = ()
def post(self, request, *args, **kwargs):
"""
serializer = FollowUserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
"""
serializer = FanficSerializer()
if serializer.data:
liked_fanfic(request)
return Response(serializer.data, status=status.HTTP_200_OK)
def unliked_fanfic(request):
fanfic_id = request.data.get('id')
user_id = request.data.get('user')
if fanfic_id and user_id:
try:
fanfic = Fanfic.objects.get(id=int(fanfic_id))
if fanfic:
                # remove() writes the relation directly; no reassignment or save() needed
                fanfic.users_like.remove(user_id)
return Response({'status': 'ok'}, status=status.HTTP_200_OK)
except:
return Response({'status': 'nok'}, status=status.HTTP_400_BAD_REQUEST)
class UnfavoritedFanficView(views.APIView):
"""
Unfavorite fanfic
"""
serializer_class = FanficSerializer()
authentication_classes = ()
permission_classes = ()
def post(self, request, *args, **kwargs):
serializer = FanficSerializer()
if serializer.data:
unliked_fanfic(request)
return Response(serializer.data, status=status.HTTP_200_OK)
class FollowUserView(views.APIView):
"""
Users followed
"""
serializer_class = FollowUserSerializer()
authentication_classes = ()
permission_classes = (permissions.AllowAny,)
def get(self, request, format=None):
"""
return list of all authors followed
"""
try:
follow_users = FollowUser.objects.all()
serializer = FollowUserSerializer(follow_users, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
        except Exception:
return Response({'status': 'no content'}, status=status.HTTP_204_NO_CONTENT)
def post(self, request, *args, **kwargs):
serializer = FollowUserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response({'status': 'ko'}, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk=None):
follow_user_id = request.data.get('id')
try:
follow_user = FollowUser.objects.get(id=follow_user_id)
follow_user.delete()
return Response({'status': 'ok'}, status=status.HTTP_200_OK)
        except Exception:
return Response({'status': 'ko'}, status=status.HTTP_400_BAD_REQUEST)
class FollowAuthorDeleteView(views.APIView):
"""
Author followed
"""
serializer_class = FollowUserSerializer()
authentication_classes = ()
permission_classes = (permissions.AllowAny,)
def get_object(self, request, user_to):
user_from = request.data.get('user_from')
try:
return FollowUser.objects.get(user_to=user_to, user_from=user_from)
except FollowUser.DoesNotExist:
raise Http404
def get(self, request, user_to, format=None):
author_followed = self.get_object(request, user_to)
serializer = FollowUserSerializer(author_followed)
return Response(serializer.data)
def delete(self, request, user_to, format=None):
author_followed = self.get_object(request, user_to)
author_followed.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class FollowStoriesDeleteView(views.APIView):
"""
    Story followed
"""
serializer_class = FollowStoriesSerializer()
authentication_classes = ()
permission_classes = (permissions.AllowAny,)
def get_object(self, request, to_fanfic):
from_user = request.data.get('from_user')
try:
return FollowStories.objects.get(to_fanfic=to_fanfic, from_user=from_user)
except FollowStories.DoesNotExist:
raise Http404
def get(self, request, to_fanfic, format=None):
story_followed = self.get_object(request, to_fanfic)
serializer = FollowStoriesSerializer(story_followed)
return Response(serializer.data)
def delete(self, request, to_fanfic, format=None):
story_followed = self.get_object(request, to_fanfic)
story_followed.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class FollowStoriesView(views.APIView):
"""
Stories followed
"""
serializer_class = FollowStoriesSerializer()
authentication_classes = ()
permission_classes = (permissions.AllowAny,)
def get(self, request, format=None):
"""
return list of all stories followed
"""
try:
stories = FollowStories.objects.all()
serializer = FollowStoriesSerializer(stories, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
        except Exception:
return Response({'status': 'no content'}, status=status.HTTP_204_NO_CONTENT)
def post(self, request):
serializer = FollowStoriesSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response({'status': 'ko'}, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk=None):
follow_story_id = request.data.get('id')
try:
follow_story = FollowStories.objects.get(id=follow_story_id)
follow_story.delete()
return Response({'status': 'ok'}, status=status.HTTP_200_OK)
        except Exception:
return Response({'status': 'ko'}, status=status.HTTP_400_BAD_REQUEST)
class DeleteAccountView(views.APIView):
"""
Disable user account
"""
serializer_class = UserSerializer()
authentication_classes = ()
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, *args, **kwargs):
user = request.user
user.is_active = False
user.save()
return Response({"status": "ok"}, status=status.HTTP_200_OK)
| StarcoderdataPython |
8195186 | #!/usr/bin/env python
#
# Copyright 2010 Los Alamos National Security, LLC
# Written by <NAME> (CCS-2)
#
'''
Functions for running a simulation.
'''
_usage = '''
Usage: %s [options] input_file
where,
input_file : The input file. If the extension on the file name is
".xml", then the executable reads this file directly. Otherwise,
the file is assumed to be a Python front-end input file, which is
then converted to an xml file, after which the executable is run.
Options [default]:
-d, --doc
Dump input variable documentation to stdout and exit.
Unavailable if input_file is an xml file. [false]
-h, --help
Print this message and exit. [false]
-n, --procs
Run on this many processors. [1]
-p, --python
Dump input variables, in python format, to stdout and exit.
Some editing may be required in order to use this dump as an
input file. Unavailable if input_file is an xml file. [false]
-x, --xml
Convert input_file to xml and exit. [false]
Options -d and -x may be combined; if either of these 2 options are
specified, the executable is not run.
The -d and -p options do not document all possible input
combinations. For example, a linear solver type could allow CG or
GMRES. If the solver type is set to CG in input_file, only CG
related input variables will be documented with the -d option.
There is no way from the -d or -p options to know that GMRES is
available as a solver option.
Example:
%s --procs=4 input.py
'''
import subprocess
import getopt
import sys
import os
from py_siminput.input_interface import Dump_XML, Dump_Doc, Dump_Python
# global variables
prefix_default = 'time %s'
script_name = os.path.basename(sys.argv[0])
build_type = 'host'
def Usage(mesg):
'''
Prints documentation and exits.
'''
print(mesg)
print(_usage % (script_name, script_name))
sys.exit(0)
def system(command, debug):
'''
Runs command on the underlying operating system.
'''
if debug:
print(" " + command) # for debugging
else:
r = os.system(command)
if r != 0: # alternatively, "if not os.WIFEXITED(r)"
print("Error running: " + command)
sys.exit(1)
def append_to_file(file):
'''
Can be used to append items to a file, in a system() call.
'''
return " 1>> " + file + " 2>&1"
def mpi_only_run_string(num_pes,exe,inputfile):
'''
This function may be used to define a default run string for
mpi runs, dependent on the platform.
'''
machine = subprocess.getoutput('uname')
if machine == "AIX":
# IBM AIX (with poe)
s = "poe %s -procs %d -rmpool 2 -retry 1" % (exe,num_pes,inputfile)
elif machine == "OSF1":
# COMPAQ
s = "prun -n %d %s %s" % (num_pes,exe,inputfile)
else:
# Set the default
s = "mpirun -np %d %s %s" % (num_pes,exe,inputfile)
return s
def mcmpi_run_string(num_pes,exe1,exe2,inputfile):
'''
This function defines a run string for mcmpi runs. only has default.
platforms should be added a la mpi_run_string as needed.
'''
s = "mpirun -np %d %s %s : -np %d %s" % (num_pes,exe2,inputfile,
num_pes,exe1)
return s
def mpidacs_run_string(num_pes,exe1,exe2,inputfile):
'''
This function defines a run string for mpidacs runs. only has default.
platforms should be added a la mpi_run_string as needed.
'''
s = "mpiexec --mca mpi_paffinity_alone 1 -np %d %s %s %s" % (num_pes,exe2,
exe1,
inputfile)
return s
def run_string(num_pes,exe1,exe2,inputfile):
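    '''
    Return the launch command for the executable(s), chosen from the global
    build_type ('host', 'mcmpi' or 'ppe') and the number of processors.
    '''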
if build_type == 'host' and num_pes > 1:
return mpi_only_run_string(num_pes,exe1,inputfile)
elif build_type == 'host' and num_pes == 1:
return exe1 + " " + inputfile
elif build_type == 'mcmpi':
return mcmpi_run_string(num_pes,exe1,exe2,inputfile)
elif build_type == 'ppe':
return mpidacs_run_string(num_pes,exe1,exe2,inputfile)
def unit_test_args(exe1,exe2,input,args):
'''
This is a utility function that parses the command-line arguments
from dracos launchtest and returns the pair (number_of_procs,
run_string).
Typically, args is sys.argv.
'''
# serial default
rs = "%s"
num_pes = 1
# check for --procs, i.e., a parallel run.
if len(args) > 1:
try:
optlist, args = getopt.getopt(args[1:], 'n:',
['procs='])
except getopt.error as val:
print('ERROR ' + val.msg)
sys.exit(1)
for o, a in optlist:
if o in ('-n', '--procs'):
num_pes = int(a)
rs = run_string(num_pes,exe1,exe2,input)
# all done
return (num_pes, rs)
def get_root(input):
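    '''
    Execute the Python front-end input file and return the input tree it
    defines in its 'root' variable.
    '''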
input_dict = {}
exec(compile(open(input).read(), input, 'exec'), input_dict)
if 'root' not in input_dict:
Usage("No 'root' variable defined in input file %s" % (input))
return input_dict['root']
def run_root(root, input_xml, command, debug = 0):
'''
This is a low-level function, used by the run() function. Also,
it may be useful in more sophisticated scripts.
root = Root of input tree.
input_xml = File name where xml is dumped.
command = Command string.
debug = Print the action to be taken, but do not actually do it.
'''
Dump_XML(root, input_xml)
system(command, debug)
def run(exe1,exe2):
'''
Runs exes. See _usage string for more information on the actions of
this function.
'''
prefix = prefix_default
debug = 0
dump_doc = 0
dump_python = 0
dump_xml = 0
input = 'input.xml'
num_pes = 1
# Parse command line options
try:
optlist, args = getopt.getopt(sys.argv[1:], 'dhn:px',
['doc',
'help',
'procs=',
'python',
'xml'])
except getopt.error as val:
Usage('ERROR: ' + val.msg)
for o, a in optlist:
if o in ('-d', '--doc'):
dump_doc = 1
elif o in ('-h', '--help'):
Usage("")
elif o in ('-p', '--python'):
dump_python = 1
elif o in ('-n', '--procs'):
num_pes = int(a)
elif o in ('-x', '--xml'):
dump_xml = 1
if len(args) > 1:
Usage("Too many arguments.")
if len(args) < 1:
Usage("Must supply input filename.")
input = args[0]
# Make sure input exists
if not os.path.isfile(input):
Usage("Unable to find input file: %s" % (input))
# Create the xml filename
input_base = os.path.basename(input)
input_is_xml = False
if input_base[-4:] == ".xml":
input_is_xml = True
input_xml = input_base
elif len(input_base) < 3 or input_base[-3:] != ".py":
input_xml = input_base + ".xml"
else:
input_xml = input_base[0:-3] + ".xml"
# Filter input file through python. The input file must generate
# an input interface named 'root'.
if input_is_xml:
if dump_xml:
Usage('--xml specified when input is already xml.')
if dump_python:
Usage('--python unavailable when input is xml.')
if dump_doc:
Usage('--doc unavailable when input is xml.')
command = run_string(num_pes,exe1,exe2,input_xml)
print('Running: %s ...' % (command))
system(command,debug)
else:
print('Generating %s from %s ...' % (input_xml, input))
root = get_root(input)
run_sim = 1 # if true, run simulation.
if dump_xml:
run_sim = 0
if dump_doc:
# Dump documentation
Dump_Doc(root)
run_sim = 0
if dump_python:
# Dump python
d = Dump_Python(1)
d.dump(root, 'root')
run_sim = 0
if run_sim:
# Run the solver
command = run_string(num_pes,exe1,exe2,input_xml)
print('Running: %s ...' % (command))
run_root(root, input_xml, command, debug)
elif dump_xml:
# just dump the xml
Dump_XML(root, input_xml)
| StarcoderdataPython |
3203104 | import uuid
import django.contrib.postgres.indexes
import django.contrib.postgres.search
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("backend", "0001"),
]
operations = [
migrations.CreateModel(
name="Item",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created", models.DateTimeField(auto_now_add=True, db_index=True)),
("updated", models.DateTimeField(auto_now=True, db_index=True)),
("name", models.TextField()),
("description", models.TextField()),
(
"search_vector",
django.contrib.postgres.search.SearchVectorField(null=True),
),
],
),
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created", models.DateTimeField(auto_now_add=True, db_index=True)),
("updated", models.DateTimeField(auto_now=True, db_index=True)),
("full_name", models.TextField()),
("email", models.TextField()),
("hashed_password", models.TextField()),
(
"search_vector",
django.contrib.postgres.search.SearchVectorField(null=True),
),
],
),
migrations.AddIndex(
model_name="user",
index=models.Index(fields=["email"], name="backend_use_email_db66b5_idx"),
),
migrations.AddIndex(
model_name="user",
index=django.contrib.postgres.indexes.GinIndex(
fields=["search_vector"], name="backend_use_search__6cf6bf_gin"
),
),
migrations.AddConstraint(
model_name="user",
constraint=models.UniqueConstraint(fields=("email",), name="unique_email"),
),
migrations.AddField(
model_name="item",
name="owner",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="backend.user",
),
),
migrations.AddIndex(
model_name="item",
index=models.Index(
fields=["name", "owner"], name="backend_ite_name_c0732e_idx"
),
),
migrations.AddIndex(
model_name="item",
index=django.contrib.postgres.indexes.GinIndex(
fields=["search_vector"], name="backend_ite_search__4170f7_gin"
),
),
migrations.AddConstraint(
model_name="item",
constraint=models.UniqueConstraint(
fields=("name", "owner"), name="unique_owner_and_name"
),
),
]
| StarcoderdataPython |
8101593 | '''=============================================================
c:/1work/Python/djcode/pjtk2/migration/get_project_dates.py
Created: 14 Jan 2015 15:43:57
DESCRIPTION:
This script was developed from get_spatial_data.py (and should be
merged with it to faciliate annual/frequent updates).
This scripts gets the start and end date for every project in the
master databases and then updates prj_date0 and prj_date1 in project
tracker. This script should be run each time a project is merged into
a master set. Project start and end dates are required fields when a
project is created, but can't be known until the project is complete.
This script ensures that project tracker reflects what is in the
databases.
<NAME>
=============================================================
'''
import csv
import pyodbc
import datetime
from dateutil import parser
import psycopg2
PG_USER = 'adam'
PG_DB = 'pjtk2'
PG_HOST = '192.168.127.12'
#PG_HOST = '127.0.0.1'
masters = {
'offshore': {
'path': 'Z:/Data Warehouse/Assessment/Index/Offshore/IA_OFFSHORE.mdb',
'table': 'Offshore_FN121',
'sam': 'SAM',
'ddlat': 'dd_lat',
'ddlon': 'dd_lon',
'effdt0': 'effdt0',
'effdt1': 'effdt1',
'groupby': False,
},
'nearshore': {
'path': 'Z:/Data Warehouse/Assessment/Index/Nearshore/IA_NEARSHORE.mdb',
'table': 'IA121',
'sam': 'SAM',
'ddlat': 'DD_LAT',
'ddlon': 'DD_LON',
'effdt0': 'effdt0',
'effdt1': 'effdt1',
'groupby': False,
},
'smallfish': {
'path': ('Z:/Data Warehouse/Assessment/Index/Nearshore/' +
'Small_Fish/COA_Nearshore_Smallfish.mdb'),
'table': '121',
'sam': 'SAM',
'ddlat': 'dd_lat',
'ddlon': 'dd_lon',
'effdt0': 'effdt0',
'effdt1': 'effdt1',
'groupby': False,
},
'fishway': {
'path': 'Z:\Data Warehouse\Assessment\Fishway\Fishway_Master.mdb',
'table': 'IM_121',
'sam': 'SAM',
'ddlat': 'DD_LAT',
'ddlon': 'DD_LON',
'effdt0': 'effdt0',
'effdt1': 'effdt1',
'groupby': True,
},
'sturgeon': {
'path': ('Z:/Data Warehouse/Assessment/Index/Sturgeon/' +
'SturgeonMaster.mdb'),
'table': 'Sturgeon_FN121',
'sam': 'SAM',
'ddlat': 'dd_lat',
'ddlon': 'dd_lon',
'effdt0': 'effdt0',
'effdt1': 'effdt1',
'groupby': False,
},
'comcatch': {
'path':('Z:/Data Warehouse/Commercial Fisheries/Catch Sampling/' +
'CF_Master.mdb'),
'table': 'Final_FN121',
'sam': 'SAM',
'ddlat': 'DD_LAT',
'ddlon': 'DD_LON',
'effdt0': 'DATE',
'effdt1': 'DATE',
'groupby': False,
},
'stocking': {
'path':('Y:/Information Resources/Dataset_Utilities/FS_Maker/' +
'FS_Master.mdb'),
'table': 'FS_Events',
'sam': 'EVENT',
'ddlat': 'DD_LAT',
'ddlon': 'DD_LON',
'effdt0': 'Event_Date',
'effdt1': 'Event_Date',
'groupby': False,
},
'creel': {
'path':('Z:/Data Warehouse/Recreational Fisheries/Creel/SC/' +
'SC_Master.mdb'),
'table': 'FINAL_FN121',
'sam': 'SAM',
'ddlat': 'DD_LAT',
'ddlon': 'DD_LON',
'effdt0': 'EFFDT0',
'effdt1': 'EFFDT0',
'groupby': True,
},
'sportfish':{
'path':('Z:/Data Warehouse/Recreational Fisheries/Angler Diary/Sf/' +
'SF_MASTER.mdb'),
'table': 'FN121',
'sam': 'SAM',
'ddlat': 'DD_LAT',
'ddlon': 'DD_LON',
'effdt0': 'EFFDT0',
'effdt1': 'EFFDT0',
'groupby': True,
},
'benthic': {
'path':('Z:/Data Warehouse/Derived Datasets/UNIT PROJECTS/Benthics/' +
'Lake Huron Benthics.mdb'),
#'path':'Y:/File Transfer/Lake Huron Benthics.mdb',
'table': 'LH_benthics',
'sam': 'Station ID',
'ddlat': 'DD Latitude',
'ddlon': 'DD Longitude',
'effdt0': 'Date',
'effdt1': 'Date',
'groupby': False,
},
}
def build_sql2(db_dict):
'''a little helper function that will build the sql statement to get
the start and end data of each project in the table (database)'''
sql_base="""SELECT PRJ_CD, Min([{effdt0}]) AS PRJ_Start, Max([{effdt1}])
AS PRJ_END
FROM [{table}]
GROUP BY PRJ_CD;"""
sql = sql_base.format(**db_dict)
return sql
prj_dates = []
#loop over our database dictionary and query each one for the project info.
for db in masters.keys():
dbase = masters[db]
constr = r"DRIVER={{Microsoft Access Driver (*.mdb)}};DBQ={0}"
constr = constr.format(dbase['path'])
mdbconn = pyodbc.connect(constr)
mdbcur = mdbconn.cursor()
# create a cursor
#try the lookup tables first - keep things simple
mdbcur = mdbconn.cursor()
sql = build_sql2(dbase)
try:
mdbcur.execute(sql)
result = mdbcur.fetchall()
print("There were {0} records found in {1}".format(len(result),
db))
    except Exception as e:
        print('Problem with {}: {}'.format(db, e))
mdbconn.close()
for row in result:
prj_dates.append([row[0], row[1], row[2]])
#convert the list of tuples returned by the db to a list of lists
prj_list = [[x[0], x[1], x[2]] for x in prj_dates]
#now covert each of dates to datetime objects (if they aren't already') and
#capture any that can't be converted
bad_start = []
bad_end = []
for prj in prj_list:
if prj[1] and prj[1].__class__ != datetime.datetime:
try:
prj[1] = parser.parse(prj[1])
except TypeError:
bad_start.append(prj)
if prj[2] and prj[2].__class__ != datetime.datetime:
try:
prj[2] = parser.parse(prj[2])
except TypeError:
bad_end.append(prj)
print("There were {} bad start dates found.".format(len(bad_start)))
print("There were {} bad end dates found.".format(len(bad_end)))
#now write prj_dates into a temporary table in project tracker
#and compare project start and end dates
# update those where they are different.
constr = "host={0} dbname={1} user={2}".format(PG_HOST, PG_DB, PG_USER)
pgconn = psycopg2.connect(constr)
pgcur = pgconn.cursor()
print('Making temporary project dates table...')
sql = """DROP TABLE IF EXISTS prj_dates_tmp"""
pgcur.execute(sql)
sql = """CREATE TABLE prj_dates_tmp
(
id serial NOT NULL,
prj_cd character(13) NOT NULL,
prj_start DATE,
prj_end DATE
)
"""
pgcur.execute(sql)
pgconn.commit()
print('Inserting project dates into temporary table...')
args = ([{'prj_cd': x[0], 'prj_start':x[1], 'prj_end':x[2]}
for x in prj_list])
sql = """INSERT INTO prj_dates_tmp (prj_cd, prj_start, prj_end)
VALUES(%(prj_cd)s, %(prj_start)s, %(prj_end)s);"""
pgcur.executemany(sql, args)
pgconn.commit()
#=============================================================
#write out some basic information about project that have start
# dates different than the master:
sql = """-- start dates that differ
SELECT master_database as dbase, prj.year,
dates.prj_cd,
prj_start AS master_start,
prj_date0 AS tracker_start,
prj_start - prj_date0 AS diff
FROM prj_dates_tmp dates
JOIN pjtk2_project prj ON prj.prj_cd = dates.prj_cd
join pjtk2_database db on db.id=prj.master_database_id
WHERE prj.prj_date0 != dates.prj_start
--and db.master_database = 'Fish Stocking'
ORDER BY dbase,
prj.year DESC,
prj.prj_cd;"""
pgcur.execute(sql)
rs = pgcur.fetchall()
fname = 'c:/1work/Python/djcode/pjtk2/migration/start_dates.csv'
with open(fname, 'w') as f:
writer = csv.writer(f)
writer.writerow([x[0] for x in pgcur.description])
writer.writerows(rs)
#=============================================================
#write out some basic information about project that have ends
#different than the master:
sql = """-- end dates that differ
SELECT master_database as dbase, prj.year,
prj.year,
dates.prj_cd,
prj_end AS master_end,
prj_date1 AS tracker_end,
prj_end - prj_date1 AS diff
FROM prj_dates_tmp dates
JOIN pjtk2_project prj ON prj.prj_cd = dates.prj_cd
join pjtk2_database db on db.id=prj.master_database_id
WHERE prj.prj_date1 != dates.prj_end
--and db.master_database = 'Fish Stocking'
ORDER BY dbase,
prj.year DESC,
prj.prj_cd;"""
pgcur.execute(sql)
rs = pgcur.fetchall()
fname = 'c:/1work/Python/djcode/pjtk2/migration/end_dates.csv'
with open(fname, 'w') as f:
writer = csv.writer(f)
writer.writerow([x[0] for x in pgcur.description])
writer.writerows(rs)
#=============================================================
# update project tracker:
print('Updating START dates....')
sql = """-- update the project start dates from the masters:
UPDATE pjtk2_project
SET prj_date0 = prj_start
FROM prj_dates_tmp
WHERE prj_dates_tmp.prj_cd = pjtk2_project.prj_cd
and prj_date0 != prj_start;"""
pgcur.execute(sql)
print('Updating END dates....')
sql = """-- update the project End dates from the masters:
UPDATE pjtk2_project
SET prj_date1 = prj_end
FROM prj_dates_tmp
WHERE prj_dates_tmp.prj_cd = pjtk2_project.prj_cd
and prj_date1 != prj_end;"""
pgcur.execute(sql)
print('Cleaning up...')
sql = """DROP TABLE prj_dates_tmp"""
pgcur.execute(sql)
pgconn.commit()
pgcur.close()
pgconn.close()
print('Done updating project dates!!')
| StarcoderdataPython |
268999 | # Example running ManFit
import manfit as mf
import numpy as np
import matplotlib.pyplot as plt
# write function with parameters as parameters
t = np.linspace(0,5,100)
ydata = np.exp(-t/3)*np.sin(2*np.pi*0.7*t) + np.random.uniform(low=-0.5,high=0.5, size=len(t))
def sine(t, freq, tau):
return np.exp(-t/tau)*np.sin(2*np.pi*freq*t)
def cosine(t, freq, phi):
return np.cos(2*np.pi*freq*t + phi)
# Initialize Parameters
params = [mf.Parameter(-1,1, name='Frequency'),
mf.Parameter(0.1,3, name='Tau')]
# setup plot
figure, axes = plt.subplots(1,1,figsize=(6,4), dpi=96)
line1, = axes.plot(t,sine(t, *[param.val for param in params]))
line2, = axes.plot(t,ydata)
# start ManFit
# mf.manfit(figure, parameter_list, lines_to_update, xdata, ydata, fit_func)
mf.manfit(figure, params, {line1:sine}, t, ydata, sine) | StarcoderdataPython |
3361456 | """
lambdata - a collection of data science helper functions
"""
import pandas as pd
import numpy as np
# sample code
ONE = pd.DataFrame(np.ones(10))
Zeros = pd.DataFrame(np.zeros(50))
| StarcoderdataPython |
310397 | <reponame>thitta/Someone.tw-Blog
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase, Client
from cms.models import Post, Collection
from cms.utils import reset_post_relations
USER1 = {"username": "john", "password": "<PASSWORD>"}
USER2 = {"username": "mary", "password": "<PASSWORD>"}
class IntegrationTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# create user
User = get_user_model()
user1 = User.objects.create_user(**USER1)
user2 = User.objects.create_user(**USER2)
# create mock data
posts = [None] * 6
posts[0] = Post.create_mock_instance(user1, IsPublic=True, IsOnList=True, RankingIndex=0, )
posts[1] = Post.create_mock_instance(user1, IsPublic=True, IsOnList=True, RankingIndex=2, )
posts[2] = Post.create_mock_instance(user1, IsPublic=True, IsOnList=True, RankingIndex=0, )
posts[3] = Post.create_mock_instance(user1, IsPublic=True, IsOnList=True, RankingIndex=0, IsTop=True)
posts[4] = Post.create_mock_instance(user1, IsPublic=True, IsOnList=False)
posts[5] = Post.create_mock_instance(user1, IsPublic=False, IsOnList=False)
reset_post_relations()
# Collections
collections = [None] * 3
collections[0] = Collection.create_mock_instance(IsPublic=True)
collections[1] = Collection.create_mock_instance(IsPublic=False)
collections[0].Posts.add(posts[0], posts[1], posts[4], posts[5])
# ------------------------------
# post:read
# ------------------------------
def test_ok_GET_post_list(self):
c = Client()
res = c.get("/")
# status code
exp = 200
act = res.status_code
self.assertEqual(exp, act)
# posts length
exp = 4
act = len(res.context["posts"])
self.assertEqual(exp, act)
def test_ok_GET_post_detail(self):
# ok
c = Client()
res = c.get("/post/1/title")
exp = 200
act = res.status_code
self.assertEqual(exp, act)
        # non-public post: return 404
c = Client()
res = c.get("/post/6/title")
exp = 404
act = res.status_code
self.assertEqual(exp, act)
        # non-existent post: return 404
c = Client()
res = c.get("/post/999/title")
exp = 404
act = res.status_code
self.assertEqual(exp, act)
# ------------------------------
# post:create/update/delete
# ------------------------------
def test_ok_GET_create_post(self):
# ok
c = Client()
c.login(**USER1)
res = c.get("/post/create")
exp = 200
act = res.status_code
self.assertEqual(exp, act)
# not login: return redirect
c = Client()
res = c.get("/post/create")
exp = 302
act = res.status_code
self.assertEqual(exp, act)
def test_ok_GET_update_post(self):
# ok
c = Client()
c.login(**USER1)
res = c.get("/post/1/update")
exp = 200
act = res.status_code
self.assertEqual(exp, act)
# not login: return redirect
c = Client()
res = c.get("/post/1/update")
exp = 302
act = res.status_code
self.assertEqual(exp, act)
# login but not author: return forbidden
c = Client()
c.login(**USER2)
res = c.get("/post/1/update")
exp = 403
act = res.status_code
self.assertEqual(exp, act)
def test_ok_POST_create_update_delete_post(self):
# prepare: login
c = Client()
User = get_user_model()
c.login(**USER1)
user = User.objects.get(username=USER1["username"])
mock_post_created = {"Title": "Created Post",
"Subtitle": "xxx", "BodyMarkdown": "xxx", "User": user,
"IsOnList": True, "IsPublic": True, "IsTop": True, "RankingIndex": 0}
# create_ok: redirect 302
res = c.post(f"/post/create", data=mock_post_created, follow=True)
final_url, status_code = res.redirect_chain[-1]
exp = 302
act = status_code
self.assertEqual(exp, act)
# create_ok: post is correctly created
created_post_id = int(final_url.split("/")[2])
created_post = Post.objects.get(PostId=created_post_id)
exp = mock_post_created["Title"]
act = created_post.Title
self.assertEqual(exp, act)
# update_ok: redirect 302
mock_post_updated = mock_post_created.copy()
mock_post_updated["Title"] = "Updated Post"
res = c.post(f"/post/{created_post_id}/update", data=mock_post_updated, follow=True)
final_url, status_code = res.redirect_chain[-1]
exp = 302
act = status_code
self.assertEqual(exp, act)
# update_ok: post is correctly updated
updated_post_id = int(final_url.split("/")[2])
updated_post = Post.objects.get(PostId=updated_post_id)
exp = mock_post_updated["Title"]
act = updated_post.Title
self.assertEqual(exp, act)
# update_ok: the created and updated post should be the same post
self.assertEqual(created_post_id, updated_post_id)
# delete_ok: redirect 302
res = c.post(f"/post/{created_post_id}/delete", data=mock_post_updated, follow=True)
final_url, status_code = res.redirect_chain[-1]
exp = 302
act = status_code
self.assertEqual(exp, act)
# delete_raise: post is no longer exists
with self.assertRaises(ObjectDoesNotExist):
Post.objects.get(PostId=created_post_id)
# ------------------------------
# collection: read
# ------------------------------
def test_ok_collection_post_list(self):
c = Client()
res = c.get("/collection/1/collectionTitle")
# http status
exp = 200
act = res.status_code
self.assertEqual(exp, act)
# posts length
exp = 3
act = len(res.context["posts"])
self.assertEqual(exp, act)
| StarcoderdataPython |
339275 | def f():
x = 5
return x
| StarcoderdataPython |
3536044 | import json
import requests
import os
from flask import (request,
jsonify,
make_response,
render_template,
Blueprint,
send_file,
url_for,
abort)
from logzero import logger
from config import config
from extensions import cache
from base.forms import VBrowserForm
from base.utils.auth import jwt_required
from caendr.api.strain import query_strains
from caendr.api.isotype import get_isotypes
from caendr.models.datastore import DatasetRelease
from caendr.models.sql import Strain, StrainAnnotatedVariant
from caendr.services.cloud.storage import generate_blob_url
from caendr.services.dataset_release import get_all_dataset_releases, get_release_path, get_browser_tracks_path, get_release_bucket
from caendr.models.error import NotFoundError
releases_bp = Blueprint('data_releases',
__name__,
template_folder='templates')
# ============= #
# Data Page #
# ============= #
@releases_bp.route('/release/latest')
@releases_bp.route('/release/<string:release_version>')
@cache.memoize(60*60)
def data_releases(release_version=None):
""" Default data page - lists available releases. """
title = "Genomic Data"
alt_parent_breadcrumb = {"title": "Data", "url": url_for('data.landing')}
RELEASES = get_all_dataset_releases(order='-version')
# Get the requested release and throw an error if it doesn't exist
RELEASE = None
if release_version:
for r in RELEASES:
if r.version == release_version:
RELEASE = r
break
if not RELEASE:
raise NotFoundError(f'Release Version: {release_version} Not Found')
else:
RELEASE = RELEASES[0]
if RELEASE.report_type == 'V2':
return data_v02(RELEASE, RELEASES)
elif RELEASE.report_type == 'V1':
return data_v01(RELEASE, RELEASES)
return render_template('data/releases.html', **locals())
@cache.memoize(60*60)
def data_v02(RELEASE, RELEASES):
title = "Genomic Data"
alt_parent_breadcrumb = {"title": "Data", "url": url_for('data.landing')}
release_version = RELEASE.version
strain_listing = query_strains(release_version=release_version)
release_bucket = get_release_bucket()
release_path = get_release_path(release_version)
browser_tracks_path = get_browser_tracks_path()
browser_tracks_url = generate_blob_url(release_bucket, browser_tracks_path)
files = RELEASE.get_report_data_urls_map()
return render_template('data/releases.html', **locals())
@cache.memoize(60*60)
def data_v01(RELEASE, RELEASES):
# Legacy releases (Pre 20200101)
title = "Genomic Data"
alt_parent_breadcrumb = {"title": "Data", "url": url_for('data.landing')}
release_version = RELEASE.version
strain_listing = query_strains(release_version=release_version)
release_bucket = get_release_bucket()
release_path = get_release_path(release_version)
browser_tracks_path = get_browser_tracks_path()
site_bucket_public_name = config.get('MODULE_SITE_BUCKET_PUBLIC_NAME', 'NONE')
files = RELEASE.get_report_data_urls_map()
try:
vcf_summary_url = files.get('vcf_summary_url')
vcf_summary = requests.get(vcf_summary_url).json()
except json.JSONDecodeError:
vcf_summary = None
return render_template('data/releases.html', **locals())
# ======================= #
# Alignment Data Page #
# ======================= #
@releases_bp.route('/release/latest/alignment')
@releases_bp.route('/release/<string:release_version>/alignment')
@cache.memoize(60*60)
def alignment_data(release_version=''):
RELEASES = get_all_dataset_releases(order='-version')
# Get the requested release and throw an error if it doesn't exist
RELEASE = None
if release_version:
for r in RELEASES:
if r.version == release_version:
RELEASE = r
break
if not RELEASE:
raise NotFoundError(f'Release Version: {release_version} Not Found')
else:
RELEASE = RELEASES[0]
# Pre-2020 releases don't have data organized the same way
if RELEASE.report_type == 'V1':
return
# Post-2020 releases
title = "Alignment Data"
alt_parent_breadcrumb = {"title": "Data", "url": url_for('data.landing')}
strain_listing = query_strains(release_version=release_version)
'''
DATASET_RELEASE, WORMBASE_VERSION = list(filter(lambda x: x[0] == release_version, RELEASES))[0]
REPORTS = ["alignment"]
'''
return render_template('data/alignment.html', **locals())
# =========================== #
# Strain Issues Data Page #
# =========================== #
@releases_bp.route('/release/latest/strain_issues')
@releases_bp.route('/release/<string:release_version>/strain_issues')
@cache.memoize(60*60)
def strain_issues(release_version=None):
"""
Strain Issues page
"""
RELEASES = get_all_dataset_releases(order='-version')
# Get the requested release and throw an error if it doesn't exist
RELEASE = None
if release_version:
for r in RELEASES:
if r.version == release_version:
RELEASE = r
break
if not RELEASE:
raise NotFoundError(f'Release Version: {release_version} Not Found')
else:
RELEASE = RELEASES[0]
# Pre-2020 releases don't have data organized the same way
if RELEASE.report_type == 'V1':
return
# Post-2020 releases
title = "Strain Issues"
alt_parent_breadcrumb = {"title": "Data", "url": url_for('data.landing')}
strain_listing_issues = query_strains(release_version=release_version, issues=True)
return render_template('strain/issues.html', **locals())
| StarcoderdataPython |
3277001 | <reponame>coderzh/pywasm3
#!/usr/bin/env python3
import wasm3
import os, time
scriptpath = os.path.dirname(os.path.realpath(__file__))
wasm_fn = os.path.join(scriptpath, "./wasm/coremark-minimal.wasm")
print("Initializing Wasm3 engine...")
def clock_ms():
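    """Wall-clock time in milliseconds, provided to the wasm module as env.clock_ms."""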
return int(round(time.time() * 1000))
env = wasm3.Environment()
rt = env.new_runtime(4096)
with open(wasm_fn, "rb") as f:
mod = env.parse_module(f.read())
rt.load(mod)
mod.link_function("env", "clock_ms", "I()", clock_ms)
wasm_run = rt.find_function("run")
print("Running CoreMark 1.0...")
res = wasm_run()
if res > 1:
print(f"Result: {res:.3f}")
else:
print("Error")
| StarcoderdataPython |
1839772 | <gh_stars>0
# -*- coding:utf-8 -*-
# author: Xinge
"""
SemKITTI dataloader
"""
import os
import numpy as np
import torch
import random
import time
import numba as nb
import yaml
from torch.utils import data
import pickle
REGISTERED_DATASET_CLASSES = {}
def register_dataset(cls, name=None):
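    """Class decorator that records a dataset class in REGISTERED_DATASET_CLASSES under its class name (or an explicit name)."""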
global REGISTERED_DATASET_CLASSES
if name is None:
name = cls.__name__
assert name not in REGISTERED_DATASET_CLASSES, f"exist class: {REGISTERED_DATASET_CLASSES}"
REGISTERED_DATASET_CLASSES[name] = cls
return cls
def get_model_class(name):
global REGISTERED_DATASET_CLASSES
assert name in REGISTERED_DATASET_CLASSES, f"available class: {REGISTERED_DATASET_CLASSES}"
return REGISTERED_DATASET_CLASSES[name]
@register_dataset
class voxel_dataset(data.Dataset):
def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=255, return_test=False,
fixed_volume_space=False, max_volume_space=[50, 50, 1.5], min_volume_space=[-50, -50, -3]):
'Initialization'
self.point_cloud_dataset = in_dataset
self.grid_size = np.asarray(grid_size)
self.rotate_aug = rotate_aug
self.ignore_label = ignore_label
self.return_test = return_test
self.flip_aug = flip_aug
self.fixed_volume_space = fixed_volume_space
self.max_volume_space = max_volume_space
self.min_volume_space = min_volume_space
def __len__(self):
'Denotes the total number of samples'
return len(self.point_cloud_dataset)
def __getitem__(self, index):
'Generates one sample of data'
data = self.point_cloud_dataset[index]
if len(data) == 2:
xyz, labels = data
elif len(data) == 3:
xyz, labels, sig = data
if len(sig.shape) == 2: sig = np.squeeze(sig)
else:
raise Exception('Return invalid data tuple')
# random data augmentation by rotation
if self.rotate_aug:
rotate_rad = np.deg2rad(np.random.random() * 360)
c, s = np.cos(rotate_rad), np.sin(rotate_rad)
j = np.matrix([[c, s], [-s, c]])
xyz[:, :2] = np.dot(xyz[:, :2], j)
# random data augmentation by flip x , y or x+y
if self.flip_aug:
flip_type = np.random.choice(4, 1)
if flip_type == 1:
xyz[:, 0] = -xyz[:, 0]
elif flip_type == 2:
xyz[:, 1] = -xyz[:, 1]
elif flip_type == 3:
xyz[:, :2] = -xyz[:, :2]
max_bound = np.percentile(xyz, 100, axis=0)
min_bound = np.percentile(xyz, 0, axis=0)
if self.fixed_volume_space:
max_bound = np.asarray(self.max_volume_space)
min_bound = np.asarray(self.min_volume_space)
# get grid index
crop_range = max_bound - min_bound
cur_grid_size = self.grid_size
intervals = crop_range / (cur_grid_size - 1)
if (intervals == 0).any(): print("Zero interval!")
        grid_ind = (np.floor((np.clip(xyz, min_bound, max_bound) - min_bound) / intervals)).astype(int)
# process voxel position
voxel_position = np.zeros(self.grid_size, dtype=np.float32)
dim_array = np.ones(len(self.grid_size) + 1, int)
dim_array[0] = -1
voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
# process labels
processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
data_tuple = (voxel_position, processed_label)
# center data on each voxel for PTnet
voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
return_xyz = xyz - voxel_centers
return_xyz = np.concatenate((return_xyz, xyz), axis=1)
if len(data) == 2:
return_fea = return_xyz
elif len(data) == 3:
return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)
if self.return_test:
data_tuple += (grid_ind, labels, return_fea, index)
else:
data_tuple += (grid_ind, labels, return_fea)
return data_tuple
# transformation between Cartesian coordinates and polar coordinates
def cart2polar(input_xyz):
rho = np.sqrt(input_xyz[:, 0] ** 2 + input_xyz[:, 1] ** 2)
phi = np.arctan2(input_xyz[:, 1], input_xyz[:, 0])
return np.stack((rho, phi, input_xyz[:, 2]), axis=1)
def polar2cat(input_xyz_polar):
# print(input_xyz_polar.shape)
x = input_xyz_polar[0] * np.cos(input_xyz_polar[1])
y = input_xyz_polar[0] * np.sin(input_xyz_polar[1])
return np.stack((x, y, input_xyz_polar[2]), axis=0)
@register_dataset
class cylinder_dataset(data.Dataset):
def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=255, return_test=False,
fixed_volume_space=False, max_volume_space=[50, np.pi, 2], min_volume_space=[0, -np.pi, -4],
scale_aug=False,
transform_aug=False, trans_std=[0.1, 0.1, 0.1],
min_rad=-np.pi / 4, max_rad=np.pi / 4):
self.point_cloud_dataset = in_dataset
self.grid_size = np.asarray(grid_size)
self.rotate_aug = rotate_aug
self.flip_aug = flip_aug
self.scale_aug = scale_aug
self.ignore_label = ignore_label
self.return_test = return_test
self.fixed_volume_space = fixed_volume_space
self.max_volume_space = max_volume_space
self.min_volume_space = min_volume_space
self.transform = transform_aug
self.trans_std = trans_std
self.noise_rotation = np.random.uniform(min_rad, max_rad)
def __len__(self):
'Denotes the total number of samples'
return len(self.point_cloud_dataset)
def rotation_points_single_angle(self, points, angle, axis=0):
# points: [N, 3]
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
if axis == 1:
rot_mat_T = np.array(
[[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]],
dtype=points.dtype)
elif axis == 2 or axis == -1:
rot_mat_T = np.array(
[[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],
dtype=points.dtype)
elif axis == 0:
rot_mat_T = np.array(
[[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]],
dtype=points.dtype)
else:
raise ValueError("axis should in range")
return points @ rot_mat_T
def __getitem__(self, index):
'Generates one sample of data'
data = self.point_cloud_dataset[index]
if len(data) == 2:
xyz, labels = data
elif len(data) == 3:
xyz, labels, sig = data
if len(sig.shape) == 2: sig = np.squeeze(sig)
elif len(data) == 4:
xyz, labels, sig, origin_len = data
else:
raise Exception('Return invalid data tuple')
# random data augmentation by rotation
if self.rotate_aug:
rotate_rad = np.deg2rad(np.random.random() * 90) - np.pi / 4
c, s = np.cos(rotate_rad), np.sin(rotate_rad)
j = np.matrix([[c, s], [-s, c]])
xyz[:, :2] = np.dot(xyz[:, :2], j)
# random data augmentation by flip x , y or x+y
if self.flip_aug:
flip_type = np.random.choice(4, 1)
if flip_type == 1:
xyz[:, 0] = -xyz[:, 0]
elif flip_type == 2:
xyz[:, 1] = -xyz[:, 1]
elif flip_type == 3:
xyz[:, :2] = -xyz[:, :2]
if self.scale_aug:
noise_scale = np.random.uniform(0.95, 1.05)
xyz[:, 0] = noise_scale * xyz[:, 0]
xyz[:, 1] = noise_scale * xyz[:, 1]
# convert coordinate into polar coordinates
if self.transform:
noise_translate = np.array([np.random.normal(0, self.trans_std[0], 1),
np.random.normal(0, self.trans_std[1], 1),
np.random.normal(0, self.trans_std[2], 1)]).T
xyz[:, 0:3] += noise_translate
xyz_pol = cart2polar(xyz)
max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
max_bound = np.max(xyz_pol[:, 1:], axis=0)
min_bound = np.min(xyz_pol[:, 1:], axis=0)
max_bound = np.concatenate(([max_bound_r], max_bound))
min_bound = np.concatenate(([min_bound_r], min_bound))
if self.fixed_volume_space:
max_bound = np.asarray(self.max_volume_space)
min_bound = np.asarray(self.min_volume_space)
# get grid index
crop_range = max_bound - min_bound
cur_grid_size = self.grid_size
intervals = crop_range / (cur_grid_size - 1)
if (intervals == 0).any(): print("Zero interval!")
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(int)
voxel_position = np.zeros(self.grid_size, dtype=np.float32)
dim_array = np.ones(len(self.grid_size) + 1, int)
dim_array[0] = -1
voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
voxel_position = polar2cat(voxel_position)
processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
data_tuple = (voxel_position, processed_label)
# center data on each voxel for PTnet
voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
return_xyz = xyz_pol - voxel_centers
return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)
if len(data) == 2:
return_fea = return_xyz
elif len(data) == 3:
return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)
elif len(data) == 4:
return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)
if self.return_test:
data_tuple += (grid_ind, labels, return_fea, index)
else:
data_tuple += (grid_ind, labels, return_fea)
return data_tuple
@register_dataset
class polar_dataset(data.Dataset):
def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=255, return_test=False,
fixed_volume_space=False, max_volume_space=[50, np.pi, 2], min_volume_space=[0, -np.pi, -4],
scale_aug=False):
self.point_cloud_dataset = in_dataset
self.grid_size = np.asarray(grid_size)
self.rotate_aug = rotate_aug
self.flip_aug = flip_aug
self.scale_aug = scale_aug
self.ignore_label = ignore_label
self.return_test = return_test
self.fixed_volume_space = fixed_volume_space
self.max_volume_space = max_volume_space
self.min_volume_space = min_volume_space
def __len__(self):
'Denotes the total number of samples'
return len(self.point_cloud_dataset)
def __getitem__(self, index):
'Generates one sample of data'
data = self.point_cloud_dataset[index]
if len(data) == 2:
xyz, labels = data
elif len(data) == 3:
xyz, labels, sig = data
if len(sig.shape) == 2:
sig = np.squeeze(sig)
else:
raise Exception('Return invalid data tuple')
# random data augmentation by rotation
if self.rotate_aug:
rotate_rad = np.deg2rad(np.random.random() * 45) - np.pi / 8
c, s = np.cos(rotate_rad), np.sin(rotate_rad)
j = np.matrix([[c, s], [-s, c]])
xyz[:, :2] = np.dot(xyz[:, :2], j)
# random data augmentation by flip x , y or x+y
if self.flip_aug:
flip_type = np.random.choice(4, 1)
if flip_type == 1:
xyz[:, 0] = -xyz[:, 0]
elif flip_type == 2:
xyz[:, 1] = -xyz[:, 1]
elif flip_type == 3:
xyz[:, :2] = -xyz[:, :2]
if self.scale_aug:
noise_scale = np.random.uniform(0.95, 1.05)
xyz[:, 0] = noise_scale * xyz[:, 0]
xyz[:, 1] = noise_scale * xyz[:, 1]
xyz_pol = cart2polar(xyz)
max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
max_bound = np.max(xyz_pol[:, 1:], axis=0)
min_bound = np.min(xyz_pol[:, 1:], axis=0)
max_bound = np.concatenate(([max_bound_r], max_bound))
min_bound = np.concatenate(([min_bound_r], min_bound))
if self.fixed_volume_space:
max_bound = np.asarray(self.max_volume_space)
min_bound = np.asarray(self.min_volume_space)
# get grid index
crop_range = max_bound - min_bound
cur_grid_size = self.grid_size
intervals = crop_range / (cur_grid_size - 1)
if (intervals == 0).any(): print("Zero interval!")
grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(np.int)
voxel_position = np.zeros(self.grid_size, dtype=np.float32)
dim_array = np.ones(len(self.grid_size) + 1, int)
dim_array[0] = -1
voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
voxel_position = polar2cat(voxel_position)
processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
data_tuple = (voxel_position, processed_label)
# center data on each voxel for PTnet
voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
return_xyz = xyz_pol - voxel_centers
return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)
if len(data) == 2:
return_fea = return_xyz
elif len(data) == 3:
return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)
if self.return_test:
data_tuple += (grid_ind, labels, return_fea, index)
else:
data_tuple += (grid_ind, labels, return_fea)
return data_tuple
@nb.jit('u1[:,:,:](u1[:,:,:],i8[:,:])', nopython=True, cache=True, parallel=False)
def nb_process_label(processed_label, sorted_label_voxel_pair):
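    # Majority vote: for each voxel, count the labels of the points that fall in it and
    # keep the most frequent one; the input pairs must already be sorted by voxel index.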
label_size = 256
counter = np.zeros((label_size,), dtype=np.uint16)
counter[sorted_label_voxel_pair[0, 3]] = 1
cur_sear_ind = sorted_label_voxel_pair[0, :3]
for i in range(1, sorted_label_voxel_pair.shape[0]):
cur_ind = sorted_label_voxel_pair[i, :3]
if not np.all(np.equal(cur_ind, cur_sear_ind)):
processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
counter = np.zeros((label_size,), dtype=np.uint16)
cur_sear_ind = cur_ind
counter[sorted_label_voxel_pair[i, 3]] += 1
processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
return processed_label
def collate_fn_BEV(data):
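    # Stack the dense voxel grids and voxel labels into batch arrays; the per-point data
    # (grid indices, labels, features) stays in Python lists because each scan has a
    # different number of points.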
data2stack = np.stack([d[0] for d in data]).astype(np.float32)
label2stack = np.stack([d[1] for d in data]).astype(np.int)
grid_ind_stack = [d[2] for d in data]
point_label = [d[3] for d in data]
xyz = [d[4] for d in data]
return torch.from_numpy(data2stack), torch.from_numpy(label2stack), grid_ind_stack, point_label, xyz
def collate_fn_BEV_test(data):
data2stack = np.stack([d[0] for d in data]).astype(np.float32)
    label2stack = np.stack([d[1] for d in data]).astype(int)
grid_ind_stack = [d[2] for d in data]
point_label = [d[3] for d in data]
xyz = [d[4] for d in data]
index = [d[5] for d in data]
return torch.from_numpy(data2stack), torch.from_numpy(label2stack), grid_ind_stack, point_label, xyz, index
| StarcoderdataPython |
8003746 | <gh_stars>0
n1 = int(input('First number: '))
n2 = int(input('Second number: '))
if n1 > n2:
    print('The first number is larger')
elif n2 > n1:
    print('The second number is larger')
else:
    print('both are equal')
| StarcoderdataPython |
1962727 | """libpredweb"""
| StarcoderdataPython |
317502 | #!/usr/bin/env python
import sys
if 'develop' in sys.argv:
# use setuptools for develop, but nothing else
from setuptools import setup
else:
from distutils.core import setup
with open('README.rst') as file:
long_description = file.read()
with open('CHANGES') as file:
long_description += file.read()
execfile('agpy/__version__.py')
setup(name='agpy',
version=__version__,
description='agpy, Adam Ginsburg\'s Python Code (in 0.1 for perpetuity - it won\'t bump up until I release something)',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
data_files=[('h2fit',['h2fit_support/atran.txt',
'h2fit_support/atran2000.fits',
'h2fit_support/atran_arcturus.txt',
'h2fit_support/atran_raw_arcturus.txt',
'h2fit_support/atran_solar.txt',
'h2fit_support/atran_tran.txt',
'h2fit_support/dalgarno1984_table5.txt',
'h2fit_support/h2pars.txt',
'h2fit_support/linelist.txt'])],
url='http://code.google.com/p/agpy/',
packages=['agpy','agpy/mpfit','AG_fft_tools','AG_image_tools','contributed','radex'],
)
| StarcoderdataPython |
3277968 | import numpy as np
import numpy.matlib  # required for np.matlib.repmat used in qdaLearn, ldaTest and qdaTest
from scipy.optimize import minimize
from scipy.io import loadmat
from numpy.linalg import det, inv
from math import sqrt, pi
import scipy.io
import matplotlib.pyplot as plt
import pickle
import sys
def ldaLearn(X,y):
# Inputs
# X - a N x d matrix with each row corresponding to a training example
# y - a N x 1 column vector indicating the labels for each training example
#
# Outputs
# means - A d x k matrix containing learnt means for each of the k classes
# covmat - A single d x d learnt covariance matrix
c=np.hstack((X,y))
UE = np.unique (y[:,0])
mean = np.zeros ((np.shape(X)[1], len(UE))) #initializing mean matrix
coVariance = np.zeros ((np.shape(X)[1], np.shape(X)[1])) #initialzing covariance matrix
## Calculation of mean
for i in range (0,len(UE)):
for j in range (0,np.shape(X)[1]):
elemIndices = np.where (c[:,2] == UE[i])
classElem = X[elemIndices,j]
mean[j,i]= np.mean(classElem[0,:])
    ## Calculating covariance
coVariance = np.cov(np.transpose(X))
return mean,coVariance
def qdaLearn(X,y):
# Inputs
# X - a N x d matrix with each row corresponding to a training example
# y - a N x 1 column vector indicating the labels for each training example
#
# Outputs
# means - A d x k matrix containing learnt means for each of the k classes
# covmats - A list of k d x d learnt covariance matrices for each of the k classes
c=np.hstack((X,y))
UE = np.unique (y[:,0])
mean = np.zeros ((np.shape(X)[1], len(UE)))
coVariance = [0]* len(UE)
temp = np.zeros ((np.shape(X)[1], np.shape(X)[1]))
## Calculation of mean
for i in range (0,len(UE)):
for j in range (0,np.shape(X)[1]):
elemIndices = np.where (c[:,2] == UE[i])
classElem = X[elemIndices,j]
mu = sum (classElem[0,:])
mu = mu/np.shape (classElem)[1]
mean[j,i] = mu
        ## Calculating covariance
nu = np.matlib.repmat(mu,len(X),1)
D = np.matmul(np.transpose(X-nu),(X - nu))
D = D/len(X)
coVariance[i]=D
return mean,coVariance
def ldaTest(means,covmat,Xtest,ytest):
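    # Inputs
    # means, covmat - parameters of the LDA model
    # Xtest - a N x d matrix with each row corresponding to a test example
    # ytest - a N x 1 column vector indicating the labels for each test example
    # Outputs
    # acc - A scalar accuracy value
    # ypred - N x 1 column vector indicating the predicted labels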
classes, count = np.unique(ytest[:,0], return_counts = True)
theta = np.zeros (np.shape(means)[1])
label = np.zeros([len(Xtest),1])
eff = np.zeros([len(Xtest),np.shape(means)[1]])
acc = np.zeros(len(Xtest))
D = np.zeros(np.shape(means)[1])
a = np.array([1,2,3,4,5])
for i in range (0,np.shape(means)[1]):
X=Xtest
nu=np.matlib.repmat(means[:,i],len(Xtest),1)
sigma=inv(covmat)
        D=np.matmul(np.matmul((X-nu),sigma),np.transpose(X-nu)) # squared Mahalanobis distance of every sample to this class mean
eff[:,i] = np.diagonal(D)
for i in range (0,len(Xtest)):
l=np.where(eff[i,:]==np.amin(eff[i,:]))
label[i]=a[l]
if label[i]==ytest[i]:
acc[i]=1
else:
acc[i]=0
# Calculating accuracy
unique, counts = np.unique(acc, return_counts=True)
accuracy=np.count_nonzero(acc==1)/len(acc)*100
return accuracy,label
def qdaTest(means,covmats,Xtest,ytest):
# Inputs
# means, covmats - parameters of the QDA model
# Xtest - a N x d matrix with each row corresponding to a test example
# ytest - a N x 1 column vector indicating the labels for each test example
# Outputs
# acc - A scalar accuracy value
# ypred - N x 1 column vector indicating the predicted labels
classes, count = np.unique(ytest[:,0], return_counts = True)
theta = np.zeros (np.shape(means)[1])
label = np.zeros([len(Xtest),1])
eff = np.zeros([len(Xtest),np.shape(means)[1]])
acc = np.zeros(len(Xtest))
D = np.zeros(np.shape(means)[1])
a = np.array([1,2,3,4,5])
for i in range (0,np.shape(means)[1]):
X=Xtest
nu=np.matlib.repmat(means[:,i],len(Xtest),1)
sigma=inv(covmats[i])
        D=np.matmul(np.matmul((X-nu),sigma),np.transpose(X-nu)) # squared Mahalanobis distance of every sample to this class mean
eff[:,i] = np.diagonal(D)
for i in range (0,len(Xtest)):
l=np.where(eff[i,:]==np.amin(eff[i,:]))
label[i]=a[l]
if label[i]==ytest[i]:
acc[i]=1
else:
acc[i]=0
# Calculating accuracy
unique, counts = np.unique(acc, return_counts=True)
accuracy=np.count_nonzero(acc==1)/len(acc)*100
return accuracy,label
def learnOLERegression(X,y):
# Inputs:
# X = N x d
# y = N x 1
# Output:
# w = d x 1
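    # Closed-form OLS solution via the normal equations: w = (X^T X)^{-1} X^T y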
w = np.matmul(inv(np.matmul(np.transpose(X),X)),np.matmul(np.transpose(X),y))
return w
def learnRidgeRegression(X,y,lambd):
# Inputs:
# X = N x d
# y = N x 1
# lambd = ridge parameter (scalar)
# Output:
# w = d x 1
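    # Closed-form ridge solution: w = (lambda*I + X^T X)^{-1} X^T y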
I= np.eye((np.shape(X)[1]))
lambdI= I * lambd
Inv=inv(lambdI+np.matmul(np.transpose(X),X))
w = np.matmul(Inv,np.matmul(np.transpose(X),y))
return w
def testOLERegression(w,Xtest,ytest):
# Inputs:
# w = d x 1
# Xtest = N x d
# ytest = X x 1
# Output:
# mse
mse =0
for i in range (0,np.shape(ytest[:,0])[0]):
#w = np.squeeze((w))
#temp = Xtest[i]
value = ytest[i] - np.dot(np.squeeze(np.asarray(w)),Xtest[i])
value = np.dot(value,value)
if i == 0:
mse = value;
if i != 0:
mse = mse +value
mse = mse/len(ytest)
return mse
def regressionObjVal(w, X, y, lambd):
# compute squared error (scalar) and gradient of squared error with respect
# to w (vector) for the given data X and y and the regularization parameter
# lambda
    # minimize() passes w as a flat array; orient it to match the columns of X
    if np.shape(w)[0] != np.shape(X)[1]:
        w = np.transpose(w)
A = np.subtract(y,np.reshape(np.dot(X,w),[len(y),1]))
B = lambd * np.dot(np.transpose(w),w)
Jw =np.dot(np.transpose(A),A) + B # Do not divide by 2 for smooth curve
error=Jw
error_grad= np.dot(-2 * np.transpose(X),A) + np.reshape(np.dot(2,np.dot(lambd, w)),[np.shape(X)[1],1])
error_grad=np.squeeze(np.asarray(error_grad))
return error, error_grad
def mapNonLinear(x,p):
# Inputs:
# x - a single column vector (N x 1)
# p - integer (>= 0)
# Outputs:
# Xp - (N x (p+1))
x=np.reshape(x,[len(x),1])
intercept=np.ones([len(x),1])
Xp= intercept
for i in range(1,p+1):
temp = np.reshape(np.power(x,i),[len(x),1])
Xp = np.concatenate((Xp,temp),axis=1)
return Xp
# Main script
# Problem 1
print("######## PROBLEM 1 #########")
# load the sample data
if sys.version_info.major == 2:
X,y,Xtest,ytest = pickle.load(open('sample.pickle','rb'))
else:
X,y,Xtest,ytest = pickle.load(open('sample.pickle','rb'),encoding = 'latin1')
# LDA
means,covmat = ldaLearn(X,y)
ldaacc,ldares = ldaTest(means,covmat,Xtest,ytest)
print('LDA Accuracy = '+str(ldaacc))
# QDA
means,covmats = qdaLearn(X,y)
qdaacc,qdares = qdaTest(means,covmats,Xtest,ytest)
print('QDA Accuracy = '+str(qdaacc))
# plotting boundaries
x1 = np.linspace(-5,20,100)
x2 = np.linspace(-5,20,100)
xx1,xx2 = np.meshgrid(x1,x2)
xx = np.zeros((x1.shape[0]*x2.shape[0],2))
xx[:,0] = xx1.ravel()
xx[:,1] = xx2.ravel()
fig = plt.figure(figsize=[12,6])
plt.subplot(1, 2, 1)
zacc,zldares = ldaTest(means,covmat,xx,np.zeros((xx.shape[0],1)))
plt.contourf(x1,x2,zldares.reshape((x1.shape[0],x2.shape[0])),alpha=0.3)
plt.scatter(Xtest[:,0],Xtest[:,1],c=ytest[:,0])
plt.title('LDA')
plt.subplot(1, 2, 2)
zacc,zqdares = qdaTest(means,covmats,xx,np.zeros((xx.shape[0],1)))
plt.contourf(x1,x2,zqdares.reshape((x1.shape[0],x2.shape[0])),alpha=0.3)
plt.scatter(Xtest[:,0],Xtest[:,1],c=ytest[:,0])
plt.title('QDA')
plt.show()
# Problem 2
print("######## PROBLEM 2 #########")
if sys.version_info.major == 2:
X,y,Xtest,ytest = pickle.load(open('diabetes.pickle','rb'))
else:
X,y,Xtest,ytest = pickle.load(open('diabetes.pickle','rb'),encoding = 'latin1')
# add intercept
X_i = np.concatenate((np.ones((X.shape[0],1)), X), axis=1)
Xtest_i = np.concatenate((np.ones((Xtest.shape[0],1)), Xtest), axis=1)
w = learnOLERegression(X,y)
mle = testOLERegression(w,Xtest,ytest)
w_i = learnOLERegression(X_i,y)
mle_i = testOLERegression(w_i,Xtest_i,ytest)
print('MSE without intercept '+str(mle))
print('MSE with intercept '+str(mle_i))
# Problem 3
print("######## PROBLEM 3 #########")
k = 101
lambdas = np.linspace(0, 1, num=k)
i = 0
mses3_train = np.zeros((k,1))
mses3 = np.zeros((k,1))
for lambd in lambdas:
w_l = learnRidgeRegression(X_i,y,lambd)
mses3_train[i] = testOLERegression(w_l,X_i,y)
mses3[i] = testOLERegression(w_l,Xtest_i,ytest)
if i==6:
weight_ideal=w_l
i = i + 1
fig = plt.figure(figsize=[12,6])
plt.subplot(1, 2, 1)
plt.plot(lambdas,mses3_train)
plt.title('MSE for Train Data')
plt.subplot(1, 2, 2)
plt.plot(lambdas,mses3)
plt.title('MSE for Test Data')
plt.show()
# Problem 4
print("######## PROBLEM 4 #########")
k = 101
lambdas = np.linspace(0, 1, num=k)
i = 0
mses4_train = np.zeros((k,1))
mses4 = np.zeros((k,1))
opts = {'maxiter' : 20} # Preferred value.
w_init = np.ones((X_i.shape[1],1))
for lambd in lambdas:
args = (X_i, y, lambd)
w_l = minimize(regressionObjVal, w_init, jac=True, args=args,method='CG', options=opts)
w_l = np.transpose(np.array(w_l.x))
w_l = np.reshape(w_l,[len(w_l),1])
mses4_train[i] = testOLERegression(w_l,X_i,y)
mses4[i] = testOLERegression(w_l,Xtest_i,ytest)
i = i + 1
fig = plt.figure(figsize=[12,6])
plt.subplot(1, 2, 1)
plt.plot(lambdas,mses4_train)
plt.plot(lambdas,mses3_train)
plt.title('MSE for Train Data')
plt.legend(['Using scipy.minimize','Direct minimization'])
plt.subplot(1, 2, 2)
plt.plot(lambdas,mses4)
plt.plot(lambdas,mses3)
plt.title('MSE for Test Data')
plt.legend(['Using scipy.minimize','Direct minimization'])
plt.show()
# Problem 5
print("######## PROBLEM 5 #########")
pmax = 7
lambda_opt = lambdas[np.argmin(mses3)] # lambda_opt estimated from Problem 3
mses5_train = np.zeros((pmax,2))
mses5 = np.zeros((pmax,2))
for p in range(pmax):
Xd = mapNonLinear(X[:,2],p)
Xdtest = mapNonLinear(Xtest[:,2],p)
w_d1 = learnRidgeRegression(Xd,y,0)
mses5_train[p,0] = testOLERegression(w_d1,Xd,y)
mses5[p,0] = testOLERegression(w_d1,Xdtest,ytest)
w_d2 = learnRidgeRegression(Xd,y,lambda_opt)
mses5_train[p,1] = testOLERegression(w_d2,Xd,y)
mses5[p,1] = testOLERegression(w_d2,Xdtest,ytest)
fig = plt.figure(figsize=[12,6])
plt.subplot(1, 2, 1)
plt.plot(range(pmax),mses5_train)
plt.title('MSE for Train Data')
plt.legend(('No Regularization','Regularization'))
plt.subplot(1, 2, 2)
plt.plot(range(pmax),mses5)
plt.title('MSE for Test Data')
plt.legend(('No Regularization','Regularization'))
plt.show()
| StarcoderdataPython |
1903500 | <filename>args_parser/hyunjungkim_01.py<gh_stars>0
import os
os.getcwd()
pass
| StarcoderdataPython |
9766781 | <gh_stars>1-10
import os
from .helpers import get_user
from shutil import copyfile
def enable_vhosts():
tmp = []
conf_path = "/usr/local/etc/httpd/httpd.conf"
with open(conf_path, 'r') as input_file:
for line in input_file.readlines():
if line == "#Include /usr/local/etc/httpd/extra/httpd-vhosts.conf\n":
line = line.replace("#", "")
tmp.append(line)
input_file.close()
with open(conf_path, 'w') as output_file:
for line in tmp:
output_file.write(line)
output_file.close()
def generate_hosts():
data_path = os.path.dirname(__file__) + "/../files/hosts"
hosts_path = "/private/etc/hosts"
copyfile(data_path, hosts_path)
def generate_vhosts():
tmp = []
data_path = os.path.dirname(__file__) + "/../files/vhosts"
vhosts_path = "/usr/local/etc/httpd/extra/httpd-vhosts.conf"
user = get_user()
with open(data_path, 'r') as vhost_template:
for line in vhost_template.readlines():
tmp.append(line.replace("{USERNAME}", user))
vhost_template.close()
print(tmp)
# with open(vhosts_path, 'w') as output_file:
# for line in tmp:
# output_file.write(line)
#
# output_file.close()
| StarcoderdataPython |
6677435 | <filename>weights/tools/keras_model_tools.py
# Python script of tools for operations on Keras models
import argparse
import h5py
class KModelTools:
def __init__(self, h5_path=None):
self.h5_path = h5_path
self.f_h5 = h5py.File(h5_path)
    def print_h5_weights(self):
for layer, g in self.f_h5.items():
print("{}".format(layer))
for key, value in g.attrs.items():
print("{}: {}".format(key, value)+'\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tool set for Keras model')
parser.add_argument('--h5', type=str, required=True, help='Path to the Keras model')
args = parser.parse_args()
k_model_tools = KModelTools(args.h5)
    k_model_tools.print_h5_weights()
| StarcoderdataPython |
1847242 | from os import terminal_size
from typing import Text
import cv2
import numpy as np
#? Drawing a circle by mouse click
def draw_circle(event, x, y, flags, param):
if event==cv2.EVENT_LBUTTONDOWN:
cv2.circle(image, (x,y), 30, (255, 0, 70), -1)
# create blank image and window
image = np.zeros(shape=(512,512,3), dtype=np.uint8)
cv2.namedWindow("Circle")
cv2.setMouseCallback("Circle", draw_circle)
while True:
cv2.imshow("Circle", image)
if cv2.waitKey(1)==ord('q'):
break
cv2.destroyAllWindows()
# ---------------------------------------------------------------------------- #
#? Drawing Rectangles / Squares
drawing = False
x1, y1 = -1, -1
def draw_shape(event, x, y, flags, param):
global x1, y1, drawing
if event==cv2.EVENT_LBUTTONDOWN:
drawing = True
x1, y1 = x, y
if event==cv2.EVENT_MOUSEMOVE:
if drawing:
cv2.rectangle(image, (x1, y1), (x, y), (255,255,255), -1)
if event==cv2.EVENT_LBUTTONUP:
drawing = False
# create blank image and window
image = np.zeros((512,512,3), np.uint8)
cv2.namedWindow('rectangle')
cv2.setMouseCallback('rectangle', draw_shape)
while True:
cv2.imshow("rectangle", image)
if cv2.waitKey(1)==ord('q'):
break
cv2.destroyAllWindows() | StarcoderdataPython |
338159 | <filename>error_reporting/google/cloud/error_reporting/util.py
# Copyright 2016 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for Stackdriver Error Reporting."""
from google.cloud.error_reporting.client import HTTPContext
def build_flask_context(request):
"""Builds an HTTP context object from a Flask (Werkzeug) request object.
This helper method extracts the relevant HTTP context from a Flask request
object into an object ready to be sent to Error Reporting.
.. code-block:: python
>>> @app.errorhandler(HTTPException)
... def handle_error(exc):
... client.report_exception(
... http_context=build_flask_context(request))
... # rest of error response code here
:type request: :class:`werkzeug.wrappers.request`
:param request: The Flask request object to convert.
:rtype: :class:`~google.cloud.error_reporting.client.HTTPContext`
:returns: An HTTPContext object ready to be sent to the Stackdriver Error
Reporting API.
"""
return HTTPContext(
url=request.url,
method=request.method,
user_agent=request.user_agent.string,
referrer=request.referrer,
remote_ip=request.remote_addr,
)
| StarcoderdataPython |
9783887 | from typing import TYPE_CHECKING
from sqlalchemy import Column, Integer, String, ARRAY, Float, REAL
from sqlalchemy.orm import relationship
from sqlalchemy.sql.schema import ForeignKey
from app.db.base_class import Base
if TYPE_CHECKING:
from .embedding_model import Embedding_Model # noqa: F401
class Embedding(Base):
id = Column(Integer, primary_key=True, index=True)
track_id = Column(Integer, ForeignKey("track.id"))
track = relationship("Track", back_populates="embeddings")
embedding_model_id = Column(Integer, ForeignKey("embedding_model.id"))
embedding_model = relationship("Embedding_Model", back_populates="embeddings")
values = Column(ARRAY(REAL), nullable=False)
| StarcoderdataPython |
9726143 | """
This is a WIP and a learning experience.
Flask Documentation: http://flask.pocoo.org/docs/0.12/patterns/fileuploads/
"""
print("BEGIN")
# TODO: Add security. Some password/token passed along with ALL requests. (mandate HTTPS!!!)
# NOTE: Server permissions should (SHALL) be set to mitigate issues/keep shenanigans low.
# TODO: View page. Show all uploads, require password/token.
import os
from flask import Flask, request, render_template, redirect, url_for, send_from_directory
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = '/var/www/html/dingle/'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
app = Flask(__name__)
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
@app.route("/up", methods=["POST"])
def up():
if request.method == "POST": # Check not neccessarily needed. Later feature? DELETE?
if 'file' not in request.files:
return "Bad request. no file", 400 # 400 -> Bad Request.
sent_file = request.files["file"]
# Should be a non issue for my purpose --- but mind as well handle something weird.
if sent_file.filename == '':
return "Bad request.", 400 # 400 -> Bad Request.
if sent_file and allowed_file(sent_file.filename):
file_name = secure_filename(sent_file.filename)
sent_file.save(os.path.join(
app.config["UPLOAD_FOLDER"], file_name))
return redirect(url_for("upped", file_name=file_name))
@app.route("/up/upped/<file_name>")
def upped(file_name):
return send_from_directory(app.config['UPLOAD_FOLDER'], file_name)
# MISC / RANDOM stuff.
@app.route("/echoer", methods=["POST", "GET"])
def index():
return """<form action="/echo" method="GET"><input name="text"><input type="submit" value="Echo"></form>"""
@app.route("/echoer/echo", methods=["POST"])
def echo():
return "You said: " + request.form['text'] + "\n" + str(request.form) + "\n"
@app.route("/greet/")
@app.route("/greet/<user>")
def greet(user=None):
return render_template("home.html", method=request.method, user=user)
# vars from address specified with < >
@app.route("/user/<name>")
def user(name):
return "Howdy, {}".format(name)
# For int in URL var specify int data type
@app.route("/post/<int:post_id>")
def post(post_id):
return "Howdy, {}".format(post_id)
if __name__ == "__main__":
app.run()
# app.run(debug=True)
| StarcoderdataPython |
# The question asks us to find the articulation points
# We'll use a modified DFS to detect articulation points
time = 0
def criticalRouters(numRouters, numLinks, links):
# WRITE YOUR CODE HERE
adjList = dict()
for i in range(1,numRouters + 1):
adjList[i] = list()
for link in links:
node1, node2 = link[0], link[1]
adjList[node1].append(node2)
adjList[node2].append(node1)
ap = DFSTraversal(adjList)
result = []
for index, val in enumerate(ap):
if val == True:
result.append(index)
return result
def DFSTraversal(adjList):
# maintain visited nodes, parent of visited nodes
numNodes = len(adjList) + 1
visited = [False] * numNodes
parent = [-1] * numNodes
# discovered stores the time of discovery
discovered = [float('inf')] * numNodes
    # stores the ancestor with the lowest discovery time
ancestorWithLowest = [float('inf')] * numNodes
articulationPoint = [False] * numNodes
# start the traversal
for node in adjList.keys():
if visited[node] == False:
DFSTravesalRecursive(adjList, node, visited, articulationPoint, parent, ancestorWithLowest, discovered)
return articulationPoint
def DFSTravesalRecursive(adjList, node, visited, articulationPoint, parent, ancestorWithLowest, discovered):
children = 0
visited[node] = True
    # init discovery time and ancestor low value
global time
discovered[node] = time
ancestorWithLowest[node] = time
time += 1
for neighbor in adjList[node]:
if visited[neighbor] == False:
parent[neighbor] = node
children += 1
DFSTravesalRecursive(adjList, neighbor, visited, articulationPoint, parent, ancestorWithLowest, discovered)
ancestorWithLowest[node] = min(ancestorWithLowest[node], ancestorWithLowest[neighbor])
# if node is root and has two or more children, it is AP
if parent[node] == -1 and children > 1:
articulationPoint[node] = True
            # if node is not root and the low value of the child is not less than the discovery time of node, then it is an AP
            if parent[node] != -1 and ancestorWithLowest[neighbor] >= discovered[node]:
articulationPoint[node] = True
elif neighbor != parent[node]:
ancestorWithLowest[node] = min(ancestorWithLowest[node], discovered[neighbor])
print(criticalRouters(7,7,[[1,2],[1,3],[2,4],[3,4],[3,6],[6,7],[4,5]])) | StarcoderdataPython |
6427483 | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
import unittest
import logging
from droidlet.dialog.dialogue_manager import DialogueManager
from droidlet.memory.dialogue_stack import DialogueStack
from droidlet.dialog.droidlet_nsp_model_wrapper import DroidletNSPModelWrapper
from agents.loco_mc_agent import LocoMCAgent
from droidlet.interpreter.tests.all_test_commands import *
from agents.craftassist.tests.fake_agent import MockOpt
# FIXME agent this test needs to move to the interpreter folder after
# dialogue_manager is properly split between agent and intepreter
class AttributeDict(dict):
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
class FakeMemory:
pass
class FakeAgent(LocoMCAgent):
def __init__(self, opts):
super(FakeAgent, self).__init__(opts)
self.opts = opts
def init_memory(self):
m = FakeMemory()
stack = DialogueStack()
m.dialogue_stack = stack
self.memory = m
def init_physical_interfaces(self):
pass
def init_perception(self):
pass
def init_controller(self):
dialogue_object_classes = {}
self.dialogue_manager = DialogueManager(
memory=self.memory,
dialogue_object_classes=dialogue_object_classes,
semantic_parsing_model_wrapper=DroidletNSPModelWrapper,
opts=self.opts,
)
# NOTE: The following commands in locobot_commands can't be supported
# right away but we'll attempt them in the next round:
# "push the chair",
# "find the closest red thing",
# "copy this motion",
# "topple the pile of notebooks",
locobot_commands = list(GROUND_TRUTH_PARSES) + [
"push the chair",
"find the closest red thing",
"copy this motion",
"topple the pile of notebooks",
]
TTAD_MODEL_DIR = os.path.join(
os.path.dirname(__file__), "../../../../agents/craftassist/models/semantic_parser/"
)
TTAD_BERT_DATA_DIR = os.path.join(
os.path.dirname(__file__), "../../../../agents/craftassist/datasets/annotated_data/"
)
GROUND_TRUTH_DATA_DIR = os.path.join(
os.path.dirname(__file__), "../../../../agents/craftassist/datasets/ground_truth/"
)
class TestDialogueManager(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestDialogueManager, self).__init__(*args, **kwargs)
opts = MockOpt()
opts.nsp_data_dir = TTAD_BERT_DATA_DIR
opts.ground_truth_data_dir = GROUND_TRUTH_DATA_DIR
opts.nsp_models_dir = TTAD_MODEL_DIR
opts.no_ground_truth = False
self.agent = FakeAgent(opts)
def test_parses(self):
logging.info(
"Printing semantic parsing for {} locobot commands".format(len(locobot_commands))
)
for command in locobot_commands:
ground_truth_parse = GROUND_TRUTH_PARSES.get(command, None)
model_prediction = self.agent.dialogue_manager.semantic_parsing_model_wrapper.parsing_model.query_for_logical_form(
command
)
logging.info(
"\nCommand -> '{}' \nGround truth -> {} \nParse -> {}\n".format(
command, ground_truth_parse, model_prediction
)
)
def test_validate_bad_json(self):
# Don't print debug info on failure since it will be misleading
is_valid_json = self.agent.dialogue_manager.semantic_parsing_model_wrapper.validate_parse_tree(
parse_tree={}, debug=False
)
self.assertFalse(is_valid_json)
def test_validate_array_span_json(self):
action_dict = {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"action_type": "BUILD",
"schematic": {
"text_span": [0, [5, 5]],
"filters": {
"triples": [{"pred_text": "has_name", "obj_text": [0, [5, 5]]}]
},
},
}
],
}
is_valid_json = self.agent.dialogue_manager.semantic_parsing_model_wrapper.validate_parse_tree(
action_dict
)
self.assertTrue(is_valid_json)
def test_validate_string_span_json(self):
action_dict = {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"action_type": "DANCE",
"dance_type": {
"look_turn": {
"location": {
"reference_object": {
"filters": {
"triples": [{"pred_text": "has_name", "obj_text": "cube"}]
}
}
}
}
},
}
],
}
is_valid_json = self.agent.dialogue_manager.semantic_parsing_model_wrapper.validate_parse_tree(
action_dict
)
self.assertTrue(is_valid_json)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
11252652 | from setuptools import find_packages, setup
with open('README.md', 'r') as readme:
long_description = readme.read()
setup(
name='webshell',
version='1.0',
packages=find_packages(),
zip_safe=False,
install_requires=[
'flask',
],
author='<NAME>',
author_email='<EMAIL>',
description='Flask application which allows the user to execute '
'arbitrary shell commands on the remote web server via '
'browser.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/swiewiora/python-webshell',
license='MIT',
)
| StarcoderdataPython |
1819232 | # Generated by Django 3.0.4 on 2020-03-12 11:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('localusers', '0004_auto_20200310_1649'),
('posts', '0002_auto_20200312_1113'),
]
operations = [
migrations.AddField(
model_name='event',
name='server',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='disco_server', to='localusers.DiscoServer'),
),
]
| StarcoderdataPython |
3472852 | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from clients.ctm_api_client.configuration import Configuration
class OrderFolderResultItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"errored_count": "int",
"folder_name": "str",
"not_met_criteria_count": "int",
"order_folder_status": "str",
"ordered_count": "int",
"ordered_list": "list[OrderedItemItem]",
}
attribute_map = {
"errored_count": "errored_count",
"folder_name": "folder_name",
"not_met_criteria_count": "not_met_criteria_count",
"order_folder_status": "order_folder_status",
"ordered_count": "ordered_count",
"ordered_list": "ordered_list",
}
def __init__(
self,
errored_count=None,
folder_name=None,
not_met_criteria_count=None,
order_folder_status=None,
ordered_count=None,
ordered_list=None,
_configuration=None,
): # noqa: E501
"""OrderFolderResultItem - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._errored_count = None
self._folder_name = None
self._not_met_criteria_count = None
self._order_folder_status = None
self._ordered_count = None
self._ordered_list = None
self.discriminator = None
if errored_count is not None:
self.errored_count = errored_count
if folder_name is not None:
self.folder_name = folder_name
if not_met_criteria_count is not None:
self.not_met_criteria_count = not_met_criteria_count
if order_folder_status is not None:
self.order_folder_status = order_folder_status
if ordered_count is not None:
self.ordered_count = ordered_count
if ordered_list is not None:
self.ordered_list = ordered_list
@property
def errored_count(self):
"""Gets the errored_count of this OrderFolderResultItem. # noqa: E501
:return: The errored_count of this OrderFolderResultItem. # noqa: E501
:rtype: int
"""
return self._errored_count
@errored_count.setter
def errored_count(self, errored_count):
"""Sets the errored_count of this OrderFolderResultItem.
:param errored_count: The errored_count of this OrderFolderResultItem. # noqa: E501
:type: int
"""
self._errored_count = errored_count
@property
def folder_name(self):
"""Gets the folder_name of this OrderFolderResultItem. # noqa: E501
:return: The folder_name of this OrderFolderResultItem. # noqa: E501
:rtype: str
"""
return self._folder_name
@folder_name.setter
def folder_name(self, folder_name):
"""Sets the folder_name of this OrderFolderResultItem.
:param folder_name: The folder_name of this OrderFolderResultItem. # noqa: E501
:type: str
"""
self._folder_name = folder_name
@property
def not_met_criteria_count(self):
"""Gets the not_met_criteria_count of this OrderFolderResultItem. # noqa: E501
:return: The not_met_criteria_count of this OrderFolderResultItem. # noqa: E501
:rtype: int
"""
return self._not_met_criteria_count
@not_met_criteria_count.setter
def not_met_criteria_count(self, not_met_criteria_count):
"""Sets the not_met_criteria_count of this OrderFolderResultItem.
:param not_met_criteria_count: The not_met_criteria_count of this OrderFolderResultItem. # noqa: E501
:type: int
"""
self._not_met_criteria_count = not_met_criteria_count
@property
def order_folder_status(self):
"""Gets the order_folder_status of this OrderFolderResultItem. # noqa: E501
:return: The order_folder_status of this OrderFolderResultItem. # noqa: E501
:rtype: str
"""
return self._order_folder_status
@order_folder_status.setter
def order_folder_status(self, order_folder_status):
"""Sets the order_folder_status of this OrderFolderResultItem.
:param order_folder_status: The order_folder_status of this OrderFolderResultItem. # noqa: E501
:type: str
"""
allowed_values = [
"OrderFolderStatusSuccess",
"FolderNotFound",
"UnKnownError",
"UNRECOGNIZED",
] # noqa: E501
if (
self._configuration.client_side_validation
and order_folder_status not in allowed_values
):
raise ValueError(
"Invalid value for `order_folder_status` ({0}), must be one of {1}".format( # noqa: E501
order_folder_status, allowed_values
)
)
self._order_folder_status = order_folder_status
@property
def ordered_count(self):
"""Gets the ordered_count of this OrderFolderResultItem. # noqa: E501
:return: The ordered_count of this OrderFolderResultItem. # noqa: E501
:rtype: int
"""
return self._ordered_count
@ordered_count.setter
def ordered_count(self, ordered_count):
"""Sets the ordered_count of this OrderFolderResultItem.
:param ordered_count: The ordered_count of this OrderFolderResultItem. # noqa: E501
:type: int
"""
self._ordered_count = ordered_count
@property
def ordered_list(self):
"""Gets the ordered_list of this OrderFolderResultItem. # noqa: E501
:return: The ordered_list of this OrderFolderResultItem. # noqa: E501
:rtype: list[OrderedItemItem]
"""
return self._ordered_list
@ordered_list.setter
def ordered_list(self, ordered_list):
"""Sets the ordered_list of this OrderFolderResultItem.
:param ordered_list: The ordered_list of this OrderFolderResultItem. # noqa: E501
:type: list[OrderedItemItem]
"""
self._ordered_list = ordered_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(OrderFolderResultItem, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OrderFolderResultItem):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OrderFolderResultItem):
return True
return self.to_dict() != other.to_dict()
| StarcoderdataPython |
4810495 | from unittest import TestCase
from cloudshell.cli.service.node import Node, NodeOperations
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
class NodeImplementation(Node):
def step_down(self, *args, **kwargs):
pass
def step_up(self, *args, **kwargs):
pass
class TestNode(TestCase):
def setUp(self):
self._node = NodeImplementation()
def test_attribute_parent_node_exist(self):
self.assertTrue(hasattr(self._node, "parent_node"))
def test_attribute_child_nodes_exist(self):
self.assertTrue(hasattr(self._node, "child_nodes"))
def test_add_child_node_append_child(self):
child_node = NodeImplementation()
self._node.add_child_node(child_node)
self.assertTrue(child_node in self._node.child_nodes)
def test_add_child_node_set_parent(self):
child_node = NodeImplementation
self._node.add_child_node(child_node)
self.assertTrue(child_node.parent_node == self._node)
class TestNodeOperations(TestCase):
def setUp(self):
pass
def test_path_to_the_root_single_node(self):
node = NodeImplementation()
self.assertTrue(len(NodeOperations.path_to_the_root(node)) == 1)
def test_path_to_the_root_multiple_node(self):
node1 = NodeImplementation()
node2 = NodeImplementation()
node3 = NodeImplementation()
node1.add_child_node(node2)
node2.add_child_node(node3)
self.assertTrue(len(NodeOperations.path_to_the_root(node3)) == 3)
@patch("cloudshell.cli.service.node.NodeOperations.path_to_the_root")
def test_calculate_route_steps_source_node_root_path_call(self, path_to_the_root):
source_node = Mock()
dest_node = Mock()
path_to_the_root.side_effect = [
[source_node, dest_node],
[dest_node, source_node],
]
NodeOperations.calculate_route_steps(source_node, dest_node)
path_to_the_root.assert_any_call(source_node)
path_to_the_root.assert_any_call(dest_node)
self.assertEqual(2, path_to_the_root.call_count)
@patch("cloudshell.cli.service.node.NodeOperations.path_to_the_root")
def test_calculate_route_steps_dest_node_root_path_call(self, path_to_the_root):
source_node = Mock()
dest_node = Mock()
path_to_the_root.side_effect = [
[source_node, dest_node],
[dest_node, source_node],
]
NodeOperations.calculate_route_steps(source_node, dest_node)
path_to_the_root.assert_any_call(source_node)
path_to_the_root.assert_any_call(dest_node)
self.assertEqual(2, path_to_the_root.call_count)
| StarcoderdataPython |
8051459 | # -*- coding: utf-8 -*-
# Python version 2.7
from cart_page import CartPage
from product_page import ProductPage
from selenium import webdriver
from start_page import StartPage
# Class for adding a product
class Aplication:
def __init__(self):
self.driver = webdriver.Chrome()
self.start_pg = StartPage(self.driver)
self.product_pg = ProductPage(self.driver)
self.cart_pg = CartPage(self.driver)
def quit(self):
self.driver.quit()
def st_open(self):
self.start_pg.op_start()
def add_duck(self,dk_s):
self.start_pg.find_duck(dk_s)
self.product_pg.add_duck_pg(dk_s)
self.product_pg.go_start()
def d_all_duck(self):
self.start_pg.go_cart()
self.cart_pg.dl_dick()
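# Illustrative usage sketch (an assumption about how this page-object class is
# driven; the duck name passed to add_duck is hypothetical):
# app = Aplication()
# app.st_open()
# app.add_duck("Yellow Duck")
# app.d_all_duck()
# app.quit()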
| StarcoderdataPython |
9713962 | <reponame>BobbyZhouZijian/AI-Algo-Implmentations<filename>deep_learning/criterions/focal_loss.py
"""Implements Focal Loss"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
def __init__(self):
super(FocalLoss, self).__init__()
    def forward(self, x, targets, alpha=0.8, gamma=2):
        # map raw logits to probabilities (torch.sigmoid; F.sigmoid is deprecated)
        x = torch.sigmoid(x)
        # flatten predictions and targets
        x = x.view(-1)
        targets = targets.view(-1)
        BCE = F.binary_cross_entropy(x, targets, reduction='mean')
        BCE_exp = torch.exp(-BCE)
        # binary focal loss with a scalar alpha weight so a scalar loss is returned
        focal_loss = alpha * (1 - BCE_exp) ** gamma * BCE
        return focal_loss
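# Minimal usage sketch (illustrative addition; the tensor shapes are assumptions):
# `x` is expected to be raw logits and `targets` binary labels of the same shape.
# loss_fn = FocalLoss()
# logits = torch.randn(8, 1)
# labels = torch.randint(0, 2, (8, 1)).float()
# loss = loss_fn(logits, labels)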
| StarcoderdataPython |
234923 | <reponame>vbillys/reading-group-serialization<filename>examples/python/ExampleIdlData/_ExampleIdlData.py
"""
Generated by Eclipse Cyclone DDS idlc Python Backend
Cyclone DDS IDL version: v0.9.0
Module: ExampleIdlData
IDL file: ExampleIdlData.idl
"""
from enum import auto
from typing import TYPE_CHECKING, Optional
from dataclasses import dataclass
import cyclonedds.idl as idl
import cyclonedds.idl.annotations as annotate
import cyclonedds.idl.types as types
# root module import for resolving types
import ExampleIdlData
BytesArray = types.typedef['ExampleIdlData.BytesArray', types.sequence[types.uint8]]
@dataclass
@annotate.final
@annotate.autoid("sequential")
class Msg(idl.IdlStruct, typename="ExampleIdlData.Msg"):
id: types.int32
annotate.key("id")
message: str
payloadEigen: 'ExampleIdlData.BytesArray'
payloadOpenCVImage: 'ExampleIdlData.BytesArray'
payloadPCLPointCloud: 'ExampleIdlData.BytesArray'
| StarcoderdataPython |
4835498 | # Generated by Django 2.0.3 on 2018-05-16 13:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0010_auto_20180509_2126'),
]
operations = [
migrations.CreateModel(
name='Mail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(db_index=True, max_length=254, unique=True)),
],
),
]
| StarcoderdataPython |
9619281 | #
# Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
#
""" This module supports writing Deephaven application mode Python scripts. """
from typing import Dict
import jpy
from deephaven import DHError
from deephaven._wrapper import JObjectWrapper, wrap_j_object, unwrap
_JApplicationContext = jpy.get_type("io.deephaven.appmode.ApplicationContext")
_JApplicationState = jpy.get_type("io.deephaven.appmode.ApplicationState")
class ApplicationState(JObjectWrapper):
""" The ApplicationState represents the state of an application. """
j_object_type = _JApplicationState
@property
def j_object(self) -> jpy.JType:
return self.j_app_state
def __init__(self, j_app_state):
self.j_app_state = j_app_state
def __repr__(self):
return f"id: {self.j_app_state.id()}, name: {self.j_app_state.name()}"
def __str__(self):
return repr(self)
def __getitem__(self, item):
item = str(item)
j_field = self.j_app_state.getField(item)
if not j_field:
raise KeyError(item)
return wrap_j_object(j_field.value())
def __setitem__(self, key, value):
key = str(key)
self.j_app_state.setField(key, unwrap(value))
def __delitem__(self, key):
key = str(key)
value = self.j_app_state.removeField(key)
if not value:
raise KeyError(key)
@property
def fields(self) -> Dict[str, object]:
fields = {}
j_fields = self.j_app_state.listFields()
for i in range(j_fields.size()):
j_field = j_fields.get(i)
fields[j_field.name()] = wrap_j_object(j_field.value())
return fields
def get_app_state():
""" Get the current application state object.
Raises:
DHError
"""
try:
return ApplicationState(j_app_state=_JApplicationContext.get())
except Exception as e:
raise DHError(e, "failed to get the application state.") from e
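# Illustrative usage sketch (assumes an application-mode script context; the field
# name and value below are hypothetical):
# app = get_app_state()
# app["greeting"] = "hello"   # publish a field to the application state
# print(app.fields)           # inspect the published fields
# del app["greeting"]         # remove the field again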
| StarcoderdataPython |
11312463 | <reponame>QMSS-G5072-2021/SelinaDing
from SelinaDing import SelinaDing
def test_cipher():
    # call cipher via the imported SelinaDing module so the name resolves
    result = SelinaDing.cipher(text='ding', shift=1, encrypt=True)
    assert result == 'ejoh'
| StarcoderdataPython |
1993784 | <gh_stars>0
from pathlib import Path
import tensorflow as tf
import horovod.tensorflow.keras as hvd
hvd.init()
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))
def main() -> None:
### to expose
LR = 0.001
epochs = 5
checkpoint_prefix = Path(".")
###
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
model = get_model(learning_rate=LR)
def on_state_reset():
tf.keras.backend.set_value(model.optimizer.lr, LR * hvd.size())
state = hvd.elastic.KerasState(model, batch=100, epoch=0)
state.register_reset_callbacks([on_state_reset])
callbacks = [
hvd.elastic.CommitStateCallback(state),
hvd.elastic.UpdateBatchStateCallback(state),
hvd.elastic.UpdateEpochStateCallback(state),
]
if hvd.rank() == 0:
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(
str(checkpoint_prefix) + "/checkpoint-{epoch}.h5"
)
)
@hvd.elastic.run
def train(state):
model.fit(
x_train,
y_train,
steps_per_epoch=500 // hvd.size(),
callbacks=callbacks,
epochs=epochs - state.epoch,
verbose=1 if hvd.rank() == 0 else 0,
)
train(state)
def get_model(learning_rate: float) -> tf.keras.Model:
model = tf.keras.models.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10),
]
)
optimizer = tf.keras.optimizers.Adam(learning_rate * hvd.size())
optimizer = hvd.DistributedOptimizer(optimizer)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer=optimizer, loss=loss_fn, metrics=["accuracy"])
return model
if __name__ == "__main__":
main()
| StarcoderdataPython |
11317403 | <reponame>sprij/scrapy-rethinkdb<filename>tests/test_pipeline.py<gh_stars>1-10
import unittest
from mock import Mock, MagicMock, patch
from itertools import combinations_with_replacement
from scrapy_rethinkdb.pipeline import RethinkDBPipeline, NotConfigured, Item
class RethinkDBPipelineTest(unittest.TestCase):
def setUp(self):
# default pipeline under test
self.driver = Mock()
self.table_name = Mock()
self.insert_options = MagicMock()
self.pipeline = RethinkDBPipeline(
self.driver, self.table_name, self.insert_options
)
# patch for driver
self.driver_patcher = patch('scrapy_rethinkdb.pipeline.'
'RethinkDBDriver')
# patcher for the pipeline constructor
self.pipeline_cls_patcher = patch('scrapy_rethinkdb.pipeline.'
'RethinkDBPipeline.__init__')
# returns iterator for all possible combinations for 3 arguments
# which values can be either a Mock or None
self.init_mocks_iter = lambda: \
combinations_with_replacement((None, Mock()), 3)
# returns settings dictionary
self.get_pipeline_settings = \
lambda conn_sett, table_name, insert_options: \
{'RETHINKDB_TABLE': table_name,
'RETHINKDB_CONNECTION': conn_sett,
'RETHINKDB_INSERT_OPTIONS': insert_options}
def test_init_not_configured(self):
# asserts constructor will raise NotConfigured id any of the arguments
# is None
comb_iter = self.init_mocks_iter()
for driver, table_name, insert_options in comb_iter:
if not driver or not table_name or not insert_options:
self.assertRaises(NotConfigured, RethinkDBPipeline,
driver, table_name, insert_options)
def test_init_empty_table_name(self):
# asserts constructor will raise NotConfigured if table_name is empty
self.assertRaises(NotConfigured, RethinkDBPipeline, Mock(), '', Mock())
def test_init_configured(self):
# asserts that the default pipeline under test tried to get the table
self.assertEqual(self.pipeline.table,
self.driver.get_table.return_value)
self.driver.get_table.assert_called_once_with(self.table_name)
def test_from_crawler_configured(self):
# asserts that from_crawler will return a pipeline instance if the
# constructor returns None, as expected
crawler = Mock()
comb_iter = self.init_mocks_iter()
for conn_sett, table_name, insert_options in comb_iter:
crawler.settings = self.get_pipeline_settings(
conn_sett, table_name, insert_options
)
with self.pipeline_cls_patcher as pipeline_cls, \
self.driver_patcher as driver_klass:
pipeline_cls.return_value = None
pipeline = RethinkDBPipeline.from_crawler(crawler)
self.assertIsInstance(pipeline, RethinkDBPipeline)
driver_klass.assert_called_once_with(conn_sett)
pipeline_cls.assert_called_once_with(
driver_klass.return_value, table_name, insert_options
)
def test_from_crawler_not_configured(self):
        # asserts that from_crawler will raise NotConfigured if the
        # constructor raises a NotConfigured exception
crawler = Mock()
for conn_sett, table_name, insert_options in self.init_mocks_iter():
crawler.settings = self.get_pipeline_settings(
conn_sett, table_name, insert_options
)
with self.pipeline_cls_patcher as pipeline_cls, \
self.driver_patcher as driver_klass:
pipeline_cls.side_effect = NotConfigured
self.assertRaises(NotConfigured,
RethinkDBPipeline.from_crawler,
crawler)
driver_klass.assert_called_once_with(conn_sett)
pipeline_cls.assert_called_once_with(
driver_klass.return_value, table_name, insert_options
)
def test_process_item_not_an_item(self):
# asserts that a non-item is just returned
spider = Mock()
# mocking before_insert to check that it won't be called in this test
self.pipeline.before_insert = Mock()
self.pipeline.process_item(Mock(), spider)
self.assertTrue(spider.log.msg.called)
self.assertFalse(self.pipeline.before_insert.called)
def test_process_item_success(self):
# asserts that a item is processed
item = Mock(spec=Item)
item._values = {}
# mocking extension points
self.pipeline.before_insert = Mock()
self.pipeline.after_insert = Mock()
self.pipeline.process_item(item, Mock())
self.pipeline.before_insert.assert_called_once_with(
item
)
self.pipeline.table.insert.assert_called_once_with(
item._values
)
self.pipeline.driver.execute.assert_called_once_with(
self.pipeline.table.insert.return_value
)
self.pipeline.after_insert.assert_called_once_with(
item, self.pipeline.driver.execute.return_value
)
| StarcoderdataPython |
11311796 | # imports
import altair as alt
import requests
import streamlit as st
import urllib
import pandas as pd
# import the page-generating functions from your pages module.
from pages.predictions import predict_page
from pages.why_streamlit import why_streamlit_page
from pages.index import index_page
from pages.exploration import exploration_page
# create lists of the display names of pages, and the
# functions that generate those pages.
pagenames = ['index','exploration','prediction','why streamlit?']
pagefuncs = [index_page, exploration_page, predict_page, why_streamlit_page]
# zip those lists together into a dictionary.
pagedict = dict(zip(pagenames,pagefuncs))
def main():
"""The main function executes your app."""
# this'll put a title in the sidebar.
st.sidebar.title("streamlit app demo")
# here we'll use our page lists and the dictionary to display
# a dropdown menu of pages in the sidebar, and allow the user to pick one.
page_to_load = st.sidebar.selectbox("choose a page.", pagenames)
# get the corresponding function out of the dictionary. Call it.
pagedict[page_to_load]()
# this'll put text in the sidebar.
bio = st.sidebar.markdown("""this sidebar is also a good place for you to summarize your project
and maybe write some things about yourself.""")
if __name__ == "__main__":
# run the app
main() | StarcoderdataPython |
4839515 | from WPTParser.JSONParser.DataExtracter import DataExtracter
class ObjectListDataExtracter(DataExtracter):
def __init__(self):
super().__init__()
def extract(self, obj_list: list, key: str):
try:
key = key.replace(' ', '')
dict_key, dict_value = key.split('=')
for obj in obj_list:
if obj.get(dict_key, None) is not None and obj.get(dict_key) == dict_value:
return obj
return None
except Exception as ex:
return None | StarcoderdataPython |
304955 | from .info import QUERY as info
from .bypass import QUERY as bypass
from .decode_invoice import QUERY as decode_invoice
from .check_macaroon import QUERY as check_macaroon
QUERY = [
info,
bypass,
decode_invoice,
check_macaroon
] | StarcoderdataPython |
9649503 | import re
from unittest.mock import Mock, call, patch
import pytest
from civic_scraper.base.asset import Asset, AssetCollection
from .conftest import file_lines
@pytest.fixture
def asset_collection(asset_inputs):
return AssetCollection([Asset(**kwargs) for kwargs in asset_inputs])
def test_asset_methods():
# extend
extended = AssetCollection([1, 2])
extended.extend([3, 4])
assert extended == AssetCollection([1, 2, 3, 4])
# append
appended = AssetCollection([1, 2])
appended.append([3, 4])
assert appended == AssetCollection([1, 2, [3, 4]])
# indexing
indexed = AssetCollection([1, 2])
assert indexed[1] == 2
def test_csv_export(tmpdir, asset_collection):
"csv_export should write standard filename to a target_dir"
outfile = asset_collection.to_csv(target_dir=tmpdir)
pattern = re.compile(r".+civic_scraper_assets_meta_\d{8}T\d{4}z.csv")
assert re.match(pattern, outfile)
contents = file_lines(outfile)
assert len(contents) == 3
# Check header and contents
assert contents[0].startswith("place")
assert contents[0].strip().endswith("content_length")
assert "minutes" in contents[1]
assert "2020-05-04" in contents[1]
assert "agenda" in contents[2]
def test_asset_download(tmpdir, asset_inputs):
response = Mock(name="MockResponse")
response.content = b"some data"
to_patch = "civic_scraper.base.asset.requests.get"
with patch(to_patch) as mock_method:
mock_method.return_value = response
asset_objs = [Asset(**kwargs) for kwargs in asset_inputs]
for asset in asset_objs:
asset.download(target_dir=tmpdir)
assert mock_method.mock_calls == [
call(
"http://nc-nashcounty.civicplus.com/AgendaCenter/ViewFile/Minutes/_05042020-381",
allow_redirects=True,
),
call(
"http://nc-nashcounty.civicplus.com/AgendaCenter/ViewFile/Agenda/_05042020-381",
allow_redirects=True,
),
]
# check files written
actual_file_names = set([f.basename for f in tmpdir.listdir()])
expected_file_names = set(
[
"civicplus_nc-nashcounty_05042020-381_agenda.pdf",
"civicplus_nc-nashcounty_05042020-381_minutes.pdf",
]
)
assert actual_file_names == expected_file_names
| StarcoderdataPython |
1747624 | from __future__ import annotations
import asyncio
import logging
from collections.abc import Callable, Coroutine
from typing import TYPE_CHECKING, Any, ClassVar, TypeVar
from ... import utils
from ...abc import BaseUser
from ...enums import IntEnum
from ...models import register
from ...protobufs import EMsg, GCMsg, GCMsgProto, MsgProto
from ...state import ConnectionState
from ...trade import BaseInventory, Inventory
if TYPE_CHECKING:
from steam.protobufs.client_server_2 import CMsgGcClient
from ...game import Game
from .client import Client
log = logging.getLogger(__name__)
Inv = TypeVar("Inv", bound=BaseInventory[Any])
class GCState(ConnectionState):
Language: ClassVar[IntEnum]
gc_parsers: dict[IntEnum, Callable[..., Any]]
client: Client
def __init__(self, client: Client, **kwargs: Any):
super().__init__(client, **kwargs)
self._gc_connected = asyncio.Event()
self._gc_ready = asyncio.Event()
self.backpack: Inventory = None # type: ignore
self._unpatched_inventory: Callable[[BaseUser, Game], Coroutine[Any, Any, Inventory]]
@register(EMsg.ClientFromGC)
async def parse_gc_message(self, msg: MsgProto[CMsgGcClient]) -> None:
if msg.body.appid != self.client._GAME.id:
return
try:
language = self.__class__.Language(utils.clear_proto_bit(msg.body.msgtype))
except ValueError:
return log.info(
f"Ignoring unknown msg type: {msg.body.msgtype} ({utils.clear_proto_bit(msg.body.msgtype)})"
)
try:
gc_msg = (
GCMsgProto(language, msg.body.payload)
if utils.is_proto(msg.body.msgtype)
else GCMsg(language, msg.body.payload)
)
except Exception as exc:
return log.error("Failed to deserialize message: %r, %r", language, msg.body.payload, exc_info=exc)
else:
log.debug("Socket has received GC message %r from the websocket.", gc_msg)
self.dispatch("gc_message_receive", gc_msg)
self.run_parser(language, gc_msg)
async def fetch_backpack(self, backpack_cls: type[Inv]) -> Inv:
resp = await self.http.get_user_inventory(
self.client.user.id64, self.client._GAME.id, self.client._GAME.context_id
)
return backpack_cls(state=self, data=resp, owner=self.client.user, game=self.client._GAME)
| StarcoderdataPython |
3240752 | import logging
from typing import List
from PIL import Image
import numpy as np
try:
import face_recognition
_FACE_RECOGNITION_LOADED = True
except ImportError:
_FACE_RECOGNITION_LOADED = False
class FaceEmbedder:
def __init__(self, model: str = "large", num_jitters: int = 5):
logging.info("Loading FaceEmbedder")
self.model = model
self.num_jitters = num_jitters
def embed_image(self, image: Image.Image) -> List:
if _FACE_RECOGNITION_LOADED:
encodings = face_recognition.face_encodings(np.array(image), model=self.model, num_jitters=self.num_jitters)
return encodings
return []
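# Illustrative usage sketch (assumes the optional face_recognition dependency is
# installed; the image path is hypothetical):
# embedder = FaceEmbedder()
# encodings = embedder.embed_image(Image.open("face.jpg"))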
| StarcoderdataPython |
3434215 | import json
import responses
import re
import os
from rest_framework.test import APITestCase, APIClient
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.authtoken.models import Token
from django.contrib.auth import get_user_model
from django.utils import timezone
from datetime import datetime
from questions.models import Question
from meetups.models import Meetup
from answers.models import Answer
from users.models import User
# Create your tests here.
class BaseTest(APITestCase):
"""
    The base test class where default test case settings are kept
"""
def setUp(self):
"""
Basic setup
"""
self.client = APIClient()
self.user1 = User.objects._create_user(
name="<NAME>",
email="<EMAIL>",
password="<PASSWORD>"
)
self.user1.is_active = True
self.user1.is_admin = True
self.user1.save()
self.user2 = User.objects.create_user(
name="<NAME>",
email="<EMAIL>",
password="<PASSWORD>"
)
self.user2.is_active = True
self.user2.is_admin = False
self.user2.save()
self.meetup = Meetup.objects.create(
title='Test Driven Development',
            body='Developers need to discuss the correct approach of doing test driven development',
location='Andela Campus',
creator=self.user2,
scheduled_date=timezone.now() + timezone.timedelta(days=3)
)
self.meetup.save()
self.question1 = Question.objects.create(
title="Why are we testing models",
body="We test models cause we also want to know if the are working",
meetup=self.meetup,
created_by=self.user1
)
self.question1.save()
self.question2 = Question.objects.create(
title="Why are we testing models",
body="We test models cause we also want to know if the are working",
meetup=self.meetup,
created_by=self.user2
)
self.question2.save()
self.answer = Answer.objects.create(
body="test answer",
creator=self.user2,
question=self.question1
)
self.answer.save()
self.answer2 = Answer.objects.create(
body="test answer body 2",
creator=self.user1,
question=self.question1
)
self.answer2.save()
self.update_data = {
'body':'This is the new test update data'
}
self.duplicate_data = {
'body': 'test answer body 2'
}
self.invalid_data = {
'body': '!@#$%^&*(!@#$%'
}
self.meetupId = str(self.meetup.id)
self.questionId = str(self.question2.id)
self.answerId = str(self.answer.id)
self.invalid_id = str(1)
def is_logged_in(self, user):
"""
Authenticate a user and get the token
"""
token, created = Token.objects.get_or_create(user=user)
return self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
def update_answer(self, data, meetup, question, answer):
"""
Updates an answer
"""
url = reverse('update_answer', args=[meetup, question, answer])
response = self.client.put(
url,
data=json.dumps(data),
content_type="application/json"
)
return response
class UpdateAnswer(BaseTest):
"""
Tests answer update functionality
"""
def test_update_answer_successfully(self):
self.is_logged_in(self.user2)
response = self.update_answer(self.update_data, self.meetupId, self.questionId, self.answerId)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['message'], 'You have successfully updated the answer')
def test_update_existing_answer(self):
self.is_logged_in(self.user2)
response = self.update_answer(self.duplicate_data, self.meetupId, self.questionId, self.answerId)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
self.assertEqual(response.data['Error'], 'That answer already exists')
def test_update_answer_not_owned_by_user(self):
self.is_logged_in(self.user1)
response = self.update_answer(self.update_data, self.meetupId, self.questionId, self.answerId)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['error'], 'You cannot edit this answer. You did not post it')
def test_update_invalid_answer(self):
self.is_logged_in(self.user2)
response = self.update_answer(self.invalid_data, self.meetupId, self.questionId, self.answerId)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['error'], 'Please enter a valid answer')
def test_update_no_meetup(self):
self.is_logged_in(self.user2)
response = self.update_answer(self.update_data, self.invalid_id, self.questionId, self.answerId)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data['error'], 'The specified meetup does not exist')
def test_update_no_question(self):
self.is_logged_in(self.user2)
response = self.update_answer(self.update_data, self.meetupId, self.invalid_id, self.answerId)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data['error'], 'The specified question does not exist')
def test_update_no_answer(self):
self.is_logged_in(self.user2)
response = self.update_answer(self.update_data, self.meetupId, self.questionId, self.invalid_id)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data['error'], 'The specified answer does not exist') | StarcoderdataPython |
6515709 | from itertools import product
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
from linearmodels.iv import IV2SLS
from linearmodels.panel.model import FirstDifferenceOLS
from linearmodels.tests.panel._utility import (
access_attributes,
assert_frame_similar,
assert_results_equal,
datatypes,
generate_data,
)
pytestmark = pytest.mark.filterwarnings(
"ignore::linearmodels.shared.exceptions.MissingValueWarning"
)
missing = [0.0, 0.20]
perms = list(product(missing, datatypes))
ids = ["-".join(str(param) for param in perms) for perm in perms]
@pytest.fixture(params=perms, ids=ids)
def data(request):
missing, datatype = request.param
return generate_data(missing, datatype, other_effects=1)
def test_firstdifference_ols(data):
mod = FirstDifferenceOLS(data.y, data.x)
res = mod.fit(debiased=False)
y = mod.dependent.values3d
x = mod.exog.values3d
dy = np.array(y[0, 1:] - y[0, :-1])
dy = pd.DataFrame(
dy,
index=mod.dependent.panel.major_axis[1:],
columns=mod.dependent.panel.minor_axis,
)
dy = dy.T.stack()
dy = dy.reindex(mod.dependent.index)
dx = x[:, 1:] - x[:, :-1]
_dx = {}
for i, dxi in enumerate(dx):
temp = pd.DataFrame(
dxi,
index=mod.dependent.panel.major_axis[1:],
columns=mod.dependent.panel.minor_axis,
)
temp = temp.T.stack()
temp = temp.reindex(mod.dependent.index)
_dx[mod.exog.vars[i]] = temp
dx = pd.DataFrame(index=_dx[mod.exog.vars[i]].index)
for key in _dx:
dx[key] = _dx[key]
dx = dx[mod.exog.vars]
drop = dy.isnull() | np.any(dx.isnull(), 1)
dy = dy.loc[~drop]
dx = dx.loc[~drop]
ols_mod = IV2SLS(dy, dx, None, None)
ols_res = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, ols_res)
res = mod.fit(cov_type="robust", debiased=False)
ols_res = ols_mod.fit(cov_type="robust")
assert_results_equal(res, ols_res)
clusters = data.vc1
ols_clusters = mod.reformat_clusters(data.vc1)
fd = mod.dependent.first_difference()
ols_clusters = ols_clusters.dataframe.loc[fd.index]
res = mod.fit(cov_type="clustered", clusters=clusters, debiased=False)
ols_res = ols_mod.fit(cov_type="clustered", clusters=ols_clusters)
assert_results_equal(res, ols_res)
res = mod.fit(cov_type="clustered", cluster_entity=True, debiased=False)
entity_clusters = mod.dependent.first_difference().entity_ids
ols_res = ols_mod.fit(cov_type="clustered", clusters=entity_clusters)
assert_results_equal(res, ols_res)
ols_clusters["entity.clusters"] = entity_clusters
ols_clusters = ols_clusters.astype(np.int32)
res = mod.fit(
cov_type="clustered", cluster_entity=True, clusters=data.vc1, debiased=False
)
ols_res = ols_mod.fit(cov_type="clustered", clusters=ols_clusters)
assert_results_equal(res, ols_res)
def test_firstdifference_ols_weighted(data):
mod = FirstDifferenceOLS(data.y, data.x, weights=data.w)
res = mod.fit(debiased=False)
y = mod.dependent.values3d
x = mod.exog.values3d
dy = np.array(y[0, 1:] - y[0, :-1])
dy = pd.DataFrame(
dy,
index=mod.dependent.panel.major_axis[1:],
columns=mod.dependent.panel.minor_axis,
)
dy = dy.T.stack()
dy = dy.reindex(mod.dependent.index)
dx = x[:, 1:] - x[:, :-1]
_dx = {}
for i, dxi in enumerate(dx):
temp = pd.DataFrame(
dxi,
index=mod.dependent.panel.major_axis[1:],
columns=mod.dependent.panel.minor_axis,
)
temp = temp.T.stack()
temp = temp.reindex(mod.dependent.index)
_dx[mod.exog.vars[i]] = temp
dx = pd.DataFrame(index=_dx[mod.exog.vars[i]].index)
for key in _dx:
dx[key] = _dx[key]
dx = dx[mod.exog.vars]
w = mod.weights.values3d
w = 1.0 / w
sw = w[0, 1:] + w[0, :-1]
sw = pd.DataFrame(
sw,
index=mod.dependent.panel.major_axis[1:],
columns=mod.dependent.panel.minor_axis,
)
sw = sw.T.stack()
sw = sw.reindex(mod.dependent.index)
sw = 1.0 / sw
sw = sw / sw.mean()
drop = dy.isnull() | np.any(dx.isnull(), 1) | sw.isnull()
dy = dy.loc[~drop]
dx = dx.loc[~drop]
sw = sw.loc[~drop]
ols_mod = IV2SLS(dy, dx, None, None, weights=sw)
ols_res = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, ols_res)
res = mod.fit(cov_type="robust", debiased=False)
ols_res = ols_mod.fit(cov_type="robust")
assert_results_equal(res, ols_res)
clusters = data.vc1
ols_clusters = mod.reformat_clusters(data.vc1)
fd = mod.dependent.first_difference()
ols_clusters = ols_clusters.dataframe.loc[fd.index]
res = mod.fit(cov_type="clustered", clusters=clusters, debiased=False)
ols_res = ols_mod.fit(cov_type="clustered", clusters=ols_clusters)
assert_results_equal(res, ols_res)
def test_first_difference_errors(data):
if isinstance(data.x, pd.DataFrame):
time = data.y.index.levels[1][0]
y = data.y.xs(time, level=1, drop_level=False)
x = data.x.xs(time, level=1, drop_level=False)
else:
x = data.x[:, [0], :]
y = data.y[[0], :]
with pytest.raises(ValueError):
FirstDifferenceOLS(y, x)
if not isinstance(data.x, pd.DataFrame):
return
x = data.x.copy()
x["Intercept"] = 1.0
with pytest.raises(ValueError):
FirstDifferenceOLS(data.y, x)
def test_results_access(data):
mod = FirstDifferenceOLS(data.y, data.x)
res = mod.fit(debiased=False)
access_attributes(res)
def test_firstdifference_error(data):
mod = FirstDifferenceOLS(data.y, data.x)
clusters = mod.dependent.dataframe.copy()
for entity in mod.dependent.entities:
clusters.loc[entity] = np.random.randint(9)
clusters.iloc[::3, :] = clusters.iloc[::3, :] + 1
with pytest.raises(ValueError):
mod.fit(cov_type="clustered", clusters=clusters)
def test_fitted_effects_residuals(data):
mod = FirstDifferenceOLS(data.y, data.x)
res = mod.fit()
expected = mod.exog.values2d @ res.params.values
expected = pd.DataFrame(expected, index=mod.exog.index, columns=["fitted_values"])
assert_allclose(res.fitted_values, expected)
assert_frame_similar(res.fitted_values, expected)
expected.iloc[:, 0] = mod.dependent.values2d - expected.values
expected.columns = ["idiosyncratic"]
assert_allclose(res.idiosyncratic, expected)
assert_frame_similar(res.idiosyncratic, expected)
expected.iloc[:, 0] = np.nan
expected.columns = ["estimated_effects"]
assert_allclose(res.estimated_effects, expected)
assert_frame_similar(res.estimated_effects, expected)
def test_extra_df(data):
mod = FirstDifferenceOLS(data.y, data.x)
res = mod.fit()
res_extra = mod.fit(extra_df=10)
assert np.all(np.diag(res_extra.cov) > np.diag(res.cov))
| StarcoderdataPython |
6400620 | <filename>component/tiles/__init__.py
from sepal_ui.frontend.styles import *
from ..widget.custom_widgets import *
from ..frontend.styles import *
from .alert_map import *
from .parameters import *
from .tiles import *
from .ui import *
| StarcoderdataPython |
265974 | <reponame>ioreshnikov/tamizdat<filename>tests/test_website.py
from unittest import TestCase
from unittest.mock import patch, MagicMock
from tamizdat.models import make_database, Book, File
from tamizdat.website import Website
def read_saved_page(book_id):
filename = "tests/assets/{}.html".format(book_id)
with open(filename) as fd:
return fd.read()
def mock_head(url, *args, **kwargs):
mock_response = MagicMock()
mock_response.url = url
return mock_response
class WebsiteTestCase(TestCase):
def setUp(self):
self.database = make_database()
self.website = Website(requests=MagicMock())
def test_get_extension(self):
self.assertEqual(self.website._get_extension("/b/485688/epub"), "epub")
self.assertEqual(self.website._get_extension("/b/485688/fb2"), "fb2")
self.assertEqual(self.website._get_extension("/b/485688/mobi"), "mobi")
self.assertEqual(self.website._get_extension("/b/485688/"), "")
def test_join_paragraph(self):
sentences = [
"Все счастливые семьи похожи друг на друга, \n",
"каждая несчастливая семья несчастлива по-своему."
]
expected = (
"Все счастливые семьи похожи друг на друга, "
"каждая несчастливая семья несчастлива по-своему.")
self.assertEqual(self.website._join_paragraph(sentences), expected)
def test_scraping_info_from_a_webpage(self):
self.website.requests.head = mock_head
page_source = read_saved_page("93872")
info = self.website._scrape_additional_info(page_source)
annotation, cover, ebook = info
self.assertIsNotNone(annotation)
self.assertTrue(cover.endswith("jpg"))
self.assertTrue(ebook.endswith("fb2"))
def test_appending_additional_info(self):
self.website.requests.head = mock_head
page_source = read_saved_page("93872")
info = self.website._scrape_additional_info(page_source)
book = Book(book_id=93872, title="Трудно быть богом")
book.save()
self.assertIsNone(book.annotation)
self.assertIsNone(book.cover_image)
self.assertIsNone(book.ebook_fb2)
self.website._append_additional_info(book, info)
self.assertIsNotNone(book.annotation)
self.assertIsInstance(book.cover_image, File)
self.assertTrue(book.cover_image.remote_url.endswith(".jpg"))
self.assertIsInstance(book.ebook_fb2, File)
self.assertTrue(book.ebook_fb2.remote_url.endswith("/fb2"))
self.assertTrue(book.ebook_fb2.local_path.endswith(".fb2.zip"))
| StarcoderdataPython |
9611656 | # Standard Library
import uuid
# Django
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.utils.functional import cached_property
# Third Party Libraries
import pytest
from allauth.account.forms import EmailAwarePasswordResetTokenGenerator
from allauth.account.utils import user_pk_to_url_str
from allauth.socialaccount.models import SocialAccount, SocialLogin
TEST_PASSWORD = "<PASSWORD>"
default_token_generator = EmailAwarePasswordResetTokenGenerator()
class MockSession(dict):
"""Provides mock session dict with session key"""
@cached_property
def session_key(self):
return str(uuid.uuid4())
@pytest.fixture
def test_password():
    return TEST_PASSWORD
@pytest.fixture
def mock_session():
return MockSession
@pytest.fixture
def get_response():
return lambda req: HttpResponse()
@pytest.fixture
def user_model():
return get_user_model()
@pytest.fixture
def user(user_model, test_password):
user = user_model(username="tester", email="<EMAIL>")
    user.set_password(test_password)
user.save()
return user
@pytest.fixture
def anonymous_user():
return AnonymousUser()
@pytest.fixture
def login_user(client, user, test_password):
    client.login(username=user.username, password=test_password)
return user
@pytest.fixture
def user_with_unusable_password(user):
user.set_unusable_password()
user.save()
return user
@pytest.fixture
def login_user_with_unusable_password(client, user_with_unusable_password):
client.force_login(user_with_unusable_password)
return user_with_unusable_password
@pytest.fixture
def password_reset_kwargs(user):
return dict(
uidb36=user_pk_to_url_str(user), key=default_token_generator.make_token(user)
)
@pytest.fixture
def sociallogin(client, user_model):
account = SocialAccount(provider="google")
sociallogin = SocialLogin(account=account, user=user_model(),)
session = client.session
session["socialaccount_sociallogin"] = sociallogin.serialize()
session.save()
return sociallogin
| StarcoderdataPython |
11246277 | <reponame>pyhf/pyhf-benchmark<filename>src/pyhf_benchmark/manager.py
import time
from pathlib import Path
from .plot import plot, plot_comb
from .stats import SystemStats
class RunManager(object):
def __init__(self, meta=None):
self._stat = []
self._meta = meta
self._start_time = time.time()
self.times = 0
self.directory = Path(
f"{Path(__file__).resolve().parent}/../../output/run_{time.strftime('%Y%m%d', time.localtime())}_{int(self._start_time)}"
)
def start(self, meta=None):
system = SystemStats(meta, self.directory)
self.times += 1
self._stat.append(system)
system.start()
def close(self):
system = self._stat.pop(0)
system.shutdown()
plot(system.dir)
def shutdown(self):
if self.times > 1:
plot_comb(self.directory)
| StarcoderdataPython |
11206881 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from setuptools import setup
setup(
name="myutils",
version="1.0",
author='<NAME>',
author_email='<EMAIL>',
license='BSD-2-Clause',
long_description='Libraries that could be used in every project.',
description='Utils',
url='https://github.com/yuriy-logosha/myutils/README.md'
) | StarcoderdataPython |
1893928 | <gh_stars>0
##Program for converting RIS files into BibTex database
import re
import glob
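# For reference, a minimal RIS record in the single-space "TAG - value" form
# that this script's regex and split logic expect (illustrative only; real RIS
# exports often use "TAG  - " with two spaces, which would need a tweak here):
#   TY - JOUR
#   AU - Doe, John
#   TI - An example title
#   JO - Journal of Examples
#   PY - 2001/05
#   SP - 10
#   EP - 20
#   ER -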
def readInData(fiName):
fi = open(str(fiName))
fiObj = fi.read()
fi.close()
return fiObj
def readInCrossRef():
refDic = {}
fi = open("KeyForCrossRef.txt")
fiObj = fi.read()
fi.close()
for row in fiObj.split("\n"):
keyName = row.split(" = ")
key = keyName[0]
if keyName[1]!="[special case]":
keyRef = keyName[1].split(",")
refDic[key]=keyRef
return refDic
def turnToDic(fiObj):
reType = re.compile(".. - .*")
dataLi = reType.findall(fiObj)
fiDic = {}
for data in dataLi:
rowLi = data.split(" - ")
try:
exists = fiDic[rowLi[0]]
exists = exists+" and "+str(rowLi[1])
fiDic[rowLi[0]] = exists
except KeyError:
fiDic[rowLi[0]] = str(rowLi[1])
return fiDic
def makeArticleDic2(fiDic,elements,crossRefDic):
newDic = {}
for el in elements:
if el == "pages":
newDic["pages"] = handlePages(fiDic)
if el == "publisher":
newDic["publisher"] = handlePublisher(fiDic)
try:
cross = crossRefDic[el]
countTries = 1
for item in cross:
try:
if el!="year":
newDic[el] = fiDic[item]
break
if el == "year":
year,month=yearSplit(fiDic[item])
newDic["month"]=month
newDic["year"]=year
except KeyError:
if countTries==len(cross) and el=="address":
                        print(el)
countTries+=1
except KeyError:
if el == "note":
newDic["note"]="NA"
return newDic
def handlePublisher(fiDic):
try:
pub = fiDic["PB"]
except KeyError:
print "No publisher"
pub = "NA"
try:
loc = fiDic["CY"]
except KeyError:
print "No location"
loc = "NA"
output = str(loc)+": "+str(pub)
return output
def handlePages(fiDic):
try:
sp = fiDic["SP"]
try:
ep = fiDic["EP"]
pages = str(sp)+"-"+str(ep)
except KeyError:
            # EP missing; SP may already contain a full range such as "10-20".
            pages = sp
except KeyError:
pages = "NA"
return pages
def yearSplit(YEAR):
yearLi = YEAR.split("/")
year = yearLi[0]
try:
month = yearLi[1]
except IndexError:
month="NA"
return year,month
def writeArticleEntry(fout,ty,elements,articleDic,keyDic,countErrors):
ifError = 0
try:
author = articleDic["author"]
authorLast = author.split(",")[0]
if len(authorLast)>10:
authorLast = authorLast[0:10]
except KeyError:
authorLast = "NA"
try:
year = articleDic["year"]
except KeyError:
year = "NA"
key = str(authorLast)+str(year)
key = key.replace(" ","").replace(".","")
countKeys = 2
keepOn = True
while keepOn == True:
try:
exists = keyDic[key]
print "Duplicate!!"
key = key+str(countKeys)
except KeyError:
keepOn = False
countKeys = int(countKeys)+1
fout.write(str(ty)+"{"+str(key)+",\n")
for el in elements:
try:
data = articleDic[el]
writeOrNo = True
if data=="NA":
writeOrNo = False
if el=="year" or el=="author" or el=="title" or el=="journal":
writeOrNo = True
if writeOrNo==True:
output = data
if el == "author":
output = output.replace(";", " and")
fout.write("\t"+str(el)+" = {"+str(output)+"},\n")
except KeyError:
ifError+=1
##print "Error",el
##print articleDic
fout.write("}\n\n")
if ifError>0:
countErrors+=1
return keyDic,countErrors
########################################################################
direct = str(input("What is the path you want to use? "))
elements_article = ["author","title","journal","volume","number",
"pages","year","month","note"]
elements_book = ["author","title","publisher","volume","number","series",
"address","edition","year","month","note"]
elements_conference = ["author","title",'booktitle','editor','volume','number',
'series','pages','address','year','month','publisher',
'note']
elements_manual = ['title','author','organization','address','edition','month',
'year','note']
elements_mastersthesis = ['author','title','school','year','type','address',
'month','note']
elements_misc = ['author','title','howpublished','month','year','note']
elements_proceedings = ['title','year','editor','volume/number','series','address',
'month','organization','publisher','note']
elements_techReport = ['author','title','institution','year','type','number',
'address','month','note']
elements_unpublished = ['author','title','note','month','year']
countErrors = 0
fout = open("tempOut.txt","w")
crossRefDic = readInCrossRef()
keyDic = {}
fiGlob = glob.glob(direct+"\\*")
for fiName in fiGlob:
fiObj = readInData(fiName)
fiDic = turnToDic(fiObj)
try:
ty = fiDic["TY"]
except KeyError:
ty = "JOUR"
if ty == "JOUR":
articleDic = makeArticleDic2(fiDic,elements_article,crossRefDic)
keyDic,countErrors = writeArticleEntry(fout,"@article",elements_article,articleDic,keyDic,countErrors)
if ty == "BOOK":
bookDic = makeArticleDic2(fiDic,elements_book,crossRefDic)
keyDic,countErrors = writeArticleEntry(fout,"@book",elements_book,bookDic,keyDic,countErrors)
print "Number of errors is: ",countErrors
fout.close()
| StarcoderdataPython |
9743494 |
# -*- coding: utf-8 -*-
'''
File name: code\mccarthy_91_function\sol_555.py
Author: <NAME>
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #555 :: McCarthy 91 function
#
# For more information see:
# https://projecteuler.net/problem=555
# Problem Statement
'''
The McCarthy 91 function is defined as follows:
$$
M_{91}(n) =
\begin{cases}
n - 10 & \text{if } n > 100 \\
M_{91}(M_{91}(n+11)) & \text{if } 0 \leq n \leq 100
\end{cases}
$$
We can generalize this definition by abstracting away the constants into new variables:
$$
M_{m,k,s}(n) =
\begin{cases}
n - s & \text{if } n > m \\
M_{m,k,s}(M_{m,k,s}(n+k)) & \text{if } 0 \leq n \leq m
\end{cases}
$$
This way, we have $M_{91} = M_{100,11,10}$.
Let $F_{m,k,s}$ be the set of fixed points of $M_{m,k,s}$. That is,
$$F_{m,k,s}= \left\{ n \in \mathbb{N} \, | \, M_{m,k,s}(n) = n \right\}$$
For example, the only fixed point of $M_{91}$ is $n = 91$. In other words, $F_{100,11,10}= \{91\}$.
Now, define $SF(m,k,s)$ as the sum of the elements in $F_{m,k,s}$ and let $S(p,m) = \displaystyle \sum_{1 \leq s < k \leq p}{SF(m,k,s)}$.
For example, $S(10, 10) = 225$ and $S(1000, 1000)=208724467$.
Find $S(10^6, 10^6)$.
'''
# Solution
# Solution Approach
'''
'''
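# A small, illustrative sketch of the base function M_91 = M_{100,11,10}
# described in the statement above (not an attempt at the full problem, which
# asks for the fixed points of the generalized M_{m,k,s}):
def _mccarthy(n, m=100, k=11, s=10):
    """Evaluate M_{m,k,s}(n) iteratively by tracking pending applications."""
    pending = 1
    while pending > 0:
        if n > m:
            n -= s
            pending -= 1
        else:
            n += k
            pending += 1
    return n
# For the classic parameters every 0 <= n <= 100 collapses to the single fixed
# point 91, i.e. all(_mccarthy(n) == 91 for n in range(101)) is True.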
| StarcoderdataPython |
8165489 | from .gziptcp import *
from .gziptcpssl import *
from .plaintcp import *
from .plaintcpssl import *
from .plainudp import *
from .protocolerror import *
__all__ = [
*gziptcp.__all__,
*gziptcpssl.__all__,
*plaintcp.__all__,
*plaintcpssl.__all__,
*plainudp.__all__,
*protocolerror.__all__,
]
| StarcoderdataPython |
6619341 | from django.contrib import admin
import models
admin.site.register(models.Entry)
admin.site.register(models.Category)
| StarcoderdataPython |
3295484 | <filename>tests/data/cve/feed.py
GET_CVE_SYNC_METADATA = {
"CVE_data_type": "CVE",
"CVE_data_format": "MITRE",
"CVE_data_version": "4.0",
"CVE_data_numberOfCVEs": "6769",
"CVE_data_timestamp": "2022-02-23T08:01Z",
"CVE_Items": [{
"cve": {
"data_type": "CVE",
"data_format": "MITRE",
"data_version": "4.0",
"CVE_data_meta": {
"ID": "CVE-1999-0001",
"ASSIGNER": "<EMAIL>",
},
"problemtype": {
"problemtype_data": [{
"description": [{
"lang": "en",
"value": "CWE-20",
}],
}],
},
"references": {
"reference_data": [
{
"url": "http://www.openbsd.org/errata23.html#tcpfix",
"name": "http://www.openbsd.org/errata23.html#tcpfix",
"refsource": "CONFIRM",
"tags": [],
}, {
"url": "http://www.osvdb.org/5707",
"name": "5707",
"refsource": "OSVDB",
"tags": [],
},
],
},
"description": {
"description_data": [{
"lang": "en",
"value": (
"ip_input.c in BSD-derived TCP/IP implementations allows remote "
"attackers to cause a denial of service (crash or hang) via "
"crafted packets."
),
}],
},
},
"configurations": {
"CVE_data_version": "4.0",
"nodes": [{
"operator": "OR",
"children": [],
"cpe_match": [
{
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:1.0:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:1.1.5.1:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.1.7:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.2:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.2.8:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:openbsd:openbsd:2.3:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:bsdi:bsd_os:3.1:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.2.3:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.2.4:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.2.5:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.2.6:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.0:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.0.5:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.1.5:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.1.6:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.2.2:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.0.1:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:1.1:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:1.2:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.1.6.1:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:2.1.7.1:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:freebsd:freebsd:3.0:*:*:*:*:*:*:*",
"cpe_name": [],
}, {
"vulnerable": True,
"cpe23Uri": "cpe:2.3:o:openbsd:openbsd:2.4:*:*:*:*:*:*:*",
"cpe_name": [],
},
],
}],
},
"impact": {
"baseMetricV2": {
"cvssV2": {
"version": "2.0",
"vectorString": "AV:N/AC:L/Au:N/C:N/I:N/A:P",
"accessVector": "NETWORK",
"accessComplexity": "LOW",
"authentication": "NONE",
"confidentialityImpact": "NONE",
"integrityImpact": "NONE",
"availabilityImpact": "PARTIAL",
"baseScore": 5.0,
},
"severity": "MEDIUM",
"exploitabilityScore": 10.0,
"impactScore": 2.9,
"obtainAllPrivilege": False,
"obtainUserPrivilege": False,
"obtainOtherPrivilege": False,
"userInteractionRequired": False,
},
},
"publishedDate": "1999-12-30T05:00Z",
"lastModifiedDate": "2010-12-16T05:00Z",
}],
}
| StarcoderdataPython |
1935245 | <filename>pySDC/implementations/controller_classes/error_estimator.py
import numpy as np
from scipy.special import factorial
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
from pySDC.core.Errors import DataError
class _ErrorEstimatorBase:
"""
This class should be the parent of all error estimator classes, MPI and nonMPI and provide all functions that can
be shared.
"""
def __init__(self, controller, order, size):
self.params = controller.params
if self.params.use_extrapolation_estimate:
self.setup_extrapolation(controller, order, size)
def setup_extrapolation(self, controller, order, size):
"""
The extrapolation based method requires storage of previous values of u, f, t and dt and also requires solving
a linear system of equations to compute the Taylor expansion finite difference style. Here, all variables are
initialized which are needed for this process.
"""
# check if we can handle the parameters
if not controller.MS[0].levels[0].sweep.coll.right_is_node:
raise NotImplementedError('I don\'t know what to do if the last collocation node is not the end point')
# determine the order of the Taylor expansion to be higher than that of the time marching scheme
if self.params.use_HotRod:
self.order = order - 1 + 2
else:
self.order = order + 2
# important: the variables to store the solutions etc. are defined in the children classes
self.n = (self.order + 1) // 2 # since we store u and f, we need only half of each (the +1 is for rounding)
self.n_per_proc = int(np.ceil(self.n / size)) # number of steps that each step needs to store
self.u_coeff = [None] * self.n
self.f_coeff = [0.] * self.n
def communicate_time(self):
raise NotImplementedError('Please implement a function to communicate the time and step sizes!')
def communicate(self):
raise NotImplementedError('Please implement a function to communicates the solution etc.!')
def get_extrapolation_coefficients(self, t_eval=None):
"""
This function solves a linear system where in the matrix A, the row index reflects the order of the derivative
in the Taylor expansion and the column index reflects the particular step and whether its u or f from that
step. The vector b on the other hand, contains a 1 in the first entry and zeros elsewhere, since we want to
compute the value itself and all the derivatives should vanish after combining the Taylor expansions. This
works to the order the number of rows and since we want a square matrix for solving, we need the same amount of
colums, which determines the memory overhead, since it is equal to the solutions / rhs that we need in memory
at the time of evaluation.
This is enough to get the extrapolated solution, but if we want to compute the local error, we have to compute
a prefactor. This is based on error accumulation between steps (first step's solution is exact plus 1 LTE,
second solution is exact plus 2 LTE and so on), which can be computed for adaptive step sizes as well, but its
wonky in time-parallel versions to say the least (it's not cared for and hence wrong, but it might be wrong in
the same way as the embedded method and work for Hot Rod regardless...)
"""
t, dt = self.communicate_time()
# prepare A matrix
A = np.zeros((self.order, self.order))
A[0, 0:self.n] = 1.
j = np.arange(self.order)
inv_facs = 1. / factorial(j)
# get the steps backwards from the point of evaluation
idx = np.argsort(t)
if t_eval is None:
steps_from_now = -np.cumsum(dt[idx][::-1])[self.n - 1::-1]
else:
steps_from_now = t[idx] - t_eval
# fill A matrix
for i in range(1, self.order):
# Taylor expansions of the solutions
A[i, :self.n] = steps_from_now**j[i] * inv_facs[i]
# Taylor expansions of the first derivatives a.k.a. right hand side evaluations
A[i, self.n:self.order] = steps_from_now[2 * self.n - self.order:]**(j[i] - 1) * inv_facs[i - 1]
# prepare rhs
b = np.zeros(self.order)
b[0] = 1.
# solve linear system for the coefficients
coeff = np.linalg.solve(A, b)
self.u_coeff = coeff[:self.n]
self.f_coeff[self.n * 2 - self.order:] = coeff[self.n:self.order] # indexing takes care of uneven order
# determine prefactor
r = abs(self.dt[len(self.dt) - len(self.u_coeff):] / self.dt[-1])**(self.order - 1)
inv_prefactor = -sum(r[1:]) - 1.
for i in range(len(self.u_coeff)):
inv_prefactor += sum(r[1: i + 1]) * self.u_coeff[i]
self.prefactor = 1. / abs(inv_prefactor)
def store_values(self, S):
"""
Store the required attributes of the step to do the extrapolation. We only care about the last collocation
node on the finest level at the moment.
"""
if self.params.use_extrapolation_estimate:
# figure out which values are to be replaced by the new ones
if None in self.t:
oldest_val = len(self.t) - len(self.t[self.t == [None]])
else:
oldest_val = np.argmin(self.t)
f = S.levels[0].f[-1]
if type(f) == imex_mesh:
self.f[oldest_val] = f.impl + f.expl
elif type(f) == mesh:
self.f[oldest_val] = f
else:
                raise DataError(f'Unable to store f from datatype {type(f)}, extrapolation based '
                                'error estimate only works with types imex_mesh and mesh')
self.u[oldest_val] = S.levels[0].u[-1]
self.t[oldest_val] = S.time + S.dt
self.dt[oldest_val] = S.dt
def embedded_estimate(self, S):
"""
Compute embedded error estimate on the last node of each level
In serial this is the local error, but in block Gauss-Seidel MSSDC this is a semi-global error in each block
"""
for L in S.levels:
# order rises by one between sweeps, making this so ridiculously easy
L.status.error_embedded_estimate = max([abs(L.uold[-1] - L.u[-1]), np.finfo(float).eps])
def extrapolation_estimate(self, S):
"""
The extrapolation estimate combines values of u and f from multiple steps to extrapolate and compare to the
solution obtained by the time marching scheme.
"""
if None not in self.dt:
if None in self.u_coeff or self.params.use_adaptivity:
self.get_extrapolation_coefficients(t_eval=S.time + S.dt)
self.communicate()
if len(S.levels) > 1:
raise NotImplementedError('Extrapolated estimate only works on the finest level for now')
u_ex = S.levels[0].u[-1] * 0.
idx = np.argsort(self.t)
# see if we need to leave out any values because we are doing something in a block
if (abs(S.time + S.dt - self.t) < 10. * np.finfo(float).eps).any():
idx_step = idx[np.argmin(abs(self.t - S.time - S.dt))]
else:
idx_step = max(idx) + 1
mask = np.logical_and(idx < idx_step, idx >= idx_step - self.n)
for i in range(self.n):
u_ex += self.u_coeff[i] * self.u[idx[mask][i]] + self.f_coeff[i] * self.f[idx[mask][i]]
S.levels[0].status.error_extrapolation_estimate = abs(u_ex - S.levels[0].u[-1]) * self.prefactor
def estimate(self, S):
if self.params.use_HotRod:
if S.status.iter == S.params.maxiter - 1:
self.extrapolation_estimate(S)
elif S.status.iter == S.params.maxiter:
self.embedded_estimate(S)
else:
# only estimate errors when last sweep is performed and not when doing Hot Rod
if S.status.iter == S.params.maxiter:
if self.params.use_extrapolation_estimate:
self.extrapolation_estimate(S)
if self.params.use_embedded_estimate or self.params.use_adaptivity:
self.embedded_estimate(S)
class _ErrorEstimator_nonMPI_BlockGS(_ErrorEstimatorBase):
"""
Error estimator that works with the non-MPI controller in block Gauss-Seidel mode
"""
def __init__(self, controller):
super(_ErrorEstimator_nonMPI_BlockGS, self).__init__(controller, order=controller.MS[0].params.maxiter,
size=len(controller.MS))
def store_values(self, MS):
for S in MS:
super(_ErrorEstimator_nonMPI_BlockGS, self).store_values(S)
def communicate_time(self):
return self.t, self.dt
def communicate(self):
pass
def estimate(self, MS):
# loop in reverse through the block since later steps lag behind with iterations
for i in range(len(MS) - 1, -1, -1):
S = MS[i]
if self.params.use_HotRod:
if S.status.iter == S.params.maxiter - 1:
self.extrapolation_estimate(S)
elif S.status.iter == S.params.maxiter:
self.embedded_estimate_local_error(MS[:i + 1])
break
else:
# only estimate errors when last sweep is performed and not when doing Hot Rod
if S.status.iter == S.params.maxiter:
if self.params.use_extrapolation_estimate:
self.extrapolation_estimate(S)
if self.params.use_embedded_estimate or self.params.use_adaptivity:
self.embedded_estimate_local_error(MS[:i + 1])
def setup_extrapolation(self, controller, order, size):
super(_ErrorEstimator_nonMPI_BlockGS, self).setup_extrapolation(controller, order, size)
# check if we fixed the order by fixing the iteration number
if not controller.MS[0].levels[0].params.restol == 0:
raise NotImplementedError('Extrapolation based error estimate so far only with fixed order!')
# check if we have the same order everywhere
maxiter = [controller.MS[i].params.maxiter for i in range(len(controller.MS))]
if not maxiter.count(maxiter[0]) == len(maxiter):
raise NotImplementedError('All steps need to have the same order in time!')
if controller.params.mssdc_jac:
raise NotImplementedError('Extrapolation error only implemented in block Gauss-Seidel!')
# check if we can deal with the supplied number of processes
if len(controller.MS) > 1 and len(controller.MS) < self.n + 1:
            raise NotImplementedError(f'Extrapolation error estimate only works in serial, or in a no-overhead '
                                      f'version which requires at least {self.n + 1} processes for order '
                                      f'{self.order} Taylor expansion. You gave {size} processes.')
# create variables to store u, f, t and dt from previous steps
self.u = [None] * self.n_per_proc * size
self.f = [None] * self.n_per_proc * size
self.t = np.array([None] * self.n_per_proc * size)
self.dt = np.array([None] * self.n_per_proc * size)
def embedded_estimate_local_error(self, MS):
"""
In block Gauss-Seidel SDC, the embedded estimate actually estimates sort of the global error within the block,
since the second to last sweep is from an entirely k-1 order method, so to speak. This means the regular
embedded method here yields this semi-global error and we get the local error as the difference of consecutive
semi-global errors.
"""
# prepare a list to store all errors in
semi_global_errors = np.array([[0.] * len(MS[0].levels)] * (len(MS) + 1))
for i in range(len(MS)):
S = MS[i]
for j in range(len(S.levels)):
L = S.levels[j]
semi_global_errors[i][j] = abs(L.uold[-1] - L.u[-1])
L.status.error_embedded_estimate = max([abs(semi_global_errors[i][j] - semi_global_errors[i - 1][j]),
np.finfo(float).eps])
class _ErrorEstimator_nonMPI_no_memory_overhead_BlockGS(_ErrorEstimator_nonMPI_BlockGS):
"""
Error estimator that works with the non-MPI controller in block Gauss-Seidel mode and does not feature memory
overhead due to extrapolation error estimates, since the required values are in memory of other "processes"
anyways.
"""
def __init__(self, controller):
super(_ErrorEstimator_nonMPI_no_memory_overhead_BlockGS, self).__init__(controller)
def store_values(self, MS):
"""
No overhead means nothing to store!
"""
pass
def extrapolation_estimate(self, MS):
"""
The extrapolation estimate combines values of u and f from multiple steps to extrapolate and compare to the
solution obtained by the time marching scheme.
"""
# this is needed since we don't store anything
self.dt = np.array([S.dt for S in MS])
self.t = np.array([S.time for S in MS]) + self.dt
if len(MS) > self.n:
if None in self.u_coeff or self.params.use_adaptivity:
self.get_extrapolation_coefficients()
if len(MS[-1].levels) > 1:
raise NotImplementedError('Extrapolated estimate only works on the finest level for now')
# loop to go through all steps which we can extrapolate to
for j in range(self.n, len(MS)):
u_ex = MS[-1].levels[0].u[-1] * 0.
# loop to sum up contributions from previous steps
for i in range(1, self.n + 1):
L = MS[j - i].levels[0]
if type(L.f[-1]) == imex_mesh:
u_ex += self.u_coeff[-i] * L.u[-1] + self.f_coeff[-i] * (L.f[-1].impl + L.f[-1].expl)
elif type(L.f[-1]) == mesh:
u_ex += self.u_coeff[-i] * L.u[-1] + self.f_coeff[-i] * L.f[-1]
else:
                        raise DataError(f'Datatype {type(L.f[-1])} not supported by parallel '
                                        'extrapolation error estimate!')
MS[j].levels[0].status.error_extrapolation_estimate = abs(u_ex - MS[j].levels[0].u[-1]) * self.prefactor
def estimate(self, MS):
# loop in reverse through the block since later steps lag behind with iterations
for i in range(len(MS) - 1, -1, -1):
S = MS[i]
if self.params.use_HotRod:
if S.status.iter == S.params.maxiter - 1:
self.extrapolation_estimate(MS[:i + 1])
elif S.status.iter == S.params.maxiter:
self.embedded_estimate_local_error(MS[:i + 1])
break
else:
# only estimate errors when last sweep is performed and not when doing Hot Rod
if S.status.iter == S.params.maxiter:
if self.params.use_extrapolation_estimate:
self.extrapolation_estimate(MS[:i + 1])
if self.params.use_embedded_estimate or self.params.use_adaptivity:
self.embedded_estimate_local_error(MS[:i + 1])
def get_ErrorEstimator_nonMPI(controller):
"""
This function should be called from the controller and return the correct version of the error estimator based on
the chosen parameters.
"""
if len(controller.MS) >= (controller.MS[0].params.maxiter + 4) // 2:
return _ErrorEstimator_nonMPI_no_memory_overhead_BlockGS(controller)
else:
return _ErrorEstimator_nonMPI_BlockGS(controller)
| StarcoderdataPython |
8164513 | <filename>src/ftd/api/maya.py<gh_stars>1-10
# pylint: disable=invalid-name, redefined-builtin, protected-access
"""Object-oriented API for Autodesk Maya."""
from __future__ import absolute_import, division
import abc
import logging
import math
import sys
import types
from maya import cmds
from maya.api import OpenMaya
__version__ = "0.1.0"
__all__ = [
# Common
"clear",
"newscene",
"exists",
# Enum
"Space",
# Encoding & Decoding
"encode",
"decode",
# Nodes
"DependencyNode",
"DagNode",
"create",
"delete",
# Plug & Attributes
"Plug",
# Data types
"Vector",
]
LOG = logging.getLogger(__name__)
# Python 2 & 3 compatibility.
# pylint: disable=undefined-variable
_STRING_TYPES = str if sys.version_info[0] >= 3 else basestring # type: ignore
# pylint: enable=undefined-variable
def _add_metaclass(metaclass):
"""Add a metaclass compatible with python 2 and 3"""
def _decorator(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return _decorator
# Errors
DgError = type("DgError", (Exception,), {})
DagError = type("DagError", (Exception,), {})
ExistsError = type("ExistsError", (Exception,), {})
PlugError = type("PlugError", (Exception,), {})
GraphError = type("GraphError", (Exception,), {})
# Common
def newscene():
"""Create a new scene."""
cmds.file(new=True, force=True)
clear()
def clear():
"""Remove all instances stored in the memory."""
_MetaNode._instances.clear()
# Enum
class Space(object):
"""Space transformation identifiers."""
TRANSFORM = OpenMaya.MSpace.kTransform
PRE_TRANSFORM = OpenMaya.MSpace.kPreTransform
POST_TRANSFORM = OpenMaya.MSpace.kPostTransform
WORLD = OpenMaya.MSpace.kWorld
OBJECT = OpenMaya.MSpace.kObject
# Encoding & Decoding
def encode(obj, default=object):
"""Encode an object.
Create an instance of the type corresponding to the object passed as a
parameter. If the object does not exist in Maya, raises an exception of
type `ValueError` unless a value is specified in the default parameter and
then returns that value.
Note:
        The default value of the ``default`` parameter is initialized to
        :obj:`object` rather than the usual ``None``. Initializing it to
        ``None`` would make it impossible for the user to pass ``None`` as
        the default return value.
        There should be no case where you actually want :obj:`object`
        returned, but if you ever do, you can still use a "derived" syntax::
>>> encode("unknown", default=None) or object
<class 'object'>
If the encoding is performed on an object already encoded, returns
the object unchanged.
Examples:
>>> from maya import cmds
>>> newscene()
>>> _ = cmds.createNode("transform", name="A")
>>> encode("A")
<DagNode 'A' type::transform>
>>> encode("B")
Traceback (most recent call last):
...
ValueError
>>> encode("B", default=False)
False
>>> _ = cmds.createNode("multMatrix", name="C")
>>> encode("C")
<DependencyNode 'C' type::multMatrix>
Arguments:
obj (any): The object to encode.
default (any): Value that is returned if the object does not exists.
Returns:
any: The encoded object.
Raises:
TypeError: The type of the object is not supported.
ValueError: The object does not exist and the `default` parameter
is not specified.
"""
LOG.debug("Encoding: %s", repr(obj))
# Check if the object is not already encoded.
if obj.__class__.__module__ == __name__:
return obj
if isinstance(obj, _STRING_TYPES):
sel = OpenMaya.MSelectionList()
try:
sel.add(obj)
if "." in obj:
obj = sel.getPlug(0)
else:
obj = sel.getDependNode(0)
except RuntimeError:
if default is not object:
return default
raise ValueError("The object '{}' does not exists.".format(obj))
if isinstance(obj, OpenMaya.MPlug):
return Plug(obj)
if not isinstance(obj, OpenMaya.MObject):
msg = "The object type {} is not supported."
raise TypeError(msg.format(type(obj)))
# Find the most appropriate class in which the object can be encoded.
for each in reversed(OpenMaya.MGlobal.getFunctionSetList(obj)):
cls = _MetaNode._types.get(getattr(OpenMaya.MFn, each))
if cls is not None:
return cls(obj)
raise ValueError("Failed to encode the object '{}'".format(obj))
def decode(obj, **kwargs):
"""Decode an object."""
LOG.debug("Decode: %s", repr(obj))
if obj.__class__.__module__ != __name__:
return obj
if hasattr(obj, "decode"):
return obj.decode(**kwargs)
return str(obj)
def ls(*args, **kwargs):
"""Todo."""
return _wrap(cmds.ls, *args, **kwargs)
def selected():
"""Return the current selected nodes."""
selection = OpenMaya.MGlobal.getActiveSelectionList().getSelectionStrings()
return map(encode, selection)
# Nodes
class _MetaNode(type):
"""Manage all the registered nodes.
Anything involving nodes goes through here at least once :)
    This meta class has two main goals:
    - Keep track of all classes that are based on it.
    - Keep track of all instances of encoded nodes so that they can be reused
      when the same node is encoded more than once.
"""
_types = {}
_instances = {}
def __new__(mcs, name, bases, dict_):
"""Register all new classes that derive from this metaclass."""
cls = super(_MetaNode, mcs).__new__(mcs, name, bases, dict_)
mcs._types[cls._identifier] = cls
return cls
def __call__(cls, mobject, *args, **kwargs):
"""Handle the creation of instances in order to implement a singleton.
Each node will be tracked and stored in a python dictionary in order to
reuse the same instance for all encoding attempts on the same node.
Note:
Make a comparison using ``is`` is equivalent to comparing the
:func:`id` of the two operands. So ``a is b`` is equivalent to
``id(a) == id(b)``.
How it works?
To be able to retrieve if a node has already been encoded or not,
we need to find a way to effectively compare nodes.
Using the node name is a very bad idea, as two nodes can have the
same name in the same scene, and the node name is not constant
within a session
Maya provides two different systems that can be used to efficiently
compare nodes, UUIDs or hash codes.
**Universally unique identifier (UUID)**
            Every node has an attribute called ``uuid`` stored on it which,
            as the name suggests, is unique. That's perfect! Well, not so much.
            Take a scene that is referenced twice in another scene.
            Each node from the referenced scene is present twice in the scene,
with the same uuid. And this is a problem because we have no way to
differentiate these two nodes in an efficient way.
**Hash code**
For each MObject, maya provides an hash code. On the subject, the
`official documentation`_ says:
[...] several internal Maya objects may return the same code.
However different MObjectHandles whose MObjects refer to the
same internal Maya object will return the same hash code.
[...]
Which is exactly what we want.
Examples:
>>> from maya import cmds
>>> newscene()
>>> _ = cmds.createNode("transform", name="A")
>>> _ = cmds.createNode("transform", name="B")
>>> a = encode("A")
>>> b = encode("B")
>>> a is b
False
>>> c = encode("A")
>>> a is c
True
Arguments:
mobject (MObject): The maya object used to initialize the instance.
Returns:
any: The encoded instance of the node.
.. _official documentation:
https://help.autodesk.com/cloudhelp/2020/ENU/Maya-SDK-MERGED/cpp_ref/class_m_object_handle.html#a23a0c64be863c23d2cf8214243d59bb1
"""
handle = OpenMaya.MObjectHandle(mobject)
hash_code = handle.hashCode()
self = cls._instances.get(hash_code)
if not self:
self = super(_MetaNode, cls).__call__(mobject, *args, **kwargs)
self._handle = handle
cls._instances[hash_code] = self
return self
@_add_metaclass(_MetaNode)
class DependencyNode(object):
"""A Dependency Graph (DG) node."""
_class = OpenMaya.MFnDependencyNode
_identifier = OpenMaya.MFn.kDependencyNode
def __repr__(self):
return "<{} '{}' type::{}>".format(
self.__class__.__name__,
self.fn.name(),
self.fn.typeName,
)
# Type conversion ---
def __str__(self):
return self.fn.name()
def __bool__(self):
return True
__nonzero__ = __bool__
# Arithmetic operators ---
def __add__(self, other):
"""Allow the legacy way to acess plugs.
Examples:
>>> newscene()
>>> a = create("transform", name="A")
>>> a + ".translateX"
'A.translateX'
"""
return str(self) + str(other)
# Reflected arithmetic operators ---
def __radd__(self, other):
return str(other) + str(self)
# Comparison operators ---
def __eq__(self, other):
if isinstance(other, DependencyNode):
return self.object == other.object
return str(self) == str(other)
def __ne__(self, other):
if isinstance(other, DependencyNode):
return self.object != other.object
return str(self) != str(other)
    # Emulate container type ---
def __getitem__(self, key):
return self.findplug(key)
def __setitem__(self, key, value):
self.findplug(key).value = value
# Constructor ---
def __init__(self, mobject):
self._object = mobject
self._fn = self._class(mobject)
self._handle = OpenMaya.MObjectHandle(mobject)
# Read properties ---
@property
def object(self):
"""MObject: The maya object attached to self."""
return self._object
@property
def handle(self):
"""MObjectHandle: The maya object handle attached to self."""
return self._handle
@property
def fn(self):
# pylint: disable=invalid-name
"""MFnDependencyNode: The maya function set attached to self."""
return self._fn
@property
def type(self):
"""str: The type name of the node."""
return self.fn.typeName
@property
def typeid(self):
"""int: A bit number that is used to identify the type of the node in
binary file format.
"""
return self.fn.typeId.id()
@property
def inherited(self):
"""list: The type inheritance of the node."""
return cmds.nodeType(self.name, inherited=True)
@property
def derived(self):
"""list: The types that inherits of the node."""
return cmds.nodeType(self.name, derived=True)
@property
def uuid(self):
"""str: The Universally Unique Identifier (UUID) of the node."""
return self.fn.uuid().asString()
@property
def hash(self):
"""int: Hash code for the internal maya object.
The hash code is not unique, several MObjects can have the same hash
code. However, if different MObectHandle refer to the same maya
internal object, they will return the same hash code
"""
return self.handle.hashCode()
@property
def isdefault(self):
"""bool: True if the node is created automatically by Maya."""
return self.fn.isDefaultNode
@property
def isreferenced(self):
"""bool: True if the node come from a referenced file."""
return self.fn.isFromReferencedFile
# Read write properties ---
@property
def name(self):
"""str: The name of the node."""
return self.fn.name()
@name.setter
def name(self, value):
cmds.rename(self.name, value)
@property
def lock(self):
"""bool: The lock state of the node.
A locked node means that it cannot be deleted, repaired or renamed.
It is also not possible to create, edit or delete their attributes.
"""
return self.fn.isLocked
@lock.setter
def lock(self, value):
cmds.lockNode(self.name, lock=value)
# Public methods ---
def duplicate(self, name=None):
"""Duplicate the node.
Examples:
>>> newscene()
>>> a = create("transform", name="A")
>>> b = a.duplicate("B")
>>> b
<DagNode 'B' type::transform>
>>> a != b
True
Arguments:
name (str): The name to give to the duplicate node.
Returns:
DependencyNode: The instance of the duplicate node.
"""
return encode(cmds.duplicate(self.name, name=name)[0])
def delete(self):
"""Delete the node.
Warning:
Even if the node is deleted, its instance still exists in memory.
Attempting to access a deleted node may cause a crash.
Examples:
>>> newscene()
>>> node = create("transform")
>>> exists(node)
True
>>> node.delete()
>>> exists(node)
False
"""
cmds.delete(self.name)
def findplug(self, attribute):
"""Find a plug from an attribute name.
Examples:
>>> newscene()
>>> node = create("transform", name="A")
>>> node.findplug("message")
<Plug 'A.message' type::message>
>>> node.findplug("unknown")
Traceback (most recent call last):
...
ValueError
Arguments:
attribute (str): The name of the attribute to search for.
Returns:
Plug: The instance of the plug.
Raises:
ValueError: The attribute does not exists on the node.
"""
LOG.debug("Acess '%s.%s'", self.name, attribute)
try:
return Plug(self.fn.findPlug(attribute, False))
except RuntimeError:
message = "The plug '{}.{}' does not exists."
raise ValueError(message.format(self, attribute))
def history(self, filter=None):
"""Search in the node history."""
return self._related(OpenMaya.MItDependencyGraph.kUpstream, filter)
def future(self, filter=None):
"""Search in the future of the node."""
return self._related(OpenMaya.MItDependencyGraph.kDownstream, filter)
def istype(self, filter, strict=False):
"""Check the type of the node.
Arguments:
filter (str, tuple): The node(s) that should match with self.
strict (bool): If `True`, does not check for inherited types and
return `True` only if self has the exact same type as the on of
the specified filter.
Returns:
bool: `True` if self match the filter otherwise `False`.
"""
if strict:
return self.type in filter
if isinstance(filter, _STRING_TYPES):
filter = [filter]
return any(x in self.inherited for x in filter)
# Private methods ---
def _related(self, direction, filter=None):
"""Retrive node through the graph."""
iterator = OpenMaya.MItDependencyGraph(
self.object,
direction,
traversal=OpenMaya.MItDependencyGraph.kDepthFirst,
level=OpenMaya.MItDependencyGraph.kNodeLevel,
)
# Skip self.
iterator.next()
while not iterator.isDone():
node = encode(iterator.currentNode())
# print(node.type, filter)
if filter is None or node.type in filter:
yield node
iterator.next()
class DagNode(DependencyNode):
"""A Directed Acyclic Graph (DAG) node."""
_class = OpenMaya.MFnDagNode
_identifier = OpenMaya.MFn.kDagNode
def __len__(self):
return self.childcount
def __iter__(self):
return self.children()
def __init__(self, mobject):
super(DagNode, self).__init__(mobject)
self._dagpath = OpenMaya.MDagPath.getAPathTo(self.object)
# Read properties ---
@property
def dagpath(self):
"""MDagPath: The dag path instance associated to the node."""
return self._dagpath
@property
def path(self):
"""str: The path of the attached object from the root of the DAG."""
return self.fn.fullPathName()
@property
def childcount(self):
"""int: The number of chidren of the node"""
return self.fn.childCount()
# Public methods ---
def root(self):
"""The root node of the first path leading to this node.
Examples:
>>> newscene()
>>> a = create("transform", name="A")
>>> b = create("transform", name="B")
>>> c = create("transform", name="C")
>>> a.addchild(b)
>>> b.addchild(c)
>>> c.root()
<DagNode 'A' type::transform>
Returns:
DagNode: The root node.
"""
parents = list(self.parents())
if len(parents) > 0:
return parents[-1]
return None
def parents(self, filter=None, strict=False):
"""Find the parents nodes.
Examples:
>>> newscene()
>>> a = create("transform", name="A")
>>> b = create("transform", name="B")
>>> c = create("transform", name="C")
>>> a.addchild(b)
>>> b.addchild(c)
>>> list(c.parents())
[<DagNode 'B' type::transform>, <DagNode 'A' type::transform>]
Arguments:
filter (str, tuple): Filter the returned node types.
strict (bool): If `True`, does not check for inherited types and
return `True` only if self has the exact same type as the on of
the specified filter.
Yield:
DagNode: The next parent node.
"""
# The `parentCount` and `parent` (with an index other than 0)
        # methods do not seem to work...
mobject = self.fn.parent(0)
while mobject.apiType() != OpenMaya.MFn.kWorld:
parent = encode(mobject)
if _match_filter(parent, filter, strict):
yield parent
mobject = parent.fn.parent(0)
def parent(self, index=None):
"""Find a parent node.
Examples:
>>> newscene()
>>> a = create("transform", name="A")
>>> b = create("transform", name="B")
>>> a.addchild(b)
>>> b.parent()
<DagNode 'A' type::transform>
Arguments:
index (int): The index of the parent to find.
Returns:
DagNode: The parent node.
Raises:
DagError: The parent at the speicified index is inaccessible.
"""
try:
parents = list(self.parents())
return parents[index or 0]
except IndexError:
if index is None:
return None
msg = "The parent node at the index '{}' is inaccessible."
raise DagError(msg.format(index))
def siblings(self, filter=None, strict=False):
"""Find the siblings nodes
Examples:
>>> newscene()
>>> a = create("transform", name="A")
>>> b = create("transform", name="B")
>>> c = create("transform", name="C")
>>> d = create("transform", name="D")
>>> a.addchildren(b, c, d)
>>> list(b.siblings())
[<DagNode 'C' type::transform>, <DagNode 'D' type::transform>]
Arguments:
filter (str, tuple): Filter the returned node types.
strict (bool): If `True`, does not check for inherited types and
return `True` only if self has the exact same type as the on of
the specified filter.
Yield:
DagNode: The next sibling node.
"""
parent = self.parent()
if parent is None:
nodes = ls(assemblies=True)
else:
nodes = parent.children()
for node in nodes:
if node != self and _match_filter(node, filter, strict):
yield node
def sibling(self, index=None):
"""Find a sibling node.
Examples:
>>> newscene()
>>> a = create("transform", name="A")
>>> b = create("transform", name="B")
>>> c = create("transform", name="C")
>>> a.addchildren(b, c)
>>> b.sibling()
<DagNode 'C' type::transform>
Arguments:
index (int): The index of the sibling to find.
Returns:
DagNode: The sibling node.
Raises:
DagError: The sibling at the speicified index is inaccessible.
"""
try:
siblings = list(self.siblings())
return siblings[index or 0]
except IndexError:
if index is None:
return None
msg = "The sibling node at the index '{}' is inaccessible."
raise DagError(msg.format(index))
def shapes(self, filter=None, strict=False):
"""Find the shape nodes.
Arguments:
filter (str, tuple): Filter the returned node types.
strict (bool): If `True`, does not check for inherited types and
return `True` only if self has the exact same type as the on of
the specified filter.
Yield:
Shape: The next shape node.
"""
for index in range(self.fn.childCount()):
obj = self.fn.child(index)
if obj.hasFn(OpenMaya.MFn.kShape):
child = encode(obj)
if _match_filter(child, filter, strict):
yield child
def shape(self, index=None):
"""Find a shape node.
Arguments:
index (int): The index of the shape to find.
Returns:
Shape: The shape node.
Raises:
DagError: The shape at the speicified index is inaccessible.
"""
try:
shapes = list(self.shapes())
return shapes[index or 0]
except IndexError:
if index is None:
return None
msg = "The shape node at the index '{}' is inaccessible."
raise DagError(msg.format(index))
def children(self, recurse=False, shape=False, filter=None, strict=False):
"""Find the child nodes.
Arguments:
recurse (bool): Include all descendants in the yielded nodes
instead of the just the children.
shape (bool): Include the shapes in the yielded nodes.
filter (str, tuple): Filter the returned node types.
strict (bool): If `True`, does not check for inherited types and
return `True` only if self has the exact same type as the on of
the specified filter.
Yield:
DagNode: The next child node.
"""
for index in range(self.fn.childCount()):
child = encode(self.fn.child(index))
if _match_filter(child, filter, strict):
if not (child.object.hasFn(OpenMaya.MFn.kShape) and not shape):
yield child
if recurse:
for each in child.children(recurse=True, filter=filter):
yield each
def child(self, index=None):
"""Find a child node.
Arguments:
index (int): The index of the child to find.
Returns:
DagNode: The child node.
Raises:
DagError: The child at the speicified index is inaccessible.
"""
try:
children = list(self.children())
return children[index or 0]
except IndexError:
if index is None:
return None
msg = "The sibling node at the index '{}' is inaccessible."
raise DagError(msg.format(index))
def addchild(self, node, index=None):
"""Add a child to the node.
Arguments:
node (DagNode): The node to add.
index (int): The index at which the node will be inserted into the
children.
"""
node._set_parent(self)
if index is not None:
offset = -self.childcount + index + 1
cmds.reorder(node.name, relative=offset)
def addchildren(self, *args):
"""Recursively add multiple children to the node.
Arguments:
*args: The nodes to add as child.
"""
for arg in args:
if isinstance(arg, (list, tuple, set, types.GeneratorType)):
self.addchildren(*arg)
else:
self.addchild(arg)
def hide(self):
"""Set the visibility plug to False."""
self["visibility"] = False
def show(self):
"""Set the visibility plug to True."""
self["visibility"] = True
# Private methods ----
def _set_parent(self, parent):
"""Set the parent of self in the outliner."""
if self.parent() == parent:
LOG.debug("%s is already a child of %s.", self, parent)
else:
cmds.parent(self.name, str(parent))
class Shape(DagNode):
"""A shape node."""
_identifier = OpenMaya.MFn.kShape
def _set_parent(self, parent):
cmds.parent(self.name, parent.name, shape=True, relative=True)
def exists(obj):
"""Check if an object exists in the scene."""
return cmds.objExists(str(obj))
def delete(*args, **kwargs):
"""Delete the specified nodes.
Wrap the `cmds.delete()`_ command.
Examples:
>>> newscene()
>>> node = create("transform")
>>> exists(node)
True
>>> delete(node)
>>> exists(node)
False
Arguments:
*args: The arguments passed to the `cmds.delete()`_ command.
**kwargs: The keyword arguments passed to the `cmds.delete()`_ command.
.. _cmds.delete():
https://help.autodesk.com/cloudhelp/2022/ENU/Maya-Tech-Docs/CommandsPython/delete.html
"""
return _wrap(cmds.delete, *args, **kwargs)
# Creator
@_add_metaclass(abc.ABCMeta)
class Creator(object):
"""Allow to customize the creation of nodes."""
identifier = None
_registered = {}
def __repr__(self):
return "<Creator '{}'>".format(self.identifier)
@classmethod
def register(cls, creator):
"""Register a new creator.
Arguments:
creator (class): The creator to register.
Returns:
class: The registered class.
Raises:
TypeError: Invalid creator type.
"""
if not issubclass(creator, Creator):
raise TypeError("Invalid creator. Must be derivied of Creator.")
cls._registered[creator.identifier] = creator
return creator
@abc.abstractmethod
def create(self, name=None):
"""Create a new node.
Arguments:
name (str): The name to give to the new node.
Returns:
DependencyNode: The created node.
"""
@Creator.register
class LocatorCreator(Creator):
"""Create a new locator."""
identifier = "locator"
def create(self, name=None):
return encode(cmds.spaceLocator(name=name or self.identifier)[0])
def create(type, name=None, **kwargs):
"""Create a new node.
Arguments:
type (str): The type of the node to create.
name (str): The name of the node to create. If not specified,
use the ``type`` parameter instead.
**kwargs: The additional keyword arguments to pass to the
:class:`Creator` or to the `cmds.createNode()`_ command.
Returns:
DependencyNode: A node instace based on the type of the node.
.. _cmds.createNode():
https://help.autodesk.com/cloudhelp/2022/ENU/Maya-Tech-Docs/CommandsPython/createNode.html
"""
if type in Creator._registered:
type = Creator._registered[type](**kwargs)
if isinstance(type, Creator):
return type.create(name)
return _wrap(cmds.createNode, type, name=name or type, **kwargs)
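# Illustrative sketch of plugging in a custom creator (the "group" identifier
# and the GroupCreator name are assumptions made for this example, not part of
# the module):
#
#     @Creator.register
#     class GroupCreator(Creator):
#         identifier = "group"
#         def create(self, name=None):
#             return encode(cmds.group(empty=True, name=name or self.identifier))
#
#     grp = create("group", name="rig_grp")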
# Plug & Attributes
class Plug(object):
"""A plug object."""
def __repr__(self):
return """<{} '{}' type::{}>""".format(
self.__class__.__name__,
self.name,
self.type,
)
def __str__(self):
return self.name
def __init__(self, mplug):
self._plug = mplug
# Read properties ---
@property
def plug(self):
"""MPlug: The mplug instance of the plug."""
return self._plug
@property
def node(self):
"""DependencyNode: Get the associated node."""
return encode(self.plug.node())
@property
def name(self):
"""str: The plug name."""
return self.plug.name()
@property
def attribute(self):
"""str: THe attribute name of the plug."""
return str(self).rsplit(".", 1)[-1]
@property
def type(self):
"""str: The plug type."""
return cmds.getAttr(self.name, type=True)
@property
def issettable(self):
"""bool: The plug is settable."""
return self.plug.isFreeToChange() == OpenMaya.MPlug.kFreeToChange
@property
def isdefault(self):
"""bool: The plug is default value."""
return self.plug.isDefaultValue()
@property
def isarray(self):
"""bool: True if plug is an array of plugs."""
return self.plug.isArray
@property
def iscompound(self):
"""str: True if plug is compound parent with children."""
return self.plug.isCompound
@property
def childcount(self):
"""int: The number of chidren of the node.
Raises:
TypeError: self has no child.
"""
if self.isarray:
return self.plug.evaluateNumElements()
if self.iscompound:
return self.plug.numChildren()
return 0
# Read write properties ---
@property
def value(self):
"""any: The value of the plug."""
return self._read()
@value.setter
def value(self, value):
return self._write(value)
@property
def default(self):
"""any: The plug is default value."""
value = cmds.attributeQuery(
self.attribute,
node=self.node.name,
listDefault=True,
)
if isinstance(value, (list, tuple)) and len(value) == 1:
value = value[0]
return value
@default.setter
def default(self, value):
cmds.addAttr(self.name, edit=True, defaultValue=value)
# Private methods ---
def _read(self):
"""Read the value from the plug."""
return cmds.getAttr(self.name)
def _write(self, value):
"""Set the value of the plug."""
cmds.setAttr(self.name, value)
# Data types
class Point(object):
"""3D point."""
def __repr__(self):
return "<Vector {}>".format(self)
# Type conversion ---
def __str__(self):
return str(tuple(self))
def __init__(self, x=0, y=0, z=0, w=1):
self._point = OpenMaya.MPoint(x, y, z, w)
@classmethod
def from_mpoint(cls, mpoint):
"""Create a point from a maya point."""
return cls(mpoint.x, mpoint.y, mpoint.z, mpoint.w)
class Vector(object):
"""Three dimensional vector.
Arguments:
x (float): The x component of the vector.
y (float): The y component of the vector.
z (float): The z component of the vector.
"""
def __repr__(self):
return "<Vector {}>".format(self)
# Unary operators ---
def __pos__(self):
"""Positive version of the vector (doesn't do anything)."""
return Vector(+self.x, +self.y, +self.z)
def __neg__(self):
"""Negate all components of the vector."""
return Vector(-self.x, -self.y, -self.z)
def __abs__(self):
"""Convert all negative components to positive."""
return Vector(abs(self.x), abs(self.y), abs(self.z))
def __round__(self, ndigits=0):
"""Round all components of the vector."""
return Vector(
round(self.x, ndigits),
round(self.y, ndigits),
round(self.z, ndigits),
)
def __ceil__(self):
"""Converts all floating numbers to the next integer."""
return Vector(
math.ceil(self.x),
math.ceil(self.y),
math.ceil(self.z),
)
def __floor__(self):
"""Converts all floating numbers to the previous integer."""
return Vector(
math.floor(self.x),
math.floor(self.y),
math.floor(self.z),
)
def __trunc__(self):
"""Converts all floating numbers to the closest integer."""
return Vector(
math.trunc(self.x),
math.trunc(self.y),
math.trunc(self.z),
)
# Type conversion ---
def __str__(self):
return str(tuple(self))
# Arithmetic operators ---
def __add__(self, vector):
return self.from_mvector(self.vector + vector.vector)
def __sub__(self, vector):
return self.from_mvector(self.vector - vector.vector)
    def __mul__(self, other):
        """Multiply by a scalar, or return the dot product with another vector."""
        if isinstance(other, Vector):
            return self.vector * other.vector
        return self.from_mvector(self.vector * other)
def __truediv__(self, scalar):
return self.from_mvector(self.vector / scalar)
def __xor__(self, vector):
"""Compute the cross product."""
return self.from_mvector(self.vector ^ vector.vector)
# Comparison operators ---
def __eq__(self, vector):
"""Return True if all components are identical."""
if isinstance(vector, (list, tuple)):
return type(vector)(self) == vector
return self.vector == vector.vector
def __ne__(self, vector):
"""Return True if at least one of the components is not identical."""
if isinstance(vector, (list, tuple)):
return type(vector)(self) != vector
return self.vector != vector.vector
    # Emulate container type ---
def __len__(self):
return 3
def __getitem__(self, key):
"""Allow access to components via the container synthax.
Examples:
>>> v = Vector(1, 2, 3)
>>> v[0] == v["x"] == 1
True
>>> v[1] == v["y"] == 2
True
>>> v[2] == v["z"] == 3
True
>>> v[:2]
(1.0, 2.0)
"""
if key in (0, "x"):
return self.x
if key in (1, "y"):
return self.y
if key in (2, "z"):
return self.z
if isinstance(key, slice):
return tuple(self[i] for i in range(*key.indices(len(self))))
msg = "Vector of length 3. The index '{}' is invalid."
raise IndexError(msg.format(key))
def __setitem__(self, key, value):
if key in (0, "x"):
self.x = value
elif key in (1, "y"):
self.y = value
elif key in (2, "z"):
self.z = value
elif isinstance(key, slice):
for i, j in enumerate(range(*key.indices(len(self)))):
self[j] = value[i]
else:
msg = "Vector of length 3. The index '{}' is invalid."
raise IndexError(msg.format(key))
# Constructor ---
def __copy__(self):
"""Create a copy of the vector."""
return type(self)(self.x, self.y, self.z)
def __init__(self, x=0, y=0, z=0):
self._vector = OpenMaya.MVector(x, y, z)
# Class methods ---
@classmethod
def zero(cls):
"""Build a vector with all its components set to zero."""
return cls(0, 0, 0)
@classmethod
def one(cls):
"""Build a vector with all its components set to one."""
return cls(1, 1, 1)
@classmethod
def from_mvector(cls, mvector):
"""Create a vector from a maya vector."""
return cls(mvector.x, mvector.y, mvector.z)
# Read properties ---
@property
def vector(self):
"""MVector: The maya vector object."""
return self._vector
# Read write properties ---
@property
def x(self):
"""float: The x component of the vector."""
return self.vector.x
@x.setter
def x(self, value):
self.vector.x = value
@property
def y(self):
"""float: The y component of the vector."""
return self.vector.y
@y.setter
def y(self, value):
self.vector.y = value
@property
def z(self):
"""float: The z component of the vector."""
return self.vector.z
@z.setter
def z(self, value):
self.vector.z = value
@property
def length(self):
"""float: The length of the vector."""
return self.vector.length()
@length.setter
def length(self, value):
temp = self.normal()
self.x = temp.x * value
self.y = temp.y * value
self.z = temp.z * value
# Public methods ---
def normal(self):
"""Normalized copy."""
return self.from_mvector(self.vector.normal())
def normalize(self):
"""Inplace normalization."""
self.vector.normalize()
def decode(self, api=False):
"""Decode the vector."""
return self.vector if api else tuple(self)
# Aliases ---
magnitude = length
cross = __xor__
dot = __mul__
copy = __copy__
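# Illustrative usage sketch for the Vector wrapper above (added example, not
# part of the original module; assumes a Maya session where maya.api.OpenMaya
# is available):
#
#   v = Vector(1, 0, 0)
#   w = Vector(0, 1, 0)
#   (v + w).decode()      # -> (1.0, 1.0, 0.0)
#   v.dot(w)              # -> 0.0  (dot product via __mul__)
#   (v ^ w).decode()      # -> (0.0, 0.0, 1.0)  (cross product)
#   v.length              # -> 1.0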
class Matrix(object):
"""4x4 matrix."""
def __repr__(self):
lines = "\n".join([" ".join(["{:7.3f}"] * 4)] * 4)
return "<Matrix \n{}\n>".format(lines.format(*self.decode(True)))
# Type conversion ---
def __str__(self):
return str(self.decode(flat=True))
# Arithmetic operators ---
def __add__(self, matrix):
return self.from_mmatrix(self.matrix + matrix.matrix)
    def __mul__(self, matrix):
        return self.from_mmatrix(self.matrix * matrix.matrix)
    def __sub__(self, matrix):
        return self.from_mmatrix(self.matrix - matrix.matrix)
# Comparison operators ---
def __eq__(self, matrix):
return self.matrix == matrix.matrix
def __ne__(self, matrix):
return self.matrix != matrix.matrix
def __ge__(self, matrix):
return self.matrix >= matrix.matrix
def __gt__(self, matrix):
return self.matrix > matrix.matrix
def __le__(self, matrix):
return self.matrix <= matrix.matrix
def __lt__(self, matrix):
return self.matrix < matrix.matrix
    # Emulate container type ---
def __getitem__(self, key):
if isinstance(key, tuple):
return self.matrix.getElement(*key)
return self.matrix[key]
def __setitem__(self, key, value):
if isinstance(key, tuple):
self.matrix.setElement(*(key + (value,)))
return
self.matrix[key] = value
# Constructor ---
def __init__(self, *values):
if values:
values = [values]
self._matrix = OpenMaya.MMatrix(*values)
@property
def transform(self):
"""MTransformationMatrix: The maya transformation matrix."""
return OpenMaya.MTransformationMatrix(self.matrix)
# Class methods ---
@classmethod
def from_mmatrix(cls, mmatrix):
"""Create a matrix from a maya matrix."""
return cls(*list(mmatrix))
@classmethod
def identity(cls):
"""Create a identity matrix."""
return cls.from_mmatrix(OpenMaya.MMatrix.kIdentity)
    @classmethod
    def compose(cls, translate=Vector(), rotate=Vector(), scale=Vector.one()):
        """Compose a matrix from translate, rotate and scale value.
        Minimal sketch: rotate is assumed to be in radians, XYZ order.
        """
        srt = OpenMaya.MTransformationMatrix()
        srt.setTranslation(translate.vector, Space.WORLD)
        srt.setRotation(OpenMaya.MEulerRotation(rotate.x, rotate.y, rotate.z))
        srt.setScale((scale.x, scale.y, scale.z), Space.WORLD)
        return cls.from_mmatrix(srt.asMatrix())
# Read properties ---
@property
def matrix(self):
"""MMatrix: The maya matrix."""
return self._matrix
    # Read write properties ---
@property
def translate(self):
"""Vector: The translation component."""
return Vector.from_mvector(self.transform.translation(Space.WORLD))
@translate.setter
def translate(self, value):
srt = self.transform.setTranslation(value.vector, Space.WORLD)
self._matrix = srt.asMatrix()
# Public methods ---
    def decompose(self):
        """Decompose matrix into translate, rotate and scale value.
        Minimal sketch based on the maya transformation matrix.
        """
        srt = self.transform
        return (Vector.from_mvector(srt.translation(Space.WORLD)),
                EulerRotation.from_meuler_rotation(srt.rotation()),
                Vector(*srt.scale(Space.WORLD)))
def inverse(self):
"""Inverted copy of the matrix.
Returns:
Matrix: The inverted matrix.
"""
return self.from_mmatrix(self.matrix.inverse())
def decode(self, flat=False):
"""Decode the matrix into a two-dimensional array.
Arguments:
flat (bool): Flatten the result into a single-dimensional array.
Returns:
tuple: The decoded matrix.
"""
matrix = []
for i in range(4):
values = tuple(self.matrix.getElement(i, j) for j in range(4))
if flat:
matrix.extend(values)
else:
matrix.append(values)
return tuple(matrix)
    def asrotate(self):
        """Create a matrix with the rotate component."""
        return self.from_mmatrix(self.transform.asRotateMatrix())
    def asscale(self):
        """Create a matrix with the scale component."""
        return self.from_mmatrix(self.transform.asScaleMatrix())
# Aliases ---
__neg__ = inverse
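# Illustrative usage sketch for the Matrix wrapper above (added example, not
# part of the original module):
#
#   m = Matrix.identity()
#   m.translate = Vector(1, 2, 3)
#   m.decode()                           # -> four rows of four floats
#   (m * m.inverse()).decode(flat=True)  # -> identity values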
class EulerRotation(object):
"""3D rotation."""
XYZ = OpenMaya.MEulerRotation.kXYZ
YZX = OpenMaya.MEulerRotation.kYZX
ZXY = OpenMaya.MEulerRotation.kZXY
XZY = OpenMaya.MEulerRotation.kXZY
YXZ = OpenMaya.MEulerRotation.kYXZ
ZYX = OpenMaya.MEulerRotation.kZYX
    def __init__(self, x=0, y=0, z=0, order=XYZ):
self._rotation = OpenMaya.MEulerRotation(x, y, z, order)
@classmethod
def from_meuler_rotation(cls, rotation):
"""Create a euler rotation from a maya euler rotation."""
return cls(rotation.x, rotation.y, rotation.z, rotation.order)
class Quaternion(object):
"""Quaternion math."""
def __init__(self, x=0, y=0, z=0, w=1):
self._quaternion = OpenMaya.MQuaternion(x, y, z, w)
@classmethod
def from_mquaternion(cls, mquaternion):
"""Create a quaternion from a maya quaternion."""
return cls(mquaternion.x, mquaternion.y, mquaternion.z, mquaternion.w)
# Utilities
def _match_filter(node, filter, strict=False):
"""Check if the node fit with the specified filter."""
return filter is None or node.istype(filter, strict)
def _wrap(func, *args, **kwargs):
"""To do."""
def _convert(func_, obj):
try:
return func_(obj)
except BaseException:
return obj
# First, decode each arguments
args_ = [_convert(decode, x) for x in args]
kwargs_ = {k: _convert(decode, v) for k, v in kwargs.items()}
# Execute the function
returned = func(*args_, **kwargs_)
if isinstance(returned, OpenMaya.MSelectionList):
returned = returned.getSelectionStrings()
# Finally encode the returned object(s)
if isinstance(returned, _STRING_TYPES):
return _convert(encode, returned)
if isinstance(returned, (list, tuple, set)):
return type(returned)(_convert(encode, x) for x in returned)
return returned
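# Illustrative sketch of how ``_wrap`` can be used (added example; it assumes
# the ``encode``/``decode`` helpers defined elsewhere in this module and a
# running Maya session):
#
#   from maya import cmds
#   node = _wrap(cmds.createNode, "transform")   # returned name is encoded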
# MIT License
# Copyright (c) 2022 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| StarcoderdataPython |
12834545 | <reponame>vincenttran-msft/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
import importlib
from .._patch import DUPLICATE_PARAMS_POLICY
from ._recovery_services_backup_passive_client import RecoveryServicesBackupPassiveClient as RecoveryServicesBackupPassiveClientGenerated
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
class RecoveryServicesBackupPassiveClient(RecoveryServicesBackupPassiveClientGenerated):
__doc__ = RecoveryServicesBackupPassiveClientGenerated.__doc__
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
per_call_policies = kwargs.pop("per_call_policies", [])
try:
per_call_policies.append(DUPLICATE_PARAMS_POLICY)
except AttributeError:
per_call_policies = [per_call_policies, DUPLICATE_PARAMS_POLICY]
super().__init__(
credential=credential,
subscription_id=subscription_id,
base_url=base_url,
per_call_policies=per_call_policies,
**kwargs
)
# This file is used for handwritten extensions to the generated code. Example:
# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md
def patch_sdk():
curr_package = importlib.import_module("azure.mgmt.recoveryservicesbackup.passivestamp.aio")
curr_package.RecoveryServicesBackupPassiveClient = RecoveryServicesBackupPassiveClient
| StarcoderdataPython |
217685 | from pychebfun import *
import numpy as np
import matplotlib.pyplot as plt
# Construct a Python function f and the vector of points at which we want
# to plot it.
def f(x):
return np.sin(6*x) + np.sin(30*np.exp(x))
x = np.linspace(-1,1,1000)
# Plot f on the above points
plt.plot(x,f(x),'k',linewidth=10,alpha=0.3, label="Actual $f$")
# Construct a chebfun interpolation on 20, 40, and 60 points. Evaluate the
# interpolations at the above vector of points and plot.
interps = [20,40,60]
ps = [chebfun(f,N=N) for N in interps]
for p in ps:
label = "Chebfun Interpolant: $N=%d$" % p.size()
plot(p, linewidth=3, label=label)
plt.legend()
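# Added so the figure is rendered when running the script non-interactively
# (assumes a display backend is available).
plt.show()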
| StarcoderdataPython |