1859869 | <gh_stars>1-10
"""
Given an unsorted array of positive integers.
Find the number of triangles that can be formed with three different array elements as three sides of triangles.
For a triangle to be possible from 3 values, the sum of any two values (or sides) must be greater than the third value (or third side).
For example, if the input array is {4, 6, 3, 7}, the output should be 3.
There are three triangles possible {3, 4, 6}, {4, 6, 7} and {3, 6, 7}.
Note that {3, 4, 7} is not a possible triangle.
As another example, consider the array {10, 21, 22, 100, 101, 200, 300}.
There can be 6 possible triangles: {10, 21, 22}, {21, 100, 101}, {22, 100, 101}, {10, 100, 101}, {100, 101, 200} and {101, 200, 300}
SOLUTION:
Sort the numbers in ascending order.
Consider triples a_i <= a_j <= a_k with i < j < k.
For each pair (i, j), find the largest k such that a_k < a_i + a_j.
Then every triple (a_i, a_j, a_l) with j < l <= k is a valid triangle: since a_l >= a_j >= a_i,
the only triangle inequality that can fail is a_l < a_i + a_j, and it holds by the choice of k.
Now consider two pairs (i, j1) and (i, j2) with j1 <= j2.
The k found for (i, j2) is at least the k found for (i, j1), so while iterating over j
the search for k can resume from the previous k instead of restarting.
This gives O(n) work for each fixed i, hence O(n^2) for the whole algorithm
(on top of the O(n log n) sort).
"""
def num_triangles(arr):
    ans = 0
    arr.sort()
    for i in range(0, len(arr)):
        k = i + 2
        for j in range(i + 1, len(arr)):
            while k < len(arr) and arr[i] + arr[j] > arr[k]:
                k += 1
            ans += k - j - 1
    return ans


def main():
    arr = [3, 4, 6, 7]
    arr = [10, 21, 22, 100, 101, 200, 300]
    ans = num_triangles(arr)
    print(ans)


main()
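
# A small brute-force cross-check (added for illustration, not part of the
# original snippet): it enumerates all O(n^3) triples directly, which is handy
# for validating num_triangles on small inputs.
from itertools import combinations


def num_triangles_bruteforce(arr):
    count = 0
    for a, b, c in combinations(sorted(arr), 3):
        # With a <= b <= c, the only triangle inequality that can fail is a + b > c.
        if a + b > c:
            count += 1
    return count


# Both functions should agree on the examples from the problem statement:
#   num_triangles([4, 6, 3, 7]) == num_triangles_bruteforce([4, 6, 3, 7]) == 3
#   num_triangles([10, 21, 22, 100, 101, 200, 300]) == num_triangles_bruteforce([10, 21, 22, 100, 101, 200, 300]) == 6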
| StarcoderdataPython |
8189681 | <reponame>red1408/healthcare-deid<filename>eval/run_pipeline_lib.py
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate DeID findings on Google Cloud."""
from __future__ import absolute_import
from datetime import datetime
import logging
import math
import posixpath
import xml.etree.ElementTree as XmlTree
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from common import gcsutil
from eval import eval_lib
from eval import results_pb2
from google.cloud import storage
from google.protobuf import text_format
def _get_utcnow():
return datetime.utcnow()
def get_findings_from_text(raw_text, types_to_ignore):
"""Convert MAE xml to eval_lib.Finding objects."""
tree = XmlTree.fromstring(raw_text)
note_text = tree.find('TEXT').text
findings = set()
if tree.find('TAGS') is not None:
for tag_elem in tree.find('TAGS'):
if tag_elem.tag in types_to_ignore:
continue
findings.add(eval_lib.Finding.from_tag(
tag_elem.tag, tag_elem.get('spans'), note_text))
return findings, note_text
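
# Illustrative sketch (not from the original module): get_findings_from_text
# expects MAE-style XML with a TEXT element holding the note and a TAGS element
# whose children carry a 'spans' attribute. A minimal input could look roughly
# like the string below; the exact tag names and span syntax depend on the MAE
# annotation scheme actually in use.
#
#   example_xml = """<deid>
#     <TEXT><![CDATA[John visited on 2017-01-01.]]></TEXT>
#     <TAGS>
#       <NAME id="N0" spans="0~4" text="John" />
#       <DATE id="D0" spans="16~26" text="2017-01-01" />
#     </TAGS>
#   </deid>"""
#   findings, note_text = get_findings_from_text(example_xml, types_to_ignore=['OTHER'])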
def _get_findings_from_file(filename, storage_client, types_to_ignore):
"""Parse findings from the given MAE XML file."""
bucket = storage_client.lookup_bucket(filename.bucket)
if not bucket:
raise Exception('Failed to get bucket "{}".'.format(filename.bucket))
blob = bucket.get_blob(filename.blob)
if not blob:
raise Exception('Failed to get blob "{}" in bucket "{}".'.format(
filename.blob, filename.bucket))
contents = blob.download_as_string()
return get_findings_from_text(contents, types_to_ignore)
def compare_findings(findings, golden_findings, record_id, note_text,
golden_note_text):
"""Compare findings against goldens."""
logging.info('Running comparison for record "%s"', record_id)
if note_text != golden_note_text:
# If the only difference is a single trailing character, ignore it.
if ((len(note_text) == len(golden_note_text) + 1 and
note_text.startswith(golden_note_text)) or
(len(golden_note_text) == len(note_text) + 1 and
golden_note_text.startswith(note_text))):
pass
else:
raise Exception(
'Note text is different from golden for record "{}".'.format(
record_id))
strict_entity_results = eval_lib.strict_entity_compare(
findings, golden_findings, record_id)
binary_token_results = eval_lib.binary_token_compare(
findings, golden_findings, record_id)
return strict_entity_results, binary_token_results
def compare_bq_row(row, types_to_ignore):
"""Compare the findings in the given BigQuery row.
Args:
row: BQ row: Map containing (findings_record_id, findings_xml, golden_xml).
types_to_ignore: List of strings representing types that should be excluded
from the analysis.
Returns:
(IndividualResult, IndividualResult), where the first is for strict entity
matching and the second is for binary token matching.
Raises:
Exception: If golden_xml doesn't exist.
"""
findings, note_text = get_findings_from_text(row['findings_xml'],
types_to_ignore)
if 'golden_xml' not in row or row['golden_xml'] is None:
raise Exception(
'No golden found for record %s.' % row['findings_record_id'])
golden_findings, golden_note_text = get_findings_from_text(row['golden_xml'],
types_to_ignore)
record_id = row['findings_record_id']
return compare_findings(findings, golden_findings, record_id, note_text,
golden_note_text)
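
# For illustration (values are made up): the row dict consumed above follows the
# shape described in the docstring, e.g.
#   row = {'findings_record_id': 'note-1',
#          'findings_xml': '<deid>...</deid>',
#          'golden_xml': '<deid>...</deid>'}
#   strict_result, binary_result = compare_bq_row(row, types_to_ignore=[])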
def compare(filename, golden_dir, types_to_ignore):
"""Load data from the file and the golden file and compare.
Args:
filename: Name of the file to compare.
golden_dir: Directory with golden findings to compare against. Must contain
a file with the same basename as filename.
types_to_ignore: List of strings representing types that should be excluded
from the analysis.
Returns:
(IndividualResult, IndividualResult), where the first is for strict entity
matching and the second is for binary token matching.
"""
storage_client = storage.Client()
golden_file = gcsutil.GcsFileName.from_path(
posixpath.join(golden_dir, posixpath.basename(filename.blob)))
findings, note_text = _get_findings_from_file(
filename, storage_client, types_to_ignore)
golden_findings, golden_note_text = _get_findings_from_file(
golden_file, storage_client, types_to_ignore)
record_id = posixpath.basename(filename.blob)
if record_id.endswith('.xml'):
record_id = record_id[:-4]
return compare_findings(findings, golden_findings, record_id, note_text,
golden_note_text)
class OverallResults(object):
"""Class to hold and accumulate the summarized results to output."""
def __init__(self):
self.strict_entity_matching = eval_lib.AccumulatedResults()
self.binary_token_matching = eval_lib.AccumulatedResults()
self.is_empty = True
def __add__(self, other):
new = OverallResults()
new.strict_entity_matching = (
self.strict_entity_matching + other.strict_entity_matching)
new.binary_token_matching = (
self.binary_token_matching + other.binary_token_matching)
new.is_empty = False
return new
def to_results_proto(self):
"""Convert to results_pb2.Results."""
results = results_pb2.Results()
eval_lib.calculate_stats(self.strict_entity_matching.micro)
results.strict_entity_matching_results.micro_average_results.CopyFrom(
self.strict_entity_matching.micro)
results.strict_entity_matching_results.macro_average_results.CopyFrom(
self.strict_entity_matching.macro.calculate_stats())
r = results.strict_entity_matching_results.per_type_micro_average_results
r.extend(self.strict_entity_matching.per_type_protos())
eval_lib.calculate_stats(self.binary_token_matching.typeless_micro)
results.binary_token_matching_results.micro_average_results.CopyFrom(
self.binary_token_matching.typeless_micro)
results.binary_token_matching_results.macro_average_results.CopyFrom(
self.binary_token_matching.typeless_macro.calculate_stats())
results.binary_token_matching_results.per_type_micro_average_results.extend(
self.binary_token_matching.per_type_protos())
return results
class CombineResultsFn(beam.CombineFn):
"""CombineFn to take individual results and aggregate them."""
def create_accumulator(self):
return OverallResults()
def add_input(self, overall_results, individual_results):
strict_entity_result, binary_token_result = individual_results
overall_results.strict_entity_matching.add_result(strict_entity_result)
overall_results.binary_token_matching.add_result(binary_token_result)
return overall_results
def merge_accumulators(self, accumulators):
overall_results = OverallResults()
for a in accumulators:
overall_results += a
return overall_results
def extract_output(self, overall_results):
if overall_results is None or overall_results.is_empty:
return None
# Dataflow's pickling gets confused if it has to deal with raw protos, so we
# serialize them explicitly.
results = overall_results.to_results_proto()
logging.info('Aggregate results:\n%s', results)
return results.SerializeToString()
def write_aggregate_results_to_gcs(results_bytes, results_dir):
"""Write the aggregate results to results_dir."""
storage_client = storage.Client()
results = results_pb2.Results()
results.ParseFromString(results_bytes)
filename = gcsutil.GcsFileName.from_path(
posixpath.join(results_dir, 'aggregate_results.txt'))
logging.info('Writing aggregate results to %s', filename.string())
bucket = storage_client.lookup_bucket(filename.bucket)
blob = bucket.blob(filename.blob)
blob.upload_from_string(str(results))
def _create_row(stats, now, extra_columns=tuple()):
"""Create a BigQuery row from the given stats."""
row = {'true_positives': stats.true_positives,
'false_positives': stats.false_positives,
'false_negatives': stats.false_negatives}
if not math.isnan(stats.precision):
row['precision'] = stats.precision
if not math.isnan(stats.recall):
row['recall'] = stats.recall
if not math.isnan(stats.f_score):
row['f_score'] = stats.f_score
row['timestamp'] = now
for column_name, val in extra_columns:
row[column_name] = val
return row
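
# For illustration (numbers are made up): with stats carrying true_positives=8,
# false_positives=2, false_negatives=1 and the derived precision/recall/f_score,
# _create_row(stats, now, [('record_id', 'note-1')]) would produce roughly
#   {'true_positives': 8, 'false_positives': 2, 'false_negatives': 1,
#    'precision': 0.8, 'recall': 0.888..., 'f_score': 0.842...,
#    'timestamp': now, 'record_id': 'note-1'}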
def format_individual_result_for_bq(result, now):
_, binary_token_result = result
return _create_row(binary_token_result.typeless, now,
[('record_id', binary_token_result.record_id)])
def format_aggregate_results_for_bq(aggregate_results_bytes, now):
"""Format results as a BigQuery row (dict from column name to value)."""
ret = []
aggregate_results = results_pb2.Results()
aggregate_results.ParseFromString(aggregate_results_bytes)
binary_token_results = aggregate_results.binary_token_matching_results
ret.append(_create_row(binary_token_results.micro_average_results, now,
[('info_type', 'ALL')]))
for result in binary_token_results.per_type_micro_average_results:
ret.append(_create_row(result.stats, now,
[('info_type', result.info_type_category)]))
return ret
def format_aggregate_text_for_bq(text_aggregate_results, timestamp):
"""Format results as a BigQuery row from a text input."""
ret = []
aggregate_results = results_pb2.Results()
text_format.Merge(text_aggregate_results, aggregate_results)
binary_token_results = aggregate_results.binary_token_matching_results
ret.append(_create_row(binary_token_results.micro_average_results, timestamp,
[('info_type', 'ALL')]))
for result in binary_token_results.per_type_micro_average_results:
ret.append(_create_row(result.stats, timestamp,
[('info_type', result.info_type_category)]))
return ret
def format_debug_info(entity_and_binary_result_pair, now):
_, binary_token_result = entity_and_binary_result_pair
for debug_info in binary_token_result.debug_info:
debug_info['timestamp'] = now
return binary_token_result.debug_info
def get_binary_token_result(entity_and_binary_result_pair):
_, binary_token_result = entity_and_binary_result_pair
pb = results_pb2.IndividualResult()
pb.record_id = binary_token_result.record_id
pb.stats.CopyFrom(binary_token_result.typeless)
return text_format.MessageToString(pb)
BASE_SCHEMA = (
'recall:FLOAT,precision:FLOAT,f_score:FLOAT,'
'true_positives:INTEGER,false_positives:INTEGER,false_negatives:INTEGER,'
'timestamp:TIMESTAMP')
def run_pipeline(mae_input_pattern, mae_golden_dir, results_dir,
mae_input_query, mae_golden_table,
write_per_note_stats_to_gcs, results_table,
per_note_results_table, debug_output_table, types_to_ignore,
timestamp, pipeline_args):
"""Evaluate the input files against the goldens."""
if ((mae_input_pattern is None) == (mae_input_query is None) or
(mae_golden_dir is None) == (mae_golden_table is None) or
(mae_input_query is None) != (mae_golden_table is None) or
(mae_input_pattern is None) != (mae_golden_dir is None)):
return ['Must set exactly one of: '
'(--mae_input_pattern AND --mae_golden_dir) '
'OR (--mae_input_query AND --mae_golden_table).']
if write_per_note_stats_to_gcs and not results_dir:
return ['Must set --results_dir when --write_per_note_stats_to_gcs is set.']
logging.info('Starting evaluation.')
p = beam.Pipeline(options=PipelineOptions(pipeline_args))
if mae_input_pattern:
filenames = []
storage_client = storage.Client()
for f in gcsutil.find_files(mae_input_pattern, storage_client):
if posixpath.dirname(f.string()) != posixpath.dirname(mae_input_pattern):
# Ignore subdirectories.
continue
filenames.append(f)
per_note_results = None
if mae_input_query and mae_golden_table:
query_template = ('SELECT findings.record_id, findings.xml, golden.xml '
'FROM ({}) AS findings '
'LEFT JOIN [{}] AS golden '
'ON findings.record_id=golden.record_id')
query = query_template.format(mae_input_query, mae_golden_table)
per_note_results = (p |
beam.io.Read(beam.io.BigQuerySource(query=query)) |
beam.Map(compare_bq_row, types_to_ignore))
else:
per_note_results = (p |
beam.Create(filenames) |
beam.Map(compare, mae_golden_dir, types_to_ignore))
if not timestamp:
timestamp = str(_get_utcnow())
if debug_output_table:
_ = (per_note_results |
beam.FlatMap(format_debug_info, timestamp) |
'write_debug_info' >> beam.io.Write(beam.io.BigQuerySink(
debug_output_table,
schema=('record_id:STRING,classification:STRING,info_type:STRING,'
'text:STRING,context:STRING,start:INTEGER,end:INTEGER,'
'timestamp:TIMESTAMP'),
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)))
if per_note_results_table:
_ = (per_note_results |
beam.Map(format_individual_result_for_bq, timestamp) |
'write_per_note' >> beam.io.Write(beam.io.BigQuerySink(
per_note_results_table, schema=('record_id:STRING,' + BASE_SCHEMA),
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)))
aggregate_results = (per_note_results |
beam.CombineGlobally(CombineResultsFn()))
if results_dir:
_ = (aggregate_results |
beam.Map(write_aggregate_results_to_gcs, results_dir))
if results_table:
_ = (aggregate_results |
beam.FlatMap(format_aggregate_results_for_bq, timestamp) |
'write_aggregate' >> beam.io.Write(beam.io.BigQuerySink(
results_table, schema=('info_type:STRING,' + BASE_SCHEMA),
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)))
if write_per_note_stats_to_gcs:
_ = (per_note_results |
beam.Map(get_binary_token_result) |
beam.io.WriteToText(posixpath.join(results_dir, 'per-note-results')))
result = p.run().wait_until_finish()
logging.info('Eval result: %s', result)
return []
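
# Hypothetical invocation sketch (not part of the original module); run_pipeline
# is normally driven by the flags defined in add_all_args below, and all paths,
# table names and project values here are placeholders:
#   errors = run_pipeline(
#       mae_input_pattern='gs://my-bucket/findings/*.xml',
#       mae_golden_dir='gs://my-bucket/goldens',
#       results_dir='gs://my-bucket/results',
#       mae_input_query=None, mae_golden_table=None,
#       write_per_note_stats_to_gcs=False,
#       results_table=None, per_note_results_table=None, debug_output_table=None,
#       types_to_ignore=['OTHER'], timestamp=None,
#       pipeline_args=['--project', 'my-project', '--runner', 'DirectRunner'])
#   if errors:
#       logging.error('\n'.join(errors))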
def add_all_args(parser):
"""Add command-line arguments to the parser."""
parser.add_argument(
'--mae_input_pattern', type=str, required=False,
help='GCS directory with MAE files to compare against goldens.')
parser.add_argument(
'--mae_input_query', type=str, required=False,
help='BQ query with MAE XML to compare against goldens.')
parser.add_argument(
'--mae_golden_dir', type=str, required=False,
help='GCS directory with "golden" MAE files to use as a baseline.')
parser.add_argument(
'--mae_golden_table', type=str, required=False,
help='BQ table with "golden" MAE XML to use as a baseline.')
parser.add_argument('--results_dir', type=str,
help='GCS directory to write results to.')
parser.add_argument('--write_per_note_stats_to_gcs', type=bool, default=False,
help=('Also write per-note binary token matching '
'results to GCS.'))
parser.add_argument('--results_table', type=str,
help=('Bigquery table to write overall (micro-averaged) '
'binary token matching results to.'))
parser.add_argument('--per_note_results_table', type=str,
help=('Bigquery table to write per-note binary token '
'matching results to.'))
parser.add_argument('--types_to_ignore', type=lambda s: s.split(','),
help=('Comma-separated list of types that should be '
'excluded from the analysis.'))
parser.add_argument('--debug_output_table', type=str,
help=('Table for storing debug info (including PHI!) for '
'binary token matching results.'))
| StarcoderdataPython |
1707105 | <filename>pycqed/instrument_drivers/meta_instrument/LutMans/mw_lutman.py
from .base_lutman import Base_LutMan, get_redundant_codewords, get_wf_idx_from_name
import numpy as np
from collections.abc import Iterable
from collections import OrderedDict
from qcodes.instrument.parameter import ManualParameter
from qcodes.utils import validators as vals
from pycqed.measurement.waveform_control_CC import waveform as wf
import time
default_mw_lutmap = {
0 : {"name" : "I" , "theta" : 0 , "phi" : 0 , "type" : "ge"},
1 : {"name" : "rX180" , "theta" : 180 , "phi" : 0 , "type" : "ge"},
2 : {"name" : "rY180" , "theta" : 180 , "phi" : 90, "type" : "ge"},
3 : {"name" : "rX90" , "theta" : 90 , "phi" : 0 , "type" : "ge"},
4 : {"name" : "rY90" , "theta" : 90 , "phi" : 90, "type" : "ge"},
5 : {"name" : "rXm90" , "theta" : -90 , "phi" : 0 , "type" : "ge"},
6 : {"name" : "rYm90" , "theta" : -90 , "phi" : 90, "type" : "ge"},
7 : {"name" : "rPhi90", "theta" : 90 , "phi" : 0 , "type" : "ge"},
8 : {"name" : "spec" , "type" : "spec"} ,
9 : {"name" : "rX12" , "theta" : 180 , "phi" : 0 , "type" : "ef"},
10 : {"name" : "square", "type" : "square"},
11 : {"name" : "rY45" , "theta" : 45 , "phi" : 90, "type" : "ge"},
12 : {"name" : "rYm45" , "theta" : -45 , "phi" : 90, "type" : "ge"},
13 : {"name" : "rX45" , "theta" : 45 , "phi" : 0 , "type" : "ge"},
14 : {"name" : "rXm45" , "theta" : -45 , "phi" : 0 , "type" : "ge"},
30 : {"name" : "rPhi180" , "theta" : 180 , "phi" : 0 , "type" : "ge"},
60 : {"name" : "phaseCorrNW" , "type" : "phase"},
61 : {"name" : "phaseCorrNE" , "type" : "phase"},
62 : {"name" : "phaseCorrSW" , "type" : "phase"},
63 : {"name" : "phaseCorrSE" , "type" : "phase"},
}
inspire_mw_lutmap = {
0 : {"name" : "I" , "theta" : 0 , "phi" : 0 , "type" : "ge"}, # I for CW compatibility
1 : {"name" : "rX180" , "theta" : 180 , "phi" : 0 , "type" : "ge"}, # rX180 for CW compatibility
2 : {"name" : "rY180" , "theta" : 180 , "phi" : 90 , "type" : "ge"}, # rY180 for CW compatibility
3 : {"name" : "rX90" , "theta" : 90 , "phi" : 0 , "type" : "ge"}, # rX90 for CW compatibility
4 : {"name" : "rY90" , "theta" : 90 , "phi" : 90 , "type" : "ge"}, # rY90 for CW compatibility
5 : {"name" : "rX270" , "theta" : 270 , "phi" : 0 , "type" : "ge"}, # rXm90 for CW compatibility
6 : {"name" : "rY270" , "theta" : 270 , "phi" : 90 , "type" : "ge"}, # rYm90 for CW compatibility
7 : {"name" : "rX5" , "theta" : 5.625 , "phi" : 0 , "type" : "ge"},
8 : {"name" : "rX11" , "theta" : 11.25 , "phi" : 0 , "type" : "ge"},
9 : {"name" : "rX12" , "theta" : 180 , "phi" : 0 , "type" : "ef"}, # rX12 for CW compatibility
10 : {"name" : "rX16" , "theta" : 16.875 , "phi" : 0 , "type" : "ge"},
11 : {"name" : "rY45" , "theta" : 45 , "phi" : 90 , "type" : "ge"}, # rY45 for CW compatibility
12 : {"name" : "rY315" , "theta" : -45 , "phi" : 90 , "type" : "ge"}, # rYm45 for CW compatibility
13 : {"name" : "rX45" , "theta" : 45 , "phi" : 0 , "type" : "ge"}, # rX45 for CW compatibility
14 : {"name" : "rX315" , "theta" : -45 , "phi" : 0 , "type" : "ge"}, # rXm45 for CW compatibility
15 : {"name" : "rX22" , "theta" : 22.5 , "phi" : 0 , "type" : "ge"},
16 : {"name" : "rX28" , "theta" : 28.125 , "phi" : 0 , "type" : "ge"},
17 : {"name" : "rX33" , "theta" : 33.75 , "phi" : 0 , "type" : "ge"},
18 : {"name" : "rX39" , "theta" : 39.375 , "phi" : 0 , "type" : "ge"},
19 : {"name" : "rX50" , "theta" : 50.625 , "phi" : 0 , "type" : "ge"},
20 : {"name" : "rX56" , "theta" : 56.25 , "phi" : 0 , "type" : "ge"},
21 : {"name" : "rX61" , "theta" : 61.875 , "phi" : 0 , "type" : "ge"},
22 : {"name" : "rX67" , "theta" : 67.5 , "phi" : 0 , "type" : "ge"},
23 : {"name" : "rX73" , "theta" : 73.125 , "phi" : 0 , "type" : "ge"},
24 : {"name" : "rX78" , "theta" : 78.75 , "phi" : 0 , "type" : "ge"},
25 : {"name" : "rX84" , "theta" : 84.375 , "phi" : 0 , "type" : "ge"},
26 : {"name" : "rX95" , "theta" : 95.625 , "phi" : 0 , "type" : "ge"},
27 : {"name" : "rX101" , "theta" : 101.25 , "phi" : 0 , "type" : "ge"},
28 : {"name" : "rX106" , "theta" : 106.875 , "phi" : 0 , "type" : "ge"},
29 : {"name" : "rX112" , "theta" : 112.5 , "phi" : 0 , "type" : "ge"},
30 : {"name" : "rX118" , "theta" : 118.125 , "phi" : 0 , "type" : "ge"},
31 : {"name" : "rX123" , "theta" : 123.75 , "phi" : 0 , "type" : "ge"},
32 : {"name" : "rX129" , "theta" : 129.375 , "phi" : 0 , "type" : "ge"},
33 : {"name" : "rX135" , "theta" : 135 , "phi" : 0 , "type" : "ge"},
34 : {"name" : "rX140" , "theta" : 140.625 , "phi" : 0 , "type" : "ge"},
35 : {"name" : "rX146" , "theta" : 146.25 , "phi" : 0 , "type" : "ge"},
36 : {"name" : "rX151" , "theta" : 151.875 , "phi" : 0 , "type" : "ge"},
37 : {"name" : "rX157" , "theta" : 157.5 , "phi" : 0 , "type" : "ge"},
38 : {"name" : "rX163" , "theta" : 163.125 , "phi" : 0 , "type" : "ge"},
39 : {"name" : "rX168" , "theta" : 168.75 , "phi" : 0 , "type" : "ge"},
40 : {"name" : "rX174" , "theta" : 174.375 , "phi" : 0 , "type" : "ge"},
41 : {"name" : "rX185" , "theta" : -174.375 , "phi" : 0 , "type" : "ge"},
42 : {"name" : "rX191" , "theta" : -168.75 , "phi" : 0 , "type" : "ge"},
43 : {"name" : "rX196" , "theta" : -163.125 , "phi" : 0 , "type" : "ge"},
44 : {"name" : "rX202" , "theta" : -157.5 , "phi" : 0 , "type" : "ge"},
45 : {"name" : "rX208" , "theta" : -151.875 , "phi" : 0 , "type" : "ge"},
46 : {"name" : "rX213" , "theta" : -146.25 , "phi" : 0 , "type" : "ge"},
47 : {"name" : "rX219" , "theta" : -140.625 , "phi" : 0 , "type" : "ge"},
48 : {"name" : "rX225" , "theta" : -135 , "phi" : 0 , "type" : "ge"},
49 : {"name" : "rX230" , "theta" : -129.375 , "phi" : 0 , "type" : "ge"},
50 : {"name" : "rX236" , "theta" : -123.75 , "phi" : 0 , "type" : "ge"},
51 : {"name" : "rX241" , "theta" : -118.125 , "phi" : 0 , "type" : "ge"},
52 : {"name" : "rX247" , "theta" : -112.5 , "phi" : 0 , "type" : "ge"},
53 : {"name" : "rX253" , "theta" : -106.875 , "phi" : 0 , "type" : "ge"},
54 : {"name" : "rX258" , "theta" : -101.25 , "phi" : 0 , "type" : "ge"},
55 : {"name" : "rX264" , "theta" : -95.625 , "phi" : 0 , "type" : "ge"},
56 : {"name" : "rX275" , "theta" : -84.375 , "phi" : 0 , "type" : "ge"},
57 : {"name" : "rX281" , "theta" : -78.75 , "phi" : 0 , "type" : "ge"},
58 : {"name" : "rX286" , "theta" : -73.125 , "phi" : 0 , "type" : "ge"},
59 : {"name" : "rX292" , "theta" : -67.5 , "phi" : 0 , "type" : "ge"},
60 : {"name" : "rX298" , "theta" : -61.875 , "phi" : 0 , "type" : "ge"},
61 : {"name" : "rX303" , "theta" : -56.25 , "phi" : 0 , "type" : "ge"},
62 : {"name" : "rX309" , "theta" : -50.625 , "phi" : 0 , "type" : "ge"},
63 : {"name" : "rX320" , "theta" : -39.375 , "phi" : 0 , "type" : "ge"},
64 : {"name" : "rX326" , "theta" : -33.75 , "phi" : 0 , "type" : "ge"},
65 : {"name" : "rX331" , "theta" : -28.125 , "phi" : 0 , "type" : "ge"},
66 : {"name" : "rX337" , "theta" : -22.5 , "phi" : 0 , "type" : "ge"},
67 : {"name" : "rX343" , "theta" : -16.875 , "phi" : 0 , "type" : "ge"},
68 : {"name" : "rX348" , "theta" : -11.25 , "phi" : 0 , "type" : "ge"},
69 : {"name" : "rX354" , "theta" : -5.625 , "phi" : 0 , "type" : "ge"},
70 : {"name" : "rY5" , "theta" : 5.625 , "phi" : 90 , "type" : "ge"},
71 : {"name" : "rY11" , "theta" : 11.25 , "phi" : 90 , "type" : "ge"},
72 : {"name" : "rY16" , "theta" : 16.875 , "phi" : 90 , "type" : "ge"},
73 : {"name" : "rY22" , "theta" : 22.5 , "phi" : 90 , "type" : "ge"},
74 : {"name" : "rY28" , "theta" : 28.125 , "phi" : 90 , "type" : "ge"},
75 : {"name" : "rY33" , "theta" : 33.75 , "phi" : 90 , "type" : "ge"},
76 : {"name" : "rY39" , "theta" : 39.375 , "phi" : 90 , "type" : "ge"},
77 : {"name" : "rY50" , "theta" : 50.625 , "phi" : 90 , "type" : "ge"},
78 : {"name" : "rY56" , "theta" : 56.25 , "phi" : 90 , "type" : "ge"},
79 : {"name" : "rY61" , "theta" : 61.875 , "phi" : 90 , "type" : "ge"},
80 : {"name" : "rY67" , "theta" : 67.5 , "phi" : 90 , "type" : "ge"},
81 : {"name" : "rY73" , "theta" : 73.125 , "phi" : 90 , "type" : "ge"},
82 : {"name" : "rY78" , "theta" : 78.75 , "phi" : 90 , "type" : "ge"},
83 : {"name" : "rY84" , "theta" : 84.375 , "phi" : 90 , "type" : "ge"},
84 : {"name" : "rY95" , "theta" : 95.625 , "phi" : 90 , "type" : "ge"},
85 : {"name" : "rY101" , "theta" : 101.25 , "phi" : 90 , "type" : "ge"},
86 : {"name" : "rY106" , "theta" : 106.875 , "phi" : 90 , "type" : "ge"},
87 : {"name" : "rY112" , "theta" : 112.5 , "phi" : 90 , "type" : "ge"},
88 : {"name" : "rY118" , "theta" : 118.125 , "phi" : 90 , "type" : "ge"},
89 : {"name" : "rY123" , "theta" : 123.75 , "phi" : 90 , "type" : "ge"},
90 : {"name" : "rY129" , "theta" : 129.375 , "phi" : 90 , "type" : "ge"},
91 : {"name" : "rY135" , "theta" : 135 , "phi" : 90 , "type" : "ge"},
92 : {"name" : "rY140" , "theta" : 140.625 , "phi" : 90 , "type" : "ge"},
93 : {"name" : "rY146" , "theta" : 146.25 , "phi" : 90 , "type" : "ge"},
94 : {"name" : "rY151" , "theta" : 151.875 , "phi" : 90 , "type" : "ge"},
95 : {"name" : "rY157" , "theta" : 157.5 , "phi" : 90 , "type" : "ge"},
96 : {"name" : "rY163" , "theta" : 163.125 , "phi" : 90 , "type" : "ge"},
97 : {"name" : "rY168" , "theta" : 168.75 , "phi" : 90 , "type" : "ge"},
98 : {"name" : "rY174" , "theta" : 174.375 , "phi" : 90 , "type" : "ge"},
99 : {"name" : "rY185" , "theta" : -174.375 , "phi" : 90 , "type" : "ge"},
100: {"name" : "rY191" , "theta" : -168.75 , "phi" : 90 , "type" : "ge"},
101: {"name" : "rY196" , "theta" : -163.125 , "phi" : 90 , "type" : "ge"},
102: {"name" : "rY202" , "theta" : -157.5 , "phi" : 90 , "type" : "ge"},
103: {"name" : "rY208" , "theta" : -151.875 , "phi" : 90 , "type" : "ge"},
104: {"name" : "rY213" , "theta" : -146.25 , "phi" : 90 , "type" : "ge"},
105: {"name" : "rY219" , "theta" : -140.625 , "phi" : 90 , "type" : "ge"},
106: {"name" : "rY225" , "theta" : -135 , "phi" : 90 , "type" : "ge"},
107: {"name" : "rY230" , "theta" : -129.375 , "phi" : 90 , "type" : "ge"},
108: {"name" : "rY236" , "theta" : -123.75 , "phi" : 90 , "type" : "ge"},
109: {"name" : "rY241" , "theta" : -118.125 , "phi" : 90 , "type" : "ge"},
110: {"name" : "rY247" , "theta" : -112.5 , "phi" : 90 , "type" : "ge"},
111: {"name" : "rY253" , "theta" : -106.875 , "phi" : 90 , "type" : "ge"},
112: {"name" : "rY258" , "theta" : -101.25 , "phi" : 90 , "type" : "ge"},
113: {"name" : "rY264" , "theta" : -95.625 , "phi" : 90 , "type" : "ge"},
114: {"name" : "rY275" , "theta" : -84.375 , "phi" : 90 , "type" : "ge"},
115: {"name" : "rY281" , "theta" : -78.75 , "phi" : 90 , "type" : "ge"},
116: {"name" : "rY286" , "theta" : -73.125 , "phi" : 90 , "type" : "ge"},
117: {"name" : "rY292" , "theta" : -67.5 , "phi" : 90 , "type" : "ge"},
118: {"name" : "rY298" , "theta" : -61.875 , "phi" : 90 , "type" : "ge"},
119: {"name" : "rY303" , "theta" : -56.25 , "phi" : 90 , "type" : "ge"},
120: {"name" : "rY309" , "theta" : -50.625 , "phi" : 90 , "type" : "ge"},
121: {"name" : "rY320" , "theta" : -39.375 , "phi" : 90 , "type" : "ge"},
122: {"name" : "rY326" , "theta" : -33.75 , "phi" : 90 , "type" : "ge"},
123: {"name" : "rY331" , "theta" : -28.125 , "phi" : 90 , "type" : "ge"},
124: {"name" : "rY337" , "theta" : -22.5 , "phi" : 90 , "type" : "ge"},
125: {"name" : "rY343" , "theta" : -16.875 , "phi" : 90 , "type" : "ge"},
126: {"name" : "rY348" , "theta" : -11.25 , "phi" : 90 , "type" : "ge"},
127: {"name" : "rY354" , "theta" : -5.625 , "phi" : 90 , "type" : "ge"}
}
valid_types = {'ge', 'ef', 'spec', 'raw-drag', 'ef-raw', 'square', 'phase'}
# _def_lm = ['I', 'rX180', 'rY180', 'rX90', 'rY90',
# 'rXm90', 'rYm90', 'rPhi90', 'spec']
# # use remaining codewords to set pi/2 gates for various angles
# for i in range(18):
# angle = i * 20
# _def_lm.append('r{}_90'.format(angle))
def mw_lutmap_is_valid(lutmap: dict) -> bool:
"""
    Test whether the lutmap obeys the lutmap schema.
    Args:
        lutmap: dict mapping integer codewords to waveform specifications.
    Return:
        valid (bool): True if the lutmap is valid; a TypeError or ValueError is raised otherwise.
"""
# FIXME: make this part of the validator for the LutMap parameter.
for key, value in lutmap.items():
if not isinstance(key, int):
raise TypeError
if value['type'] not in valid_types:
raise ValueError("{} not in {}".format(value['type'],
valid_types))
return True
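# Example (illustrative): the default lutmap defined above is expected to pass,
# i.e. `mw_lutmap_is_valid(default_mw_lutmap)` returns True, while a lutmap with
# a non-integer key or an unknown "type" raises instead of returning False.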
def theta_to_amp(theta: float, amp180: float):
"""
Convert θ in deg to pulse amplitude based on a reference amp180.
    Note that all angles are mapped onto the domain (-180, 180] so that
    the smallest possible rotation is used for each requested angle.
"""
# phase wrapped to [-180, 180)
theta_wrap = ((-theta+180) % 360-180)*-1
amp = theta_wrap/180*amp180
return amp
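# Worked examples (values follow from the wrapping formula above):
#   theta_to_amp(90,  amp180=0.5) ->  0.25
#   theta_to_amp(180, amp180=0.5) ->  0.5
#   theta_to_amp(270, amp180=0.5) -> -0.25   (270 deg wraps to -90 deg)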
class Base_MW_LutMan(Base_LutMan):
"""
The base class for the microwave lutman.
Standard microwave pulses are generated based on a lutmap.
- Schema of lutmap.
- important attributes
self._wave_dict
Typical usage flow of the mw-lutmans
1. specify a lutmap that determines what waveforms are used.
2. set some parameters such as mw_amp180
3. generate waveforms -> stored in self._wave_dict
4. upload waveforms
"""
def set_default_lutmap(self):
"""Set the default lutmap for standard microwave drive pulses."""
self.LutMap(default_mw_lutmap.copy())
def set_inspire_lutmap(self):
"""Set the default lutmap for expanded microwave drive pulses."""
self.LutMap(inspire_mw_lutmap.copy())
def codeword_idx_to_parnames(self, cw_idx: int):
"""Convert a codeword_idx to a list of par names for the waveform."""
        # Iterating over all possible channel parameters makes this work for
        # both VSM-style and non-VSM-style lutmans.
possible_channels = ('channel_GI', 'channel_GQ',
'channel_DI', 'channel_DQ',
'channel_I', 'channel_Q')
codewords = ['wave_ch{}_cw{:03}'.format(self[ch](), cw_idx)
for ch in possible_channels if hasattr(self, ch)]
return codewords
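    # For illustration: a non-VSM lutman with channel_I=1 and channel_Q=2 would
    # get ['wave_ch1_cw003', 'wave_ch2_cw003'] from codeword_idx_to_parnames(3).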
def _add_waveform_parameters(self):
# defined here so that the VSM based LutMan can overwrite this
self.wf_func = wf.mod_gauss
self.spec_func = wf.block_pulse
self._add_channel_params()
self.add_parameter('cfg_sideband_mode',
vals=vals.Enum('real-time', 'static'),
initial_value='static',
parameter_class=ManualParameter)
self.add_parameter('mw_amp180', unit='frac', vals=vals.Numbers(-1, 1),
parameter_class=ManualParameter,
initial_value=1.0)
self.add_parameter('mw_amp90_scale',
vals=vals.Numbers(-1, 1),
parameter_class=ManualParameter,
initial_value=0.5)
self.add_parameter('mw_motzoi', vals=vals.Numbers(-2, 2),
parameter_class=ManualParameter,
initial_value=0.0)
self.add_parameter('mw_gauss_width',
vals=vals.Numbers(min_value=1e-9), unit='s',
parameter_class=ManualParameter,
initial_value=4e-9)
self.add_parameter('mw_phi', label='Phase of Rphi pulse',
vals=vals.Numbers(), unit='deg',
parameter_class=ManualParameter,
initial_value=0)
self.add_parameter('spec_length',
vals=vals.Numbers(), unit='s',
parameter_class=ManualParameter,
initial_value=20e-9)
self.add_parameter('spec_amp',
vals=vals.Numbers(), unit='frac',
parameter_class=ManualParameter,
initial_value=1)
# parameters related to timings
self.add_parameter('pulse_delay', unit='s', vals=vals.Numbers(0, 1e-6),
parameter_class=ManualParameter,
initial_value=0)
        # square pulse duration for longer square pulses
self.add_parameter('sq_pulse_duration', unit='s', vals=vals.Numbers(0, 1e-6),
parameter_class=ManualParameter,
initial_value=40e-9)
self.add_parameter(
'mw_modulation', vals=vals.Numbers(), unit='Hz',
docstring=('Modulation frequency for qubit driving pulses. Note'
                       ' that when using an AWG with built-in modulation this'
' should be set to 0.'),
parameter_class=ManualParameter, initial_value=50.0e6)
self._add_mixer_corr_pars()
self.add_parameter('mw_ef_modulation', vals=vals.Numbers(), unit='Hz',
docstring=('Modulation frequency for driving pulses to the '
'second excited-state.'),
parameter_class=ManualParameter, initial_value=50.0e6)
self.add_parameter('mw_ef_amp180', unit='frac',
docstring=(
'Pulse amplitude for pulsing the ef/12 transition'),
vals=vals.Numbers(-1, 1),
parameter_class=ManualParameter, initial_value=.2)
def _add_mixer_corr_pars(self):
self.add_parameter('mixer_alpha', vals=vals.Numbers(),
parameter_class=ManualParameter,
initial_value=1.0)
self.add_parameter('mixer_phi', vals=vals.Numbers(), unit='deg',
parameter_class=ManualParameter,
initial_value=0.0)
self.add_parameter(
'mixer_apply_predistortion_matrix', vals=vals.Bool(), docstring=(
'If True applies a mixer correction using mixer_phi and '
                'mixer_alpha to all microwave pulses.'),
parameter_class=ManualParameter, initial_value=True)
def _add_channel_params(self):
self.add_parameter('channel_I',
parameter_class=ManualParameter,
vals=vals.Numbers(1, self._num_channels))
self.add_parameter('channel_Q',
parameter_class=ManualParameter,
vals=vals.Numbers(1, self._num_channels))
def generate_standard_waveforms(
self, apply_predistortion_matrix: bool=True):
self._wave_dict = OrderedDict()
if self.cfg_sideband_mode() == 'static':
f_modulation = self.mw_modulation()
else:
f_modulation = 0
# lutmap is expected to obey lutmap mw schema
for idx, waveform in self.LutMap().items():
if waveform['type'] == 'ge':
if waveform['theta'] == 90:
amp = self.mw_amp180()*self.mw_amp90_scale()
elif waveform['theta'] == -90:
amp = - self.mw_amp180() * self.mw_amp90_scale()
else:
amp = theta_to_amp(theta=waveform['theta'],
amp180=self.mw_amp180())
self._wave_dict[idx] = self.wf_func(
amp=amp,
phase=waveform['phi'],
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(),
motzoi=self.mw_motzoi(),
delay=self.pulse_delay())
elif waveform['type'] == 'ef':
amp = theta_to_amp(theta=waveform['theta'],
amp180=self.mw_ef_amp180())
self._wave_dict[idx] = self.wf_func(
amp=amp,
phase=waveform['phi'],
sigma_length=self.mw_gauss_width(),
f_modulation=self.mw_ef_modulation(),
sampling_rate=self.sampling_rate(),
motzoi=0,
delay=self.pulse_delay())
elif waveform['type'] == 'raw-drag':
self._wave_dict[idx] = self.wf_func(
**waveform["drag_pars"])
elif waveform['type'] == 'spec':
self._wave_dict[idx] = self.spec_func(
amp=self.spec_amp(),
length=self.spec_length(),
sampling_rate=self.sampling_rate(),
delay=0,
phase=0)
elif waveform['type'] == 'square':
                # Uses a slightly different construction than above because the
                # call signatures of these functions differ. Apparently the VSM
                # LutMan has both parameters, so detect on the one that is only
                # available in the VSM; otherwise we would not get the four
                # waveforms that the VSM needs.
if 'duration' in waveform.keys():
sq_pulse_duration = waveform['duration']
else:
sq_pulse_duration = self.sq_pulse_duration()
if 'sq_G_amp' in self.parameters:
self._wave_dict[idx] = wf.mod_square_VSM(
amp_G=self.sq_G_amp(), amp_D=self.sq_D_amp(),
length=sq_pulse_duration,#self.mw_gauss_width()*4,
f_modulation=self.mw_modulation() if self.cfg_sideband_mode()!='real-time' else 0,
sampling_rate=self.sampling_rate())
elif 'sq_amp' in self.parameters:
self._wave_dict[idx] = wf.mod_square(
amp=self.sq_amp(), length=sq_pulse_duration,
f_modulation=self.mw_modulation() if self.cfg_sideband_mode()!='real-time' else 0,
phase=0, motzoi=0, sampling_rate=self.sampling_rate())
else:
raise KeyError('Expected parameter "sq_amp" to exist')
else:
raise ValueError
# Add predistortions + test
if (self.mixer_apply_predistortion_matrix()
                and apply_predistortion_matrix and self.cfg_sideband_mode() != 'real-time'):
self._wave_dict = self.apply_mixer_predistortion_corrections(
self._wave_dict)
return self._wave_dict
def apply_mixer_predistortion_corrections(self, wave_dict):
M = wf.mixer_predistortion_matrix(self.mixer_alpha(),
self.mixer_phi())
for key, val in wave_dict.items():
wave_dict[key] = np.dot(M, val)
return wave_dict
def load_waveform_onto_AWG_lookuptable(self, waveform_name: str,
regenerate_waveforms: bool=False):
if regenerate_waveforms:
self.generate_standard_waveforms()
# FIXME: type mismatch with function parameter, misleading name
if isinstance(waveform_name, int):
cw_idx = waveform_name
else:
raise DeprecationWarning
waveforms = self._wave_dict[cw_idx]
codewords = self.codeword_idx_to_parnames(cw_idx)
for waveform, cw in zip(waveforms, codewords):
self.AWG.get_instr().set(cw, waveform)
def load_phase_pulses_to_AWG_lookuptable(self,
phases=np.arange(0, 360, 20)):
"""
Loads rPhi90 pulses onto the AWG lookuptable.
"""
if (len(phases) > 18):
            raise ValueError('max 18 phase values can be provided')
lm = self.LutMap()
for i, (phase) in enumerate(phases):
lm[i+9] = {"name": "rPhi90", "theta": 90,
"phi": phase, "type": "ge"}
self.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True)
def load_x_pulses_to_AWG_lookuptable(self,
phases=np.arange(0, 360, 20)):
"""
        Loads rX pulses with varying rotation angles onto the AWG lookuptable.
"""
if (len(phases) > 18):
            raise ValueError('max 18 angle values can be provided')
lm = self.LutMap()
for i, (phase) in enumerate(phases):
lm[i+9] = {"name": "rPhi90", "theta": phase,
"phi": 0, "type": "ge"}
self.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True)
def load_square_waves_to_AWG_lookuptable(self):
"""
Loads square pulses onto the AWG lookuptable.
"""
self.set_default_lutmap()
lm = self.LutMap()
lm[10] = {"name": "square",
"type": "square",
"duration": 1e-6}
lm[11] = {"name": "cw_11",
"type": "square"}
for i in range(12,21):
div = i-12
lm[i] = {"name": "cw_{}".format(i),
"type": "square",
"duration": 40e-9*(i-11)/10}
self.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True)
def load_ef_rabi_pulses_to_AWG_lookuptable(self, amps: list=None,
mod_freqs: list=None):
"""
Special loading method that loads (up to) 18 pulses in
order to do a rabi on the ef (1-2) transition.
This method also generates the waveforms.
This method contains several steps
1. determine what ef-pulses to generate
2. generate a LutMap to use and upload the waveforms
3. generate and upload waveforms.
"""
# 1. Determine what ef-pulses to generate
if not isinstance(amps, Iterable) and (mod_freqs is None):
amps = [self.mw_ef_amp180()]
elif len(amps) == 1:
            amps = list(amps) * len(mod_freqs)
if (len(amps) > 18):
raise ValueError('max 18 amplitude values can be provided')
if mod_freqs is None:
mod_freqs = [self.mw_ef_modulation()]*len(amps)
elif len(mod_freqs) == 1:
            mod_freqs = list(mod_freqs) * len(amps)
# 2. Generate a LutMap for the ef-pulses
lm = self.LutMap()
for i, (amp, mod_freq) in enumerate(zip(amps, mod_freqs)):
lm[i+9] = {"name": "", "type": "raw-drag",
"drag_pars": {
"amp": amp, "f_modulation": mod_freq,
"sigma_length": self.mw_gauss_width(),
"sampling_rate": self.sampling_rate(),
"motzoi": 0}
}
# 3. generate and upload waveforms
self.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True)
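    # Typical use (illustrative; `mw_lutman` is a placeholder instance): sweep the
    # ef-pulse amplitude over up to 18 values, e.g.
    #   mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable(amps=np.linspace(0, 0.4, 18))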
class CBox_MW_LutMan(Base_MW_LutMan):
_def_lm = ['I', 'rX180', 'rY180', 'rX90', 'rY90',
'rXm90', 'rYm90', 'rPhi90', 'spec']
# use remaining codewords to set pi/2 gates for various angles
for i in range(18):
angle = i * 20
_def_lm.append('r{}_90'.format(angle))
def __init__(self, name, **kw):
super().__init__(name, **kw)
def _add_channel_params(self):
# CBox channels come in pairs defined in the AWG nr
self.add_parameter('awg_nr', parameter_class=ManualParameter,
initial_value=0, vals=vals.Numbers(0, 2))
def load_waveform_onto_AWG_lookuptable(self, waveform_name: str,
regenerate_waveforms: bool=False):
if regenerate_waveforms:
self.generate_standard_waveforms()
I_wave, Q_wave = self._wave_dict[waveform_name]
codeword = self.LutMap()[waveform_name]
self.AWG.get_instr().set_awg_lookuptable(self.awg_nr(),
codeword, 0, I_wave)
self.AWG.get_instr().set_awg_lookuptable(self.awg_nr(),
codeword, 1, Q_wave)
def set_default_lutmap(self):
"""
        Sets the default lutmap for standard microwave drive pulses.
"""
def_lm = self._def_lm
LutMap = OrderedDict()
for cw_idx, cw_key in enumerate(def_lm):
max_cw_cbox = 8
if cw_idx < max_cw_cbox:
LutMap[cw_key] = cw_idx
self.LutMap(LutMap)
class QWG_MW_LutMan(Base_MW_LutMan):
def __init__(self, name, **kw):
self._num_channels = 4
super().__init__(name, **kw)
def _add_channel_params(self):
super()._add_channel_params()
self.add_parameter('channel_amp',
unit='a.u.',
vals=vals.Numbers(-1.8, 1.8),
set_cmd=self._set_channel_amp,
get_cmd=self._get_channel_amp,
                           docstring=('Using the channel amp as an additional '
                                      'parameter to allow Rabi-type experiments '
                                      'without wave reloading. Should not be used with a VSM.'))
# parameters related to codeword bits
self.add_parameter('bit_shift', unit='', vals=vals.Ints(0, 8),
parameter_class=ManualParameter,
initial_value=0)
self.add_parameter('bit_width', unit='', vals=vals.Ints(0, 8),
parameter_class=ManualParameter,
initial_value=0)
def _add_waveform_parameters(self):
super()._add_waveform_parameters()
# Parameters for a square pulse
self.add_parameter('sq_amp',
unit='frac', vals=vals.Numbers(-1, 1),
parameter_class=ManualParameter,
initial_value=0.5)
def _set_channel_amp(self, val):
AWG = self.AWG.get_instr()
AWG.set('ch{}_amp'.format(self.channel_I()), val)
AWG.set('ch{}_amp'.format(self.channel_Q()), val)
def _get_channel_amp(self):
AWG = self.AWG.get_instr()
val_I = AWG.get('ch{}_amp'.format(self.channel_I()))
val_Q = AWG.get('ch{}_amp'.format(self.channel_Q()))
assert val_Q == val_I
return val_I
def load_waveform_onto_AWG_lookuptable(
self, wave_id: str, regenerate_waveforms: bool=False):
"""
Load a waveform into the AWG.
Args:
wave_id: can be either the "name" of a waveform or
the integer key in self._wave_dict.
regenerate_waveforms (bool) : if True regenerates all waveforms
"""
if regenerate_waveforms:
self.generate_standard_waveforms()
if wave_id not in self.LutMap().keys():
wave_id = get_wf_idx_from_name(wave_id, self.LutMap())
wf_I, wf_Q = self._wave_dict[wave_id]
wf_name_I = 'wave_ch{}_cw{:03}'.format(self.channel_I(), wave_id)
wf_name_Q = 'wave_ch{}_cw{:03}'.format(self.channel_Q(), wave_id)
self.AWG.get_instr().set(wf_name_I, wf_I)
self.AWG.get_instr().set(wf_name_Q, wf_Q)
def apply_mixer_predistortion_corrections(self, wave_dict):
M = wf.mixer_predistortion_matrix(self.mixer_alpha(),
self.mixer_phi())
self.AWG.get_instr().set(
'ch_pair{}_transform_matrix'.format(self.channel_I()), M)
# in the QWG the predistortion matrix is implemented in hardware
# and does not modify the actual wave dict.
# the wave dict is still returned unmodified to preserve the
# call signature as required for HDAWG compatibility.
return wave_dict
class AWG8_MW_LutMan(Base_MW_LutMan):
def __init__(self, name, **kw):
self._num_channels = 8
super().__init__(name, **kw)
self.sampling_rate(2.4e9)
self._add_phase_correction_parameters()
def _add_channel_params(self):
super()._add_channel_params()
self.add_parameter(
'channel_amp', unit='a.u.', vals=vals.Numbers(0, 1),
set_cmd=self._set_channel_amp, get_cmd=self._get_channel_amp,
            docstring=('Using the channel amp as an additional '
                       'parameter to allow Rabi-type experiments '
                       'without wave reloading. Should not be used with a VSM.'))
self.add_parameter(
'channel_range', unit='V', vals=vals.Enum(0.2, 0.4, 0.6, 0.8, 1, 2, 3, 4, 5),
set_cmd=self._set_channel_range, get_cmd=self._get_channel_range,
docstring=('defines the channel range for the AWG sequencer output'))
# Setting variable to track channel amplitude since it cannot be directly extracted from
# HDAWG while using real-time modulation (because of mixer amplitude imbalance corrections)
self.channel_amp_value = 0
def _add_waveform_parameters(self):
super()._add_waveform_parameters()
# Parameters for a square pulse
self.add_parameter('sq_amp', unit='frac', vals=vals.Numbers(-1, 1),
parameter_class=ManualParameter,
initial_value=0.5)
def _add_phase_correction_parameters(self):
# corrections for phases that the qubit can acquire during one of its CZ gates
for gate in ['NW','NE','SW','SE']:
self.add_parameter(
name=f'vcz_virtual_q_ph_corr_{gate}',
parameter_class=ManualParameter,
unit='deg',
vals=vals.Numbers(-360, 360),
initial_value=0.0,
docstring=f"Virtual phase correction for two-qubit gate in {gate}-direction."
"Will be applied as increment to sine generator phases via command table."
)
# corrections for phases that the qubit can acquire during parking as spectator of a CZ gate.
# this can happen in general for each of its neighbouring qubits (below: 'direction'),
# while it is doing a gate in each possible direction (below: 'gate')
# for direction in ['NW','NE','SW','SE']:
# for gate in ['NW','NE','SW','SE']:
# self.add_parameter(
# name=f'vcz_virtual_q_ph_corr_spec_{direction}_gate_{gate}',
# parameter_class=ManualParameter,
# unit='deg',
# vals=vals.Numbers(0, 360),
# initial_value=0.0,
# docstring=f"Virtual phase correction for parking as spectator of a qubit in direction {direction}, "
# f"that is doing a gate in direction {gate}."
# "Will be applied as increment to sine generator phases via command table."
# )
# corrections for phases that the qubit can acquire during parking as part of a flux-dance step
# there are 8 flux-dance steps for the S17 scheme.
# NOTE: this correction must not be the same as the above one for the case of a spectator
# for a single CZ, because in a flux-dance the qubit can be parked because of multiple adjacent CZ gates
# for step in np.arange(1,9):
# self.add_parameter(
# name=f'vcz_virtual_q_ph_corr_step_{step}',
# parameter_class=ManualParameter,
# unit='deg',
# vals=vals.Numbers(0, 360),
# initial_value=0.0,
# docstring=f"Virtual phase correction for parking in flux-dance step {step}."
# "Will be applied as increment to sine generator phases via command table."
# )
def _set_channel_range(self, val):
awg_nr = (self.channel_I()-1)//2
assert awg_nr == (self.channel_Q()-1)//2
assert self.channel_I() < self.channel_Q()
AWG = self.AWG.get_instr()
if val == 0.8:
AWG.set('sigouts_{}_range'.format(self.channel_I()-1), .8)
AWG.set('sigouts_{}_direct'.format(self.channel_I()-1), 1)
AWG.set('sigouts_{}_range'.format(self.channel_Q()-1), .8)
AWG.set('sigouts_{}_direct'.format(self.channel_Q()-1), 1)
else:
AWG.set('sigouts_{}_direct'.format(self.channel_I()-1), 0)
AWG.set('sigouts_{}_range'.format(self.channel_I()-1), val)
AWG.set('sigouts_{}_direct'.format(self.channel_Q()-1), 0)
AWG.set('sigouts_{}_range'.format(self.channel_Q()-1), val)
def _get_channel_range(self):
awg_nr = (self.channel_I()-1)//2
assert awg_nr == (self.channel_Q()-1)//2
assert self.channel_I() < self.channel_Q()
        AWG = self.AWG.get_instr()
        val = AWG.get('sigouts_{}_range'.format(self.channel_I()-1))
assert val == AWG.get('sigouts_{}_range'.format(self.channel_Q()-1))
return val
def _set_channel_amp(self, val):
AWG = self.AWG.get_instr()
awg_nr = (self.channel_I()-1)//2
        # Enforce assumption that channel I precedes channel Q and that they share an AWG
assert awg_nr == (self.channel_Q()-1)//2
assert self.channel_I() < self.channel_Q()
self.channel_amp_value = val
if self.cfg_sideband_mode() == 'static':
AWG.set('awgs_{}_outputs_{}_gains_0'.format(awg_nr, 0), val)
AWG.set('awgs_{}_outputs_{}_gains_0'.format(awg_nr, 1), 0)
AWG.set('awgs_{}_outputs_{}_gains_1'.format(awg_nr, 0), 0)
AWG.set('awgs_{}_outputs_{}_gains_1'.format(awg_nr, 1), val)
# In case of sideband modulation mode 'real-time', amplitudes have to be set
# according to modulation matrix
elif self.cfg_sideband_mode() == 'real-time':
g0 = np.tan(np.radians(self.mixer_phi()))
g1 = self.mixer_alpha()*1/np.cos(np.radians(self.mixer_phi()))
if np.abs(val*g0) > 1.0 or np.abs(val*g1) > 1.0:
raise Exception('Resulting amplitude from mixer parameters '+\
'exceed the maximum channel amplitude')
# print('Resulting amplitude from mixer parameters '+\
# 'exceed the maximum channel amplitude')
# if np.abs(val*g0):
# g0 = 1/val
# if np.abs(val*g1):
# g1 = 1/val
AWG.set('awgs_{}_outputs_0_gains_0'.format(awg_nr), val)
AWG.set('awgs_{}_outputs_1_gains_0'.format(awg_nr), 0)
AWG.set('awgs_{}_outputs_0_gains_1'.format(awg_nr), val*g0)
AWG.set('awgs_{}_outputs_1_gains_1'.format(awg_nr), val*g1)
else:
raise KeyError('Unexpected value for parameter sideband mode.')
def _get_channel_amp(self):
AWG = self.AWG.get_instr()
awg_nr = (self.channel_I()-1)//2
# Enforce assumption that channel I precedes channel Q and share AWG
assert awg_nr == (self.channel_Q()-1)//2
assert self.channel_I() < self.channel_Q()
vals = []
if self.cfg_sideband_mode() == 'static':
vals.append(AWG.get('awgs_{}_outputs_{}_gains_0'.format(awg_nr, 0)))
vals.append(AWG.get('awgs_{}_outputs_{}_gains_1'.format(awg_nr, 0)))
vals.append(AWG.get('awgs_{}_outputs_{}_gains_0'.format(awg_nr, 1)))
vals.append(AWG.get('awgs_{}_outputs_{}_gains_1'.format(awg_nr, 1)))
            assert vals[0] == vals[3]
assert vals[1]==vals[2]==0
# In case of sideband modulation mode 'real-time', amplitudes have to be set
# according to modulation matrix
elif self.cfg_sideband_mode() == 'real-time':
vals.append(self.channel_amp_value)
return vals[0]
def load_waveform_onto_AWG_lookuptable(
self, wave_id: str, regenerate_waveforms: bool=False):
"""
Load a waveform into the AWG.
Args:
wave_id: can be either the "name" of a waveform or
the integer key in self._wave_dict.
regenerate_waveforms (bool) : if True regenerates all waveforms
"""
if regenerate_waveforms:
self.generate_standard_waveforms()
if wave_id not in self.LutMap().keys():
wave_id = get_wf_idx_from_name(wave_id, self.LutMap())
wf_I, wf_Q = self._wave_dict[wave_id]
wf_name_I = 'wave_ch{}_cw{:03}'.format(self.channel_I(), wave_id)
wf_name_Q = 'wave_ch{}_cw{:03}'.format(self.channel_Q(), wave_id)
self.AWG.get_instr().set(wf_name_I, wf_I)
self.AWG.get_instr().set(wf_name_Q, wf_Q)
def load_waveforms_onto_AWG_lookuptable(
self, regenerate_waveforms: bool=True, stop_start: bool = True,
force_load_sequencer_program: bool=False):
"""
Loads all waveforms specified in the LutMap to an AWG.
Args:
regenerate_waveforms (bool): if True calls
generate_standard_waveforms before uploading.
stop_start (bool): if True stops and starts the AWG.
force_load_sequencer_program (bool): if True forces a new compilation
and upload of the program on the sequencer.
"""
# Uploading the codeword program (again) is needed to link the new
# waveforms in case the user has changed the codeword mode.
if force_load_sequencer_program:
# This ensures only the channels that are relevant get reconfigured
if 'channel_GI' in self.parameters:
awgs = [self.channel_GI()//2, self.channel_DI()//2]
else:
awgs = [self.channel_I()//2]
# Enforce assumption that channel I precedes channel Q
assert self.channel_I() < self.channel_Q()
assert (self.channel_I())//2 < (self.channel_Q())//2
self.AWG.get_instr().upload_codeword_program(awgs=awgs)
# This ensures that settings other than the sequencer program are updated
# for different sideband modulation modes
if self.cfg_sideband_mode() == 'static':
self.AWG.get_instr().cfg_sideband_mode('static')
# Turn off modulation modes
self.AWG.get_instr().set('awgs_{}_outputs_0_modulation_mode'.format((self.channel_I()-1)//2), 0)
self.AWG.get_instr().set('awgs_{}_outputs_1_modulation_mode'.format((self.channel_Q()-1)//2), 0)
elif self.cfg_sideband_mode() == 'real-time':
if (self.channel_I()-1)//2 != (self.channel_Q()-1)//2:
raise KeyError('In real-time sideband mode, channel I/Q should share same awg nr.')
self.AWG.get_instr().cfg_sideband_mode('real-time')
# Set same oscillator for I/Q pair and same harmonic
self.AWG.get_instr().set('sines_{}_oscselect'.format(self.channel_I()-1), (self.channel_I()-1)//2)
self.AWG.get_instr().set('sines_{}_oscselect'.format(self.channel_Q()-1), (self.channel_I()-1)//2)
self.AWG.get_instr().set('sines_{}_harmonic'.format(self.channel_I()-1), 1)
self.AWG.get_instr().set('sines_{}_harmonic'.format(self.channel_Q()-1), 1)
            # Create the respective cosine/sine signals for modulation through phase-shift
self.AWG.get_instr().set('sines_{}_phaseshift'.format(self.channel_I()-1), 90)
self.AWG.get_instr().set('sines_{}_phaseshift'.format(self.channel_Q()-1), 0)
            # Select the correct modulation mode
self.AWG.get_instr().set('awgs_{}_outputs_0_modulation_mode'.format((self.channel_I()-1)//2), 6)
self.AWG.get_instr().set('awgs_{}_outputs_1_modulation_mode'.format((self.channel_Q()-1)//2), 6)
else:
raise ValueError('Unexpected value for parameter cfg_sideband_mode.')
super().load_waveforms_onto_AWG_lookuptable(
regenerate_waveforms=regenerate_waveforms,
stop_start=stop_start)
def generate_standard_waveforms(
self, apply_predistortion_matrix: bool=True):
self._wave_dict = OrderedDict()
if self.cfg_sideband_mode() == 'static':
f_modulation = self.mw_modulation()
elif self.cfg_sideband_mode() == 'real-time':
f_modulation = 0
if ((self.channel_I()-1)//2 != (self.channel_Q()-1)//2):
raise KeyError('In real-time sideband mode, channel I/Q should share same awg group.')
self.AWG.get_instr().set('oscs_{}_freq'.format((self.channel_I()-1)//2),
self.mw_modulation())
else:
raise KeyError('Unexpected argument for cfg_sideband_mode')
# lutmap is expected to obey lutmap mw schema
for idx, waveform in self.LutMap().items():
if waveform['type'] == 'ge':
if waveform['theta'] == 90:
amp = self.mw_amp180()*self.mw_amp90_scale()
elif waveform['theta'] == -90:
amp = - self.mw_amp180() * self.mw_amp90_scale()
else:
amp = theta_to_amp(theta=waveform['theta'],
amp180=self.mw_amp180())
self._wave_dict[idx] = self.wf_func(
amp=amp,
phase=waveform['phi'],
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(),
motzoi=self.mw_motzoi(),
delay=self.pulse_delay())
elif waveform['type'] == 'ef':
amp = theta_to_amp(theta=waveform['theta'],
amp180=self.mw_ef_amp180())
self._wave_dict[idx] = self.wf_func(
amp=amp,
phase=waveform['phi'],
sigma_length=self.mw_gauss_width(),
f_modulation=self.mw_ef_modulation(),
sampling_rate=self.sampling_rate(),
motzoi=0,
delay=self.pulse_delay())
elif waveform['type'] == 'raw-drag':
self._wave_dict[idx] = self.wf_func(
**waveform["drag_pars"])
elif waveform['type'] == 'spec':
self._wave_dict[idx] = self.spec_func(
amp=self.spec_amp(),
length=self.spec_length(),
sampling_rate=self.sampling_rate(),
delay=0,
phase=0)
elif waveform['type'] == 'square':
                # Uses a slightly different construction than above because the
                # call signatures of these functions differ. Apparently the VSM
                # LutMan has both parameters, so detect on the one that is only
                # available in the VSM; otherwise we would not get the four
                # waveforms that the VSM needs.
if 'sq_G_amp' in self.parameters:
self._wave_dict[idx] = wf.mod_square_VSM(
amp_G=self.sq_G_amp(), amp_D=self.sq_D_amp(),
length=self.mw_gauss_width()*4,
f_modulation=self.mw_modulation() if self.cfg_sideband_mode()!='real-time' else 0,
sampling_rate=self.sampling_rate())
elif 'sq_amp' in self.parameters:
self._wave_dict[idx] = wf.mod_square(
amp=self.sq_amp(), length=self.mw_gauss_width()*4,
f_modulation=self.mw_modulation() if self.cfg_sideband_mode()!='real-time' else 0,
phase=0, motzoi=0, sampling_rate=self.sampling_rate())
else:
raise KeyError('Expected parameter "sq_amp" to exist')
elif waveform['type'] == 'phase':
# fill codewords that are used for phase correction instructions
# with a zero waveform
self._wave_dict[idx] = wf.block_pulse(
amp=0,
sampling_rate=self.sampling_rate(),
length=self.mw_gauss_width()*4,
)
else:
raise ValueError
# Add predistortions + test
if (self.mixer_apply_predistortion_matrix() and apply_predistortion_matrix and
self.cfg_sideband_mode() == 'static'):
self._wave_dict = self.apply_mixer_predistortion_corrections(
self._wave_dict)
return self._wave_dict
def apply_mixer_predistortion_corrections(self, wave_dict):
M = wf.mixer_predistortion_matrix(self.mixer_alpha(), self.mixer_phi())
for key, val in wave_dict.items():
wave_dict[key] = np.dot(M, val)
return wave_dict
def upload_single_qubit_phase_corrections(self):
commandtable_dict = {
"$schema": "http://docs.zhinst.com/hdawg/commandtable/v2/schema",
"header": { "version": "0.2" },
"table": []
}
# manual waveform index 1-to-1 mapping
for ind in np.arange(0,60,1):
commandtable_dict['table'] += [{"index": int(ind),
"waveform": {"index": int(ind)}
}]
# add phase corrections to the end of the codeword space
phase_corr_inds = np.arange(60,64,1)
for i,d in enumerate(['NW','NE','SW','SE']):
phase = self.parameters[f"vcz_virtual_q_ph_corr_{d}"]()
commandtable_dict['table'] += [{"index": int(phase_corr_inds[i]),
"phase0": {"value": float(phase), "increment": True},
"phase1": {"value": float(phase), "increment": True}
}]
        # Note: whenever the command table is used, the phase offset between the I and Q
        # channels on the HDAWG for real-time modulation has to be set from an entry in the
        # table. Index 1023 is used here because it is unused for codeword triggering.
commandtable_dict['table'] += [{"index": 1023,
"phase0": {"value": 90.0, "increment": False},
"phase1": {"value": 0.0, "increment": False}
}]
# get internal awg sequencer number (indexed 0,1,2,3)
awg_nr = (self.channel_I()-1) // 2
commandtable_returned, status = self.AWG.get_instr().upload_commandtable(commandtable_dict, awg_nr)
return commandtable_returned, status
class AWG8_VSM_MW_LutMan(AWG8_MW_LutMan):
def __init__(self, name, **kw):
self._num_channels = 8
super().__init__(name, **kw)
self.wf_func = wf.mod_gauss_VSM
self.spec_func = wf.block_pulse_vsm
def _add_waveform_parameters(self):
super()._add_waveform_parameters()
# Base_MW_LutMan._add_waveform_parameters(self)
# Parameters for a square pulse
self.add_parameter('sq_G_amp', unit='frac', vals=vals.Numbers(-1, 1),
parameter_class=ManualParameter,
initial_value=0.5)
self.add_parameter('sq_D_amp', unit='frac', vals=vals.Numbers(-1, 1),
parameter_class=ManualParameter,
initial_value=0)
def _add_channel_params(self):
self.add_parameter(
'channel_amp', unit='a.u.', vals=vals.Numbers(0, 1),
set_cmd=self._set_channel_amp,
get_cmd=self._get_channel_amp,
docstring=('using the channel amp as additional'
'parameter to allow rabi-type experiments without'
'wave reloading. Should not be using VSM'))
for ch in ['GI', 'GQ', 'DI', 'DQ']:
self.add_parameter(
'channel_{}'.format(ch), parameter_class=ManualParameter,
vals=vals.Numbers(1, self._num_channels))
def load_waveform_onto_AWG_lookuptable(
self, wave_id: str, regenerate_waveforms: bool=False):
"""
Load a waveform into the AWG.
Args:
wave_id: can be either the "name" of a waveform or
the integer key in self._wave_dict.
regenerate_waveforms (bool) : if True regenerates all waveforms
"""
if regenerate_waveforms:
self.generate_standard_waveforms()
if wave_id not in self.LutMap().keys():
wave_id = get_wf_idx_from_name(wave_id, self.LutMap())
GI, GQ, DI, DQ = self._wave_dict[wave_id]
wf_name_GI = 'wave_ch{}_cw{:03}'.format(self.channel_GI(), wave_id)
wf_name_GQ = 'wave_ch{}_cw{:03}'.format(self.channel_GQ(), wave_id)
wf_name_DI = 'wave_ch{}_cw{:03}'.format(self.channel_DI(), wave_id)
wf_name_DQ = 'wave_ch{}_cw{:03}'.format(self.channel_DQ(), wave_id)
self.AWG.get_instr().set(wf_name_GI, GI)
self.AWG.get_instr().set(wf_name_GQ, GQ)
self.AWG.get_instr().set(wf_name_DI, DI)
self.AWG.get_instr().set(wf_name_DQ, DQ)
def _set_channel_amp(self, val):
AWG = self.AWG.get_instr()
for awg_ch in [self.channel_GI(), self.channel_GQ(),
self.channel_DI(), self.channel_DQ()]:
awg_nr = (awg_ch-1)//2
ch_pair = (awg_ch-1) % 2
AWG.set('awgs_{}_outputs_{}_amplitude'.format(awg_nr, ch_pair), val)
def _get_channel_amp(self):
AWG = self.AWG.get_instr()
vals = []
for awg_ch in [self.channel_GI(), self.channel_GQ(),
self.channel_DI(), self.channel_DQ()]:
awg_nr = (awg_ch-1)//2
ch_pair = (awg_ch-1) % 2
vals.append(
AWG.get('awgs_{}_outputs_{}_amplitude'.format(awg_nr, ch_pair)))
assert vals[0] == vals[1] == vals[2] == vals[3]
return vals[0]
def _add_mixer_corr_pars(self):
self.add_parameter('G_mixer_alpha', vals=vals.Numbers(),
parameter_class=ManualParameter,
initial_value=1.0)
self.add_parameter('G_mixer_phi', vals=vals.Numbers(), unit='deg',
parameter_class=ManualParameter,
initial_value=0.0)
self.add_parameter('D_mixer_alpha', vals=vals.Numbers(),
parameter_class=ManualParameter,
initial_value=1.0)
self.add_parameter('D_mixer_phi', vals=vals.Numbers(), unit='deg',
parameter_class=ManualParameter,
initial_value=0.0)
self.add_parameter(
'mixer_apply_predistortion_matrix', vals=vals.Bool(), docstring=(
'If True applies a mixer correction using mixer_phi and '
'mixer_alpha to all microwave pulses using.'),
parameter_class=ManualParameter, initial_value=True)
def apply_mixer_predistortion_corrections(self, wave_dict):
M_G = wf.mixer_predistortion_matrix(self.G_mixer_alpha(),
self.G_mixer_phi())
M_D = wf.mixer_predistortion_matrix(self.D_mixer_alpha(),
self.D_mixer_phi())
for key, val in wave_dict.items():
GI, GQ = np.dot(M_G, val[0:2]) # Mixer correction Gaussian comp.
DI, DQ = np.dot(M_D, val[2:4]) # Mixer correction Derivative comp.
wave_dict[key] = GI, GQ, DI, DQ
return wave_dict
class QWG_MW_LutMan_VQE(QWG_MW_LutMan):
def __init__(self, name, **kw):
"""
Waveform allocation strategy for VQE.
|q0> ---- Y ----x---- T1' --- M
|
|q1> -----------x---- T2 ---- M
Waveform table for |q0>
+------+------+---------------+-------------+--------------------+
| Wave | Gate | Amp,phase | Used in VQE | Used for full-tomo |
+------+------+---------------+-------------+--------------------+
| 0 | I | 0,0 | X | |
| 1 | X' | pi,phi | X | |
| 2 | Y' | pi,phi | | X |
| 3 | x' | pi/2,phi | X | |
| 4 | -x' | -pi/2,phi | X | |
| 5 | y' | pi/2,phi+90 | X | |
| 6 | -y' | -pi/2,phi+90 | X | |
| 7 | Y | pi,90 | X | |
| 8 | | | | |
+------+------+---------------+-------------+--------------------+
# NOTE: prime stands for gate compiled along with Z(phi).
Waveform table for |q1>
+------+------+---------------+-------------+--------------------+
| Wave | Gate | Amp,phase | Used in VQE | Used for full-tomo |
+------+------+---------------+-------------+--------------------+
| 0 | I | 0,0 | X | |
| 1 | X | pi,0 | X | |
| 2 | Y | pi,0 | | X |
| 3 | x | pi/2,0 | X | |
| 4 | -x | -pi/2,0 | X | |
| 5 | y | pi/2,90 | X | |
| 6 | -y | -pi/2,90 | X | |
| 7 | | | | |
| 8 | | | | |
+------+------+---------------+-------------+--------------------+
"""
self._num_channels = 4
super().__init__(name, **kw)
# sacrifices last pulse 'Spec' from std list to have 3 bit (8)
self._def_lm = ['I', 'rX180', 'rY180', 'rX90',
'rY90', 'rXm90', 'rYm90', 'rPhi90']
self.set_default_lutmap()
        self._vqe_lm = ['I', 'X180c', 'Y180c',
                        'X90c', 'Xm90c', 'Y90c', 'Ym90c', 'rY180']
def set_VQE_lutmap(self):
"""
        Sets the default lutmap for standard microwave drive pulses.
"""
vqe_lm = self._vqe_lm
LutMap = {}
for cw_idx, cw_key in enumerate(vqe_lm):
LutMap[cw_key] = (
'wave_ch{}_cw{:03}'.format(self.channel_I(), cw_idx),
'wave_ch{}_cw{:03}'.format(self.channel_Q(), cw_idx))
self.LutMap(LutMap)
def _add_waveform_parameters(self):
super()._add_waveform_parameters()
# parameters related to codeword bits
self.add_parameter('bit_shift', unit='', vals=vals.Ints(0, 4),
parameter_class=ManualParameter,
initial_value=0)
self.add_parameter('bit_width', unit='', vals=vals.Ints(0, 4),
parameter_class=ManualParameter,
initial_value=0)
# parameters related to phase compilation
        self.add_parameter('phi', unit='deg', vals=vals.Numbers(0, 360),
parameter_class=ManualParameter,
initial_value=0)
# parameters related to timings
self.add_parameter('pulse_delay', unit='s', vals=vals.Numbers(0, 1e-6),
parameter_class=ManualParameter,
initial_value=0)
def generate_standard_waveforms(self):
self._wave_dict = {}
if self.cfg_sideband_mode() == 'static':
f_modulation = self.mw_modulation()
else:
f_modulation = 0
self.AWG.get_instr().set('ch_pair{}_sideband_frequency'.format(self.channel_I()),
self.mw_modulation())
self.AWG.get_instr().syncSidebandGenerators()
########################################
# STD waveforms
########################################
self._wave_dict['I'] = self.wf_func(
amp=0, sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=0,
motzoi=0, delay=self.pulse_delay())
self._wave_dict['rX180'] = self.wf_func(
amp=self.mw_amp180(), sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=0,
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rY180'] = self.wf_func(
amp=self.mw_amp180(), sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=90,
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rX90'] = self.wf_func(
amp=self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=0,
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rY90'] = self.wf_func(
amp=self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=90,
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rXm90'] = self.wf_func(
amp=-1*self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=0,
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rYm90'] = self.wf_func(
amp=-1*self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=90,
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rPhi180'] = self.wf_func(
amp=self.mw_amp180(), sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=self.mw_phi(),
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rPhi90'] = self.wf_func(
amp=self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=self.mw_phi(),
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rPhim90'] = self.wf_func(
amp=-1*self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=self.mw_phi(),
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['spec'] = self.spec_func(
amp=self.spec_amp(),
length=self.spec_length(),
sampling_rate=self.sampling_rate(),
delay=0,
phase=0)
for i in range(18):
angle = i * 20
self._wave_dict['r{}_90'.format(angle)] = self.wf_func(
amp=self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=angle,
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
########################################
# compiled waveforms
########################################
self._wave_dict['X180c'] = self.wf_func(
amp=self.mw_amp180(), sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=self.phi(),
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rY180'] = self.wf_func(
amp=self.mw_amp180(), sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=90,
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rY180c'] = self.wf_func(
amp=self.mw_amp180(), sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=90+self.phi(),
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rX90c'] = self.wf_func(
amp=self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=self.phi(),
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rY90c'] = self.wf_func(
amp=self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=90+self.phi(),
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rXm90c'] = self.wf_func(
amp=-1*self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=self.phi(),
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
self._wave_dict['rYm90c'] = self.wf_func(
amp=-1*self.mw_amp180()*self.mw_amp90_scale(),
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(), phase=90+self.phi(),
motzoi=self.mw_motzoi(), delay=self.pulse_delay())
if self.mixer_apply_predistortion_matrix():
self._wave_dict = self.apply_mixer_predistortion_corrections(
self._wave_dict)
return self._wave_dict
def load_waveform_onto_AWG_lookuptable(self, waveform_name: str,
regenerate_waveforms: bool=False):
if regenerate_waveforms:
self.generate_standard_waveforms()
# waveform_name is pulse string (i.e. 'X180')
# waveforms is the tuple (G,D) with the actual pulseshape array
# codewords is the tuple (A,B) with the strings wave_chY_cwXXX
# codeword_int is the number XXX (see line above)
waveforms = self._wave_dict[waveform_name]
codewords = self.LutMap()[waveform_name]
codeword_int = int(codewords[0][-3:])
# get redundant codewords as a list
redundant_cw_list = get_redundant_codewords(codeword_int,
bit_width=self.bit_width(),
bit_shift=self.bit_shift())
# update all of them
for redundant_cw_idx in redundant_cw_list:
redundant_cw_I = 'wave_ch{}_cw{:03}'.format(self.channel_I(),
redundant_cw_idx)
self.AWG.get_instr().set(redundant_cw_I, waveforms[0])
redundant_cw_Q = 'wave_ch{}_cw{:03}'.format(self.channel_Q(),
redundant_cw_idx)
self.AWG.get_instr().set(redundant_cw_Q, waveforms[1])
# Not the cleanest inheritance but whatever - MAR Nov 2017
class QWG_VSM_MW_LutMan(AWG8_VSM_MW_LutMan):
def load_waveforms_onto_AWG_lookuptable(
self, regenerate_waveforms: bool=True, stop_start: bool = True):
AWG = self.AWG.get_instr()
if self.cfg_sideband_mode() == 'real-time':
AWG.ch_pair1_sideband_frequency(self.mw_modulation())
AWG.ch_pair3_sideband_frequency(self.mw_modulation())
else:
AWG.ch_pair1_sideband_frequency(0)
AWG.ch_pair3_sideband_frequency(0)
        for ch in range(1, 5):
            # ensure the amplitude specified in the waveform is used as-is by
            # fixing the channel amplitude (which acts as a multiplier) to 1
            AWG.set('ch{}_amp'.format(ch), 1)
return Base_MW_LutMan.load_waveforms_onto_AWG_lookuptable(
self=self,
regenerate_waveforms=regenerate_waveforms, stop_start=stop_start)
def _add_channel_params(self):
# all channels are used and hardcoded in functionality
pass
# def set_default_lutmap(self):
# """
# Set's the default lutmap for standard microwave drive pulses.
# """
# def_lm = self._def_lm
# LutMap = OrderedDict()
# for cw_idx, cw_key in enumerate(def_lm):
# LutMap[cw_key] = (
# 'wave_ch1_cw{:03}'.format(cw_idx),
# 'wave_ch2_cw{:03}'.format(cw_idx),
# 'wave_ch3_cw{:03}'.format(cw_idx),
# 'wave_ch4_cw{:03}'.format(cw_idx))
# self.LutMap(LutMap)
| StarcoderdataPython |
62425 | """
This is a simplified version to find statistical prevalence that counts instances and is numerically equivalent
to the confidence metric in association rules (# of occurrences / total occurrences).
"""
import csv
def count_stuff(filename):
"""
Counts instances and sorts them by prevalence
Parameters
----------
filename : string
A .csv file of a SQL query
Returns
-------
comb_sort
Returns a dictionary of function and flow combinations sorted by prevalence. The key is the
component and the value is a list of type: [function-flow, statistical prevalence]
"""
# Combinations of components, functions, and/or flows are stored in a dictionary with the first column
# as the key and the second column as the value
combos = {}
# Instances of each item in the columns are counted for later analysis
counts = {}
with open(filename, encoding='utf-8-sig') as input_file:
for row in csv.reader(input_file, delimiter=','):
# By convention, the first column is the component and the second column is the function and/or flow
comp = row[0]
func = row[1]
# Create a dictionary with a count of instances of each component
if comp not in counts:
counts[comp] = 1
else:
counts[comp] += 1
# Create a dictionary that tracks the number of times a component has a function and/or flow
if comp not in combos:
combos[comp] = {}
combos[comp][func] = 1
else:
if func not in combos[comp]:
combos[comp][func] = 1
else:
combos[comp][func] += 1
# (1) Convert the dictionary of a dictionary to a dictionary of lists for sorting then (2) divide the functions
# and/or flows for each component by the total number of component instances to get the percentage
# of each combination and (3) sort the dictionary by the percentages of each combination.
# (1) Convert
comb_sort = {}
for cs, fs in combos.items():
for k, v in combos[cs].items():
# (2) Divide
comb_sort.setdefault(cs, []).append([k, v / counts[cs]])
# (3) Sort
for k, v in comb_sort.items():
v.sort(key=lambda x: x[1], reverse=True)
return comb_sort
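# Illustrative usage sketch (added; not part of the original module). The CSV
# name below is hypothetical: any file with components in the first column and
# functions/flows in the second, as described in the docstring, would work.
if __name__ == '__main__':
    prevalence = count_stuff('component_function_flow.csv')
    for component, combos in prevalence.items():
        # each entry is [function-flow, share of that component's instances]
        for func_flow, share in combos:
            print('{}\t{}\t{:.2%}'.format(component, func_flow, share))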
| StarcoderdataPython |
3344267 | import lasagne.layers as L
import lasagne.nonlinearities as NL
import lasagne.init
import numpy as np
import theano.tensor as TT
from rllab.rllab.core.lasagne_layers import ParamLayer
from rllab.rllab.core.lasagne_powered import LasagnePowered
from rllab.rllab.core.network import GRUNetwork
from rllab.rllab.core.serializable import Serializable
from rllab.rllab.distributions.recurrent_diagonal_gaussian import RecurrentDiagonalGaussian
from rllab.rllab.misc import ext
from rllab.rllab.misc.overrides import overrides
from rllab.rllab.policies.base import StochasticPolicy
class GaussianGRUPolicy(StochasticPolicy, LasagnePowered):
def __init__(
self,
env_spec,
hidden_sizes=(32,),
state_include_action=True,
hidden_nonlinearity=NL.tanh,
learn_std=True,
init_std=1.0,
output_nonlinearity=None,
):
"""
:param env_spec: A spec for the env.
:param hidden_sizes: list of sizes for the fully connected hidden layers
:param hidden_nonlinearity: nonlinearity used for each hidden layer
:return:
"""
Serializable.quick_init(self, locals())
super(GaussianGRUPolicy, self).__init__(env_spec)
assert len(hidden_sizes) == 1
if state_include_action:
obs_dim = env_spec.observation_space.flat_dim + env_spec.action_space.flat_dim
else:
obs_dim = env_spec.observation_space.flat_dim
action_dim = env_spec.action_space.flat_dim
mean_network = GRUNetwork(
input_shape=(obs_dim,),
output_dim=action_dim,
hidden_dim=hidden_sizes[0],
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
)
l_mean = mean_network.output_layer
obs_var = mean_network.input_var
l_log_std = ParamLayer(
mean_network.input_layer,
num_units=action_dim,
param=lasagne.init.Constant(np.log(init_std)),
name="output_log_std",
trainable=learn_std,
)
l_step_log_std = ParamLayer(
mean_network.step_input_layer,
num_units=action_dim,
param=l_log_std.param,
name="step_output_log_std",
trainable=learn_std,
)
self._mean_network = mean_network
self._l_log_std = l_log_std
self._state_include_action = state_include_action
self._f_step_mean_std = ext.compile_function(
[
mean_network.step_input_layer.input_var,
mean_network.step_prev_hidden_layer.input_var
],
L.get_output([
mean_network.step_output_layer,
l_step_log_std,
mean_network.step_hidden_layer
])
)
self._prev_action = None
self._prev_hidden = None
self._hidden_sizes = hidden_sizes
self._dist = RecurrentDiagonalGaussian(action_dim)
self.reset()
LasagnePowered.__init__(self, [mean_network.output_layer, l_log_std])
@overrides
def dist_info_sym(self, obs_var, state_info_vars):
n_batches, n_steps = obs_var.shape[:2]
obs_var = obs_var.reshape((n_batches, n_steps, -1))
if self._state_include_action:
prev_action_var = state_info_vars["prev_action"]
all_input_var = TT.concatenate(
[obs_var, prev_action_var],
axis=2
)
else:
all_input_var = obs_var
means, log_stds = L.get_output([self._mean_network.output_layer, self._l_log_std], all_input_var)
return dict(mean=means, log_std=log_stds)
def reset(self):
self._prev_action = None
self._prev_hidden = self._mean_network.hid_init_param.get_value()
# The return value is a pair. The first item is a matrix (N, A), where each
# entry corresponds to the action value taken. The second item is a vector
# of length N, where each entry is the density value for that action, under
# the current policy
@overrides
def get_action(self, observation):
if self._state_include_action:
if self._prev_action is None:
prev_action = np.zeros((self.action_space.flat_dim,))
else:
prev_action = self.action_space.flatten(self._prev_action)
all_input = np.concatenate([
self.observation_space.flatten(observation),
prev_action
])
else:
all_input = self.observation_space.flatten(observation)
# should not be used
prev_action = np.nan
mean, log_std, hidden_vec = [x[0] for x in self._f_step_mean_std([all_input], [self._prev_hidden])]
rnd = np.random.normal(size=mean.shape)
action = rnd * np.exp(log_std) + mean
self._prev_action = action
self._prev_hidden = hidden_vec
agent_info = dict(mean=mean, log_std=log_std)
if self._state_include_action:
agent_info["prev_action"] = prev_action
return action, agent_info
@property
@overrides
def recurrent(self):
return True
@property
def distribution(self):
return self._dist
@property
def state_info_keys(self):
if self._state_include_action:
return ["prev_action"]
else:
return []
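# Illustrative rollout sketch (added; not part of rllab). It only shows the
# intended call pattern of the recurrent policy; `env` and `env_spec` are
# assumed to come from an rllab-compatible environment and are not defined here.
def _demo_rollout(env, env_spec, horizon=100):
    policy = GaussianGRUPolicy(env_spec=env_spec, hidden_sizes=(32,))
    policy.reset()  # clears the previous action and the GRU hidden state
    obs = env.reset()
    for _ in range(horizon):
        action, agent_info = policy.get_action(obs)
        obs, reward, done, _ = env.step(action)
        if done:
            break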
| StarcoderdataPython |
5158379 | <reponame>ed-ortizm/sdss-spectra-processing
from setuptools import setup, find_packages
with open("README.md", "r") as file:
long_description = file.read()
setup(
name="sdss",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
packages=find_packages(
where="src", include=["[a-z]*"], exclude=["old_code"]
),
package_dir={"": "src"},
# packages=["autoencoders"],
# package_dir = {"autoencoders":"src/autoencoders"},
description="Module to process sdss spectra for outlier detection",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ed-ortizm/spectra-processing",
license="MIT",
keywords="astrophysics, galaxy, Machine Learning, SDSS",
)
| StarcoderdataPython |
6464 | import function_exercise_01 as st
st.sandwich_toppings('meatballs', 'salad')
| StarcoderdataPython |
1915647 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
# TODO: Complete type-hint and doc string.
def blackman_window(win_len, dtype=np.float32):
arcs = np.pi * np.arange(win_len) / float(win_len)
win = np.asarray(
[0.42 - 0.5 * np.cos(2 * arc) + 0.08 * np.cos(4 * arc) for arc in arcs],
dtype=dtype)
return paddle.to_tensor(win)
def compute_amplitude(waveforms, lengths=None, amp_type="avg", scale="linear"):
if len(waveforms.shape) == 1:
waveforms = waveforms.unsqueeze(0)
assert amp_type in ["avg", "peak"]
assert scale in ["linear", "dB"]
if amp_type == "avg":
if lengths is None:
out = paddle.mean(paddle.abs(waveforms), axis=1, keepdim=True)
else:
wav_sum = paddle.sum(paddle.abs(waveforms), axis=1, keepdim=True)
out = wav_sum / lengths
elif amp_type == "peak":
out = paddle.max(paddle.abs(waveforms), axis=1, keepdim=True)
else:
raise NotImplementedError
if scale == "linear":
return out
elif scale == "dB":
return paddle.clip(20 * paddle.log10(out), min=-80)
else:
raise NotImplementedError
def dB_to_amplitude(SNR):
return 10**(SNR / 20)
def convolve1d(
waveform,
kernel,
padding=0,
pad_type="constant",
stride=1,
groups=1, ):
if len(waveform.shape) != 3:
raise ValueError("Convolve1D expects a 3-dimensional tensor")
# Padding can be a tuple (left_pad, right_pad) or an int
if isinstance(padding, list):
waveform = paddle.nn.functional.pad(
x=waveform,
pad=padding,
mode=pad_type,
data_format='NLC', )
# Move time dimension last, which pad and fft and conv expect.
# (N, L, C) -> (N, C, L)
waveform = waveform.transpose([0, 2, 1])
kernel = kernel.transpose([0, 2, 1])
convolved = paddle.nn.functional.conv1d(
x=waveform,
weight=kernel,
stride=stride,
groups=groups,
padding=padding if not isinstance(padding, list) else 0, )
# Return time dimension to the second dimension.
return convolved.transpose([0, 2, 1])
def notch_filter(notch_freq, filter_width=101, notch_width=0.05):
# Check inputs
assert 0 < notch_freq <= 1
assert filter_width % 2 != 0
pad = filter_width // 2
inputs = paddle.arange(filter_width, dtype='float32') - pad
# Avoid frequencies that are too low
notch_freq += notch_width
# Define sinc function, avoiding division by zero
def sinc(x):
def _sinc(x):
return paddle.sin(x) / x
# The zero is at the middle index
res = paddle.concat(
[_sinc(x[:pad]), paddle.ones([1]), _sinc(x[pad + 1:])])
return res
# Compute a low-pass filter with cutoff frequency notch_freq.
hlpf = sinc(3 * (notch_freq - notch_width) * inputs)
# import torch
# hlpf *= paddle.to_tensor(torch.blackman_window(filter_width).detach().numpy())
hlpf *= blackman_window(filter_width)
hlpf /= paddle.sum(hlpf)
# Compute a high-pass filter with cutoff frequency notch_freq.
hhpf = sinc(3 * (notch_freq + notch_width) * inputs)
# hhpf *= paddle.to_tensor(torch.blackman_window(filter_width).detach().numpy())
hhpf *= blackman_window(filter_width)
hhpf /= -paddle.sum(hhpf)
hhpf[pad] += 1
# Adding filters creates notch filter
return (hlpf + hhpf).reshape([1, -1, 1])
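# Illustrative usage sketch (added; not part of the original module): build a
# notch kernel and apply it to a batch of waveforms with convolve1d. The value
# of notch_freq is an arbitrary example; it is the normalized frequency in
# (0, 1] that notch_filter expects.
def _demo_notch_filter(waveforms, notch_freq=0.25):
    # waveforms: paddle tensor of shape (batch, time, channels)
    kernel = notch_filter(notch_freq)
    pad = kernel.shape[1] // 2
    # symmetric padding keeps the output length equal to the input length
    return convolve1d(waveforms, kernel, padding=[pad, pad])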
def reverberate(waveforms,
rir_waveform,
sample_rate,
impulse_duration=0.3,
rescale_amp="avg"):
orig_shape = waveforms.shape
if len(waveforms.shape) > 3 or len(rir_waveform.shape) > 3:
raise NotImplementedError
# if inputs are mono tensors we reshape to 1, samples
if len(waveforms.shape) == 1:
waveforms = waveforms.unsqueeze(0).unsqueeze(-1)
elif len(waveforms.shape) == 2:
waveforms = waveforms.unsqueeze(-1)
if len(rir_waveform.shape) == 1: # convolve1d expects a 3d tensor !
rir_waveform = rir_waveform.unsqueeze(0).unsqueeze(-1)
elif len(rir_waveform.shape) == 2:
rir_waveform = rir_waveform.unsqueeze(-1)
# Compute the average amplitude of the clean
orig_amplitude = compute_amplitude(waveforms, waveforms.shape[1],
rescale_amp)
# Compute index of the direct signal, so we can preserve alignment
impulse_index_start = rir_waveform.abs().argmax(axis=1).item()
impulse_index_end = min(
impulse_index_start + int(sample_rate * impulse_duration),
rir_waveform.shape[1])
rir_waveform = rir_waveform[:, impulse_index_start:impulse_index_end, :]
rir_waveform = rir_waveform / paddle.norm(rir_waveform, p=2)
rir_waveform = paddle.flip(rir_waveform, [1])
waveforms = convolve1d(
waveform=waveforms,
kernel=rir_waveform,
padding=[rir_waveform.shape[1] - 1, 0], )
# Rescale to the peak amplitude of the clean waveform
waveforms = rescale(waveforms, waveforms.shape[1], orig_amplitude,
rescale_amp)
if len(orig_shape) == 1:
waveforms = waveforms.squeeze(0).squeeze(-1)
if len(orig_shape) == 2:
waveforms = waveforms.squeeze(-1)
return waveforms
def rescale(waveforms, lengths, target_lvl, amp_type="avg", scale="linear"):
assert amp_type in ["peak", "avg"]
assert scale in ["linear", "dB"]
batch_added = False
if len(waveforms.shape) == 1:
batch_added = True
waveforms = waveforms.unsqueeze(0)
waveforms = normalize(waveforms, lengths, amp_type)
if scale == "linear":
out = target_lvl * waveforms
elif scale == "dB":
out = dB_to_amplitude(target_lvl) * waveforms
else:
raise NotImplementedError("Invalid scale, choose between dB and linear")
if batch_added:
out = out.squeeze(0)
return out
def normalize(waveforms, lengths=None, amp_type="avg", eps=1e-14):
assert amp_type in ["avg", "peak"]
batch_added = False
if len(waveforms.shape) == 1:
batch_added = True
waveforms = waveforms.unsqueeze(0)
den = compute_amplitude(waveforms, lengths, amp_type) + eps
if batch_added:
waveforms = waveforms.squeeze(0)
return waveforms / den
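# Illustrative usage sketch (added; not part of the original module): normalize
# a random batch of waveforms to unit average amplitude and verify the result.
def _demo_normalize():
    waveforms = paddle.rand([4, 16000]) - 0.5        # (batch, samples)
    normalized = normalize(waveforms, amp_type="avg")
    # each returned value should be close to 1.0
    return compute_amplitude(normalized, amp_type="avg")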
| StarcoderdataPython |
3509254 | <reponame>cbergmiller/bacpypes
import logging
from ..debugging import DebugContents
from ..task import call_later
from ..apdu import ComplexAckPDU, ConfirmedRequestPDU, encode_max_segments_accepted, encode_max_apdu_length_accepted
from .ssm_states import *
_logger = logging.getLogger(__name__)
__all__ = ['SSM']
class SSM(DebugContents):
"""
SSM - Segmentation State Machine
"""
transactionLabels = [
'IDLE', 'SEGMENTED_REQUEST', 'AWAIT_CONFIRMATION', 'AWAIT_RESPONSE',
'SEGMENTED_RESPONSE', 'SEGMENTED_CONFIRMATION', 'COMPLETED', 'ABORTED'
]
_debug_contents = (
'ssmSAP', 'localDevice', 'device_info', 'invokeID', 'state', 'segmentAPDU', 'segmentSize', 'segmentCount',
'maxSegmentsAccepted', 'retryCount', 'segmentRetryCount', 'sentAllSegments', 'lastSequenceNumber',
'initialSequenceNumber', 'actualWindowSize', 'proposedWindowSize'
)
def __init__(self, sap, pdu_address):
"""Common parts for client and server segmentation."""
self.ssmSAP = sap # service access point
# save the address and get the device information
self.pdu_address = pdu_address
self.device_info = sap.deviceInfoCache.get_device_info(pdu_address)
self.invokeID = None # invoke ID
self.state = IDLE # initial state
self.segmentAPDU = None # refers to request or response
self.segmentSize = None # how big the pieces are
self.segmentCount = None
self.retryCount = None
self.segmentRetryCount = None
self.sentAllSegments = None
self.lastSequenceNumber = None
self.initialSequenceNumber = None
self.actualWindowSize = None
# local device object provides these or SAP provides defaults, make
# copies here so they are consistent throughout the transaction but
# they could change from one transaction to the next
self.numberOfApduRetries = getattr(sap.localDevice, 'numberOfApduRetries', sap.numberOfApduRetries)
self.apduTimeout = getattr(sap.localDevice, 'apduTimeout', sap.apduTimeout)
self.segmentationSupported = getattr(sap.localDevice, 'segmentationSupported', sap.segmentationSupported)
self.segmentTimeout = getattr(sap.localDevice, 'segmentTimeout', sap.segmentTimeout)
self.maxSegmentsAccepted = getattr(sap.localDevice, 'maxSegmentsAccepted', sap.maxSegmentsAccepted)
self.maxApduLengthAccepted = getattr(sap.localDevice, 'maxApduLengthAccepted', sap.maxApduLengthAccepted)
self.timer_handle = None
def start_timer(self, msecs):
# if this is active, pull it
if self.timer_handle:
self.timer_handle.cancel()
# now install this
self.timer_handle = call_later(msecs / 1000.0, self.handle_timeout)
def stop_timer(self):
# if this is active, pull it
if self.timer_handle:
self.timer_handle.cancel()
self.timer_handle = None
def restart_timer(self, msecs):
# if this is active, pull it
self.start_timer(msecs)
def handle_timeout(self):
raise NotImplementedError()
def set_state(self, new_state, timer=0):
"""This function is called when the derived class wants to change state."""
# make sure we have a correct transition
if (self.state == COMPLETED) or (self.state == ABORTED):
e = RuntimeError(
f'invalid state transition from {SSM.transactionLabels[self.state]} to {SSM.transactionLabels[new_state]}')
_logger.exception(e)
raise e
# stop any current timer
self.stop_timer()
# make the change
self.state = new_state
# if another timer should be started, start it
if timer:
self.start_timer(timer)
def set_segmentation_context(self, apdu):
"""This function is called to set the segmentation context."""
# set the context
self.segmentAPDU = apdu
def get_segment(self, indx):
"""
        This function returns an APDU corresponding to a particular segment of a confirmed request or complex ack.
The segmentAPDU is the context.
"""
# check for no context
if not self.segmentAPDU:
raise RuntimeError('no segmentation context established')
# check for invalid segment number
if indx >= self.segmentCount:
raise RuntimeError(f'invalid segment number {indx}, APDU has {self.segmentCount} segments')
if self.segmentAPDU.apduType == ConfirmedRequestPDU.pduType:
seg_apdu = ConfirmedRequestPDU(self.segmentAPDU.apduService)
seg_apdu.apduMaxSegs = encode_max_segments_accepted(self.maxSegmentsAccepted)
seg_apdu.apduMaxResp = encode_max_apdu_length_accepted(self.maxApduLengthAccepted)
seg_apdu.apduInvokeID = self.invokeID
# segmented response accepted?
seg_apdu.apduSA = self.segmentationSupported in ('segmentedReceive', 'segmentedBoth')
elif self.segmentAPDU.apduType == ComplexAckPDU.pduType:
seg_apdu = ComplexAckPDU(self.segmentAPDU.apduService, self.segmentAPDU.apduInvokeID)
else:
raise RuntimeError('invalid APDU type for segmentation context')
        # maintain the user data reference
seg_apdu.pduUserData = self.segmentAPDU.pduUserData
# make sure the destination is set
seg_apdu.pduDestination = self.pdu_address
# segmented message?
if self.segmentCount != 1:
seg_apdu.apduSeg = True
seg_apdu.apduMor = (indx < (self.segmentCount - 1)) # more follows
seg_apdu.apduSeq = indx % 256 # sequence number
# first segment sends proposed window size, rest get actual
if indx == 0:
seg_apdu.apduWin = self.proposedWindowSize
else:
seg_apdu.apduWin = self.actualWindowSize
else:
seg_apdu.apduSeg = False
seg_apdu.apduMor = False
# add the content
offset = indx * self.segmentSize
seg_apdu.put_data(self.segmentAPDU.pduData[offset:offset + self.segmentSize])
# success
return seg_apdu
def append_segment(self, apdu):
"""
This function appends the apdu content to the end of the current APDU being built.
The segmentAPDU is the context.
"""
# check for no context
if not self.segmentAPDU:
raise RuntimeError('no segmentation context established')
# append the data
self.segmentAPDU.put_data(apdu.pduData)
def in_window(self, seqA, seqB):
rslt = ((seqA - seqB + 256) % 256) < self.actualWindowSize
return rslt
def fill_window(self, seqNum):
"""This function sends all of the packets necessary to fill
out the segmentation window."""
for ix in range(self.actualWindowSize):
apdu = self.get_segment(seqNum + ix)
# send the message
self.ssmSAP.request(apdu)
# check for no more follows
if not apdu.apduMor:
self.sentAllSegments = True
break
| StarcoderdataPython |
397532 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
import sys
import subprocess
import shlex
from powerline.config import POWERLINE_ROOT, TMUX_CONFIG_DIRECTORY
from powerline.lib.config import ConfigLoader
from powerline import generate_config_finder, load_config, create_logger, finish_common_config
from powerline.shell import ShellPowerline
from powerline.lib.shell import which
from powerline.bindings.tmux import (TmuxVersionInfo, run_tmux_command, set_tmux_environment, get_tmux_version,
source_tmux_file)
from powerline.lib.encoding import get_preferred_output_encoding
from powerline.renderers.tmux import attrs_to_tmux_attrs
from powerline.commands.main import finish_args
CONFIG_FILE_NAME = re.compile(r'powerline_tmux_(?P<major>\d+)\.(?P<minor>\d+)(?P<suffix>[a-z]+)?(?:_(?P<mod>plus|minus))?\.conf')
CONFIG_MATCHERS = {
None: (lambda a, b: a.major == b.major and a.minor == b.minor),
'plus': (lambda a, b: a[:2] <= b[:2]),
'minus': (lambda a, b: a[:2] >= b[:2]),
}
CONFIG_PRIORITY = {
None: 3,
'plus': 2,
'minus': 1,
}
def list_all_tmux_configs():
'''List all version-specific tmux configuration files'''
for root, dirs, files in os.walk(TMUX_CONFIG_DIRECTORY):
dirs[:] = ()
for fname in files:
match = CONFIG_FILE_NAME.match(fname)
if match:
assert match.group('suffix') is None
yield (
os.path.join(root, fname),
CONFIG_MATCHERS[match.group('mod')],
CONFIG_PRIORITY[match.group('mod')],
TmuxVersionInfo(
int(match.group('major')),
int(match.group('minor')),
match.group('suffix'),
),
)
def get_tmux_configs(version):
'''Get tmux configuration suffix given parsed tmux version
:param TmuxVersionInfo version: Parsed tmux version.
'''
for fname, matcher, priority, file_version in list_all_tmux_configs():
if matcher(file_version, version):
yield (fname, priority + file_version.minor * 10 + file_version.major * 10000)
def source_tmux_files(pl, args, tmux_version=None, source_tmux_file=source_tmux_file):
'''Source relevant version-specific tmux configuration files
Files are sourced in the following order:
* First relevant files with older versions are sourced.
* If files for same versions are to be sourced then first _minus files are
sourced, then _plus files and then files without _minus or _plus suffixes.
'''
tmux_version = tmux_version or get_tmux_version(pl)
source_tmux_file(os.path.join(TMUX_CONFIG_DIRECTORY, 'powerline-base.conf'))
for fname, priority in sorted(get_tmux_configs(tmux_version), key=(lambda v: v[1])):
source_tmux_file(fname)
if not os.environ.get('POWERLINE_COMMAND'):
cmd = deduce_command()
if cmd:
set_tmux_environment('POWERLINE_COMMAND', deduce_command(), remove=False)
try:
run_tmux_command('refresh-client')
except subprocess.CalledProcessError:
# On tmux-2.0 this command may fail for whatever reason. Since it is
# critical just ignore the failure.
pass
class EmptyArgs(object):
def __init__(self, ext, config_path):
self.ext = [ext]
self.side = 'left'
self.config_path = None
def __getattr__(self, attr):
return None
def init_tmux_environment(pl, args, set_tmux_environment=set_tmux_environment):
'''Initialize tmux environment from tmux configuration
'''
powerline = ShellPowerline(finish_args(None, os.environ, EmptyArgs('tmux', args.config_path)))
# TODO Move configuration files loading out of Powerline object and use it
# directly
powerline.update_renderer()
# FIXME Use something more stable then `theme_kwargs`
colorscheme = powerline.renderer_options['theme_kwargs']['colorscheme']
def get_highlighting(group):
return colorscheme.get_highlighting([group], None)
for varname, highlight_group in (
('_POWERLINE_BACKGROUND_COLOR', 'background'),
('_POWERLINE_ACTIVE_WINDOW_STATUS_COLOR', 'active_window_status'),
('_POWERLINE_WINDOW_STATUS_COLOR', 'window_status'),
('_POWERLINE_ACTIVITY_STATUS_COLOR', 'activity_status'),
('_POWERLINE_BELL_STATUS_COLOR', 'bell_status'),
('_POWERLINE_WINDOW_COLOR', 'window'),
('_POWERLINE_WINDOW_DIVIDER_COLOR', 'window:divider'),
('_POWERLINE_WINDOW_CURRENT_COLOR', 'window:current'),
('_POWERLINE_WINDOW_NAME_COLOR', 'window_name'),
('_POWERLINE_SESSION_COLOR', 'session'),
):
highlight = get_highlighting(highlight_group)
set_tmux_environment(varname, powerline.renderer.hlstyle(**highlight)[2:-1])
for varname, prev_group, next_group in (
('_POWERLINE_WINDOW_CURRENT_HARD_DIVIDER_COLOR', 'window', 'window:current'),
('_POWERLINE_WINDOW_CURRENT_HARD_DIVIDER_NEXT_COLOR', 'window:current', 'window'),
('_POWERLINE_SESSION_HARD_DIVIDER_NEXT_COLOR', 'session', 'background'),
):
prev_highlight = get_highlighting(prev_group)
next_highlight = get_highlighting(next_group)
set_tmux_environment(
varname,
powerline.renderer.hlstyle(
fg=prev_highlight['bg'],
bg=next_highlight['bg'],
attrs=0,
)[2:-1]
)
for varname, attr, group in (
('_POWERLINE_ACTIVE_WINDOW_FG', 'fg', 'active_window_status'),
('_POWERLINE_WINDOW_STATUS_FG', 'fg', 'window_status'),
('_POWERLINE_ACTIVITY_STATUS_FG', 'fg', 'activity_status'),
('_POWERLINE_ACTIVITY_STATUS_ATTR', 'attrs', 'activity_status'),
('_POWERLINE_BELL_STATUS_FG', 'fg', 'bell_status'),
('_POWERLINE_BELL_STATUS_ATTR', 'attrs', 'bell_status'),
('_POWERLINE_BACKGROUND_FG', 'fg', 'background'),
('_POWERLINE_BACKGROUND_BG', 'bg', 'background'),
('_POWERLINE_SESSION_FG', 'fg', 'session'),
('_POWERLINE_SESSION_BG', 'bg', 'session'),
('_POWERLINE_SESSION_ATTR', 'attrs', 'session'),
('_POWERLINE_SESSION_PREFIX_FG', 'fg', 'session:prefix'),
('_POWERLINE_SESSION_PREFIX_BG', 'bg', 'session:prefix'),
('_POWERLINE_SESSION_PREFIX_ATTR', 'attrs', 'session:prefix'),
):
if attr == 'attrs':
attrs = attrs_to_tmux_attrs(get_highlighting(group)[attr])
set_tmux_environment(varname, ']#['.join(attrs))
set_tmux_environment(varname + '_LEGACY', (','.join(
# Tmux-1.6 does not accept no… attributes in
# window-status-…-attr options.
(attr for attr in attrs if not attr.startswith('no')))
# But it does not support empty attributes as well.
or 'none'))
else:
if powerline.common_config['term_truecolor']:
set_tmux_environment(varname, '#{0:06x}'.format(get_highlighting(group)[attr][1]))
else:
set_tmux_environment(varname, 'colour' + str(get_highlighting(group)[attr][0]))
left_dividers = powerline.renderer.theme.dividers['left']
set_tmux_environment('_POWERLINE_LEFT_HARD_DIVIDER', left_dividers['hard'])
set_tmux_environment('_POWERLINE_LEFT_SOFT_DIVIDER', left_dividers['soft'])
set_tmux_environment('_POWERLINE_LEFT_HARD_DIVIDER_SPACES', (
' ' * powerline.renderer.strwidth(left_dividers['hard'])))
TMUX_VAR_RE = re.compile(r'\$(_POWERLINE_\w+)')
def tmux_setup(pl, args):
tmux_environ = {}
tmux_version = get_tmux_version(pl)
def set_tmux_environment_nosource(varname, value, remove=True):
tmux_environ[varname] = value
def replace_cb(match):
return tmux_environ[match.group(1)]
def replace_env(s):
return TMUX_VAR_RE.subn(replace_cb, s)[0]
def source_tmux_file_nosource(fname):
with open(fname) as fd:
for line in fd:
if line.startswith('#') or line == '\n':
continue
args = shlex.split(line)
args = [args[0]] + [replace_env(arg) for arg in args[1:]]
run_tmux_command(*args)
if args.source is None:
args.source = tmux_version < (1, 9)
if args.source:
ste = set_tmux_environment
stf = source_tmux_file
else:
ste = set_tmux_environment_nosource
stf = source_tmux_file_nosource
init_tmux_environment(pl, args, set_tmux_environment=ste)
source_tmux_files(pl, args, tmux_version=tmux_version, source_tmux_file=stf)
def get_main_config(args):
find_config_files = generate_config_finder()
config_loader = ConfigLoader(run_once=True)
return load_config('config', find_config_files, config_loader)
def create_powerline_logger(args):
config = get_main_config(args)
common_config = finish_common_config(get_preferred_output_encoding(), config['common'])
logger, pl, get_module_attr = create_logger(common_config)
return pl
def check_command(cmd):
if which(cmd):
return cmd
def deduce_command():
'''Deduce which command to use for ``powerline``
Candidates:
* ``powerline``. Present only when installed system-wide.
* ``{powerline_root}/scripts/powerline``. Present after ``pip install -e``
was run and C client was compiled (in this case ``pip`` does not install
binary file).
* ``{powerline_root}/client/powerline.sh``. Useful when ``sh``, ``sed`` and
``socat`` are present, but ``pip`` or ``setup.py`` was not run.
* ``{powerline_root}/client/powerline.py``. Like above, but when one of
``sh``, ``sed`` and ``socat`` was not present.
* ``powerline-render``. Should not really ever be used.
* ``{powerline_root}/scripts/powerline-render``. Same.
'''
return (
None
or check_command('powerline')
or check_command(os.path.join(POWERLINE_ROOT, 'scripts', 'powerline'))
or ((which('sh') and which('sed') and which('socat'))
and check_command(os.path.join(POWERLINE_ROOT, 'client', 'powerline.sh')))
or check_command(os.path.join(POWERLINE_ROOT, 'client', 'powerline.py'))
or check_command('powerline-render')
or check_command(os.path.join(POWERLINE_ROOT, 'scripts', 'powerline-render'))
)
def shell_command(pl, args):
cmd = deduce_command()
if cmd:
print(cmd)
else:
sys.exit(1)
def uses(pl, args):
component = args.component
if not component:
raise ValueError('Must specify component')
shell = args.shell
template = 'POWERLINE_NO_{shell}_{component}'
    for sh in ((shell, 'shell') if shell else ('shell',)):
varname = template.format(shell=sh.upper(), component=component.upper())
if os.environ.get(varname):
sys.exit(1)
config = get_main_config(args)
if component in config.get('ext', {}).get('shell', {}).get('components', ('tmux', 'prompt')):
sys.exit(0)
else:
sys.exit(1)
| StarcoderdataPython |
3381021 | import pytest
import dumbc.ast.ast as ast
from dumbc.errors import DumbTypeError
from dumbc.errors import DumbNameError
from dumbc.transform.attr_pass import AttrPass
def test_no_attrs_has_body():
foo_func = ast.Function(
ast.FunctionProto('foo', [], ast.BuiltinTypes.I32),
ast.Block([
ast.Return()
]))
ap = AttrPass()
ap.visit(foo_func)
def test_no_attrs_no_body():
foo_func = ast.Function(
ast.FunctionProto('foo', [], ast.BuiltinTypes.I32))
ap = AttrPass()
with pytest.raises(DumbTypeError):
ap.visit(foo_func)
def test_external_attr_has_body():
attrs = [ast.Attribute('external')]
foo_func = ast.Function(
ast.FunctionProto('foo', [], ast.BuiltinTypes.I32, attrs),
ast.Block([
ast.Return()
]))
ap = AttrPass()
with pytest.raises(DumbTypeError):
ap.visit(foo_func)
def test_external_attr_no_body():
attrs = [ast.Attribute('external')]
foo_func = ast.Function(
ast.FunctionProto('foo', [], ast.BuiltinTypes.I32, attrs))
ap = AttrPass()
ap.visit(foo_func)
def test_external_attr_with_args():
attrs = [ast.Attribute('external',
args=(ast.BooleanConstant(True),))]
foo_func = ast.Function(
ast.FunctionProto('foo', [], ast.BuiltinTypes.I32, attrs))
ap = AttrPass()
with pytest.raises(DumbTypeError):
ap.visit(foo_func)
def test_unknown_attr():
attrs = [ast.Attribute('foobar')]
foo_func = ast.Function(
ast.FunctionProto('foo', [], ast.BuiltinTypes.I32, attrs))
ap = AttrPass()
with pytest.raises(DumbNameError):
ap.visit(foo_func)
| StarcoderdataPython |
11202293 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import unittest
from io import StringIO
from uuid import uuid4
from google.auth.environment_vars import CREDENTIALS
from airflow.gcp.utils.credentials_provider import (
AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT, build_gcp_conn, provide_gcp_conn_and_credentials,
provide_gcp_connection, provide_gcp_credentials, temporary_environment_variable,
)
from tests.compat import mock
ENV_VALUE = "test_env"
TEMP_VARIABLE = "temp_variable"
KEY = str(uuid4())
ENV_CRED = "temp_cred"
class TestHelper(unittest.TestCase):
def test_build_gcp_conn_path(self):
value = "test"
conn = build_gcp_conn(key_file_path=value)
self.assertEqual(
"google-cloud-platform://?extra__google_cloud_platform__key_path=test", conn
)
def test_build_gcp_conn_scopes(self):
value = ["test", "test2"]
conn = build_gcp_conn(scopes=value)
self.assertEqual(
"google-cloud-platform://?extra__google_cloud_platform__scope=test%2Ctest2",
conn,
)
def test_build_gcp_conn_project(self):
value = "test"
conn = build_gcp_conn(project_id=value)
self.assertEqual(
"google-cloud-platform://?extra__google_cloud_platform__projects=test", conn
)
class TestTemporaryEnvironmentVariable(unittest.TestCase):
@mock.patch.dict(os.environ, clear=True)
def test_temporary_environment_variable_delete(self):
with temporary_environment_variable(KEY, ENV_VALUE):
self.assertEqual(os.environ.get(KEY), ENV_VALUE)
self.assertNotIn(KEY, os.environ)
@mock.patch.dict(os.environ, {KEY: ENV_VALUE})
def test_temporary_environment_variable_restore(self):
with temporary_environment_variable(KEY, TEMP_VARIABLE):
self.assertEqual(os.environ.get(KEY), TEMP_VARIABLE)
self.assertEqual(os.environ.get(KEY), ENV_VALUE)
@mock.patch.dict(os.environ, clear=True)
def test_temporary_environment_variable_error(self):
with self.assertRaises(Exception):
with temporary_environment_variable(KEY, ENV_VALUE):
self.assertEqual(os.environ.get(KEY), ENV_VALUE)
raise Exception("test")
self.assertNotIn(KEY, os.environ)
class TestProvideGcpCredentials(unittest.TestCase):
@mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE})
@mock.patch("tempfile.NamedTemporaryFile")
def test_provide_gcp_credentials_key_content(self, mock_file):
file_dict = {"foo": "bar"}
string_file = StringIO()
file_content = json.dumps(file_dict)
file_name = "/test/mock-file"
mock_file_handler = mock_file.return_value.__enter__.return_value
mock_file_handler.name = file_name
mock_file_handler.write = string_file.write
with provide_gcp_credentials(key_file_dict=file_dict):
self.assertEqual(os.environ[CREDENTIALS], file_name)
self.assertEqual(file_content, string_file.getvalue())
self.assertEqual(os.environ[CREDENTIALS], ENV_VALUE)
@mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE})
def test_provide_gcp_credentials_keep_environment(self):
key_path = "/test/key-path"
with provide_gcp_credentials(key_file_path=key_path):
self.assertEqual(os.environ[CREDENTIALS], key_path)
self.assertEqual(os.environ[CREDENTIALS], ENV_VALUE)
class TestProvideGcpConnection(unittest.TestCase):
@mock.patch.dict(os.environ, {AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT: ENV_VALUE})
@mock.patch("airflow.gcp.utils.credentials_provider.build_gcp_conn")
def test_provide_gcp_connection(self, mock_builder):
mock_builder.return_value = TEMP_VARIABLE
path = "path/to/file.json"
scopes = ["scopes"]
project_id = "project_id"
with provide_gcp_connection(path, scopes, project_id):
mock_builder.assert_called_once_with(
key_file_path=path, scopes=scopes, project_id=project_id
)
self.assertEqual(
os.environ[AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT], TEMP_VARIABLE
)
self.assertEqual(os.environ[AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT], ENV_VALUE)
class TestProvideGcpConnAndCredentials(unittest.TestCase):
@mock.patch.dict(
os.environ,
{AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT: ENV_VALUE, CREDENTIALS: ENV_VALUE},
)
@mock.patch("airflow.gcp.utils.credentials_provider.build_gcp_conn")
def test_provide_gcp_conn_and_credentials(self, mock_builder):
mock_builder.return_value = TEMP_VARIABLE
path = "path/to/file.json"
scopes = ["scopes"]
project_id = "project_id"
with provide_gcp_conn_and_credentials(path, scopes, project_id):
mock_builder.assert_called_once_with(
key_file_path=path, scopes=scopes, project_id=project_id
)
self.assertEqual(
os.environ[AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT], TEMP_VARIABLE
)
self.assertEqual(os.environ[CREDENTIALS], path)
self.assertEqual(os.environ[AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT], ENV_VALUE)
self.assertEqual(os.environ[CREDENTIALS], ENV_VALUE)
| StarcoderdataPython |
356130 | <reponame>MLcoinTeam/MLCoin
import argparse
from PIL import Image
# import imageio
import numpy as np
import tensorflow as tf
def load_labels(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
def read_tensor_from_image_file(file_name,
model,
input_height=224,
input_width=224,
input_mean=0,
input_std=255):
interpreter = tf.lite.Interpreter(model_path=model)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
img = Image.open(file_name)
img = img.resize((input_details[0]['shape'][1], input_details[0]['shape'][2]))
input_data = np.expand_dims(img, axis=0)
    # float models expect normalized float32 input; quantized models take the raw image.
    # Initializing the flag unconditionally avoids a NameError for quantized models.
    floating_model = input_details[0]['dtype'] == np.float32
    if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
results = interpreter.get_tensor(output_details[0]['index'])
results = np.squeeze(results)
return results
if __name__ == "__main__":
label_file = "tensorflow/examples/label_image/data/imagenet_slim_labels.txt"
model_file = \
"tensorflow/examples/label_image/data/inception_v3_2016_08_28_frozen.pb"
parser = argparse.ArgumentParser()
parser.add_argument("--model", help="tflite model to be executed")
parser.add_argument("--image", help="image to be processed")
parser.add_argument("--labels", help="name of file containing labels")
args = parser.parse_args()
if args.model:
model_file = args.model
if args.image:
file_name = args.image
if args.labels:
label_file = args.labels
results = read_tensor_from_image_file(file_name, model_file)
top_k = results.argsort()[-5:][::-1]
labels = load_labels(label_file)
for i in top_k:
print(labels[i], results[i]) | StarcoderdataPython |
6602134 | from commander import Commander
import time
import rospy
rospy.init_node('init_drone')
con = Commander()
time.sleep(0.5)
con.move(-5,0,0)
time.sleep(0.5)
| StarcoderdataPython |
142265 | #vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
from project import *
#from project.tables import db,User
from Authentication import UserAuth
from contacts import *
class UserAuthentication(Resource):
def get(self):
headers = {'Content-Type': 'text/html'}
return make_response(render_template('login.html'), 200, headers)
def post(self):
userObj = UserAuth(request.form['username'],request.form['password'])
res = userObj.do_authentication()
if res:
flash("Login Success")
app.logger.info('Login successful')
return redirect(url_for('contacts'))
else:
flash("Password entered is Invalid")
app.logger.info('Invalid Password')
return redirect(url_for('index'))
class UserLogout(Resource):
def get(self):
session['logged_in'] = False
app.logger.info('Logout successful')
return redirect(url_for('index'))
'''
#test
@auth.error_handler
def unauthorized():
return make_response(jsonify( { 'error': 'Unauthorized access!!!' } ), 403)
@auth.verify_password
def verify_password(username_or_token, password):
# first try to authenticate by token
print('Authenticating..')
user = User.verify_auth_token(username_or_token)
#return True
if not user:
# try to authenticate with username/password
user = User.query.filter_by(username = username_or_token).first()
g.user = user
print('Authentication successful')
return True
@app.route('/api/token')
@auth.login_required
def get_auth_token():
token = g.user.generate_auth_token(600)
return jsonify({'token': token.decode('ascii'), 'duration': 600})
'''
api.add_resource(UserAuthentication, '/login',endpoint="login")
api.add_resource(UserLogout, '/logout',endpoint="logout")
| StarcoderdataPython |
8141426 | import warnings
import pytest
from ..gateways.utils import get_supported_currencies
from ..interface import GatewayConfig
@pytest.fixture
def gateway_config():
return GatewayConfig(
gateway_name="Dummy",
auto_capture=True,
supported_currencies="",
connection_params={"secret-key": "dummy"},
)
@pytest.mark.parametrize(
"supported_currencies, expected_currencies",
[
("PLN, USD, EUR", ["PLN", "USD", "EUR"]),
("PLN,EUR", ["PLN", "EUR"]),
(" PLN,EUR ", ["PLN", "EUR"]),
("USD", ["USD"]),
],
)
def test_get_supported_currencies(
supported_currencies, expected_currencies, gateway_config
):
# given
gateway_config.supported_currencies = supported_currencies
# when
currencies = get_supported_currencies(gateway_config, "Test")
# then
assert currencies == expected_currencies
def test_get_supported_currencies_default_currency(gateway_config):
# when
with warnings.catch_warnings(record=True) as warns:
currencies = get_supported_currencies(gateway_config, "Test")
expected_warning = (
"Default currency used for Test. "
"DEFAULT_CURRENCY setting is deprecated, "
"please configure supported currencies for this gateway."
)
assert any([str(warning.message) == expected_warning for warning in warns])
# then
assert currencies == ["USD"]
| StarcoderdataPython |
3373865 | <filename>denovonear/ensembl_cache.py
""" caches sequence information requested from ensembl, so we don't have to
repeatedly re-request information from the REST API if we have done so recently.
"""
import os
import sqlite3
import sys
import time
import random
import zlib
from pathlib import Path
import json
from datetime import datetime
from urllib.parse import urlparse
class EnsemblCache(object):
""" Instead of repeatedly re-acquiring data from Ensembl each run, cache
the requested data for faster retrieval
"""
today = datetime.today()
def __init__(self, cache_folder):
""" initialise the class with the local cache folder
Args:
cache_folder: path to the cache
"""
cache_folder = Path(cache_folder)
if not cache_folder.exists():
cache_folder.mkdir()
# generate a database with tables if it doesn't already exist
path = cache_folder / "ensembl_cache.db"
if not path.exists():
try:
with sqlite3.connect(str(path)) as conn:
with conn as cursor:
cursor.execute("CREATE TABLE ensembl " \
"(key text PRIMARY KEY, genome_build text, " \
"cache_date text, api_version text, data blob)")
except sqlite3.OperationalError:
time.sleep(random.uniform(1, 5))
self.conn = sqlite3.connect(str(path))
self.conn.row_factory = sqlite3.Row
self.cursor = self.conn.cursor()
def get_cached_data(self, url):
""" get cached data for a url if stored in the cache and not outdated
Args:
url: URL for the Ensembl REST service
Returns:
data if data in cache, else None
"""
key, build = self.get_key_from_url(url)
self.cursor.execute("SELECT * FROM ensembl WHERE key=? AND genome_build=?",
(key, build))
row = self.cursor.fetchone()
# if the data has been cached, check that it is not out of date
if row is not None:
data = zlib.decompress(row["data"])
diff = self.today - datetime.strptime(row["cache_date"], "%Y-%m-%d")
if diff.days < 180:
return data
return None
def cache_url_data(self, url, data, attempt=0):
""" cache the data retrieved from ensembl
Args:
url: URL for the Ensembl REST service
data: response data from Ensembl, in bytes form
"""
if attempt > 5:
raise ValueError('too many attempts at writing to the cache')
key, build = self.get_key_from_url(url)
current_date = datetime.strftime(self.today, "%Y-%m-%d")
compressed = zlib.compress(data)
t = (key, build, current_date, '9', compressed)
cmd = "INSERT OR REPLACE INTO ensembl " \
"(key, genome_build, cache_date, api_version, data) VALUES (?,?,?,?,?)"
try:
with self.conn as conn:
conn.execute(cmd, t)
except sqlite3.OperationalError:
# if we hit a sqlite locking error, wait a random time so conflicting
# instances are less likely to reconflict, then retry
time.sleep(random.uniform(1, 10))
self.cache_url_data(url, data, attempt + 1)
def get_key_from_url(self, url):
""" parses the url into a list of folder locations
We take a URL like:
http://rest.ensembl.org/sequence/id/ENST00000538324?type=genomic;expand_3prime=10
and turn it into 'sequence.id.ENST00000538324.genomic'
Args:
url: URL for the Ensembl REST service
Returns:
            a unique database key for the URL's data, along with the genome build
"""
url = urlparse(url)
path = url.path.strip('/').replace('/', '.')
# find the build from the url, but convert the default server to a build
build = url.netloc.split('.')[0]
build = 'grch38' if build == 'rest' else build
# convert "LONG_ID?feature=transcript" to ['LONG_ID', "transcript"] etc
suffix = url.query.split(';')[0]
if "=" in suffix:
_, suffix = suffix.split("=")
key = path + '.' + suffix if suffix != '' else path
# replace characters not tolerated in keys
return key.replace(':', '_'), build
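# Illustrative usage sketch, not part of the original module: the cache folder,
# URL, and payload below are assumptions chosen only to show the round trip of
# cache_url_data() followed by get_cached_data().
if __name__ == "__main__":
    cache = EnsemblCache("example_cache_dir")
    url = "http://rest.ensembl.org/sequence/id/ENST00000538324?type=genomic"
    if cache.get_cached_data(url) is None:
        cache.cache_url_data(url, b'{"seq": "ACGT"}')  # stand-in for a real REST response body
    print(cache.get_cached_data(url))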
| StarcoderdataPython |
8039625 | <reponame>mgelbart/ray
from ray.autoscaler.sdk.sdk import (
create_or_update_cluster,
teardown_cluster,
run_on_cluster,
rsync,
get_head_node_ip,
get_worker_node_ips,
request_resources,
configure_logging,
bootstrap_config,
fillout_defaults,
register_callback_handler,
get_docker_host_mount_location,
)
__all__ = [
"create_or_update_cluster",
"teardown_cluster",
"run_on_cluster",
"rsync",
"get_head_node_ip",
"get_worker_node_ips",
"request_resources",
"configure_logging",
"bootstrap_config",
"fillout_defaults",
"register_callback_handler",
"get_docker_host_mount_location",
]
| StarcoderdataPython |
75896 | class Building(object):
"""Class representing all the data for a building
'attribute name': 'type'
swagger_types = {
'buildingId': 'str',
'nameList': 'list[str]',
'numWashers': 'int',
'numDryers': 'int',
}
'attribute name': 'Attribute name in Swagger Docs'
attribute_map = {
'buildingId': 'buildingId',
'nameList': 'nameList',
'numWashers': 'numWashers',
'numDryers': 'numDryers'
}
"""
def __init__(self, buildingId, nameList, numWashers, numDryers):
"""Class instatiation
Check if all the attributes are valid and assigns them if they are
Raises ValueError if attributes are invalid
"""
if buildingId is None:
raise ValueError("Invalid value for 'buildingId', must not be 'None'")
if nameList is None:
raise ValueError("Invalid value for 'nameList', must not be 'None'")
if numWashers is None:
raise ValueError("Invalid value for 'numWashers', must not be 'None'")
if type(numWashers) is not int:
raise ValueError("Invalid value for 'numWashers', must be an integer")
if numWashers < 0:
raise ValueError("Invalid value for 'numWashers', must not be negative")
if numDryers is None:
raise ValueError("Invalid value for 'numDryers', must not be'None'")
if type(numDryers) is not int:
raise ValueError("Invalid value for 'numDryers', must be an integer")
if numDryers < 0:
raise ValueError("Invalid value for 'numDryers', must not be negative")
self.buildingId = buildingId
self.nameList = nameList
self.numWashers = numWashers
        self.numDryers = numDryers
| StarcoderdataPython |
1782809 | <reponame>chainedfarmsnetwork/quick-n-dirty-meme-contest-handler
# Generated by Django 3.2.3 on 2021-05-21 21:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('web', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MemeContest',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('contest_id', models.CharField(max_length=100)),
('pub_date', models.DateTimeField(verbose_name='Date published')),
('end_date', models.DateTimeField(verbose_name='End Date')),
],
),
migrations.AddField(
model_name='memecontestpost',
name='contest_id',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='web.memecontest'),
preserve_default=False,
),
]
| StarcoderdataPython |
364906 | from .void_element import VoidElement
from ..attribute.composite import Common, HasFixedSize
from ..attribute import (
AttributeRenderer,
Accept, Alternative, Autocomplete,
Capture, Checked, DirectionName,
Disabled, Form, FormAction, FormEncodingType,
FormMethod, FormNoValidate, FormTarget, List,
Maximum, MaximumLength, Minimum, MinimumLength,
Multiple, Name, Pattern, Placeholder, ReadOnly,
Required, Size, Source, Step, Type, UseMap, Value
)
class Input(VoidElement):
"""
Represents interactive controls to accept data from user.
"""
attribute_renderer = AttributeRenderer(
*Common(),
*HasFixedSize(),
Accept(),
Alternative(),
Autocomplete(),
Capture(),
Checked(),
DirectionName(),
Disabled(),
Form(),
FormAction(),
FormEncodingType(),
FormMethod(),
FormNoValidate(),
FormTarget(),
List(),
Maximum(),
MaximumLength(),
MinimumLength(),
Minimum(),
Multiple(),
Name(),
Pattern(),
Placeholder(),
ReadOnly(),
Required(),
Size(),
Source(),
Step(),
Type(),
UseMap(),
Value()
)
def __str__(self):
return "input"
| StarcoderdataPython |
1660300 | <filename>src/nodemgr/common/windows_process_mem_cpu.py
#
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
from sandesh.nodeinfo.cpuinfo.ttypes import ProcessCpuInfo
class WindowsProcessMemCpuUsageData(object):
def __init__(self, _id, last_cpu, last_time):
self.last_cpu = last_cpu
self.last_time = last_time
self._id = hex(_id)[2:-1].zfill(64)
def get_process_mem_cpu_info(self):
process_mem_cpu = ProcessCpuInfo()
return process_mem_cpu
| StarcoderdataPython |
4864012 | '''
Given a string, find the length of the longest substring that contains no repeated characters.
'''
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
if len(s) <= 1:
return len(s)
maxLen = 0
strlen = len(s)
end = -1
        seen = set()  # characters currently inside the sliding window s[i..end]
        for i in range(strlen):
            if i != 0:
                seen.remove(s[i - 1])  # drop the character leaving the window
            while end + 1 < strlen and s[end + 1] not in seen:
                seen.add(s[end + 1])  # grow the window while characters stay unique
                end += 1
            maxLen = max(maxLen, end - i + 1)
        return maxLen
s = Solution()
print(s.lengthOfLongestSubstring("abcabcbb") == 3)
print(s.lengthOfLongestSubstring("bbbbb") == 1)
print(s.lengthOfLongestSubstring("pwwkew") == 3)
print(s.lengthOfLongestSubstring("") == 0)
print(s.lengthOfLongestSubstring("pwwkes") == 4)
print(s.lengthOfLongestSubstring("abba") == 2)
| StarcoderdataPython |
3509227 | import os
from flask import Flask
from config import Config
def create_app(config_class=Config):
"""
Application factory function.
Create and configure the application.
:return: app instance
"""
BASEDIR = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(config_class)
    # ensure the folders exist for the database, images, and invoices
try:
os.makedirs(app.instance_path)
os.makedirs(app.config['UPLOAD_FOLDER'])
os.makedirs(app.config['INVOICE_DIR'])
except OSError:
pass
from . import db
db.init_app(app)
from . import main
app.register_blueprint(main.bp)
return app
| StarcoderdataPython |
4875128 | <gh_stars>10-100
"""Install script for ``proxpi``."""
import setuptools
setuptools.setup()
| StarcoderdataPython |
9616503 | """
run.py (arxiv_collect)
----------------------
Collect a portion of "raw" arXiv data from the bulk API, and parse to the MySQL database.
"""
import boto3
import logging
import os
from sqlalchemy.orm.exc import NoResultFound
from urllib.parse import urlsplit
from nesta.core.orms.orm_utils import get_mysql_engine, try_until_allowed, insert_data, db_session
from nesta.core.orms.arxiv_orm import Base, Article, ArticleCategory, Category
from nesta.packages.arxiv.collect_arxiv import request_token, load_arxiv_categories, retrieve_arxiv_batch_rows
from nesta.core.luigihacks.s3 import parse_s3_path
def run():
db_name = os.environ["BATCHPAR_db_name"]
s3_path = os.environ["BATCHPAR_outinfo"]
start_cursor = int(os.environ["BATCHPAR_start_cursor"])
end_cursor = int(os.environ["BATCHPAR_end_cursor"])
batch_size = end_cursor - start_cursor
logging.warning(f"Retrieving {batch_size} articles between {start_cursor - 1}:{end_cursor - 1}")
# Setup the database connectors
engine = get_mysql_engine("BATCHPAR_config", "mysqldb", db_name)
try_until_allowed(Base.metadata.create_all, engine)
# load arxiv subject categories to database
bucket = 'innovation-mapping-general'
cat_file = 'arxiv_classification/arxiv_subject_classifications.csv'
load_arxiv_categories("BATCHPAR_config", db_name, bucket, cat_file)
# process data
articles = []
article_cats = []
resumption_token = request_token()
for row in retrieve_arxiv_batch_rows(start_cursor, end_cursor, resumption_token):
with db_session(engine) as session:
categories = row.pop('categories', [])
articles.append(row)
for cat in categories:
                    # TODO: this is inefficient and should be queried once to a set. see
                    # iterative process.
try:
session.query(Category).filter(Category.id == cat).one()
except NoResultFound:
logging.warning(f"missing category: '{cat}' for article {row['id']}. Adding to Category table")
session.add(Category(id=cat))
article_cats.append(dict(article_id=row['id'], category_id=cat))
inserted_articles, existing_articles, failed_articles = insert_data(
"BATCHPAR_config", "mysqldb", db_name,
Base, Article, articles,
return_non_inserted=True)
logging.warning(f"total article categories: {len(article_cats)}")
inserted_article_cats, existing_article_cats, failed_article_cats = insert_data(
"BATCHPAR_config", "mysqldb", db_name,
Base, ArticleCategory, article_cats,
return_non_inserted=True)
# sanity checks before the batch is marked as done
logging.warning((f'inserted articles: {len(inserted_articles)} ',
f'existing articles: {len(existing_articles)} ',
f'failed articles: {len(failed_articles)}'))
logging.warning((f'inserted article categories: {len(inserted_article_cats)} ',
f'existing article categories: {len(existing_article_cats)} ',
f'failed article categories: {len(failed_article_cats)}'))
if len(inserted_articles) + len(existing_articles) + len(failed_articles) != batch_size:
raise ValueError(f'Inserted articles do not match original data.')
if len(inserted_article_cats) + len(existing_article_cats) + len(failed_article_cats) != len(article_cats):
raise ValueError(f'Inserted article categories do not match original data.')
# Mark the task as done
s3 = boto3.resource('s3')
s3_obj = s3.Object(*parse_s3_path(s3_path))
s3_obj.put(Body="")
if __name__ == "__main__":
log_stream_handler = logging.StreamHandler()
logging.basicConfig(handlers=[log_stream_handler, ],
level=logging.INFO,
format="%(asctime)s:%(levelname)s:%(message)s")
run()
| StarcoderdataPython |
5041495 | # Generated by Django 3.1.13 on 2021-08-03 14:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nomenclatoare', '0010_historicaltiparcbolta_tiparcbolta'),
('biserici', '0038_finisajaltar_finisajnaos_finisajportic_finisajpronaos_historicalfinisajaltar_historicalfinisajnaos_h'),
]
operations = [
migrations.RemoveField(
model_name='historicalcomponentaartistica',
name='iconostas_naos_altar_materiale',
),
migrations.AlterField(
model_name='componentaartistica',
name='alte_icoane_vechi',
field=models.BooleanField(default=False, verbose_name='Alte icoane vechi'),
),
migrations.AlterField(
model_name='componentaartistica',
name='alte_icoane_vechi_detalii',
field=models.TextField(blank=True, help_text='Alte icoane vechi observații', null=True),
),
migrations.AlterField(
model_name='componentaartistica',
name='elemente_detalii',
field=models.TextField(blank=True, null=True, verbose_name='Elemente sculptate / decoruri în biserică observații'),
),
migrations.RemoveField(
model_name='componentaartistica',
name='iconostas_naos_altar_materiale',
),
migrations.AddField(
model_name='componentaartistica',
name='iconostas_naos_altar_materiale',
field=models.ManyToManyField(blank=True, related_name='iconostasuri_naos_altar', to='nomenclatoare.Material', verbose_name='Material'),
),
migrations.AlterField(
model_name='historicalcomponentaartistica',
name='alte_icoane_vechi',
field=models.BooleanField(default=False, verbose_name='Alte icoane vechi'),
),
migrations.AlterField(
model_name='historicalcomponentaartistica',
name='alte_icoane_vechi_detalii',
field=models.TextField(blank=True, help_text='Alte icoane vechi observații', null=True),
),
migrations.AlterField(
model_name='historicalcomponentaartistica',
name='elemente_detalii',
field=models.TextField(blank=True, null=True, verbose_name='Elemente sculptate / decoruri în biserică observații'),
),
]
| StarcoderdataPython |
295533 | import json
import argparse
import numpy as np
import os
import ray
import train
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", "-g", type=str, help="number of gpus")
args, _ = parser.parse_known_args()
if int(args.gpu) > 4:
ray.init(address='auto', _node_ip_address=os.environ["ip_head"].split(":")[0],
_redis_password=os.environ["redis_password"])
else:
ray.init(address='auto', _redis_password='<PASSWORD>')
# number of experiments to train
for i in range(24):
print('experiment ' + str(i))
with open('../configs/'+str(args.gpu)+'gpu/config.json', 'r+') as f:
config = json.load(f)
config['lr']= config['lr'] + np.random.uniform(0, 0.00000009)
f.seek(0)
json.dump(config, f)
f.truncate()
train.main(args.gpu)
ray.shutdown()
| StarcoderdataPython |
27495 | import logging, traceback, time
from bottle import request
from snuggle import configuration
from snuggle import mediawiki
from snuggle import errors
from snuggle.data import types
from snuggle.web.util import responses, user_data
logger = logging.getLogger("snuggle.web.processing.users")
class Events:
def __init__(self, model):
self.model = model
def action(self, session, doc):
request = types.ActionRequest.serialize(doc)
def query(self, session, query):
"""
Queries for PUBLIC events and public event content only.
"""
try:
start = time.time()
event_docs = []
for event in self.model.events.query(**query):
if event.PUBLIC:
doc = event.serialize()
doc['id'] = None
event_docs.append(doc)
end = time.time()
except Exception:
logger.error(traceback.format_exc())
return responses.database_error("getting a set of events with query %s" % query)
query['after'] = max(
query.get('after', 0),
time.time() - configuration.snuggle['changes_synchronizer']['max_age']
)
try:
snuggler, data = user_data()
event = types.EventsQueried(
query,
end-start,
len(event_docs),
snuggler,
data
)
self.model.events.insert(event)
except Exception as e:
logger.error(traceback.format_exc())
return responses.success(event_docs)
| StarcoderdataPython |
290882 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pickle
from models.GSEM import GSEM
## definition of paths
negative_multipliers = [1, 5, 10, 15, 20, 30, 50, 100]
dataset_base_path = './data/'
result_path = './results/'
project_name = 'msb201126'
dataset_prefix = dataset_base_path
disease_path = dataset_prefix + '{0}_disease_similarities.pickle'.format(project_name)
split_prefix = dataset_base_path + '{0}_splits/'.format(project_name)
split_paths = [split_prefix + 'ratio_{0}/splits.pickle'.format(negative_multiplier) for negative_multiplier in negative_multipliers]
indication_path = dataset_prefix + '{0}_indications.pickle'.format(project_name)
## utils
def load(path):
'''
Loads any pickle file from disk.
'''
with open(path, 'rb') as handle:
loaded = pickle.load(handle)
return loaded
def threshold_similarities(matrix, percentile=75, threshold=None, remove_selfloops=False):
'''
Thresholds links in similarity matrix.
All those with a strength lower than the one computed
for the passed percentile will be cleaved off.
As an alternative you can directly pass a score threshold.
'''
if remove_selfloops:
matrix = matrix.copy()
matrix[np.diag_indices(len(matrix))] = 0
data = matrix.ravel()
if threshold is None:
threshold = np.percentile(data, percentile)
indexer = np.where(matrix < threshold)
new_matrix = matrix.copy()
new_matrix[indexer] = 0.0
return new_matrix
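# Quick illustrative check of threshold_similarities (toy values, not project
# data): entries below the 0.5 threshold are zeroed, everything else is kept.
_demo_matrix = np.array([[1.0, 0.2, 0.6],
                         [0.2, 1.0, 0.1],
                         [0.6, 0.1, 1.0]])
assert (threshold_similarities(_demo_matrix, threshold=0.5)
        == np.array([[1.0, 0.0, 0.6],
                     [0.0, 1.0, 0.0],
                     [0.6, 0.0, 1.0]])).all()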
## load data
disease_similarities = load(disease_path)
indications = load(indication_path)
## pick similarity
disease_target = 'phenotype'
## fitting routine
def cross_validate(splits, data, regularizers, fitting_params):
num_folds = len(splits)
I, F_col = data
tol, b, maxiter, mask_learning_sets = fitting_params
aupr_scores = list()
for i in range(num_folds):
indexers = splits[i]
model = GSEM(
I,
indexers,
regularizers,
F_col,
complete_output_smoothing=False)
fitted_model = model.fit(tol, maxiter, mask_learning_sets, verbose=False)
aupr_scores.append(model.aupr(mask_learning_sets))
print('... split {0} completed. ({1:.2f} AUPR)'.format(i, aupr_scores[-1]['test']))
return aupr_scores
## plotting routine
def plot_results(scores_dict, xlabel, ylabel, path):
codes = sorted(scores_dict.keys())
scores = [scores_dict[code] for code in codes]
x = np.arange(len(scores))
means = [score.mean() for score in scores]
plt.figure(dpi=300)
for c, code in enumerate(codes):
score_array = scores[c]
n = len(score_array)
mean = score_array.mean()
std = score_array.std()
plt.errorbar([c], [mean], yerr=[std], ecolor='black', elinewidth=0.75)
plt.scatter([c]*n, score_array, s=8, facecolors='none', edgecolors='black', linewidths=0.2)
plt.fill_between([c], [score_array.min()], [score_array.max()], lw=0.3, color='grey')
plt.plot(x, means, '-', linewidth=0.75, color='black')
plt.xticks(x, codes, fontsize=8)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(path, format='png')
plt.close()
## fitting parameters
tol = 1e-3
b = 1e-2
maxiter = int(3e3)
mask_learning_sets = {'test'}
fitting_params = (tol, b, maxiter, mask_learning_sets)
## model parameters and data
regularizers = {
'alpha': 1.0,
'beta': 0.1,
'lamda': 0.0,
'gamma': 1e4}
tau = 0.25
disease_weight_matrix = threshold_similarities(disease_similarities[disease_target], threshold=tau, remove_selfloops=False)
data = (indications, disease_weight_matrix)
## learning
print('Performing 10-folds cross-validation across different settings...')
print('\n-----------------------------------------------------------------\n')
scores = dict()
for s, split_path in enumerate(split_paths):
print('\n=======------- ratio {0}:1 -------=======\n\n'.format(negative_multipliers[s]))
splits = load(split_path)
aupr_scores = cross_validate(splits, data, regularizers, fitting_params)
test_scores = np.asarray([score['test'] for score in aupr_scores])
scores[negative_multipliers[s]] = test_scores
print('\n\t mean TEST AUPR: {0}\n\n'.format(test_scores.mean()))
## save results
plot_results(scores, 'negative multiplier', 'AUPR scores', result_path+'plot.png')
with open(result_path+'scores.pickle', 'wb') as handle:
pickle.dump(scores, handle)
print('------------------------------------\n')
print('Results saved at folder {0} .'.format(result_path))
| StarcoderdataPython |
215861 | from time import sleep
import pytest
import requests
import utils
import client_simulator_api_tests as cs
from config import BASE_URL
@pytest.fixture(autouse=True)
def run_before_each_test():
utils.remove_everything_from_db()
# ^ before each test
yield
# v after each test
@pytest.fixture(scope='module', autouse=True)
def after_all():
# ^ Will be executed before the first test
yield
# v Will be executed after the last test
utils.remove_everything_from_db()
@pytest.mark.parametrize("customer_categories", [["gov", "public"], ["corporate", "sme"]])
def test_set_process(customer_categories):
""" Test if setting of new process with variants works """
utils.post_processes_a_b("helicopter_license", "./resources/bpmn/helicopter/helicopter_vA.bpmn",
"./resources/bpmn/helicopter/helicopter_vB.bpmn",
customer_categories=customer_categories, default_version='a',
path_history="./resources/bpmn/helicopter/helicopter_vA_100.json")
assert utils.get_process_count() == 1
def test_set_2_processes():
""" Test if setting of new process with variants works """
utils.post_processes_a_b("helicopter_license", "./resources/bpmn/helicopter/helicopter_vA.bpmn",
"./resources/bpmn/helicopter/helicopter_vB.bpmn",
customer_categories=["public", "gov"], default_version='a',
path_history="./resources/bpmn/helicopter/helicopter_vA_100.json")
utils.post_processes_a_b("fast",
"./resources/bpmn/fast_a_better/fast_a_better_vA.bpmn",
"./resources/bpmn/fast_a_better/fast_a_better_vB.bpmn",
customer_categories=["public", "gov"], default_version='b',
path_history="./resources/bpmn/fast_a_better/fast_a_better_vA_100.json")
assert utils.get_process_count() == 2
def test_get_active_process_metadata():
""" Test if receiving of metadata about currently active process works """
customer_category_groups = ["gov-public", "corporate-sme"]
for customer_categories in customer_category_groups:
# given
utils.post_processes_a_b("helicopter_license", "./resources/bpmn/helicopter/helicopter_vA.bpmn",
"./resources/bpmn/helicopter/helicopter_vB.bpmn",
customer_categories=customer_categories.split('-'),
default_version='a',
path_history="./resources/bpmn/helicopter/helicopter_vA_100.json")
# when
response = requests.get(BASE_URL + "/process/active/meta")
# then
response_json = response.json()
assert response.status_code == requests.codes.ok
assert response_json.get("name") == "helicopter_license"
assert response_json.get('default_version') == 'a'
assert response_json.get('id') is not None
assert response_json.get('customer_categories') == customer_categories # should be alphabetical
assert response_json.get('default_interarrival_time_history') == 64.18521
assert response_json.get('experiment_state') == "Running, before first batch policy has been set"
assert response_json.get('datetime_added') is not None
assert response_json.get('datetime_decided') is None
assert response_json.get('number_batch_policies') == 0
assert response_json.get('number_instances') == 0
assert response_json.get("winning_versions") is None
assert response_json.get("winning_reason") is None
def test_get_active_process_variants_files():
""" Test of retrieval of active bpmn files works """
# given
test_set_2_processes()
for version in ["a", "b"]:
# given
response_given = requests.get(BASE_URL + "/process/active/meta")
response_given_json = response_given.json()
assert response_given.status_code == requests.codes.ok
assert response_given_json.get("name") == "fast"
# when
param = {"id": response_given_json.get("id")}
response = requests.get(BASE_URL + "/process/variant-file/" + version, params=param)
# then
assert response.headers['Content-Disposition'].split(";")[0] == "attachment"
assert response.headers['Content-Disposition'].split(";")[1].split(".")[1] == "bpmn"
def test_files_are_overwritten():
"""
When a process with the same name is posted, the old one should be replaced
in the filesystem as well as in the db.
"""
# given
utils.post_processes_a_b("helicopter_license", "./resources/bpmn/helicopter/helicopter_vA.bpmn",
"./resources/bpmn/helicopter/helicopter_vB.bpmn",
customer_categories=["public", "gov"], default_version='b',
path_history="./resources/bpmn/helicopter/helicopter_vA_100.json")
# when
utils.post_processes_a_b("helicopter_license", "./resources/bpmn/helicopter/helicopter_vA.bpmn",
"./resources/bpmn/helicopter/helicopter_vB.bpmn",
customer_categories=["public", "gov"], default_version='b',
path_history="./resources/bpmn/helicopter/helicopter_vA_100.json")
# then
assert utils.get_process_count() == 1
def test_no_process_active_meta_():
""" When there is no active process/experiment, the active/meta endpoint should return 404 """
response = requests.get(BASE_URL + "/process/active/meta")
assert response.status_code == requests.codes.not_found
def test_experiment_state_manual_decision():
utils.post_processes_a_b("fast", "./resources/bpmn/fast_a_better/fast_a_better_vA.bpmn",
"./resources/bpmn/fast_a_better/fast_a_better_vB.bpmn",
customer_categories=["public", "gov"], default_version='a',
path_history="./resources/bpmn/fast_a_better/fast_a_better_vA_100.json")
utils.post_bapol_currently_active_process(utils.example_batch_policy)
currently_active_p_id = utils.get_currently_active_process_id()
cs.start_client_simulation(5, 1)
assert utils.get_sum_of_started_instances_in_batch(currently_active_p_id) == 5
utils.post_manual_decision('a')
exp_state = utils.get_currently_active_process_meta().get('experiment_state')
assert 'Manual' in exp_state and 'Done' in exp_state
@pytest.mark.parametrize("customer_categories", [["gov", "public"], ["corporate", "sme"]])
def test_experiment_state_cool_off(customer_categories):
utils.post_processes_a_b("fast", "./resources/bpmn/fast_a_better/fast_a_better_vA.bpmn",
"./resources/bpmn/fast_a_better/fast_a_better_vB.bpmn",
customer_categories=customer_categories, default_version='a',
path_history="./resources/bpmn/fast_a_better/fast_a_better_vA_100.json")
utils.post_bapol_currently_active_process(utils.example_batch_policy_size(5, customer_categories))
cs.start_client_simulation(5, 1)
response_post_cool_off = requests.post(BASE_URL + "/process/active/cool-off")
assert response_post_cool_off.status_code == requests.codes.ok
assert 'Cool-Off' in response_post_cool_off.json().get('experimentState')
exp_state = utils.get_currently_active_process_meta().get('experiment_state')
assert 'Cool-Off' in exp_state
def test_cool_off_only_after_batch_finished():
utils.post_processes_a_b("fast", "./resources/bpmn/fast_a_better/fast_a_better_vA.bpmn",
"./resources/bpmn/fast_a_better/fast_a_better_vB.bpmn",
customer_categories=["public", "gov"], default_version='a',
path_history="./resources/bpmn/fast_a_better/fast_a_better_vA_100.json")
# not a batch yet
response_post_cool_off = requests.post(BASE_URL + "/process/active/cool-off")
assert response_post_cool_off.status_code == requests.codes.not_found
    # batch not yet finished
utils.post_bapol_currently_active_process(utils.example_batch_policy_size(5, ["gov", "public"]))
cs.start_client_simulation(3, 1)
response_post_cool_off = requests.post(BASE_URL + "/process/active/cool-off")
assert response_post_cool_off.status_code == requests.codes.not_found
cs.start_client_simulation(2, 1)
# finish batch
response_post_cool_off = requests.post(BASE_URL + "/process/active/cool-off")
assert response_post_cool_off.status_code == requests.codes.ok
@pytest.mark.parametrize("customer_categories", [["gov", "public"], ["corporate", "sme"]])
def test_cool_off_period(customer_categories):
utils.post_processes_a_b("fast", "./resources/bpmn/fast_a_better/fast_a_better_vA.bpmn",
"./resources/bpmn/fast_a_better/fast_a_better_vB.bpmn",
customer_categories=customer_categories, default_version='a',
path_history="./resources/bpmn/fast_a_better/fast_a_better_vA_100.json")
utils.post_bapol_currently_active_process(utils.example_batch_policy_size(5, customer_categories))
cs.start_client_simulation(5, 1)
response_post_cool_off = requests.post(BASE_URL + "/process/active/cool-off")
assert response_post_cool_off.status_code == requests.codes.ok
    # check whether cool-off is done (it should not be, since the
    # last instance of the first batch has not been evaluated yet)
final_prop_response = requests.get(BASE_URL + "/batch-policy-proposal/final",
params={'process-id': utils.get_currently_active_process_id()})
assert final_prop_response.status_code == requests.codes.not_found
decision_json = {
"decision": [
{
"customer_category": customer_categories[0],
"winning_version": "b"
},
{
"customer_category": customer_categories[1],
"winning_version": "a"
}
]
}
assert requests.post(BASE_URL + "/process/active/winning",
json=decision_json).status_code == requests.codes.not_found
# make sure that meta is in cool-off
meta = utils.get_currently_active_process_meta()
assert "In Cool-Off" == meta.get('experiment_state')
# start some more instances to trigger collection and learning with last open instances
cs.start_client_simulation(20, 0.5)
# cool off should be done now, check for final bapol proposal
meta = utils.get_currently_active_process_meta()
assert "Cool-Off over, waiting for final decision" == meta.get('experiment_state')
final_prop_response = requests.get(BASE_URL + "/batch-policy-proposal/final",
params={'process-id': utils.get_currently_active_process_id()})
assert final_prop_response.status_code == requests.codes.ok
# winning version should be able to be set
set_winning_response = requests.post(BASE_URL + "/process/active/winning", json=decision_json)
assert set_winning_response.status_code == requests.codes.ok
assert "Done" in set_winning_response.json().get('experiment_state') \
and "ended normally" in set_winning_response.json().get('experiment_state')
# check whether winning version and experiment state are correct in metadata
meta = utils.get_currently_active_process_meta()
assert "Done" in meta.get('experiment_state') and "ended normally" in meta.get('experiment_state')
assert meta.get('winning_versions') == [
{
"customer_category": customer_categories[0],
"winning_version": "b"
},
{
"customer_category": customer_categories[1],
"winning_version": "a"
}
]
def test_cool_off_period_already_all_evaluated():
""" Test whether entering cool-off after exp inst have already been evaluated works """
utils.post_processes_a_b("fast", "./resources/bpmn/fast_a_better/fast_a_better_vA.bpmn",
"./resources/bpmn/fast_a_better/fast_a_better_vB.bpmn",
customer_categories=["public", "gov"], default_version='a',
path_history="./resources/bpmn/fast_a_better/fast_a_better_vA_100.json")
utils.post_bapol_currently_active_process(utils.example_batch_policy_size(5, ["gov", "public"]))
process_id_active = utils.get_currently_active_process_id()
cs.start_client_simulation(5, 1)
sleep(20)
response_manual_trigger = requests.post(BASE_URL + "/process/active/trigger-fetch-learn")
assert response_manual_trigger.status_code == requests.codes.ok
response_progress = requests.get(BASE_URL + "/instance-router/aggregate-data/evaluation-progress",
params={"process-id": process_id_active})
assert response_progress.status_code == requests.codes.ok
assert response_progress.json().get("alreadyEvaluatedPerc") == 1.0
response_post_cool_off = requests.post(BASE_URL + "/process/active/cool-off")
assert response_post_cool_off.status_code == requests.codes.ok
meta = utils.get_currently_active_process_meta()
assert "Cool-Off over, waiting for final decision" == meta.get('experiment_state')
final_prop_response = requests.get(BASE_URL + "/batch-policy-proposal/final",
params={'process-id': utils.get_currently_active_process_id()})
assert final_prop_response.status_code == requests.codes.ok
# winning version should be able to be set
decision_json = {
"decision": [
{
"customer_category": "public",
"winning_version": "a"
},
{
"customer_category": "gov",
"winning_version": "b"
}
]
}
set_winning_response = requests.post(BASE_URL + "/process/active/winning", json=decision_json)
assert set_winning_response.status_code == requests.codes.ok
assert "Done" in set_winning_response.json().get('experiment_state') \
and "ended normally" in set_winning_response.json().get('experiment_state')
| StarcoderdataPython |
3532281 | # Copyright 2014 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Printer(object):
def __init__(self, print_, should_overwrite, cols=80):
self.print_ = print_
self.should_overwrite = should_overwrite
self.cols = cols
self.last_line = ''
def flush(self):
if self.last_line:
self.print_('')
self.last_line = ''
def update(self, msg, elide=True):
if elide and len(msg) > self.cols - 5:
msg = msg[:self.cols - 5] + ' ...'
if self.should_overwrite and self.last_line:
self.print_('\r' + ' ' * len(self.last_line) + '\r', end='')
elif self.last_line:
self.print_('')
self.print_(msg, end='')
last_nl = msg.rfind('\n')
self.last_line = msg[last_nl + 1:]
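# Illustrative usage sketch, not part of the original module: the lambda below
# stands in for whatever print function the caller normally injects, and the
# column width is an arbitrary choice.
if __name__ == "__main__":
    printer = Printer(print_=lambda msg, end="\n": print(msg, end=end),
                      should_overwrite=True, cols=40)
    printer.update("step 1 of 2")
    printer.update("step 2 of 2")
    printer.flush()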
| StarcoderdataPython |
1709264 | <filename>api/esprr_api/tests/test_compute.py
import pandas as pd
import numpy as np
import pytest
from esprr_api import compute, models, settings
from esprr_api.data import nsrdb
@pytest.mark.parametrize(
"args,kwargs",
(
((), {}),
((0,), {}),
((9,), {"a": "b"}),
((), {"pressure": 883232}),
((), {"temperature": pd.Series([0, 1, 2])}),
),
)
def test_cachedlocation(mocker, args, kwargs):
solpos = mocker.spy(compute.Location, "get_solarposition")
loc = compute.CachedLocation(32, -110)
times = pd.date_range("2019-01-01T00:00Z", freq="5min", periods=3)
assert solpos.call_count == 0
t1 = loc.get_solarposition(times, *args, **kwargs)
assert solpos.call_count == 1
t2 = loc.get_solarposition(times, *args, **kwargs)
assert solpos.call_count == 1
pd.testing.assert_frame_equal(t1, t2)
loc.get_solarposition(times[:-1])
assert solpos.call_count == 2
loc.get_solarposition(times[:-1])
assert solpos.call_count == 2
loc.get_solarposition(times, *args, **kwargs)
assert solpos.call_count == 3
def test_compute_single_location(system_def, mocker):
solpos = mocker.spy(compute.Location, "get_solarposition")
df = pd.DataFrame(
{
"ghi": [1100, 0],
"dni": [1000, 0],
"dhi": [100, 0],
"temp_air": [25, 25],
"wind_speed": [10, 10],
},
index=pd.DatetimeIndex(
[pd.Timestamp("2021-05-03T19:00Z"), pd.Timestamp("2021-05-04T07:00Z")]
),
)
data = models.SystemData(
location=dict(latitude=32.02, longitude=-110.9, altitude=800),
fraction_of_total=0.2,
weather_data=df,
clearsky_data=df,
)
out = compute.compute_single_location(system_def, data)
assert isinstance(out, pd.DataFrame)
assert len(out) == 2
assert set(out.columns) == {"ac_power", "clearsky_ac_power"}
assert out.ac_power.iloc[0] == 2.0
assert out.ac_power.iloc[1] == 0.0
assert out.clearsky_ac_power.iloc[0] == 2.0
assert out.clearsky_ac_power.iloc[1] == 0.0
assert solpos.call_count == 1 # cachelocation working
@pytest.mark.parametrize(
"tracker",
[
models.FixedTracking(tilt=20, azimuth=180),
models.SingleAxisTracking(
axis_tilt=20, axis_azimuth=180, gcr=0.3, backtracking=True
),
],
)
def test_compute_total_system_power(ready_dataset, system_def, mocker, tracker):
single = mocker.spy(compute, "compute_single_location")
system_def.tracking = tracker
out = compute.compute_total_system_power(system_def, ready_dataset)
assert isinstance(out, pd.DataFrame)
assert set(out.columns) == {"ac_power", "clearsky_ac_power"}
assert abs(out.ac_power.max() - 10.0) < 1e-6
assert abs(out.clearsky_ac_power.max() - 10.0) < 1e-6
assert out.ac_power.min() == 0.0
assert single.call_count == 12
def test_daytime_limits():
ind = pd.date_range("2020-01-01T00:00Z", freq="5min", periods=10)
zen = pd.Series([100, 95, 90, 85, 80, 80, 90, 95, 100, 120], index=ind)
exp = pd.Series([0, 0, 1, 1, 1, 1, 1, 1, 0, 0], index=ind).astype(bool)
out = compute._daytime_limits(5, zen)
pd.testing.assert_series_equal(out, exp)
ten = pd.Series(
[0, 1, 1, 1, 1],
index=pd.date_range("2020-01-01T00:00Z", freq="10min", periods=5),
).astype(bool)
tenout = compute._daytime_limits(10, zen)
pd.testing.assert_series_equal(ten, tenout)
pd.testing.assert_series_equal(
pd.Series(list(range(5)) + list(range(5, 0, -1)), index=ind).diff()[exp],
pd.Series(
[1.0] * 4 + [-1.0] * 2, # 90, 95
index=pd.date_range("2020-01-01T00:10Z", freq="5min", periods=6),
),
)
def test_compute_statistics(system_def):
data = pd.DataFrame(
{"ac_power": [10, 11, 12, 11], "clearsky_ac_power": [12, 11, 12, 11]},
index=pd.DatetimeIndex(
[
"2019-04-01T12:00-07:00",
"2019-04-01T13:00-07:00",
"2019-05-01T12:00-07:00",
"2019-05-01T13:00-07:00",
]
),
)
out = compute.compute_statistics(system_def, data)
assert isinstance(out, pd.DataFrame)
assert len(out.columns) == 4
assert len(out) == 5 * 6 * 2
def test_get_dataset(nsrdb_data, dataset_name):
settings.nsrdb_data_path["NSRDB_2019"] = nsrdb_data
ds = compute._get_dataset(dataset_name)
assert isinstance(ds, nsrdb.NSRDBDataset)
ds.grid
def test_run_job_no_data(system_id, dataset_name, auth0_id, mocker, ready_dataset):
mocker.patch("esprr_api.compute._get_dataset", return_value=ready_dataset)
update = mocker.patch("esprr_api.storage.StorageInterface.update_system_model_data")
compute.run_job(system_id, dataset_name, auth0_id)
assert update.call_count == 0
def test_run_job(
system_id, dataset_name, auth0_id, mocker, ready_dataset, add_example_db_data
):
mocker.patch("esprr_api.compute._get_dataset", return_value=ready_dataset)
update = mocker.patch("esprr_api.storage.StorageInterface.update_system_model_data")
compute.run_job(system_id, dataset_name, auth0_id)
assert update.call_count == 1
cargs = update.call_args[0]
assert cargs[0] == system_id
assert cargs[1] == dataset_name
assert cargs[3].startswith(b"ARROW")
assert cargs[4].startswith(b"ARROW")
def test_run_job_badid(
other_system_id, dataset_name, auth0_id, mocker, ready_dataset, add_example_db_data
):
mocker.patch("esprr_api.compute._get_dataset", return_value=ready_dataset)
update = mocker.patch("esprr_api.storage.StorageInterface.update_system_model_data")
compute.run_job(other_system_id, dataset_name, auth0_id)
assert update.call_count == 0
def test_run_job_error(
system_id,
dataset_name,
auth0_id,
mocker,
ready_dataset,
add_example_db_data,
):
update = mocker.patch("esprr_api.storage.StorageInterface.update_system_model_data")
mocker.patch("esprr_api.compute._get_dataset", return_value=ready_dataset)
mocker.patch(
"esprr_api.compute.compute_statistics", side_effect=ValueError("test err")
)
with pytest.raises(ValueError):
compute.run_job(system_id, dataset_name, auth0_id)
assert update.call_count == 1
cargs = update.call_args[0]
assert cargs[0] == system_id
assert cargs[1] == dataset_name
assert cargs[3] is None
assert cargs[4] is None
assert cargs[5] == {"message": "test err"}
variable_mult_df = pd.DataFrame(
{
"ac_power": (
[0] * (12 * 6)
+ [5] * (12 * 6)
+ [6] * (12 * 6)
+ [0] * (12 * 6)
+ [0] * (12 * 6)
+ [12] * (12 * 6)
+ [13] * (12 * 6)
+ [0] * (12 * 6)
+ [0] * (12 * 6)
+ [8] * (12 * 3)
+ [4] * (12 * 3)
+ [10] * (12 * 3)
+ [11] * (12 * 3)
+ [0] * (12 * 6)
+ [0] * (12 * 6)
+ [8] * (12 * 3)
+ [4] * (12 * 3)
+ [13] * (12 * 3)
+ [13] * (12 * 3)
+ [0] * (12 * 6)
+ [0]
),
"clearsky_ac_power": (
[0] * (12 * 6)
+ [12] * (12 * 6)
+ [13] * (12 * 6)
+ [0] * (12 * 6)
+ [0] * (12 * 6)
+ [12] * (12 * 6)
+ [13] * (12 * 6)
+ [0] * (12 * 6)
+ [0] * (12 * 6)
+ [12] * (12 * 6)
+ [13] * (12 * 6)
+ [0] * (12 * 6)
+ [0] * (12 * 6)
+ [12] * (12 * 6)
+ [13] * (12 * 6)
+ [0] * (12 * 6)
+ [0]
),
},
index=pd.date_range("2019-01-01", "2019-01-05", freq="5T", tz="utc"),
)
@pytest.mark.parametrize("data", [variable_mult_df])
def test_calculate_variable_multiplier(data):
m = compute.calculate_variable_multiplier(data)
expected_result = pd.DataFrame(
{"ac_power": [1.0, np.nan, 0.66981919, 0.5, np.nan]}, index=m.index
)
assert isinstance(m, pd.Series)
assert len(m) == (len(data) - 1) / (12 * 24) + 1
assert m.between(0.5, 1).any()
pd.testing.assert_series_equal(m, expected_result["ac_power"])
| StarcoderdataPython |
3227035 | #!/usr/bin/env python3
from .constant_mean import ConstantMean
from .constant_mean_grad import ConstantMeanGrad
from .linear_mean import LinearMean
from .mean import Mean
from .multitask_mean import MultitaskMean
from .zero_mean import ZeroMean
__all__ = ["Mean", "ConstantMean", "ConstantMeanGrad", "LinearMean", "MultitaskMean", "ZeroMean"]
| StarcoderdataPython |
9651294 | <reponame>jkbm/esports<filename>hsapp/misc.py
import requests
import urllib.request
from datetime import datetime
import time
from bs4 import BeautifulSoup
from .models import Match, Tournament, Player, Game, Group, Deck, Deckset
from django.db.models import Q
from django.core.files import File
import json
import re
from xml.etree import ElementTree
def fill_from_text(tpk):
t = Tournament.objects.get(pk=tpk)
players = ['Chakki', 'Nostam', 'Talion', 'AlSkyHigh', 'chessdude123', 'Snail', 'wtybill']
casters = []
for p in players:
obj, created = Player.objects.get_or_create(name=p)
t.players.add(obj)
t.save()
| StarcoderdataPython |
11382284 | <reponame>mattnickerson993/clinnotes2<filename>clinnotes/reflections/models.py<gh_stars>0
from django.db import models
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.conf import settings
from clinnotes.users.models import EpisodeOfCare, Patient
# Create your models here.
class Reflection(models.Model):
CATEGORY_CHOICES = [
('GE', 'General'),
('IM', 'Areas to Improve'),
('SU', 'Success'),
('FA', 'Failure'),
('O', 'Other'),
]
author = models.ForeignKey("users.User", on_delete=models.CASCADE, related_name="reflections")
    date_posted = models.DateTimeField(default=timezone.now)
    last_edit = models.DateTimeField(auto_now=True)
title = models.CharField(max_length=80)
details = models.TextField()
    category = models.CharField(max_length=20, choices=CATEGORY_CHOICES, default='GE')
episode_of_care = models.ForeignKey(EpisodeOfCare, on_delete=models.CASCADE, null=True, blank=True, related_name="reflections")
patient = models.ForeignKey(Patient, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return f"{self.title} by {self.author}"
class GuidedReflection(models.Model):
CATEGORY_CHOICES = [
('GE', 'General'),
('IM', 'Areas to Improve'),
('SU', 'Success'),
        ('FA', 'Failure'),
('O', 'Other'),
]
author = models.ForeignKey("users.User", on_delete=models.CASCADE, related_name="guided_reflections")
    date_posted = models.DateTimeField(default=timezone.now)
    last_edit = models.DateTimeField(auto_now=True)
title = models.CharField(max_length=80)
question1 = models.TextField()
question2 = models.TextField()
question3 = models.TextField()
    category = models.CharField(max_length=20, choices=CATEGORY_CHOICES, default='GE')
episode_of_care = models.ForeignKey(EpisodeOfCare, on_delete=models.CASCADE, null=True, blank=True, related_name="guided_reflections")
patient = models.ForeignKey(Patient, on_delete=models.CASCADE, null=True, blank=True)
| StarcoderdataPython |
11347081 | <filename>model_h.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from modules import CoAttentionMessagePassingNetwork
class DrugDrugInteractionNetworkH(nn.Module):
def __init__(
self,
n_atom_type, n_bond_type,
d_node, d_edge, d_atom_feat, d_hid,
n_prop_step,
n_side_effect=None,
n_lbls = 12,
n_head=1, dropout=0.1,
update_method='res', score_fn='trans'):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.atom_proj = nn.Linear(d_node + d_atom_feat, d_node)
self.atom_emb = nn.Embedding(n_atom_type, d_node, padding_idx=0)
self.bond_emb = nn.Embedding(n_bond_type, d_edge, padding_idx=0)
nn.init.xavier_normal_(self.atom_emb.weight)
nn.init.xavier_normal_(self.bond_emb.weight)
self.side_effect_emb = None
self.side_effect_norm_emb = None
if n_side_effect is not None:
self.side_effect_emb = nn.Embedding(n_side_effect, d_hid)
self.side_effect_norm_emb = nn.Embedding(n_side_effect, d_hid)
nn.init.xavier_normal_(self.side_effect_emb.weight)
nn.init.xavier_normal_(self.side_effect_norm_emb.weight)
self.encoder = CoAttentionMessagePassingNetwork(
d_hid=d_hid, n_head=n_head, n_prop_step=n_prop_step,
update_method=update_method, dropout=dropout)
assert update_method == 'res'
assert score_fn == 'trans'
self.head_proj = nn.Linear(d_hid, d_hid, bias=False)
self.tail_proj = nn.Linear(d_hid, d_hid, bias=False)
nn.init.xavier_normal_(self.head_proj.weight)
nn.init.xavier_normal_(self.tail_proj.weight)
self.lbl_predict = nn.Linear(d_hid, n_lbls)
self.__score_fn = score_fn
@property
def score_fn(self):
return self.__score_fn
def forward(
self,
seg_m1, atom_type1, atom_feat1, bond_type1,
inn_seg_i1, inn_idx_j1, out_seg_i1, out_idx_j1,
seg_m2, atom_type2, atom_feat2, bond_type2,
inn_seg_i2, inn_idx_j2, out_seg_i2, out_idx_j2,
se_idx, drug_se_seg):
atom1 = self.dropout(self.atom_comp(atom_feat1, atom_type1))
atom2 = self.dropout(self.atom_comp(atom_feat2, atom_type2))
bond1 = self.dropout(self.bond_emb(bond_type1))
bond2 = self.dropout(self.bond_emb(bond_type2))
d1_vec, d2_vec = self.encoder(
seg_m1, atom1, bond1, inn_seg_i1, inn_idx_j1, out_seg_i1, out_idx_j1,
seg_m2, atom2, bond2, inn_seg_i2, inn_idx_j2, out_seg_i2, out_idx_j2)
# TODO: what does this do? select pred for specific se?
d1_vec = d1_vec.index_select(0, drug_se_seg)
d2_vec = d2_vec.index_select(0, drug_se_seg)
h_d1_vec = self.head_proj(d1_vec)
h_d2_vec = self.head_proj(d2_vec)
t_d1_vec = self.tail_proj(d1_vec)
t_d2_vec = self.tail_proj(d2_vec)
if self.side_effect_emb is not None:
se_vec = self.dropout(self.side_effect_emb(se_idx))
se_norm = self.dropout(self.side_effect_norm_emb(se_idx))
h_d1_vec = self.transH_proj(h_d1_vec, se_norm)
h_d2_vec = self.transH_proj(h_d2_vec, se_norm)
t_d1_vec = self.transH_proj(t_d1_vec, se_norm)
t_d2_vec = self.transH_proj(t_d2_vec, se_norm)
e_vecs = [se_vec, d1_vec, d2_vec,
h_d1_vec, h_d2_vec, t_d1_vec, t_d2_vec]
fwd_score = self.cal_translation_score(
head=h_d1_vec,
tail=t_d2_vec,
rel=se_vec)
bwd_score = self.cal_translation_score(
head=h_d2_vec,
tail=t_d1_vec,
rel=se_vec)
score = fwd_score + bwd_score
o_loss = self.cal_orthogonal_loss(se_vec, se_norm)
n_loss = sum([self.cal_vec_norm_loss(v) for v in e_vecs])
#return score, o_loss + n_loss
return score, o_loss + n_loss, se_idx, d1_vec, d2_vec
else:
pred1 = self.lbl_predict(d1_vec)
pred2 = self.lbl_predict(d2_vec)
return pred1,pred2, d1_vec, d2_vec
def embed(self, seg_m1, atom_type1, atom_feat1, bond_type1,
inn_seg_i1, inn_idx_j1, out_seg_i1, out_idx_j1,
seg_m2, atom_type2, atom_feat2, bond_type2,
inn_seg_i2, inn_idx_j2, out_seg_i2, out_idx_j2,
se_idx=None, drug_se_seg=None):
atom1 = self.atom_comp(atom_feat1, atom_type1)
atom2 = self.atom_comp(atom_feat2, atom_type2)
bond1 = self.bond_emb(bond_type1)
bond2 = self.bond_emb(bond_type2)
d1_vec, d2_vec = self.encoder(
seg_m1, atom1, bond1, inn_seg_i1, inn_idx_j1, out_seg_i1, out_idx_j1,
seg_m2, atom2, bond2, inn_seg_i2, inn_idx_j2, out_seg_i2, out_idx_j2)
d1_vec = d1_vec.index_select(0, drug_se_seg)
d2_vec = d2_vec.index_select(0, drug_se_seg)
return d1_vec, d2_vec
def transH_proj(self, original, norm):
return original - torch.sum(original * norm, dim=1, keepdim=True) * norm
def atom_comp(self, atom_feat, atom_idx):
atom_emb = self.atom_emb(atom_idx)
node = self.atom_proj(torch.cat([atom_emb, atom_feat], -1))
return node
def cal_translation_score(self, head, tail, rel):
return torch.norm(head + rel - tail, dim=1)
def cal_vec_norm_loss(self, vec, dim=1):
norm = torch.norm(vec, dim=dim)
return torch.mean(F.relu(norm - 1))
def cal_orthogonal_loss(self, rel_emb, norm_emb):
a = torch.sum(norm_emb * rel_emb, dim=1, keepdim=True) ** 2
b = torch.sum(rel_emb ** 2, dim=1, keepdim=True) + 1e-6
return torch.sum(a / b)
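# Illustrative construction sketch, not part of the original module: every
# hyperparameter value below is an assumption, chosen only to show the shape of
# the constructor call.
#
#   model = DrugDrugInteractionNetworkH(
#       n_atom_type=100, n_bond_type=20,
#       d_node=32, d_edge=32, d_atom_feat=3, d_hid=32,
#       n_prop_step=3, n_side_effect=964)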
| StarcoderdataPython |
3219042 | <reponame>trenton/aws-sam-cli<gh_stars>1-10
"""
Init module to scaffold a project app from a template
"""
import itertools
import logging
from cookiecutter.exceptions import CookiecutterException
from cookiecutter.main import cookiecutter
from samcli.local.common.runtime_template import RUNTIME_DEP_TEMPLATE_MAPPING
from samcli.local.init.exceptions import GenerateProjectFailedError
LOG = logging.getLogger(__name__)
def generate_project(
location=None, runtime="nodejs10.x", dependency_manager=None, output_dir=".", name="sam-sample-app", no_input=False
):
"""Generates project using cookiecutter and options given
Generate project scaffolds a project using default templates if user
doesn't provide one via location parameter. Default templates are
automatically chosen depending on runtime given by the user.
Parameters
----------
location: Path, optional
Git, HTTP, Local path or Zip containing cookiecutter template
(the default is None, which means no custom template)
runtime: str, optional
Lambda Runtime (the default is "nodejs", which creates a nodejs project)
dependency_manager: str, optional
        Dependency Manager for the Lambda Runtime Project (the default is "npm" for a "nodejs" Lambda runtime)
output_dir: str, optional
Output directory where project should be generated
(the default is ".", which implies current folder)
name: str, optional
Name of the project
(the default is "sam-sample-app", which implies a project named sam-sample-app will be created)
no_input : bool, optional
Whether to prompt for input or to accept default values
(the default is False, which prompts the user for values it doesn't know for baking)
Raises
------
GenerateProjectFailedError
If the process of baking a project fails
"""
template = None
for mapping in list(itertools.chain(*(RUNTIME_DEP_TEMPLATE_MAPPING.values()))):
if runtime in mapping["runtimes"] or any([r.startswith(runtime) for r in mapping["runtimes"]]):
if not dependency_manager or dependency_manager == mapping["dependency_manager"]:
template = mapping["init_location"]
break
if not template:
msg = "Lambda Runtime {} does not support dependency manager: {}".format(runtime, dependency_manager)
raise GenerateProjectFailedError(project=name, provider_error=msg)
params = {"template": location if location else template, "output_dir": output_dir, "no_input": no_input}
LOG.debug("Parameters dict created with input given")
LOG.debug("%s", params)
if not location and name is not None:
params["extra_context"] = {"project_name": name, "runtime": runtime}
params["no_input"] = True
LOG.debug("Parameters dict updated with project name as extra_context")
LOG.debug("%s", params)
try:
LOG.debug("Baking a new template with cookiecutter with all parameters")
cookiecutter(**params)
except CookiecutterException as e:
raise GenerateProjectFailedError(project=name, provider_error=e)
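# Illustrative call sketch, not part of the original module: the runtime,
# dependency manager, and project name below are assumptions, and a real call
# would scaffold files on disk via cookiecutter.
#
#   generate_project(runtime="python3.7", dependency_manager="pip",
#                    output_dir=".", name="my-sam-app", no_input=True)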
| StarcoderdataPython |
8020119 | import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from ECAgent.Core import Model
# Can be used to customize CSS of Visualizer
external_stylesheets = ['https://rawgit.com/BrandonGower-Winter/ABMECS/master/Assets/VisualizerCustom.css',
'https://rawgit.com/BrandonGower-Winter/ABMECS/master/Assets/VisualizerBase.css']
class VisualInterface:
"""
    This is the base class for Visual Interfaces.
    VisualInterfaces utilize the dash package to create a WebApp to allow individuals to view the results of their
model once a run has been completed or in real-time.
There are a few things to note about the VisualInterface class:
    * By calling the VisualInterface.__init__() method, your WebApp will have features set up for you: namely, play,
    stop, restart and step. It'll also include a banner with your System's name as a title on it.
* A frameFreq of 0.0 means that your system is static and will only ever be constructed once.
If you want a dynamic WebApp, you must set the frameFreq to some non-zero positive number. If your frameFreq is 0.0,
the play, stop, restart and step buttons will not be added to your WebApp.
* The server/WebApp will start once you call the VisualInterface.app.run_server().
* The frameFreq property determines how frequently (in milliseconds) the SystemManager.executeSystems() method is
    called and how often your graphs will update.
"""
def __init__(self, name, model: Model, frameFreq: float = 0.0):
self.name = name
self.model = model
self.frameFreq = frameFreq
self.running = False # Is used to determine whether a dynamic model is running or not.
# Create app
self.app = dash.Dash(
self.name, meta_tags=[{"name": "viewport", "content": "width=device-width"}],
external_stylesheets=external_stylesheets
)
# Create parameter lists
self.displays = []
self.parameters = []
self.createBaseLayout()
def isStatic(self) -> bool:
return self.frameFreq == 0.0
def execute(self):
self.render()
def render(self):
pass
def createBaseLayout(self):
"""Creates the base layout"""
# Create banner
banner = html.Div(
className="app-banner row",
children=[
html.H2(className="h2-title", children=self.name),
html.H2(className="h2-title-mobile", children=self.name),
],
)
# Add parameter header
self.addParameter(createLabel('parameter-heading', 'Parameters:'))
# If framerate > 0, create the play, stop, and restart buttons and Timestep label
if not self.isStatic():
# Add Play/Restart/Step Buttons
banner.children.append(
html.Div(
className='div-play-buttons',
id='dynamic-button',
children=[
html.Button("Play", id='play-stop-button', n_clicks=0),
html.Button('Restart', id='restart-button', n_clicks=0),
html.Button('Step', id='step-button', n_clicks=0),
dcc.Interval(
id='interval-component',
interval=self.frameFreq,
n_intervals=0
)
]
)
)
# Add Timestep label
self.parameters.append(createLabel('timestep-label', 'Timestep: 0'))
# Apply Play/Stop Callback
self.app.callback(
dash.dependencies.Output('play-stop-button', 'children'),
[dash.dependencies.Input('play-stop-button', 'n_clicks')]
)(self.play_button_callback)
# Apply executeSystems() on interval callback and Step button callback
self.app.callback(
dash.dependencies.Output('timestep-label', 'children'),
[dash.dependencies.Input('interval-component', 'n_intervals'),
dash.dependencies.Input('step-button', 'n_clicks')]
)(self.execute_system_on_play_callback)
self.app.layout = html.Div(
children=[
# Error Message
html.Div(id="error-message"),
# Top Banner
banner,
# Body of the App
html.Div(
className="row app-body",
children=[
# User Controls
html.Div(
className="four columns card",
children=html.Div(
className="bg-white user-control",
children=self.parameters)
),
# Graph
html.Div(
className="eight columns card-left",
children=self.displays,
style={'margin-left': 0}
),
dcc.Store(id="error", storage_type="memory"),
],
),
]
)
def addDisplay(self, content, add_break=True):
self.displays.append(content)
if add_break:
self.displays.append(html.Br())
def addParameter(self, content):
self.parameters.append(content)
# #################################### Class Callbacks ###########################################
def play_button_callback(self, n_clicks):
if n_clicks % 2 == 0:
self.running = False
return 'Play'
else:
self.running = True
return 'Stop'
def execute_system_on_play_callback(self, n_intervals, n_clicks):
context = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
if context == 'step-button':
if not self.running:
self.model.systemManager.executeSystems()
elif self.running:
self.model.systemManager.executeSystems()
return "Timestep: {}".format(self.model.systemManager.timestep)
# ############################## Graph and Parameter Functionality ##############################
def createScatterPlot(title, data: [[[float], [float], dict]], layout_kwargs: dict = {}):
"""Creates a Scatter plot Figure. This function supports multiple traces supplied to the 'data' parameter
Data should be supplied in the following format:
[[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]]
The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of
    the properties specified.
"""
traces = []
for data_packet in data:
scatter = go.Scatter(x=data_packet[0], y=data_packet[1])
traces.append(scatter)
if len(data_packet) > 2:
scatter.update(data_packet[2])
return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs))
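# Illustrative usage sketch, not part of the original module: two traces built
# from toy x/y lists; the optional per-trace dict on the second trace only
# shows how extra Scatter properties are applied.
#
#   fig = createScatterPlot("Example", [
#       [[0, 1, 2], [3, 4, 5]],
#       [[0, 1, 2], [5, 4, 3], {"name": "reversed"}],
#   ])
#   fig.show()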
def createScatterGLPlot(title, data: [[[float], [float], dict]], layout_kwargs: dict = {}):
"""Creates a Scatter plot Figure that will be rendered using WebGL.
This function supports multiple traces supplied to the 'data' parameter Data should be supplied in the
following format:
[[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]]
The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of
    the properties specified.
"""
traces = []
for data_packet in data:
scatter = go.Scattergl(x=data_packet[0], y=data_packet[1])
traces.append(scatter)
if len(data_packet) > 2:
scatter.update(data_packet[2])
return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs))
def createBarGraph(title: str, data: [[[float], [float], dict]], layout_kwargs: dict = {}):
"""Creates a Bar Graph Figure. This function supports multiple traces supplied to the 'data' parameter
Data should be supplied in the following format:
[[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]]
The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of
    the properties specified.
"""
traces = []
for data_packet in data:
bar = go.Bar(x=data_packet[0], y=data_packet[1])
traces.append(bar)
if len(data_packet) > 2:
bar.update(data_packet[2])
return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs))
def createHeatMap(title: str, data: [[float]], heatmap_kwargs: dict = {}, layout_kwargs: dict = {}):
"""Creates a HeatMap Figure object using Plotly graph objects. The data object determines the dimensions of the
heatmap. The len(data) will be the height. The len(data[i]) will be the width of the heatmap. The Heatmap is
constructed in a bottom-up and left-to-right manner.
Discrete X and Y categories can be specified; this is done by supplying xData and yData (the X and Y category
names) through heatmap_kwargs, e.g. heatmap_kwargs={'x': xData, 'y': yData}. The len(xData) must be equal to the
width of your Heatmap, while len(yData) must be equal to the height of your Heatmap.
A custom color scale can be supplied; ensure that it follows the Plotly colorscale format, that the threshold
values are normalized to [0, 1], and that the colors are rgb strings like 'rgb(r_val, g_val, b_val)'."""
return go.Figure(data=go.Heatmap(
z=data,
**heatmap_kwargs
), layout=go.Layout(title=title, **layout_kwargs))
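# Usage sketch for createHeatMap (made-up grid): a 2x3 heatmap with discrete
# category axes and a two-stop color scale, all passed through heatmap_kwargs.
def _example_heatmap():
    return createHeatMap(
        'Example heatmap',
        [[0.1, 0.3, 0.5],
         [0.2, 0.8, 1.0]],
        heatmap_kwargs={
            'x': ['low', 'mid', 'high'],
            'y': ['row 1', 'row 2'],
            'colorscale': [[0.0, 'rgb(255, 255, 255)'], [1.0, 'rgb(178, 24, 43)']],
        })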
def createHeatMapGL(title: str, data: [[float]], heatmap_kwargs: dict = {}, layout_kwargs: dict = {}):
"""Creates a HeatMap Figure object using Plotly graph objects that will be rendered by WebGL.
The data object determines the dimensions of the heatmap. The len(data) will be the height.
The len(data[i]) will be the width of the heatmap.
The Heatmap is constructed in a bottom-up and left-to-right manner.
Discrete X and Y categories can be specified; this is done by supplying xData and yData (the X and Y category
names) through heatmap_kwargs, e.g. heatmap_kwargs={'x': xData, 'y': yData}. The len(xData) must be equal to the
width of your Heatmap, while len(yData) must be equal to the height of your Heatmap.
A custom color scale can be supplied; ensure that it follows the Plotly colorscale format, that the threshold
values are normalized to [0, 1], and that the colors are rgb strings like 'rgb(r_val, g_val, b_val)'."""
return go.Figure(data=go.Heatmapgl(
z=data,
**heatmap_kwargs
), layout=go.Layout(title=title, **layout_kwargs))
def createContourMap(title: str, data: [[float]], contour_kwargs: dict = {}, layout_kwargs: dict = {}):
"""Creates a Contour Figure object using Plotly graph objects. The data object determines the dimensions of the
Contour plot. The len(data) will be the height. The len(data[i]) will be the width of the contour plot.
The contour plot is constructed in a bottom-up and left-to-right manner.
The contour plot can be customized using the contour_kwargs dict. The dict will be supplied to the contour plot
graph object when it is created. See the Plotly API for a list of customizable properties. The same can similarly
be applied to layout_kwargs, which changes the layout of the contour plot."""
return go.Figure(data=go.Contour(
z=data,
**contour_kwargs
), layout=go.Layout(title=title, **layout_kwargs))
def createTable(title: str, headers: [str], cells: [[]], header_kwargs: dict = {}, cell_kwargs: dict = {},
layout_kwargs: dict = {}):
"""Creates a Table figure using Plotly graph objects. Table headers and cells need to be supplied separately.
The data format for the headers and cells are as follows:
Headers: [hdr1, hdr2,...,hdrN]
Cells: [column1_data, column2_data,..., columnN_data].
The Table headers and cells are customized separately using the header_kwargs and cell_kwargs parameters. The
layout of the Table can also be customized using the layout_kwargs."""
return go.Figure(data=go.Table(
header=dict(values=headers, **header_kwargs),
cells=dict(values=cells, **cell_kwargs)
), layout=go.Layout(title=title, **layout_kwargs))
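# Usage sketch for createTable (made-up rows): note that 'cells' is
# column-oriented, one list per column.
def _example_table():
    return createTable(
        'Example table',
        ['Agent', 'Energy'],
        [['a1', 'a2', 'a3'], [10, 7, 12]],
        header_kwargs={'fill_color': 'lightgrey'})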
def createPieChart(title: str, labels: [str], values: [float], pie_kwargs: dict = {}, layout_kwargs: dict = {}):
""" Creates a Pie Chart Figure using Plotly graph objects. Chart labels and values need to be supplied separately.
The data format for the labels and values are as follows:
Labels: [lbl1, lbl2,..., lblN]
Values: [val1, val2,..., valN]
The Pie chart can be customized using the pie_kwargs parameter. The layout of the Pie chart can be customized using
the layout_kwargs parameter."""
return go.Figure(data=go.Pie(labels=labels, values=values, **pie_kwargs),
layout=go.Layout(title=title, **layout_kwargs))
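# Usage sketch for createPieChart (made-up categories): a donut-style chart
# via the pie_kwargs pass-through.
def _example_pie_chart():
    return createPieChart('Agent states', ['idle', 'moving', 'done'], [12, 30, 8],
                          pie_kwargs={'hole': 0.3})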
def createGraph(graphID: str, figure: go.Figure, classname: str = 'bg-white'):
return html.Div(
className=classname,
children=[
dcc.Graph(id=graphID, figure=figure)
],
style={'height': figure.layout.height}
)
def createLiveGraph(graphID: str, figure: go.Figure, vs: VisualInterface, callback, classname: str = 'bg-white'):
graph = createGraph(graphID, figure, classname)
def update_live_graph_callback(n_intervals, n_clicks, figure):
context = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
if (context == 'step-button' and not vs.running) or vs.running:
return callback(figure)
else:
return figure
# Add Callback
vs.app.callback(
dash.dependencies.Output(graphID, 'figure'),
[dash.dependencies.Input('interval-component', 'n_intervals'),
dash.dependencies.Input('step-button', 'n_clicks'),
dash.dependencies.Input(graphID, 'figure')]
)(update_live_graph_callback)
return graph
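# Usage sketch for createLiveGraph (hypothetical IDs and model attributes):
# a live scatter refreshed on every step. Dash typically hands the current
# figure to the callback as a plain dict, so this sketch updates its 'data'
# entry in place and returns it.
def _example_live_graph(vs, model):
    def population_callback(figure):
        history = list(model.population_history)
        figure['data'][0]['x'] = list(range(len(history)))
        figure['data'][0]['y'] = history
        return figure
    initial = createScatterPlot('Population', [[[], []]])
    return createLiveGraph('population-graph', initial, vs, population_callback)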
def createLabel(label_id, content):
return html.Div(className="padding-top-bot", children=[html.H6(content, id=label_id)])
def createLiveLabel(label_id, initial_content, vs: VisualInterface, callback):
label = createLabel(label_id, initial_content)
def update_live_label_callback(n_intervals, n_clicks, children):
context = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
if (context == 'step-button' and not vs.running) or vs.running:
return callback(children)
else:
return children
# Add Callback
vs.app.callback(
dash.dependencies.Output(label_id, 'children'),
[dash.dependencies.Input('interval-component', 'n_intervals'),
dash.dependencies.Input('step-button', 'n_clicks'),
dash.dependencies.Input(label_id, 'children')]
)(update_live_label_callback)
return label
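# Usage sketch for createLiveLabel (hypothetical model attribute): a label
# whose text is recomputed on every step.
def _example_live_label(vs, model):
    def agent_count_callback(children):
        return 'Agents alive: {}'.format(len(model.agents))
    return createLiveLabel('agent-count-label', 'Agents alive: 0', vs,
                           agent_count_callback)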
def createSlider(slider_id: str, slider_name: str, vs: VisualInterface, set_val, min_val: float = 0.0,
max_val: float = 1.0, step: float = 0.01):
"""This function will add a slider to the parameter window of the visual interface. It will also automatically add
a callback function that will supply your custom function 'set_val' with the value of the slider"""
# Add html
slider = html.Div(
className="padding-top-bot",
children=[
html.H6('{}: [{}]'.format(slider_name, max_val), id=slider_id + '-title'),
dcc.Slider(
id=slider_id,
min=min_val,
max=max_val,
value=max_val,
step=step
)
]
)
# Add callback
def set_slider_val(value):
set_val(value)
return '{}: [{}]'.format(slider_name, value)
vs.app.callback(dash.dependencies.Output(slider_id + '-title', 'children'),
[dash.dependencies.Input(slider_id, 'value')])(set_slider_val)
return slider
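# Usage sketch for createSlider (hypothetical names): the required 'set_val'
# setter writes the slider value into a model attribute, and the slider is
# then attached to the parameter panel via addParameter.
def _example_slider(vs, model):
    def set_growth_rate(value):
        model.growth_rate = value
    slider = createSlider('growth-rate-slider', 'Growth rate', vs, set_growth_rate,
                          min_val=0.0, max_val=2.0, step=0.05)
    vs.addParameter(slider)
    return slider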
def addRect(fig: go.Figure, x, y, width=1, height=1, **shape_kwargs):
"""Adds a rectangle to Figure 'fig'. x & y refer to the coordinates of the bottom left corner of the rectangle."""
x1 = x + width
y1 = y + height
fig.add_shape(
x0=x,
y0=y,
x1=x1,
y1=y1,
type='rect',
**shape_kwargs
)
def addCircle(fig: go.Figure, x, y, radius=0.5, **shape_kwargs):
"""Adds a circle to Figure 'fig'. x & y are the coordinates of the center of the circle"""
x0 = x - radius
x1 = x + radius
y0 = y - radius
y1 = y + radius
fig.add_shape(
x0=x0,
x1=x1,
y0=y0,
y1=y1,
type='circle',
**shape_kwargs
)
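# Usage sketch for addRect/addCircle (made-up coordinates): shape_kwargs are
# forwarded unchanged to fig.add_shape.
def _example_shapes():
    fig = go.Figure()
    addRect(fig, 0, 0, width=2, height=1, fillcolor='LightSalmon', opacity=0.5)
    addCircle(fig, 3, 3, radius=1, line=dict(color='RoyalBlue'))
    return fig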
def createTabs(labels: [str], tabs: []):
return html.Div([
dcc.Tabs(
[
dcc.Tab(label=labels[x], children=tabs[x]) for x in range(len(labels))
]
)])
| StarcoderdataPython |
8106178 | # -*- coding: utf-8 -*-
"""
@date: 2020/12/9 11:54 AM
@file: test_resnet3d_basicblock.py
@author: zj
@description:
"""
import torch
import torch.nn as nn
from tsn.model.backbones.resnet3d_basicblock import ResNet3DBasicBlock
def test_resnet3d_basicblock():
data = torch.randn(1, 64, 1, 56, 56)
inplanes = 64
planes = 128
expansion = ResNet3DBasicBlock.expansion
# No inflation
# No spatial downsampling
temporal_stride = 1
spatial_stride = 1
downsample = nn.Sequential(
nn.Conv3d(inplanes, planes * expansion, kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride), bias=False),
nn.BatchNorm3d(planes * expansion),
)
model = ResNet3DBasicBlock(inplanes=inplanes,
planes=planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
inflate=False,
downsample=downsample)
print(model)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, planes * expansion, 1, 56, 56)
# No inflation
# With spatial downsampling
temporal_stride = 1
spatial_stride = 2
downsample = nn.Sequential(
nn.Conv3d(inplanes, planes * expansion, kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride), bias=False),
nn.BatchNorm3d(planes * expansion),
)
model = ResNet3DBasicBlock(inplanes=inplanes,
planes=planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
inflate=False,
downsample=downsample)
print(model)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, planes * expansion, 1, 28, 28)
def test_resnet3d_basicblock_3_1_1():
# Inflation, no temporal downsampling
# No downsampling
data = torch.randn(1, 64, 8, 56, 56)
inplanes = 64
planes = 128
inflate_style = '3x1x1'
expansion = ResNet3DBasicBlock.expansion
temporal_stride = 1
spatial_stride = 1
downsample = nn.Sequential(
nn.Conv3d(inplanes, planes * expansion, kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride), bias=False),
nn.BatchNorm3d(planes * expansion),
)
model = ResNet3DBasicBlock(inplanes=inplanes,
planes=planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
inflate=True,
inflate_style=inflate_style,
downsample=downsample)
print(model)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, planes * expansion, 8, 56, 56)
# Inflation, with temporal downsampling
# No spatial downsampling
data = torch.randn(1, 64, 8, 56, 56)
inplanes = 64
planes = 128
expansion = ResNet3DBasicBlock.expansion
temporal_stride = 2
spatial_stride = 1
downsample = nn.Sequential(
nn.Conv3d(inplanes, planes * expansion, kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride), bias=False),
nn.BatchNorm3d(planes * expansion),
)
model = ResNet3DBasicBlock(inplanes=inplanes,
planes=planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
inflate=True,
inflate_style=inflate_style,
downsample=downsample)
print(model)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, planes * expansion, 4, 56, 56)
# Inflation, with temporal downsampling
# With spatial downsampling
data = torch.randn(1, 64, 8, 56, 56)
inplanes = 64
planes = 128
expansion = ResNet3DBasicBlock.expansion
temporal_stride = 2
spatial_stride = 2
downsample = nn.Sequential(
nn.Conv3d(inplanes, planes * expansion, kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride), bias=False),
nn.BatchNorm3d(planes * expansion),
)
model = ResNet3DBasicBlock(inplanes=inplanes,
planes=planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
inflate=True,
inflate_style=inflate_style,
downsample=downsample)
print(model)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, planes * expansion, 4, 28, 28)
def test_resnet3d_basicblock_3_3_3():
# Inflation, no temporal downsampling
# No downsampling
data = torch.randn(1, 64, 8, 56, 56)
inplanes = 64
planes = 128
inflate_style = '3x3x3'
expansion = ResNet3DBasicBlock.expansion
temporal_stride = 1
spatial_stride = 1
downsample = nn.Sequential(
nn.Conv3d(inplanes, planes * expansion, kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride), bias=False),
nn.BatchNorm3d(planes * expansion),
)
model = ResNet3DBasicBlock(inplanes=inplanes,
planes=planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
inflate=True,
inflate_style=inflate_style,
downsample=downsample)
print(model)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, planes * expansion, 8, 56, 56)
# Inflation, with temporal downsampling
# No spatial downsampling
data = torch.randn(1, 64, 8, 56, 56)
inplanes = 64
planes = 128
expansion = ResNet3DBasicBlock.expansion
temporal_stride = 2
spatial_stride = 1
downsample = nn.Sequential(
nn.Conv3d(inplanes, planes * expansion, kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride), bias=False),
nn.BatchNorm3d(planes * expansion),
)
model = ResNet3DBasicBlock(inplanes=inplanes,
planes=planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
inflate=True,
inflate_style=inflate_style,
downsample=downsample)
print(model)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, planes * expansion, 4, 56, 56)
# Inflation, with temporal downsampling
# With spatial downsampling
data = torch.randn(1, 64, 8, 56, 56)
inplanes = 64
planes = 128
expansion = ResNet3DBasicBlock.expansion
temporal_stride = 2
spatial_stride = 2
downsample = nn.Sequential(
nn.Conv3d(inplanes, planes * expansion, kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride), bias=False),
nn.BatchNorm3d(planes * expansion),
)
model = ResNet3DBasicBlock(inplanes=inplanes,
planes=planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
inflate=True,
inflate_style=inflate_style,
downsample=downsample)
print(model)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, planes * expansion, 4, 28, 28)
if __name__ == '__main__':
print('*' * 100)
test_resnet3d_basicblock()
print('*' * 100)
test_resnet3d_basicblock_3_1_1()
print('*' * 100)
test_resnet3d_basicblock_3_3_3()
| StarcoderdataPython |
6413879 | from marshmallow import Schema, fields
from pfms.pfapi.base.pfms_base_schema import PfBaseSchema
class BaseResponse(Schema):
status = fields.String()
code = fields.String()
def get_data_response_def(self, data: dict = None):
if not data:
data = {}
data["status"] = fields.String()
data["code"] = fields.String()
return Schema.from_dict(data)
def json(self, many=False) -> str:
return self.dumps(self, many=many)
class MessageResponse(BaseResponse):
message = fields.String()
class ErrorResponse(MessageResponse):
error = fields.Dict(keys=fields.String(), values=fields.String())
class DataResponse(BaseResponse):
data = None
pagination = None
def get_list_data_type(self, data_list: list):
if len(data_list) != 0:
first_element = data_list[0]
if isinstance(first_element, int):
return fields.Integer
if isinstance(first_element, dict):
return fields.Dict
if isinstance(first_element, float):
return fields.Float
return fields.String
def add_data(self, data, many):
self.add_nested(data, many)
def add_pagination(self, data):
self.add_nested(data, False, "pagination")
def add_nested(self, data, many, key="data"):
field = None
if isinstance(data, str):
field = fields.String(default=data)
if isinstance(data, dict):
field = fields.Dict(default=data)
elif isinstance(data, PfBaseSchema):
field = fields.Nested(data, many=many)
elif isinstance(data, Schema):
field = fields.Nested(data, many=many)
elif isinstance(data, list):
field = fields.List(default=data, cls_or_instance=self.get_list_data_type(data))
if field:
if key == "data":
self.data = data
if key == "pagination":
self.pagination = data
self.fields[key] = field
self.dump_fields[key] = field
self.declared_fields[key] = field
self.load_fields[key] = field
class Pagination(Schema):
page = fields.Integer()
itemPerPage = fields.Integer()
total = fields.Integer()
totalPage = fields.Integer()
class PaginatedResponse(DataResponse):
pagination: Pagination
| StarcoderdataPython |
8090356 | import pandas as pd
import numpy as np
import os
from sklearn.metrics import *
def getfile(root, filename):
if root[-1]!='/':
root+='/'
if '.csv' not in filename:
filename = filename+'.csv'
file = root+filename
df = pd.read_csv(file,header=None)
df = np.asarray(df)
labels=[]
classes = os.listdir(root+'val/')
for en,c in enumerate(classes):
for i in range(len(os.listdir(root+'val/'+c))):
labels.append(en)
labels = np.asarray(labels)
return df,labels
def predicting(ensemble_prob):
prediction = np.zeros((ensemble_prob.shape[0],))
for i in range(ensemble_prob.shape[0]):
temp = ensemble_prob[i]
t = np.where(temp == np.max(temp))[0][0]
prediction[i] = t
return prediction
def metrics(labels,predictions,classes):
print("Classification Report:")
print(classification_report(labels, predictions, target_names = classes,digits = 4))
matrix = confusion_matrix(labels, predictions)
print("Confusion matrix:")
print(matrix)
print("\nClasswise Accuracy :{}".format(matrix.diagonal()/matrix.sum(axis = 1)))
print("\nBalanced Accuracy Score: ",balanced_accuracy_score(labels,predictions))
| StarcoderdataPython |
44998 | My new code...
New second code line ...
| StarcoderdataPython |
11361632 | input = [line.strip().split(' ') for line in open('input/day12.txt').readlines()]
registers = {
'a':0,
'b':0,
'c':0,
'd':0
}
def run_program(part=1):
if part > 1:
registers['c'] = 1
i = 0
while i < len(input):
line = input[i]
cmd = line[0]
if cmd == 'cpy':
if line[1].isdigit():
num = int(line[1])
else:
num = registers[line[1]]
reg = line[2]
registers[reg] = num
i += 1
elif cmd == 'inc':
reg = line[1]
registers[reg] += 1
i += 1
elif cmd == 'dec':
reg = line[1]
registers[reg] -= 1
i += 1
elif cmd == 'jnz':
flag = line[1]
if flag.isdigit():
val = int(flag)
else:
val = registers[flag]
num = int(line[2])
if val != 0:
i += num
else:
i += 1
return registers['a']
print(run_program(1))
print(run_program(2)) | StarcoderdataPython |
4918749 | class X:
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n"
| StarcoderdataPython |
348349 | from django.contrib import admin
from .models import UserGrade, UserLog
@admin.register(UserGrade)
class UserGradeAdmin(admin.ModelAdmin):
list_display = ('user', 'grade')
@admin.register(UserLog)
class UserLogAdmin(admin.ModelAdmin):
list_display = ('time', 'user')
list_filter = ('time', 'user')
# Register your models here.
| StarcoderdataPython |
1998351 | # -*- test-case-name: openid.test.test_server -*-
"""OpenID server protocol and logic.
Overview
========
An OpenID server must perform three tasks:
1. Examine the incoming request to determine its nature and validity.
2. Make a decision about how to respond to this request.
3. Format the response according to the protocol.
The first and last of these tasks may performed by
the L{decodeRequest<Server.decodeRequest>} and
L{encodeResponse<Server.encodeResponse>} methods of the
L{Server} object. Who gets to do the intermediate task -- deciding
how to respond to the request -- will depend on what type of request it
is.
If it's a request to authenticate a user (a X{C{checkid_setup}} or
X{C{checkid_immediate}} request), you need to decide if you will assert
that this user may claim the identity in question. Exactly how you do
that is a matter of application policy, but it generally involves making
sure the user has an account with your system and is logged in, checking
to see if that identity is hers to claim, and verifying with the user that
she does consent to releasing that information to the party making the
request.
Examine the properties of the L{CheckIDRequest} object, optionally
check L{CheckIDRequest.returnToVerified}, and when you've come
to a decision, form a response by calling L{CheckIDRequest.answer}.
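A minimal sketch (the authorization check shown here is application-specific)::

    # 'request' is the CheckIDRequest obtained from Server.decodeRequest
    if user_is_logged_in and user_owns_identity and user_consents:
        response = request.answer(True)
    else:
        response = request.answer(False)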
Other types of requests relate to establishing associations between client
and server and verifying the authenticity of previous communications.
L{Server} contains all the logic and data necessary to respond to
such requests; just pass the request to L{Server.handleRequest}.
OpenID Extensions
=================
Do you want to provide other information for your users
in addition to authentication? Version 2.0 of the OpenID
protocol allows consumers to add extensions to their requests.
For example, with sites using the U{Simple Registration
Extension<http://openid.net/specs/openid-simple-registration-extension-1_0.html>},
a user can agree to have their nickname and e-mail address sent to a
site when they sign up.
Since extensions do not change the way OpenID authentication works,
code to handle extension requests may be completely separate from the
L{OpenIDRequest} class here. But you'll likely want data sent back by
your extension to be signed. L{OpenIDResponse} provides methods with
which you can add data to it which can be signed with the other data in
the OpenID signature.
For example::
# when request is a checkid_* request
response = request.answer(True)
# this will add a signed 'openid.sreg.timezone' parameter to the response
# as well as a namespace declaration for the openid.sreg namespace
response.fields.setArg('http://openid.net/sreg/1.0', 'timezone', 'America/Los_Angeles')
There are helper modules for a number of extensions, including
L{Attribute Exchange<openid.extensions.ax>},
L{PAPE<openid.extensions.pape>}, and
L{Simple Registration<openid.extensions.sreg>} in the L{openid.extensions}
package.
Stores
======
The OpenID server needs to maintain state between requests in order
to function. Its mechanism for doing this is called a store. The
store interface is defined in C{L{openid.store.interface.OpenIDStore}}.
Additionally, several concrete store implementations are provided, so that
most sites won't need to implement a custom store. For a store backed
by flat files on disk, see C{L{openid.store.filestore.FileOpenIDStore}}.
For stores based on MySQL or SQLite, see the C{L{openid.store.sqlstore}}
module.
Upgrading
=========
From 1.0 to 1.1
---------------
The keys by which a server looks up associations in its store have changed
in version 1.2 of this library. If your store has entries created from
version 1.0 code, you should empty it.
From 1.1 to 2.0
---------------
One of the additions to the OpenID protocol was a specified nonce
format for one-way nonces. As a result, the nonce table in the store
has changed. You'll need to run contrib/upgrade-store-1.1-to-2.0 to
upgrade your store, or you'll encounter errors about the wrong number
of columns in the oid_nonces table.
If you've written your own custom store or code that interacts
directly with it, you'll need to review the change notes in
L{openid.store.interface}.
@group Requests: OpenIDRequest, AssociateRequest, CheckIDRequest,
CheckAuthRequest
@group Responses: OpenIDResponse
@group HTTP Codes: HTTP_OK, HTTP_REDIRECT, HTTP_ERROR
@group Response Encodings: ENCODE_KVFORM, ENCODE_HTML_FORM, ENCODE_URL
"""
import time, warnings
import logging
from copy import deepcopy
from openid import cryptutil
from openid import oidutil
from openid import kvform
from openid.dh import DiffieHellman
from openid.store.nonce import mkNonce
from openid.server.trustroot import TrustRoot, verifyReturnTo
from openid.association import Association, default_negotiator, getSecretSize
from openid.message import Message, InvalidOpenIDNamespace, \
OPENID_NS, OPENID2_NS, IDENTIFIER_SELECT, OPENID1_URL_LIMIT
from openid.urinorm import urinorm
HTTP_OK = 200
HTTP_REDIRECT = 302
HTTP_ERROR = 400
BROWSER_REQUEST_MODES = ['checkid_setup', 'checkid_immediate']
ENCODE_KVFORM = ('kvform',)
ENCODE_URL = ('URL/redirect',)
ENCODE_HTML_FORM = ('HTML form',)
UNUSED = None
class OpenIDRequest(object):
"""I represent an incoming OpenID request.
@cvar mode: the C{X{openid.mode}} of this request.
@type mode: str
"""
mode = None
class CheckAuthRequest(OpenIDRequest):
"""A request to verify the validity of a previous response.
@cvar mode: "X{C{check_authentication}}"
@type mode: str
@ivar assoc_handle: The X{association handle} the response was signed with.
@type assoc_handle: str
@ivar signed: The message with the signature which wants checking.
@type signed: L{Message}
@ivar invalidate_handle: An X{association handle} the client is asking
about the validity of. Optional, may be C{None}.
@type invalidate_handle: str
@see: U{OpenID Specs, Mode: check_authentication
<http://openid.net/specs.bml#mode-check_authentication>}
"""
mode = "check_authentication"
required_fields = ["identity", "return_to", "response_nonce"]
def __init__(self, assoc_handle, signed, invalidate_handle=None):
"""Construct me.
These parameters are assigned directly as class attributes, see
my L{class documentation<CheckAuthRequest>} for their descriptions.
@type assoc_handle: str
@type signed: L{Message}
@type invalidate_handle: str
"""
self.assoc_handle = assoc_handle
self.signed = signed
self.invalidate_handle = invalidate_handle
self.namespace = OPENID2_NS
def fromMessage(klass, message, op_endpoint=UNUSED):
"""Construct me from an OpenID Message.
@param message: An OpenID check_authentication Message
@type message: L{openid.message.Message}
@returntype: L{CheckAuthRequest}
"""
self = klass.__new__(klass)
self.message = message
self.namespace = message.getOpenIDNamespace()
self.assoc_handle = message.getArg(OPENID_NS, 'assoc_handle')
self.sig = message.getArg(OPENID_NS, 'sig')
if (self.assoc_handle is None or
self.sig is None):
fmt = "%s request missing required parameter from message %s"
raise ProtocolError(
message, text=fmt % (self.mode, message))
self.invalidate_handle = message.getArg(OPENID_NS, 'invalidate_handle')
self.signed = message.copy()
# openid.mode is currently check_authentication because
# that's the mode of this request. But the signature
# was made on something with a different openid.mode.
# http://article.gmane.org/gmane.comp.web.openid.general/537
if self.signed.hasKey(OPENID_NS, "mode"):
self.signed.setArg(OPENID_NS, "mode", "id_res")
return self
fromMessage = classmethod(fromMessage)
def answer(self, signatory):
"""Respond to this request.
Given a L{Signatory}, I can check the validity of the signature and
the X{C{invalidate_handle}}.
@param signatory: The L{Signatory} to use to check the signature.
@type signatory: L{Signatory}
@returns: A response with an X{C{is_valid}} (and, if
appropriate X{C{invalidate_handle}}) field.
@returntype: L{OpenIDResponse}
"""
is_valid = signatory.verify(self.assoc_handle, self.signed)
# Now invalidate that assoc_handle so that this checkAuth message cannot
# be replayed.
signatory.invalidate(self.assoc_handle, dumb=True)
response = OpenIDResponse(self)
valid_str = (is_valid and "true") or "false"
response.fields.setArg(OPENID_NS, 'is_valid', valid_str)
if self.invalidate_handle:
assoc = signatory.getAssociation(self.invalidate_handle, dumb=False)
if not assoc:
response.fields.setArg(
OPENID_NS, 'invalidate_handle', self.invalidate_handle)
return response
def __str__(self):
if self.invalidate_handle:
ih = " invalidate? %r" % (self.invalidate_handle,)
else:
ih = ""
s = "<%s handle: %r sig: %r: signed: %r%s>" % (
self.__class__.__name__, self.assoc_handle,
self.sig, self.signed, ih)
return s
class PlainTextServerSession(object):
"""An object that knows how to handle association requests with no
session type.
@cvar session_type: The session_type for this association
session. There is no type defined for plain-text in the OpenID
specification, so we use 'no-encryption'.
@type session_type: str
@see: U{OpenID Specs, Mode: associate
<http://openid.net/specs.bml#mode-associate>}
@see: AssociateRequest
"""
session_type = 'no-encryption'
allowed_assoc_types = ['HMAC-SHA1', 'HMAC-SHA256']
def fromMessage(cls, unused_request):
return cls()
fromMessage = classmethod(fromMessage)
def answer(self, secret):
return {'mac_key': oidutil.toBase64(secret)}
class DiffieHellmanSHA1ServerSession(object):
"""An object that knows how to handle association requests with the
Diffie-Hellman session type.
@cvar session_type: The session_type for this association
session.
@type session_type: str
@ivar dh: The Diffie-Hellman algorithm values for this request
@type dh: DiffieHellman
@ivar consumer_pubkey: The public key sent by the consumer in the
associate request
@type consumer_pubkey: long
@see: U{OpenID Specs, Mode: associate
<http://openid.net/specs.bml#mode-associate>}
@see: AssociateRequest
"""
session_type = 'DH-SHA1'
hash_func = staticmethod(cryptutil.sha1)
allowed_assoc_types = ['HMAC-SHA1']
def __init__(self, dh, consumer_pubkey):
self.dh = dh
self.consumer_pubkey = consumer_pubkey
def fromMessage(cls, message):
"""
@param message: The associate request message
@type message: openid.message.Message
@returntype: L{DiffieHellmanSHA1ServerSession}
@raises ProtocolError: When parameters required to establish the
session are missing.
"""
dh_modulus = message.getArg(OPENID_NS, 'dh_modulus')
dh_gen = message.getArg(OPENID_NS, 'dh_gen')
if (dh_modulus is None and dh_gen is not None or
dh_gen is None and dh_modulus is not None):
if dh_modulus is None:
missing = 'modulus'
else:
missing = 'generator'
raise ProtocolError(message,
'If non-default modulus or generator is '
'supplied, both must be supplied. Missing %s'
% (missing,))
if dh_modulus or dh_gen:
dh_modulus = cryptutil.base64ToLong(dh_modulus)
dh_gen = cryptutil.base64ToLong(dh_gen)
dh = DiffieHellman(dh_modulus, dh_gen)
else:
dh = DiffieHellman.fromDefaults()
consumer_pubkey = message.getArg(OPENID_NS, 'dh_consumer_public')
if consumer_pubkey is None:
raise ProtocolError(message, "Public key for DH-SHA1 session "
"not found in message %s" % (message,))
consumer_pubkey = cryptutil.base64ToLong(consumer_pubkey)
return cls(dh, consumer_pubkey)
fromMessage = classmethod(fromMessage)
def answer(self, secret):
mac_key = self.dh.xorSecret(self.consumer_pubkey,
secret,
self.hash_func)
return {
'dh_server_public': cryptutil.longToBase64(self.dh.public),
'enc_mac_key': oidutil.toBase64(mac_key),
}
class DiffieHellmanSHA256ServerSession(DiffieHellmanSHA1ServerSession):
session_type = 'DH-SHA256'
hash_func = staticmethod(cryptutil.sha256)
allowed_assoc_types = ['HMAC-SHA256']
class AssociateRequest(OpenIDRequest):
"""A request to establish an X{association}.
@cvar mode: "X{C{check_authentication}}"
@type mode: str
@ivar assoc_type: The type of association. The protocol currently only
defines one value for this, "X{C{HMAC-SHA1}}".
@type assoc_type: str
@ivar session: An object that knows how to handle association
requests of a certain type.
@see: U{OpenID Specs, Mode: associate
<http://openid.net/specs.bml#mode-associate>}
"""
mode = "associate"
session_classes = {
'no-encryption': PlainTextServerSession,
'DH-SHA1': DiffieHellmanSHA1ServerSession,
'DH-SHA256': DiffieHellmanSHA256ServerSession,
}
def __init__(self, session, assoc_type):
"""Construct me.
The session is assigned directly as a class attribute. See my
L{class documentation<AssociateRequest>} for its description.
"""
super(AssociateRequest, self).__init__()
self.session = session
self.assoc_type = assoc_type
self.namespace = OPENID2_NS
def fromMessage(klass, message, op_endpoint=UNUSED):
"""Construct me from an OpenID Message.
@param message: The OpenID associate request
@type message: openid.message.Message
@returntype: L{AssociateRequest}
"""
if message.isOpenID1():
session_type = message.getArg(OPENID_NS, 'session_type')
if session_type == 'no-encryption':
logging.warn('Received OpenID 1 request with a no-encryption '
'association session type. Continuing anyway.')
elif not session_type:
session_type = 'no-encryption'
else:
session_type = message.getArg(OPENID2_NS, 'session_type')
if session_type is None:
raise ProtocolError(message,
text="session_type missing from request")
try:
session_class = klass.session_classes[session_type]
except KeyError:
raise ProtocolError(message,
"Unknown session type %r" % (session_type,))
try:
session = session_class.fromMessage(message)
except ValueError, why:
raise ProtocolError(message, 'Error parsing %s session: %s' %
(session_class.session_type, why[0]))
assoc_type = message.getArg(OPENID_NS, 'assoc_type', 'HMAC-SHA1')
if assoc_type not in session.allowed_assoc_types:
fmt = 'Session type %s does not support association type %s'
raise ProtocolError(message, fmt % (session_type, assoc_type))
self = klass(session, assoc_type)
self.message = message
self.namespace = message.getOpenIDNamespace()
return self
fromMessage = classmethod(fromMessage)
def answer(self, assoc):
"""Respond to this request with an X{association}.
@param assoc: The association to send back.
@type assoc: L{openid.association.Association}
@returns: A response with the association information, encrypted
to the consumer's X{public key} if appropriate.
@returntype: L{OpenIDResponse}
"""
response = OpenIDResponse(self)
response.fields.updateArgs(OPENID_NS, {
'expires_in': '%d' % (assoc.getExpiresIn(),),
'assoc_type': self.assoc_type,
'assoc_handle': assoc.handle,
})
response.fields.updateArgs(OPENID_NS,
self.session.answer(assoc.secret))
if not (self.session.session_type == 'no-encryption' and
self.message.isOpenID1()):
# The session type "no-encryption" did not have a name
# in OpenID v1, it was just omitted.
response.fields.setArg(
OPENID_NS, 'session_type', self.session.session_type)
return response
def answerUnsupported(self, message, preferred_association_type=None,
preferred_session_type=None):
"""Respond to this request indicating that the association
type or association session type is not supported."""
if self.message.isOpenID1():
raise ProtocolError(self.message)
response = OpenIDResponse(self)
response.fields.setArg(OPENID_NS, 'error_code', 'unsupported-type')
response.fields.setArg(OPENID_NS, 'error', message)
if preferred_association_type:
response.fields.setArg(
OPENID_NS, 'assoc_type', preferred_association_type)
if preferred_session_type:
response.fields.setArg(
OPENID_NS, 'session_type', preferred_session_type)
return response
class CheckIDRequest(OpenIDRequest):
"""A request to confirm the identity of a user.
This class handles requests for openid modes X{C{checkid_immediate}}
and X{C{checkid_setup}}.
@cvar mode: "X{C{checkid_immediate}}" or "X{C{checkid_setup}}"
@type mode: str
@ivar immediate: Is this an immediate-mode request?
@type immediate: bool
@ivar identity: The OP-local identifier being checked.
@type identity: str
@ivar claimed_id: The claimed identifier. Not present in OpenID 1.x
messages.
@type claimed_id: str
@ivar trust_root: "Are you Frank?" asks the checkid request. "Who wants
to know?" C{trust_root}, that's who. This URL identifies the party
making the request, and the user will use that to make her decision
about what answer she trusts them to have. Referred to as "realm" in
OpenID 2.0.
@type trust_root: str
@ivar return_to: The URL to send the user agent back to to reply to this
request.
@type return_to: str
@ivar assoc_handle: Provided in smart mode requests, a handle for a
previously established association. C{None} for dumb mode requests.
@type assoc_handle: str
"""
def __init__(self, identity, return_to, trust_root=None, immediate=False,
assoc_handle=None, op_endpoint=None, claimed_id=None):
"""Construct me.
These parameters are assigned directly as class attributes, see
my L{class documentation<CheckIDRequest>} for their descriptions.
@raises MalformedReturnURL: When the C{return_to} URL is not a URL.
"""
self.assoc_handle = assoc_handle
self.identity = identity
self.claimed_id = claimed_id or identity
self.return_to = return_to
self.trust_root = trust_root or return_to
self.op_endpoint = op_endpoint
assert self.op_endpoint is not None
if immediate:
self.immediate = True
self.mode = "checkid_immediate"
else:
self.immediate = False
self.mode = "checkid_setup"
if self.return_to is not None and \
not TrustRoot.parse(self.return_to):
raise MalformedReturnURL(None, self.return_to)
if not self.trustRootValid():
raise UntrustedReturnURL(None, self.return_to, self.trust_root)
self.message = None
def _getNamespace(self):
warnings.warn('The "namespace" attribute of CheckIDRequest objects '
'is deprecated. Use "message.getOpenIDNamespace()" '
'instead', DeprecationWarning, stacklevel=2)
return self.message.getOpenIDNamespace()
namespace = property(_getNamespace)
def fromMessage(klass, message, op_endpoint):
"""Construct me from an OpenID message.
@raises ProtocolError: When not all required parameters are present
in the message.
@raises MalformedReturnURL: When the C{return_to} URL is not a URL.
@raises UntrustedReturnURL: When the C{return_to} URL is outside
the C{trust_root}.
@param message: An OpenID checkid_* request Message
@type message: openid.message.Message
@param op_endpoint: The endpoint URL of the server that this
message was sent to.
@type op_endpoint: str
@returntype: L{CheckIDRequest}
"""
self = klass.__new__(klass)
self.message = message
self.op_endpoint = op_endpoint
mode = message.getArg(OPENID_NS, 'mode')
if mode == "checkid_immediate":
self.immediate = True
self.mode = "checkid_immediate"
else:
self.immediate = False
self.mode = "checkid_setup"
self.return_to = message.getArg(OPENID_NS, 'return_to')
if message.isOpenID1() and not self.return_to:
fmt = "Missing required field 'return_to' from %r"
raise ProtocolError(message, text=fmt % (message,))
self.identity = message.getArg(OPENID_NS, 'identity')
self.claimed_id = message.getArg(OPENID_NS, 'claimed_id')
if message.isOpenID1():
if self.identity is None:
s = "OpenID 1 message did not contain openid.identity"
raise ProtocolError(message, text=s)
else:
if self.identity and not self.claimed_id:
s = ("OpenID 2.0 message contained openid.identity but not "
"claimed_id")
raise ProtocolError(message, text=s)
elif self.claimed_id and not self.identity:
s = ("OpenID 2.0 message contained openid.claimed_id but not "
"identity")
raise ProtocolError(message, text=s)
# There's a case for making self.trust_root be a TrustRoot
# here. But if TrustRoot isn't currently part of the "public" API,
# I'm not sure it's worth doing.
if message.isOpenID1():
trust_root_param = 'trust_root'
else:
trust_root_param = 'realm'
# Using 'or' here is slightly different than sending a default
# argument to getArg, as it will treat no value and an empty
# string as equivalent.
self.trust_root = (message.getArg(OPENID_NS, trust_root_param)
or self.return_to)
if not message.isOpenID1():
if self.return_to is self.trust_root is None:
raise ProtocolError(message, "openid.realm required when " +
"openid.return_to absent")
self.assoc_handle = message.getArg(OPENID_NS, 'assoc_handle')
# Using TrustRoot.parse here is a bit misleading, as we're not
# parsing return_to as a trust root at all. However, valid URLs
# are valid trust roots, so we can use this to get an idea if it
# is a valid URL. Not all trust roots are valid return_to URLs,
# however (particularly ones with wildcards), so this is still a
# little sketchy.
if self.return_to is not None and \
not TrustRoot.parse(self.return_to):
raise MalformedReturnURL(message, self.return_to)
# I first thought that checking to see if the return_to is within
# the trust_root is premature here, a logic-not-decoding thing. But
# it was argued that this is really part of data validation. A
# request with an invalid trust_root/return_to is broken regardless of
# application, right?
if not self.trustRootValid():
raise UntrustedReturnURL(message, self.return_to, self.trust_root)
return self
fromMessage = classmethod(fromMessage)
def idSelect(self):
"""Is the identifier to be selected by the IDP?
@returntype: bool
"""
# So IDPs don't have to import the constant
return self.identity == IDENTIFIER_SELECT
def trustRootValid(self):
"""Is my return_to under my trust_root?
@returntype: bool
"""
if not self.trust_root:
return True
tr = TrustRoot.parse(self.trust_root)
if tr is None:
raise MalformedTrustRoot(self.message, self.trust_root)
if self.return_to is not None:
return tr.validateURL(self.return_to)
else:
return True
def returnToVerified(self):
"""Does the relying party publish the return_to URL for this
response under the realm? It is up to the provider to set a
policy for what kinds of realms should be allowed. This
return_to URL verification reduces vulnerability to data-theft
attacks based on open proxies, cross-site-scripting, or open
redirectors.
This check should only be performed after making sure that the
return_to URL matches the realm.
@see: L{trustRootValid}
@raises openid.yadis.discover.DiscoveryFailure: if the realm
URL does not support Yadis discovery (and so does not
support the verification process).
@raises openid.fetchers.HTTPFetchingError: if the realm URL
is not reachable. When this is the case, the RP may be hosted
on the user's intranet.
@returntype: bool
@returns: True if the realm publishes a document with the
return_to URL listed
@since: 2.1.0
"""
return verifyReturnTo(self.trust_root, self.return_to)
def answer(self, allow, server_url=None, identity=None, claimed_id=None):
"""Respond to this request.
@param allow: Allow this user to claim this identity, and allow the
consumer to have this information?
@type allow: bool
@param server_url: DEPRECATED. Passing C{op_endpoint} to the
L{Server} constructor makes this optional.
When an OpenID 1.x immediate mode request does not succeed,
it gets back a URL where the request may be carried out
in a not-so-immediate fashion. Pass my URL in here (the
fully qualified address of this server's endpoint, i.e.
C{http://example.com/server}), and I will use it as a base for the
URL for a new request.
Optional for requests where C{CheckIDRequest.immediate} is C{False}
or C{allow} is C{True}.
@type server_url: str
@param identity: The OP-local identifier to answer with. Only for use
when the relying party requested identifier selection.
@type identity: str or None
@param claimed_id: The claimed identifier to answer with, for use
with identifier selection in the case where the claimed identifier
and the OP-local identifier differ, i.e. when the claimed_id uses
delegation.
If C{identity} is provided but this is not, C{claimed_id} will
default to the value of C{identity}. When answering requests
that did not ask for identifier selection, the response
C{claimed_id} will default to that of the request.
This parameter is new in OpenID 2.0.
@type claimed_id: str or None
@returntype: L{OpenIDResponse}
@change: Version 2.0 deprecates C{server_url} and adds C{claimed_id}.
@raises NoReturnError: when I do not have a return_to.
"""
assert self.message is not None
if not self.return_to:
raise NoReturnToError
if not server_url:
if not self.message.isOpenID1() and not self.op_endpoint:
# In other words, that warning I raised in Server.__init__?
# You should pay attention to it now.
raise RuntimeError("%s should be constructed with op_endpoint "
"to respond to OpenID 2.0 messages." %
(self,))
server_url = self.op_endpoint
if allow:
mode = 'id_res'
elif self.message.isOpenID1():
if self.immediate:
mode = 'id_res'
else:
mode = 'cancel'
else:
if self.immediate:
mode = 'setup_needed'
else:
mode = 'cancel'
response = OpenIDResponse(self)
if claimed_id and self.message.isOpenID1():
namespace = self.message.getOpenIDNamespace()
raise VersionError("claimed_id is new in OpenID 2.0 and not "
"available for %s" % (namespace,))
if allow:
if self.identity == IDENTIFIER_SELECT:
if not identity:
raise ValueError(
"This request uses IdP-driven identifier selection."
"You must supply an identifier in the response.")
response_identity = identity
response_claimed_id = claimed_id or identity
elif self.identity:
if identity and (self.identity != identity):
normalized_request_identity = urinorm(self.identity)
normalized_answer_identity = urinorm(identity)
if (normalized_request_identity !=
normalized_answer_identity):
raise ValueError(
"Request was for identity %r, cannot reply "
"with identity %r" % (self.identity, identity))
# The "identity" value in the response shall always be
# the same as that in the request, otherwise the RP is
# likely to not validate the response.
response_identity = self.identity
response_claimed_id = self.claimed_id
else:
if identity:
raise ValueError(
"This request specified no identity and you "
"supplied %r" % (identity,))
response_identity = None
if self.message.isOpenID1() and response_identity is None:
raise ValueError(
"Request was an OpenID 1 request, so response must "
"include an identifier."
)
response.fields.updateArgs(OPENID_NS, {
'mode': mode,
'return_to': self.return_to,
'response_nonce': mkNonce(),
})
if server_url:
response.fields.setArg(OPENID_NS, 'op_endpoint', server_url)
if response_identity is not None:
response.fields.setArg(
OPENID_NS, 'identity', response_identity)
if self.message.isOpenID2():
response.fields.setArg(
OPENID_NS, 'claimed_id', response_claimed_id)
else:
response.fields.setArg(OPENID_NS, 'mode', mode)
if self.immediate:
if self.message.isOpenID1() and not server_url:
raise ValueError("setup_url is required for allow=False "
"in OpenID 1.x immediate mode.")
# Make a new request just like me, but with immediate=False.
setup_request = self.__class__(
self.identity, self.return_to, self.trust_root,
immediate=False, assoc_handle=self.assoc_handle,
op_endpoint=self.op_endpoint, claimed_id=self.claimed_id)
# XXX: This API is weird.
setup_request.message = self.message
setup_url = setup_request.encodeToURL(server_url)
response.fields.setArg(OPENID_NS, 'user_setup_url', setup_url)
return response
def encodeToURL(self, server_url):
"""Encode this request as a URL to GET.
@param server_url: The URL of the OpenID server to make this request of.
@type server_url: str
@returntype: str
@raises NoReturnError: when I do not have a return_to.
"""
if not self.return_to:
raise NoReturnToError
# Imported from the alternate reality where these classes are used
# in both the client and server code, so Requests are Encodable too.
# That's right, code imported from alternate realities all for the
# love of you, id_res/user_setup_url.
q = {'mode': self.mode,
'identity': self.identity,
'claimed_id': self.claimed_id,
'return_to': self.return_to}
if self.trust_root:
if self.message.isOpenID1():
q['trust_root'] = self.trust_root
else:
q['realm'] = self.trust_root
if self.assoc_handle:
q['assoc_handle'] = self.assoc_handle
response = Message(self.message.getOpenIDNamespace())
response.updateArgs(OPENID_NS, q)
return response.toURL(server_url)
def getCancelURL(self):
"""Get the URL to cancel this request.
Useful for creating a "Cancel" button on a web form so that operation
can be carried out directly without another trip through the server.
(Except you probably want to make another trip through the server so
that it knows that the user did make a decision. Or you could simulate
this method by doing C{.answer(False).encodeToURL()})
@returntype: str
@returns: The return_to URL with openid.mode = cancel.
@raises NoReturnError: when I do not have a return_to.
"""
if not self.return_to:
raise NoReturnToError
if self.immediate:
raise ValueError("Cancel is not an appropriate response to "
"immediate mode requests.")
response = Message(self.message.getOpenIDNamespace())
response.setArg(OPENID_NS, 'mode', 'cancel')
return response.toURL(self.return_to)
def __repr__(self):
return '<%s id:%r im:%s tr:%r ah:%r>' % (self.__class__.__name__,
self.identity,
self.immediate,
self.trust_root,
self.assoc_handle)
class OpenIDResponse(object):
"""I am a response to an OpenID request.
@ivar request: The request I respond to.
@type request: L{OpenIDRequest}
@ivar fields: My parameters as a dictionary with each key mapping to
one value. Keys are parameter names with no leading "C{openid.}".
e.g. "C{identity}" and "C{mac_key}", never "C{openid.identity}".
@type fields: L{openid.message.Message}
@ivar signed: The names of the fields which should be signed.
@type signed: list of str
"""
# Implementer's note: In a more symmetric client/server
# implementation, there would be more types of OpenIDResponse
# object and they would have validated attributes according to the
# type of response. But as it is, Response objects in a server are
# basically write-only, their only job is to go out over the wire,
# so this is just a loose wrapper around OpenIDResponse.fields.
def __init__(self, request):
"""Make a response to an L{OpenIDRequest}.
@type request: L{OpenIDRequest}
"""
self.request = request
self.fields = Message(request.namespace)
def __str__(self):
return "%s for %s: %s" % (
self.__class__.__name__,
self.request.__class__.__name__,
self.fields)
def toFormMarkup(self, form_tag_attrs=None):
"""Returns the form markup for this response.
@param form_tag_attrs: Dictionary of attributes to be added to
the form tag. 'accept-charset' and 'enctype' have defaults
that can be overridden. If a value is supplied for
'action' or 'method', it will be replaced.
@returntype: str
@since: 2.1.0
"""
return self.fields.toFormMarkup(self.request.return_to,
form_tag_attrs=form_tag_attrs)
def toHTML(self, form_tag_attrs=None):
"""Returns an HTML document that auto-submits the form markup
for this response.
@returntype: str
@see: toFormMarkup
@since: 2.1.?
"""
return oidutil.autoSubmitHTML(self.toFormMarkup(form_tag_attrs))
def renderAsForm(self):
"""Returns True if this response's encoding is
ENCODE_HTML_FORM. Convenience method for server authors.
@returntype: bool
@since: 2.1.0
"""
return self.whichEncoding() == ENCODE_HTML_FORM
def needsSigning(self):
"""Does this response require signing?
@returntype: bool
"""
return self.fields.getArg(OPENID_NS, 'mode') == 'id_res'
# implements IEncodable
def whichEncoding(self):
"""How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM.
@change: 2.1.0 added the ENCODE_HTML_FORM response.
"""
if self.request.mode in BROWSER_REQUEST_MODES:
if self.fields.getOpenIDNamespace() == OPENID2_NS and \
len(self.encodeToURL()) > OPENID1_URL_LIMIT:
return ENCODE_HTML_FORM
else:
return ENCODE_URL
else:
return ENCODE_KVFORM
def encodeToURL(self):
"""Encode a response as a URL for the user agent to GET.
You will generally use this URL with a HTTP redirect.
@returns: A URL to direct the user agent back to.
@returntype: str
"""
return self.fields.toURL(self.request.return_to)
def addExtension(self, extension_response):
"""
Add an extension response to this response message.
@param extension_response: An object that implements the
extension interface for adding arguments to an OpenID
message.
@type extension_response: L{openid.extension}
@returntype: None
"""
extension_response.toMessage(self.fields)
def encodeToKVForm(self):
"""Encode a response in key-value colon/newline format.
This is a machine-readable format used to respond to messages which
came directly from the consumer and not through the user agent.
@see: OpenID Specs,
U{Key-Value Colon/Newline format<http://openid.net/specs.bml#keyvalue>}
@returntype: str
"""
return self.fields.toKVForm()
class WebResponse(object):
"""I am a response to an OpenID request in terms a web server understands.
I generally come from an L{Encoder}, either directly or from
L{Server.encodeResponse}.
@ivar code: The HTTP code of this response.
@type code: int
@ivar headers: Headers to include in this response.
@type headers: dict
@ivar body: The body of this response.
@type body: str
"""
def __init__(self, code=HTTP_OK, headers=None, body=""):
"""Construct me.
These parameters are assigned directly as class attributes, see
my L{class documentation<WebResponse>} for their descriptions.
"""
self.code = code
if headers is not None:
self.headers = headers
else:
self.headers = {}
self.body = body
class Signatory(object):
"""I sign things.
I also check signatures.
All my state is encapsulated in an
L{OpenIDStore<openid.store.interface.OpenIDStore>}, which means
I'm not generally pickleable but I am easy to reconstruct.
@cvar SECRET_LIFETIME: The number of seconds a secret remains valid.
@type SECRET_LIFETIME: int
"""
SECRET_LIFETIME = 14 * 24 * 60 * 60 # 14 days, in seconds
# keys have a bogus server URL in them because the filestore
# really does expect that key to be a URL. This seems a little
# silly for the server store, since I expect there to be only one
# server URL.
_normal_key = 'http://localhost/|normal'
_dumb_key = 'http://localhost/|dumb'
def __init__(self, store):
"""Create a new Signatory.
@param store: The back-end where my associations are stored.
@type store: L{openid.store.interface.OpenIDStore}
"""
assert store is not None
self.store = store
def verify(self, assoc_handle, message):
"""Verify that the signature for some data is valid.
@param assoc_handle: The handle of the association used to sign the
data.
@type assoc_handle: str
@param message: The signed message to verify
@type message: openid.message.Message
@returns: C{True} if the signature is valid, C{False} if not.
@returntype: bool
"""
assoc = self.getAssociation(assoc_handle, dumb=True)
if not assoc:
logging.error("failed to get assoc with handle %r to verify "
"message %r"
% (assoc_handle, message))
return False
try:
valid = assoc.checkMessageSignature(message)
except ValueError, ex:
logging.exception("Error in verifying %s with %s: %s" % (message,
assoc,
ex))
return False
return valid
def sign(self, response):
"""Sign a response.
I take a L{OpenIDResponse}, create a signature for everything
in its L{signed<OpenIDResponse.signed>} list, and return a new
copy of the response object with that signature included.
@param response: A response to sign.
@type response: L{OpenIDResponse}
@returns: A signed copy of the response.
@returntype: L{OpenIDResponse}
"""
signed_response = deepcopy(response)
assoc_handle = response.request.assoc_handle
if assoc_handle:
# normal mode
# disabling expiration check because even if the association
# is expired, we still need to know some properties of the
# association so that we may preserve those properties when
# creating the fallback association.
assoc = self.getAssociation(assoc_handle, dumb=False,
checkExpiration=False)
if not assoc or assoc.expiresIn <= 0:
# fall back to dumb mode
signed_response.fields.setArg(
OPENID_NS, 'invalidate_handle', assoc_handle)
assoc_type = assoc and assoc.assoc_type or 'HMAC-SHA1'
if assoc and assoc.expiresIn <= 0:
# now do the clean-up that the disabled checkExpiration
# code didn't get to do.
self.invalidate(assoc_handle, dumb=False)
assoc = self.createAssociation(dumb=True, assoc_type=assoc_type)
else:
# dumb mode.
assoc = self.createAssociation(dumb=True)
try:
signed_response.fields = assoc.signMessage(signed_response.fields)
except kvform.KVFormError, err:
raise EncodingError(response, explanation=str(err))
return signed_response
def createAssociation(self, dumb=True, assoc_type='HMAC-SHA1'):
"""Make a new association.
@param dumb: Is this association for a dumb-mode transaction?
@type dumb: bool
@param assoc_type: The type of association to create. Currently
there is only one type defined, C{HMAC-SHA1}.
@type assoc_type: str
@returns: the new association.
@returntype: L{openid.association.Association}
"""
secret = cryptutil.getBytes(getSecretSize(assoc_type))
uniq = oidutil.toBase64(cryptutil.getBytes(4))
handle = '{%s}{%x}{%s}' % (assoc_type, int(time.time()), uniq)
assoc = Association.fromExpiresIn(
self.SECRET_LIFETIME, handle, secret, assoc_type)
if dumb:
key = self._dumb_key
else:
key = self._normal_key
self.store.storeAssociation(key, assoc)
return assoc
def getAssociation(self, assoc_handle, dumb, checkExpiration=True):
"""Get the association with the specified handle.
@type assoc_handle: str
@param dumb: Is this association used with dumb mode?
@type dumb: bool
@returns: the association, or None if no valid association with that
handle was found.
@returntype: L{openid.association.Association}
"""
# Hmm. We've created an interface that deals almost entirely with
# assoc_handles. The only place outside the Signatory that uses this
# (and thus the only place that ever sees Association objects) is
# when creating a response to an association request, as it must have
# the association's secret.
if assoc_handle is None:
raise ValueError("assoc_handle must not be None")
if dumb:
key = self._dumb_key
else:
key = self._normal_key
assoc = self.store.getAssociation(key, assoc_handle)
if assoc is not None and assoc.expiresIn <= 0:
logging.info("requested %sdumb key %r is expired (by %s seconds)" %
((not dumb) and 'not-' or '',
assoc_handle, assoc.expiresIn))
if checkExpiration:
self.store.removeAssociation(key, assoc_handle)
assoc = None
return assoc
def invalidate(self, assoc_handle, dumb):
"""Invalidates the association with the given handle.
@type assoc_handle: str
@param dumb: Is this association used with dumb mode?
@type dumb: bool
"""
if dumb:
key = self._dumb_key
else:
key = self._normal_key
self.store.removeAssociation(key, assoc_handle)
class Encoder(object):
"""I encode responses in to L{WebResponses<WebResponse>}.
If you don't like L{WebResponses<WebResponse>}, you can do
your own handling of L{OpenIDResponses<OpenIDResponse>} with
L{OpenIDResponse.whichEncoding}, L{OpenIDResponse.encodeToURL}, and
L{OpenIDResponse.encodeToKVForm}.
"""
responseFactory = WebResponse
def encode(self, response):
"""Encode a response to a L{WebResponse}.
@raises EncodingError: When I can't figure out how to encode this
message.
"""
encode_as = response.whichEncoding()
if encode_as == ENCODE_KVFORM:
wr = self.responseFactory(body=response.encodeToKVForm())
if isinstance(response, Exception):
wr.code = HTTP_ERROR
elif encode_as == ENCODE_URL:
location = response.encodeToURL()
wr = self.responseFactory(code=HTTP_REDIRECT,
headers={'location': location})
elif encode_as == ENCODE_HTML_FORM:
wr = self.responseFactory(code=HTTP_OK,
body=response.toHTML())
else:
# Can't encode this to a protocol message. You should probably
# render it to HTML and show it to the user.
raise EncodingError(response)
return wr
class SigningEncoder(Encoder):
"""I encode responses in to L{WebResponses<WebResponse>}, signing them when required.
"""
def __init__(self, signatory):
"""Create a L{SigningEncoder}.
@param signatory: The L{Signatory} I will make signatures with.
@type signatory: L{Signatory}
"""
self.signatory = signatory
def encode(self, response):
"""Encode a response to a L{WebResponse}, signing it first if appropriate.
@raises EncodingError: When I can't figure out how to encode this
message.
@raises AlreadySigned: When this response is already signed.
@returntype: L{WebResponse}
"""
# the isinstance is a bit of a kludge... it means there isn't really
# an adapter to make the interfaces quite match.
if (not isinstance(response, Exception)) and response.needsSigning():
if not self.signatory:
raise ValueError(
"Must have a store to sign this request: %s" %
(response,), response)
if response.fields.hasKey(OPENID_NS, 'sig'):
raise AlreadySigned(response)
response = self.signatory.sign(response)
return super(SigningEncoder, self).encode(response)
class Decoder(object):
"""I decode an incoming web request in to a L{OpenIDRequest}.
"""
_handlers = {
'checkid_setup': CheckIDRequest.fromMessage,
'checkid_immediate': CheckIDRequest.fromMessage,
'check_authentication': CheckAuthRequest.fromMessage,
'associate': AssociateRequest.fromMessage,
}
def __init__(self, server):
"""Construct a Decoder.
@param server: The server which I am decoding requests for.
(Necessary because some replies reference their server.)
@type server: L{Server}
"""
self.server = server
def decode(self, query):
"""I transform query parameters into an L{OpenIDRequest}.
If the query does not seem to be an OpenID request at all, I return
C{None}.
@param query: The query parameters as a dictionary with each
key mapping to one value.
@type query: dict
@raises ProtocolError: When the query does not seem to be a valid
OpenID request.
@returntype: L{OpenIDRequest}
"""
if not query:
return None
try:
message = Message.fromPostArgs(query)
except InvalidOpenIDNamespace, err:
# It's useful to have a Message attached to a ProtocolError, so we
# override the bad ns value to build a Message out of it. Kinda
# kludgy, since it's made of lies, but the parts that aren't lies
# are more useful than a 'None'.
query = query.copy()
query['openid.ns'] = OPENID2_NS
message = Message.fromPostArgs(query)
raise ProtocolError(message, str(err))
mode = message.getArg(OPENID_NS, 'mode')
if not mode:
fmt = "No mode value in message %s"
raise ProtocolError(message, text=fmt % (message,))
handler = self._handlers.get(mode, self.defaultDecoder)
return handler(message, self.server.op_endpoint)
def defaultDecoder(self, message, server):
"""Called to decode queries when no handler for that mode is found.
@raises ProtocolError: This implementation always raises
L{ProtocolError}.
"""
mode = message.getArg(OPENID_NS, 'mode')
fmt = "Unrecognized OpenID mode %r"
raise ProtocolError(message, text=fmt % (mode,))
class Server(object):
"""I handle requests for an OpenID server.
Some types of requests (those which are not C{checkid} requests) may be
handed to my L{handleRequest} method, and I will take care of it and
return a response.
For your convenience, I also provide an interface to L{Decoder.decode}
and L{SigningEncoder.encode} through my methods L{decodeRequest} and
L{encodeResponse}.
All my state is encapsulated in an
L{OpenIDStore<openid.store.interface.OpenIDStore>}, which means
I'm not generally pickleable but I am easy to reconstruct.
Example::
oserver = Server(FileOpenIDStore(data_path), "http://example.com/op")
request = oserver.decodeRequest(query)
if request.mode in ['checkid_immediate', 'checkid_setup']:
if self.isAuthorized(request.identity, request.trust_root):
response = request.answer(True)
elif request.immediate:
response = request.answer(False)
else:
self.showDecidePage(request)
return
else:
response = oserver.handleRequest(request)
webresponse = oserver.encode(response)
@ivar signatory: I'm using this for associate requests and to sign things.
@type signatory: L{Signatory}
@ivar decoder: I'm using this to decode things.
@type decoder: L{Decoder}
@ivar encoder: I'm using this to encode things.
@type encoder: L{Encoder}
@ivar op_endpoint: My URL.
@type op_endpoint: str
@ivar negotiator: I use this to determine which kinds of
associations I can make and how.
@type negotiator: L{openid.association.SessionNegotiator}
"""
def __init__(
self,
store,
op_endpoint=None,
signatoryClass=Signatory,
encoderClass=SigningEncoder,
decoderClass=Decoder):
"""A new L{Server}.
@param store: The back-end where my associations are stored.
@type store: L{openid.store.interface.OpenIDStore}
@param op_endpoint: My URL, the fully qualified address of this
server's endpoint, i.e. C{http://example.com/server}
@type op_endpoint: str
@change: C{op_endpoint} is new in library version 2.0. It
currently defaults to C{None} for compatibility with
earlier versions of the library, but you must provide it
if you want to respond to any version 2 OpenID requests.
"""
self.store = store
self.signatory = signatoryClass(self.store)
self.encoder = encoderClass(self.signatory)
self.decoder = decoderClass(self)
self.negotiator = default_negotiator.copy()
if not op_endpoint:
warnings.warn("%s.%s constructor requires op_endpoint parameter "
"for OpenID 2.0 servers" %
(self.__class__.__module__, self.__class__.__name__),
stacklevel=2)
self.op_endpoint = op_endpoint
def handleRequest(self, request):
"""Handle a request.
Give me a request, I will give you a response. Unless it's a type
of request I cannot handle myself, in which case I will raise
C{NotImplementedError}. In that case, you can handle it yourself,
or add a method to me for handling that request type.
@raises NotImplementedError: When I do not have a handler defined
for that type of request.
@returntype: L{OpenIDResponse}
"""
handler = getattr(self, 'openid_' + request.mode, None)
if handler is not None:
return handler(request)
else:
raise NotImplementedError(
"%s has no handler for a request of mode %r." %
(self, request.mode))
def openid_check_authentication(self, request):
"""Handle and respond to C{check_authentication} requests.
@returntype: L{OpenIDResponse}
"""
return request.answer(self.signatory)
def openid_associate(self, request):
"""Handle and respond to C{associate} requests.
@returntype: L{OpenIDResponse}
"""
# XXX: TESTME
assoc_type = request.assoc_type
session_type = request.session.session_type
if self.negotiator.isAllowed(assoc_type, session_type):
assoc = self.signatory.createAssociation(dumb=False,
assoc_type=assoc_type)
return request.answer(assoc)
else:
message = ('Association type %r is not supported with '
'session type %r' % (assoc_type, session_type))
(preferred_assoc_type, preferred_session_type) = \
self.negotiator.getAllowedType()
return request.answerUnsupported(
message,
preferred_assoc_type,
preferred_session_type)
def decodeRequest(self, query):
"""Transform query parameters into an L{OpenIDRequest}.
If the query does not seem to be an OpenID request at all, I return
C{None}.
@param query: The query parameters as a dictionary with each
key mapping to one value.
@type query: dict
@raises ProtocolError: When the query does not seem to be a valid
OpenID request.
@returntype: L{OpenIDRequest}
@see: L{Decoder.decode}
"""
return self.decoder.decode(query)
def encodeResponse(self, response):
"""Encode a response to a L{WebResponse}, signing it first if appropriate.
@raises EncodingError: When I can't figure out how to encode this
message.
@raises AlreadySigned: When this response is already signed.
@returntype: L{WebResponse}
@see: L{SigningEncoder.encode}
"""
return self.encoder.encode(response)
class ProtocolError(Exception):
"""A message did not conform to the OpenID protocol.
@ivar message: The query that is failing to be a valid OpenID request.
@type message: openid.message.Message
"""
def __init__(self, message, text=None, reference=None, contact=None):
"""When an error occurs.
@param message: The message that is failing to be a valid
OpenID request.
@type message: openid.message.Message
@param text: A message about the encountered error. Set as C{args[0]}.
@type text: str
"""
self.openid_message = message
self.reference = reference
self.contact = contact
assert type(message) not in [str, unicode]
Exception.__init__(self, text)
def getReturnTo(self):
"""Get the return_to argument from the request, if any.
@returntype: str
"""
if self.openid_message is None:
return None
else:
return self.openid_message.getArg(OPENID_NS, 'return_to')
def hasReturnTo(self):
"""Did this request have a return_to parameter?
@returntype: bool
"""
return self.getReturnTo() is not None
def toMessage(self):
"""Generate a Message object for sending to the relying party,
after encoding.
"""
namespace = self.openid_message.getOpenIDNamespace()
reply = Message(namespace)
reply.setArg(OPENID_NS, 'mode', 'error')
reply.setArg(OPENID_NS, 'error', str(self))
if self.contact is not None:
reply.setArg(OPENID_NS, 'contact', str(self.contact))
if self.reference is not None:
reply.setArg(OPENID_NS, 'reference', str(self.reference))
return reply
# implements IEncodable
def encodeToURL(self):
return self.toMessage().toURL(self.getReturnTo())
def encodeToKVForm(self):
return self.toMessage().toKVForm()
def toFormMarkup(self):
"""Encode to HTML form markup for POST.
@since: 2.1.0
"""
return self.toMessage().toFormMarkup(self.getReturnTo())
def toHTML(self):
"""Encode to a full HTML page, wrapping the form markup in a page
that will autosubmit the form.
@since: 2.1.?
"""
return oidutil.autoSubmitHTML(self.toFormMarkup())
def whichEncoding(self):
"""How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_KVFORM, or None. If None,
I cannot be encoded as a protocol message and should be
displayed to the user.
"""
if self.hasReturnTo():
if self.openid_message.getOpenIDNamespace() == OPENID2_NS and \
len(self.encodeToURL()) > OPENID1_URL_LIMIT:
return ENCODE_HTML_FORM
else:
return ENCODE_URL
if self.openid_message is None:
return None
mode = self.openid_message.getArg(OPENID_NS, 'mode')
if mode:
if mode not in BROWSER_REQUEST_MODES:
return ENCODE_KVFORM
# According to the OpenID spec as of this writing, we are probably
# supposed to switch on request type here (GET versus POST) to figure
# out if we're supposed to print machine-readable or human-readable
# content at this point. GET/POST seems like a pretty lousy way of
# making the distinction though, as it's just as possible that the
# user agent could have mistakenly been directed to post to the
# server URL.
# Basically, if your request was so broken that you didn't manage to
# include an openid.mode, I'm not going to worry too much about
# returning you something you can't parse.
return None
class VersionError(Exception):
"""Raised when an operation was attempted that is not compatible with
the protocol version being used."""
class NoReturnToError(Exception):
"""Raised when a response to a request cannot be generated because
the request contains no return_to URL.
"""
pass
class EncodingError(Exception):
"""Could not encode this as a protocol message.
You should probably render it and show it to the user.
@ivar response: The response that failed to encode.
@type response: L{OpenIDResponse}
"""
def __init__(self, response, explanation=None):
Exception.__init__(self, response)
self.response = response
self.explanation = explanation
def __str__(self):
if self.explanation:
s = '%s: %s' % (self.__class__.__name__,
self.explanation)
else:
s = '%s for Response %s' % (
self.__class__.__name__, self.response)
return s
class AlreadySigned(EncodingError):
"""This response is already signed."""
class UntrustedReturnURL(ProtocolError):
"""A return_to is outside the trust_root."""
def __init__(self, message, return_to, trust_root):
ProtocolError.__init__(self, message)
self.return_to = return_to
self.trust_root = trust_root
def __str__(self):
return "return_to %r not under trust_root %r" % (self.return_to,
self.trust_root)
class MalformedReturnURL(ProtocolError):
"""The return_to URL doesn't look like a valid URL."""
def __init__(self, openid_message, return_to):
self.return_to = return_to
ProtocolError.__init__(self, openid_message)
class MalformedTrustRoot(ProtocolError):
"""The trust root is not well-formed.
@see: OpenID Specs, U{openid.trust_root<http://openid.net/specs.bml#mode-checkid_immediate>}
"""
pass
#class IEncodable: # Interface
# def encodeToURL(return_to):
# """Encode a response as a URL for redirection.
#
# @returns: A URL to direct the user agent back to.
# @returntype: str
# """
# pass
#
# def encodeToKvform():
# """Encode a response in key-value colon/newline format.
#
# This is a machine-readable format used to respond to messages which
# came directly from the consumer and not through the user agent.
#
# @see: OpenID Specs,
# U{Key-Value Colon/Newline format<http://openid.net/specs.bml#keyvalue>}
#
# @returntype: str
# """
# pass
#
# def whichEncoding():
# """How should I be encoded?
#
# @returns: one of ENCODE_URL, ENCODE_KVFORM, or None. If None,
# I cannot be encoded as a protocol message and should be
# displayed to the user.
# """
# pass
| StarcoderdataPython |
1849976 | <gh_stars>0
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ACC_PUBLIC = 0x1
ACC_PRIVATE = 0x2
ACC_PROTECTED = 0x4
ACC_STATIC = 0x8
ACC_FINAL = 0x10
ACC_SYNCHRONIZED = 0x20
ACC_VOLATILE = 0x40
ACC_BRIDGE = 0x40
ACC_TRANSIENT = 0x80
ACC_VARARGS = 0x80
ACC_NATIVE = 0x100
ACC_INTERFACE = 0x200
ACC_ABSTRACT = 0x400
ACC_STRICT = 0x800
ACC_SYNTHETIC = 0x1000
ACC_ANNOTATION = 0x2000
ACC_ENUM = 0x4000
ACC_CONSTRUCTOR = 0x10000
ACC_DECLARED_SYNCHRONIZED = 0x20000
# Might as well include this for completeness even though modern JVMs ignore it
ACC_SUPER = 0x20
CLASS_FLAGS = ACC_PUBLIC | ACC_FINAL | ACC_SUPER | ACC_INTERFACE | ACC_ABSTRACT | ACC_SYNTHETIC | ACC_ANNOTATION | ACC_ENUM
FIELD_FLAGS = ACC_PUBLIC | ACC_PRIVATE | ACC_PROTECTED | ACC_STATIC | ACC_FINAL | ACC_VOLATILE | ACC_TRANSIENT | ACC_SYNTHETIC | ACC_ENUM
METHOD_FLAGS = ACC_PUBLIC | ACC_PRIVATE | ACC_PROTECTED | ACC_STATIC | ACC_FINAL | ACC_SYNCHRONIZED | ACC_BRIDGE | ACC_VARARGS | ACC_NATIVE | ACC_ABSTRACT | ACC_STRICT | ACC_SYNTHETIC
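# Illustrative helper (added for clarity, not part of the original module): the
# constants above are bitmasks, so testing an access_flags word is a bitwise AND.
def has_flags(access_flags, *flags):
    """Return True if every given flag bit is set in access_flags."""
    mask = 0
    for flag in flags:
        mask |= flag
    return access_flags & mask == mask
if __name__ == '__main__':
    # Example: a `public static` method carries ACC_PUBLIC | ACC_STATIC.
    assert has_flags(ACC_PUBLIC | ACC_STATIC, ACC_PUBLIC, ACC_STATIC)
    assert not has_flags(ACC_PUBLIC | ACC_STATIC, ACC_FINAL)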
| StarcoderdataPython |
4993645 | import typing
import orm
from orm import Base
def init_tables(database, replica_database=None):
metadata = orm.utils.init_tables(Base, database, replica_database=replica_database)
return metadata
| StarcoderdataPython |
8186965 | """
Toy Indexer
===========
Toy indexing example for testing purposes.
:Authors: <NAME>, <NAME>
"""
from nordlys.core.retrieval.elastic import Elastic
def main():
index_name = "toy_index"
mappings = {
"title": Elastic.analyzed_field(),
"content": Elastic.analyzed_field(),
}
docs = {
1: {"title": "Rap God",
"content": "gonna, gonna, Look, I was gonna go easy on you and not to hurt your feelings"
},
2: {"title": "Lose Yourself",
"content": "Yo, if you could just, for one minute Or one split second in time, forget everything Everything that bothers you, or your problems Everything, and follow me"
},
3: {"title": "Love The Way You Lie",
"content": "Just gonna stand there and watch me burn But that's alright, because I like the way it hurts"
},
4: {"title": "The Monster",
"content": ["gonna gonna I'm friends with the monster", "That's under my bed Get along with the voices inside of my head"]
},
5: {"title": "Beautiful",
"content": "Lately I've been hard to reach I've been too long on my own Everybody has a private world Where they can be alone"
}
}
elastic = Elastic(index_name)
elastic.create_index(mappings, force=True)
elastic.add_docs_bulk(docs)
print("index has been built")
if __name__ == "__main__":
main()
| StarcoderdataPython |
11312437 | <gh_stars>10-100
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
postprocess
'''
import os
import numpy as np
from src.model_utils.config import config
from eval import calculate_auc
def get_acc():
"""
    generate accuracy
"""
res_pred = []
res_true = []
label_list = np.load(config.label_path)
file_num = len(os.listdir(config.post_result_path))
for i in range(file_num):
f_name = "fcn4_bs" + str(config.batch_size) + "_" + str(i) + "_0.bin"
x = np.fromfile(os.path.join(config.post_result_path, f_name), np.float32)
x = x.reshape(config.batch_size, config.num_classes)
res_pred.append(x)
res_true.append(label_list[i])
res_pred = np.concatenate(res_pred, axis=0)
res_true = np.concatenate(res_true, axis=0)
auc = calculate_auc(res_true, res_pred)
print("=" * 10 + "Validation Performance" + "=" * 10)
print("AUC: {:.5f}".format(auc))
if __name__ == "__main__":
get_acc()
| StarcoderdataPython |
11234406 | <gh_stars>1-10
import utils
import hlir
import draw
class Function(draw.NodeContainer):
def __init__(self, header_node, num_params, num_retvals, external):
draw.NodeContainer.__init__(self, header_node)
self.address = self.header_node.address
self.params = []
self.num_params = num_params
self.num_retvals = num_retvals
self.external = external
self.flattened = False
def get_nodes_by_addr(self):
return {node.address: node for node in self.nodes()}
def checksum(self):
c = 0
c += hash(self.header_node)
c += hash(self.address)
c += hash(self.num_params)
c += hash(self.num_retvals)
for p in self.params:
c += hash(p)
for node in self.nodes():
c += hash(node)
for s in node.get_successors():
c += hash(s)
for p in node.get_predecessors():
c += hash(p)
return c
def bbs(self):
return [node for node in self.nodes()
if isinstance(node, hlir.BasicBlock)]
def __str__(self):
out = "Function (addr=0x%x" % self.address
out += ", params: [%s]" % ",".join(str(p) for p in self.params)
out += ", retvals: %d" % self.num_retvals
out += ") {\n"
for node in sorted(self.nodes(), key=lambda n: id(n)):
out += utils.indent(str(node) + "\n")
#out += utils.indent(str(self.header_node))
out += "}\n"
return out
| StarcoderdataPython |
5134144 | <gh_stars>0
if __name__ == '__main__':
x = [5, 7, 8, 1, 2]
y = []
for num in x:
num **= 2
y.append(num)
y.sort(reverse=True)
print(y) | StarcoderdataPython |
11282028 | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
import unittest
class TestRelu(unittest.TestCase):
def test_relu_basic(self):
"""Basic test of the PyTorch relu Node on Glow."""
def test_f(a):
b = F.relu(a)
return F.relu(b)
x = torch.randn(4)
# make sure we have at least one negative
x[0] = -2.0
jitVsGlow(test_f, x, expected_fused_ops={"aten::relu"})
def test_relu_inplace(self):
"""Test of the PyTorch relu_ Node on Glow."""
def test_f(a):
b = F.relu(a, inplace=True)
return F.relu(b, inplace=True)
x = torch.randn(4)
# make sure we have at least one negative
x[0] = -2.0
jitVsGlow(test_f, x, expected_fused_ops={"aten::relu_"})
| StarcoderdataPython |
11239345 | <filename>samples/iot gateway example.py
# -------------------------------------------------------------------------
# Copyright (c) 2020, PTC Inc. and/or all its affiliates. All rights reserved.
# See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# IoT Gateway Example - Simple example on how to manage a connection and
# execute various calls for the IoT Gateway components of the Kepware
# configuration API
from kepconfig import connection, error
from kepconfig.connectivity import channel
import kepconfig.iot_gateway as IoT
from kepconfig.iot_gateway import agent, iot_items
import json
# Agent name and Type to be used - constants from kepconfig.iotgateway
# can be used to identify the type of agent
agent_name = 'MQTT Agent 1'
agent_type = IoT.MQTT_CLIENT_AGENT
#Tag Address to add to the IoT agent
iot_item_name = "Channel1.Device1.Tag1"
def HTTPErrorHandler(err):
# Generic Handler for exception errors
if err.__class__ is error.KepHTTPError:
print(err.code)
print(err.msg)
print(err.url)
print(err.hdrs)
print(err.payload)
elif err.__class__ is error.KepURLError:
print(err.url)
print(err.reason)
else:
print('Different Exception Received: {}'.format(err))
# This creates a server reference that is used to target all modifications of
# the Kepware configuration
server = connection.server(host = '127.0.0.1', port = 57412, user = 'Administrator', pw = '')
# Add a Channel using the "Simulator Driver"with device and tags.
# These tags will be added to the IoT Agent.
channel_data = {
"common.ALLTYPES_NAME": "Channel1",
"common.ALLTYPES_DESCRIPTION": "This is the test channel created",
"servermain.MULTIPLE_TYPES_DEVICE_DRIVER": "Simulator",
"devices": [
{
"common.ALLTYPES_NAME": "Device1",
"common.ALLTYPES_DESCRIPTION": "Hello, new description",
"servermain.MULTIPLE_TYPES_DEVICE_DRIVER": "Simulator",
"servermain.DEVICE_MODEL": 0,
"tags": [
{
"common.ALLTYPES_NAME": "Tag1",
"common.ALLTYPES_DESCRIPTION": "Ramping Read/Write tag used to verify client connection",
"servermain.TAG_ADDRESS": "R0001",
"servermain.TAG_DATA_TYPE": 5,
"servermain.TAG_READ_WRITE_ACCESS": 1,
"servermain.TAG_SCAN_RATE_MILLISECONDS": 100,
"servermain.TAG_SCALING_TYPE": 0
},
{
"common.ALLTYPES_NAME": "Tag2",
"common.ALLTYPES_DESCRIPTION": "Constant Read/Write tag used to verify client connection",
"servermain.TAG_ADDRESS": "K0001",
"servermain.TAG_DATA_TYPE": 5,
"servermain.TAG_READ_WRITE_ACCESS": 1,
"servermain.TAG_SCAN_RATE_MILLISECONDS": 100,
"servermain.TAG_SCALING_TYPE": 0
}
]
}
]
}
try:
print("{} - {}".format("Adding Channel, Device and tags", channel.add_channel(server,channel_data)))
except Exception as err:
HTTPErrorHandler(err)
# Add the MQTT Agent with the appropriate parameters
agent_data = {
"common.ALLTYPES_NAME": agent_name,
"iot_gateway.AGENTTYPES_ENABLED": True,
"iot_gateway.MQTT_CLIENT_URL": "tcp://localhost:1883",
"iot_gateway.MQTT_CLIENT_TOPIC": "iotgateway",
"iot_gateway.MQTT_CLIENT_QOS": 1,
"iot_gateway.AGENTTYPES_RATE_MS": 10000,
"iot_gateway.AGENTTYPES_PUBLISH_FORMAT": 0,
"iot_gateway.AGENTTYPES_MAX_EVENTS": 1000,
"iot_gateway.AGENTTYPES_TIMEOUT_S": 5,
"iot_gateway.AGENTTYPES_MESSAGE_FORMAT": 0,
"iot_gateway.MQTT_CLIENT_CLIENT_ID": "",
"iot_gateway.MQTT_CLIENT_USERNAME": "",
"iot_gateway.MQTT_CLIENT_PASSWORD": ""
}
try:
print("{} - {}".format("Add the MQTT Agent", agent.add_iot_agent(server, agent_data, agent_type)))
except Exception as err:
HTTPErrorHandler(err)
# Modify a property of the Agent
agent_data = {
}
agent_data['common.ALLTYPES_DESCRIPTION'] = 'This is the test agent created'
try:
print("{} - {}".format("Modify property in the MQTT Agent", agent.modify_iot_agent(server,agent_data, agent_name, agent_type)))
except Exception as err:
HTTPErrorHandler(err)
# Get the properties for the agent that was created. It will return the
# JSON of the properties.
try:
print("{} - {}".format("Read properties of the MQTT Agent", agent.get_iot_agent(server, agent_name, agent_type)))
except Exception as err:
HTTPErrorHandler(err)
# Get a list of all MQTT Agents that are configured
try:
print("{} - {}".format("Getting list of MQTT Agents", agent.get_all_iot_agents(server, agent_type)))
except Exception as err:
HTTPErrorHandler(err)
# Add a tag or IoT Item to the MQTT Agent to start publishing
iot_item_data = {
"common.ALLTYPES_NAME": iot_item_name,
"common.ALLTYPES_DESCRIPTION": "",
"iot_gateway.IOT_ITEM_SERVER_TAG": iot_item_name,
"iot_gateway.IOT_ITEM_USE_SCAN_RATE": True,
"iot_gateway.IOT_ITEM_SCAN_RATE_MS": 1000,
"iot_gateway.IOT_ITEM_SEND_EVERY_SCAN": False,
"iot_gateway.IOT_ITEM_DEADBAND_PERCENT": 0,
"iot_gateway.IOT_ITEM_ENABLED": True,
"iot_gateway.IOT_ITEM_DATA_TYPE": 5
}
try:
print("{} - {}".format("Add new tag to the MQTT Agent", iot_items.add_iot_item(server, iot_item_data, agent_name, agent_type)))
except Exception as err:
HTTPErrorHandler(err)
# Modify properties of the tag or IoT Item. If the "common.ALLTYPES_Name" is defined
# the "modify_iot_item" function does not need have the agent name as an input
modify_iot_item = {
"common.ALLTYPES_NAME": iot_item_name,
"iot_gateway.IOT_ITEM_SCAN_RATE_MS": 500
}
try:
print("{} - {}".format("Modify the tag or IoT Item added", iot_items.modify_iot_item(server, modify_iot_item, agent_name, agent_type)))
except Exception as err:
HTTPErrorHandler(err)
# Modify properties of the tag or IoT Item. (Version 2) It is not necessary to pass JSON
# with the "common.ALLTYPES_Name" of the tag to modify. It can be passed as a input
# for the "modify_iot_item" function. "Force" will force the
# update to the Kepware Server, if "FORCE_UPDATE" not provided in the JSON data.
modify_iot_item = {
"iot_gateway.IOT_ITEM_SCAN_RATE_MS": 2000
}
try:
print("{} - {}".format("Modify the tag or IoT Item added again", iot_items.modify_iot_item(server, modify_iot_item, agent_name, agent_type, iot_item_name, force = True)))
except Exception as err:
HTTPErrorHandler(err)
# Read the tag or IoT Item configured in the MQTT Agent
try:
print("{} - {}".format("Read the properties of the IoT Item", iot_items.get_iot_item(server, iot_item_name, agent_name, agent_type)))
except Exception as err:
HTTPErrorHandler(err)
# Get a list of all tags or IoT Items configured in the MQTT Agent
try:
print("{} - {}".format("Get a list of all the IoT Items configured in the MQTT Agent", iot_items.get_all_iot_items(server, agent_name, agent_type)))
except Exception as err:
HTTPErrorHandler(err)
# Delete a tag or IoT Item configured in the MQTT Agent
try:
print("{} - {}".format("Delete the IoT Item", iot_items.del_iot_item(server, iot_item_name, agent_name, agent_type)))
except Exception as err:
HTTPErrorHandler(err)
# Delete the MQTT Agent
try:
print("{} - {}".format("Delete the MQTT Agent", agent.del_iot_agent(server, agent_name, agent_type)))
except Exception as err:
HTTPErrorHandler(err) | StarcoderdataPython |
5057848 | <filename>alf/algorithms/memory.py
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a simple Memory module described in arXiv:1803.10760."""
import abc
import math
from typing import Callable
import tensorflow as tf
import tensorflow.keras.layers as layers
import tensorflow.keras.activations as activations
from tf_agents.networks import network
from alf.utils.common import expand_dims_as, concat_shape
class Memory(object):
"""Abstract base class for Memory."""
def __init__(self, dim, size, state_spec, name="Memory"):
"""Create an instance of `Memory`.
Args:
dim (int): dimension of memory content
size (int): number of memory slots
state_spec (nested TensorSpec): the spec for the states
name (str): name of this memory
"""
self._dim = dim
self._size = size
self._state_spec = state_spec
self._name = name
@property
def dim(self):
"""Get the dimension of each content vector."""
return self._dim
@property
def size(self):
"""Get the size of the memory (i.e. the number of memory slots)."""
return self._size
@property
def state_spec(self):
"""Get the state tensor specs."""
return self._state_spec
@abc.abstractmethod
def read(self, keys):
"""Read out memory vectors for the given keys.
Args:
keys (Tensor): shape is (b, dim) or (b, k, dim) where b is batch
size, k is the number of read keys, and dim is memory content
dimension
Returns:
            result (Tensor): shape is the same as keys. result[..., i] is the read
result for the corresponding key.
"""
pass
@abc.abstractmethod
def write(self, content):
"""Write content to memory.
        How it is written to the memory buffer is decided by the
subclass.
Args:
content (Tensor): shape should be (b, dim)
"""
pass
class MemoryWithUsage(Memory):
"""Memory with usage indicator.
MemoryWithUsage stores memory in a matrix. During memory `write`, the memory
slot with the smallest usage is replaced by the new memory content. The
    memory content can be retrieved through an attention mechanism using `read`.
"""
def __init__(self,
dim,
size,
snapshot_only=False,
normalize=True,
scale=None,
usage_decay=None,
name='MemoryWithUsage'):
"""Create an instance of `MemoryWithUsage`.
See Methods 2.3 of "Unsupervised Predictive Memory in a Goal-Directed
Agent"
Args:
dim (int): dimension of memory content
size (int): number of memory slots
snapshot_only (bool): If True, only keeps the last snapshot of the
                memory instead of keeping all the memory snapshots at every step.
If True, gradient cannot be propagated to the writer.
normalize (bool): If True, use cosine similarity, otherwise use dot
product.
scale (None|float): Scale the similarity by this. If scale is None,
                a default value is used based on `normalize`. If `normalize` is True,
                `scale` defaults to 5.0. If `normalize` is False, `scale` defaults
                to `1/sqrt(dim)`.
usage_decay (None|float): The usage will be scaled by this factor
                at every `write` call. If None, it defaults to `1 - 1 / size`
"""
self._normalize = normalize
if scale is None:
if normalize:
scale = 5.0
else:
scale = 1. / math.sqrt(dim)
self._scale = scale
self._built = False
self._snapshot_only = snapshot_only
if usage_decay is None:
usage_decay = 1. - 1. / size
self._usage_decay = usage_decay
state_spec = (tf.TensorSpec([size, dim], dtype=tf.float32),
tf.TensorSpec([size], dtype=tf.float32))
super(MemoryWithUsage, self).__init__(
dim, size, state_spec=state_spec, name=name)
def build(self, batch_size):
"""Build the memory for batch_size.
        The user does not need to call this explicitly. `read` and `write` will
automatically call this if the memory has not been built yet.
Note: Subsequent `write` and `read` must match this `batch_size`
Args:
batch_size (int): batch size of the model.
"""
self._batch_size = batch_size
self._initial_memory = tf.zeros((batch_size, self.size, self.dim))
self._initial_usage = tf.zeros((batch_size, self.size))
if self._snapshot_only:
self._memory = tf.Variable(self._initial_memory, trainable=False)
self._usage = tf.Variable(self._initial_usage, trainable=False)
else:
self._memory = self._initial_memory
self._usage = self._initial_usage
self._built = True
def genkey_and_read(self, keynet: Callable, query, flatten_result=True):
"""Generate key and read.
Args:
keynet (Callable): keynet(query) is a tensor of shape
(batch_size, num_keys * (dim + 1))
query (Tensor): the query from which the keys are generated
flatten_result (bool): If True, the result shape will be
(batch_size, num_keys * dim), otherwise it is
(batch_size, num_keys, dim)
Returns:
            result Tensor: If flatten_result is True,
its shape is (batch_size, num_keys * dim), otherwise it is
(batch_size, num_keys, dim)
"""
batch_size = tf.shape(query)[0]
keys_and_scales = keynet(query)
num_keys = keys_and_scales.shape[-1] // (self.dim + 1)
assert num_keys * (self.dim + 1) == keys_and_scales.shape[-1]
keys, scales = tf.split(
keys_and_scales,
num_or_size_splits=[num_keys * self.dim, num_keys],
axis=-1)
keys = tf.reshape(
keys, concat_shape(tf.shape(keys)[:-1], [num_keys, self.dim]))
scales = tf.math.softplus(tf.reshape(scales, tf.shape(keys)[:-1]))
r = self.read(keys, scales)
if flatten_result:
r = tf.reshape(r, (batch_size, num_keys * self.dim))
return r
def read(self, keys, scale=None):
"""Read from memory.
        Read the memory for the given keys. For each key in keys we will get one
result as `r = sum_i M[i] a[i]` where `M[i]` is the memory content
at location i and `a[i]` is the attention weight for key at location i.
        `a` is calculated as the softmax of a scaled similarity between the key and
        each memory content: `a[i] = exp(scale*sim[i]) / sum_j exp(scale*sim[j])`
Args:
keys (Tensor): shape[-1] is dim.
For single key read, the shape is (batch_size, dim).
                For multiple key read, the shape is (batch_size, k, dim), where
k is the number of keys.
scale (None|float|Tensor): shape is () or keys.shape[:-1]. The
cosine similarities are multiplied with `scale` before softmax
is applied. If None, use the scale provided at constructor.
Returns:
            result Tensor: shape is the same as keys. result[..., i] is the read
result for the corresponding key.
"""
if not self._built:
self.build(keys.shape[0])
assert 2 <= len(keys.shape) <= 3
assert keys.shape[0] == self._batch_size
assert keys.shape[-1] == self.dim
if scale is None:
scale = self._scale
else:
if isinstance(scale, (int, float)):
pass
else: # assuming it's Tensor
scale = expand_dims_as(scale, keys)
sim = layers.dot([keys, self._memory],
axes=-1,
normalize=self._normalize)
sim = sim * scale
attention = activations.softmax(sim)
result = layers.dot([attention, self._memory], axes=(-1, 1))
if len(sim.shape) > 2: # multiple read keys
usage = tf.reduce_sum(
attention, axis=tf.range(1,
len(sim.shape) - 1))
else:
usage = attention
if self._snapshot_only:
self._usage.assign_add(usage)
else:
self._usage = self._usage + usage
return result
def write(self, content):
"""Write content to memory.
Append the content to memory. If the memory is full, the slot with the
        smallest usage will be overridden. The usage is calculated during read as
the sum of past attentions.
Args:
content (Tensor): shape should be (b, dim)
"""
if not self._built:
self.build(content.shape[0])
assert len(content.shape) == 2
assert content.shape[0] == self._batch_size
assert content.shape[1] == self.dim
location = tf.argmin(self._usage, -1)
loc_weight = tf.one_hot(location, depth=self._size)
# reset usage for at the new location
usage = self._usage * (1 - loc_weight) + loc_weight
# update content at the new location
loc_weight = tf.expand_dims(loc_weight, 2)
memory = self._usage_decay * (1 - loc_weight) * self._memory \
+ loc_weight * tf.expand_dims(content, 1)
if self._snapshot_only:
self._usage.assign(usage)
self._memory.assign(memory)
else:
self._usage = usage
self._memory = memory
def reset(self):
"""Reset the the memory to the initial state.
Both memory and uage are set to zeros.
"""
if self._snapshot_only:
self._usage.assign(self._initial_usage)
self._memory.assign(self._initial_memory)
else:
self._usage = self._initial_usage
self._memory = self._initial_memory
@property
def usage(self):
        Get the usage for each memory slot.
Returns:
usage (Tensor) of shape (batch_size, size)
"""
return self._usage
def __str__(self):
s = "MemoryWithUsage: size=%s dim=%s" % (self.size, self.dim) + "\n" \
+ " memory: " + str(self._memory) + "\n" \
+ " usage: " + str(self._usage)
return s
@property
def states(self):
"""Get the states of the memory.
Returns:
memory states: tuple of memory content and usage tensor.
"""
assert not self._snapshot_only, (
"states() is not supported for snapshot_only memory")
return (self._memory, self._usage)
def from_states(self, states):
"""Restore the memory from states.
Args:
states (tuple of Tensor): It is should be obtained from states().
"""
assert not self._snapshot_only, (
"from_states() is not supported for snapshot_only memory")
if states is None:
self._memory = None
self._usage = None
self._built = False
else:
tf.nest.assert_same_structure(states, self.state_spec)
self._memory, self._usage = states
self._batch_size = self._memory.shape[0]
self._built = True
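# Usage sketch (added for illustration, not part of the original module): write a
# batch of content vectors and read them back with the same vectors as queries.
# Shapes follow the docstrings above; exact values depend on the random input.
if __name__ == '__main__':
    batch_size, dim, size = 2, 4, 8
    memory = MemoryWithUsage(dim, size, snapshot_only=True)
    content = tf.random.normal((batch_size, dim))
    memory.write(content)  # fills the least-used slot for each batch element
    result = memory.read(content)  # shape (batch_size, dim)
    print(result.shape, memory.usage.shape)  # (2, 4) (2, 8)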
| StarcoderdataPython |
1855010 | def isNum(n):  # check numeric-ness in stages by successively catching conversion errors
    try:
        complex(n)
        return True
    except (TypeError, ValueError):
        try:
            float(n)
            return True
        except (TypeError, ValueError):
            try:
                int(n)
                return True
            except (TypeError, ValueError):
                return False
if __name__ == '__main__':
print(isNum("asd"))
print(isNum(1))
print(isNum("1.2"))
print(isNum("1.124+91j"))
print(isNum("!@#"))
| StarcoderdataPython |
12811911 | <gh_stars>0
def count_letters(text):
result={}
for letter in text:
if letter not in result:
result[letter] = 0
result[letter] += 1
return result
# The key is a letter and the value is how many times that letter is present.
count_letters("aaaaa")
{'a': 5}
count_letters("tenant")
{'t': 2, 'e':1, 'n':2, 'a':1}
count_letters("a long string with a lot of letters")
{'a':2, ' ':7, 'l':3, 'o':3, 'n':2, 'g':2, 's':2, 't':5, 'r':2, 'i':2, 'w':2, 'h':1, 'f':1, 'e':2}
| StarcoderdataPython |
11216302 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 17:56:31 2015
@author: Paco
"""
import numpy as np
class Metrics(object):
_batch_size = 10000
def __init__(self): pass
def braycurtis_pond_dist(self,X,pairs,M):
n_pairs = pairs.shape[0]
dist = np.ones((n_pairs,), dtype=np.dtype("float32"))
#L = np.linalg.cholesky(M)
for a in range(0, n_pairs, self._batch_size):
b = min(a + self._batch_size, n_pairs)
upp = np.abs(X[pairs[a:b, 0], :] - X[pairs[a:b, 1], :])
#upp = np.abs((np.dot(L,X[pairs[a:b, 0], :].T) - np.dot(L,X[pairs[a:b, 1], :].T)))
up = np.dot(upp,(M))
#up = np.dot(upp.T,upp)
downn = np.abs(X[pairs[a:b, 0], :] + X[pairs[a:b, 1], :])
#downn = np.abs((np.dot(L,X[pairs[a:b, 0], :].T) + np.dot(L,X[pairs[a:b, 1], :].T)))
down = np.dot(downn,(M))
#down = np.dot(downn.T,downn)
#dist[a:b] = np.sum(upp.T,axis=1) / np.sum(downn.T,axis=1)
dist[a:b] = (np.sum(np.dot(up,upp.T),axis=1) / np.sum(np.dot(down,downn.T),axis=1))
return dist
'''
'''
def mahalanobis_dist(self,X,pairs,M):
n_pairs = pairs.shape[0]
dist = np.ones((n_pairs,), dtype=np.dtype("float32"))
for a in range(0, n_pairs, self._batch_size):
b = min(a + self._batch_size, n_pairs)
diff = X[pairs[a:b, 0], :] - X[pairs[a:b, 1], :]
tmp = np.dot(np.dot(diff,M),diff.T)
dist[a:b] = np.sqrt(np.sum(tmp,axis=1))
return dist
'''
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.canberra.html#scipy.spatial.distance.canberra
Score = 0.32...
'''
def canberra_dist(self,X,pairs):
n_pairs = pairs.shape[0]
dist = np.ones((n_pairs,), dtype=np.dtype("float32"))
for a in range(0, n_pairs, self._batch_size):
b = min(a + self._batch_size, n_pairs)
up = np.abs(X[pairs[a:b, 0], :] - X[pairs[a:b, 1], :])
down = np.abs(X[pairs[a:b, 0], :]) + np.abs(X[pairs[a:b, 1], :])
dist[a:b] = np.sum((up / down),axis=1)
return dist
'''
http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.spatial.distance.braycurtis.html#scipy.spatial.distance.braycurtis
Score = 0.214275245493
'''
def braycurtis_dist(self,X,pairs):
n_pairs = pairs.shape[0]
dist = np.ones((n_pairs,), dtype=np.dtype("float32"))
for a in range(0, n_pairs, self._batch_size):
b = min(a + self._batch_size, n_pairs)
up = np.sum(np.abs(X[pairs[a:b, 0], :] - X[pairs[a:b, 1], :]),axis=1)
down = np.sum(np.abs(X[pairs[a:b, 0], :] + X[pairs[a:b, 1], :]),axis=1)
dist[a:b] = (up / down)
return dist
'''
http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.spatial.distance.cosine.html
Score = 0.357907078998
'''
def cosin_dist(self,X, pairs):
n_pairs = pairs.shape[0]
dist = np.ones((n_pairs,), dtype=np.dtype("float32"))
for a in range(0, n_pairs, self._batch_size):
b = min(a + self._batch_size, n_pairs)
up = np.sum(X[pairs[a:b, 0]]*X[pairs[a:b, 1]],axis=1)
d1 = np.linalg.norm(X[pairs[a:b, 0]],axis=1)
d2 = np.linalg.norm(X[pairs[a:b, 1]],axis=1)
dist[a:b] = np.array(1.0 - (up / (d1*d2)))
return dist
def euc_dist(self,X, pairs):
"""Compute an array of Euclidean distances between points indexed by pairs
To make it memory-efficient, we compute the array in several batches.
Parameters
----------
X : array, shape (n_samples, n_features)
Data matrix
pairs : array, shape (n_pairs, 2)
Pair indices
batch_size : int
Batch size (the smaller, the slower but less memory intensive)
Output
------
dist : array, shape (n_pairs,)
The array of distances
"""
n_pairs = pairs.shape[0]
dist = np.ones((n_pairs,), dtype=np.dtype("float32"))
for a in range(0, n_pairs, self._batch_size):
b = min(a + self._batch_size, n_pairs)
dist[a:b] = np.sqrt(np.sum((X[pairs[a:b, 0], :] - X[pairs[a:b, 1], :]) ** 2, axis=1))
return dist
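# Usage sketch (added for illustration): the helpers expect a data matrix X and
# an integer array of index pairs; each row of `pairs` picks two rows of X to
# compare. Only the simple, parameter-free distances are exercised here.
if __name__ == '__main__':
    X = np.random.rand(100, 5).astype('float32')
    pairs = np.random.randint(0, 100, size=(10, 2))
    m = Metrics()
    print(m.euc_dist(X, pairs))  # 10 Euclidean distances
    print(m.cosin_dist(X, pairs))  # 10 cosine distances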
| StarcoderdataPython |
1799805 | <filename>appengine/monorail/api/api_proto/features_prpc_pb2.py
# Generated by the pRPC protocol buffer compiler plugin. DO NOT EDIT!
# source: api/api_proto/features.proto
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the api/api_proto/features.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>aQ6qvKIVnTWnVqr+vhOpVlpOWUBZ+nyy+rIU3P99XiPaosstUqN'
'xpM8Wc8nUv96vJZU7qYjRkPMqXq71hLrX1EJzrnacDa4DtJP4kEtqC33d/66GtkTlEU8oFQTPq'
'6u41cQ0jjZW4w3JVveURaWcbE/jihWVkWb2lLZIBSJFYXMX1apDjbPJtzfNVTW18yojTU/poyX'
'ldopbVZqpValXiMxO0otumlFX778bEdHFkFYizPQbVrOtjSBwSdkKEBqUefJn1YD05XmNnSjKa'
'/jDarIettZrZRJjXgxDFShnE8ry59Xs8v/R0OlpxrgcJxdtWapUK207TAG/aYqaW9vlxqPSdd4'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'skrtMWOwuhb3T0xyx/bJ5XJ5T4/XXeYI1lgnztMnHLmzz5h7t467Zgu7ddx7qrFbxy5TDs1ljw'
'Hfz6X7JMPP5SmzBuAypxK+Edsa9co9OV/IHeiS6qK9qQaeGD+t/J612jHK5/Zug50DMOAXlPJG'
'SmvEK/TEWJsb3TvRhSqq3o7xxjro6097DLu5Q13TXcxPqeTuscc67Cu29zCWyz8tiwveVNluw4'
'<KEY>nX6WrP5GuIcj9zfC<KEY>bTQIWNsqs7fHtU74piF<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'wXj0CP00rKCKnlVKmZGAFeoJjBjwOxgJwPceyJ1QoUjAhPy95uuqR4WRgKTeSFYogO3NXRIqCN'
'SLv8DFIGOfucpJBlKRYaGgWN/IFaGgWN/Ln+JikNRvPuIkBOmPHBYK047MCAXF+hebXAyIpMsN'
'9Uq63IJQLOlyC2JOl1vICg64xUJQbMAtFoJiA26xEBQbcIuFraBl3uSkMBSzIgNCQTErdUwoKG'
'ZdmOJiESuYMu9xUgSKpSJDQkGx1PBFoaBY6voKF4tawbQ5zUlRKJaOJIWCYmnriFBQLH3uZS4W'
's4KD5m1OikGxwUhKKCg2OHhKKCg2eLnAxeJWMGPe4aQ4FMtEMkJBsUz2vFBQLHO1yMWUFRwya5'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'xzNZHrmDlIypqk7Jh55ihBm8R2jIUwie1YIikUsB1LpYlt0ApdDFzaR1ns1hdjR4htENmOs7JB'
'Unac+QSJ6zgrGySu46xskLiOs7Ig6ycDz+3DFb3CJ2OnqUDYCl0PvLhPAfQH12NnSMwwivkCix'
'kmMV9gMcMk5gssZpjEfIHFDJOYL7CYESs0GZjahyu6k0moEywQtUI3A4V9CqAjucnWjKKYsyxm'
'lMScZTGjJOYsixklMWdZzCiJOctixqzQ7cDCPk0H/dBtbjox5DpvHqCmE6OmM2/e1k0nRmznzR'
'6hoNx8b1YoYDs/Mkps41ZoKbCyj7Lox5ZY2TiyXTazxDZObJfNJfLYSIYxMSYUlFuOp4QCtsuZ'
'IWKrrNC9wOvd2F7WbNEP3ovliK1Ctq+xtorYvmbeI6+FZAQTe4SCcq+xtorYvgbaahRI+hT7Fi'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'BE2SoB+Y79OcAckwJsaEgnIfxHuFAkE/SOKEC1FgnPxFA8afPkqDyg0D+YHAQo/A5JiQlDueFD'
'KIJIxBGgoGv88a5rDqp0SYHEaA/EVjkHPDtA/TBQs5f9aIp4UMIjmUZSyg/pZhHuHEYIhIJWQE'
'yYQICQMbkKmDQlLZw3lGgqKf85BCmhQkHBw+ZyRSQhpIpgUJx9XPeUgg/982zAzrFwb9gPycId'
'BhnR4R0kAyOiBkEMn0oDoG1T1gRX7JCHyha31DC0lAfQ8AxC8ZsQNUxrIiXzICX+5a5pIuY0GZ'
'LxmxQySzBY0k9KuwXiCZLWwlESC/ZBwmqSxsJ5geE9JAEloKk0Ekoakg/5QV+ZoR+MZ+MqcA4m'
'<KEY>'
'<KEY>'
'<KEY>
'FN0zpPsPReIM6f5D0T1Duv9QdM+Q7j8U3TOo+4eie4Z0/9BDQt0/FN0zpPuHonuGdP9Q+lAGfc'
'SPpD1myEcA+aEh0NjHfiTtIUM+4kfSHjPkI34k7XHIivyeEfiH+9lxCCB+z4C5C5bJWpE/NAL/'
'aD87ZqHMHxqxYyRzFu34RwaMoihzltowkH9oaMNmyZLwoUdIA8nerJBBJEdGGQsSPxIfmUVLRo'
'D8I+MA50ZbfiSmzZItPzISaSGDSLKPzKItfyK2zJItgfzIGObcaMufiC2zZMufiC2zZMufiC2H'
'rchPjcA/3s+WwwDxUyN2nMrkrMifGoE/368/5qDMn4o/yqEtfyYy58iWQP4p+6Mc+YOficw5su'
'XPROYc2fJnBg9dOVTozwwzx1hoSyB/ZlicG8euP/OwDMoeHxQyiGR2mHQZsSI/N3BXq/v0BHUZ'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'oO2Jky/jiZMvNG21vg5K86l4bzFKNJj2mOqDhgYJUIsVMGmDT8564WvB/ZgvqygfUVtDKorBNF'
'<KEY>'
'StwpVdvORK18d+sx1vNDJKWeibCSKvho6zEzwJ/QNpQX1WCNqPh26Z1VCVhAu8bgA4YoNBESoy'
'9abHBN5B8qtVQCVV9tO43HWC9v4w9fkye6i3YASclcRZrARu2v/yY06iA2al8DaJ7+iqHibm+w'
'Eio6v7C6fG9xJhmwelV8Zn7ltiYNqwea1vyypkyklpaLmgpi1pWlGSZDSE5PLM9oMozk5MLCnC'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'gfBhEH44cKSb8M+T8GEUYjjch2zDJHzOpA2esJYvpzeWw/poJtfTyxkhacRMcpJBVEIoyDjS18'
'8ZgRg1+zkJi43qLYuwPn4Z7RXWIPgBNyPuzB5wM+KBywE3Y8gKHnRZ42bqQZc1HrEcdFmDmQ65'
'GXET9ZCbEQ9VDrkZaQNdMuK+p+1mxGMU280YtYKHXRlxv/OwKyMenBx2ZYxZwbyZ5iTcosy7xf'
'CoJM+7IjABOtF9c/yKtyV/AtrjimzJnzIzuVl7eWF64eR6Y629Sf1chpfzly98YvzUVXu6XjvR'
'wk5i0+zELkw3sedIX9FfsV9iE6PN/dAp84S7nx9GNjHf7v6p+IBvd/8UNDF9RABJp3kbz6CGet'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>sTdfc1GQ4TUXBSv+GpsASr0cmN7HBOg1Xub6C6EJ'
'<KEY>ZIIJ82VdfyEywQSbIEQmmIgPCgVsJ7LDjILHJSx8iEwwaU7kOCeaYNJFQRNMcncKkQ'
'kmQfjrhAImmDJH8+ftGzAINpwNp+HU1nFgA2VgzlCq2hTM1hyznXOb5+y18xfHL13mXhwim02Z'
'kxmGRptNuWxRwqn4kFDAdop3omFZOBuY28dm6EBnY/3eKVSBOyydQoUK5uyAHDWFMTHmO4YqxJ'
'O+Y6gCdNgXCQWSbpm5/EXb2QaWYzh5qK8119sNmGdUK285dh5H+dq5c+dedt4pbe/oOU2e9Q2T'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'n29umZtaeH2+udVxvrnFvk2fb26xe4+j8BVufHESvmJuZTgnCl9xUVD4Cje+OAlf4cYXx8b3gE'
'0Qp8b3wKxYnBO77gMXBRk+YBPEqfE9GBCNgHjLPMpJMFUASgkFIG8lRAUcLd+yDgmF5fJH3C2q'
'DxPqme4MPeP9o8OdaTq4ufPy0S8ZKioxjOdUnPYQfGHCe+wzxCiPhD//H7xKkP/IUAnfZQjrmN'
'w00rL0747F1qkoRqNUe4u3I+g3qlIql1mVrlsmMcqDqpxQ/fi7vNqqbMOSDsYGErC32Eefl+Wr'
'<KEY>'
'<KEY>'
'<KEY>'
'IuyybQy7GsOSSLFzRk1oz6lmPZWK9vOZZlE9BybNhFQUMOm1lZ1KEhhzuWY8NxQUFDDrsouNR3'
'F3VoyJw5LCg4wPOuAC/HckoWdWi7nLuoC+KuwAHfcmzEzMmiDg050rEcG4m7aViQQy0MXO6Puh'
'<KEY>mComC<KEY>DLAcTQ64rvq/1tT+7tXnpw/qXnVe9sLOP2qUdqi7aV/9FD+e'
'/7apYhO8k4+3VPSRgeelrV0HA+TbNuT44RNyNuA0/Pd59vCHPZKPruZccDf49fFD1heQzcLwaY'
'Bs/Q9CCae1yt49WgwDtVADIAU/WuyNw928cVxn4ps4O1ulpr6JE92t4yImkY47/CvfUvGJbadW'
'3sZo7M5DFWP3ocoZZdWcR6v1xmrZqbZKq3p/XA9i/ZCy0JjG77SLjjvhdUDSefSIFoMPlJj/Oz'
'DIT7RapfUt4ouHMC7l7Xz3eB8LZSuHh0FVxzdwujQOLs3Ku5pPqEi/8ewB9zYRkY6CeNzkb7Tp'
'<KEY>'
<KEY>'
'<KEY>/'
'nO8tyGW/Rlg16b8NpNM6t238Hz2l3Rn9G6otxDP+o9ia4eIiH5eALkm4qR6Xv0BMj3Ga0/pKJg'
'/eZOaTvbS6aPVJpLQGG1wJyD6yXbp6sFvuh6wTrH5I1qaTPbr+/9An0DyPz3DKVIKt3l/rcdnH'
'vGZfrPuJ5+ItfpYkLP4GL+rVJhfR3t47Xw7jPkcbroDI7UJ5OvnbhHr3T7mU9hO+bsXVu7N2c/'
'Dc153X/vdo/ckfV1uc5Jx8I6e3T3dU45Sy7Gq/yrab2g+jpuGzahSwSfct2w13/dsGnNqME1sN'
'NbMBGHRui7SxrvepfU4gILtYJ7qXRSpehrpbbpB1FdQQYku4fxisqWS7XNKmL4ZCKgoa5Ag1Jm'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'woqZf3GLOa0i5EOb2czuMuRlp9Hj6Bz5r8SUojZKMzBoTf4Qk8T4qCxNZRINPrEBPUO3U5mFdr'
'jEru+meC7xokqwS1yFxT5fW99zykBucaJchu7TJ0UadA2SQhP2nlPrUvq2JDQR8nget/BT3WMC'
'MwvTl6Hpu2WZbeSpxfukOHN/TvV5Dp3Yd3fqPa5TR94vqgFfSWYe61q43y3s6t3n+hvNOf4Uj9'
'MjHof1HvCVZd5PzId8xfvd4sz9Cnu75up61Sk1wFcGu0wqtK9rTmE2a4KHEc/zk+Q9XV12cs3v'
'9VH2myqzG4IV6O2KkupAYRWgAjoGDpKkrytG/5pv0EBBplW6szyL0b/PwMoQbjX2+0cf7GDJru'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'A3DDjTx+oUT9ZwqqUWStxuYsamkqA1feg+xhE1emI1ZvPGRFMHo/mW9m40mfI23XuiJ2TTvTd6'
'RDbdBwLZbnvFz3mb7gMcsUCb7t4WdUBvUVu+TXeLD7P0pruVSPs23XGL2t10T5nHZCc9hJQUwz'
'tsqYTl23NPpWzfnnvqyFFvzz3NN+H0nnvaTAkmbhWnzaRvzz09MOTbc0/nRrw990F35x63igfN'
'9Khvz33QPUVAyw2qpG/PfdDduQ/hTXMxC24VZ8xB2bkPRTBRUHCrOKPELLhVnHHNEsaL54ISBp'
'QhMzPMOcMRTBQrYUjEkGtcvJo/pPf/MfRuNHDs6XVKoXej4ZQXenegI/TuQEfo3QF/6N1BM8VJ'
'<KEY>'
<KEY>'
's4FPGLn/aeguzzt96Irqtepju9mutKgmsNNzdClFlYLHcXfzMPoCI0+gC9t3wWNQhMZ6u9Ggp4'
'LA1W04DbvZarTXWxSr4m0DsjvjoFP0gRx5WmrWa3Zprd5uif+gEGnxfKXttcpmu95mL/JImG6V'
'HoKvdHe0SertehOwt0q1TQcFfEr0CbaOs7EB9UBOmC6Y2dwbbJjSY2SMeqMk6L9L4PIq1dZZcM'
'DAZr3dbNW3tbAUmkN+sfIQytQVSGnLutGnT0c84QXzrBwMYZDAhY54wgtuJCA6pQuZIfXbhpxg'
'XTLt3FeNDjFLds15pF2uNjEOK4/Ag2sN6uKPxUXnJ5rNyiaMO/kxhbJXWh4SrK3XnbNNZ6fUID'
'9PMTegPpvUhViqvOucnbPP0r9LeVc3HZx3QY6rdHCe/1jtUlxeQqDgvIOH1Kwcq10xh3LXfPUp'
'<KEY>'
'<KEY>
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'mjcl1hPb3GxHTOos89UxqbNcHxSTWuiISS2YsznOafhCFHVMaiHuj0kt+GNSb7mymDre0B+Tes'
'tFMX3xhjom9ZYrS9CLNwxSC3PjDYPUwl5xI1uDvnjDILWwV1xZoIXN8bF8kFrYnPmKyILzmzme'
'3wSphc0piePFFjbHx/JBFPq2i4It7LY5l+WcYUoUFGxht10UbGG3XRR6TGOQUSL6pQ1BwRY279'
'<KEY>T<KEY>sAUXBVvYQlw4YAtb4BCBIEbbL3IwFxAhpOTl'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'mCgo+FbF3aiEpfcByl0rLaHgrwdK+0QzYU95nUc7CgV/g+f+OhT8DfN1LXyIFlZvcP3pUPA3Em'
'<KEY>'
'gR5eV6MSUI7OZpV9RgiJ+65G6Hnum6sSHx6MYKJohJ7nvqsRep77oNExig8PO4HPGt2c+PhzXo'
'S4E+v1IsQ32HHqCPEN09G1pCPENzoixDfcQG005YYbqI1hqG6gtqGfa5FAbTTlZke492ZcArXR'
'lJtsBAr33nKj1U0dv5rhnKYvflWHe2+50epovS12VmEkKjw0h8mUFXNLpEZTVviSSphMWekRDm'
'<KEY>LAvfjVMa9S33Feg'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'eZKTwB0DJcV6AaSZkBaM3riZyQsFIM1jJxikDx/5GWJR+vQLQILZRy8AyXUJ9MYt97oEeuMW+/'
'Sw2W8F27xVBASgtM2WNI7+CCZKy8eHgNpK+k8/oLSHRxglaQUfmjajJAHlodke5ZzJMCYKCj4O'
'9FBJ008CysMDhxhlwAo+clEGAOWR+VAMOBDGREEZAJRHLsoAoDxyUSwr+I6ZZxQLUN4xHwmKFc'
'ZEQbEA5R0lclqA8s6hw4ySsoKPzTFGSQHKY/MdqYdUGBMFJQUoj5V0oRSgPD4l78KlreC75ilG'
'SQPKu+ZjaZ3pMCYKShpQ3lXS79OA8u7xk4wyaAU/Dc1FowwCyqfNd6W3DYYxUVAGAeXTSuQcBJ'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'2nVMb9kCZ9dm9TP+K9rHV8tNiubNgUjXluttSko6iTJ/TZ3YlT52x7kWJu9dZGqVqtP9IbY4r2'
'r2pOE7ddeGdO77njTlIFZrN2fq3+jlPO8+465afZ7067sVNvOueUXajZt5YW5sfsUqfgeCKxg4'
'cStZa+pl6ym5VtupxO2WCCjO+S0O2pCNjh84ZcYcJ3Sb5g8LydLlABefCokEEkT+hKxCtUoV82'
'YADXWPjGCpBfME7JpaoIpceFpOwqJWQQycwQY4HP+BV5RUjfpPoVaRB0kwrIhNzWQsa/Im960V'
'0qIPkVoQhSXzR4tIjQ5OSLHhK+DvZFDwlfB/uikTosJJU9eoyRQvhalnmaE0OaFCS89PYlIzEk'
'JD2tlT0mZBDJk6cYSb+kdZYTcaX5qx4SzFKAdJHC9M5W9qSQ9M7WmTFGgrxflnebIrTa/LKHFK'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'Bu3cF0BsiTpxmpzwr9hgHzTZ2IE5rf8JD6Iki6SDCjATItLROmNECeOctI/VboN9Gla28Ak5oI'
'kL9hCDRMazBdoGFeA2QiJ2QQyQOHGCuJ77SBfhoLpjYRIH/TkBpKRihdvFSSnnU7KBrC9AZI0P'
'AYXa6MfNsI/M7TXk9M6OuVoW8bsUHij/crQ79twAQN+dMFywiQ3zZ0F6VnWzE9KaSB5ICbGkQy'
'N8JY9FYb9w59zfI7YoYo+cjviD+ie5ZApo4KGUSSe0cUfeR3DZ4eRclHftdDQs/wXQ8J2X7XSJ'
'0QMojk6TOMBNT3PJnQR37PQ0If+T3pZ1Hykd+THhslH/k9TyYo+n1PppAmBQl95Pc9JPSR35ce'
'GyUf+X1PJvCRPzDMY5yIPvIHHhL6yB9IS4ySj/yB9Ngo+cgfGEeOUo3HrMiPjcA/2O8tRHREPz'
'<KEY>
'<KEY>'
'<KEY>'
'wB95WCa9lcfvidKlVSBTabKLsiJ//LQ36biO0bH/sRFLEn98JDj0J/K+Hr0SHAHyj/lNOHonGN'
'OVkAaS/L4evRQMJL+vh08Fh34qb9XRW8ERIP+E39ej14IxPSYkZee36ui9YCD5fb0EvZX3Z/u9'
'L5fQb+VpXRL8Vp6eeyW8t/K0LonOt/IS8lZeSkh6Kw/mXsAfulLkLwwMgnmqhwyjCn9hhIl/GE'
'9QQz9nW4XpGBPJsJAmvYUX57z0uJ2b19Ck5AUjAunmBeovDTPBiVj0L/nZVCQpNa44L2jxT/lt'
'xTDdnEMyKqSJpEpwXnAnf2WY/ZyInuivPJHwNPCvjN4+zgu2+2f8Nh9SBpFxIU0ke3o5L7SZf8'
'4jAVIGkSJ+xESyr9+9efeTvNrnMp3VvyuyEHflp+ttqA59xaTjVTSDb4zk80rdqNZLrT3ymL48'
'<KEY>'
'PDeSbfU6n1+vbuuM3J3rtsflqnLRqvnd6stLbaaxRFtVmvlmqbXlXt4DW2pltj/80wvmsGby5O'
'/tg8eFPjLko86F2nWn2lVn9Uw2tuzVsfHcQ555FA21B/3kehZkcC1vjPevTycL1etSfbGErRtM'
'/aGupE0y6XWiVYhbachj4At/XDYKojPu3Cc1wAVoPrsN7cOyzt6dFiOyzE2TUtxHmFf4mtXMHV'
'<KEY>'
'PL3vtkG3VcHuM6e71eK1ewUBMLAY7Tugoi4X+ndwnWpOW5L9Buu91sgeb0ohpFsqzVH2ISW0zZ'
'tXqrsu7wSZIb8ODjWCvvEgf4rVdLlW18Fa2LEHgC79lChAAdy+11x5NDeYJ8LDmURAaW6+ttPA'
'orSSWdx4djIKVhQ0txGpVStemZmioIEpXtl95Vat6pUEl/IIy/bdXqXhrZvdJqKnq+jqDqDTdG'
'hWI4WnXbqZXhK8UsghDbGKOibQKtk6+30cNzSgImN1qPsJlwC7KbO846tiAoVcGG1cC2U9OtqN'
'<KEY>'
'<KEY>'
'<KEY>'
'y4WplbmJor24UlxcWJqxUbPpwtLU3ETh9sz0OeAPPO2ZOzPzy/bS7MTcXKeiysZw2CJK71fTnp'
'wBKScm52aQFek5XSjOTC2jQt6vKTAeCDg3puylxZmpAvwCe8yAOhPFe2MMuoTBvqDVxJw9PXF7'
'<KEY>pZXJ<KEY>CQ6Y'
'nlCWINGGAuSIffkytLBTJcYX55plhcWVwuLMyfglq+C5YBKSeg7DRZeGEetcW2MrNQvIewaAeq'
'gTH77uwMfC+iUclaE2iGJbDa1LI/GzAEI4JKnp72/MzNucLNmfmpGUxeQJi7haWZU1BhhSXMUC'
'DG0AaA6QppjRUFcin929d0x6g+7cINe2L6TgEl59zQApYK3FzIbFOzbPNzavw/G7YMYbRFCD0F'
'PCzGpp2s1WtneWvxlE1D1zmMZcZIYyTIIUNP3WhX9W6ks73mlMvoaVyQpjia+<KEY>nHH'
'<KEY>'
'<KEY>'
'<KEY>'
'vm/qdx83diljrQ01hU90Sjz0eCylbImHvmymcilC1Uxcm+HkXwdJXzbHJRoYD6AvdzzEeTnu/5'
'tJlwcs5eho0quBF4zcvb312cDZ5/7qeJPULtoYHHJ3SOI0r5tWziJQYtGhjA6uvC5heDq48nrH'
'6yDXO14HuQ5rLkdH/00GZroqU8EZ8P7KeBNlTxl3g13iBydZGYofnHaVIRYdyuiYwmlz0h9TOM'
'3K6JjC6ZhE7aEy06DMpo4ruxW43bWltZ9Rm5V91cGz/Fvc0ig0bc5tae0n9dHxanPmLTcmLYwl'
'/PFqc3EJnEF95rilYawQvibXvXIujT9T5fDio0tLw5iCIldOSP/dIl/lXBrvUCbEf8uo6Ia5UC'
'xRVCiMJeLK0SEwy1w5sJZ8LfDGUyvnWbRZ2VcdjG54jSuHQk5e76icXfroOJTXzdfcWJMwlvDH'
'obwel1NE1Od1qJy6fupvLeAYufW99VmD5dz+2riLPk+X+60Gkuju72/ApJVcozwbuBYbUAfl2c'
'CyOZAbIHxk1qGVfg6wbK65T/6FMX9EKIAqR3uEAq3K/UmqpagVehDY7lpLuhfsr5dvpdqlC2Hk'
'xwOuJXrGr+rWEp8x+vXRb/tVzQf+t/2qHW/7VTve9qtyF4pZoQb+aaMuXYiChZ+hmtw1dRdtcL'
'hscBeK6T+OJF2IWHQoE+M/mNTwv/XX4i6k3/prcRfSb/21vCeL/hdqVccN')))
_INDEX = {
f.name: {
'descriptor': f,
'services': {s.name: s for s in f.service},
}
for f in FILE_DESCRIPTOR_SET.file
}
FeaturesServiceDescription = {
'file_descriptor_set': FILE_DESCRIPTOR_SET,
'file_descriptor': _INDEX[u'api/api_proto/features.proto']['descriptor'],
'service_descriptor': _INDEX[u'api/api_proto/features.proto']['services'][u'Features'],
}
| StarcoderdataPython |
11386518 | <reponame>ahameedx/intel-inb-manageability
# -*- coding: utf-8 -*-
"""
This module checks the current node package installed on a Yocto system.
Copyright (C) 2019-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
from typing import Optional
from node.constant import NODE_VERSION_PATH
logger = logging.getLogger(__name__)
def get_version() -> Optional[str]:
"""Helper function for getting version of node agent in the system
@return: version of node agent
"""
logger.debug("")
try:
with open(NODE_VERSION_PATH, 'r') as version_file:
node_pkg_version = version_file.readlines()[0].replace('\n', '')
except (FileNotFoundError, PermissionError) as error:
logger.error("Failed to get node version with error: {0}".format(error))
node_pkg_version = 'UNKNOWN'
finally:
return node_pkg_version
| StarcoderdataPython |
1785086 | <filename>adjacent_attention_network/__init__.py
from adjacent_attention_network.adjacent_attention_network import AdjacentAttentionNetwork
| StarcoderdataPython |
6423947 | <gh_stars>10-100
from sm.engine.formula_parser import format_ion_formula, safe_generate_ion_formula
ION_INS = (
'INSERT INTO graphql.ion (ion, formula, chem_mod, neutral_loss, adduct, charge, ion_formula) '
'VALUES (%s, %s, %s, %s, %s, %s, %s) '
'RETURNING id'
)
ION_SEL = (
'WITH ions AS ('
' SELECT UNNEST(%s::text[]) as fo, UNNEST(%s::text[]) as cm, '
' UNNEST(%s::text[]) as nl, UNNEST(%s::text[]) as ad'
') '
'SELECT formula, chem_mod, neutral_loss, adduct, id '
'FROM graphql.ion '
'JOIN ions ON formula = fo AND chem_mod = cm AND neutral_loss = nl AND adduct = ad '
'WHERE charge = %s'
)
def get_ion_id_mapping(db, ion_tuples, charge):
"""Get a mapping of ions to ion ids, adding missing ions to the database if necessary
Args
------------
ion_tuples : list[tuple[str, str, str, str]]
(formula, chem_mod, neutral_loss, adduct) tuples
charge : int
1 or -1
Returns
------------
dict[tuple[str, str, str, str], int]
(formula, chem_mod, neutral_loss, adduct) => ion_id
"""
if not ion_tuples:
return {}
formulas, chem_mods, neutral_losses, adducts = map(list, zip(*ion_tuples))
existing_ions = db.select(ION_SEL, [formulas, chem_mods, neutral_losses, adducts, charge])
ion_to_id = dict(((fo, cm, nl, ad), id) for fo, cm, nl, ad, id in existing_ions)
missing_ions = sorted(set(ion_tuples).difference(ion_to_id.keys()), key=lambda row: row[0])
if missing_ions:
rows = [
(format_ion_formula(*ion, charge=charge), *ion, charge, safe_generate_ion_formula(*ion))
for ion in missing_ions
]
ids = db.insert_return(ION_INS, rows)
ion_to_id.update((row[1:5], id) for id, row in zip(ids, rows))
return ion_to_id
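# Illustrative sketch (added for clarity): the call shape of get_ion_id_mapping.
# _FakeDB below is a hypothetical stand-in written only for this example; the
# real `db` argument is the engine's database wrapper exposing select() and
# insert_return().
class _FakeDB:
    def __init__(self):
        self._next_id = 1
    def select(self, query, params):
        return []  # pretend no ions are stored yet
    def insert_return(self, query, rows):
        ids = list(range(self._next_id, self._next_id + len(rows)))
        self._next_id += len(rows)
        return ids
if __name__ == '__main__':
    ion_tuples = [('C5H5N5', '', '', '+H'), ('C6H12O6', '', '', '+H')]
    print(get_ion_id_mapping(_FakeDB(), ion_tuples, charge=1))
    # e.g. {('C5H5N5', '', '', '+H'): 1, ('C6H12O6', '', '', '+H'): 2}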
| StarcoderdataPython |
8138175 | <reponame>BensonRen/AEM_DIM_Bench
"""
This file serves as a modulized evaluation interface for the network
"""
# Built in
import os
import sys
sys.path.append('../utils/')
# Torch
# Own
import flag_reader
from class_wrapper import Network
from model_maker import NA
from utils import data_reader
from utils.helper_functions import load_flags
from utils.evaluation_helper import plotMSELossDistrib
from utils.evaluation_helper import get_test_ratio_helper
from utils.plotsAnalysis import get_xpred_ytruth_xtruth_from_folder
from utils.plotsAnalysis import reshape_xpred_list_to_mat
from utils.create_folder_modulized import get_folder_modulized
from utils.create_folder_modulized import check_modulized_yet
# Libs
import numpy as np
import matplotlib.pyplot as plt
from thop import profile, clever_format
def modulized_evaluate_from_model(model_dir, operate_dir, FF=False, BP=False):
"""
    Evaluation interface. 1. Retrieve the flags 2. get data 3. initialize network 4. eval
:param model_dir: The folder to retrieve the model
:param operate_dir: The directory to operate in (with all the Xpred,Ypred files)
:return: None
"""
# Retrieve the flag object
print("Retrieving flag object for parameters")
if (model_dir.startswith("models")):
model_dir = model_dir[7:]
print("after removing prefix models/, now model_dir is:", model_dir)
print(model_dir)
flags = load_flags(os.path.join("models", model_dir))
flags.eval_model = model_dir # Reset the eval mode
if BP:
flags.backprop_step = 300
else:
flags.backprop_step = 1
flags.test_ratio = get_test_ratio_helper(flags)
if flags.data_set == 'meta_material':
save_Simulator_Ypred = False
print("this is MM dataset, there is no simple numerical simulator therefore setting the save_Simulator_Ypred to False")
flags.batch_size = 1 # For backprop eval mode, batchsize is always 1
flags.lr = 0.5
flags.eval_batch_size = 2048
flags.train_step = 500
print(flags)
# Make Network
ntwk = Network(NA, flags, train_loader=None, test_loader=None, inference_mode=True, saved_model=flags.eval_model)
# Set up the files
Xpred_list, Xt, Yt = get_xpred_ytruth_xtruth_from_folder(operate_dir)
X_init_mat = reshape_xpred_list_to_mat(Xpred_list)
# Evaluation process
print("Start eval now:")
ntwk.modulized_bp_ff(X_init_mat=X_init_mat, Ytruth=Yt, save_dir=operate_dir, save_all=True, FF=FF)
def evaluate_all(models_dir="models"):
"""
    This function evaluates all the models in the models/ directory
:return: None
"""
for file in os.listdir(models_dir):
if os.path.isfile(os.path.join(models_dir, file, 'flags.obj')):
modulized_evaluate_from_model(os.path.join(models_dir, file))
return None
def get_state_of_BP_FF(folder):
"""
    This function returns two flags, BP and FF, according to the given folder name
"""
# Get the label of the state of BP and FF
if 'BP_on' in folder:
BP = True
elif 'BP_off' in folder:
BP = False
else:
print("Your folder name does not indicate state of BP: ", folder)
exit()
if 'FF_on' in folder:
FF = True
elif 'FF_off' in folder:
FF = False
else:
print("Your folder name does not indicate state of FF: ", folder)
exit()
return BP, FF
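# Illustrative sketch of the folder-name convention relied on above (paths are hypothetical):
# get_state_of_BP_FF('results/BP_on_FF_off_gpu0')   # -> (True, False)
# get_state_of_BP_FF('results/BP_off_FF_on_gpu0')   # -> (False, True)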
def modulized_evaluate_different_dataset(gpu=None):
"""
This function is to evaluate all different datasets in the model with one function call
"""
#data_set_list = ["meta_material"]
data_set_list = ["robotic_arm","sine_wave","ballistics",]
folder_list = get_folder_modulized(gpu=gpu)
for folder in folder_list:
# Skip Random for now
#if 'Random' not in folder:
# continue;
BP, FF = get_state_of_BP_FF(folder)
# Nothing is needed if both of them are False
if BP is False and FF is False:
continue;
print("currently working on folder", folder)
# Work on each dataset
for dataset in data_set_list:
if check_modulized_yet(os.path.join(folder, dataset)):
continue;
modulized_evaluate_from_model(model_dir="retrain0" + dataset,
operate_dir=os.path.join(folder, dataset), BP=BP, FF=FF)
if __name__ == '__main__':
# Read the flag, however only the flags.eval_model is used and others are not used
#eval_flags = flag_reader.read_flag()
#####################
# different dataset #
#####################
# This is to run the single evaluation, please run this first to make sure the current model is well-trained before going to the multiple evaluation code below
#evaluate_different_dataset(multi_flag=False, eval_data_all=False, save_Simulator_Ypred=True, MSE_Simulator=False)
# This is for multi evaluation for generating the Fig 3, evaluating the models under various T values
#evaluate_different_dataset(multi_flag=True, eval_data_all=False, save_Simulator_Ypred=True, MSE_Simulator=False)
#####################
#Modulized eval Here#
#####################
modulized_evaluate_different_dataset()
| StarcoderdataPython |
1900063 | ## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from nine import str
from PySide2 import QtCore
from PySide2 import QtGui
from PySide2 import QtWidgets
from PyFlow.Packages.PyFlowBase.Tools import RESOURCES_DIR
from PyFlow.UI.Tool.Tool import DockTool
from PyFlow.UI.Widgets.PropertiesFramework import PropertiesWidget
class PropertiesTool(DockTool):
"""docstring for Properties tool."""
def __init__(self):
super(PropertiesTool, self).__init__()
self.scrollArea = QtWidgets.QScrollArea(self)
self.scrollArea.setWidgetResizable(True)
self.setWidget(self.scrollArea)
self.propertiesWidget = PropertiesWidget()
self.scrollArea.setWidget(self.propertiesWidget)
self.propertiesWidget.searchBoxLayout.removeWidget(self.propertiesWidget.lockCheckBox)
self.addButton(self.propertiesWidget.lockCheckBox)
self.propertiesWidget.searchBoxLayout.removeWidget(self.propertiesWidget.tearOffCopy)
self.addButton(self.propertiesWidget.tearOffCopy)
# self.addButton(self.propertiesWidget.settingsButton)
self.setWindowTitle(self.uniqueName())
self.fillDelegate = None
self.propertiesWidget.spawnDuplicate.connect(self.onTearOffCopy)
def onTearOffCopy(self, *args, **kwargs):
instance = self.pyFlowInstance.invokeDockToolByName("PyFlowBase", self.name())
if self.fillDelegate is not None:
instance.assignPropertiesWidget(self.fillDelegate)
instance.setFloating(True)
instance.resize(self.size())
def clear(self):
self.propertiesWidget.clear()
def assignPropertiesWidget(self, propertiesFillDelegate):
self.fillDelegate = propertiesFillDelegate
if not self.propertiesWidget.isLocked():
propertiesFillDelegate(self.propertiesWidget)
@staticmethod
def isSingleton():
return False
@staticmethod
def toolTip():
return "Properties editing and displaying"
@staticmethod
def name():
return str("Properties")
| StarcoderdataPython |
132073 | import tempfile
from src.script_handler import *
from src.cyk import cyk
def run_script(s):
temp = tempfile.NamedTemporaryFile()
f = open(temp.name, 'w')
f.write(s)
f.close()
return handle_script_from_file(temp.name)
def test_list_all_graphs(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'list all graphs;'.format(path + '/test_dir'))
lines = open(path + '/script_out.txt').read()
assert capsys.readouterr().out == lines
def test_named_pattern(capsys):
path = os.path.dirname(__file__) + '/resources'
h = run_script('connect to [{}];\n'
'S = a S b S | ();'.format(path + '/test_dir'))
assert h.g.rules == {'Q0': [['a', 'S', 'b', 'S'], ['eps']], 'S': [['Q0']]}
def test_named_pattern2():
h = run_script('S = a b + | ();')
assert cyk(h.g, 'a b')
assert cyk(h.g, 'a b b b b')
assert cyk(h.g, '')
assert not cyk(h.g, 'a')
def test_named_pattern3():
h = run_script('S = a ? b;')
assert cyk(h.g, 'a b')
assert cyk(h.g, 'b')
assert not cyk(h.g, 'a')
assert not cyk(h.g, 'a b b')
assert not cyk(h.g, 'a a b')
def test_named_pattern4():
h = run_script('S = (a b)* | c;')
assert cyk(h.g, 'a b')
assert cyk(h.g, 'a b a b')
assert cyk(h.g, 'c')
assert not cyk(h.g, 'a')
assert not cyk(h.g, 'a b b')
assert not cyk(h.g, 'a a b')
assert not cyk(h.g, 'a b c')
def test_select_exists(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select exists u from [graph.txt] where (u) - S -> (_);'.format(path + '/test_dir'))
assert capsys.readouterr().out == 'True\n'
def test_select_exists2(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select exists u from [graph.txt] where (_) - S -> (u);'.format(path + '/test_dir'))
assert capsys.readouterr().out == 'True\n'
def test_select_exists3(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select exists (u, v) from [graph.txt] where (u) - S -> (v);'.format(path + '/test_dir'))
assert capsys.readouterr().out == 'True\n'
def test_select_exists4(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select exists (u, v) from [graph.txt] where (u.ID = 1) - S -> (v);'.format(path + '/test_dir'))
assert capsys.readouterr().out == 'True\n'
def test_select_exists5(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select exists (u, v) from [graph.txt] where (u.ID = 1) - S -> (v.ID = 3);'.format(path + '/test_dir'))
assert capsys.readouterr().out == 'True\n'
def test_select_exists6(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select exists (u, v) from [graph.txt] where (u.ID = 1) - S -> (v.ID = 0);'.format(path + '/test_dir'))
assert capsys.readouterr().out == 'False\n'
def test_select_exists7(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a S| ();'
'select exists (u, v) from [graph.txt] where (u.ID = 1) - S b b b -> (v.ID = 3);'.format(path + '/test_dir'))
assert capsys.readouterr().out == 'True\n'
def test_select_exists8(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a S| ();'
'select exists (u, v) from [graph.txt] where (u.ID = 1) - S b b -> (v.ID = 3);'.format(path + '/test_dir'))
assert capsys.readouterr().out == 'False\n'
def test_select_exists9(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a S | ();'
'select exists (u, v) from [graph.txt] where (u.ID = 1) - S b b -> (v.ID = 3);'
'S = a S b;'
'select exists (u, v) from [graph.txt] where (u.ID = 1) - S b b -> (v.ID = 3);'.format(path + '/test_dir'))
assert capsys.readouterr().out == 'False\nTrue\n'
def test_select_exists10(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];'
'S = a S | ();'
'select exists (u, v) from [graph.txt] where (u.ID = 1) - S c c c -> (v.ID = 3);'
'connect to [{}];'
'select exists (u, v) from [graph.txt] where (u.ID = 1) - S c c c -> (v.ID = 3);'.format(path + '/test_dir', path + '/test_dir2'))
assert capsys.readouterr().out == 'False\nTrue\n'
def test_select_count(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select count u from [graph.txt] where (u) - S -> (_);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '3\n'
def test_select_count2(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select count u from [graph.txt] where (_) - S -> (u);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '2\n'
def test_select_count3(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select count (u, v) from [graph.txt] where (u) - S -> (v);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '6\n'
def test_select_count4(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select count (u, v) from [graph.txt] where (u.ID = 1) - S -> (v);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '2\n'
def test_select_count5(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select count (u, v) from [graph.txt] where (u.ID = 1) - S -> (v.ID = 3);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '1\n'
def test_select_count6(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select count (u, v) from [graph.txt] where (u.ID = 1) - S -> (v.ID = 0);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '0\n'
def test_select_count7(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a S| ();'
'select count (u, v) from [graph.txt] where (u.ID = 1) - S b b b -> (v.ID = 3);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '1\n'
def test_select_count8(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a S | ();'
'select count (u, v) from [graph.txt] where (u.ID = 1) - S b b -> (v.ID = 3);'
'S = a S b;'
'select count (u, v) from [graph.txt] where (u.ID = 1) - S b b -> (v.ID = 3);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '0\n1\n'
def test_select_count9(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];'
'S = a S | ();'
'select count u from [graph.txt] where (u.ID = 1) - S c c c -> (v.ID = 3);'
'connect to [{}];'
'select count u from [graph.txt] where (u) - S c c c -> (v.ID = 3);'.format(path + '/test_dir', path + '/test_dir2'))
assert capsys.readouterr().out == '0\n3\n'
def test_select(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select u from [graph.txt] where (u) - S -> (_);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '[0, 1, 2]\n'
def test_select2(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select u from [graph.txt] where (_) - S -> (u);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '[2, 3]\n'
def test_select3(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select (u, v) from [graph.txt] where (u) - S -> (v);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '[(0, 2), (0, 3), (1, 2), (1, 3), (2, 2), (2, 3)]\n'
def test_select4(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select (u, v) from [graph.txt] where (u.ID = 1) - S -> (v);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '[(1, 2), (1, 3)]\n'
def test_select5(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select (u, v) from [graph.txt] where (u.ID = 1) - S -> (v.ID = 3);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '[(1, 3)]\n'
def test_select6(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C;'
'C = S b;'
'select (u, v) from [graph.txt] where (u.ID = 1) - S -> (v.ID = 0);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '[]\n'
def test_select7(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a S | ();'
'select (u, v) from [graph.txt] where (u.ID = 1) - S b b -> (v.ID = 3);'
'S = a S b;'
'select (u, v) from [graph.txt] where (u.ID = 1) - S b b -> (v.ID = 3);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '[]\n[(1, 3)]\n'
def test_select8(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];'
'S = a S | ();'
'select u from [graph.txt] where (u.ID = 1) - S c c c -> (v.ID = 3);'
'connect to [{}];'
'select u from [graph.txt] where (u) - S c c c -> (v.ID = 3);'.format(path + '/test_dir', path + '/test_dir2'))
assert capsys.readouterr().out == '[]\n[0, 1, 2]\n'
def test_select9(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];'
'S = a S | ();'
'select u from [graph.txt] where (u.ID = 1) - a ? c + -> (v.ID = 3);'
'connect to [{}];'
'select u from [graph.txt] where (u) - a ? c + -> (v.ID = 3);'.format(path + '/test_dir', path + '/test_dir2'))
assert capsys.readouterr().out == '[]\n[1, 2, 3]\n'
def test_select10(capsys):
path = os.path.dirname(__file__) + '/resources'
run_script('connect to [{}];\n'
'S = a b | a C | ();'
'C = S b;'
'select u from [graph.txt] where (u) - S C -> (v.ID = 2);'.format(path + '/test_dir'))
assert capsys.readouterr().out == '[0, 1, 2, 3]\n'
| StarcoderdataPython |
5115985 | from fastapi import APIRouter
from typing import Any, List
from fastapi import Depends, FastAPI, HTTPException
from starlette.status import HTTP_201_CREATED, HTTP_404_NOT_FOUND
from sqlalchemy.orm import Session
from app.infra.postgres.crud.user import users
from app.schemas import user
from app.infra.postgres.models.base import get_db
from app.auth import functions
from app.schemas import auth
router = APIRouter()
@router.get("/", response_model=List[user.User], tags=["Users"])
def list_users(db: Session = Depends(get_db), skip: int = 0, limit: int = 100) -> Any:
data_users = users.get_all(db=db, skip=skip, limit=limit)
return data_users
@router.post(
"/",
response_model=user.User,
status_code=HTTP_201_CREATED,
tags=["Users"])
def create_users(*, db: Session = Depends(get_db),
user_obj: user.UserCreate,
current_user: auth.UserAuthBase = Depends(functions.get_current_user)) -> Any:
data_user = users.get_email(db=db, email=user_obj.email)
if data_user:
raise HTTPException(status_code=400, detail="Email already registered")
data_user = users.create(db=db, obj_in=user_obj)
return data_user
@router.put(
"/{email}", response_model=user.User,
responses={HTTP_404_NOT_FOUND: {"model": user.HTTPError}},
tags=["Users"])
def update_user(
*, db: Session = Depends(get_db),
email: str,
user_obj: user.UserUpdate,
current_user: auth.UserAuthBase = Depends(functions.get_current_user)) -> Any:
data_user = users.get_email(db=db, email=email)
if not data_user:
raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="User not found")
data_user = users.update(db=db, db_obj=data_user, obj_in=user_obj)
return data_user
@router.delete("/{email}", response_model=user.User,
responses={HTTP_404_NOT_FOUND: {"model": user.HTTPError}},
tags=["Users"])
def delete_users(*, db: Session = Depends(get_db), email: str) -> Any:
data_user = users.get_email(db=db, email=email)
if not data_user:
raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="User not found")
data_user = users.remove(db=db, email=email)
return data_user | StarcoderdataPython |
8115490 | from hypercomplex import Order, Names
import argparse as ap
import matplotlib as mpl
import seaborn as sea
import pylab as pyl
import numpy as np
# Color Maps: https://matplotlib.org/stable/tutorials/colors/colormaps.html
def plot(**options):
def option(name, default, **options):
if name in options:
return options[name]
return default
showneg = option("negatives", False, **options)
poscmap = option("colormap", "RdBu_r", **options)
negcmap = option("ncolormap", "PiYG_r", **options)
diverge = option("diverging", False, **options)
figsize = option("figsize", 6.0, **options)
figdpis = option("figdpi", 100.0, **options)
filename = option("filename", "P{order}.{filetype}", **options)
filetype = option("filetype", "png", **options)
order = option("order", None, **options)
named = option("named", None, **options)
save = option("save", False, **options)
show = option("show", False, **options)
if named != None:
self = Names.get(named, None)
elif order != None:
self = Order.get(order, None)
else:
self = None
if self == None or (hasattr(self, "order") and self.order > 8):
raise NotImplementedError
sea.set_style("white")
size = self.dimensions
matrix = self.matrix(asindex=True)
figure, axis = pyl.subplots(figsize=(figsize, figsize), dpi=figdpis)
numcolors = 2 * size + 1 if diverge else size
positives = sea.color_palette(poscmap, numcolors)
negatives = sea.color_palette(negcmap, numcolors)
rectangle = mpl.patches.Rectangle
extras = dict(snap=False, lw=1, zorder=1)
for (x, y), value in np.ndenumerate(matrix):
location = (y, size - x - 1)
value = value + size if diverge else abs(value) - 1
color = negatives[value] if showneg and not diverge and value < 0 else positives[value]
patch = rectangle(location, 1, 1, edgecolor=color, facecolor=color, **extras)
axis.add_patch(patch)
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
axis.set_xlim(0, size)
axis.set_ylim(0, size)
pyl.tight_layout()
if save:
output = ((filename).format(order=self.order, filetype=filetype))
pyl.savefig(output)
if show:
pyl.show()
if __name__ == "__main__":
parser = ap.ArgumentParser()
parser.add_argument("-o", "--order", type=int, default=2)
parser.add_argument("-f", "--filename", type=str, default="P{order}.{filetype}")
parser.add_argument("-t", "--filetype", type=str, default="png")
parser.add_argument("-s", "--figsize", type=float, default=6.0)
parser.add_argument("-r", "--figdpi", type=int, default=100)
parser.add_argument("-c", "--colormap", type=str, default="RdBu_r")
parser.add_argument("-x", "--ncolormap", type=str, default="PiYG_r")
parser.add_argument("-n", "--named", type=str)
parser.add_argument("--negatives", action="store_true")
parser.add_argument("--diverging", action="store_true")
parser.add_argument("--save", action="store_true")
parser.add_argument("--show", action="store_true")
args, urgs = parser.parse_known_args()
plot(**vars(args))
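# Library-call sketch (assumes the requested order exists in the hypercomplex Order registry,
# which the guard above enforces for orders up to 8):
# plot(order=2, show=True)                  # display the order-2 multiplication table
# plot(order=3, save=True, filetype="png")  # write P3.png to the working directory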
| StarcoderdataPython |
9788952 | <filename>code/main.py
import sys
import torch
from tqdm import tqdm
from const import MODEL_NAME, TMP_FOLDER
from model import Module
from utils import (bar, cal_auc, cal_num, get_data_loader, make_submit,
setup_seed)
GPU = torch.device('cuda:0')
PATIENCE = 5
BATCH_SIZE = 100
EPOCH = 2000
LR = 0.001
def run_train():
train_loader = get_data_loader('train', BATCH_SIZE)
val_loader = get_data_loader('val', BATCH_SIZE)
model = Module().to(GPU)
criterion = torch.nn.CrossEntropyLoss()
# optim = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=LR)
optim = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=0.001)
    # mixed precision (AMP) via GradScaler / autocast
scaler = torch.cuda.amp.GradScaler()
min_loss = 10000
earlystopping = 0
print('-'*75)
for epoch in range(EPOCH):
#train
model.train()
print('Epoch {}/{}'.format(epoch+1, EPOCH))
for data in bar(train_loader):
with torch.cuda.amp.autocast():
c1, c2, c3, c4, c5, c6 = model(data[0].to(GPU))
loss = criterion(c1, data[1][:, 0].to(GPU)) + \
criterion(c2, data[1][:, 1].to(GPU)) + \
criterion(c3, data[1][:, 2].to(GPU)) + \
criterion(c4, data[1][:, 3].to(GPU)) + \
criterion(c5, data[1][:, 4].to(GPU)) + \
criterion(c6, data[1][:, 5].to(GPU))
optim.zero_grad()
scaler.scale(loss).backward()
scaler.step(optim)
scaler.update()
predict_nums = cal_num(c1, c2, c3, c4, c5, c6)
label_nums = cal_num(data[1][:, 0], data[1][:, 1], data[1][:, 2], data[1][:, 3], data[1][:, 4], data[1][:, 5], label=True)
train_score = cal_auc(predict_nums, label_nums)
#validation
model.eval()
with torch.no_grad():
for data in val_loader:
c1, c2, c3, c4, c5, c6 = model(data[0].to(GPU))
val_loss = criterion(c1, data[1][:, 0].to(GPU)) + \
criterion(c2, data[1][:, 1].to(GPU)) + \
criterion(c3, data[1][:, 2].to(GPU)) + \
criterion(c4, data[1][:, 3].to(GPU)) + \
criterion(c5, data[1][:, 4].to(GPU)) + \
criterion(c6, data[1][:, 5].to(GPU))
break
predict_nums = cal_num(c1, c2, c3, c4, c5, c6)
label_nums = cal_num(data[1][:, 0], data[1][:, 1], data[1][:, 2], data[1][:, 3], data[1][:, 4], data[1][:, 5], label=True)
val_score = cal_auc(predict_nums, label_nums)
print('loss:{} / auc:{} / val_loss:{} / val_auc:{}'.format(loss, train_score, val_loss, val_score))
print('-'*75)
if val_loss < min_loss:
earlystopping = 0
min_loss = val_loss
torch.save(model.state_dict(), TMP_FOLDER + MODEL_NAME)
else:
earlystopping += 1
if earlystopping == PATIENCE:
break
def run_test():
test_loader = get_data_loader('test_a', BATCH_SIZE)
model = Module().to(GPU)
model.load_state_dict(torch.load(TMP_FOLDER + MODEL_NAME))
model.eval()
result = []
with torch.no_grad():
for data in bar(test_loader):
c1, c2, c3, c4, c5, c6 = model(data.to(GPU))
result = cal_num(c1, c2, c3, c4, c5, c6, False, result)
make_submit(result, 'test_a')
if __name__ == "__main__":
setup_seed(0)
if len(sys.argv) > 1:
cmd = sys.argv[1]
if cmd == 'test':
run_test()
else:
            print('Invalid command argument `{}`'.format(cmd))
else:
run_train()
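# Usage note (follows directly from the dispatch above):
#   python main.py        -> run_train(): AMP training with early stopping, checkpoint saved to TMP_FOLDER
#   python main.py test   -> run_test(): loads the checkpoint and writes the test_a submission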
| StarcoderdataPython |
3381396 | <reponame>windowssocket/py_leetcode
class Solution:
def canCompleteCircuit(self, gas, cost):
"""
:type gas: List[int]
:type cost: List[int]
:rtype: int
"""
ret = -1
sum = total = 0
k = 0
comb = [gas[i] - cost[i] for i in range(len(gas))]
print(comb)
for i in range(len(gas)):
sum += comb[i]
if sum < 0:
sum = 0
k = i+1
            total += comb[i]
        # Check the overall total only after the whole loop: returning inside the loop
        # would wrongly report -1 whenever the running total dips below zero early on,
        # even though a valid starting station may exist later (e.g. this very test case).
        if total < 0:
            return ret
        return k
gas = [1,2,3,4,5]
cost = [3,4,5,1,2]
s = Solution()
print(s.canCompleteCircuit(gas, cost)) | StarcoderdataPython |
1632124 | <filename>src/conference_scheduler/validator.py
from conference_scheduler import converter
from conference_scheduler.lp_problem import constraints
def array_violations(array, events, slots, beta=None):
"""Take a schedule in array form and return any violated constraints
Parameters
----------
array : np.array
a schedule in array form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
constraints : list or tuple
of generator functions which each produce instances of
resources.Constraint
Returns
-------
Generator
of a list of strings indicating the nature of the violated
constraints
"""
return (
c.label
for c in constraints.all_constraints(events, slots, array, beta=beta)
if not c.condition
)
def is_valid_array(array, events, slots):
"""Take a schedule in array form and return whether it is a valid
solution for the given constraints
Parameters
----------
array : np.array
a schedule in array form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
bool
True if array represents a valid solution
"""
if len(array) == 0:
return False
violations = sum(1 for c in (array_violations(array, events, slots)))
return violations == 0
def is_valid_solution(solution, events, slots):
"""Take a solution and return whether it is valid for the
given constraints
Parameters
----------
solution: list or tuple
a schedule in solution form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
bool
True if schedule is a valid solution
"""
if len(solution) == 0:
return False
array = converter.solution_to_array(solution, events, slots)
return is_valid_array(array, events, slots)
def solution_violations(solution, events, slots):
"""Take a solution and return a list of violated constraints
Parameters
----------
solution: list or tuple
a schedule in solution form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
Generator
of a list of strings indicating the nature of the violated
constraints
"""
array = converter.solution_to_array(solution, events, slots)
return array_violations(array, events, slots)
def is_valid_schedule(schedule, events, slots):
"""Take a schedule and return whether it is a valid solution for the
given constraints
Parameters
----------
schedule : list or tuple
a schedule in schedule form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
bool
True if schedule is a valid solution
"""
if len(schedule) == 0:
return False
array = converter.schedule_to_array(schedule, events, slots)
return is_valid_array(array, events, slots)
def schedule_violations(schedule, events, slots):
"""Take a schedule and return a list of violated constraints
Parameters
----------
schedule : list or tuple
a schedule in schedule form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
Generator
of a list of strings indicating the nature of the violated
constraints
"""
array = converter.schedule_to_array(schedule, events, slots)
return array_violations(array, events, slots)
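# Consistency sketch (illustrative): is_valid_schedule just checks that no constraint is
# violated, so for any non-empty schedule these two expressions agree:
#   is_valid_schedule(schedule, events, slots)
#   not any(schedule_violations(schedule, events, slots))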
| StarcoderdataPython |
4930175 | from distutils.core import setup
setup(
name = 'Categorical_similarity_measures',
packages = ['Categorical_similarity_measures'],
version = '0.4',
license='MIT',
description = 'Similarity Measures Utility Package',
long_description="Determining similarity or distance between two objects is a key step for several data mining and knowledge discovery tasks. For quantitative data, Minkowski distance plays a major role in finding the distance between two entities. The prevalently known and used similarity measures are Manhattan distance which is the Minkowski distance of order 1 and the Euclidean distance which is the Minkowski distance of order 2. But, in the case of categorical data, we know that there does not exist an innate order and that makes it problematic to find the distance between two categorical points. This is a utility package for finding similarity measures such as Eskin, IOF, OF, Overlap (Simple Matching), Goodall1, Goodall2, Goodall3, Goodall4, Lin, Lin1, Morlini_Zani (S2), Variable Entropy and Variable Mutability. These similarity measures help in finding the distance between two or more objects or entities containing categorical data.",
long_description_content_type="text/markdown",
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/AjayMukundS/Categorical_similarity_measures',
download_url = 'https://github.com/AjayMukundS/Categorical_similarity_measures/archive/v_04.tar.gz',
keywords = ['Similarity', 'Distance', 'Categorical data'],
install_requires=[
'pandas',
'numpy',
'category_encoders',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
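# Illustrative sketch (not part of the package metadata above): the "Overlap" / simple
# matching measure mentioned in the long description scores two categorical records by the
# fraction of attributes on which they agree.
# def overlap(a, b):
#     return sum(x == y for x, y in zip(a, b)) / len(a)
# overlap(['red', 'S', 'yes'], ['red', 'M', 'yes'])  # -> 0.666...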
| StarcoderdataPython |
378102 | <filename>tests/zxpy_test.py
import ast
import os
import pytest
import zx
@pytest.mark.parametrize(
('command', 'output'),
(
('echo hello world', 'hello world'),
('ls | tail -6 | wc -l', '6'),
("find . -maxdepth 1 -name '*\\.py' | grep -v test | wc -l", '3'),
("cat /etc/shadow 2> /dev/null; echo $?", '1'),
)
)
def test_shell_output(command: str, output: str) -> None:
assert zx.run_shell(command).rstrip('\r\n') == output
@pytest.mark.parametrize(
('command', 'stdout', 'stderr', 'return_code'),
(
('echo hello world', 'hello world\n', '', 0),
('echo -n failed && exit 200', 'failed', '', 200),
('cat .', '', 'cat: .: Is a directory\n', 1),
)
)
def test_stdout_stderr_returncode(
command: str,
stdout: str,
stderr: str,
return_code: int,
) -> None:
assert zx.run_shell_alternate(command) == (stdout, stderr, return_code)
@pytest.mark.parametrize(
('filepath'),
(
'./tests/test_files/yeses.py',
'./tests/test_files/returncode.py',
'./tests/test_files/quoting.py',
),
)
def test_files(filepath) -> None:
filename = os.path.basename(filepath)
with open(filepath) as file:
module = ast.parse(file.read())
zx.run_zxpy(filename, module)
| StarcoderdataPython |
8007096 | #!/usr/bin/python3
import os.path
import devpipeline_core.paths
import devpipeline_configure.parser
import devpipeline_configure.modifiers
def get_override_path(config, override_name, package_name):
return devpipeline_core.paths.make_path(
config, "overrides.d", override_name, "{}.conf".format(package_name)
)
def _apply_single_override(override_path, config):
if os.path.isfile(override_path):
override_config = devpipeline_configure.parser.read_config(override_path)
devpipeline_configure.modifiers.apply_config_modifiers(override_config, config)
return True
return False
def _apply_override(override_name, full_config):
for name, config in full_config.items():
applied_overrides = []
override_path = get_override_path(config, override_name, name)
if _apply_single_override(override_path, config):
applied_overrides.append(override_name)
if applied_overrides:
config.set("dp.applied_overrides", ",".join(applied_overrides))
def apply_overrides(config):
override_list = config.get("DEFAULT").get_list("dp.overrides")
for override in override_list:
_apply_override(override, config)
| StarcoderdataPython |
9665613 | <filename>deathgod/map_view.py
#!/usr/bin/env python
# encoding: utf-8
""" map_view.py - module containing classes for drawing the game map. """
import pygame
from pygame.locals import *
from . import settings
from .view import View
from .ordered_pair import x, y
from . import colors
class MapView(View):
"""This is a view that can show a portion of a GameMap object centered around the player."""
def update(self, player_state, map_state):
"""Redraws the MapView with a given map and player state."""
# clear all tiles
self.clear()
# determine what portion of the map is visible,
# negative or invalid coordinates are handled later
viewable_x_min = player_state.position[x] - (self.tile_count[x] // 2)
viewable_x_max = viewable_x_min + self.tile_count[x]
x_range = range(viewable_x_min, viewable_x_max)
#print "x_range = " + str(x_range)
viewable_y_min = player_state.position[y] - (self.tile_count[y] // 2)
viewable_y_max = viewable_y_min + self.tile_count[y]
y_range = range(viewable_y_min, viewable_y_max)
#print "y_range = " + str(y_range)
#print "viewable x_min = %d, x_max = %d, y_min = %d, y_max = %d" %\
# (viewable_x_min, viewable_x_max, viewable_y_min, viewable_y_max)
# draw the map view
k = 0 # these variables keep track of where we are on the screen
l = 0
for i in x_range:
for j in y_range:
#print "in draw loop: i = %d, j = %d, k = %d, l = %d" % (i, j, k, l)
# skip drawing the tile if the coordinates fall outside the map
if not (i < 0 or j < 0 or i >= map_state.width or j >= map_state.height):
tile = map_state.tiles[i][j]
# first draw the tile's background color
self.add_image(self.sprite_list[tile.tile_type]['bg']['v'],
self.tile_mapping_points[k][l])
# if tile has an entity, draw that
e_top = tile.get_top_visible_entity()
if e_top is not None:
self.add_image_with_offset(
e_top.sprite,
self.tile_mapping_points[k][l],
self.draw_offset
)
else: # draw the tile
self.add_image_with_offset(
self.sprite_list[tile.tile_type]['char']['v'],\
self.tile_mapping_points[k][l],\
self.draw_offset
)
#if self.hilight_on and i == self.hilight_position[x] and j == self.hilight_position[y]:
# self.add_image(self.hilight_img,
# (self.tile_mapping_points[self.hilight_position[x]],
# self.tile_mapping_points[self.hilight_position[y]]))
l = l + 1
l = 0
k = k + 1
#for i in range(self.tile_count[x]):
# for j in range(self.tile_count[y]):
# self.add_image_with_offset(self.testTile, self.tile_mapping_points[i][j], self.draw_offset)
def _gen_tile_points(self):
"""Creates 2D array of screen coordinates of all tile positions,
where (i, j) = (0, 0) is the bottom left corner
- using these mapping points to place tiles allows us to assume the origin
of the screen is the lower left corner when working on the tile level"""
columns = []
for i in range(self.tile_count[x]):
column = []
for j in range(self.tile_count[y]):
tm = (self.tile_width * i, self.height - (self.tile_height * (j+1)))
column.append(tm)
columns.append(tuple(column))
return tuple(columns)
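    # Worked example of the mapping above (illustrative numbers): with tile_count = (2, 2),
    # a 10x10 tile size and a view height of 20, column 0 is ((0, 10), (0, 0)) and column 1
    # is ((10, 10), (10, 0)), so index (0, 0) lands at the bottom-left tile as documented.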
def __init__(self, screen, rect, background_color, tile_count, sprite_list=None):
View.__init__(self, screen, rect, background_color)
self.sprite_list = sprite_list
self.tile_count = tile_count
# calculate tile dimensions
self.tile_width = int(self.width / tile_count[x])
self.tile_height = int(self.height / tile_count[y])
self.tile_size = (self.tile_width, self.tile_height)
        x_diff = self.width - (self.tile_width * tile_count[x])
        y_diff = self.height - (self.tile_height * tile_count[y])
print("tile dimensions = %s, x_diff = %d, y_diff = %d" % \
(str([self.tile_width, self.tile_height]), x_diff, y_diff))
self.draw_offset = (settings.map_draw_offset[x] + (x_diff/2),
settings.map_draw_offset[y] + (y_diff/2))
self.tile_mapping_points = self._gen_tile_points()
self.hilight_img = pygame.Surface((self.tile_width, self.tile_height))
self.hilight_img.fill(colors.yellow)
self.hilight_img.set_alpha(110)
self.hilight_on = True
self.hilight_position = [10, 10]
if __name__ == "__main__":
print(MapView.__doc__)
| StarcoderdataPython |
4903243 | # test cases for MynesBoard
from Pod7.MynesBoard import MynesBoard
import unittest
class MyneBoardTestCase(unittest.TestCase):
def test_board_default(self):
"""
Test if the default values of the board's attributes are correct
"""
test_board = MynesBoard()
self.assertEqual(test_board.width, 8)
self.assertEqual(test_board.height, 8)
self.assertEqual(test_board.mine_count, 10)
def test_mine_count(self):
"""
Test if the actual number of mine distributed is the same as the
default mine count
"""
test_board = MynesBoard()
count = 0
for x in range(test_board.width):
for y in range(test_board.height):
if test_board.board[y][x].value == -1:
count += 1
self.assertEqual(count, test_board.mine_count)
def test_square_number_correct(self):
"""
Randomly choose one square which is not Mine or Empty and count
the number of mines around the square. Test if the number we count
is the same as the number on this square.
"""
# Since there is no number on any square now, this test will be
# completed later.
pass
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
5163904 | # Definition for a binary tree node.
import math
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def __init__(self):
self.x = 0
self.y = 0
    def isBalanced(self, root: TreeNode) -> bool:
        # A tree is balanced when, at every node, the two subtree heights differ by at most 1.
        def height(node):
            if node is None:
                return 0
            left = height(node.left)
            right = height(node.right)
            if left < 0 or right < 0 or abs(left - right) > 1:
                return -1  # sentinel meaning "this subtree is already unbalanced"
            return max(left, right) + 1
        return height(root) >= 0
def creatBTree(data, index):
pNode = None
if index < len(data):
if data[index] == None:
return
pNode = TreeNode(data[index])
pNode.left = creatBTree(data, 2 * index + 1) # [1, 3, 7, 15, ...]
pNode.right = creatBTree(data, 2 * index + 2) # [2, 5, 12, 25, ...]
return pNode
def main():
a = Solution()
root = [3,9,20,None,None,15,7]
root1 = [1,2,3,4,None,None,5]
    print(a.isBalanced(creatBTree(root, 0)))   # True: [3,9,20,None,None,15,7] is balanced
    print(a.isBalanced(creatBTree(root1, 0)))  # True: [1,2,3,4,None,None,5] is also balanced
if __name__ == "__main__":
main() | StarcoderdataPython |
9631591 | from django.contrib import admin
from core.models import Evento
class EventoAdmin(admin.ModelAdmin):
list_display = ('titulo', 'data_evento', 'data_criacao')
list_filter = ['usuario', ]
admin.site.register(Evento, EventoAdmin)
| StarcoderdataPython |
6447087 | <gh_stars>0
from enum import Enum
from ..base import Base
from .material import *
from .axis import Axis
STRUCTURAL_PROPERTY = "Objectives.Structural.Properties"
class MemberType(int, Enum):
Beam = 0
Column = 1
Generic1D = 2
Slab = 3
Wall = 4
Generic2D = 5
VoidCutter1D = 6
VoidCutter2D = 7
class BaseReferencePoint(int, Enum):
Centroid = 0
TopLeft = 1
TopCentre = 2
TopRight = 3
MidLeft = 4
MidRight = 5
BotLeft = 6
BotCentre = 7
BotRight = 8
class ReferenceSurface(int, Enum):
Top = 0
Middle = 1
Bottom = 2
class PropertyType2D(int, Enum):
Stress = 0
Fabric = 1
Plate = 2
Shell = 3
Curved = 4
Wall = 5
Strain = 6
Axi = 7
Load = 8
class PropertyType3D(int, Enum):
Solid = 0
Infinite = 1
class ShapeType(int, Enum):
Rectangular = 0
Circular = 1
I = 2
Tee = 3
Angle = 4
Channel = 5
Perimeter = 6
Box = 7
Catalogue = 8
Explicit = 9
class PropertyTypeSpring(int, Enum):
Axial = 0
Torsional = 1
General = 2
Matrix = 3
TensionOnly = 4
CompressionOnly = 5
Connector = 6
LockUp = 7
Gap = 8
Friction = 9
class PropertyTypeDamper(int, Enum):
Axial = 0
Torsional = 1
General = 2
class Property(Base, speckle_type=STRUCTURAL_PROPERTY):
name: str = None
class SectionProfile(Base, speckle_type=STRUCTURAL_PROPERTY + ".SectionProfile"):
name: str = None
shapeType: ShapeType = None
area: float = 0.0
Iyy: float = 0.0
Izz: float = 0.0
J: float = 0.0
Ky: float = 0.0
weight: float = 0.0
units: str = None
class Property1D(Property, speckle_type=STRUCTURAL_PROPERTY + ".Property1D"):
memberType: MemberType = None
Material: Material = None
SectionProfile: SectionProfile = None
BaseReferencePoint: BaseReferencePoint = None
offsetY: float = 0.0
offsetZ: float = 0.0
class Property2D(Property, speckle_type=STRUCTURAL_PROPERTY + ".Property2D"):
PropertyType2D: PropertyType2D = None
thickness: float = 0.0
Material: Material = None
axis: Axis = None
referenceSurface: ReferenceSurface = None
zOffset: float = 0.0
modifierInPlane: float = 0.0
modifierBending: float = 0.0
modifierShear: float = 0.0
modifierVolume: float = 0.0
class Property3D(Property, speckle_type=STRUCTURAL_PROPERTY + ".Property3D"):
PropertyType3D: PropertyType3D = None
Material: Material = None
axis: Axis = None
class PropertyDamper(Property, speckle_type=STRUCTURAL_PROPERTY + ".PropertyDamper"):
damperType: PropertyTypeDamper = None
dampingX: float = 0.0
dampingY: float = 0.0
dampingZ: float = 0.0
dampingXX: float = 0.0
dampingYY: float = 0.0
dampingZZ: float = 0.0
class PropertyMass(Property, speckle_type=STRUCTURAL_PROPERTY + ".PropertyMass"):
mass: float = 0.0
inertiaXX: float = 0.0
inertiaYY: float = 0.0
inertiaZZ: float = 0.0
inertiaXY: float = 0.0
inertiaYZ: float = 0.0
inertiaZX: float = 0.0
massModified: bool = None
massModifierX: float = 0.0
massModifierY: float = 0.0
massModifierZ: float = 0.0
class PropertySpring(Property, speckle_type=STRUCTURAL_PROPERTY + ".PropertySpring"):
springType: PropertyTypeSpring = None
springCurveX: float = 0.0
stiffnessX: float = 0.0
springCurveY: float = 0.0
stiffnessY: float = 0.0
springCurveZ: float = 0.0
stiffnessZ: float = 0.0
springCurveXX: float = 0.0
stiffnessXX: float = 0.0
springCurveYY: float = 0.0
stiffnessYY: float = 0.0
springCurveZZ: float = 0.0
stiffnessZZ: float = 0.0
dampingRatio: float = 0.0
dampingX: float = 0.0
dampingY: float = 0.0
dampingZ: float = 0.0
dampingXX: float = 0.0
dampingYY: float = 0.0
dampingZZ: float = 0.0
matrix: float = 0.0
postiveLockup: float = 0.0
frictionCoefficient: float = 0.0
class ReferenceSurfaceEnum(int, Enum):
Concrete = 0
Steel = 1
Timber = 2
Aluminium = 3
Masonry = 4
FRP = 5
Glass = 6
Fabric = 7
Rebar = 8
Tendon = 9
ColdFormed = 10
Other = 11
class shapeType(int, Enum):
Concrete = 0
Steel = 1
Timber = 2
Aluminium = 3
Masonry = 4
FRP = 5
Glass = 6
Fabric = 7
Rebar = 8
Tendon = 9
ColdFormed = 10
Other = 11
| StarcoderdataPython |
8044609 | # coding: utf-8
# flake8: noqa
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.16.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from gitea_api.models.api_error import APIError
from gitea_api.models.access_token import AccessToken
from gitea_api.models.add_collaborator_option import AddCollaboratorOption
from gitea_api.models.add_time_option import AddTimeOption
from gitea_api.models.annotated_tag import AnnotatedTag
from gitea_api.models.annotated_tag_object import AnnotatedTagObject
from gitea_api.models.attachment import Attachment
from gitea_api.models.branch import Branch
from gitea_api.models.branch_protection import BranchProtection
from gitea_api.models.combined_status import CombinedStatus
from gitea_api.models.comment import Comment
from gitea_api.models.commit import Commit
from gitea_api.models.commit_affected_files import CommitAffectedFiles
from gitea_api.models.commit_date_options import CommitDateOptions
from gitea_api.models.commit_meta import CommitMeta
from gitea_api.models.commit_status import CommitStatus
from gitea_api.models.commit_status_state import CommitStatusState
from gitea_api.models.commit_user import CommitUser
from gitea_api.models.contents_response import ContentsResponse
from gitea_api.models.create_access_token_option import CreateAccessTokenOption
from gitea_api.models.create_branch_protection_option import CreateBranchProtectionOption
from gitea_api.models.create_branch_repo_option import CreateBranchRepoOption
from gitea_api.models.create_email_option import CreateEmailOption
from gitea_api.models.create_file_options import CreateFileOptions
from gitea_api.models.create_fork_option import CreateForkOption
from gitea_api.models.create_gpg_key_option import CreateGPGKeyOption
from gitea_api.models.create_hook_option import CreateHookOption
from gitea_api.models.create_hook_option_config import CreateHookOptionConfig
from gitea_api.models.create_issue_comment_option import CreateIssueCommentOption
from gitea_api.models.create_issue_option import CreateIssueOption
from gitea_api.models.create_key_option import CreateKeyOption
from gitea_api.models.create_label_option import CreateLabelOption
from gitea_api.models.create_milestone_option import CreateMilestoneOption
from gitea_api.models.create_o_auth2_application_options import CreateOAuth2ApplicationOptions
from gitea_api.models.create_org_option import CreateOrgOption
from gitea_api.models.create_pull_request_option import CreatePullRequestOption
from gitea_api.models.create_pull_review_comment import CreatePullReviewComment
from gitea_api.models.create_pull_review_options import CreatePullReviewOptions
from gitea_api.models.create_release_option import CreateReleaseOption
from gitea_api.models.create_repo_option import CreateRepoOption
from gitea_api.models.create_status_option import CreateStatusOption
from gitea_api.models.create_tag_option import CreateTagOption
from gitea_api.models.create_team_option import CreateTeamOption
from gitea_api.models.create_user_option import CreateUserOption
from gitea_api.models.create_wiki_page_options import CreateWikiPageOptions
from gitea_api.models.cron import Cron
from gitea_api.models.delete_email_option import DeleteEmailOption
from gitea_api.models.delete_file_options import DeleteFileOptions
from gitea_api.models.deploy_key import DeployKey
from gitea_api.models.dismiss_pull_review_options import DismissPullReviewOptions
from gitea_api.models.edit_attachment_options import EditAttachmentOptions
from gitea_api.models.edit_branch_protection_option import EditBranchProtectionOption
from gitea_api.models.edit_deadline_option import EditDeadlineOption
from gitea_api.models.edit_git_hook_option import EditGitHookOption
from gitea_api.models.edit_hook_option import EditHookOption
from gitea_api.models.edit_issue_comment_option import EditIssueCommentOption
from gitea_api.models.edit_issue_option import EditIssueOption
from gitea_api.models.edit_label_option import EditLabelOption
from gitea_api.models.edit_milestone_option import EditMilestoneOption
from gitea_api.models.edit_org_option import EditOrgOption
from gitea_api.models.edit_pull_request_option import EditPullRequestOption
from gitea_api.models.edit_reaction_option import EditReactionOption
from gitea_api.models.edit_release_option import EditReleaseOption
from gitea_api.models.edit_repo_option import EditRepoOption
from gitea_api.models.edit_team_option import EditTeamOption
from gitea_api.models.edit_user_option import EditUserOption
from gitea_api.models.email import Email
from gitea_api.models.external_tracker import ExternalTracker
from gitea_api.models.external_wiki import ExternalWiki
from gitea_api.models.file_commit_response import FileCommitResponse
from gitea_api.models.file_delete_response import FileDeleteResponse
from gitea_api.models.file_links_response import FileLinksResponse
from gitea_api.models.file_response import FileResponse
from gitea_api.models.gpg_key import GPGKey
from gitea_api.models.gpg_key_email import GPGKeyEmail
from gitea_api.models.general_api_settings import GeneralAPISettings
from gitea_api.models.general_attachment_settings import GeneralAttachmentSettings
from gitea_api.models.general_repo_settings import GeneralRepoSettings
from gitea_api.models.general_ui_settings import GeneralUISettings
from gitea_api.models.generate_repo_option import GenerateRepoOption
from gitea_api.models.git_blob_response import GitBlobResponse
from gitea_api.models.git_entry import GitEntry
from gitea_api.models.git_hook import GitHook
from gitea_api.models.git_object import GitObject
from gitea_api.models.git_service_type import GitServiceType
from gitea_api.models.git_tree_response import GitTreeResponse
from gitea_api.models.hook import Hook
from gitea_api.models.id_assets_body import IdAssetsBody
from gitea_api.models.identity import Identity
from gitea_api.models.inline_response200 import InlineResponse200
from gitea_api.models.inline_response2001 import InlineResponse2001
from gitea_api.models.internal_tracker import InternalTracker
from gitea_api.models.issue import Issue
from gitea_api.models.issue_deadline import IssueDeadline
from gitea_api.models.issue_labels_option import IssueLabelsOption
from gitea_api.models.issue_template import IssueTemplate
from gitea_api.models.label import Label
from gitea_api.models.markdown_option import MarkdownOption
from gitea_api.models.merge_pull_request_option import MergePullRequestOption
from gitea_api.models.migrate_repo_form import MigrateRepoForm
from gitea_api.models.migrate_repo_options import MigrateRepoOptions
from gitea_api.models.milestone import Milestone
from gitea_api.models.node_info import NodeInfo
from gitea_api.models.node_info_services import NodeInfoServices
from gitea_api.models.node_info_software import NodeInfoSoftware
from gitea_api.models.node_info_usage import NodeInfoUsage
from gitea_api.models.node_info_usage_users import NodeInfoUsageUsers
from gitea_api.models.note import Note
from gitea_api.models.notification_count import NotificationCount
from gitea_api.models.notification_subject import NotificationSubject
from gitea_api.models.notification_thread import NotificationThread
from gitea_api.models.notify_subject_type import NotifySubjectType
from gitea_api.models.o_auth2_application import OAuth2Application
from gitea_api.models.organization import Organization
from gitea_api.models.organization_permissions import OrganizationPermissions
from gitea_api.models.pr_branch_info import PRBranchInfo
from gitea_api.models.payload_commit import PayloadCommit
from gitea_api.models.payload_commit_verification import PayloadCommitVerification
from gitea_api.models.payload_user import PayloadUser
from gitea_api.models.permission import Permission
from gitea_api.models.public_key import PublicKey
from gitea_api.models.pull_request import PullRequest
from gitea_api.models.pull_request_meta import PullRequestMeta
from gitea_api.models.pull_review import PullReview
from gitea_api.models.pull_review_comment import PullReviewComment
from gitea_api.models.pull_review_request_options import PullReviewRequestOptions
from gitea_api.models.reaction import Reaction
from gitea_api.models.reference import Reference
from gitea_api.models.release import Release
from gitea_api.models.repo_commit import RepoCommit
from gitea_api.models.repo_topic_options import RepoTopicOptions
from gitea_api.models.repo_transfer import RepoTransfer
from gitea_api.models.repository import Repository
from gitea_api.models.repository_meta import RepositoryMeta
from gitea_api.models.review_state_type import ReviewStateType
from gitea_api.models.search_results import SearchResults
from gitea_api.models.server_version import ServerVersion
from gitea_api.models.state_type import StateType
from gitea_api.models.stop_watch import StopWatch
from gitea_api.models.submit_pull_review_options import SubmitPullReviewOptions
from gitea_api.models.tag import Tag
from gitea_api.models.team import Team
from gitea_api.models.time_stamp import TimeStamp
from gitea_api.models.timeline_comment import TimelineComment
from gitea_api.models.topic_name import TopicName
from gitea_api.models.topic_response import TopicResponse
from gitea_api.models.tracked_time import TrackedTime
from gitea_api.models.transfer_repo_option import TransferRepoOption
from gitea_api.models.update_file_options import UpdateFileOptions
from gitea_api.models.user import User
from gitea_api.models.user_heatmap_data import UserHeatmapData
from gitea_api.models.user_settings import UserSettings
from gitea_api.models.user_settings_options import UserSettingsOptions
from gitea_api.models.watch_info import WatchInfo
from gitea_api.models.wiki_commit import WikiCommit
from gitea_api.models.wiki_commit_list import WikiCommitList
from gitea_api.models.wiki_page import WikiPage
from gitea_api.models.wiki_page_meta_data import WikiPageMetaData
| StarcoderdataPython |
3417079 | <reponame>zoheezus/python-bootcamp
my_dict = {"key1":"value1", "key2":"value2"}
print(my_dict)
print(my_dict["key1"])
prices_lookup = {"apple":2.99,"oranges":1.99,"milk":5.80}
print(prices_lookup["apple"])
d = {"k1":123,"k2":[0,1,2],"k3":{"insideKey":100}}
print(d["k2"])
print(d["k3"])
# stacking index calls
print(d["k3"]["insideKey"])
d["k4"] = "new value".upper()
print(d)
print(d.keys())
print(d.values())
print(d.items()) | StarcoderdataPython |
1745407 | <filename>pdf_rendering_service/documents/migrations/0003_auto_20210124_1643.py
# Generated by Django 3.1.5 on 2021-01-24 16:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('documents', '0002_auto_20210124_1557'),
]
operations = [
migrations.AlterField(
model_name='document',
name='status',
field=models.IntegerField(blank=True, choices=[(0, 'processing'), (1, 'done')], db_index=True, default=0),
),
]
| StarcoderdataPython |
5025269 | <gh_stars>1-10
import unittest
from unittest.mock import patch
from constants.env_variables import read_list_from_env
def dict_mock(variable, default):
return {"block": '42', "multistr": "hello;my,world", 'multiint': '1,2;3'}.get(variable, default)
class UtilTest(unittest.TestCase):
def setUp(self) -> None:
pass
@patch('bot.constants.env_variables.os.getenv', dict_mock)
def test_read_wrong_name(self):
res = read_list_from_env("no_variable", int)
self.assertEqual(res, [])
@patch('bot.constants.env_variables.os.getenv', dict_mock)
def test_read_one_string(self):
res = read_list_from_env("block", str)
self.assertEqual(res, ['42'])
@patch('bot.constants.env_variables.os.getenv', dict_mock)
def test_read_one_int(self):
res = read_list_from_env("block", int)
self.assertEqual(res, [42])
@patch('bot.constants.env_variables.os.getenv', dict_mock)
def test_read_multiple_strings(self):
res = read_list_from_env("multistr", str)
self.assertEqual(res, ['hello', 'my', 'world'])
@patch('bot.constants.env_variables.os.getenv', dict_mock)
def test_read_multiple_ints(self):
res = read_list_from_env("multiint", int)
self.assertEqual(res, [1, 2, 3])
| StarcoderdataPython |
11286495 | <reponame>Wattyyy/LeetCode<gh_stars>0
# https://leetcode.com/problems/uncrossed-lines
from collections import defaultdict
class Solution:
def maxUncrossedLines(self, A, B):
dp, m, n = defaultdict(int), len(A), len(B)
for i in range(m):
for j in range(n):
dp[(i, j)] = max(
dp[(i - 1, j - 1)] + (A[i] == B[j]), dp[(i - 1, j)], dp[(i, j - 1)]
)
return dp[(m - 1, n - 1)]
| StarcoderdataPython |
9618701 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 22:09:03 2018
@author: <NAME>
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
dataset = pd.read_csv('StudentsPerformance.csv')
X = dataset[['gender', 'race/ethnicity', 'parental level of education', 'lunch', 'test preparation course']]
Y = dataset[['math score', 'reading score', 'writing score']]
X = pd.get_dummies(X, columns = ['gender', 'race/ethnicity', 'parental level of education', 'lunch', 'test preparation course'])
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.10, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
sc1 = StandardScaler()
Y_train = sc1.fit_transform(Y_train)
Y_test = sc1.transform(Y_test)
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
def create_model():
model = Sequential()
model.add(Dense(units = 17, kernel_initializer = 'uniform', activation = 'relu', input_dim = 17))
model.add(Dense(units = 9, kernel_initializer = 'uniform', activation = 'relu'))
model.add(Dense(units = 3, kernel_initializer = 'uniform', activation = 'linear'))
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
return model
seed = 7
np.random.seed(seed)
model = KerasClassifier(build_fn = create_model, epochs = 100, batch_size = 5, verbose = 50)
from sklearn.model_selection import GridSearchCV as gscv
batch_size = [32, 64 ,100]
epochs = [25, 50, 100, 200, 150]
param_grid = dict(batch_size=batch_size, epochs=epochs)
grid = gscv(estimator=model, param_grid=param_grid, verbose = 60, n_jobs= -1)
grid_search = grid.fit(X_train, Y_train)
grid_search.best_score_#0.7499999933772616
grid_search.best_params_#{'batch_size': 100, 'epochs': 200}
| StarcoderdataPython |
57098 | <gh_stars>1-10
"""Provide a class for csv character list import.
Copyright (c) 2021 <NAME>
For further information see https://github.com/peter88213/PyWriter
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
import os
import re
from pywriter.csv.csv_file import CsvFile
from pywriter.model.character import Character
class CsvCharList(CsvFile):
"""csv file representation of an yWriter project's characters table.
"""
DESCRIPTION = 'Character list'
SUFFIX = '_charlist'
rowTitles = ['ID', 'Name', 'Full name', 'Aka', 'Description', 'Bio', 'Goals', 'Importance', 'Tags', 'Notes']
def read(self):
"""Parse the csv file located at filePath,
fetching the Character attributes contained.
Return a message beginning with SUCCESS or ERROR.
Extend the superclass method.
"""
message = CsvFile.read(self)
if message.startswith('ERROR'):
return message
for cells in self.rows:
if 'CrID:' in cells[0]:
crId = re.search('CrID\:([0-9]+)', cells[0]).group(1)
self.srtCharacters.append(crId)
self.characters[crId] = Character()
self.characters[crId].title = cells[1]
self.characters[crId].fullName = cells[2]
self.characters[crId].aka = cells[3]
self.characters[crId].desc = self.convert_to_yw(cells[4])
self.characters[crId].bio = cells[5]
self.characters[crId].goals = cells[6]
if Character.MAJOR_MARKER in cells[7]:
self.characters[crId].isMajor = True
else:
self.characters[crId].isMajor = False
self.characters[crId].tags = self.get_list(cells[8])
self.characters[crId].notes = self.convert_to_yw(cells[9])
return 'SUCCESS: Data read from "' + os.path.normpath(self.filePath) + '".'
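# Hypothetical usage sketch (added for illustration; not part of the original module).
# It assumes the CsvFile base class accepts the source path in its constructor, which
# may differ in the actual PyWriter API:
#     charList = CsvCharList('project_charlist.csv')
#     message = charList.read()
#     if message.startswith('SUCCESS'):
#         print(len(charList.characters), 'characters imported')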
| StarcoderdataPython |
1724878 | <reponame>CHHOrganization/BlackDoc
import os,sys,time
Banner_msg = "Follow Us At Telegram To Stay Upto Date."
Line = "*****************************************"
Endl2 = " 100% DC. Tool Is Done Loading "
DCWMs = " Welcome 2 DarkCity - DC. Tool "
#Clearing Screen
def clearConsole():
Refresh = 'clear'
if os.name in ('nt', 'dos'):
#If Machine is running on Windows, it will use cls
Refresh = 'cls'
os.system(Refresh)
def DC_Banner():
#Banner Data
print("|//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\|\n\
|\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//|\n\
|//\\\//\\\->Cryptic Hats Hackers<-//\\\//\\\|\n\
|\\\//\\\//\\\->DC. Tool V0.0.01<-//\\\//\\\//|\n\
|//\\\//\\\//\\\->SAFWAL 1.0.1<-//\\\//\\\//\\\|\n\
|\\\//\\\//\\\//\\\->Temina 1<-//\\\//\\\//\\\//|\n\
|//\\\//\\\//\\\\//\\\->.Org<-//\\\//\\\//\\\//\\\|\n\
|\\\//\\\//\\\//\\\//\\\-><-//\\\//\\\//\\\//\\\//|\n\
|//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\//\\\|")
| StarcoderdataPython |
1664237 | <reponame>LoveBootCaptain/WeatherPi<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
from init_logging import log_string
class Config:
def __init__(self):
# read the config file
self.config_data = open('/home/pi/WeatherPi/config.json').read()
self.config = json.loads(self.config_data)
log_string('config file read by module {}'.format(self.__class__))
def get_config(self):
return self.config
if __name__ == '__main__':
Config().get_config()
| StarcoderdataPython |
8061742 | import datetime
import os
import shutil
import StringIO
from collections import Counter
from operator import attrgetter
from flask import current_app, render_template, request, flash, session, render_template_string, jsonify, redirect, make_response
from emonitor.extensions import scheduler, db, signal
from emonitor.modules.alarms.alarm import Alarm
from emonitor.modules.alarms.alarmhistory import AlarmHistory
from emonitor.modules.alarms.alarmfield import AlarmField
from emonitor.modules.alarmobjects.alarmobject import AlarmObject
from emonitor.modules.alarms.alarmreport import AlarmReport
from emonitor.modules.streets.city import City
from emonitor.modules.streets.street import Street
from emonitor.modules.cars.car import Car
from emonitor.modules.settings.settings import Settings
from emonitor.modules.settings.department import Department
from emonitor.modules.printers.printers import Printers
from emonitor.modules.monitors.monitor import Monitor
from emonitor.modules.monitors.monitorlayout import MonitorLayout
from emonitor.frontend.frontend import frontend
def getFrontendContent(**params):
"""
Deliver frontend content of module alarms
:return: data of alarms
"""
from emonitor.extensions import monitorserver
if 'alarmfilter' not in session:
session['alarmfilter'] = '7'
if request.args.get('alarmfilter'): # filter for alarms last x days, -1 no filter set
session['alarmfilter'] = request.args.get('alarmfilter', '7')
if 'area' in request.args:
params['area'] = request.args.get('area')
if 'state' in request.args:
params['activeacc'] = request.args.get('state')
if request.form.get('action') == 'updatealarm':
if request.form.get('alarm_id') != 'None': # update alarm
alarm = Alarm.getAlarms(request.form.get('alarm_id'))
else: # create new alarm
d = datetime.datetime.strptime('%s %s' % (request.form.get('edit_timestamp_date'), request.form.get('edit_timestamp_time')), "%d.%m.%Y %H:%M:%S")
alarm = Alarm(d, request.form.get('edit_keyid'), 2, 0)
db.session.add(alarm)
params['activeacc'] = 1
try:
alarm.timestamp = datetime.datetime.strptime('%s %s' % (request.form.get('edit_timestamp_date'), request.form.get('edit_timestamp_time')), "%d.%m.%Y %H:%M:%S")
except ValueError:
alarm.timestamp = datetime.datetime.now()
alarm._key = request.form.get('edit_key')
alarm.set(u'id.key', request.form.get('edit_keyid'))
alarm.set(u'k.cars1', request.form.get('val_cars1'))
alarm.set(u'k.cars2', request.form.get('val_cars2'))
alarm.set(u'k.material', request.form.get('val_material'))
alarm.set(u'marker', request.form.get('marker'))
alarm.set(u'id.city', request.form.get('edit_city'))
_city = City.getCities(id=request.form.get('edit_cityname'))
if _city:
alarm.set(u'city', _city.name)
else:
alarm.set(u'city', request.form.get('edit_cityname'))
alarm.set(u'streetno', request.form.get('edit_streetno'))
street = Street.getStreets(id=request.form.get('edit_addressid'))
hnumber = None
if street:
alarm.set(u'id.address', street.id)
try:
hnumber = [h for h in street.housenumbers if h.number == request.form.get('edit_streetno').split()[0]]
if len(hnumber) > 0:
alarm.set(u'lat', hnumber[0].points[0][0])
alarm.set(u'lng', hnumber[0].points[0][1])
except IndexError:
pass
elif request.form.get('edit_addressid') == 'None':
alarm.set(u'id.address', '')
else:
alarm.set(u'id.address', request.form.get('edit_addressid'))
alarm.set(u'address', request.form.get('edit_address'))
if request.form.get('edit_object') != '0':
alarm.set(u'id.object', request.form.get('edit_object'))
else: # remove object definition if present
if u'id.object' in alarm.attributes:
del alarm.attributes[u'id.object']
if u'object' in alarm.attributes:
del alarm.attributes[u'object']
alarm.set(u'priority', request.form.get('edit_priority'))
alarm.set(u'remark', request.form.get('edit_remark'))
alarm.set(u'person', request.form.get('edit_person'))
if request.form.get(u'edit_address2').strip() != '':
alarm.set(u'address2', request.form.get('edit_address2'))
if (request.form.get(u'marker') == '1' and not hnumber) or request.form.get('update_position') == '1':
alarm.set(u'routing', '')
alarm.set(u'lat', request.form.get('lat'))
alarm.set(u'lng', request.form.get('lng'))
alarm.set(u'zoom', request.form.get('zoom'))
try:
d = datetime.datetime.strptime('%s %s' % (request.form.get('edit_endtimestamp_date'), request.form.get('edit_endtimestamp_time')), "%d.%m.%Y %H:%M:%S")
except ValueError:
d = datetime.datetime.now()
alarm.set(u'endtimestamp', d)
db.session.commit()
signal.send('alarm', 'updated', alarmid=alarm.id)
if request.form.get('alarm_id') == u'None': # create new
Alarm.changeState(alarm.id, 0) # prepare alarm
return redirect('/alarms?area=%s&state=1' % params['area'])
elif alarm.state == 1: # active alarm update
monitorserver.sendMessage('0', 'reset') # refresh monitor layout
return redirect('/alarms?area=%s&state=0' % params['area'])
elif request.args.get('action') == 'editalarm':
if request.args.get('alarmid', '0') == '0': # add new alarm
alarm = Alarm(datetime.datetime.now(), '', 2, 0)
#flash(babel.gettext(u'alarms.alarmadded'), 'alarms.add')
else: # edit alarm
alarm = Alarm.getAlarms(id=request.args.get('alarmid'))
return render_template('frontend.alarms_edit.html', alarm=alarm, cities=City.getCities(), objects=AlarmObject.getAlarmObjects(), cars=Car.getCars(), departments=Department.getDepartments(), frontendarea=params['area'], frontendmodules=frontend.modules, frontendmoduledef=Settings.get('frontend.default'))
elif request.args.get('action') == 'refresh': # refresh alarm section
params['area'] = request.args.get('area')
params['activeacc'] = int(request.args.get('activeacc'))
elif request.args.get('action') == 'finishalarm': # finish selected alarm
Alarm.changeState(int(request.args.get('alarmid')), 2)
params['area'] = request.args.get('area')
elif request.args.get('action') == 'activatealarm': # activate selected alarm
ret = Alarm.changeState(int(request.args.get('alarmid')), 1)
if len(ret) > 0:
flash(render_template_string("{{ _('alarms.carsinuse') }}</br><b>" + ", ".join([r.name for r in sorted(ret, key=attrgetter('name'))]) + "</b>"), 'alarms')
params['area'] = request.args.get('area')
params['activeacc'] = 0
elif request.args.get('action') == 'deletealarm': # delete selected alarm
alarm = Alarm.getAlarms(id=request.args.get('alarmid'))
refresh = 1 or alarm.state == 1 # check if alarm is active
try:
# delete file if not used in any other alarm
c = Alarm.query.filter(Alarm.attributes.any(value=alarm.get('filename'), name="filename")).count()
if c == 1 and os.path.exists("{}{}".format(current_app.config.get('PATH_DONE'), alarm.get('filename'))):
os.remove("{}{}".format(current_app.config.get('PATH_DONE'), alarm.get('filename')))
except:
pass
alarm.state = -1
alarm.updateSchedules()
db.session.delete(alarm)
db.session.commit()
if refresh:
monitorserver.sendMessage('0', 'reset') # refresh monitor layout
signal.send('alarm', 'deleted', alarmid=request.args.get('alarmid'))
elif request.args.get('action') == 'archivealarm': # archive selected alarms, id=0 == all
if ";" in request.args.get('alarmid'): # archive selected alarms
for alarmid in request.args.get('alarmid').split(';'):
Alarm.changeState(int(alarmid), 3)
elif int(request.args.get('alarmid')) == 0: # archive all alarms
Alarm.changeStates(3)
else: # archive single selected alarm
Alarm.changeState(int(request.args.get('alarmid')), 3)
params['area'] = request.args.get('area')
stats = dict.fromkeys(Alarm.ALARMSTATES.keys() + ['3'], 0)
for s, c in Alarm.getAlarmCount(days=int(session['alarmfilter'])): # s=state, c=count(ids of state)
if str(s) in stats.keys():
stats[str(s)] = c
if 'area' not in params:
params['area'] = 'center'
if 'activeacc' not in params:
params['activeacc'] = 0
return render_template('frontend.alarms_smallarea.html', alarmstates=Alarm.ALARMSTATES, stats=stats, frontendarea=params['area'], activeacc=str(params['activeacc']), printdefs=Printers.getActivePrintersOfModule('alarms'), frontendmodules=frontend.modules, frontendmoduledef=Settings.get('frontend.default'), alarmfilter=session['alarmfilter'])
def getFrontendData(self):
"""
Deliver frontend content of module alarms (ajax)
:return: rendered template as string or json dict
"""
from emonitor.extensions import monitorserver
if "download" in request.path: # deliver file
with open('{}{}'.format(current_app.config.get('PATH_TMP'), request.path.split('download/')[-1]), 'rb') as data:
si = StringIO.StringIO(data.read()).getvalue()
output = make_response(si)
if request.path.split('/')[-1].startswith('temp'): # remove if filename starts with temp == temporary file
os.remove('{}{}'.format(current_app.config.get('PATH_TMP'), request.path.split('download/')[-1]))
output.headers["Content-Disposition"] = "attachment; filename=report.{}".format(request.path.split('.')[-1])
output.headers["Content-type"] = "application/x.download"
return output
if request.args.get('action') == 'editalarm':
if request.args.get('alarmid', '0') == '0': # add new alarm
alarm = Alarm(datetime.datetime.now(), '', 2, 0)
else: # edit alarm
alarm = Alarm.getAlarms(id=request.args.get('alarmid'))
return render_template('frontend.alarms_edit.html', alarm=alarm, cities=City.getCities(), objects=AlarmObject.getAlarmObjects(), cars=Car.getCars(), frontendarea=request.args.get('frontendarea'))
elif request.args.get('action') == 'alarmmonitor': # send alarm to monitor
for monitor in Monitor.getMonitors():
scheduler.deleteJobForEvent('changeLayout') # send update to monitors
for l in MonitorLayout.getLayouts(mid=int(monitor.id)):
if l.trigger == 'alarm_added':
#monitorserver.sendMessage(str(monitor.id), 'load', ['layoutid=%s' % l.id, 'alarmid=%s' % request.args.get('alarmid')]) TODO changed from list
monitorserver.sendMessage(str(monitor.id), 'load', layoutid=l.id, alarmid=request.args.get('alarmid'))
elif request.args.get('action') == 'printalarm':
Printers.getPrinters(pid=int(request.args.get('printerdef'))).doPrint(object=Alarm.getAlarms(id=int(request.args.get('alarmid'))), id=request.args.get('alarmid'), copies=1)
return ""
elif request.args.get('action') == 'routeinfo':
return render_template('frontend.alarms_routing.html', routing=Alarm.getAlarms(id=request.args.get('alarmid')).getRouting())
elif request.args.get('action') == 'routecoords':
return jsonify(Alarm.getAlarms(id=request.args.get('alarmid')).getRouting())
elif request.args.get('action') == 'message':
return render_template('frontend.alarms_message.html', alarm=Alarm.getAlarms(id=request.args.get('alarmid')), messagestates=AlarmHistory.historytypes, area=request.args.get('area'), reload=request.args.get('reload', 'true'))
elif request.args.get('action') == 'addmessage': # add message
if request.form.get('messagetext') != "":
alarm = Alarm.getAlarms(request.form.get('alarmid'))
alarm.addHistory(request.form.get('messagestate'), request.form.get('messagetext'))
db.session.commit()
return render_template('frontend.alarms_message.html', alarm=Alarm.getAlarms(request.form.get('alarmid')), messagestates=AlarmHistory.historytypes, area=request.args.get('area'))
elif request.args.get('action') == 'deletemessage': # delete selected message
alarm = Alarm.getAlarms(request.args.get('alarmid'))
for msg in alarm.history:
if str(msg.timestamp) == request.args.get('datetime'):
db.session.delete(msg)
db.session.commit()
return render_template('frontend.alarms_message.html', alarm=Alarm.getAlarms(request.args.get('alarmid')), messagestates=AlarmHistory.historytypes, area=request.args.get('area'))
elif request.args.get('action') == 'housecoordinates': # return a dict with coordinats of housenumber
if request.args.get('alarmid') != "None":
alarm = Alarm.getAlarms(id=int(request.args.get('alarmid')))
if alarm and alarm.housenumber:
return {'lat': map(lambda x: x[0], alarm.housenumber.points), 'lng': map(lambda x: x[1], alarm.housenumber.points)}
return []
elif request.args.get('action') == 'evalhouse': # try to eval housenumer
street = Street.getStreets(id=request.args.get('streetid'))
if street:
points = dict(lat=[], lng=[])
for hn in street.housenumbers:
if str(hn.number) == request.args.get('housenumber').strip():
points['lat'].extend(map(lambda x: x[0], hn.points))
points['lng'].extend(map(lambda x: x[1], hn.points))
return points
return {}
elif request.args.get('action') == 'alarmsforstate': # render alarms for given state
if 'alarmfilter' not in session:
session['alarmfilter'] = 7
return render_template('frontend.alarms_alarm.html', alarms=Alarm.getAlarms(days=int(session['alarmfilter']), state=int(request.args.get('state', '-1'))), printdefs=Printers.getActivePrintersOfModule('alarms'))
elif request.args.get('action') == 'collective': # render collective form
reports = [r for r in AlarmReport.getReports() if r.reporttype.multi]
if len(reports) == 0:
return ""
return render_template('frontend.alarms_collective.html', alarms=Alarm.getAlarms(state=2), reports=reports)
elif request.args.get('action') == 'docollective': # build collective form
if request.args.get('ids') == "":
ids = []
else:
ids = request.args.get('ids').split(',')
f = AlarmReport.getReports(request.args.get('form')).createReport(ids=ids)
_path, _filename = os.path.split(f)
shutil.move(f, "{}{}".format(current_app.config.get('PATH_TMP'), _filename))
return _filename
elif request.args.get('action') == 'alarmpriocars': # show prio cars
cars = []
c = Settings.getIntList('alarms.spc_cars.{}'.format(request.args.get('state')))
if len(c) == 0:
return ""
for alarm in Alarm.getAlarms(state=request.args.get('state')):
cars.extend([car for car in alarm.cars1 if car.id in c])
cars = Counter(cars)
return render_template('frontend.alarms_cars.html', cars=cars)
elif request.args.get('action') == 'showdetailsform': # build alarmdetails edtit form
alarm = Alarm.getAlarms(id=request.args.get('alarmid'))
if alarm.street.city:
fields = AlarmField.getAlarmFields(dept=alarm.street.city.dept)
else:
fields = AlarmField.getAlarmFields(dept=Department.getDefaultDepartment().id)
return render_template('frontend.alarms_fields.html', alarm=alarm, fields=fields, reports=AlarmReport.getReports())
elif request.args.get('action') == 'saveextform': # store ext-form values
alarm = Alarm.getAlarms(id=request.form.get('alarmid'))
for field in AlarmField.getAlarmFields(dept=alarm.street.city.dept):
field.saveForm(request, alarm)
db.session.commit()
return ""
| StarcoderdataPython |
11324076 | from os.path import join, dirname
from setuptools import setup, find_packages
exec(open(join(dirname(__file__),
'src', 'JenkinsLibrary', 'version.py')).read())
PACKAGE = 'robotframework-jenkins'
DESCRIPTION = '''Library for Robot Framework for Jenkins interaction'''
REQUIREMENTS = [
'python-jenkins>=1.*',
'robotframework>=3.*',
'requests>=2.*'
]
with open('README.md') as f:
LONG_DESCRIPTION = f.read()
CLASSIFIERS = '''
Development Status :: 5 - Production/Stable
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework
Framework :: Robot Framework :: Library
'''.strip().splitlines()
setup(
name=PACKAGE,
package_dir={'': 'src'},
packages=find_packages('src'),
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/okgolove/robotframework-jenkins',
keywords=['jenkins', 'robotframework', 'robot', 'testing'],
classifiers=CLASSIFIERS,
install_requires=REQUIREMENTS,
)
| StarcoderdataPython |
3514841 | <gh_stars>0
import re
from scrappybara.preprocessing.sanitization import sanitize
class Tokenizer(object):
"""Rule based tokenizer, using precompiled regex patterns"""
# SANITIZATION
# -------------------------------------------------------------------------->
__re_sanitization = [
(re.compile(r'[‘’′]'), "'"),
(re.compile(r'[“”″]'), '"'),
(re.compile(r'[–—]+'), ' - '),
]
    # NON-SPLITTABLE TEXT
# -------------------------------------------------------------------------->
# Unambiguous patterns for text that can't be split
__re_protect = [
re.compile(r"\s'n'\s", re.I), # Abbreviation for "and"
re.compile(r"\s('n|n')\s", re.I), # Abbreviations for "and" (shorter than above)
re.compile(r'(https?://|www\.)[^\s.]+(\.[^\s.]+)+(/[^\s]*)*', re.I), # URLs
re.compile(r'[^\s.]+(\.[^\s.]+)+/[^\s]*', re.I), # URLs (shorter than above)
re.compile(r'([:;]-?\))|([:;]-?\()', re.I), # Common smileys
]
    # NON-SPLITTABLE CHARS
# -------------------------------------------------------------------------->
__nsd = '_NSD_' # for a dot
__re_no_split_dot = re.compile(r'%s' % __nsd)
__nsge = '_NSGE_' # for '>='
__re_no_split_gte = re.compile(r'%s' % __nsge)
__nsle = '_NSLE_' # for '<='
__re_no_split_lte = re.compile(r'%s' % __nsle)
__rnsge = '_RNSGE_' # for '=>'
__re_no_split_rgte = re.compile(r'%s' % __rnsge)
__rnsle = '_RNSLE_' # for '=<'
__re_no_split_rlte = re.compile(r'%s' % __rnsle)
# Common abbreviations that can be followed by an uppercase letter.
# These abbreviations must be unambiguous (can't be equal to any common word).
# Abbreviations below are listed without their ending ".", and in lower case.
__abbr = [
# Age & Genders
'm', 'mr', 'mrs', 'ms', 'sr',
# Jobs & degrees
'dr', 'prof', 'rev', 'gov', 'sen', 'rep',
# Military ranks
'lt', 'sgt',
# Locations
'ave', 'st',
# Misc
'ed', 'feat',
]
__abbr_options = r'(%s)\.' % '|'.join([abbr for abbr in __abbr])
# Patterns that analyze the surrounding context of a single symbol to protect parts from splitting
__re_no_split = [
# Dots
(re.compile(r'\.(?=\s[a-z])'), __nsd), # Dot followed by space and lower case letter
(re.compile(r'\.(?=\s?[,:;?!])'), __nsd), # Dot followed by punctuation
(re.compile(r'\.(?=\s\d+\b)'), __nsd), # Dot followed by a digit
(re.compile(r'\b%s(?=\s[A-Z])' % __abbr_options, re.I), r'\g<1>%s' % __nsd), # Common abbreviation
# Quotes
(re.compile(r'(?<=\s)(\d[\d,.]*)\'(\d[\d,.]*)"', re.I), r'\g<1>ft\g<2>in'), # Inches and feet
(re.compile(r'(?<=\s)(\d[\d,.]*)\'', re.I), r'\g<1>ft'), # Feet
(re.compile(r'(?<=\s)(\d[\d,.]*)"', re.I), r'\g<1>in'), # Inches
# < and >
(re.compile(r'\s?>\s?=\s?', re.I), __nsge), # >=
(re.compile(r'\s?<\s?=\s?', re.I), __nsle), # <=
(re.compile(r'\s?=\s?>\s?', re.I), __rnsge), # =>
(re.compile(r'\s?=\s?<\s?', re.I), __rnsle), # =<
]
# CHARS TO ALWAYS SEGMENT OFF
# -------------------------------------------------------------------------->
__re_always_split = re.compile(r'([.?!]{2,}|…+|=+|!+|\?+|"+|;+|\|+|\\+|\(+|\)+|{+|}+|\[+|]+|–+)')
# DOTS
# They cause issues because of abbreviations and acronyms.
# We protect some tokens first, then seperate remaining dots in a token.
# -------------------------------------------------------------------------->
# Patterns that protect a single token from splitting
# Full match
__re_token_nosplit_dot = [
re.compile(r'(\w\.){2,}', re.I), # Any series of dotted single chars: "f.f.f."
re.compile(r'\.?([^.]+\.){2,}[^.]+', re.I), # Any series of dotted words: "asdf.asdfadsf.asdf"
re.compile(r'[a-zA-Z]\.', re.I), # Single char followed by a dot: "A. Gray"
re.compile(r'\d\.(?=(\s[^A-Z]))', re.I), # Single digit followed by a dot (the number doesn't need the dot)
re.compile(r'(\d+\.){2,}', re.I), # Series of numbers and dots
re.compile(r'\.{2,}', re.I), # Multiple dots
]
# Split dots in a token
# Partial match
__re_token_split_dot = [
(re.compile(r'([a-z]+)(\.)([A-Z\'])'), r'\g<1> \g<2> \g<3>'),
(re.compile(r'\.$'), r' \g<0>'),
]
# TEXT TO SEGMENT OFF ACCORDING TO CONTEXT
# -------------------------------------------------------------------------->
__re_segment_off = [
# Special words to split
(re.compile(r'(?<=\b)(can)(not)(?=\b)', re.I), r'\g<1> \g<2>'),
# Star *
(re.compile(r'(?<=[a-z])\*+(?=[a-z])', re.I), ' * '),
# Plus +
(re.compile(r'(?<=[a-z][a-z][a-z])\+(?=[a-z]{3,})', re.I), ' + '),
(re.compile(r'(\d+)\+(?=[a-z])', re.I), r'\g<1>+ '),
# Ampercase &
(re.compile(r'(?<=\b)&(?!\b)'), ' & '),
(re.compile(r'(?<!\b)&(?=\b)'), ' & '),
(re.compile(r'([a-zA-Z][a-z]{2,})&([a-zA-Z][a-z]{2,})'), r'\g<1> & \g<2>'),
# Slash /
(re.compile(r'(?<=[a-z\d][a-z])/', re.I), ' / '),
(re.compile(r'/(?=[a-z][a-z\d])', re.I), ' / '),
# Percentage %
(re.compile(r'(?<=[a-z])%', re.I), ' % '),
(re.compile(r'%(?=[a-z])', re.I), ' % '),
# Colon :
(re.compile(r':(?=\s|$)'), ' : '),
(re.compile(r'(?<=\s):'), ' : '),
(re.compile(r'([a-zA-Z][a-z]{2,}):([a-zA-Z][a-zA-Z]+)'), r'\g<1> : \g<2>'),
(re.compile(r'([a-z]{3,}):(\d+)', re.I), r'\g<1> : \g<2>'),
(re.compile(r'([0-9]+):([a-zA-Z]+)', re.I), r'\g<1> : \g<2>'), # 2:Engage
# Greater than >
(re.compile(r'(?<!-)>'), ' > '),
# Less than <
(re.compile(r'<(?!-)'), ' < '),
# Comma ,
(re.compile(r'(?<!\d),'), ' , '),
(re.compile(r',(?!\d)'), ' , '),
# Single quote '
(re.compile(r"(^|\s)'(?!s)"), " ' "),
(re.compile(r"'(\s|$)"), " ' "),
(re.compile(r"(?<=\w\w\w)'(?=\w\w\w)", re.I), " ' "),
(re.compile(r"(^|\s)'(?=s\w)", re.I), " ' "),
(re.compile(r"(n't|'re|'ll|'ve|'m|'d|'s)(?=\s|$)", re.I), r' \g<1> '),
# Bullets (often * or -)
(re.compile(r'\s([*-])([a-zA-Z]+)(?=\s|$)'), r' \g<1> \g<2> '),
]
# TEXT TO REATTACH
# -------------------------------------------------------------------------->
__re_reattach = [
(re.compile(r'\b([A-Z])\s([A-Z])\s&\s([A-Z])\b'), r'\g<1>\g<2>&\g<3>'),
(re.compile(r'\b([A-Z])\s&\s([A-Z])\s([A-Z])\b'), r'\g<1>&\g<2>\g<3>'),
(re.compile(r'\b([A-Z])\s&\s([A-Z])\b'), r'\g<1>&\g<2>'),
(re.compile(r'(\.{2,}) \.'), r'\g<1>.'),
]
def __call__(self, text):
"""Returns a list of tokens"""
if not text:
return ['']
text = sanitize(text, self.__re_sanitization)
# Protect tokens
protected, text = self.__protect_text(text)
# Protect single symbols
for nsp in self.__re_no_split:
text = re.sub(nsp[0], nsp[1], text)
# Segment off unambiguous patterns
text = re.sub(self.__re_always_split, r' \g<0> ', text)
# Segment off ending dots
tokens = []
for token in text.split():
if any([re.fullmatch(regex, token) for regex in self.__re_token_nosplit_dot]):
tokens.append(token)
else:
for pattern, rep in self.__re_token_split_dot:
token = re.sub(pattern, rep, token)
tokens.extend(token.split())
text = ' '.join(tokens)
# Segment off other symbols
for pattern, replace in self.__re_segment_off:
text = re.sub(pattern, replace, text)
text = ' '.join(text.split())
# Re-establish symbols
text = re.sub(self.__re_no_split_dot, '.', text)
text = re.sub(self.__re_no_split_gte, ' >= ', text)
text = re.sub(self.__re_no_split_lte, ' <= ', text)
text = re.sub(self.__re_no_split_rgte, ' => ', text)
text = re.sub(self.__re_no_split_rlte, ' =< ', text)
# Re-attach text
for replace in self.__re_reattach:
text = re.sub(replace[0], replace[1], text)
# Re-establish protected patterns
for key, value in protected.items():
text = text.replace(key, value)
return text.split()
def __protect_text(self, text):
"""Detects patterns that should not be tokenized"""
protected = {}
index = 1
for pattern in self.__re_protect:
match = re.search(pattern, text)
while match:
token = match.group()
key = '_PROTECTED_%d_' % index
protected[key] = token
text = text.replace(token, key)
match = re.search(pattern, text)
index += 1
return protected, text
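# Illustrative usage (added; not part of the original module). It assumes the
# scrappybara package is importable so the `sanitize` import above resolves.
if __name__ == '__main__':
    _tokenizer = Tokenizer()
    print(_tokenizer("Dr. Smith can't reach www.example.com, but Mr. A. Gray can."))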
| StarcoderdataPython |
4872270 | <filename>examples/bio_based/run_test_SMA2.py
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 11:20, 20/10/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from opfunu.cec_basic.cec2014_nobias import *
from mealpy.bio_based import SMA
from mealpy.swarm_based import HGS
from mealpy.human_based import GSKA
from mealpy.physics_based import EO
from mealpy.evolutionary_based import MA, FPA, ES, EP, DE
from mealpy.problem import Problem
from mealpy.utils.termination import Termination
# Setting parameters
# A - Different way to provide lower bound and upper bound. Here are some examples:
## A1. When you have different lower bound and upper bound for each parameters
problem_dict1 = {
"obj_func": F5,
"lb": [-3, -5, 1, -10, ],
"ub": [5, 10, 100, 30, ],
"minmax": "min",
"verbose": True,
}
if __name__ == "__main__":
problem_obj1 = Problem(problem_dict1)
    ### Your parameter problem can be an instance of the Problem class or just a dict like above
model1 = DE.JADE(problem_obj1, epoch=100, pop_size=50)
model1.solve(mode="sequential") | StarcoderdataPython |
4842573 | <filename>paper_uploads/views/__init__.py<gh_stars>1-10
from . import collection, file, image # noqa: 401
| StarcoderdataPython |
6556774 | <reponame>VimalArjunan51437/HACKERRANK-PYTHON
# FOR LEARNING PURPOSE
# BASICS OF STRINGS
def merge(string, k):
for i in range(0, len(string), k):
ans = "" # empty string to store sub_string
for x in string[i : i+k]:
if (x not in ans): #it will not allow repeated character in substring
ans+=x
print(ans)
string=input("Enter the sequencse : ") # remove the comment while trying it.
k=int(input()) # Length of each Sub-string
merge(string,k) # Calling the Function | StarcoderdataPython |
11219556 | <reponame>Nataliyi/aioaerospike
from dataclasses import dataclass
from enum import IntEnum
from struct import Struct
from typing import List, Type
from bcrypt import hashpw
BCRYPT_SALT = b"$2a$10$7EqJtq98hPqEX7fNZaFWoO"
class AdminCommandsType(IntEnum):
AUTHENTICATE = 0
CREATE_USER = 1
DROP_USER = 2
SET_PASSWORD = 3
CHANGE_PASSWORD = 4
GRANT_ROLES = 5
REVOKE_ROLES = 6
QUERY_USERS = 9
CREATE_ROLE = 10
DROP_ROLE = 11
GRANT_PRIVILEGES = 12
REVOKE_PRIVILEGES = 13
SET_WHITELIST = 14
QUERY_ROLES = 16
LOGIN = 20
class FieldTypes(IntEnum):
USER = 0
PASSWORD = 1
OLD_PASSWORD = 2
CREDENTIAL = 3
CLEAR_PASSWORD = 4
SESSION_TOKEN = 5
SESSION_TTL = 6
ROLES = 10
ROLE = 11
PRIVILEGES = 12
WHITELIST = 13
@dataclass
class Field:
FORMAT = Struct("!IB")
field_type: FieldTypes
data: bytes
def pack(self) -> bytes:
length = len(self.data)
return self.FORMAT.pack(length, self.field_type) + self.data
@classmethod
def parse(cls: Type["Field"], data: bytes) -> "Field":
length, field_type = cls.FORMAT.unpack(data[: cls.FORMAT.size])
data = data[cls.FORMAT.size : length]
return cls(field_type=field_type, data=data)
def __len__(self):
return len(self.data)
@dataclass
class AdminMessage:
FORMAT = Struct("!16xBB")
command_type: AdminCommandsType
fields: List[Field]
def pack(self) -> bytes:
fields_count = len(self.fields)
fields_data = b""
for field in self.fields:
fields_data += field.pack()
return self.FORMAT.pack(self.command_type, fields_count) + fields_data
@classmethod
def parse(cls: Type["AdminMessage"], data: bytes) -> "AdminMessage":
command_type, fields_count = cls.FORMAT.unpack(data[: cls.FORMAT.size])
fields = []
data_left = data[cls.FORMAT.size :]
for _i in range(fields_count):
field = Field.parse(data_left)
fields.append(field)
data_left = data_left[Field.FORMAT.size + len(field) :]
return cls(fields=fields, command_type=command_type)
@classmethod
def login(cls: Type["AdminMessage"], user: str, password: str) -> bytes:
        hashed_pass = hash_password(password)
        user_field = Field(FieldTypes.USER, user.encode("utf-8"))
        password_field = Field(FieldTypes.PASSWORD, hashed_pass)
return cls(
command_type=AdminCommandsType.LOGIN,
fields=[user_field, password_field],
).pack()
def hash_password(password: str) -> bytes:
"""
Hashes password according to Aerospike algorithm
"""
return hashpw(password.encode("utf-8"), BCRYPT_SALT)
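# Brief illustrative sketch (added; not in the original module): build a login
# payload for a placeholder user/password pair.
if __name__ == "__main__":
    payload = AdminMessage.login("admin", "example-password")
    # The packed bytes start with the 16-byte padded header area, then the command
    # type (LOGIN) and field count, followed by the user and password fields.
    print(len(payload), "bytes")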
| StarcoderdataPython |
8029676 | #! /usr/bin/python3
import sys
import os
import time
from typing import Dict, List, Set, Tuple
import re
import math
from collections import defaultdict
Values = Tuple[int, ...]
Particle = Tuple[Values, Values, Values]
def get_manhattan_value(values: Values) -> int:
    return abs(values[0]) + abs(values[1]) + abs(values[2])
def part1(particles: List[Particle]) -> int:
    closest_particle = 0
    lowest_acceleration = sys.maxsize
    lowest_velocity = sys.maxsize
    for index, (_, velocity, acceleration) in enumerate(particles):
        acceleration_total = get_manhattan_value(acceleration)
        if acceleration_total < lowest_acceleration:
            lowest_acceleration = acceleration_total
            closest_particle = index
            lowest_velocity = get_manhattan_value(velocity)
        if acceleration_total == lowest_acceleration and get_manhattan_value(velocity) < lowest_velocity:
            closest_particle = index
            lowest_velocity = get_manhattan_value(velocity)
return closest_particle
def get_quadratic_abc(particle_a: Particle, particle_b: Particle, coordinate: int) -> Tuple[float, float, int]:
p_a_p = particle_a[0][coordinate]
p_a_a = particle_a[2][coordinate]
p_a_v = particle_a[1][coordinate] + p_a_a / 2
p_b_p = particle_b[0][coordinate]
p_b_a = particle_b[2][coordinate]
p_b_v = particle_b[1][coordinate] + p_b_a / 2
return (p_a_a - p_b_a) / 2, p_a_v - p_b_v, p_a_p - p_b_p
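# Note added for clarity: with the puzzle's update order (velocity += acceleration,
# then position += velocity) the position after t whole steps is
#     p(t) = p0 + v0*t + a*t*(t+1)/2 = p0 + (a/2)*t**2 + (v0 + a/2)*t,
# which is why the helper above returns the coefficients (a/2, v + a/2, p). Two
# particles collide on an axis when the difference of their quadratics reaches zero.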
def get_collision_times(particle_a: Particle, particle_b: Particle) -> List[int]:
a, b, c = get_quadratic_abc(particle_a, particle_b, 0)
times: List[float] = []
if a == 0:
if b != 0:
times.append(-c / b)
else:
bb = b * b
ac4 = a * c * 4
if bb < ac4:
return []
elif bb == ac4:
times.append(-b / (2 * a))
else:
rt = math.sqrt(bb - ac4)
times.append((-b + rt) / (2 * a))
times.append((-b - rt) / (2 * a))
int_times = map(int, filter(lambda t: t >= 0 and round(t) == t, times))
result: List[int] = []
for t in int_times:
collide = True
for k in [1, 2]:
a, b, c = get_quadratic_abc(particle_a, particle_b, k)
if a * t * t + b * t + c != 0:
collide = False
break
if collide:
result.append(t)
return result
def part2(particles: List[Particle]) -> int:
collisions: Dict[int, List[Tuple[int, int]]] = defaultdict(list)
for this_index in range(len(particles) - 1):
for other_index in range(this_index + 1, len(particles)):
            for time in get_collision_times(particles[this_index], particles[other_index]):
collisions[time].append((this_index, other_index))
particle_indexes: Set[int] = set(range(len(particles)))
for time in sorted(list(collisions.keys())):
        collided_to_remove: Set[int] = set()
        for index_a, index_b in collisions[time]:
            if index_a in particle_indexes and index_b in particle_indexes:
                collided_to_remove.add(index_a)
                collided_to_remove.add(index_b)
        particle_indexes -= collided_to_remove
return len(particle_indexes)
def solve(particles: List[Particle]) -> Tuple[int, int]:
return (
part1(particles),
part2(particles)
)
line_regex = re.compile(
r"^p=<(?P<p>[^>]+)>, v=<(?P<v>[^>]+)>, a=<(?P<a>[^>]+)>$")
def parse_line(line: str) -> Particle:
match = line_regex.match(line)
if match:
return \
tuple(map(int, match.group("p").split(","))), \
tuple(map(int, match.group("v").split(","))), \
tuple(map(int, match.group("a").split(",")))
raise Exception("Bad format", line)
def get_input(file_path: str) -> List[Particle]:
if not os.path.isfile(file_path):
raise FileNotFoundError(file_path)
with open(file_path, "r") as file:
return [parse_line(line) for line in file.readlines()]
def main():
if len(sys.argv) != 2:
raise Exception("Please, add input file path as parameter")
start = time.perf_counter()
part1_result, part2_result = solve(get_input(sys.argv[1]))
end = time.perf_counter()
print("P1:", part1_result)
print("P2:", part2_result)
print()
print(f"Time: {end - start:.7f}")
if __name__ == "__main__":
main()
| StarcoderdataPython |
6427662 | import inspect
import itertools
import logging
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from timeit import default_timer
import nengo
import nengo.utils.numpy as npext
import numpy as np
from nengo.builder.processes import SimProcess
from nengo.exceptions import ReadonlyError, SimulatorClosed, ValidationError
from nengo.simulator import SimulationData as NengoSimulationData
from nengo_loihi.builder import Model
from nengo_loihi.emulator import EmulatorInterface
from nengo_loihi.hardware import HAS_NXSDK, HardwareInterface
logger = logging.getLogger(__name__)
class Simulator:
"""NengoLoihi simulator for Loihi hardware and emulator.
The simulator takes a `nengo.Network` and builds internal data structures
to run the model defined by that network on Loihi emulator or hardware.
Run the simulator with the `.Simulator.run` method, and access probed data
through the ``data`` attribute.
Building and running the simulation allocates resources. To properly free
these resources, call the `.Simulator.close` method. Alternatively,
`.Simulator.close` will automatically be called if you use
``with`` syntax::
with nengo_loihi.Simulator(my_network) as sim:
sim.run(0.1)
print(sim.data[my_probe])
Note that the ``data`` attribute is still accessible even when a simulator
has been closed. Running the simulator, however, will raise an error.
Parameters
----------
network : Network
A network object to be built and then simulated. If None,
then the *model* parameter must be provided instead.
dt : float, optional (Default: 0.001)
The length of a simulator timestep, in seconds.
seed : int, optional (Default: None)
A seed for all stochastic operators used in this simulator.
Will be set to ``network.seed + 1`` if not given.
model : Model, optional (Default: None)
A `.Model` that contains build artifacts to be simulated.
Usually the simulator will build this model for you; however, if you
want to build the network manually, or you want to inject build
artifacts in the model before building the network, then you can
pass in a `.Model` instance.
precompute : bool, optional (Default: None)
Whether model inputs should be precomputed to speed up simulation.
When *precompute* is False, the simulator will be run one step
at a time in order to use model outputs as inputs in other parts
of the model. By default, the simulator will choose ``True`` if it
works for your model, and ``False`` otherwise.
target : str, optional (Default: None)
Whether the simulator should target the emulator (``'sim'``) or
Loihi hardware (``'loihi'``). If None, *target* will default to
``'loihi'`` if NxSDK is installed, and the emulator if it is not.
progress_bar : bool or `nengo.utils.progress.ProgressBar`, optional
Progress bar for displaying build and simulation progress. If ``True``, the
default progress bar will be used. If ``False``, the progress bar will be
disabled. For more control, pass in a ``ProgressBar`` instance.
remove_passthrough : bool, optional
Whether to remove passthrough `nengo.Node` objects from the model (i.e. Nodes
with ``output=None`` that only act as intermediaries between other objects).
This will often allow more of the network to be run on-chip, but can sometimes
require significantly larger connections (e.g. more input and output axons).
hardware_options : dict, optional (Default: {})
Dictionary of additional configuration for the hardware.
See `.hardware.HardwareInterface` for possible parameters.
Attributes
----------
closed : bool
Whether the simulator has been closed.
Once closed, it cannot be reopened.
data : ProbeDict
The dictionary mapping from Nengo objects to the data associated
with those objects. In particular, each `nengo.Probe` maps to
the data probed while running the simulation.
model : Model
The `.Model` containing the data structures necessary for
simulating the network.
precompute : bool
Whether model inputs should be precomputed to speed up simulation.
When *precompute* is False, the simulator will be run one step
at a time in order to use model outputs as inputs in other parts
of the model.
"""
def __init__( # noqa: C901
self,
network,
dt=0.001,
seed=None,
model=None,
precompute=None,
target=None,
progress_bar=None,
remove_passthrough=True,
hardware_options=None,
):
# initialize values used in __del__ and close() first
self.closed = True
self.network = network
self.sims = OrderedDict()
self.timers = Timers()
self.timers.start("build")
self.seed = seed
self._n_steps = 0
self._time = 0
hardware_options = {} if hardware_options is None else hardware_options
if progress_bar:
warnings.warn("nengo-loihi does not support progress bars")
if model is None:
self.model = Model(dt=float(dt), label="%s, dt=%f" % (network, dt))
else:
assert isinstance(
model, Model
), "model is not type 'nengo_loihi.builder.Model'"
self.model = model
assert self.model.dt == dt
if network is None:
raise ValidationError("network parameter must not be None", attr="network")
if target is None:
target = "loihi" if HAS_NXSDK else "sim"
self.target = target
logger.info("Simulator target is %r", target)
# Build the network into the model
self.model.build(
network,
precompute=precompute,
remove_passthrough=remove_passthrough,
discretize=target != "simreal",
)
# Create host_pre and host simulators if necessary
self.precompute = self.model.split.precompute
logger.info("Simulator precompute is %r", self.precompute)
assert precompute is None or precompute == self.precompute
if self.model.split.precomputable() and not self.precompute:
warnings.warn(
"Model is precomputable. Setting precompute=False may slow execution."
)
if len(self.model.host_pre.params) > 0:
assert self.precompute
self.sims["host_pre"] = nengo.Simulator(
network=None,
dt=self.dt,
model=self.model.host_pre,
progress_bar=False,
optimize=False,
)
if len(self.model.host.params) > 0:
self.sims["host"] = nengo.Simulator(
network=None,
dt=self.dt,
model=self.model.host,
progress_bar=False,
optimize=False,
)
self._probe_outputs = self.model.params
self.data = SimulationData(self._probe_outputs)
for sim in self.sims.values():
self.data.add_fallback(sim.data)
if seed is None:
if network is not None and network.seed is not None:
seed = network.seed + 1
else:
seed = np.random.randint(npext.maxint)
if target in ("simreal", "sim"):
self.sims["emulator"] = EmulatorInterface(self.model, seed=seed)
elif target == "loihi":
assert HAS_NXSDK, "Must have NxSDK installed to use Loihi hardware"
use_snips = not self.precompute and self.sims.get("host", None) is not None
self.sims["loihi"] = HardwareInterface(
self.model, use_snips=use_snips, seed=seed, **hardware_options
)
else:
raise ValidationError("Must be 'simreal', 'sim', or 'loihi'", attr="target")
assert "emulator" in self.sims or "loihi" in self.sims
self._runner = StepRunner(self.model, self.sims, self.precompute, self.timers)
self.closed = False
self.timers.stop("build")
def __del__(self):
"""Raise a ResourceWarning if we are deallocated while open."""
if not self.closed:
warnings.warn(
"Simulator with model=%s was deallocated while open. Please "
"close simulators manually to ensure resources are properly "
"freed." % self.model,
ResourceWarning,
)
def __enter__(self):
self.timers.start("connect")
for sim in self.sims.values():
sim.__enter__()
self.timers.stop("connect")
return self
def __exit__(self, exc_type, exc_value, traceback):
for sim in self.sims.values():
sim.__exit__(exc_type, exc_value, traceback)
self.close()
@property
def dt(self):
"""(float) The step time of the simulator."""
return self.model.dt
@dt.setter
def dt(self, dummy):
raise ReadonlyError(attr="dt", obj=self)
@property
def n_steps(self):
"""(int) The current time step of the simulator."""
return self._n_steps
@property
def time(self):
"""(float) The current time of the simulator."""
return self._time
def clear_probes(self):
"""Clear all probe histories."""
for probe in self.model.nengo_probes:
self._probe_outputs[probe].clear()
for sim in self.sims.values():
sim.clear_probes()
self.data.reset() # clear probe cache
def close(self):
"""Closes the simulator.
Any call to `.Simulator.run`, `.Simulator.run_steps`,
`.Simulator.step`, and `.Simulator.reset` on a closed simulator raises
a ``SimulatorClosed`` exception.
"""
for sim in self.sims.values():
if not sim.closed:
sim.close()
self._runner = None
self.closed = True
def _probe(self):
"""Copy all probed signals to buffers."""
self._probe_step_time()
for probe in self.model.nengo_probes:
if probe in self.model.chip2host_params:
continue
assert probe.sample_every is None, "probe.sample_every not implemented"
assert "loihi" not in self.sims or "emulator" not in self.sims
loihi_probe = self.model.objs[probe]["out"]
sim = self.sims["loihi" if "loihi" in self.sims else "emulator"]
data = sim.collect_probe_output(loihi_probe)
self._probe_outputs[probe].extend(data)
def _probe_step_time(self):
self._time = self._n_steps * self.dt
def reset(self, seed=None):
"""Reset the simulator state.
Parameters
----------
seed : int, optional
A seed for all stochastic operators used in the simulator.
This will change the random sequences generated for noise
or inputs (e.g. from processes), but not the built objects
(e.g. ensembles, connections).
"""
if self.closed:
raise SimulatorClosed("Cannot reset closed Simulator.")
# TODO: this will involve adding a probe reset function that resets the probe
# synapse state/time back to initial (e.g. `filter_functions` and
# `filter_step_counters` in `emulator.interface.ProbeState`
raise NotImplementedError()
def run(self, time_in_seconds):
"""Simulate for the given length of time.
If the given length of time is not a multiple of ``dt``,
it will be rounded to the nearest ``dt``. For example, if ``dt``
is 0.001 and ``run`` is called with ``time_in_seconds=0.0006``,
the simulator will advance one timestep, resulting in the actual
simulator time being 0.001.
The given length of time must be positive. The simulator cannot
be run backwards.
Parameters
----------
time_in_seconds : float
Amount of time to run the simulation for. Must be positive.
"""
if time_in_seconds < 0:
raise ValidationError(
"Must be positive (got %g)" % (time_in_seconds,), attr="time_in_seconds"
)
steps = int(np.round(float(time_in_seconds) / self.dt))
if steps == 0:
warnings.warn(
"%g results in running for 0 timesteps. Simulator "
"still at time %g." % (time_in_seconds, self.time)
)
else:
logger.info(
"Running %s for %f seconds, or %d steps",
self.model.label,
time_in_seconds,
steps,
)
self.run_steps(steps)
def run_steps(self, steps):
"""Simulate for the given number of ``dt`` steps.
Parameters
----------
steps : int
Number of steps to run the simulation for.
"""
if self.closed:
raise SimulatorClosed("Simulator cannot run because it is closed.")
self._runner.run_steps(steps)
self._n_steps += steps
logger.info("Finished running for %d steps", steps)
self._probe()
def step(self):
"""Advance the simulator by 1 step (``dt`` seconds)."""
self.run_steps(1)
def trange(self, sample_every=None, dt=None):
"""Create a vector of times matching probed data.
Note that the range does not start at 0 as one might expect, but at
the first timestep (i.e., ``dt``).
Parameters
----------
sample_every : float, optional (Default: None)
The sampling period of the probe to create a range for.
If None, a time value for every ``dt`` will be produced.
"""
period = 1 if sample_every is None else sample_every / self.dt
steps = np.arange(1, self.n_steps + 1)
return self.dt * steps[steps % period < 1]
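    # Illustrative note (added): with dt=0.001, after 5 steps trange() returns
    # [0.001, 0.002, 0.003, 0.004, 0.005], while trange(sample_every=0.002)
    # keeps only the sampled steps, [0.002, 0.004].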
class StepRunner:
def __init__(self, model, sims, precompute, timers):
self.model = model
self.timers = timers
self.host_pre = sims.get("host_pre", None)
self.host = sims.get("host", None)
self.emulator = sims.get("emulator", None)
self.loihi = sims.get("loihi", None)
self.queues = self._extract_queues()
self.probes_receivers = self._extract_probes_receivers()
run_steps = {
(
True,
"host_pre",
"host",
"loihi",
): self.loihi_precomputed_host_pre_and_host,
(True, "host_pre", "loihi"): self.loihi_precomputed_host_pre_only,
(True, "host", "loihi"): self.loihi_precomputed_host_only,
(False, "host", "loihi"): self.loihi_bidirectional_with_host,
(True, "loihi"): self.loihi_only,
(False, "loihi"): self.loihi_only,
(
True,
"host_pre",
"host",
"emulator",
): self.emu_precomputed_host_pre_and_host,
(True, "host_pre", "emulator"): self.emu_precomputed_host_pre_only,
(True, "host", "emulator"): self.emu_precomputed_host_only,
(False, "host", "emulator"): self.emu_bidirectional_with_host,
(True, "emulator"): self.emu_only,
(False, "emulator"): self.emu_only,
}
run_config = (precompute,) + tuple(sims)
self.run_steps = run_steps[run_config]
def _chip2host(self, sim):
sim.chip2host(self.probes_receivers)
@staticmethod
def _get_step_f(op, sim):
"""Returns a Process's step function given the SimProcess op and Simulator"""
step_ix = sim._step_order.index(op)
step_f = sim._steps[step_ix]
return inspect.getclosurevars(step_f).nonlocals["step_f"]
@staticmethod
def _get_simprocess_op_dict(ops):
return {id(op.process): op for op in ops if isinstance(op, SimProcess)}
def _extract_probes_receivers(self):
host_simprocess = self._get_simprocess_op_dict(self.model.host.operators)
probes_receivers = OrderedDict()
for probe, receiver in self.model.chip2host_receivers.items():
op = host_simprocess.get(id(receiver.output), None)
assert (
op is not None and op.process is receiver.output
), f"Could not find op for receiver: {receiver}"
receiver_step = self._get_step_f(op, self.host)
probes_receivers[self.model.objs[probe]["out"]] = receiver_step
return probes_receivers
def _extract_queues(self):
host_simprocess = self._get_simprocess_op_dict(self.model.host.operators)
hostpre_simprocess = self._get_simprocess_op_dict(self.model.host_pre.operators)
queues = {}
for sender in itertools.chain(
self.model.host2chip_senders, self.model.host2chip_pes_senders
):
sender_id = id(sender.output)
op = None
if sender_id in host_simprocess:
op, sim = host_simprocess[sender_id], self.host
elif sender_id in hostpre_simprocess:
op, sim = hostpre_simprocess[sender_id], self.host_pre
assert (
op is not None and op.process is sender.output
), f"Could not find op for sender: {sender}"
queues[sender] = self._get_step_f(op, sim).queue
return queues
def _host2chip(self, sim):
# Handle ChipReceiveNode and ChipReceiveNeurons
spikes = []
for sender, receiver in self.model.host2chip_senders.items():
spike_target = self.model.spike_targets[receiver]
assert spike_target is not None
queue = self.queues[sender]
for t, x in queue:
ti = round(t / self.model.dt)
spike_idxs = x.nonzero()[0]
spikes.append((spike_target, ti, spike_idxs))
queue.clear()
# Handle PESModulatoryTarget
errors = OrderedDict()
for sender, receiver in self.model.host2chip_pes_senders.items():
error_target = receiver.error_target
assert error_target is not None
conn = self.model.nengo_probe_conns[error_target]
error_synapse = self.model.objs[conn]["decoders"]
assert error_synapse.learning
queue = self.queues[sender]
for t, x in queue:
ti = round(t / self.model.dt)
errors_ti = errors.get(ti, None)
if errors_ti is None:
errors_ti = OrderedDict()
errors[ti] = errors_ti
if error_synapse in errors_ti:
errors_ti[error_synapse] += x
else:
errors_ti[error_synapse] = x.copy()
queue.clear()
errors = [
(synapse, ti, e) for ti, ee in errors.items() for synapse, e in ee.items()
]
sim.host2chip(spikes, errors)
def emu_precomputed_host_pre_and_host(self, steps):
self.timers.start("run")
self.host_pre.run_steps(steps)
self._host2chip(self.emulator)
self.emulator.run_steps(steps)
self._chip2host(self.emulator)
self.host.run_steps(steps)
self.timers.stop("run")
def emu_precomputed_host_pre_only(self, steps):
self.timers.start("run")
self.host_pre.run_steps(steps)
self._host2chip(self.emulator)
self.emulator.run_steps(steps)
self.timers.stop("run")
def emu_precomputed_host_only(self, steps):
self.timers.start("run")
self.emulator.run_steps(steps)
self._chip2host(self.emulator)
self.host.run_steps(steps)
self.timers.stop("run")
def emu_only(self, steps):
self.timers.start("run")
self.emulator.run_steps(steps)
self.timers.stop("run")
def emu_bidirectional_with_host(self, steps):
self.timers.start("run")
for _ in range(steps):
self.host.step()
self._host2chip(self.emulator)
self.emulator.step()
self._chip2host(self.emulator)
self.timers.stop("run")
def loihi_precomputed_host_pre_and_host(self, steps):
self.timers.start("run")
self.host_pre.run_steps(steps)
self._host2chip(self.loihi)
self.loihi.run_steps(steps, blocking=True)
self._chip2host(self.loihi)
self.host.run_steps(steps)
self.timers.stop("run")
def loihi_precomputed_host_pre_only(self, steps):
self.timers.start("run")
self.host_pre.run_steps(steps)
self._host2chip(self.loihi)
self.loihi.run_steps(steps, blocking=True)
self.timers.stop("run")
def loihi_precomputed_host_only(self, steps):
self.timers.start("run")
self.loihi.run_steps(steps, blocking=True)
self._chip2host(self.loihi)
self.host.run_steps(steps)
self.timers.stop("run")
def loihi_only(self, steps):
self.timers.start("run")
self.loihi.run_steps(steps)
self.timers.stop("run")
def loihi_bidirectional_with_host(self, steps):
self.timers.start("startup")
self.loihi.run_steps(steps, blocking=False)
self.timers.stop("startup")
self.timers.start("run")
for _ in range(steps):
self.host.step()
self._host2chip(self.loihi)
self._chip2host(self.loihi)
self.timers.stop("run")
self.timers.start("shutdown")
logger.info("Waiting for run_steps to complete...")
self.loihi.wait_for_completion()
logger.info("run_steps completed")
self.timers.stop("shutdown")
class Timers(Mapping):
def __init__(self):
self._totals = OrderedDict()
self._last_start = {}
def __getitem__(self, key):
return self._totals[key]
def __iter__(self):
return iter(self._totals)
def __len__(self):
return len(self._totals)
def __repr__(self):
return "<Timers: {%s}>" % (
", ".join(["%r: %.4f" % (k, self._totals[k]) for k in self._totals]),
)
def reset(self, key):
self._totals[key] = 0.0
if key in self._last_start:
del self._last_start[key]
def start(self, key):
self._last_start[key] = default_timer()
if key not in self._totals:
self._totals[key] = 0.0
def stop(self, key):
self._totals[key] = default_timer() - self._last_start[key]
del self._last_start[key]
class SimulationData(NengoSimulationData): # pylint: disable=too-many-ancestors
"""Map from Probe -> ndarray
This is more like a view on the dict that the simulator manipulates.
However, for speed reasons, the simulator uses Python lists,
and we want to return NumPy arrays. Additionally, this mapping
is readonly, which is more appropriate for its purpose.
"""
def __init__(self, raw):
super().__init__(raw=raw)
self.fallbacks = []
def add_fallback(self, fallback):
assert isinstance(fallback, NengoSimulationData)
self.fallbacks.append(fallback)
def __getitem__(self, key):
target = self.raw
if key not in target:
for fallback in self.fallbacks:
if key in fallback:
target = fallback.raw
break
assert key in target, "probed object not found"
if key not in self._cache or len(self._cache[key]) != len(target[key]):
rval = target[key]
if isinstance(rval, list):
rval = np.asarray(rval)
rval.setflags(write=False)
self._cache[key] = rval
return self._cache[key]
def __iter__(self):
for k in self.raw:
yield k
for fallback in self.fallbacks:
for k in fallback:
yield k
def __len__(self):
return len(self.raw) + sum(len(d) for d in self.fallbacks)
# TODO: Should we override __repr__ and __str__?
| StarcoderdataPython |
3309486 | from pprint import pprint # noqa
import csv
from memorious.helpers import make_id
from opensanctions.models import Entity
def parse_row(context, data):
row = data.get('row')
uid = make_id(row.get('Effective_Date'), row.get('Name'))
entity = Entity.create('us-bis-denied', uid)
entity.type = Entity.TYPE_ENTITY
entity.name = row.get('Name')
entity.updated_at = row.get('Effective_Date')
entity.program = row.get('FR_Citation')
entity.summary = row.get('Action')
address = entity.create_address()
address.street = row.get('Street_Address')
address.postal_code = row.get('Postal_Code')
address.region = row.get('State')
address.city = row.get('City')
address.country = row.get('Country')
# pprint(entity.to_dict())
context.emit(data=entity.to_dict())
def parse(context, data):
res = context.http.rehash(data)
with open(res.file_path, 'r') as csvfile:
for row in csv.DictReader(csvfile, delimiter='\t'):
context.emit(data={'row': row})
| StarcoderdataPython |
3576633 | from loaders.gene_loader import GeneLoader
from loaders.disease_loader import DiseaseLoader
import gzip
import csv
from files import *
from mod import MOD
class SGD(MOD):
species = "Saccharomyces cerevisiae"
@staticmethod
def gene_href(gene_id):
return "http://www.yeastgenome.org/locus/" + gene_id + "/overview"
@staticmethod
def get_organism_names():
return ["Saccharomyces cerevisiae", "S. cerevisiae", "YEAST"]
@staticmethod
def gene_id_from_panther(panther_id):
# example: SGD=S000000226
return panther_id.split("=")[1]
def load_genes(self, batch_size, test_set):
path = "tmp"
S3File("mod-datadumps", "SGD_0.6.0_1.tar.gz", path).download()
TARFile(path, "SGD_0.6.0_1.tar.gz").extract_all()
gene_data = JSONFile().get_data(path + "/SGD_0.6_basicGeneInformation.json")
gene_lists = GeneLoader().get_data(gene_data, batch_size, test_set)
for entry in gene_lists:
yield entry
def load_go(self):
path = "tmp"
S3File("mod-datadumps/GO/ANNOT", "gene_association.sgd.gz", path).download()
go_annot_dict = {}
with gzip.open(path + "/gene_association.sgd.gz", 'rb') as file:
reader = csv.reader(file, delimiter='\t')
for line in reader:
if line[0].startswith('!'):
continue
gene = line[0] + ":" + line[1]
go_id = line[4]
prefix = line[0]
if gene in go_annot_dict:
go_annot_dict[gene]['go_id'].append(go_id)
else:
go_annot_dict[gene] = {
'gene_id': gene,
'go_id': [go_id],
'species': SGD.species,
'prefix':prefix
}
return go_annot_dict
def load_diseases(self):
path = "tmp"
S3File("mod-datadumps", "SGD_0.6.0_1.tar.gz", path).download()
TARFile(path, "SGD_0.6.0_1.tar.gz").extract_all()
disease_data = JSONFile().get_data(path + "/SGD_0.6_diseaseAssociation.json")
gene_disease_dict = DiseaseLoader().get_data(disease_data)
return gene_disease_dict
| StarcoderdataPython |
4835049 | <gh_stars>10-100
# -*- coding: utf-8 -*-
u"""
Manager of the wrapper classes for node types.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..common import *
from ..pyutils import Singleton, parentClasses
from .typeinfo import isDerivedNodeType, getInheritedNodeTypes
__all__ = ['nodetypes']
_FIX_SLOTS = True #: Fix the slots of the standard CyObject class.
#------------------------------------------------------------------------------
class NodeTypes(with_metaclass(Singleton, object)):
u"""
    Manager of the wrapper classes for node types.
    The sole instance, `nodetypes`, has already been created,
    and every node class can be accessed as an attribute of it.
    cymel provides wrapper classes for all node types, including
    plug-in types; apart from the major node types prepared in
    advance for feature implementation, classes are generated
    automatically when needed, e.g. on first access.
    The standard class name provided by the system is the node
    type name with its first letter capitalized.
    Custom classes can also be created by inheriting existing
    node classes. A custom class may be determined purely by the
    node type, or by any other arbitrary condition.
    A custom class can be used as-is without being registered
    with the system, but it can also be registered with
    `registerNodeClass`.
    To get an instance, you can specify the class directly, but
    if the class is registered, obtaining the instance through
    `.CyObject` lets the class be determined automatically.
"""
def __getattr__(self, name):
u"""
        Get a basic class.
        Registered classes are added to __dict__, so for those
        this special method is not called.
        When a basic class and a class with a verification method
        share the same name, the basic class takes precedence.
        If an unregistered class name is specified, a basic class
        is automatically generated and registered for the node type
        whose name is the class name with its first letter lowercased.
        :param `str` name: Class name.
:rtype: `type`
"""
if _RE_STARTS_WITH_CAPITAL_match(name):
try:
return self.basicNodeClass(name[0].lower() + name[1:])
except ValueError:
return self.basicNodeClass(name)
raise ValueError('unknown class name: ' + name)
def registerNodeClass(self, cls, nodetype):
u"""
        Register a node class.
        Node classes fall into two kinds: **classes with a verification
        method**, which provide the static method ``_verifyNode`` used for
        conformity checking, and **basic classes**, which do not.
        The specification of the conformity-checking static method is as
        follows.
        * _verifyNode(mfn, name)
            It receives an instance of a Python API 2
            :mayaapi2:`MFnDependencyNode`-derived class (for example
            :mayaapi2:`MFnDagNode` for a dagNode) and a unique name such as
            the node's partial path, and must return a boolean indicating
            whether the node conforms.
        When ``_verifyNode`` is implemented, it is also recommended to
        override `Node.createNode <.Node_c.createNode>` so that the
        conditions are satisfied at node creation time.
        Basic classes are strictly bound to node types: exactly one exists,
        or is generated automatically, for every node type, including
        abstract types.
        When registering a custom class as a basic class, it must inherit a
        parent class that does not contradict the node type.
        When implementing such a class, `parentBasicNodeClass` is a
        convenient way to obtain the parent class to inherit from.
        A conflicting registration for a single node type overwrites the
        previous one: a warning is emitted and the old class registration,
        including its subclasses, is removed entirely (overwriting an
        automatic registration made earlier by the system does little harm).
        Classes with a verification method, on the other hand, do not need
        to be strictly bound to a node type. As long as the class
        inheritance does not contradict the node type being registered,
        they may be registered to any node type, including abstract types.
        Several classes with verification methods may be registered to the
        same node type without problems, and the same class may also be
        registered to multiple node types.
        On registration the class is inserted at the head of the list
        managed per node type, so classes registered later take precedence.
        :type cls: `type`
        :param cls: The class to register.
        :param `str` nodetype: The node type name to bind the class to.
"""
        # Case: a class with a verification method.
if hasattr(cls, '_verifyNode'):
            # Check consistency with the parent classes.
invalid = True
for sc in parentClasses(cls):
typs = _clsNodeTypeDict_get(sc)
if typs:
                    # Error unless nodetype is derived from one of the node types bound to the parent class.
invalid = True
for typ in typs:
if isDerivedNodeType(nodetype, typ):
invalid = False
break
if invalid:
break
if invalid:
raise ValueError("registerNoeClass: class inheritance does not match node type: " + repr(cls))
            # Insert at the head of the evaluation list.
#print('# RegisterConditionalNodeClass: %s %r %r' % (nodetype, exact, cls))
clss = _evalAbstrClsDict_get(nodetype)
if clss:
try:
clss.remove(cls)
except ValueError:
pass
else:
                    warning('registerNodeClass: updated the same class registration for: ' + nodetype + ' ' + repr(cls))
clss.insert(0, cls)
else:
_evalAbstrClsDict[nodetype] = [cls]
            # Register it.
typs = _clsNodeTypeDict_get(cls)
if typs:
_clsNodeTypeDict[cls] = (nodetype,) + typs
else:
_clsNodeTypeDict[cls] = (nodetype,)
name = cls.__name__
old = self.__dict__.get(name)
if not old or hasattr(old, '_verifyNode'):
                # If a basic class with the same name exists, it takes precedence as the attribute.
setattr(self, name, cls)
        # Case: a basic class.
else:
            # Check consistency with the parent class.
if nodetype != 'node':
                # It must inherit the basic class that exactly matches the parent node type.
invalid = True
                for sc in cls.mro()[1:-3]:  # The last three entries are [Node_c, CyObject, object], so they are skipped.
typ = _clsNodeTypeDict_get(sc)
if typ:
                        # The parent necessarily has no _verifyNode attribute, so only the node type is checked.
if typ[0] == getInheritedNodeTypes(nodetype)[1]:
invalid = False
break
if invalid:
raise ValueError("class inheritance missmatch for maya nodetype hierarchy: %s(%s)" % (cls.__name__, base.__name__))
            # If a registration already exists for this node type, remove it while emitting a warning.
oldcls = _basicClsDict_get(nodetype)
if oldcls:
self.deregisterNodeClass(oldcls, warn=True)
            # Register it.
self.__registerBasicNodeCls(nodetype, cls)
def deregisterNodeClass(self, cls, warn=False):
u"""
ノードクラスとそのサブクラスの登録を削除する。
:type cls: `type`
:param cls: 登録を削除するクラス。
:param `bool` warn: 削除しながら警告メッセージを出力するかどうか。
"""
cnt = _deregisterNodeClass(_evalAbstrClsDict, cls, warn)
cnt += _deregisterNodeClass(_basicClsDict, cls, warn)
if not cnt:
raise ValueError('unknown class: ' + repr(cls))
def relatedNodeTypes(self, cls):
u"""
クラスに結び付けられているノードタイプのタプルを得る。
ベーシッククラスのノードタイプは1つだが、
検査メソッド付きカスタムクラスの場合は
複数タイプへの紐付けも有り得る。
:type cls: `type`
:param cls: クラス。
:rtype: `tuple`
"""
typs = _clsNodeTypeDict_get(cls)
if typs:
return typs
        for sc in cls.mro()[1:-3]:  # The last three entries are [Node_c, CyObject, object], so they are skipped.
typs = _clsNodeTypeDict_get(sc)
if typs:
return typs
return EMPTY_TUPLE
def basicNodeClass(self, nodetype, nodename=None):
u"""
ノードタイプ名のみを条件として決まるベーシッククラスを得る。
:param `str` nodetype: ノードタイプ名。
:param `str` nodename:
実際のノードを特定する名前。
必須ではないが、指定すると未知のタイプの処理がやや高速。
:rtype: `type`
"""
return _basicClsDict_get(nodetype) or self.__newBasicNodeClass(getInheritedNodeTypes(nodetype, nodename))
def parentBasicNodeClass(self, nodetype, nodename=None):
u"""
指定ノードタイプの親タイプ名のみを条件として決まるベーシッククラスを得る。
:param `str` nodetype: ノードタイプ名。
:param `str` nodename:
実際のノードを特定する名前。
必須ではないが、指定すると未知のタイプの処理がやや高速。
:rtype: `type` ('node' を指定した場合のみ `None` となる)
"""
inherited = getInheritedNodeTypes(nodetype, nodename)[1:]
if inherited:
return _basicClsDict_get(inherited[0]) or self.__newBasicNodeClass(inherited)
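    # Example sketch for a custom basic class: inherit the basic class of the
    # parent node type obtained from parentBasicNodeClass; 'myCustomShape' is a
    # hypothetical (e.g. plug-in) node type name used only for illustration.
    #
    #     _ParentCls = nodetypes.parentBasicNodeClass('myCustomShape')
    #     class MyCustomShape(_ParentCls):
    #         pass
    #     nodetypes.registerNodeClass(MyCustomShape, 'myCustomShape')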
def __newBasicNodeClass(self, inherited):
u"""
ノードタイプ名のみを条件として決まるベーシッククラスを新規に登録して得る。
指定タイプは未登録である前提。
少なくとも、最上位のタイプ node だけは登録されている前提。
継承タイプ(それらも必要なら登録)を順次得て、指定タイプを登録する。
:param `list` inherited: 先頭を指定タイプとする継承リスト。
:rtype: `type`
"""
i = 1
typ = inherited[i]
cls = _basicClsDict_get(typ)
while not cls:
i += 1
typ = inherited[i]
cls = _basicClsDict_get(typ)
i -= 1
while i >= 0:
typ = inherited[i]
cls = type(typ[0].upper() + typ[1:], (cls,), _CLS_DEFAULT_ATTRS)
self.__registerBasicNodeCls(typ, cls)
i -= 1
return cls
def __registerBasicNodeCls(self, nodetype, cls):
u"""
ノードタイプ名のみを条件として決まるシンプルなラッパークラスを属性にセットする。
同名の検査メソッド付きクラスがあったとしても属性では優先される。
"""
#print('# RegisterBasicNodeClass: %s %r' % (nodetype, cls))
_basicClsDict[nodetype] = cls
_clsNodeTypeDict[cls] = (nodetype,)
setattr(self, cls.__name__, cls)
def __decideClass(self, nodename, nodetype, getMFn, basecls=None):
u"""
登録されたクラスの中からノードに最適なものを決定する。
:param `str` nodename: ノードを特定する名前。
:param `str` nodetype: ノードタイプ名。
:param mfn:
効率的に API 2 ファンクションセットを得るための
呼び出し可能オブジェクト。
:param basecls:
検査メソッド付きクラスを指定することで、
テストするクラスをその派生クラスに限定する。
マッチするものが無ければ None が返される。
:rtype: `type` or None
"""
if basecls:
            # Search the dictionary of node classes with verification methods for basecls-derived classes.
if _evalAbstrClsDict and basecls in _clsNodeTypeDict:
mfn = None
for typ in getInheritedNodeTypes(nodetype, nodename):
for cls in _evalAbstrClsDict_get(typ, EMPTY_TUPLE):
                        if issubclass(cls, basecls):  # <-- only this check is added here.
if mfn is None:
mfn = getMFn()
if cls._verifyNode(mfn, nodename):
return cls
            # If basecls is not registered, verify only it.
elif basecls._verifyNode(getMFn(), nodename):
return basecls
            # Basic classes are not accepted here.
else:
            # Search the dictionary of node classes with verification methods.
if _evalAbstrClsDict:
mfn = None
for typ in getInheritedNodeTypes(nodetype, nodename):
for cls in _evalAbstrClsDict_get(typ, EMPTY_TUPLE):
if mfn is None:
mfn = getMFn()
if cls._verifyNode(mfn, nodename):
return cls
        # Fall back to the basic node class.
return self.basicNodeClass(nodetype, nodename)
#------------------------------------------------------------------------------
def _deregisterNodeClass(dic, cls, warn):
u"""
クラスの種類ごとの `NodeTypes.deregisterNodeClass` サブルーチン。
:param `dict` dic: クラスの種類に応じた登録辞書。
:param `type` cls: クラス。
:param `callable` proc: クラス登録削除用プロシージャ。
:rtype: `int`
"""
cnt = 0
for typ in list(dic):
subc = dic[typ]
if issubclass(subc, cls):
del dic[typ]
del _clsNodeTypeDict[subc]
if warn:
warning('node class deregistered: ' + repr(subc))
cnt += 1
return cnt
_evalAbstrClsDict = {}  #: Dictionary of classes with verification methods.
_basicClsDict = {}  #: Dictionary of basic classes.
_clsNodeTypeDict = {}  #: Dictionary of the node types bound to each class.
_evalAbstrClsDict_get = _evalAbstrClsDict.get
_basicClsDict_get = _basicClsDict.get
_clsNodeTypeDict_get = _clsNodeTypeDict.get
_RE_STARTS_WITH_CAPITAL_match = re.compile(r'[A-Z]').match
nodetypes = NodeTypes()  #: The sole instance of `NodeTypes`.
_CLS_DEFAULT_ATTRS = {'__slots__': tuple()} if _FIX_SLOTS else {}
| StarcoderdataPython |