max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
tests/test_preempt_return.py | vpv11110000/pyss | 0 | 300 |
# #!/usr/bin/python
# -*- coding: utf-8 -*-
# test_preempt_return.py
# pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals
import sys
import os
import random
import unittest
DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep
sys.path.append(DIRNAME_MODULE)
sys.path.append(DIRNAME_MODULE + "pyss" + os.sep)
from pyss import pyssobject
from pyss.pyss_model import PyssModel
from pyss.segment import Segment
from pyss.generate import Generate
from pyss.terminate import Terminate
from pyss import logger
from pyss.table import Table
from pyss.handle import Handle
from pyss.enter import Enter
from pyss.leave import Leave
from pyss.storage import Storage
from pyss.advance import Advance
from pyss.preempt import Preempt
from pyss.g_return import GReturn
from pyss.facility import Facility
from pyss.seize import Seize
from pyss.release import Release
from pyss.transfer import Transfer
from pyss.test import Test
from pyss.pyss_const import *
class TestPreemptReturn(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
# @unittest.skip("testing skipping test_preempt_return_001")
def test_preempt_return_001(self):
"""Тест Preempt - Return
Формируется один транзакт в момент времени 1.
Прерывает работу устройства F_1 на 5 единиц времени.
Выходит из модели в момент времени 6.
"""
logger.info("--- test_preempt_return_001 ----------------------------------")
### MODEL ----------------------------------
m = PyssModel()
sgm = Segment(m)
#
m[OPTIONS].setAllFalse()
MAX_TIME = 20
#
list_all_transact = []
#
MAX_TIME = 20
#
F_1 = "F_1"
        # facility (single-channel device)
Facility(m, facilityName=F_1)
#
def funcTransactTo_list_all_transact(owner, transact):
            # collect the transacts into a list
list_all_transact.append(transact)
### SEGMENT ----------------------------
        # a single transact is generated at time 1
Generate(sgm, med_value=None,
modificatorFunc=None,
first_tx=1,
max_amount=1)
Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))
#
Preempt(sgm, facilityName=F_1)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY]))
#
Advance(sgm, meanTime=5, modificatorFunc=None)
GReturn(sgm, facilityName=F_1)
# test
        Handle(sgm, handlerFunc=lambda o, t: self.assertNotIn(F_1, t[FACILITY]))
#
Terminate(sgm, deltaTerminate=0)
        # RUN ----------------------
m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # TESTS ----------------------
for t in list_all_transact:
self.assertEqual(t[TIME_CREATED], 1)
self.assertEqual(t[TERMINATED_TIME], 6)
print str(["%s:%s" % (k, t[k])
for k in t.keys() if k
in [TIME_CREATED, TERMINATED_TIME]])
# @unittest.skip("testing skipping test_preempt_return_002")
def test_preempt_return_002(self):
"""Тест Preempt - Return
Формируется транзакт A в момент времени 1.
Идёт на обработку устройством F_1 в течение 3 единиц времени.
Формируется транзакт B в момент времени 2.
Прерывает работу устройства на 5 единиц времени.
Транзакт B выходит из модели в момент времени 7.
Транзакт А выходит из модели в момент времени 9.
Обработка транзакта А была прервана с 2 по 7.
"""
logger.info("--- test_preempt_return_002 ----------------------------------")
### MODEL ----------------------------------
m = PyssModel()
sgm = Segment(m)
#
m[OPTIONS].setAllFalse()
MAX_TIME = 20
# CONSTS
TRANSACT_A = "A"
TRANSACT_B = "B"
#
list_all_transact = []
tA = []
tB = []
#
F_1 = "F_1"
        # facility (single-channel device)
facility_1 = Facility(m, facilityName=F_1)
#
def funcTransactTo_list_all_transact(owner, transact):
            # collect the transacts into a list
list_all_transact.append(transact)
def setTransactLabel(owner, transact):
if transact[NUM] == 1:
transact[LABEL] = TRANSACT_A
tA.append(transact)
elif transact[NUM] == 2:
transact[LABEL] = TRANSACT_B
tB.append(transact)
        # condition-check function
def checkTest(o):
t=m.getCurrentTransact()
if t[LABEL] == TRANSACT_B:
return False
return True
def printAllTransact(owner, transact):
print "Time=%s" % str(m.getCurTime())
print "\n".join([str(t) for t in list_all_transact])
print "tA=%s" % str(tA[0])
print "tB=%s" % str(tB[0])
### SEGMENT ----------------------------
        # two transacts are generated, at times 1 and 2
Generate(sgm,
med_value=1,
modificatorFunc=None,
first_tx=1,
max_amount=2)
        # auxiliary operations
Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
Handle(sgm, handlerFunc=setTransactLabel)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))
#
        # the first transact passes through; the second is routed to the "to_preempt" label
Test(sgm, funcCondition=checkTest, move2block="to_preempt")
        # first transact only
Seize(sgm, facilityName=F_1)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY]))
#
Advance(sgm, meanTime=3, modificatorFunc=None)
Release(sgm, facilityName=F_1)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))
#
Transfer(sgm, funcTransfer=lambda o, t: o.findBlockByLabel("to_term"))
#---
        # second transact only
Preempt(sgm, facilityName=F_1, label="to_preempt")
# test
# .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None)))
Handle(sgm, handlerFunc=printAllTransact)
Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY]))
#
Handle(sgm, handlerFunc=printAllTransact)
Advance(sgm, meanTime=5, modificatorFunc=None)
GReturn(sgm, facilityName=F_1)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][REMAIND_TIME], 2))
Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][SCHEDULED_TIME], 9))
Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))
#
Handle(sgm, handlerFunc=printAllTransact)
        # all transacts
Terminate(sgm, label="to_term", deltaTerminate=0)
        # RUN ----------------------
m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # TESTS ----------------------
for t in list_all_transact:
            # Transact A is generated at time 1.
            # It is processed by facility F_1 for 3 time units.
            # Transact B is generated at time 2.
            # It preempts the facility for 5 time units.
            # Transact B leaves the model at time 7.
            # Transact A leaves the model at time 9.
            # Processing of transact A is interrupted from time 2 to time 7.
print str(["%s:%s" % (k, t[k])
for k in t.keys() if k
in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]])
if t[LABEL] == TRANSACT_A:
self.assertEqual(t[TIME_CREATED], 1)
self.assertEqual(t[REMAIND_TIME], 2)
self.assertEqual(t[TERMINATED_TIME], 9)
self.assertListEqual(t[LIFE_TIME_LIST], [
{'start': 1, 'state': 'actived'},
{'start': 2, 'state': 'preempted'},
{'start': 7, 'state': 'actived'},
{'start': 9, 'state': 'deleted'}])
elif t[LABEL] == TRANSACT_B:
self.assertEqual(t[TIME_CREATED], 2)
self.assertEqual(t[TERMINATED_TIME], 7)
self.assertListEqual(t[LIFE_TIME_LIST], [
{'start': 2, 'state': 'actived'},
{'start': 7, 'state': 'deleted'}])
if __name__ == '__main__':
unittest.main(module="test_preempt_return")
| 1.898438 | 2 |
python/ray/rllib/ddpg2/ddpg_evaluator.py | songqing/ray | 1 | 301 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import ray
from ray.rllib.ddpg2.models import DDPGModel
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.optimizers import PolicyEvaluator
from ray.rllib.utils.filter import NoFilter
from ray.rllib.utils.process_rollout import process_rollout
from ray.rllib.utils.sampler import SyncSampler
class DDPGEvaluator(PolicyEvaluator):
def __init__(self, registry, env_creator, config):
self.env = ModelCatalog.get_preprocessor_as_wrapper(
registry, env_creator(config["env_config"]))
# contains model, target_model
self.model = DDPGModel(registry, self.env, config)
self.sampler = SyncSampler(
self.env, self.model.model, NoFilter(),
config["num_local_steps"], horizon=config["horizon"])
def sample(self):
"""Returns a batch of samples."""
rollout = self.sampler.get_data()
rollout.data["weights"] = np.ones_like(rollout.data["rewards"])
# since each sample is one step, no discounting needs to be applied;
# this does not involve config["gamma"]
samples = process_rollout(
rollout, NoFilter(),
gamma=1.0, use_gae=False)
return samples
def update_target(self):
"""Updates target critic and target actor."""
self.model.update_target()
def compute_gradients(self, samples):
"""Returns critic, actor gradients."""
return self.model.compute_gradients(samples)
def apply_gradients(self, grads):
"""Applies gradients to evaluator weights."""
self.model.apply_gradients(grads)
def compute_apply(self, samples):
grads, _ = self.compute_gradients(samples)
self.apply_gradients(grads)
def get_weights(self):
"""Returns model weights."""
return self.model.get_weights()
def set_weights(self, weights):
"""Sets model weights."""
self.model.set_weights(weights)
def get_completed_rollout_metrics(self):
"""Returns metrics on previously completed rollouts.
Calling this clears the queue of completed rollout metrics.
"""
return self.sampler.get_metrics()
RemoteDDPGEvaluator = ray.remote(DDPGEvaluator)
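# Illustrative sketch (not part of the original file): one way the remote
# wrapper above could be driven. `registry`, `env_creator`, and `config` are
# assumed to be supplied by the surrounding RLlib setup code.
#
#     ray.init()
#     evaluator = RemoteDDPGEvaluator.remote(registry, env_creator, config)
#     samples = ray.get(evaluator.sample.remote())
#     grads = ray.get(evaluator.compute_gradients.remote(samples))
#     evaluator.apply_gradients.remote(grads)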
| 2.1875 | 2 |
python/sysmap/graph.py | harryherold/sysmap | 1 | 302 |
from graphviz import Digraph
from collections import namedtuple
class NetworkGraph:
''' Representation of the network connections.
This class contains the entities in the network e.g. hosts or switches.
And the connections between them.
'''
Vertex = namedtuple('Vertexes', ['hosts', 'switches'])
_edges = []
def _sanitize_edge_connection(self, edge):
        ''' Update the '_to' and '_from' fields of an edge.
:param edge: One edge connection.
:type edge: dict
:returns: Updated edge with _to and _from key.
:rtype: dict
'''
if edge['to_guid'].startswith('S'):
to_collection = 'switches/'
elif edge['to_guid'].startswith('H'):
to_collection = 'hosts/'
if edge['from_guid'].startswith('S'):
from_collection = 'switches/'
elif edge['from_guid'].startswith('H'):
from_collection = 'hosts/'
edge.update({
'_to': to_collection + edge['to_guid'],
'_from': from_collection + edge['from_guid']
})
return edge
def _sanitize_vertexes(self, vertex):
        ''' Set the '_key' field of a vertex to its 'guid' value.
:param vertex: Vertex
:type vertex: dict
:returns: An updated dict, '_key' field with 'guid' value.
:rtype: dict
'''
vertex.update({'_key': vertex['guid']})
return vertex
def __init__(self, hsts=None, switches=None, connections=None):
self._vertexes = self.Vertex(hosts=[self._sanitize_vertexes(h) for h in hsts],
switches=[self._sanitize_vertexes(s) for s in switches])
self._edges = [self._sanitize_edge_connection(c) for c in connections]
@property
def vertexes(self):
''' Returns a concatenated list of all vertexes.
:returns: List of vertexes, contains of hosts and switches.
:rtype: List[dict]
'''
return self._vertexes.hosts + self._vertexes.switches
@property
def switches(self):
''' Returns a list of all 'switch' vertexes.
:returns: List of all switches.
:rtype: List[dict]
'''
return self._vertexes.switches
@property
def hosts(self):
''' Returns a list of all 'host' vertexes.
:returns: List of all hosts.
:rtype: List[dict]
'''
return self._vertexes.hosts
@property
def edges(self):
''' Return a list of all 'connection' edges.
:returns: List of all connections.
:rtype: List[dict]
'''
return self._edges
def to_graph(self, graphargs):
''' Draw a dot graph of the network graph.
:params graphargs: Arguments to graphviz.Digraph.
:type graphargs: dict
'''
graph = Digraph(**graphargs)
        # iterate over the flattened list of host and switch vertexes
        # (iterating the Vertex namedtuple itself would yield the two lists)
        for v in self.vertexes:
graph.node(v['guid'], v['description'])
for c in self._edges:
graph.edge(c['from_guid'], c['to_guid'])
graph.render()
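# Illustrative sketch (not part of the original module): the input shape this
# class expects, inferred from the dictionary keys accessed above ('guid',
# 'description', 'to_guid', 'from_guid'); the concrete values are made up.
#
#     hosts = [{'guid': 'H0001', 'description': 'node01'}]
#     switches = [{'guid': 'S0001', 'description': 'edge switch'}]
#     connections = [{'from_guid': 'H0001', 'to_guid': 'S0001'}]
#     ng = NetworkGraph(hsts=hosts, switches=switches, connections=connections)
#     ng.to_graph({'filename': 'network.gv', 'format': 'png'})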
| 2.984375 | 3 |
png/imageRecognition_Simple.py | tanthanadon/senior | 0 | 303 | from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
from skimage import io
import matplotlib.pyplot as plt
image = io.imread("star.jpg")
image_gray = rgb2gray(image)
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)
fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True)
ax = axes.ravel()
for idx, (blobs, color, title) in enumerate(sequence):
ax[idx].set_title(title)
ax[idx].imshow(image)
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
ax[idx].add_patch(c)
ax[idx].set_axis_off()
plt.tight_layout()
plt.show() | 2.828125 | 3 |
indexof.py | gnuchev/homework | 0 | 304 | def indexof(listofnames, value):
    if value in listofnames:
        value_index = listofnames.index(value)
        return (listofnames, value_index)
    return -1
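# Small usage demo (added for illustration); runs only when the file is
# executed directly.
if __name__ == "__main__":
    names = ["alice", "bob", "carol"]
    print(indexof(names, "bob"))   # -> (['alice', 'bob', 'carol'], 1)
    print(indexof(names, "dave"))  # -> -1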
| 3.578125 | 4 |
Day22_Pong/ball.py | syt1209/PythonProjects | 1 | 305 | from turtle import Turtle
SPEED = 10
class Ball(Turtle):
def __init__(self):
super().__init__()
self.penup()
self.color("white")
self.shape("circle")
self.move_speed = 0.1
self.y_bounce = 1
self.x_bounce = 1
def move(self):
new_x = self.xcor() + SPEED*self.x_bounce
new_y = self.ycor() + SPEED*self.y_bounce
self.goto(new_x, new_y)
def reset(self):
self.goto(0, 0)
self.move_speed = 0.1
self.x_bounce *= -1
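# Illustrative sketch (not from the original project): how a game loop might
# drive this class. The Screen setup and the wall threshold below are
# assumptions, not values taken from the rest of the Pong code.
#
#     from turtle import Screen
#     screen = Screen()
#     screen.tracer(0)
#     ball = Ball()
#     while True:
#         screen.update()
#         ball.move()
#         if ball.ycor() > 280 or ball.ycor() < -280:  # bounce off top/bottom
#             ball.y_bounce *= -1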
| 3.6875 | 4 |
programs/combine/jry2/treedef.py | lsrcz/SyGuS | 1 | 306 | from jry2.semantics import Expr
class TreeNode:
pass
class TreeLeaf(TreeNode):
def __init__(self, term):
self.term = term
def getExpr(self):
return self.term
class TreeInnerNode(TreeNode):
def __init__(self, pred, left, right):
self.pred = pred
self.left = left
self.right = right
def getExpr(self):
return Expr('ite', self.pred, self.left.getExpr(), self.right.getExpr())
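# Illustrative sketch (not part of the original module): building a small
# decision tree and flattening it into a nested 'ite' expression. The way
# Expr is constructed below mirrors the call in getExpr above, but is
# otherwise an assumption about the jry2.semantics.Expr API.
#
#     cond = Expr('<=', Expr('x'), Expr('0'))
#     tree = TreeInnerNode(cond, TreeLeaf(Expr('0')), TreeLeaf(Expr('x')))
#     tree.getExpr()   # Expr('ite', cond, Expr('0'), Expr('x'))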
| 3 | 3 |
src/sage/modular/dirichlet.py | hsm207/sage | 1 | 307 | # -*- coding: utf-8 -*-
r"""
Dirichlet characters
A :class:`DirichletCharacter` is the extension of a homomorphism
.. MATH::
(\ZZ/N\ZZ)^* \to R^*,
for some ring `R`, to the map `\ZZ/N\ZZ \to R` obtained by sending
those `x\in\ZZ/N\ZZ` with `\gcd(N,x)>1` to `0`.
EXAMPLES::
sage: G = DirichletGroup(35)
sage: x = G.gens()
sage: e = x[0]*x[1]^2; e
Dirichlet character modulo 35 of conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2 - 1
sage: e.order()
12
This illustrates a canonical coercion::
sage: e = DirichletGroup(5, QQ).0
sage: f = DirichletGroup(5,CyclotomicField(4)).0
sage: e*f
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4
AUTHORS:
- <NAME> (2005-09-02): Fixed bug in comparison of Dirichlet
characters. It was checking that their values were the same, but
not checking that they had the same level!
- <NAME> (2006-01-07): added more examples
- <NAME> (2006-05-21): added examples of everything; fix a
*lot* of tiny bugs and design problem that became clear when
creating examples.
- <NAME> (2008-02-16): speed up __call__ method for
Dirichlet characters, miscellaneous fixes
- <NAME> (2014-03-06): use UniqueFactory to cache DirichletGroups
"""
# ****************************************************************************
# Copyright (C) 2004-2006 <NAME> <<EMAIL>>
# Copyright (C) 2014 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function
import sage.categories.all as cat
from sage.misc.all import prod
import sage.misc.prandom as random
import sage.modules.free_module as free_module
import sage.modules.free_module_element as free_module_element
import sage.rings.all as rings
import sage.rings.number_field.number_field as number_field
from sage.libs.pari import pari
from sage.categories.map import Map
from sage.rings.rational_field import is_RationalField
from sage.rings.complex_mpfr import is_ComplexField
from sage.rings.qqbar import is_AlgebraicField
from sage.rings.ring import is_Ring
from sage.misc.functional import round
from sage.misc.cachefunc import cached_method
from sage.misc.fast_methods import WithEqualityById
from sage.structure.element import MultiplicativeGroupElement
from sage.structure.gens_py import multiplicative_iterator
from sage.structure.parent import Parent
from sage.structure.sequence import Sequence
from sage.structure.factory import UniqueFactory
from sage.structure.richcmp import richcmp
from sage.arith.all import (binomial, bernoulli, kronecker, factor, gcd,
lcm, fundamental_discriminant, euler_phi, factorial, valuation)
def trivial_character(N, base_ring=rings.RationalField()):
r"""
Return the trivial character of the given modulus, with values in the given
base ring.
EXAMPLES::
sage: t = trivial_character(7)
sage: [t(x) for x in [0..20]]
[0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1]
sage: t(1).parent()
Rational Field
sage: trivial_character(7, Integers(3))(1).parent()
Ring of integers modulo 3
"""
return DirichletGroup(N, base_ring)(1)
TrivialCharacter = trivial_character
def kronecker_character(d):
"""
Return the quadratic Dirichlet character (d/.) of minimal
conductor.
EXAMPLES::
sage: kronecker_character(97*389*997^2)
Dirichlet character modulo 37733 of conductor 37733 mapping 1557 |--> -1, 37346 |--> -1
::
sage: a = kronecker_character(1)
sage: b = DirichletGroup(2401,QQ)(a) # NOTE -- over QQ!
sage: b.modulus()
2401
AUTHORS:
- <NAME> (2006-08-06)
"""
d = rings.Integer(d)
if d == 0:
raise ValueError("d must be nonzero")
D = fundamental_discriminant(d)
G = DirichletGroup(abs(D), rings.RationalField())
return G([kronecker(D,u) for u in G.unit_gens()])
def kronecker_character_upside_down(d):
"""
Return the quadratic Dirichlet character (./d) of conductor d, for
    d > 0.
EXAMPLES::
sage: kronecker_character_upside_down(97*389*997^2)
Dirichlet character modulo 37506941597 of conductor 37733 mapping 13533432536 |--> -1, 22369178537 |--> -1, 14266017175 |--> 1
AUTHORS:
- <NAME> (2006-08-06)
"""
d = rings.Integer(d)
if d <= 0:
raise ValueError("d must be positive")
G = DirichletGroup(d, rings.RationalField())
return G([kronecker(u.lift(),d) for u in G.unit_gens()])
def is_DirichletCharacter(x):
r"""
Return True if x is of type DirichletCharacter.
EXAMPLES::
sage: from sage.modular.dirichlet import is_DirichletCharacter
sage: is_DirichletCharacter(trivial_character(3))
True
sage: is_DirichletCharacter([1])
False
"""
return isinstance(x, DirichletCharacter)
class DirichletCharacter(MultiplicativeGroupElement):
"""
A Dirichlet character.
"""
def __init__(self, parent, x, check=True):
r"""
Create a Dirichlet character with specified values on
generators of `(\ZZ/n\ZZ)^*`.
INPUT:
- ``parent`` -- :class:`DirichletGroup`, a group of Dirichlet
characters
- ``x`` -- one of the following:
- tuple or list of ring elements: the values of the
Dirichlet character on the standard generators of
`(\ZZ/N\ZZ)^*` as returned by
:meth:`sage.rings.finite_rings.integer_mod_ring.IntegerModRing_generic.unit_gens`.
- vector over `\ZZ/e\ZZ`, where `e` is the order of the
standard root of unity for ``parent``.
In both cases, the orders of the elements must divide the
orders of the respective generators of `(\ZZ/N\ZZ)^*`.
OUTPUT:
The Dirichlet character defined by `x` (type
:class:`DirichletCharacter`).
EXAMPLES::
sage: G.<e> = DirichletGroup(13)
sage: G
Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4
sage: e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: loads(e.dumps()) == e
True
::
sage: G, x = DirichletGroup(35).objgens()
sage: e = x[0]*x[1]; e
Dirichlet character modulo 35 of conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2
sage: e.order()
12
sage: loads(e.dumps()) == e
True
TESTS::
sage: G = DirichletGroup(10)
sage: TestSuite(G[1]).run()
It is checked that the orders of the elements in `x` are
admissible (see :trac:`17283`)::
sage: k.<i> = CyclotomicField(4)
sage: G = DirichletGroup(192)
sage: G([i, -1, -1])
Traceback (most recent call last):
...
ValueError: values (= (zeta16^4, -1, -1)) must have multiplicative orders dividing (2, 16, 2), respectively
sage: from sage.modular.dirichlet import DirichletCharacter
sage: M = FreeModule(Zmod(16), 3)
sage: DirichletCharacter(G, M([4, 8, 8]))
Traceback (most recent call last):
...
ValueError: values (= (4, 8, 8) modulo 16) must have additive orders dividing (2, 16, 2), respectively
"""
MultiplicativeGroupElement.__init__(self, parent)
if check:
orders = parent.integers_mod().unit_group().gens_orders()
if len(x) != len(orders):
raise ValueError("wrong number of values (= {}) on generators (want {})".format(x, len(orders)))
if free_module_element.is_FreeModuleElement(x):
x = parent._module(x)
if any(u * v for u, v in zip(x, orders)):
raise ValueError("values (= {} modulo {}) must have additive orders dividing {}, respectively"
.format(x, parent.zeta_order(), orders))
self.element.set_cache(x)
else:
R = parent.base_ring()
x = tuple(map(R, x))
if R.is_exact() and any(u**v != 1 for u, v in zip(x, orders)):
raise ValueError("values (= {}) must have multiplicative orders dividing {}, respectively"
.format(x, orders))
self.values_on_gens.set_cache(x)
else:
if free_module_element.is_FreeModuleElement(x):
self.element.set_cache(x)
else:
self.values_on_gens.set_cache(x)
@cached_method
def __eval_at_minus_one(self):
r"""
Efficiently evaluate the character at -1 using knowledge of its
order. This is potentially much more efficient than computing the
value of -1 directly using dlog and a large power of the image root
of unity.
We use the following. Proposition: Suppose eps is a character mod
`p^n`, where `p` is a prime. Then
`\varepsilon(-1) = -1` if and only if `p = 2` and
the factor of eps at 4 is nontrivial or `p > 2` and 2 does
not divide `\phi(p^n)/\mbox{\rm ord}(\varepsilon)`.
EXAMPLES::
sage: chi = DirichletGroup(20).0; chi._DirichletCharacter__eval_at_minus_one()
-1
"""
D = self.decomposition()
val = self.base_ring()(1)
for e in D:
if e.modulus() % 2 == 0:
if e.modulus() % 4 == 0:
val *= e.values_on_gens()[0] # first gen is -1 for 2-power modulus
elif (euler_phi(e.parent().modulus()) / e.order()) % 2:
val *= -1
return val
def __call__(self, m):
"""
Return the value of this character at the integer `m`.
.. warning::
A table of values of the character is made the first time
you call this (unless `m` equals -1)
EXAMPLES::
sage: G = DirichletGroup(60)
sage: e = prod(G.gens(), G(1))
sage: e
Dirichlet character modulo 60 of conductor 60 mapping 31 |--> -1, 41 |--> -1, 37 |--> zeta4
sage: e(-1)
-1
sage: e(2)
0
sage: e(7)
-zeta4
sage: Integers(60).unit_gens()
(31, 41, 37)
sage: e(31)
-1
sage: e(41)
-1
sage: e(37)
zeta4
sage: e(31*37)
-zeta4
sage: parent(e(31*37))
Cyclotomic Field of order 4 and degree 2
"""
N = self.modulus()
m = m % N
if self.values.is_in_cache() or m != N - 1:
return self.values()[m]
else:
return self.__eval_at_minus_one()
def change_ring(self, R):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a conversion map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
EXAMPLES::
sage: e = DirichletGroup(7, QQ).0
sage: f = e.change_ring(QuadraticField(3, 'a'))
sage: f.parent()
Group of Dirichlet characters modulo 7 with values in Number Field in a with defining polynomial x^2 - 3 with a = 1.732050807568878?
::
sage: e = DirichletGroup(13).0
sage: e.change_ring(QQ)
Traceback (most recent call last):
...
TypeError: Unable to coerce zeta12 to a rational
We test the case where `R` is a map (:trac:`18072`)::
sage: K.<i> = QuadraticField(-1)
sage: chi = DirichletGroup(5, K)[1]
sage: chi(2)
i
sage: f = K.complex_embeddings()[0]
sage: psi = chi.change_ring(f)
sage: psi(2)
-1.83697019872103e-16 - 1.00000000000000*I
"""
if self.base_ring() is R:
return self
G = self.parent().change_ring(R)
return G.element_class(G, [R(x) for x in self.values_on_gens()])
def _richcmp_(self, other, op):
"""
Compare ``self`` to ``other``.
.. NOTE::
Since there is no coercion between Dirichlet groups
of different moduli, characters of different moduli
compare as unequal, even if they define identical
functions on ``ZZ``.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: f = e.restrict(8)
sage: e == e
True
sage: f == f
True
sage: e == f
False
sage: k = DirichletGroup(7)([-1])
sage: k == e
False
"""
return richcmp(self.values_on_gens(), other.values_on_gens(), op)
def __hash__(self):
"""
Return the hash of ``self``.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: hash(e) == hash((-1,1))
True
"""
return hash(self.values_on_gens())
def __invert__(self):
"""
Return the multiplicative inverse of self.
EXAMPLES::
sage: e = DirichletGroup(13).0
sage: f = ~e
sage: f*e
Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = -self.element()
else:
x = tuple(~z for z in self.values_on_gens())
return G.element_class(G, x, check=False)
def _mul_(self, other):
"""
Return the product of self and other.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: b
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
sage: a*b # indirect doctest
Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> zeta4
Multiplying elements whose parents have different zeta orders works::
sage: a = DirichletGroup(3, QQ, zeta=1, zeta_order=1)(1)
sage: b = DirichletGroup(3, QQ, zeta=-1, zeta_order=2)([-1])
sage: a * b # indirect doctest
Dirichlet character modulo 3 of conductor 3 mapping 2 |--> -1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = self.element() + other.element()
else:
x = tuple(y * z for y, z in zip(self.values_on_gens(), other.values_on_gens()))
return G.element_class(G, x, check=False)
def __copy__(self):
"""
Return a (shallow) copy of this Dirichlet character.
EXAMPLES::
sage: G.<a> = DirichletGroup(11)
sage: b = copy(a)
sage: a is b
False
sage: a.element() is b.element()
False
sage: a.values_on_gens() is b.values_on_gens()
True
"""
# This method exists solely because of a bug in the cPickle module --
# see modsym/manin_symbols.py.
G = self.parent()
return G.element_class(G, self.values_on_gens(), check=False)
def __pow__(self, n):
"""
Return self raised to the power of n
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a^2
Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1
sage: b^2
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = n * self.element()
else:
x = tuple(z**n for z in self.values_on_gens())
return G.element_class(G, x, check=False)
def _repr_short_(self):
r"""
A short string representation of self, often used in string representations of modular forms
EXAMPLES::
sage: chi = DirichletGroup(24).0
sage: chi._repr_short_()
'[-1, 1, 1]'
"""
return str(list(self.values_on_gens()))
def _repr_(self):
"""
String representation of self.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: repr(a) # indirect doctest
'Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1'
TESTS:
Dirichlet characters modulo 1 and 2 are printed correctly (see
:trac:`17338`)::
sage: DirichletGroup(1)[0]
Dirichlet character modulo 1 of conductor 1
sage: DirichletGroup(2)[0]
Dirichlet character modulo 2 of conductor 1
"""
s = 'Dirichlet character modulo %s of conductor %s' % (self.modulus(), self.conductor())
r = len(self.values_on_gens())
if r != 0:
s += ' mapping '
for i in range(r):
if i != 0:
s += ', '
s += str(self.parent().unit_gens()[i]) + ' |--> ' + str(self.values_on_gens()[i])
return s
def _latex_(self):
r"""
LaTeX representation of self.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(16)
sage: latex(b) # indirect doctest
\hbox{Dirichlet character modulo } 16 \hbox{ of conductor } 16 \hbox{ mapping } 15 \mapsto 1,\ 5 \mapsto \zeta_{4}
TESTS:
Dirichlet characters modulo 1 and 2 are printed correctly (see
:trac:`17338`)::
sage: latex(DirichletGroup(1)[0])
\hbox{Dirichlet character modulo } 1 \hbox{ of conductor } 1
sage: latex(DirichletGroup(2)[0])
\hbox{Dirichlet character modulo } 2 \hbox{ of conductor } 1
"""
s = r'\hbox{Dirichlet character modulo } %s \hbox{ of conductor } %s' % (self.modulus(), self.conductor())
r = len(self.values_on_gens())
if r != 0:
s += r' \hbox{ mapping } '
for i in range(r):
if i != 0:
s += r',\ '
s += self.parent().unit_gens()[i]._latex_() + r' \mapsto ' + self.values_on_gens()[i]._latex_()
return s
def base_ring(self):
"""
Returns the base ring of this Dirichlet character.
EXAMPLES::
sage: G = DirichletGroup(11)
sage: G.gen(0).base_ring()
Cyclotomic Field of order 10 and degree 4
sage: G = DirichletGroup(11, RationalField())
sage: G.gen(0).base_ring()
Rational Field
"""
return self.parent().base_ring()
def bar(self):
"""
Return the complex conjugate of this Dirichlet character.
EXAMPLES::
sage: e = DirichletGroup(5).0
sage: e
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4
sage: e.bar()
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4
"""
return ~self
def bernoulli(self, k, algorithm='recurrence', cache=True, **opts):
r"""
Returns the generalized Bernoulli number `B_{k,eps}`.
INPUT:
- ``k`` -- a non-negative integer
- ``algorithm`` -- either ``'recurrence'`` (default) or
``'definition'``
- ``cache`` -- if True, cache answers
- ``**opts`` -- optional arguments; not used directly, but
passed to the :func:`bernoulli` function if this is called
OUTPUT:
Let `\varepsilon` be a (not necessarily primitive) character
of modulus `N`. This function returns the generalized
Bernoulli number `B_{k,\varepsilon}`, as defined by the
following identity of power series (see for example
[DI1995]_, Section 2.2):
.. MATH::
\sum_{a=1}^N \frac{\varepsilon(a) t e^{at}}{e^{Nt}-1}
           = \sum_{k=0}^{\infty} \frac{B_{k,\varepsilon}}{k!} t^k.
ALGORITHM:
The ``'recurrence'`` algorithm computes generalized Bernoulli
numbers via classical Bernoulli numbers using the formula in
[Coh2007]_, Proposition 9.4.5; this is usually optimal. The
``definition`` algorithm uses the definition directly.
.. WARNING::
In the case of the trivial Dirichlet character modulo 1,
this function returns `B_{1,\varepsilon} = 1/2`, in
accordance with the above definition, but in contrast to
the value `B_1 = -1/2` for the classical Bernoulli number.
Some authors use an alternative definition giving
`B_{1,\varepsilon} = -1/2`; see the discussion in
[Coh2007]_, Section 9.4.1.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.bernoulli(5)
7430/13*zeta12^3 - 34750/13*zeta12^2 - 11380/13*zeta12 + 9110/13
sage: eps = DirichletGroup(9).0
sage: eps.bernoulli(3)
10*zeta6 + 4
sage: eps.bernoulli(3, algorithm="definition")
10*zeta6 + 4
TESTS:
Check that :trac:`17586` is fixed::
sage: DirichletGroup(1)[0].bernoulli(1)
1/2
"""
if cache:
try:
self.__bernoulli
except AttributeError:
self.__bernoulli = {}
if k in self.__bernoulli:
return self.__bernoulli[k]
N = self.modulus()
K = self.base_ring()
if N == 1:
# By definition, the first Bernoulli number of the trivial
# character is 1/2, in contrast to the value B_1 = -1/2.
ber = K.one()/2 if k == 1 else K(bernoulli(k))
elif self(-1) != K((-1)**k):
ber = K.zero()
elif algorithm == "recurrence":
# The following code is pretty fast, at least compared to
# the other algorithm below. That said, I'm sure it could
# be sped up by a factor of 10 or more in many cases,
# especially since we end up computing all the Bernoulli
# numbers up to k, which should be done with power series
# instead of calls to the Bernoulli function. Likewise
# computing all binomial coefficients can be done much
# more efficiently.
v = self.values()
S = lambda n: sum(v[r] * r**n for r in range(1, N))
ber = K(sum(binomial(k,j) * bernoulli(j, **opts) *
N**(j-1) * S(k-j) for j in range(k+1)))
elif algorithm == "definition":
# This is better since it computes the same thing, but requires
# no arith in a poly ring over a number field.
prec = k+2
R = rings.PowerSeriesRing(rings.QQ, 't')
t = R.gen()
# g(t) = t/(e^{Nt}-1)
g = t/((N*t).exp(prec) - 1)
# h(n) = g(t)*e^{nt}
h = [0] + [g * ((n*t).exp(prec)) for n in range(1,N+1)]
ber = sum([self(a)*h[a][k] for a in range(1,N+1)]) * factorial(k)
else:
raise ValueError("algorithm = '%s' unknown"%algorithm)
if cache:
self.__bernoulli[k] = ber
return ber
def lfunction(self, prec=53, algorithm='pari'):
"""
Return the L-function of ``self``.
The result is a wrapper around a PARI L-function or around
the ``lcalc`` program.
INPUT:
- ``prec`` -- precision (default 53)
- ``algorithm`` -- 'pari' (default) or 'lcalc'
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: L = a.lfunction(); L
PARI L-function associated to Dirichlet character modulo 20
of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: L(4)
0.988944551741105
With the algorithm "lcalc"::
sage: a = a.primitive_character()
sage: L = a.lfunction(algorithm='lcalc'); L
L-function with complex Dirichlet coefficients
sage: L.value(4) # abs tol 1e-14
0.988944551741105 - 5.16608739123418e-18*I
"""
if algorithm is None:
algorithm = 'pari'
if algorithm == 'pari':
from sage.lfunctions.pari import lfun_character, LFunction
Z = LFunction(lfun_character(self), prec=prec)
Z.rename('PARI L-function associated to %s' % self)
return Z
elif algorithm == 'lcalc':
from sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_character
return Lfunction_from_character(self)
raise ValueError('algorithm must be "pari" or "lcalc"')
@cached_method
def conductor(self):
"""
Computes and returns the conductor of this character.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.conductor()
4
sage: b.conductor()
5
sage: (a*b).conductor()
20
TESTS::
sage: G.<a, b> = DirichletGroup(20)
sage: type(G(1).conductor())
<type 'sage.rings.integer.Integer'>
"""
if self.modulus() == 1 or self.is_trivial():
return rings.Integer(1)
F = factor(self.modulus())
if len(F) > 1:
return prod([d.conductor() for d in self.decomposition()])
p = F[0][0]
# When p is odd, and x =/= 1, the conductor is the smallest p**r such that
# Order(x) divides EulerPhi(p**r) = p**(r-1)*(p-1).
# For a given r, whether or not the above divisibility holds
# depends only on the factor of p**(r-1) on the right hand side.
# Since p-1 is coprime to p, this smallest r such that the
# divisibility holds equals Valuation(Order(x),p)+1.
cond = p**(valuation(self.order(),p) + 1)
if p == 2 and F[0][1] > 2 and self.values_on_gens()[1].multiplicative_order() != 1:
cond *= 2
return rings.Integer(cond)
@cached_method
def decomposition(self):
r"""
Return the decomposition of self as a product of Dirichlet
characters of prime power modulus, where the prime powers exactly
divide the modulus of this character.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: c = a*b
sage: d = c.decomposition(); d
[Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4]
sage: d[0].parent()
Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2
sage: d[1].parent()
Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2
We can't multiply directly, since coercion of one element into the
other parent fails in both cases::
sage: d[0]*d[1] == c
Traceback (most recent call last):
...
TypeError: unsupported operand parent(s) for *: 'Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2' and 'Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2'
We can multiply if we're explicit about where we want the
multiplication to take place.
::
sage: G(d[0])*G(d[1]) == c
True
Conductors that are divisible by various powers of 2 present
some problems as the multiplicative group modulo `2^k` is
trivial for `k = 1` and non-cyclic for `k \ge 3`::
sage: (DirichletGroup(18).0).decomposition()
[Dirichlet character modulo 2 of conductor 1, Dirichlet character modulo 9 of conductor 9 mapping 2 |--> zeta6]
sage: (DirichletGroup(36).0).decomposition()
[Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1]
sage: (DirichletGroup(72).0).decomposition()
[Dirichlet character modulo 8 of conductor 4 mapping 7 |--> -1, 5 |--> 1, Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1]
"""
D = self.parent().decomposition()
vals = [[z] for z in self.values_on_gens()]
if self.modulus() % 8 == 0: # 2 factors at 2.
vals[0].append(vals[1][0])
del vals[1]
elif self.modulus() % 4 == 2: # 0 factors at 2.
vals = [1] + vals
return [D[i](vals[i]) for i in range(len(D))]
def extend(self, M):
"""
Returns the extension of this character to a Dirichlet character
modulo the multiple M of the modulus.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: H.<c> = DirichletGroup(4)
sage: c.extend(20)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: a
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: c.extend(20) == a
True
"""
if M % self.modulus() != 0:
raise ArithmeticError("M(=%s) must be a multiple of the modulus(=%s)"%(M,self.modulus()))
H = DirichletGroup(M, self.base_ring())
return H(self)
def _pari_conversion(self):
r"""
Prepare data for the conversion of the character to Pari.
OUTPUT:
pair (G, v) where G is `(\ZZ / N \ZZ)^*` where `N` is the modulus
EXAMPLES::
sage: chi4 = DirichletGroup(4).gen()
sage: chi4._pari_conversion()
([[4, [0]], [2, [2], [3]], [[2]~, Vecsmall([2])],
[[4], [[1, matrix(0,2)]], Mat(1), [3], [2], [0]], Mat(1)], [1])
sage: chi = DirichletGroup(24)([1,-1,-1]); chi
Dirichlet character modulo 24 of conductor 24
mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
sage: chi._pari_conversion()
([[24, [0]], [8, [2, 2, 2], [7, 13, 17]],
[[2, 2, 3]~, Vecsmall([3, 3, 1])],
[[8, 8, 3], [[1, matrix(0,2)], [1, matrix(0,2)], [2, Mat([2, 1])]],
[1, 0, 0; 0, 1, 0; 0, 0, 1], [7, 13, 17], [2, 2, 2], [0, 0, 0]],
[1, 0, 0; 0, 1, 0; 0, 0, 1]], [0, 1, 1])
"""
G = pari.znstar(self.modulus(), 1)
pari_orders = G[1][1]
pari_gens = G[1][2]
# one should use the following, but this does not work
# pari_orders = G.cyc()
# pari_gens = G.gen()
values_on_gens = (self(x) for x in pari_gens)
# now compute the input for pari (list of exponents)
P = self.parent()
if is_ComplexField(P.base_ring()):
zeta = P.zeta()
zeta_argument = zeta.argument()
v = [int(x.argument() / zeta_argument) for x in values_on_gens]
else:
dlog = P._zeta_dlog
v = [dlog[x] for x in values_on_gens]
m = P.zeta_order()
v = [(vi * oi) // m for vi, oi in zip(v, pari_orders)]
return (G, v)
def conrey_number(self):
r"""
Return the Conrey number for this character.
This is a positive integer coprime to q that identifies a
Dirichlet character of modulus q.
See https://www.lmfdb.org/knowledge/show/character.dirichlet.conrey
EXAMPLES::
sage: chi4 = DirichletGroup(4).gen()
sage: chi4.conrey_number()
3
sage: chi = DirichletGroup(24)([1,-1,-1]); chi
Dirichlet character modulo 24 of conductor 24
mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
sage: chi.conrey_number()
5
sage: chi = DirichletGroup(60)([1,-1,I])
sage: chi.conrey_number()
17
sage: chi = DirichletGroup(420)([1,-1,-I,1])
sage: chi.conrey_number()
113
TESTS::
sage: eps1 = DirichletGroup(5)([-1])
sage: eps2 = DirichletGroup(5,QQ)([-1])
sage: eps1.conrey_number() == eps2.conrey_number()
True
"""
G, v = self._pari_conversion()
return pari.znconreyexp(G, v).sage()
def lmfdb_page(self):
r"""
Open the LMFDB web page of the character in a browser.
See https://www.lmfdb.org
EXAMPLES::
sage: E = DirichletGroup(4).gen()
sage: E.lmfdb_page() # optional -- webbrowser
"""
import webbrowser
lmfdb_url = 'https://www.lmfdb.org/Character/Dirichlet/{}/{}'
url = lmfdb_url.format(self.modulus(), self.conrey_number())
webbrowser.open(url)
def galois_orbit(self, sort=True):
r"""
Return the orbit of this character under the action of the absolute
Galois group of the prime subfield of the base ring.
EXAMPLES::
sage: G = DirichletGroup(30); e = G.1
sage: e.galois_orbit()
[Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> -zeta4,
Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> zeta4]
Another example::
sage: G = DirichletGroup(13)
sage: G.galois_orbits()
[
[Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1],
...,
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1]
]
sage: e = G.0
sage: e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: e.galois_orbit()
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^3 + zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^3 - zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12]
sage: e = G.0^2; e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2
sage: e.galois_orbit()
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2, Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^2 + 1]
A non-example::
sage: chi = DirichletGroup(7, Integers(9), zeta = Integers(9)(2)).0
sage: chi.galois_orbit()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
"""
if not self.base_ring().is_integral_domain():
raise TypeError("Galois orbits only defined if base ring is an integral domain")
k = self.order()
if k <= 2:
return [self]
P = self.parent()
z = self.element()
o = int(z.additive_order())
Auts = set([m % o for m in P._automorphisms()])
v = [P.element_class(P, m * z, check=False) for m in Auts]
if sort:
v.sort()
return v
def gauss_sum(self, a=1):
r"""
Return a Gauss sum associated to this Dirichlet character.
The Gauss sum associated to `\chi` is
.. MATH::
g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m^{th}` root of unity.
FACTS: If the modulus is a prime `p` and the character is
nontrivial, then the Gauss sum has absolute value `\sqrt{p}`.
CACHING: Computed Gauss sums are *not* cached with this character.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G([-1])
sage: e.gauss_sum(1)
2*zeta6 - 1
sage: e.gauss_sum(2)
-2*zeta6 + 1
sage: norm(e.gauss_sum())
3
::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.gauss_sum()
-zeta156^46 + zeta156^45 + zeta156^42 + zeta156^41 + 2*zeta156^40 + zeta156^37 - zeta156^36 - zeta156^34 - zeta156^33 - zeta156^31 + 2*zeta156^30 + zeta156^28 - zeta156^24 - zeta156^22 + zeta156^21 + zeta156^20 - zeta156^19 + zeta156^18 - zeta156^16 - zeta156^15 - 2*zeta156^14 - zeta156^10 + zeta156^8 + zeta156^7 + zeta156^6 + zeta156^5 - zeta156^4 - zeta156^2 - 1
sage: factor(norm(e.gauss_sum()))
13^24
TESTS:
The field of algebraic numbers is supported (:trac:`19056`)::
sage: G = DirichletGroup(7, QQbar)
sage: G[1].gauss_sum()
-2.440133358345538? + 1.022618791871794?*I
Check that :trac:`19060` is fixed::
sage: K.<z> = CyclotomicField(8)
sage: G = DirichletGroup(13, K)
sage: chi = G([z^2])
sage: chi.gauss_sum()
zeta52^22 + zeta52^21 + zeta52^19 - zeta52^16 + zeta52^15 + zeta52^14 + zeta52^12 - zeta52^11 - zeta52^10 - zeta52^7 - zeta52^5 + zeta52^4
Check that :trac:`25127` is fixed::
sage: G = DirichletGroup(1)
sage: chi = G.one()
sage: chi.gauss_sum()
1
.. SEEALSO::
- :func:`sage.arith.misc.gauss_sum` for general finite fields
- :func:`sage.rings.padics.misc.gauss_sum` for a `p`-adic version
"""
G = self.parent()
K = G.base_ring()
chi = self
m = G.modulus()
if is_ComplexField(K):
return self.gauss_sum_numerical(a=a)
elif is_AlgebraicField(K):
L = K
zeta = L.zeta(m)
elif number_field.is_CyclotomicField(K) or is_RationalField(K):
chi = chi.minimize_base_ring()
n = lcm(m, G.zeta_order())
L = rings.CyclotomicField(n)
zeta = L.gen(0) ** (n // m)
else:
raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field")
zeta = zeta ** a
g = L(chi(0))
z = L.one()
for c in chi.values()[1:]:
z *= zeta
g += L(c)*z
return g
def gauss_sum_numerical(self, prec=53, a=1):
r"""
Return a Gauss sum associated to this Dirichlet character as an
approximate complex number with prec bits of precision.
INPUT:
- ``prec`` -- integer (default: 53), *bits* of precision
- ``a`` -- integer, as for :meth:`gauss_sum`.
The Gauss sum associated to `\chi` is
.. MATH::
g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m^{th}` root of unity.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G.0
sage: abs(e.gauss_sum_numerical())
1.7320508075...
sage: sqrt(3.0)
1.73205080756888
sage: e.gauss_sum_numerical(a=2)
-...e-15 - 1.7320508075...*I
sage: e.gauss_sum_numerical(a=2, prec=100)
4.7331654313260708324703713917e-30 - 1.7320508075688772935274463415*I
sage: G = DirichletGroup(13)
sage: H = DirichletGroup(13, CC)
sage: e = G.0
sage: f = H.0
sage: e.gauss_sum_numerical()
-3.07497205... + 1.8826966926...*I
sage: f.gauss_sum_numerical()
-3.07497205... + 1.8826966926...*I
sage: abs(e.gauss_sum_numerical())
3.60555127546...
sage: abs(f.gauss_sum_numerical())
3.60555127546...
sage: sqrt(13.0)
3.60555127546399
TESTS:
The field of algebraic numbers is supported (:trac:`19056`)::
sage: G = DirichletGroup(7, QQbar)
sage: G[1].gauss_sum_numerical()
-2.44013335834554 + 1.02261879187179*I
"""
G = self.parent()
K = G.base_ring()
if is_ComplexField(K):
phi = lambda t : t
CC = K
elif is_AlgebraicField(K):
from sage.rings.complex_mpfr import ComplexField
CC = ComplexField(prec)
phi = CC.coerce_map_from(K)
elif number_field.is_CyclotomicField(K) or is_RationalField(K):
phi = K.complex_embedding(prec)
CC = phi.codomain()
else:
raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field")
zeta = CC.zeta(G.modulus()) ** a
g = phi(self(0))
z = CC.one()
for c in self.values()[1:]:
z *= zeta
g += phi(c)*z
return g
def jacobi_sum(self, char, check=True):
r"""
Return the Jacobi sum associated to these Dirichlet characters
(i.e., J(self,char)).
This is defined as
.. MATH::
J(\chi, \psi) = \sum_{a \in \ZZ / N\ZZ} \chi(a) \psi(1-a)
where `\chi` and `\psi` are both characters modulo `N`.
EXAMPLES::
sage: D = DirichletGroup(13)
sage: e = D.0
sage: f = D[-2]
sage: e.jacobi_sum(f)
3*zeta12^2 + 2*zeta12 - 3
sage: f.jacobi_sum(e)
3*zeta12^2 + 2*zeta12 - 3
sage: p = 7
sage: DP = DirichletGroup(p)
sage: f = DP.0
sage: e.jacobi_sum(f)
Traceback (most recent call last):
...
NotImplementedError: Characters must be from the same Dirichlet Group.
sage: all_jacobi_sums = [(DP[i].values_on_gens(),DP[j].values_on_gens(),DP[i].jacobi_sum(DP[j]))
....: for i in range(p-1) for j in range(i, p-1)]
sage: for s in all_jacobi_sums:
....: print(s)
((1,), (1,), 5)
((1,), (zeta6,), -1)
((1,), (zeta6 - 1,), -1)
((1,), (-1,), -1)
((1,), (-zeta6,), -1)
((1,), (-zeta6 + 1,), -1)
((zeta6,), (zeta6,), -zeta6 + 3)
((zeta6,), (zeta6 - 1,), 2*zeta6 + 1)
((zeta6,), (-1,), -2*zeta6 - 1)
((zeta6,), (-zeta6,), zeta6 - 3)
((zeta6,), (-zeta6 + 1,), 1)
((zeta6 - 1,), (zeta6 - 1,), -3*zeta6 + 2)
((zeta6 - 1,), (-1,), 2*zeta6 + 1)
((zeta6 - 1,), (-zeta6,), -1)
((zeta6 - 1,), (-zeta6 + 1,), -zeta6 - 2)
((-1,), (-1,), 1)
((-1,), (-zeta6,), -2*zeta6 + 3)
((-1,), (-zeta6 + 1,), 2*zeta6 - 3)
((-zeta6,), (-zeta6,), 3*zeta6 - 1)
((-zeta6,), (-zeta6 + 1,), -2*zeta6 + 3)
((-zeta6 + 1,), (-zeta6 + 1,), zeta6 + 2)
Let's check that trivial sums are being calculated correctly::
sage: N = 13
sage: D = DirichletGroup(N)
sage: g = D(1)
sage: g.jacobi_sum(g)
11
sage: sum([g(x)*g(1-x) for x in IntegerModRing(N)])
11
And sums where exactly one character is nontrivial (see :trac:`6393`)::
sage: G = DirichletGroup(5); X=G.list(); Y=X[0]; Z=X[1]
sage: Y.jacobi_sum(Z)
-1
sage: Z.jacobi_sum(Y)
-1
Now let's take a look at a non-prime modulus::
sage: N = 9
sage: D = DirichletGroup(N)
sage: g = D(1)
sage: g.jacobi_sum(g)
3
We consider a sum with values in a finite field::
sage: g = DirichletGroup(17, GF(9,'a')).0
sage: g.jacobi_sum(g**2)
2*a
TESTS:
This shows that :trac:`6393` has been fixed::
sage: G = DirichletGroup(5); X = G.list(); Y = X[0]; Z = X[1]
sage: # Y is trivial and Z is quartic
sage: sum([Y(x)*Z(1-x) for x in IntegerModRing(5)])
-1
sage: # The value -1 above is the correct value of the Jacobi sum J(Y, Z).
sage: Y.jacobi_sum(Z); Z.jacobi_sum(Y)
-1
-1
"""
if check:
if self.parent() != char.parent():
raise NotImplementedError("Characters must be from the same Dirichlet Group.")
return sum([self(x) * char(1-x) for x in rings.IntegerModRing(self.modulus())])
def kloosterman_sum(self, a=1, b=0):
r"""
Return the "twisted" Kloosterman sum associated to this Dirichlet character.
This includes Gauss sums, classical Kloosterman sums, Salié sums, etc.
The Kloosterman sum associated to `\chi` and the integers a,b is
.. MATH::
K(a,b,\chi) = \sum_{r \in (\ZZ/m\ZZ)^\times} \chi(r)\,\zeta^{ar+br^{-1}},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m` th root of unity. This reduces to the Gauss sum if `b=0`.
This method performs an exact calculation and returns an element of a
suitable cyclotomic field; see also :meth:`.kloosterman_sum_numerical`,
which gives an inexact answer (but is generally much quicker).
CACHING: Computed Kloosterman sums are *not* cached with this
character.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G([-1])
sage: e.kloosterman_sum(3,5)
-2*zeta6 + 1
sage: G = DirichletGroup(20)
sage: e = G([1 for u in G.unit_gens()])
sage: e.kloosterman_sum(7,17)
-2*zeta20^6 + 2*zeta20^4 + 4
TESTS::
sage: G = DirichletGroup(20, UniversalCyclotomicField())
sage: e = G([1 for u in G.unit_gens()])
sage: e.kloosterman_sum(7,17)
-2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4
sage: G = DirichletGroup(12, QQbar)
sage: e = G.gens()[0]
sage: e.kloosterman_sum(5,11)
Traceback (most recent call last):
...
NotImplementedError: Kloosterman sums not implemented over this ring
"""
G = self.parent()
zo = G.zeta_order()
m = G.modulus()
g = 0
L = rings.CyclotomicField(m.lcm(zo))
zeta = L.gen(0)
try:
self(1) * zeta**(a+b)
except TypeError:
raise NotImplementedError('Kloosterman sums not implemented '
'over this ring')
n = zeta.multiplicative_order()
zeta = zeta**(n // m)
for c in m.coprime_integers(m):
e = rings.Mod(c, m)
g += self(c) * zeta**int(a*e + b*e**(-1))
return g
def kloosterman_sum_numerical(self, prec=53, a=1, b=0):
r"""
Return the Kloosterman sum associated to this Dirichlet character as
an approximate complex number with prec bits of precision.
See also :meth:`.kloosterman_sum`, which calculates the sum
exactly (which is generally slower).
INPUT:
- ``prec`` -- integer (default: 53), *bits* of precision
- ``a`` -- integer, as for :meth:`.kloosterman_sum`
- ``b`` -- integer, as for :meth:`.kloosterman_sum`.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G.0
The real component of the numerical value of e is near zero::
sage: v=e.kloosterman_sum_numerical()
sage: v.real() < 1.0e15
True
sage: v.imag()
1.73205080756888
sage: G = DirichletGroup(20)
sage: e = G.1
sage: e.kloosterman_sum_numerical(53,3,11)
3.80422606518061 - 3.80422606518061*I
"""
G = self.parent()
K = G.base_ring()
if not (number_field.is_CyclotomicField(K) or is_RationalField(K)):
raise NotImplementedError("Kloosterman sums only currently implemented when the base ring is a cyclotomic field or QQ.")
phi = K.complex_embedding(prec)
CC = phi.codomain()
g = 0
m = G.modulus()
zeta = CC.zeta(m)
for c in m.coprime_integers(m):
e = rings.Mod(c, m)
z = zeta ** int(a*e + b*(e**(-1)))
g += phi(self(c))*z
return g
@cached_method
def is_even(self):
r"""
Return ``True`` if and only if `\varepsilon(-1) = 1`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.is_even()
False
sage: e(-1)
-1
sage: [e.is_even() for e in G]
[True, False, True, False, True, False, True, False, True, False, True, False]
sage: G = DirichletGroup(13, CC)
sage: e = G.0
sage: e.is_even()
False
sage: e(-1)
-1.000000...
sage: [e.is_even() for e in G]
[True, False, True, False, True, False, True, False, True, False, True, False]
sage: G = DirichletGroup(100000, CC)
sage: G.1.is_even()
True
Note that ``is_even`` need not be the negation of
is_odd, e.g., in characteristic 2::
sage: G.<e> = DirichletGroup(13, GF(4,'a'))
sage: e.is_even()
True
sage: e.is_odd()
True
"""
R = self.base_ring()
# self(-1) is either +1 or -1
if not R.is_exact():
return abs(self(-1) - R(1)) < 0.5
return self(-1) == R(1)
@cached_method
def is_odd(self):
r"""
Return ``True`` if and only if
`\varepsilon(-1) = -1`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.is_odd()
True
sage: [e.is_odd() for e in G]
[False, True, False, True, False, True, False, True, False, True, False, True]
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.is_odd()
True
sage: [e.is_odd() for e in G]
[False, True, False, True, False, True, False, True, False, True, False, True]
sage: G = DirichletGroup(100000, CC)
sage: G.0.is_odd()
True
Note that ``is_even`` need not be the negation of
is_odd, e.g., in characteristic 2::
sage: G.<e> = DirichletGroup(13, GF(4,'a'))
sage: e.is_even()
True
sage: e.is_odd()
True
"""
R = self.base_ring()
# self(-1) is either +1 or -1
if not R.is_exact():
return abs(self(-1) - R(-1)) < 0.5
return self(-1) == R(-1)
@cached_method
def is_primitive(self):
"""
Return ``True`` if and only if this character is
primitive, i.e., its conductor equals its modulus.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.is_primitive()
False
sage: b.is_primitive()
False
sage: (a*b).is_primitive()
True
sage: G.<a,b> = DirichletGroup(20, CC)
sage: a.is_primitive()
False
sage: b.is_primitive()
False
sage: (a*b).is_primitive()
True
"""
return (self.conductor() == self.modulus())
@cached_method
def is_trivial(self):
r"""
Returns ``True`` if this is the trivial character,
i.e., has order 1.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.is_trivial()
False
sage: (a^2).is_trivial()
True
"""
if self.element.is_in_cache():
return not self.element()
one = self.base_ring().one()
return all(x == one for x in self.values_on_gens())
def kernel(self):
r"""
Return the kernel of this character.
OUTPUT: Currently the kernel is returned as a list. This may
change.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.kernel()
[1, 9, 13, 17]
sage: b.kernel()
[1, 11]
"""
one = self.base_ring().one()
return [x for x in range(self.modulus()) if self(x) == one]
def maximize_base_ring(self):
r"""
Let
.. MATH::
\varepsilon : (\ZZ/N\ZZ)^* \to \QQ(\zeta_n)
be a Dirichlet character. This function returns an equal Dirichlet
character
.. MATH::
\chi : (\ZZ/N\ZZ)^* \to \QQ(\zeta_m)
where `m` is the least common multiple of `n` and
the exponent of `(\ZZ/N\ZZ)^*`.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20,QQ)
sage: b.maximize_base_ring()
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1
sage: b.maximize_base_ring().base_ring()
Cyclotomic Field of order 4 and degree 2
sage: DirichletGroup(20).base_ring()
Cyclotomic Field of order 4 and degree 2
"""
g = rings.IntegerModRing(self.modulus()).unit_group_exponent()
if g == 1:
g = 2
z = self.base_ring().zeta()
n = z.multiplicative_order()
m = lcm(g,n)
if n == m:
return self
K = rings.CyclotomicField(m)
return self.change_ring(K)
def minimize_base_ring(self):
r"""
Return a Dirichlet character that equals this one, but over as
small a subfield (or subring) of the base ring as possible.
.. note::
This function is currently only implemented when the base
ring is a number field. It's the identity function in
characteristic p.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = DirichletGroup(13).0
sage: e.base_ring()
Cyclotomic Field of order 12 and degree 4
sage: e.minimize_base_ring().base_ring()
Cyclotomic Field of order 12 and degree 4
sage: (e^2).minimize_base_ring().base_ring()
Cyclotomic Field of order 6 and degree 2
sage: (e^3).minimize_base_ring().base_ring()
Cyclotomic Field of order 4 and degree 2
sage: (e^12).minimize_base_ring().base_ring()
Rational Field
TESTS:
Check that :trac:`18479` is fixed::
sage: f = Newforms(Gamma1(25), names='a')[1]
sage: eps = f.character()
sage: eps.minimize_base_ring() == eps
True
A related bug (see :trac:`18086`)::
sage: K.<a,b>=NumberField([x^2 + 1, x^2 - 3])
sage: chi = DirichletGroup(7, K).0
sage: chi.minimize_base_ring()
Dirichlet character modulo 7 of conductor 7 mapping 3 |--> -1/2*b*a + 1/2
"""
R = self.base_ring()
if R.is_prime_field():
return self
p = R.characteristic()
if p:
K = rings.IntegerModRing(p)
elif self.order() <= 2:
K = rings.QQ
elif (isinstance(R, number_field.NumberField_generic)
and euler_phi(self.order()) < R.absolute_degree()):
K = rings.CyclotomicField(self.order())
else:
return self
try:
return self.change_ring(K)
except (TypeError, ValueError, ArithmeticError):
return self
def modulus(self):
"""
The modulus of this character.
EXAMPLES::
sage: e = DirichletGroup(100, QQ).0
sage: e.modulus()
100
sage: e.conductor()
4
"""
return self.parent().modulus()
def level(self):
"""
Synonym for modulus.
EXAMPLES::
sage: e = DirichletGroup(100, QQ).0
sage: e.level()
100
"""
return self.modulus()
@cached_method
def multiplicative_order(self):
"""
The order of this character.
EXAMPLES::
sage: e = DirichletGroup(100).1
sage: e.order() # same as multiplicative_order, since group is multiplicative
20
sage: e.multiplicative_order()
20
sage: e = DirichletGroup(100).0
sage: e.multiplicative_order()
2
"""
if self.parent().zeta.is_in_cache():
return self.element().additive_order()
return lcm([z.multiplicative_order() for z in self.values_on_gens()])
def primitive_character(self):
"""
Returns the primitive character associated to self.
EXAMPLES::
sage: e = DirichletGroup(100).0; e
Dirichlet character modulo 100 of conductor 4 mapping 51 |--> -1, 77 |--> 1
sage: e.conductor()
4
sage: f = e.primitive_character(); f
Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
sage: f.modulus()
4
"""
return self.restrict(self.conductor())
def restrict(self, M):
"""
Returns the restriction of this character to a Dirichlet character
modulo the divisor M of the modulus, which must also be a multiple
of the conductor of this character.
EXAMPLES::
sage: e = DirichletGroup(100).0
sage: e.modulus()
100
sage: e.conductor()
4
sage: e.restrict(20)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: e.restrict(4)
Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
sage: e.restrict(50)
Traceback (most recent call last):
...
ValueError: conductor(=4) must divide M(=50)
"""
M = int(M)
if self.modulus()%M != 0:
raise ValueError("M(=%s) must divide the modulus(=%s)"%(M,self.modulus()))
if M%self.conductor() != 0:
raise ValueError("conductor(=%s) must divide M(=%s)"%(self.conductor(),M))
H = DirichletGroup(M, self.base_ring())
return H(self)
@cached_method
def values(self):
"""
Return a list of the values of this character on each integer
between 0 and the modulus.
EXAMPLES::
sage: e = DirichletGroup(20)(1)
sage: e.values()
[0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
sage: e = DirichletGroup(20).gen(0)
sage: e.values()
[0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1]
sage: e = DirichletGroup(20).gen(1)
sage: e.values()
[0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1, 0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1]
sage: e = DirichletGroup(21).gen(0) ; e.values()
[0, 1, -1, 0, 1, -1, 0, 0, -1, 0, 1, -1, 0, 1, 0, 0, 1, -1, 0, 1, -1]
sage: e = DirichletGroup(21, base_ring=GF(37)).gen(0) ; e.values()
[0, 1, 36, 0, 1, 36, 0, 0, 36, 0, 1, 36, 0, 1, 0, 0, 1, 36, 0, 1, 36]
sage: e = DirichletGroup(21, base_ring=GF(3)).gen(0) ; e.values()
[0, 1, 2, 0, 1, 2, 0, 0, 2, 0, 1, 2, 0, 1, 0, 0, 1, 2, 0, 1, 2]
::
sage: chi = DirichletGroup(100151, CyclotomicField(10)).0
sage: ls = chi.values() ; ls[0:10]
[0,
1,
-zeta10^3,
-zeta10,
-zeta10,
1,
zeta10^3 - zeta10^2 + zeta10 - 1,
zeta10,
zeta10^3 - zeta10^2 + zeta10 - 1,
zeta10^2]
TESTS:
Test that :trac:`11783` and :trac:`14368` are fixed::
sage: chi = DirichletGroup(1).list()[0]
sage: chi.values()
[1]
sage: chi(1)
1
"""
G = self.parent()
R = G.base_ring()
mod = self.parent().modulus()
if mod == 1:
return [R.one()]
elif mod == 2:
return [R.zero(), R.one()]
result_list = [R.zero()] * mod
gens = G.unit_gens()
orders = G.integers_mod().unit_group().gens_orders()
R_values = G._zeta_powers
val_on_gen = self.element()
exponents = [0] * len(orders)
n = G.integers_mod().one()
value = val_on_gen.base_ring().zero()
while True:
# record character value on n
result_list[n] = R_values[value]
# iterate:
# increase the exponent vector by 1,
# increase n accordingly, and increase value
i = 0
while True:
try:
exponents[i] += 1
except IndexError: # Done!
return result_list
value += val_on_gen[i]
n *= gens[i]
if exponents[i] < orders[i]:
break
exponents[i] = 0
i += 1
@cached_method(do_pickle=True)
def values_on_gens(self):
r"""
Return a tuple of the values of ``self`` on the standard
generators of `(\ZZ/N\ZZ)^*`, where `N` is the modulus.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: e.values_on_gens ()
(-1, 1)
.. NOTE::
The constructor of :class:`DirichletCharacter` sets the
cache of :meth:`element` or of :meth:`values_on_gens`. The cache of
one of these methods needs to be set for the other method to work properly,
these caches have to be stored when pickling an instance of
:class:`DirichletCharacter`.
"""
pows = self.parent()._zeta_powers
return tuple([pows[i] for i in self.element()])
@cached_method(do_pickle=True)
def element(self):
r"""
Return the underlying `\ZZ/n\ZZ`-module
vector of exponents.
.. warning::
Please do not change the entries of the returned vector;
this vector is mutable *only* because immutable vectors are
not implemented yet.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.element()
(2, 0)
sage: b.element()
(0, 1)
.. NOTE::
The constructor of :class:`DirichletCharacter` sets the
cache of :meth:`element` or of :meth:`values_on_gens`. The cache of
one of these methods needs to be set for the other method to work properly,
these caches have to be stored when pickling an instance of
:class:`DirichletCharacter`.
"""
P = self.parent()
M = P._module
if is_ComplexField(P.base_ring()):
zeta = P.zeta()
zeta_argument = zeta.argument()
v = M([int(round(x.argument() / zeta_argument))
for x in self.values_on_gens()])
else:
dlog = P._zeta_dlog
v = M([dlog[x] for x in self.values_on_gens()])
v.set_immutable()
return v
def __setstate__(self, state):
r"""
Restore a pickled element from ``state``.
TESTS::
sage: e = DirichletGroup(16)([-1, 1])
sage: loads(dumps(e)) == e
True
"""
# values_on_gens() used an explicit cache __values_on_gens in the past
# we need to set the cache of values_on_gens() from that if we encounter it in a pickle
values_on_gens_key = '_DirichletCharacter__values_on_gens'
values_on_gens = None
state_dict = state[1]
if values_on_gens_key in state_dict:
values_on_gens = state_dict[values_on_gens_key]
del state_dict[values_on_gens_key]
# element() used an explicit cache __element in the past
# we need to set the cache of element() from that if we encounter it in a pickle
element_key = '_DirichletCharacter__element'
element = None
if element_key in state_dict:
element = state_dict[element_key]
del state_dict[element_key]
super(DirichletCharacter, self).__setstate__(state)
if values_on_gens is not None:
self.values_on_gens.set_cache(values_on_gens)
if element is not None:
self.element.set_cache(element)
class DirichletGroupFactory(UniqueFactory):
r"""
Construct a group of Dirichlet characters modulo `N`.
INPUT:
- ``N`` -- positive integer
- ``base_ring`` -- commutative ring; the value ring for the
characters in this group (default: the cyclotomic field
`\QQ(\zeta_n)`, where `n` is the exponent of `(\ZZ/N\ZZ)^*`)
- ``zeta`` -- (optional) root of unity in ``base_ring``
- ``zeta_order`` -- (optional) positive integer; this must be the
order of ``zeta`` if both are specified
- ``names`` -- ignored (needed so ``G.<...> = DirichletGroup(...)``
notation works)
- ``integral`` -- boolean (default: ``False``); whether to replace
the default cyclotomic field by its rings of integers as the
base ring. This is ignored if ``base_ring`` is not ``None``.
OUTPUT:
The group of Dirichlet characters modulo `N` with values in a
subgroup `V` of the multiplicative group `R^*` of ``base_ring``.
This is the group of homomorphisms `(\ZZ/N\ZZ)^* \to V` with
pointwise multiplication. The group `V` is determined as follows:
- If both ``zeta`` and ``zeta_order`` are omitted, then `V` is
taken to be `R^*`, or equivalently its `n`-torsion subgroup,
where `n` is the exponent of `(\ZZ/N\ZZ)^*`. Many operations,
such as finding a set of generators for the group, are only
implemented if `V` is cyclic and a generator for `V` can be
found.
- If ``zeta`` is specified, then `V` is taken to be the cyclic
subgroup of `R^*` generated by ``zeta``. If ``zeta_order`` is
also given, it must be the multiplicative order of ``zeta``;
this is useful if the base ring is not exact or if the order of
``zeta`` is very large.
- If ``zeta`` is not specified but ``zeta_order`` is, then `V` is
taken to be the group of roots of unity of order dividing
``zeta_order`` in `R`. In this case, `R` must be a domain (so
`V` is cyclic), and `V` must have order ``zeta_order``.
Furthermore, a generator ``zeta`` of `V` is computed, and an
error is raised if such ``zeta`` cannot be found.
EXAMPLES:
The default base ring is a cyclotomic field of order the exponent
of `(\ZZ/N\ZZ)^*`::
sage: DirichletGroup(20)
Group of Dirichlet characters modulo 20 with values in Cyclotomic Field of order 4 and degree 2
    We create the group of Dirichlet characters mod 20 with values in
the rational numbers::
sage: G = DirichletGroup(20, QQ); G
Group of Dirichlet characters modulo 20 with values in Rational Field
sage: G.order()
4
sage: G.base_ring()
Rational Field
The elements of G print as lists giving the values of the character
    on the generators of `(\ZZ/N\ZZ)^*`::
sage: list(G)
[Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1, Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1, Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -1]
    Next we construct the group of Dirichlet characters mod 20, but with
values in `\QQ(\zeta_n)`::
sage: G = DirichletGroup(20)
sage: G.1
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
We next compute several invariants of ``G``::
sage: G.gens()
(Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
sage: G.unit_gens()
(11, 17)
sage: G.zeta()
zeta4
sage: G.zeta_order()
4
In this example we create a Dirichlet group with values in a
number field::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^4 + 1)
sage: DirichletGroup(5, K)
Group of Dirichlet characters modulo 5 with values in Number Field in a with defining polynomial x^4 + 1
An example where we give ``zeta``, but not its order::
sage: G = DirichletGroup(5, K, a); G
Group of Dirichlet characters modulo 5 with values in the group of order 8 generated by a in Number Field in a with defining polynomial x^4 + 1
sage: G.list()
[Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> a^2, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -a^2]
We can also restrict the order of the characters, either with or
without specifying a root of unity::
sage: DirichletGroup(5, K, zeta=-1, zeta_order=2)
Group of Dirichlet characters modulo 5 with values in the group of order 2 generated by -1 in Number Field in a with defining polynomial x^4 + 1
sage: DirichletGroup(5, K, zeta_order=2)
Group of Dirichlet characters modulo 5 with values in the group of order 2 generated by -1 in Number Field in a with defining polynomial x^4 + 1
::
sage: G.<e> = DirichletGroup(13)
sage: loads(G.dumps()) == G
True
::
sage: G = DirichletGroup(19, GF(5))
sage: loads(G.dumps()) == G
True
We compute a Dirichlet group over a large prime field::
sage: p = next_prime(10^40)
sage: g = DirichletGroup(19, GF(p)); g
Group of Dirichlet characters modulo 19 with values in Finite Field of size 10000000000000000000000000000000000000121
Note that the root of unity has small order, i.e., it is not the
largest order root of unity in the field::
sage: g.zeta_order()
2
::
sage: r4 = CyclotomicField(4).ring_of_integers()
sage: G = DirichletGroup(60, r4)
sage: G.gens()
(Dirichlet character modulo 60 of conductor 4 mapping 31 |--> -1, 41 |--> 1, 37 |--> 1, Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1, Dirichlet character modulo 60 of conductor 5 mapping 31 |--> 1, 41 |--> 1, 37 |--> zeta4)
sage: val = G.gens()[2].values_on_gens()[2] ; val
zeta4
sage: parent(val)
Gaussian Integers in Cyclotomic Field of order 4 and degree 2
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val)
17
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) * GF(29)(3)
22
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3
22
sage: parent(r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3)
Residue field of Fractional ideal (-2*zeta4 + 5)
::
sage: DirichletGroup(60, integral=True)
Group of Dirichlet characters modulo 60 with values in Gaussian Integers in Cyclotomic Field of order 4 and degree 2
sage: parent(DirichletGroup(60, integral=True).gens()[2].values_on_gens()[2])
Gaussian Integers in Cyclotomic Field of order 4 and degree 2
If the order of ``zeta`` cannot be determined automatically, we
can specify it using ``zeta_order``::
sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6))
Traceback (most recent call last):
...
NotImplementedError: order of element not known
sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6), zeta_order=6)
Group of Dirichlet characters modulo 7 with values in the group of order 6 generated by 0.500000000000000 + 0.866025403784439*I in Complex Field with 53 bits of precision
If the base ring is not a domain (in which case the group of roots
of unity is not necessarily cyclic), some operations still work,
such as creation of elements::
sage: G = DirichletGroup(5, Zmod(15)); G
Group of Dirichlet characters modulo 5 with values in Ring of integers modulo 15
sage: chi = G([13]); chi
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 13
sage: chi^2
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 4
sage: chi.multiplicative_order()
4
Other operations only work if ``zeta`` is specified::
sage: G.gens()
Traceback (most recent call last):
...
NotImplementedError: factorization of polynomials over rings with composite characteristic is not implemented
sage: G = DirichletGroup(5, Zmod(15), zeta=2); G
Group of Dirichlet characters modulo 5 with values in the group of order 4 generated by 2 in Ring of integers modulo 15
sage: G.gens()
(Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 2,)
TESTS:
Dirichlet groups are cached, creating two groups with the same parameters
yields the same object::
sage: DirichletGroup(60) is DirichletGroup(60)
True
"""
def create_key(self, N, base_ring=None, zeta=None, zeta_order=None,
names=None, integral=False):
"""
Create a key that uniquely determines a Dirichlet group.
TESTS::
sage: DirichletGroup.create_key(60)
(Cyclotomic Field of order 4 and degree 2, 60, None, None)
An example to illustrate that ``base_ring`` is a part of the key::
sage: k = DirichletGroup.create_key(2, base_ring=QQ); k
(Rational Field, 2, None, None)
sage: l = DirichletGroup.create_key(2, base_ring=CC); l
(Complex Field with 53 bits of precision, 2, None, None)
sage: k == l
False
sage: G = DirichletGroup.create_object(None, k); G
Group of Dirichlet characters modulo 2 with values in Rational Field
sage: H = DirichletGroup.create_object(None, l); H
Group of Dirichlet characters modulo 2 with values in Complex Field with 53 bits of precision
sage: G == H
False
        If ``base_ring`` were not a part of the key, the keys would compare
equal and the caching would be broken::
sage: k = k[1:]; k
(2, None, None)
sage: l = l[1:]; l
(2, None, None)
sage: k == l
True
sage: DirichletGroup(2, base_ring=QQ) is DirichletGroup(2, base_ring=CC)
False
If the base ring is not an integral domain, an error will be
raised if only ``zeta_order`` is specified::
sage: DirichletGroup(17, Integers(15))
Group of Dirichlet characters modulo 17 with values in Ring of integers modulo 15
sage: DirichletGroup(17, Integers(15), zeta_order=4)
Traceback (most recent call last):
...
ValueError: base ring (= Ring of integers modulo 15) must be an integral domain if only zeta_order is specified
sage: G = DirichletGroup(17, Integers(15), zeta=7); G
Group of Dirichlet characters modulo 17 with values in the group of order 4 generated by 7 in Ring of integers modulo 15
sage: G.order()
4
sage: DirichletGroup(-33)
Traceback (most recent call last):
...
ValueError: modulus should be positive
"""
modulus = rings.Integer(N)
if modulus <= 0:
raise ValueError('modulus should be positive')
if base_ring is None:
if not (zeta is None and zeta_order is None):
raise ValueError("zeta and zeta_order must be None if base_ring not specified")
e = rings.IntegerModRing(modulus).unit_group_exponent()
base_ring = rings.CyclotomicField(e)
if integral:
base_ring = base_ring.ring_of_integers()
if not is_Ring(base_ring):
raise TypeError("base_ring (= %s) must be a ring" % base_ring)
# If either zeta or zeta_order is given, compute the other.
if zeta is not None:
zeta = base_ring(zeta)
if zeta_order is None:
zeta_order = zeta.multiplicative_order()
elif zeta_order is not None:
if not base_ring.is_integral_domain():
raise ValueError("base ring (= %s) must be an integral domain if only zeta_order is specified"
% base_ring)
zeta_order = rings.Integer(zeta_order)
zeta = base_ring.zeta(zeta_order)
return (base_ring, modulus, zeta, zeta_order)
def create_object(self, version, key, **extra_args):
"""
Create the object from the key (extra arguments are ignored). This is
only called if the object was not found in the cache.
TESTS::
sage: K = CyclotomicField(4)
sage: DirichletGroup.create_object(None, (K, 60, K.gen(), 4))
Group of Dirichlet characters modulo 60 with values in the group of order 4 generated by zeta4 in Cyclotomic Field of order 4 and degree 2
"""
base_ring, modulus, zeta, zeta_order = key
return DirichletGroup_class(base_ring, modulus, zeta, zeta_order)
DirichletGroup = DirichletGroupFactory("DirichletGroup")
def is_DirichletGroup(x):
"""
Returns True if x is a Dirichlet group.
EXAMPLES::
sage: from sage.modular.dirichlet import is_DirichletGroup
sage: is_DirichletGroup(DirichletGroup(11))
True
sage: is_DirichletGroup(11)
False
sage: is_DirichletGroup(DirichletGroup(11).0)
False
"""
return isinstance(x, DirichletGroup_class)
class DirichletGroup_class(WithEqualityById, Parent):
"""
Group of Dirichlet characters modulo `N` with values in a ring `R`.
"""
Element = DirichletCharacter
def __init__(self, base_ring, modulus, zeta, zeta_order):
"""
Create a Dirichlet group.
Not to be called directly (use the factory function ``DirichletGroup``).
The ``DirichletGroup`` factory ensures that either both
``zeta`` and ``zeta_order`` are specified, or that both are
``None``. In the former case, it also ensures that ``zeta``
is an element of ``base_ring`` and that ``zeta_order`` is an
element of ``ZZ``.
TESTS::
sage: G = DirichletGroup(7, base_ring=Integers(9), zeta=2) # indirect doctest
sage: TestSuite(G).run()
sage: G.base() # check that Parent.__init__ has been called
Ring of integers modulo 9
sage: DirichletGroup(13) == DirichletGroup(13)
True
sage: DirichletGroup(13) == DirichletGroup(13, QQ)
False
"""
from sage.categories.groups import Groups
category = Groups().Commutative()
if base_ring.is_integral_domain() or base_ring.is_finite():
# The group of n-th roots of unity in the base ring is
# finite, and hence this Dirichlet group is finite too.
# In particular, it is finitely generated; the added
# FinitelyGenerated() here means that the group has a
# distinguished set of generators.
category = category.Finite().FinitelyGenerated()
Parent.__init__(self, base_ring, category=category)
self._zeta = zeta
self._zeta_order = zeta_order
self._modulus = modulus
self._integers = rings.IntegerModRing(modulus)
def __setstate__(self, state):
"""
Used for unpickling old instances.
TESTS::
sage: G = DirichletGroup(9)
sage: loads(dumps(G)) is G
True
"""
self._set_element_constructor()
if '_zeta_order' in state:
state['_zeta_order'] = rings.Integer(state['_zeta_order'])
super(DirichletGroup_class, self).__setstate__(state)
@property
def _module(self):
"""
Return the free module used to represent Dirichlet characters.
TESTS::
sage: DirichletGroup(12)._module
Vector space of dimension 2 over Ring of integers modulo 2
"""
return free_module.FreeModule(rings.IntegerModRing(self.zeta_order()),
len(self.unit_gens()))
@property
def _zeta_powers(self):
"""
Return a list of powers of the distinguished root of unity.
TESTS::
sage: DirichletGroup(5)._zeta_powers
[1, zeta4, -1, -zeta4]
"""
R = self.base_ring()
a = R.one()
w = [a]
zeta = self.zeta()
zeta_order = self.zeta_order()
if is_ComplexField(R):
for i in range(1, zeta_order):
a = a * zeta
a._set_multiplicative_order(zeta_order/gcd(zeta_order, i))
w.append(a)
else:
for i in range(1, zeta_order):
a = a * zeta
w.append(a)
return w
@property
def _zeta_dlog(self):
"""
Return a dictionary that can be used to compute discrete
logarithms in the value group of this Dirichlet group.
TESTS::
sage: DirichletGroup(5)._zeta_dlog
{-1: 2, -zeta4: 3, zeta4: 1, 1: 0}
"""
return {z: i for i, z in enumerate(self._zeta_powers)}
def change_ring(self, R, zeta=None, zeta_order=None):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a conversion map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
- ``zeta`` -- (optional) root of unity in ``R``
- ``zeta_order`` -- (optional) order of ``zeta``
EXAMPLES::
sage: G = DirichletGroup(7,QQ); G
Group of Dirichlet characters modulo 7 with values in Rational Field
sage: G.change_ring(CyclotomicField(6))
Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2
TESTS:
We test the case where `R` is a map (:trac:`18072`)::
sage: K.<i> = QuadraticField(-1)
sage: f = K.complex_embeddings()[0]
sage: D = DirichletGroup(5, K)
sage: D.change_ring(f)
Group of Dirichlet characters modulo 5 with values in Complex Field with 53 bits of precision
"""
if zeta is None and self._zeta is not None:
# A root of unity was explicitly given; we use it over the
# new base ring as well.
zeta = self._zeta
if zeta_order is None:
# We reuse _zeta_order if we know that it stays the
# same; otherwise it will be recomputed as the order
# of R(zeta) by the DirichletGroup factory.
p = R.characteristic()
if p == 0 or p.gcd(self._zeta_order) == 1:
zeta_order = self._zeta_order
else:
# No root of unity specified; use the same zeta_order
# (which may still be None).
zeta_order = self._zeta_order
# Map zeta to the new parent
if zeta is not None:
zeta = R(zeta)
if isinstance(R, Map):
R = R.codomain()
return DirichletGroup(self.modulus(), R,
zeta=zeta,
zeta_order=zeta_order)
def base_extend(self, R):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a *coercion* map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
EXAMPLES::
sage: G = DirichletGroup(7,QQ); G
Group of Dirichlet characters modulo 7 with values in Rational Field
sage: H = G.base_extend(CyclotomicField(6)); H
Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2
Note that the root of unity can change::
sage: H.zeta()
zeta6
This method (in contrast to :meth:`change_ring`) requires a
coercion map to exist::
sage: G.base_extend(ZZ)
Traceback (most recent call last):
...
TypeError: no coercion map from Rational Field to Integer Ring is defined
Base-extended Dirichlet groups do not silently get roots of
unity with smaller order than expected (:trac:`6018`)::
sage: G = DirichletGroup(10, QQ).base_extend(CyclotomicField(4))
sage: H = DirichletGroup(10, CyclotomicField(4))
sage: G is H
True
sage: G3 = DirichletGroup(31, CyclotomicField(3))
sage: G5 = DirichletGroup(31, CyclotomicField(5))
sage: K30 = CyclotomicField(30)
sage: G3.gen(0).base_extend(K30) * G5.gen(0).base_extend(K30)
Dirichlet character modulo 31 of conductor 31 mapping 3 |--> -zeta30^7 + zeta30^5 + zeta30^4 + zeta30^3 - zeta30 - 1
When a root of unity is specified, base extension still works
if the new base ring is not an integral domain::
sage: f = DirichletGroup(17, ZZ, zeta=-1).0
sage: g = f.base_extend(Integers(15))
sage: g(3)
14
sage: g.parent().zeta()
14
"""
if not (isinstance(R, Map)
or R.has_coerce_map_from(self.base_ring())):
raise TypeError("no coercion map from %s to %s is defined"
% (self.base_ring(), R))
return self.change_ring(R)
def _element_constructor_(self, x):
"""
Construct a Dirichlet character from `x`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: K = G.base_ring()
sage: G(1)
Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
sage: G([-1])
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1
sage: G([K.0])
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: G(0)
Traceback (most recent call last):
...
TypeError: cannot convert 0 to an element of Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4
sage: G = DirichletGroup(6)
sage: G(DirichletGroup(3).0)
Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
sage: G(DirichletGroup(15).0)
Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
sage: G(DirichletGroup(15).1)
Traceback (most recent call last):
...
TypeError: conductor must divide modulus
sage: H = DirichletGroup(16, QQ); H(DirichletGroup(16).1)
Traceback (most recent call last):
...
TypeError: Unable to coerce zeta4 to a rational
"""
R = self.base_ring()
try:
if x == R.one():
x = [R.one()] * len(self.unit_gens())
except (TypeError, ValueError, ArithmeticError):
pass
if isinstance(x, list): # list of values on each unit generator
return self.element_class(self, x)
elif not isinstance(x, DirichletCharacter):
raise TypeError("cannot convert %s to an element of %s" % (x, self))
elif not x.conductor().divides(self.modulus()):
raise TypeError("conductor must divide modulus")
a = []
for u in self.unit_gens():
v = u.lift()
# have to do this, since e.g., unit gens mod 11 are not units mod 22.
while x.modulus().gcd(v) != 1:
v += self.modulus()
a.append(R(x(v)))
return self.element_class(self, a)
def _coerce_map_from_(self, X):
"""
Decide whether there is a coercion map from `X`.
There is conversion between Dirichlet groups of different
moduli, but no coercion. This implies that Dirichlet
characters of different moduli do not compare as equal.
TESTS::
sage: trivial_character(6) == trivial_character(3) # indirect doctest
False
sage: trivial_character(3) == trivial_character(9)
False
sage: trivial_character(3) == DirichletGroup(3, QQ).0^2
True
"""
return (isinstance(X, DirichletGroup_class) and
self.modulus() == X.modulus() and
self.base_ring().has_coerce_map_from(X.base_ring()) and
(self._zeta is None or
(X._zeta is not None and
self.base_ring()(X._zeta) in self._zeta_powers)))
def __len__(self):
"""
Return the number of elements of this Dirichlet group. This is the
same as self.order().
EXAMPLES::
sage: len(DirichletGroup(20))
8
sage: len(DirichletGroup(20, QQ))
4
sage: len(DirichletGroup(20, GF(5)))
8
sage: len(DirichletGroup(20, GF(2)))
1
sage: len(DirichletGroup(20, GF(3)))
4
"""
return self.order()
def _repr_(self):
"""
Return a print representation of this group, which can be renamed.
EXAMPLES::
sage: G = DirichletGroup(11)
sage: repr(G) # indirect doctest
'Group of Dirichlet characters modulo 11 with values in Cyclotomic Field of order 10 and degree 4'
sage: G.rename('Dir(11)')
sage: G
Dir(11)
"""
s = "Group of Dirichlet characters modulo %s with values in " % self.modulus()
if self._zeta is not None:
s += "the group of order %s generated by %s in " % (self._zeta_order, self._zeta)
s += str(self.base_ring())
return s
@cached_method
def decomposition(self):
r"""
Returns the Dirichlet groups of prime power modulus corresponding
to primes dividing modulus.
(Note that if the modulus is 2 mod 4, there will be a "factor" of
`(\ZZ/2\ZZ)^*`, which is the trivial group.)
EXAMPLES::
sage: DirichletGroup(20).decomposition()
[
Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2,
Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2
]
sage: DirichletGroup(20,GF(5)).decomposition()
[
Group of Dirichlet characters modulo 4 with values in Finite Field of size 5,
Group of Dirichlet characters modulo 5 with values in Finite Field of size 5
]
"""
R = self.base_ring()
return Sequence([DirichletGroup(p**r,R) for p, r \
in factor(self.modulus())],
cr=True,
universe = cat.Objects())
def exponent(self):
"""
Return the exponent of this group.
EXAMPLES::
sage: DirichletGroup(20).exponent()
4
sage: DirichletGroup(20,GF(3)).exponent()
2
sage: DirichletGroup(20,GF(2)).exponent()
1
sage: DirichletGroup(37).exponent()
36
"""
return self.zeta_order()
@cached_method
def _automorphisms(self):
"""
Compute the automorphisms of self. These are always given by raising to
a power, so the return value is a list of integers.
At present this is only implemented if the base ring has characteristic 0 or a prime.
EXAMPLES::
sage: DirichletGroup(17)._automorphisms()
[1, 3, 5, 7, 9, 11, 13, 15]
sage: DirichletGroup(17, GF(11^4, 'a'))._automorphisms()
[1, 11, 121, 1331]
sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5))._automorphisms()
Traceback (most recent call last):
...
NotImplementedError: Automorphisms for finite non-field base rings not implemented
sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms()
Traceback (most recent call last):
...
NotImplementedError: Automorphisms for finite non-field base rings not implemented
"""
n = self.zeta_order()
R = self.base_ring()
p = R.characteristic()
if p == 0:
Auts = [e for e in range(1,n) if gcd(e,n) == 1]
else:
if not rings.ZZ(p).is_prime():
raise NotImplementedError("Automorphisms for finite non-field base rings not implemented")
# The automorphisms in characteristic p are
# k-th powering for
# k = 1, p, p^2, ..., p^(r-1),
# where p^r = 1 (mod n), so r is the mult order of p modulo n.
r = rings.IntegerModRing(n)(p).multiplicative_order()
Auts = [p**m for m in range(0,r)]
return Auts
def galois_orbits(self, v=None, reps_only=False, sort=True, check=True):
"""
Return a list of the Galois orbits of Dirichlet characters in self,
or in v if v is not None.
INPUT:
- ``v`` - (optional) list of elements of self
- ``reps_only`` - (optional: default False) if True
only returns representatives for the orbits.
- ``sort`` - (optional: default True) whether to sort
the list of orbits and the orbits themselves (slightly faster if
False).
- ``check`` - (optional, default: True) whether or not
to explicitly coerce each element of v into self.
The Galois group is the absolute Galois group of the prime subfield
of Frac(R). If R is not a domain, an error will be raised.
EXAMPLES::
sage: DirichletGroup(20).galois_orbits()
[
[Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -1],
...,
[Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1]
]
sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5)).galois_orbits()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2)).galois_orbits()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
"""
if v is None:
v = self.list()
else:
if check:
v = [self(x) for x in v]
G = []
seen_so_far = set([])
for x in v:
z = x.element()
e = tuple(z) # change when there are immutable vectors (and below)
if e in seen_so_far:
continue
orbit = x.galois_orbit(sort=sort)
if reps_only:
G.append(x)
else:
G.append(orbit)
for z in orbit:
seen_so_far.add(tuple(z.element()))
G = Sequence(G, cr=True)
if sort:
G.sort()
return G
def gen(self, n=0):
"""
Return the n-th generator of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.gen(0)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: G.gen(1)
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
sage: G.gen(2)
Traceback (most recent call last):
...
IndexError: n(=2) must be between 0 and 1
::
sage: G.gen(-1)
Traceback (most recent call last):
...
IndexError: n(=-1) must be between 0 and 1
"""
n = int(n)
g = self.gens()
if n<0 or n>=len(g):
raise IndexError("n(=%s) must be between 0 and %s"%(n,len(g)-1))
return g[n]
@cached_method
def gens(self):
"""
Returns generators of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.gens()
(Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
"""
g = []
ord = self.zeta_order()
M = self._module
zero = M(0)
orders = self.integers_mod().unit_group().gens_orders()
for i in range(len(self.unit_gens())):
z = zero.__copy__()
z[i] = ord//gcd(ord, orders[i])
g.append(self.element_class(self, z, check=False))
return tuple(g)
def integers_mod(self):
r"""
Returns the group of integers `\ZZ/N\ZZ`
where `N` is the modulus of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.integers_mod()
Ring of integers modulo 20
"""
return self._integers
__iter__ = multiplicative_iterator
def list(self):
"""
Return a list of the Dirichlet characters in this group.
EXAMPLES::
sage: DirichletGroup(5).list()
[Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4]
"""
return self._list_from_iterator()
def modulus(self):
"""
Returns the modulus of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.modulus()
20
"""
return self._modulus
def ngens(self):
"""
Returns the number of generators of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.ngens()
2
"""
return len(self.gens())
@cached_method
def order(self):
"""
Return the number of elements of self. This is the same as
len(self).
EXAMPLES::
sage: DirichletGroup(20).order()
8
sage: DirichletGroup(37).order()
36
"""
ord = rings.Integer(1)
for g in self.gens():
ord *= int(g.order())
return ord
def random_element(self):
"""
Return a random element of self.
The element is computed by multiplying a random power of each
generator together, where the power is between 0 and the order of
the generator minus 1, inclusive.
EXAMPLES::
sage: DirichletGroup(37).random_element()
Dirichlet character modulo 37 of conductor 37 mapping 2 |--> zeta36^4
sage: DirichletGroup(20).random_element()
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: DirichletGroup(60).random_element()
Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1
"""
e = self(1)
for i in range(self.ngens()):
g = self.gen(i)
n = random.randrange(g.order())
e *= g**n
return e
def unit_gens(self):
r"""
Returns the minimal generators for the units of
`(\ZZ/N\ZZ)^*`, where `N` is the
modulus of self.
EXAMPLES::
sage: DirichletGroup(37).unit_gens()
(2,)
sage: DirichletGroup(20).unit_gens()
(11, 17)
sage: DirichletGroup(60).unit_gens()
(31, 41, 37)
sage: DirichletGroup(20,QQ).unit_gens()
(11, 17)
"""
return self._integers.unit_gens()
@cached_method
def zeta(self):
"""
Return the chosen root of unity in the base ring.
EXAMPLES::
sage: DirichletGroup(37).zeta()
zeta36
sage: DirichletGroup(20).zeta()
zeta4
sage: DirichletGroup(60).zeta()
zeta4
sage: DirichletGroup(60,QQ).zeta()
-1
sage: DirichletGroup(60, GF(25,'a')).zeta()
2
"""
zeta = self._zeta
if zeta is None:
R = self.base_ring()
e = self._integers.unit_group_exponent()
for d in reversed(e.divisors()):
try:
zeta = R.zeta(d)
break
except ValueError:
pass
self.zeta_order.set_cache(d)
return zeta
@cached_method
def zeta_order(self):
"""
Return the order of the chosen root of unity in the base ring.
EXAMPLES::
sage: DirichletGroup(20).zeta_order()
4
sage: DirichletGroup(60).zeta_order()
4
sage: DirichletGroup(60, GF(25,'a')).zeta_order()
4
sage: DirichletGroup(19).zeta_order()
18
"""
order = self._zeta_order
if order is None:
order = self.zeta().multiplicative_order()
return order
| 2.40625 | 2 |
src/biotite/file.py | danijoo/biotite | 208 | 308 | <reponame>danijoo/biotite
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite"
__author__ = "<NAME>"
__all__ = ["File", "TextFile", "InvalidFileError"]
import abc
import io
import warnings
from .copyable import Copyable
import copy
class File(Copyable, metaclass=abc.ABCMeta):
"""
Base class for all file classes.
    The constructor creates an empty file that can be filled with data
    using the class-specific setter methods.
Conversely, the class method :func:`read()` reads a file from disk
(or a file-like object from other sources).
In order to write the instance content into a file the
:func:`write()` method is used.
"""
def __init__(self):
# Support for deprecated instance method 'read()':
# When creating an instance, the 'read()' class method is
# replaced by the instance method, so that subsequent
# 'read()' calls are delegated to the instance method
self.read = self._deprecated_read
@classmethod
@abc.abstractmethod
def read(cls, file):
"""
Parse a file (or file-like object).
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Returns
-------
file_object : File
An instance from the respective :class:`File` subclass
representing the parsed file.
"""
pass
def _deprecated_read(self, file, *args, **kwargs):
"""
Support for deprecated instance method :func:`read()`.
Internally this calls the :func:`read()` class method and
replaces the data in `self` with the data from the newly created
:class:`File` object
"""
warnings.warn(
"Instance method 'read()' is deprecated, "
"use class method instead",
DeprecationWarning
)
cls = type(self)
new_file = cls.read(file, *args, **kwargs)
self.__dict__.update(new_file.__dict__)
@abc.abstractmethod
def write(self, file):
"""
Write the contents of this :class:`File` object into a file.
Parameters
----------
        file : file-like object or str
The file to be written to.
Alternatively a file path can be supplied.
"""
pass
class TextFile(File, metaclass=abc.ABCMeta):
"""
    Base class for all line-based text files.
When reading a file, the text content is saved as list of strings,
one for each line.
When writing a file, this list is written into the file.
Attributes
----------
lines : list
List of string representing the lines in the text file.
PROTECTED: Do not modify from outside.
"""
def __init__(self):
super().__init__()
self.lines = []
@classmethod
def read(cls, file, *args, **kwargs):
# File name
if isinstance(file, str):
with open(file, "r") as f:
lines = f.read().splitlines()
# File object
else:
if not is_text(file):
raise TypeError("A file opened in 'text' mode is required")
lines = file.read().splitlines()
file_object = cls(*args, **kwargs)
file_object.lines = lines
return file_object
@staticmethod
def read_iter(file):
"""
Create an iterator over each line of the given text file.
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Yields
------
line : str
The current line in the file.
"""
# File name
if isinstance(file, str):
with open(file, "r") as f:
while True:
line = f.readline()
if not line:
break
yield line
# File object
else:
if not is_text(file):
raise TypeError("A file opened in 'text' mode is required")
while True:
line = file.readline()
if not line:
break
yield line
def write(self, file):
"""
Write the contents of this object into a file
(or file-like object).
Parameters
----------
        file : file-like object or str
The file to be written to.
Alternatively a file path can be supplied.
"""
if isinstance(file, str):
with open(file, "w") as f:
f.write("\n".join(self.lines) + "\n")
else:
if not is_text(file):
raise TypeError("A file opened in 'text' mode is required")
file.write("\n".join(self.lines) + "\n")
def __copy_fill__(self, clone):
super().__copy_fill__(clone)
clone.lines = copy.copy(self.lines)
def __str__(self):
return("\n".join(self.lines))
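# Usage sketch (hypothetical subclass): concrete TextFile subclasses are read from
# and written to disk via the read() class method and the write() instance method:
#     f = SomeTextFile.read("input.txt")    # SomeTextFile is an assumed subclass
#     f.write("output.txt")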
class InvalidFileError(Exception):
"""
Indicates that the file is not suitable for the requested action,
either because the file does not contain the required data or
because the file is malformed.
"""
pass
def wrap_string(text, width):
"""
A much simpler and hence much more efficient version of
`textwrap.wrap()`.
This function simply wraps the given `text` after `width`
characters, ignoring sentences, whitespaces, etc.
"""
lines = []
for i in range(0, len(text), width):
lines.append(text[i : i+width])
return lines
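# For example (sketch of the behavior implemented above):
#     wrap_string("abcdef", 4) returns ["abcd", "ef"]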
def is_binary(file):
if isinstance(file, io.BufferedIOBase):
return True
# for file wrappers, e.g. 'TemporaryFile'
elif hasattr(file, "file") and isinstance(file.file, io.BufferedIOBase):
return True
else:
return False
def is_text(file):
if isinstance(file, io.TextIOBase):
return True
# for file wrappers, e.g. 'TemporaryFile'
elif hasattr(file, "file") and isinstance(file.file, io.TextIOBase):
return True
else:
return False
| 2.90625 | 3 |
src/cms/views/push_notifications/push_notification_sender.py | mckinly/cms-django | 0 | 309 | """
Module for sending Push Notifications
"""
import logging
import requests
from django.conf import settings
from ...models import PushNotificationTranslation
from ...models import Region
from ...constants import push_notifications as pnt_const
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class PushNotificationSender:
"""
Sends push notifications via FCM HTTP API.
Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json
"""
fcm_url = "https://fcm.googleapis.com/fcm/send"
def __init__(self, push_notification):
"""
Load relevant push notification translations and prepare content for sending
:param push_notification: the push notification that should be sent
:type push_notification: ~cms.models.push_notifications.push_notification.PushNotification
"""
self.push_notification = push_notification
self.prepared_pnts = []
self.primary_pnt = PushNotificationTranslation.objects.get(
push_notification=push_notification,
language=push_notification.region.default_language,
)
if len(self.primary_pnt.title) > 0:
self.prepared_pnts.append(self.primary_pnt)
self.load_secondary_pnts()
self.auth_key = self.get_auth_key()
def load_secondary_pnts(self):
"""
Load push notification translations in other languages
"""
secondary_pnts = PushNotificationTranslation.objects.filter(
push_notification=self.push_notification
).exclude(id=self.primary_pnt.id)
for secondary_pnt in secondary_pnts:
if (
secondary_pnt.title == ""
and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode
):
secondary_pnt.title = self.primary_pnt.title
secondary_pnt.text = self.primary_pnt.text
self.prepared_pnts.append(secondary_pnt)
            elif len(secondary_pnt.title) > 0:
self.prepared_pnts.append(secondary_pnt)
def is_valid(self):
"""
Check if all data for sending push notifications is available
:return: all prepared push notification translations are valid
:rtype: bool
"""
if self.auth_key is None:
return False
for pnt in self.prepared_pnts:
if not pnt.title:
logger.debug("%r has no title", pnt)
return False
return True
@staticmethod
def get_auth_key():
"""
Get FCM API auth key
:return: FCM API auth key
:rtype: str
"""
fcm_auth_config_key = "fcm_auth_key"
        auth_key = getattr(settings, "FCM_KEY", None)
        if auth_key:
            logger.debug("Got FCM auth key from settings")
            return auth_key
        logger.warning(
            "Could not get %r from the configuration", fcm_auth_config_key
)
return None
def send_pn(self, pnt):
"""
Send single push notification translation
:param pnt: the prepared push notification translation to be sent
:type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation
:return: Response of the :mod:`requests` library
:rtype: ~requests.Response
"""
if settings.DEBUG:
region_slug = Region.objects.get(
id=settings.TEST_BLOG_ID
            ).slug  # Test environment - prevent sending PNs to actual users in development
else:
region_slug = self.push_notification.region.slug
payload = {
"to": f"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}",
"notification": {"title": pnt.title, "body": pnt.text},
"data": {
"lanCode": pnt.language.slug,
"city": self.push_notification.region.slug,
},
}
headers = {"Authorization": f"key={self.auth_key}"}
return requests.post(self.fcm_url, json=payload, headers=headers)
# pylint: disable=too-many-arguments
def send_all(self):
"""
Send all prepared push notification translations
:return: Success status
:rtype: bool
"""
status = True
for pnt in self.prepared_pnts:
res = self.send_pn(pnt)
if res.status_code == 200:
logger.info("%r sent, FCM id: %r", pnt, res.json()["message_id"])
else:
status = False
logger.warning(
"Received invalid response from FCM for %r, status: %r, body: %r",
pnt,
res.status_code,
res.text,
)
return status
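# Usage sketch (assumes an existing PushNotification instance `push_notification`):
#     sender = PushNotificationSender(push_notification)
#     if sender.is_valid():
#         sender.send_all()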
| 2.4375 | 2 |
tests/functional/index/create/test_03.py | reevespaul/firebird-qa | 0 | 310 | #coding:utf-8
#
# id: functional.index.create.03
# title: CREATE ASC INDEX
# description: CREATE ASC INDEX
#
# Dependencies:
# CREATE DATABASE
# CREATE TABLE
# SHOW INDEX
# tracker_id:
# min_versions: []
# versions: 1.0
# qmid: functional.index.create.create_index_03
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 1.0
# resources: None
substitutions_1 = []
init_script_1 = """CREATE TABLE t( a INTEGER);
commit;"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """CREATE ASC INDEX test ON t(a);
SHOW INDEX test;"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """TEST INDEX ON T(A)"""
@pytest.mark.version('>=1.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| 1.804688 | 2 |
app/logic/httpcommon/Page.py | imvu/bluesteel | 10 | 311 | <filename>app/logic/httpcommon/Page.py<gh_stars>1-10
""" Page object file """
class Page():
    """ Page object; it contains information about the page we are referring to: index, items per page, etc. """
page_index = 0
items_per_page = 0
def __init__(self, items_per_page, page_index):
""" Creates the page """
self.page_index = int(page_index)
self.items_per_page = int(items_per_page)
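# Usage sketch: a page holding 20 items per page, pointing at page index 2.
#     page = Page(items_per_page=20, page_index=2)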
| 2.875 | 3 |
models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res18_market1501_176_80_1.1G_1.3/code/core/data_manager.py | guochunhe/Vitis-AI | 1 | 312 | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
import glob
import re
from os import path as osp
from .market1501 import Market1501
__factory = {
'market1501': Market1501
}
def get_names():
return list(__factory.keys())
def init_dataset(name, *args, **kwargs):
if name not in __factory.keys():
raise KeyError("Unknown datasets: {}".format(name))
return __factory[name](*args, **kwargs)
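# Usage sketch: extra arguments are forwarded to the dataset class; the keyword
# arguments accepted by Market1501 (e.g. a dataset root) are an assumption here.
#     names = get_names()                          # ['market1501']
#     dataset = init_dataset('market1501', root='./data')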
| 1.828125 | 2 |
PyBank/.ipynb_checkpoints/Pymain-checkpoint.py | yash5OG/PythonChallengeW3-Y5 | 0 | 313 | {
"cells": [
{
"cell_type": "code",
"execution_count": 64,
"metadata": {},
"outputs": [],
"source": [
"# Import libraries\n",
"import os, csv"
]
},
{
"cell_type": "code",
"execution_count": 65,
"metadata": {},
"outputs": [],
"source": [
"#variables for the script\n",
"months = [] #list of months\n",
"pl =[] #list of monthly PL\n",
"pl_changes = [] #list of P&L Changes\n",
"n_months = 0 #count of months\n",
"pl_total = 0 #total of P&L\n",
"plc = 0 #variable to track PL changes\n",
"avg_pl_change = 0 #average of changes in PL\n",
"maxpl = 0 #maximum increase in profits\n",
"minpl = 0 #maximum decrease in losses\n",
"max_i = 0 #index for max pl\n",
"min_i = 0 #index for min pl\n",
"\n",
"#read the resource file\n",
"bankcsv = os.path.join(\".\", \"Resources\", \"budget_data.csv\") #set path\n",
"\n",
"\n",
"#read file\n",
"with open(bankcsv, 'r') as csv_file:\n",
" csv_reader = csv.reader(csv_file,delimiter=\",\")\n",
" header = next(csv_reader)\n",
" \n",
" #for loop to update the counters and lists\n",
" for row in csv_reader:\n",
" n_months += 1\n",
" pl_total += int(row[1])\n",
" pl.append(row[1])\n",
" months.append(row[0])"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [],
"source": [
"# loop to track the PL change values\n",
"pl_changes = [] \n",
"plc = int(pl[0])\n",
"for i in range(1, len(pl)):\n",
" pl_changes.append(int(pl[i]) - plc)\n",
" plc = int(pl[i])\n",
" i += 1\n",
"#print(pl_changes)"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [],
"source": [
"#calculate the average PL Changes, max and min\n",
"avg_pl_change = sum(pl_changes) / len(pl_changes)\n",
"maxpl = max(pl_changes)\n",
"minpl = min(pl_changes)\n",
"#print(avg_pl_change, maxpl, minpl)\n",
"#print(pl_changes.index(maxpl))\n",
"#print(len(pl_changes))"
]
},
{
"cell_type": "code",
"execution_count": 68,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Financial Analysis\n",
"---------------------------------------------------------------------\n",
"Total Months: 86\n",
"Total: $38382578\n",
"Average Change: $-2315.12\n",
"Greatest Increase in Profits: Feb-2012 ($1926159)\n",
"Greatest Decrease in Profits: Sep-2013 ($-2196167)\n"
]
}
],
"source": [
"#find dates for max and min PL changes\n",
"max_i = pl_changes.index(maxpl) +1 #adding +1 since the changes are calculated one row above\n",
"min_i = pl_changes.index(minpl) +1\n",
"\n",
"maxmonth = months[max_i]\n",
"minmonth = months[min_i]\n",
"\n",
"#print output to the terminal\n",
"\n",
"print(\"Financial Analysis\")\n",
"print(\"-\"*69)\n",
"print(f\"Total Months: {n_months}\")\n",
"print(f\"Total: ${round(pl_total,2)}\")\n",
"print(f\"Average Change: ${round(avg_pl_change,2)}\")\n",
"print(f\"Greatest Increase in Profits: {maxmonth} (${maxpl})\")\n",
"print(f\"Greatest Decrease in Profits: {minmonth} (${minpl})\")\n"
]
},
{
"cell_type": "code",
"execution_count": 69,
"metadata": {},
"outputs": [],
"source": [
"# write summary to txt file\n",
"output = os.path.join(\".\",\"Analysis\", \"summary.txt\")\n",
"\n",
"# use \"\\n\" to create a new line\n",
"with open(output, 'w') as output:\n",
" output.write(\"Financial Analysis\\n\")\n",
" output.write(\"-\"*69 + \"\\n\")\n",
" output.write(f\"Total Months: {n_months}\\n\")\n",
" output.write(f\"Total: ${round(pl_total,2)}\\n\")\n",
" output.write(f\"Average Change: ${round(avg_pl_change,2)}\\n\")\n",
" output.write(f\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\n\")\n",
" output.write(f\"Greatest Decrease in Profits: {minmonth} (${minpl})\\n\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 2.390625 | 2 |
xlib/api/win32/oleaut32/oleaut32.py | jkennedyvz/DeepFaceLive | 0 | 314 | <reponame>jkennedyvz/DeepFaceLive<gh_stars>0
from ctypes import POINTER, Structure
from ..wintypes import VARIANT, dll_import
@dll_import('OleAut32')
def VariantInit( pvarg : POINTER(VARIANT) ) -> None: ...
| 1.46875 | 1 |
azure-devops/azext_devops/vstsCompressed/service_hooks/v4_0/models/__init__.py | vijayraavi/azure-devops-cli-extension | 0 | 315 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import Consumer
from .models import ConsumerAction
from .models import Event
from .models import EventTypeDescriptor
from .models import ExternalConfigurationDescriptor
from .models import FormattedEventMessage
from .models import IdentityRef
from .models import InputDescriptor
from .models import InputFilter
from .models import InputFilterCondition
from .models import InputValidation
from .models import InputValue
from .models import InputValues
from .models import InputValuesError
from .models import InputValuesQuery
from .models import Notification
from .models import NotificationDetails
from .models import NotificationResultsSummaryDetail
from .models import NotificationsQuery
from .models import NotificationSummary
from .models import Publisher
from .models import PublisherEvent
from .models import PublishersQuery
from .models import ReferenceLinks
from .models import ResourceContainer
from .models import SessionToken
from .models import Subscription
from .models import SubscriptionsQuery
from .models import VersionedResource
__all__ = [
'Consumer',
'ConsumerAction',
'Event',
'EventTypeDescriptor',
'ExternalConfigurationDescriptor',
'FormattedEventMessage',
'IdentityRef',
'InputDescriptor',
'InputFilter',
'InputFilterCondition',
'InputValidation',
'InputValue',
'InputValues',
'InputValuesError',
'InputValuesQuery',
'Notification',
'NotificationDetails',
'NotificationResultsSummaryDetail',
'NotificationsQuery',
'NotificationSummary',
'Publisher',
'PublisherEvent',
'PublishersQuery',
'ReferenceLinks',
'ResourceContainer',
'SessionToken',
'Subscription',
'SubscriptionsQuery',
'VersionedResource',
]
| 1.203125 | 1 |
pizdyuk/pzd_logging.py | DeathAdder1999/Pizdyuk | 1 | 316 | <filename>pizdyuk/pzd_logging.py<gh_stars>1-10
import datetime as date
from pzd_utils import datetime_to_str
class PizdyukLogger:
__logger = None
def __init__(self):
global __logger
if self.__logger:
raise RuntimeError("Logger instance already exists")
@staticmethod
def get_logger():
global __logger
if not PizdyukLogger._PizdyukLogger__logger:
PizdyukLogger._PizdyukLogger__logger = PizdyukLogger()
return PizdyukLogger._PizdyukLogger__logger
def log_info(self, msg):
self.__log(msg, "INFO")
def log_warning(self, warning):
self.__log(warning, "WARNING")
def log_error(self, error):
self.__log(error, "ERROR")
def log_fatal(self, fatal):
self.__log(fatal, "FATAL")
def __log(self, msg, lvl):
date_str = datetime_to_str(date.datetime.now())
log = "[{0}] [{1}] {2}".format(lvl, date_str, msg)
print(log)
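# Usage sketch:
#     logger = PizdyukLogger.get_logger()
#     logger.log_info("Simulation started")
#     logger.log_error("Something went wrong")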
| 2.78125 | 3 |
beta_reconstruction/crystal_relations.py | LightForm-group/beta-reconstruction | 0 | 317 | import numpy as np
from defdap.quat import Quat
hex_syms = Quat.symEqv("hexagonal")
# subset of hexagonal symmetries that give unique orientations when the
# Burgers transformation is applied
unq_hex_syms = [
hex_syms[0],
hex_syms[5],
hex_syms[4],
hex_syms[2],
hex_syms[10],
hex_syms[11]
]
cubic_syms = Quat.symEqv("cubic")
# subset of cubic symmetries that give unique orientations when the
# Burgers transformation is applied
unq_cub_syms = [
cubic_syms[0],
cubic_syms[7],
cubic_syms[9],
cubic_syms[1],
cubic_syms[22],
cubic_syms[16],
cubic_syms[12],
cubic_syms[15],
cubic_syms[4],
cubic_syms[8],
cubic_syms[21],
cubic_syms[20]
]
# HCP -> BCC
burg_eulers = np.array([135, 90, 354.74]) * np.pi / 180
burg_trans = Quat.fromEulerAngles(*burg_eulers).conjugate
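# Usage sketch (the composition order below is an assumption about the defdap Quat
# convention): a candidate beta (BCC) orientation from an alpha (HCP) orientation
# `alpha_ori` might be obtained as
#     beta_ori = burg_trans * alpha_ori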
| 2.453125 | 2 |
a2.py | Changhong-Jiang/test | 0 | 318 | print('222')
| 1.429688 | 1 |
app/api/v1/views/auth_views.py | emdeechege/Questionaire-API | 0 | 319 | <filename>app/api/v1/views/auth_views.py
from flask import jsonify, Blueprint, request, json, make_response
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime
from ..utils.validators import Validation
from ..models.auth_models import Users
v1_auth_blueprint = Blueprint('auth', __name__, url_prefix='/api/v1')
USER = Users()
VALIDATOR = Validation()
@v1_auth_blueprint.route('/signup', methods=['POST'])
def signup():
"""View that controls creation of new users"""
try:
data = request.get_json()
except:
return jsonify({
"status": 400,
"message": "Invalid input"
}), 400
firstname = data.get('firstname')
lastname = data.get('lastname')
othername = data.get('othername')
email = data.get('email')
phone_number = data.get('phone_number')
username = data.get('username')
is_admin = data.get('is_admin')
password = data.get('password')
if not firstname or not firstname.split():
return make_response(jsonify({
"status": 400,
"message": "Firstname is required"
})), 400
if not lastname or not lastname.split():
return make_response(jsonify({
"status": 400,
"message": "Lastname is required"
})), 400
if not email or not email.split():
return make_response(jsonify({
"status": 400,
"message": "Email is required"
})), 400
if not phone_number:
return make_response(jsonify({
"status": 400,
"message": "Phone number is required"
})), 400
if not username or not username.split():
return make_response(jsonify({
"status": 400,
"message": "Username is required"
})), 400
if not password or not password.split():
return make_response(jsonify({
"status": 400,
"message": "Password is required"
})), 400
if not VALIDATOR.validate_phone_number(phone_number):
return jsonify({
"status": 400,
"message": "Please input valid phone number"
}), 400
if VALIDATOR.validate_password(password):
return jsonify({
"status": 400,
"message": "Password not valid"
}), 400
if not VALIDATOR.validate_email(email):
return jsonify({
"status": 400,
"message": "Invalid email"
}), 400
if VALIDATOR.username_exists(username):
return jsonify({
"status": 400,
"message": "Username exists"
}), 400
if VALIDATOR.email_exists(email):
return jsonify({
"status": 400,
"message": "Email exists"
}), 400
password = generate_password_hash(
password, method='pbkdf2:sha256', salt_length=8)
res = USER.signup(
firstname, lastname, othername, email, phone_number, username, is_admin, password)
return jsonify({
"status": 201,
"data": [{
"firstname": firstname,
"lastname": lastname,
"othername": othername,
"email": email,
"phone_number": phone_number,
"username": username,
"is_admin": is_admin
}]
}), 201
@v1_auth_blueprint.route('/login', methods=['POST'])
def login():
""" A view to control users login """
try:
data = request.get_json()
except:
return make_response(jsonify({
"status": 400,
"message": "Wrong input"
})), 400
username = data.get('username')
password = data.get('password')
if not username:
return make_response(jsonify({
"status": 400,
"message": "Username is required"
})), 400
if not password:
return make_response(jsonify({
"status": 400,
"message": "Password is required"
})), 400
if not VALIDATOR.username_exists(username):
return jsonify({
"status": 404,
"message": "User does not exist"
}), 404
    auth_token = USER.generate_auth_token(username)
    return make_response(jsonify({
        "status": 200,
        "message": 'Logged in successfully',
        "token": auth_token
    })), 200
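# Registration sketch (illustrative; the application-factory function below is
# an assumption, not part of this file). The blueprint is expected to be wired
# into the Flask app, after which the endpoints are served under
# /api/v1/signup and /api/v1/login:
#
#     from flask import Flask
#
#     def create_app():
#         app = Flask(__name__)
#         app.register_blueprint(v1_auth_blueprint)
#         return app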
| 2.578125 | 3 |
pint/testsuite/test_definitions.py | s-avni/pint | 0 | 320 | # -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, print_function, absolute_import
from pint.util import (UnitsContainer)
from pint.converters import (ScaleConverter, OffsetConverter)
from pint.definitions import (Definition, PrefixDefinition, UnitDefinition,
DimensionDefinition, AliasDefinition)
from pint.testsuite import BaseTestCase
class TestDefinition(BaseTestCase):
def test_invalid(self):
self.assertRaises(ValueError, Definition.from_string, 'x = [time] * meter')
self.assertRaises(ValueError, Definition.from_string, '[x] = [time] * meter')
def test_prefix_definition(self):
for definition in ('m- = 1e-3', 'm- = 10**-3', 'm- = 0.001'):
x = Definition.from_string(definition)
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'm')
self.assertEqual(x.aliases, ())
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(0.001), 1)
self.assertEqual(str(x), 'm')
x = Definition.from_string('kilo- = 1e-3 = k-')
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'kilo')
self.assertEqual(x.aliases, ())
self.assertEqual(x.symbol, 'k')
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(.001), 1)
x = Definition.from_string('kilo- = 1e-3 = k- = anotherk-')
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'kilo')
self.assertEqual(x.aliases, ('anotherk', ))
self.assertEqual(x.symbol, 'k')
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(.001), 1)
def test_baseunit_definition(self):
x = Definition.from_string('meter = [length]')
self.assertIsInstance(x, UnitDefinition)
self.assertTrue(x.is_base)
self.assertEqual(x.reference, UnitsContainer({'[length]': 1}))
def test_unit_definition(self):
x = Definition.from_string('coulomb = ampere * second')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 1)
self.assertEqual(x.reference, UnitsContainer(ampere=1, second=1))
x = Definition.from_string('faraday = 96485.3399 * coulomb')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 96485.3399)
self.assertEqual(x.reference, UnitsContainer(coulomb=1))
x = Definition.from_string('degF = 9 / 5 * kelvin; offset: 255.372222')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, OffsetConverter)
self.assertEqual(x.converter.scale, 9/5)
self.assertEqual(x.converter.offset, 255.372222)
self.assertEqual(x.reference, UnitsContainer(kelvin=1))
x = Definition.from_string('turn = 6.28 * radian = _ = revolution = = cycle = _')
self.assertIsInstance(x, UnitDefinition)
self.assertEqual(x.name, 'turn')
self.assertEqual(x.aliases, ('revolution', 'cycle'))
self.assertEqual(x.symbol, 'turn')
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 6.28)
self.assertEqual(x.reference, UnitsContainer(radian=1))
def test_dimension_definition(self):
x = DimensionDefinition('[time]', '', (), converter='')
self.assertTrue(x.is_base)
self.assertEqual(x.name, '[time]')
x = Definition.from_string('[speed] = [length]/[time]')
self.assertIsInstance(x, DimensionDefinition)
self.assertEqual(x.reference, UnitsContainer({'[length]': 1, '[time]': -1}))
def test_alias_definition(self):
x = Definition.from_string("@alias meter = metro = metr")
self.assertIsInstance(x, AliasDefinition)
self.assertEqual(x.name, "meter")
self.assertEqual(x.aliases, ("metro", "metr"))
| 2.359375 | 2 |
electrum/dnssec.py | Jesusown/electrum | 5,905 | 321 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Check DNSSEC trust chain.
# Todo: verify expiration dates
#
# Based on
# http://backreference.org/2010/11/17/dnssec-verification-with-dig/
# https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py
import dns
import dns.name
import dns.query
import dns.dnssec
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdtypes.ANY.NS
import dns.rdtypes.ANY.CNAME
import dns.rdtypes.ANY.DLV
import dns.rdtypes.ANY.DNSKEY
import dns.rdtypes.ANY.DS
import dns.rdtypes.ANY.NSEC
import dns.rdtypes.ANY.NSEC3
import dns.rdtypes.ANY.NSEC3PARAM
import dns.rdtypes.ANY.RRSIG
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.TXT
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA
from .logging import get_logger
_logger = get_logger(__name__)
# hard-coded trust anchors (root KSKs)
trust_anchors = [
# KSK-2017:
    dns.rrset.from_text('.', 1, 'IN', 'DNSKEY', '<KEY>'),
# KSK-2010:
dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W<KEY>S Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='),
]
def _check_query(ns, sub, _type, keys):
q = dns.message.make_query(sub, _type, want_dnssec=True)
response = dns.query.tcp(q, ns, timeout=5)
assert response.rcode() == 0, 'No answer'
answer = response.answer
assert len(answer) != 0, ('No DNS record found', sub, _type)
assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
if answer[0].rdtype == dns.rdatatype.RRSIG:
rrsig, rrset = answer
elif answer[1].rdtype == dns.rdatatype.RRSIG:
rrset, rrsig = answer
else:
raise Exception('No signature set in record')
if keys is None:
keys = {dns.name.from_text(sub):rrset}
dns.dnssec.validate(rrset, rrsig, keys)
return rrset
def _get_and_validate(ns, url, _type):
# get trusted root key
root_rrset = None
for dnskey_rr in trust_anchors:
try:
# Check if there is a valid signature for the root dnskey
root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr})
break
except dns.dnssec.ValidationFailure:
# It's OK as long as one key validates
continue
if not root_rrset:
raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS')
keys = {dns.name.root: root_rrset}
# top-down verification
parts = url.split('.')
for i in range(len(parts), 0, -1):
sub = '.'.join(parts[i-1:])
name = dns.name.from_text(sub)
# If server is authoritative, don't fetch DNSKEY
query = dns.message.make_query(sub, dns.rdatatype.NS)
response = dns.query.udp(query, ns, 3)
assert response.rcode() == dns.rcode.NOERROR, "query error"
rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0]
rr = rrset[0]
if rr.rdtype == dns.rdatatype.SOA:
continue
# get DNSKEY (self-signed)
rrset = _check_query(ns, sub, dns.rdatatype.DNSKEY, None)
# get DS (signed by parent)
ds_rrset = _check_query(ns, sub, dns.rdatatype.DS, keys)
# verify that a signed DS validates DNSKEY
for ds in ds_rrset:
for dnskey in rrset:
htype = 'SHA256' if ds.digest_type == 2 else 'SHA1'
good_ds = dns.dnssec.make_ds(name, dnskey, htype)
if ds == good_ds:
break
else:
continue
break
else:
raise Exception("DS does not match DNSKEY")
# set key for next iteration
keys = {name: rrset}
# get TXT record (signed by zone)
rrset = _check_query(ns, url, _type, keys)
return rrset
def query(url, rtype):
# 8.8.8.8 is Google's public DNS server
nameservers = ['8.8.8.8']
ns = nameservers[0]
try:
out = _get_and_validate(ns, url, rtype)
validated = True
except Exception as e:
_logger.info(f"DNSSEC error: {repr(e)}")
out = dns.resolver.resolve(url, rtype)
validated = False
return out, validated
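# Usage sketch (illustrative; the domain is only an example): resolve a TXT
# record and check whether the DNSSEC chain validated.
#
#     rrset, validated = query('example.com', dns.rdatatype.TXT)
#     for rr in rrset:
#         print(rr, validated)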
| 1.476563 | 1 |
specs/d3d11.py | ds-hwang/apitrace | 1 | 322 | ##########################################################################
#
# Copyright 2012 <NAME>
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from dxgi import *
from d3dcommon import *
from d3d11sdklayers import *
HRESULT = MAKE_HRESULT([
"D3D11_ERROR_FILE_NOT_FOUND",
"D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS",
"D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS",
"D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD",
"D3DERR_INVALIDCALL",
"D3DERR_WASSTILLDRAWING",
])
ID3D11DepthStencilState = Interface("ID3D11DepthStencilState", ID3D11DeviceChild)
ID3D11BlendState = Interface("ID3D11BlendState", ID3D11DeviceChild)
ID3D11RasterizerState = Interface("ID3D11RasterizerState", ID3D11DeviceChild)
ID3D11Resource = Interface("ID3D11Resource", ID3D11DeviceChild)
ID3D11Buffer = Interface("ID3D11Buffer", ID3D11Resource)
ID3D11Texture1D = Interface("ID3D11Texture1D", ID3D11Resource)
ID3D11Texture2D = Interface("ID3D11Texture2D", ID3D11Resource)
ID3D11Texture3D = Interface("ID3D11Texture3D", ID3D11Resource)
ID3D11View = Interface("ID3D11View", ID3D11DeviceChild)
ID3D11ShaderResourceView = Interface("ID3D11ShaderResourceView", ID3D11View)
ID3D11RenderTargetView = Interface("ID3D11RenderTargetView", ID3D11View)
ID3D11DepthStencilView = Interface("ID3D11DepthStencilView", ID3D11View)
ID3D11UnorderedAccessView = Interface("ID3D11UnorderedAccessView", ID3D11View)
ID3D11VertexShader = Interface("ID3D11VertexShader", ID3D11DeviceChild)
ID3D11HullShader = Interface("ID3D11HullShader", ID3D11DeviceChild)
ID3D11DomainShader = Interface("ID3D11DomainShader", ID3D11DeviceChild)
ID3D11GeometryShader = Interface("ID3D11GeometryShader", ID3D11DeviceChild)
ID3D11PixelShader = Interface("ID3D11PixelShader", ID3D11DeviceChild)
ID3D11ComputeShader = Interface("ID3D11ComputeShader", ID3D11DeviceChild)
ID3D11InputLayout = Interface("ID3D11InputLayout", ID3D11DeviceChild)
ID3D11SamplerState = Interface("ID3D11SamplerState", ID3D11DeviceChild)
ID3D11Asynchronous = Interface("ID3D11Asynchronous", ID3D11DeviceChild)
ID3D11Query = Interface("ID3D11Query", ID3D11Asynchronous)
ID3D11Predicate = Interface("ID3D11Predicate", ID3D11Query)
ID3D11Counter = Interface("ID3D11Counter", ID3D11Asynchronous)
ID3D11ClassInstance = Interface("ID3D11ClassInstance", ID3D11DeviceChild)
ID3D11ClassLinkage = Interface("ID3D11ClassLinkage", ID3D11DeviceChild)
ID3D11CommandList = Interface("ID3D11CommandList", ID3D11DeviceChild)
ID3D11DeviceContext = Interface("ID3D11DeviceContext", ID3D11DeviceChild)
ID3D11Device = Interface("ID3D11Device", IUnknown)
D3D11_INPUT_CLASSIFICATION = Enum("D3D11_INPUT_CLASSIFICATION", [
"D3D11_INPUT_PER_VERTEX_DATA",
"D3D11_INPUT_PER_INSTANCE_DATA",
])
D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [
"D3D11_APPEND_ALIGNED_ELEMENT",
])
D3D11_INPUT_ELEMENT_DESC = Struct("D3D11_INPUT_ELEMENT_DESC", [
(LPCSTR, "SemanticName"),
(UINT, "SemanticIndex"),
(DXGI_FORMAT, "Format"),
(UINT, "InputSlot"),
(D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, "AlignedByteOffset"),
(D3D11_INPUT_CLASSIFICATION, "InputSlotClass"),
(UINT, "InstanceDataStepRate"),
])
D3D11_FILL_MODE = Enum("D3D11_FILL_MODE", [
"D3D11_FILL_WIREFRAME",
"D3D11_FILL_SOLID",
])
D3D11_PRIMITIVE_TOPOLOGY = Enum("D3D11_PRIMITIVE_TOPOLOGY", [
"D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED",
"D3D11_PRIMITIVE_TOPOLOGY_POINTLIST",
"D3D11_PRIMITIVE_TOPOLOGY_LINELIST",
"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP",
"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_27_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_28_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_29_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_30_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST",
])
D3D11_PRIMITIVE = Enum("D3D11_PRIMITIVE", [
"D3D11_PRIMITIVE_UNDEFINED",
"D3D11_PRIMITIVE_POINT",
"D3D11_PRIMITIVE_LINE",
"D3D11_PRIMITIVE_TRIANGLE",
"D3D11_PRIMITIVE_LINE_ADJ",
"D3D11_PRIMITIVE_TRIANGLE_ADJ",
"D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_5_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_6_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_7_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_8_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_9_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_10_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_11_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_12_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_13_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_14_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_15_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_16_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_17_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_18_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_19_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_20_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_21_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_22_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_23_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_24_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_25_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_27_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_30_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_32_CONTROL_POINT_PATCH",
])
D3D11_CULL_MODE = Enum("D3D11_CULL_MODE", [
"D3D11_CULL_NONE",
"D3D11_CULL_FRONT",
"D3D11_CULL_BACK",
])
D3D11_SO_DECLARATION_ENTRY = Struct("D3D11_SO_DECLARATION_ENTRY", [
(UINT, "Stream"),
(LPCSTR, "SemanticName"),
(UINT, "SemanticIndex"),
(BYTE, "StartComponent"),
(BYTE, "ComponentCount"),
(BYTE, "OutputSlot"),
])
D3D11_VIEWPORT = Struct("D3D11_VIEWPORT", [
(FLOAT, "TopLeftX"),
(FLOAT, "TopLeftY"),
(FLOAT, "Width"),
(FLOAT, "Height"),
(FLOAT, "MinDepth"),
(FLOAT, "MaxDepth"),
])
D3D11_RESOURCE_DIMENSION = Enum("D3D11_RESOURCE_DIMENSION", [
"D3D11_RESOURCE_DIMENSION_UNKNOWN",
"D3D11_RESOURCE_DIMENSION_BUFFER",
"D3D11_RESOURCE_DIMENSION_TEXTURE1D",
"D3D11_RESOURCE_DIMENSION_TEXTURE2D",
"D3D11_RESOURCE_DIMENSION_TEXTURE3D",
])
D3D11_SRV_DIMENSION = Enum("D3D11_SRV_DIMENSION", [
"D3D11_SRV_DIMENSION_UNKNOWN",
"D3D11_SRV_DIMENSION_BUFFER",
"D3D11_SRV_DIMENSION_TEXTURE1D",
"D3D11_SRV_DIMENSION_TEXTURE1DARRAY",
"D3D11_SRV_DIMENSION_TEXTURE2D",
"D3D11_SRV_DIMENSION_TEXTURE2DARRAY",
"D3D11_SRV_DIMENSION_TEXTURE2DMS",
"D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY",
"D3D11_SRV_DIMENSION_TEXTURE3D",
"D3D11_SRV_DIMENSION_TEXTURECUBE",
"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY",
"D3D11_SRV_DIMENSION_BUFFEREX",
])
D3D11_DSV_DIMENSION = Enum("D3D11_DSV_DIMENSION", [
"D3D11_DSV_DIMENSION_UNKNOWN",
"D3D11_DSV_DIMENSION_TEXTURE1D",
"D3D11_DSV_DIMENSION_TEXTURE1DARRAY",
"D3D11_DSV_DIMENSION_TEXTURE2D",
"D3D11_DSV_DIMENSION_TEXTURE2DARRAY",
"D3D11_DSV_DIMENSION_TEXTURE2DMS",
"D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY",
])
D3D11_RTV_DIMENSION = Enum("D3D11_RTV_DIMENSION", [
"D3D11_RTV_DIMENSION_UNKNOWN",
"D3D11_RTV_DIMENSION_BUFFER",
"D3D11_RTV_DIMENSION_TEXTURE1D",
"D3D11_RTV_DIMENSION_TEXTURE1DARRAY",
"D3D11_RTV_DIMENSION_TEXTURE2D",
"D3D11_RTV_DIMENSION_TEXTURE2DARRAY",
"D3D11_RTV_DIMENSION_TEXTURE2DMS",
"D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY",
"D3D11_RTV_DIMENSION_TEXTURE3D",
])
D3D11_UAV_DIMENSION = Enum("D3D11_UAV_DIMENSION", [
"D3D11_UAV_DIMENSION_UNKNOWN",
"D3D11_UAV_DIMENSION_BUFFER",
"D3D11_UAV_DIMENSION_TEXTURE1D",
"D3D11_UAV_DIMENSION_TEXTURE1DARRAY",
"D3D11_UAV_DIMENSION_TEXTURE2D",
"D3D11_UAV_DIMENSION_TEXTURE2DARRAY",
"D3D11_UAV_DIMENSION_TEXTURE3D",
])
D3D11_USAGE = Enum("D3D11_USAGE", [
"D3D11_USAGE_DEFAULT",
"D3D11_USAGE_IMMUTABLE",
"D3D11_USAGE_DYNAMIC",
"D3D11_USAGE_STAGING",
])
D3D11_BIND_FLAG = Flags(UINT, [
"D3D11_BIND_VERTEX_BUFFER",
"D3D11_BIND_INDEX_BUFFER",
"D3D11_BIND_CONSTANT_BUFFER",
"D3D11_BIND_SHADER_RESOURCE",
"D3D11_BIND_STREAM_OUTPUT",
"D3D11_BIND_RENDER_TARGET",
"D3D11_BIND_DEPTH_STENCIL",
"D3D11_BIND_UNORDERED_ACCESS",
])
D3D11_CPU_ACCESS_FLAG = Flags(UINT, [
"D3D11_CPU_ACCESS_WRITE",
"D3D11_CPU_ACCESS_READ",
])
D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [
"D3D11_RESOURCE_MISC_GENERATE_MIPS",
"D3D11_RESOURCE_MISC_SHARED",
"D3D11_RESOURCE_MISC_TEXTURECUBE",
"D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS",
"D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS",
"D3D11_RESOURCE_MISC_BUFFER_STRUCTURED",
"D3D11_RESOURCE_MISC_RESOURCE_CLAMP",
"D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX",
"D3D11_RESOURCE_MISC_GDI_COMPATIBLE",
])
D3D11_MAP = Enum("D3D11_MAP", [
"D3D11_MAP_READ",
"D3D11_MAP_WRITE",
"D3D11_MAP_READ_WRITE",
"D3D11_MAP_WRITE_DISCARD",
"D3D11_MAP_WRITE_NO_OVERWRITE",
])
D3D11_MAP_FLAG = Flags(UINT, [
"D3D11_MAP_FLAG_DO_NOT_WAIT",
])
D3D11_RAISE_FLAG = Flags(UINT, [
"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR",
])
D3D11_CLEAR_FLAG = Flags(UINT, [
"D3D11_CLEAR_DEPTH",
"D3D11_CLEAR_STENCIL",
])
D3D11_RECT = Alias("D3D11_RECT", RECT)
D3D11_BOX = Struct("D3D11_BOX", [
(UINT, "left"),
(UINT, "top"),
(UINT, "front"),
(UINT, "right"),
(UINT, "bottom"),
(UINT, "back"),
])
ID3D11DeviceChild.methods += [
StdMethod(Void, "GetDevice", [Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice")]),
StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")]),
StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")]),
StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")]),
]
D3D11_COMPARISON_FUNC = Enum("D3D11_COMPARISON_FUNC", [
"D3D11_COMPARISON_NEVER",
"D3D11_COMPARISON_LESS",
"D3D11_COMPARISON_EQUAL",
"D3D11_COMPARISON_LESS_EQUAL",
"D3D11_COMPARISON_GREATER",
"D3D11_COMPARISON_NOT_EQUAL",
"D3D11_COMPARISON_GREATER_EQUAL",
"D3D11_COMPARISON_ALWAYS",
])
D3D11_DEPTH_WRITE_MASK = Enum("D3D11_DEPTH_WRITE_MASK", [
"D3D11_DEPTH_WRITE_MASK_ZERO",
"D3D11_DEPTH_WRITE_MASK_ALL",
])
D3D11_STENCIL_OP = Enum("D3D11_STENCIL_OP", [
"D3D11_STENCIL_OP_KEEP",
"D3D11_STENCIL_OP_ZERO",
"D3D11_STENCIL_OP_REPLACE",
"D3D11_STENCIL_OP_INCR_SAT",
"D3D11_STENCIL_OP_DECR_SAT",
"D3D11_STENCIL_OP_INVERT",
"D3D11_STENCIL_OP_INCR",
"D3D11_STENCIL_OP_DECR",
])
D3D11_DEPTH_STENCILOP_DESC = Struct("D3D11_DEPTH_STENCILOP_DESC", [
(D3D11_STENCIL_OP, "StencilFailOp"),
(D3D11_STENCIL_OP, "StencilDepthFailOp"),
(D3D11_STENCIL_OP, "StencilPassOp"),
(D3D11_COMPARISON_FUNC, "StencilFunc"),
])
D3D11_DEPTH_STENCIL_DESC = Struct("D3D11_DEPTH_STENCIL_DESC", [
(BOOL, "DepthEnable"),
(D3D11_DEPTH_WRITE_MASK, "DepthWriteMask"),
(D3D11_COMPARISON_FUNC, "DepthFunc"),
(BOOL, "StencilEnable"),
(UINT8, "StencilReadMask"),
(UINT8, "StencilWriteMask"),
(D3D11_DEPTH_STENCILOP_DESC, "FrontFace"),
(D3D11_DEPTH_STENCILOP_DESC, "BackFace"),
])
ID3D11DepthStencilState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), "pDesc")]),
]
D3D11_BLEND = Enum("D3D11_BLEND", [
"D3D11_BLEND_ZERO",
"D3D11_BLEND_ONE",
"D3D11_BLEND_SRC_COLOR",
"D3D11_BLEND_INV_SRC_COLOR",
"D3D11_BLEND_SRC_ALPHA",
"D3D11_BLEND_INV_SRC_ALPHA",
"D3D11_BLEND_DEST_ALPHA",
"D3D11_BLEND_INV_DEST_ALPHA",
"D3D11_BLEND_DEST_COLOR",
"D3D11_BLEND_INV_DEST_COLOR",
"D3D11_BLEND_SRC_ALPHA_SAT",
"D3D11_BLEND_BLEND_FACTOR",
"D3D11_BLEND_INV_BLEND_FACTOR",
"D3D11_BLEND_SRC1_COLOR",
"D3D11_BLEND_INV_SRC1_COLOR",
"D3D11_BLEND_SRC1_ALPHA",
"D3D11_BLEND_INV_SRC1_ALPHA",
])
D3D11_BLEND_OP = Enum("D3D11_BLEND_OP", [
"D3D11_BLEND_OP_ADD",
"D3D11_BLEND_OP_SUBTRACT",
"D3D11_BLEND_OP_REV_SUBTRACT",
"D3D11_BLEND_OP_MIN",
"D3D11_BLEND_OP_MAX",
])
D3D11_COLOR_WRITE_ENABLE = Enum("D3D11_COLOR_WRITE_ENABLE", [
"D3D11_COLOR_WRITE_ENABLE_ALL",
"D3D11_COLOR_WRITE_ENABLE_RED",
"D3D11_COLOR_WRITE_ENABLE_GREEN",
"D3D11_COLOR_WRITE_ENABLE_BLUE",
"D3D11_COLOR_WRITE_ENABLE_ALPHA",
])
D3D11_RENDER_TARGET_BLEND_DESC = Struct("D3D11_RENDER_TARGET_BLEND_DESC", [
(BOOL, "BlendEnable"),
(D3D11_BLEND, "SrcBlend"),
(D3D11_BLEND, "DestBlend"),
(D3D11_BLEND_OP, "BlendOp"),
(D3D11_BLEND, "SrcBlendAlpha"),
(D3D11_BLEND, "DestBlendAlpha"),
(D3D11_BLEND_OP, "BlendOpAlpha"),
(UINT8, "RenderTargetWriteMask"),
])
D3D11_BLEND_DESC = Struct("D3D11_BLEND_DESC", [
(BOOL, "AlphaToCoverageEnable"),
(BOOL, "IndependentBlendEnable"),
(Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), "RenderTarget"),
])
ID3D11BlendState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BLEND_DESC), "pDesc")]),
]
D3D11_RASTERIZER_DESC = Struct("D3D11_RASTERIZER_DESC", [
(D3D11_FILL_MODE, "FillMode"),
(D3D11_CULL_MODE, "CullMode"),
(BOOL, "FrontCounterClockwise"),
(INT, "DepthBias"),
(FLOAT, "DepthBiasClamp"),
(FLOAT, "SlopeScaledDepthBias"),
(BOOL, "DepthClipEnable"),
(BOOL, "ScissorEnable"),
(BOOL, "MultisampleEnable"),
(BOOL, "AntialiasedLineEnable"),
])
ID3D11RasterizerState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RASTERIZER_DESC), "pDesc")]),
]
D3D11_SUBRESOURCE_DATA = Struct("D3D11_SUBRESOURCE_DATA", [
(OpaquePointer(Const(Void)), "pSysMem"),
(UINT, "SysMemPitch"),
(UINT, "SysMemSlicePitch"),
])
D3D11_MAPPED_SUBRESOURCE = Struct("D3D11_MAPPED_SUBRESOURCE", [
(OpaquePointer(Void), "pData"),
(UINT, "RowPitch"),
(UINT, "DepthPitch"),
])
ID3D11Resource.methods += [
StdMethod(Void, "GetType", [Out(Pointer(D3D11_RESOURCE_DIMENSION), "pResourceDimension")]),
StdMethod(Void, "SetEvictionPriority", [(UINT, "EvictionPriority")]),
StdMethod(UINT, "GetEvictionPriority", []),
]
D3D11_BUFFER_DESC = Struct("D3D11_BUFFER_DESC", [
(UINT, "ByteWidth"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
(UINT, "StructureByteStride"),
])
ID3D11Buffer.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BUFFER_DESC), "pDesc")]),
]
D3D11_TEXTURE1D_DESC = Struct("D3D11_TEXTURE1D_DESC", [
(UINT, "Width"),
(UINT, "MipLevels"),
(UINT, "ArraySize"),
(DXGI_FORMAT, "Format"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])
ID3D11Texture1D.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE1D_DESC), "pDesc")]),
]
D3D11_TEXTURE2D_DESC = Struct("D3D11_TEXTURE2D_DESC", [
(UINT, "Width"),
(UINT, "Height"),
(UINT, "MipLevels"),
(UINT, "ArraySize"),
(DXGI_FORMAT, "Format"),
(DXGI_SAMPLE_DESC, "SampleDesc"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])
ID3D11Texture2D.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE2D_DESC), "pDesc")]),
]
D3D11_TEXTURE3D_DESC = Struct("D3D11_TEXTURE3D_DESC", [
(UINT, "Width"),
(UINT, "Height"),
(UINT, "Depth"),
(UINT, "MipLevels"),
(DXGI_FORMAT, "Format"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])
ID3D11Texture3D.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE3D_DESC), "pDesc")]),
]
D3D11_TEXTURECUBE_FACE = Enum("D3D11_TEXTURECUBE_FACE", [
"D3D11_TEXTURECUBE_FACE_POSITIVE_X",
"D3D11_TEXTURECUBE_FACE_NEGATIVE_X",
"D3D11_TEXTURECUBE_FACE_POSITIVE_Y",
"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y",
"D3D11_TEXTURECUBE_FACE_POSITIVE_Z",
"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z",
])
ID3D11View.methods += [
StdMethod(Void, "GetResource", [Out(Pointer(ObjPointer(ID3D11Resource)), "ppResource")]),
]
D3D11_BUFFER_SRV = Struct("D3D11_BUFFER_SRV", [
(Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
(Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])
D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [
"D3D11_BUFFEREX_SRV_FLAG_RAW",
])
D3D11_BUFFEREX_SRV = Struct("D3D11_BUFFEREX_SRV", [
(UINT, "FirstElement"),
(UINT, "NumElements"),
(D3D11_BUFFEREX_SRV_FLAG, "Flags"),
])
D3D11_TEX1D_SRV = Struct("D3D11_TEX1D_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEX1D_ARRAY_SRV = Struct("D3D11_TEX1D_ARRAY_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_SRV = Struct("D3D11_TEX2D_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEX2D_ARRAY_SRV = Struct("D3D11_TEX2D_ARRAY_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX3D_SRV = Struct("D3D11_TEX3D_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEXCUBE_SRV = Struct("D3D11_TEXCUBE_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEXCUBE_ARRAY_SRV = Struct("D3D11_TEXCUBE_ARRAY_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
(UINT, "First2DArrayFace"),
(UINT, "NumCubes"),
])
D3D11_TEX2DMS_SRV = Struct("D3D11_TEX2DMS_SRV", [
(UINT, "UnusedField_NothingToDefine"),
])
D3D11_TEX2DMS_ARRAY_SRV = Struct("D3D11_TEX2DMS_ARRAY_SRV", [
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_SHADER_RESOURCE_VIEW_DESC = Struct("D3D11_SHADER_RESOURCE_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_SRV_DIMENSION, "ViewDimension"),
(Union(None, [
(D3D11_BUFFER_SRV, "Buffer"),
(D3D11_TEX1D_SRV, "Texture1D"),
(D3D11_TEX1D_ARRAY_SRV, "Texture1DArray"),
(D3D11_TEX2D_SRV, "Texture2D"),
(D3D11_TEX2D_ARRAY_SRV, "Texture2DArray"),
(D3D11_TEX2DMS_SRV, "Texture2DMS"),
(D3D11_TEX2DMS_ARRAY_SRV, "Texture2DMSArray"),
(D3D11_TEX3D_SRV, "Texture3D"),
(D3D11_TEXCUBE_SRV, "TextureCube"),
(D3D11_TEXCUBE_ARRAY_SRV, "TextureCubeArray"),
(D3D11_BUFFEREX_SRV, "BufferEx"),
]), None),
])
ID3D11ShaderResourceView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), "pDesc")]),
]
D3D11_BUFFER_RTV = Struct("D3D11_BUFFER_RTV", [
(Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
(Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])
D3D11_TEX1D_RTV = Struct("D3D11_TEX1D_RTV", [
(UINT, "MipSlice"),
])
D3D11_TEX1D_ARRAY_RTV = Struct("D3D11_TEX1D_ARRAY_RTV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_RTV = Struct("D3D11_TEX2D_RTV", [
(UINT, "MipSlice"),
])
D3D11_TEX2DMS_RTV = Struct("D3D11_TEX2DMS_RTV", [
(UINT, "UnusedField_NothingToDefine"),
])
D3D11_TEX2D_ARRAY_RTV = Struct("D3D11_TEX2D_ARRAY_RTV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2DMS_ARRAY_RTV = Struct("D3D11_TEX2DMS_ARRAY_RTV", [
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX3D_RTV = Struct("D3D11_TEX3D_RTV", [
(UINT, "MipSlice"),
(UINT, "FirstWSlice"),
(UINT, "WSize"),
])
D3D11_RENDER_TARGET_VIEW_DESC = Struct("D3D11_RENDER_TARGET_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_RTV_DIMENSION, "ViewDimension"),
(Union(None, [
(D3D11_BUFFER_RTV, "Buffer"),
(D3D11_TEX1D_RTV, "Texture1D"),
(D3D11_TEX1D_ARRAY_RTV, "Texture1DArray"),
(D3D11_TEX2D_RTV, "Texture2D"),
(D3D11_TEX2D_ARRAY_RTV, "Texture2DArray"),
(D3D11_TEX2DMS_RTV, "Texture2DMS"),
(D3D11_TEX2DMS_ARRAY_RTV, "Texture2DMSArray"),
(D3D11_TEX3D_RTV, "Texture3D"),
]), None),
])
ID3D11RenderTargetView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), "pDesc")]),
]
D3D11_TEX1D_DSV = Struct("D3D11_TEX1D_DSV", [
(UINT, "MipSlice"),
])
D3D11_TEX1D_ARRAY_DSV = Struct("D3D11_TEX1D_ARRAY_DSV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_DSV = Struct("D3D11_TEX2D_DSV", [
(UINT, "MipSlice"),
])
D3D11_TEX2D_ARRAY_DSV = Struct("D3D11_TEX2D_ARRAY_DSV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2DMS_DSV = Struct("D3D11_TEX2DMS_DSV", [
(UINT, "UnusedField_NothingToDefine"),
])
D3D11_TEX2DMS_ARRAY_DSV = Struct("D3D11_TEX2DMS_ARRAY_DSV", [
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_DSV_FLAG = Flags(UINT, [
"D3D11_DSV_READ_ONLY_DEPTH",
"D3D11_DSV_READ_ONLY_STENCIL",
])
D3D11_DEPTH_STENCIL_VIEW_DESC = Struct("D3D11_DEPTH_STENCIL_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_DSV_DIMENSION, "ViewDimension"),
(D3D11_DSV_FLAG, "Flags"),
(Union(None, [
(D3D11_TEX1D_DSV, "Texture1D"),
(D3D11_TEX1D_ARRAY_DSV, "Texture1DArray"),
(D3D11_TEX2D_DSV, "Texture2D"),
(D3D11_TEX2D_ARRAY_DSV, "Texture2DArray"),
(D3D11_TEX2DMS_DSV, "Texture2DMS"),
(D3D11_TEX2DMS_ARRAY_DSV, "Texture2DMSArray"),
]), None),
])
ID3D11DepthStencilView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), "pDesc")]),
]
D3D11_BUFFER_UAV_FLAG = Flags(UINT, [
"D3D11_BUFFER_UAV_FLAG_RAW",
"D3D11_BUFFER_UAV_FLAG_APPEND",
"D3D11_BUFFER_UAV_FLAG_COUNTER",
])
D3D11_BUFFER_UAV = Struct("D3D11_BUFFER_UAV", [
(UINT, "FirstElement"),
(UINT, "NumElements"),
(D3D11_BUFFER_UAV_FLAG, "Flags"),
])
D3D11_TEX1D_UAV = Struct("D3D11_TEX1D_UAV", [
(UINT, "MipSlice"),
])
D3D11_TEX1D_ARRAY_UAV = Struct("D3D11_TEX1D_ARRAY_UAV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_UAV = Struct("D3D11_TEX2D_UAV", [
(UINT, "MipSlice"),
])
D3D11_TEX2D_ARRAY_UAV = Struct("D3D11_TEX2D_ARRAY_UAV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX3D_UAV = Struct("D3D11_TEX3D_UAV", [
(UINT, "MipSlice"),
(UINT, "FirstWSlice"),
(UINT, "WSize"),
])
D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct("D3D11_UNORDERED_ACCESS_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_UAV_DIMENSION, "ViewDimension"),
(Union(None, [
(D3D11_BUFFER_UAV, "Buffer"),
(D3D11_TEX1D_UAV, "Texture1D"),
(D3D11_TEX1D_ARRAY_UAV, "Texture1DArray"),
(D3D11_TEX2D_UAV, "Texture2D"),
(D3D11_TEX2D_ARRAY_UAV, "Texture2DArray"),
(D3D11_TEX3D_UAV, "Texture3D"),
]), None),
])
ID3D11UnorderedAccessView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), "pDesc")]),
]
D3D11_FILTER = Enum("D3D11_FILTER", [
"D3D11_FILTER_MIN_MAG_MIP_POINT",
"D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR",
"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT",
"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_MIN_MAG_MIP_LINEAR",
"D3D11_FILTER_ANISOTROPIC",
"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_ANISOTROPIC",
])
D3D11_FILTER_TYPE = Enum("D3D11_FILTER_TYPE", [
"D3D11_FILTER_TYPE_POINT",
"D3D11_FILTER_TYPE_LINEAR",
])
D3D11_TEXTURE_ADDRESS_MODE = Enum("D3D11_TEXTURE_ADDRESS_MODE", [
"D3D11_TEXTURE_ADDRESS_WRAP",
"D3D11_TEXTURE_ADDRESS_MIRROR",
"D3D11_TEXTURE_ADDRESS_CLAMP",
"D3D11_TEXTURE_ADDRESS_BORDER",
"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE",
])
D3D11_SAMPLER_DESC = Struct("D3D11_SAMPLER_DESC", [
(D3D11_FILTER, "Filter"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressU"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressV"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressW"),
(FLOAT, "MipLODBias"),
(UINT, "MaxAnisotropy"),
(D3D11_COMPARISON_FUNC, "ComparisonFunc"),
(Array(FLOAT, 4), "BorderColor"),
(FLOAT, "MinLOD"),
(FLOAT, "MaxLOD"),
])
ID3D11SamplerState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SAMPLER_DESC), "pDesc")]),
]
D3D11_FORMAT_SUPPORT = Flags(UINT, [
"D3D11_FORMAT_SUPPORT_BUFFER",
"D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER",
"D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER",
"D3D11_FORMAT_SUPPORT_SO_BUFFER",
"D3D11_FORMAT_SUPPORT_TEXTURE1D",
"D3D11_FORMAT_SUPPORT_TEXTURE2D",
"D3D11_FORMAT_SUPPORT_TEXTURE3D",
"D3D11_FORMAT_SUPPORT_TEXTURECUBE",
"D3D11_FORMAT_SUPPORT_SHADER_LOAD",
"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE",
"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON",
"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT",
"D3D11_FORMAT_SUPPORT_MIP",
"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN",
"D3D11_FORMAT_SUPPORT_RENDER_TARGET",
"D3D11_FORMAT_SUPPORT_BLENDABLE",
"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL",
"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE",
"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE",
"D3D11_FORMAT_SUPPORT_DISPLAY",
"D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT",
"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET",
"D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD",
"D3D11_FORMAT_SUPPORT_SHADER_GATHER",
"D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST",
"D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW",
"D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON",
])
D3D11_FORMAT_SUPPORT2 = Enum("D3D11_FORMAT_SUPPORT2", [
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX",
"D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD",
"D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE",
])
ID3D11Asynchronous.methods += [
StdMethod(UINT, "GetDataSize", []),
]
D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [
"D3D11_ASYNC_GETDATA_DONOTFLUSH",
])
D3D11_QUERY = Enum("D3D11_QUERY", [
"D3D11_QUERY_EVENT",
"D3D11_QUERY_OCCLUSION",
"D3D11_QUERY_TIMESTAMP",
"D3D11_QUERY_TIMESTAMP_DISJOINT",
"D3D11_QUERY_PIPELINE_STATISTICS",
"D3D11_QUERY_OCCLUSION_PREDICATE",
"D3D11_QUERY_SO_STATISTICS",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE",
"D3D11_QUERY_SO_STATISTICS_STREAM0",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0",
"D3D11_QUERY_SO_STATISTICS_STREAM1",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1",
"D3D11_QUERY_SO_STATISTICS_STREAM2",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2",
"D3D11_QUERY_SO_STATISTICS_STREAM3",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3",
])
D3D11_QUERY_MISC_FLAG = Flags(UINT, [
"D3D11_QUERY_MISC_PREDICATEHINT",
])
D3D11_QUERY_DESC = Struct("D3D11_QUERY_DESC", [
(D3D11_QUERY, "Query"),
(D3D11_QUERY_MISC_FLAG, "MiscFlags"),
])
ID3D11Query.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_QUERY_DESC), "pDesc")]),
]
D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct("D3D11_QUERY_DATA_TIMESTAMP_DISJOINT", [
(UINT64, "Frequency"),
(BOOL, "Disjoint"),
])
D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct("D3D11_QUERY_DATA_PIPELINE_STATISTICS", [
(UINT64, "IAVertices"),
(UINT64, "IAPrimitives"),
(UINT64, "VSInvocations"),
(UINT64, "GSInvocations"),
(UINT64, "GSPrimitives"),
(UINT64, "CInvocations"),
(UINT64, "CPrimitives"),
(UINT64, "PSInvocations"),
(UINT64, "HSInvocations"),
(UINT64, "DSInvocations"),
(UINT64, "CSInvocations"),
])
D3D11_QUERY_DATA_SO_STATISTICS = Struct("D3D11_QUERY_DATA_SO_STATISTICS", [
(UINT64, "NumPrimitivesWritten"),
(UINT64, "PrimitivesStorageNeeded"),
])
D3D11_COUNTER = Enum("D3D11_COUNTER", [
"D3D11_COUNTER_DEVICE_DEPENDENT_0",
])
D3D11_COUNTER_TYPE = Enum("D3D11_COUNTER_TYPE", [
"D3D11_COUNTER_TYPE_FLOAT32",
"D3D11_COUNTER_TYPE_UINT16",
"D3D11_COUNTER_TYPE_UINT32",
"D3D11_COUNTER_TYPE_UINT64",
])
D3D11_COUNTER_DESC = Struct("D3D11_COUNTER_DESC", [
(D3D11_COUNTER, "Counter"),
(UINT, "MiscFlags"),
])
D3D11_COUNTER_INFO = Struct("D3D11_COUNTER_INFO", [
(D3D11_COUNTER, "LastDeviceDependentCounter"),
(UINT, "NumSimultaneousCounters"),
(UINT8, "NumDetectableParallelUnits"),
])
ID3D11Counter.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_COUNTER_DESC), "pDesc")]),
]
D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum("D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS", [
"D3D11_STANDARD_MULTISAMPLE_PATTERN",
"D3D11_CENTER_MULTISAMPLE_PATTERN",
])
D3D11_DEVICE_CONTEXT_TYPE = Enum("D3D11_DEVICE_CONTEXT_TYPE", [
"D3D11_DEVICE_CONTEXT_IMMEDIATE",
"D3D11_DEVICE_CONTEXT_DEFERRED",
])
D3D11_CLASS_INSTANCE_DESC = Struct("D3D11_CLASS_INSTANCE_DESC", [
(UINT, "InstanceId"),
(UINT, "InstanceIndex"),
(UINT, "TypeId"),
(UINT, "ConstantBuffer"),
(UINT, "BaseConstantBufferOffset"),
(UINT, "BaseTexture"),
(UINT, "BaseSampler"),
(BOOL, "Created"),
])
ID3D11ClassInstance.methods += [
StdMethod(Void, "GetClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), "pDesc")]),
StdMethod(Void, "GetInstanceName", [Out(LPSTR, "pInstanceName"), Out(Pointer(SIZE_T), "pBufferLength")]),
StdMethod(Void, "GetTypeName", [Out(LPSTR, "pTypeName"), Out(Pointer(SIZE_T), "pBufferLength")]),
]
ID3D11ClassLinkage.methods += [
StdMethod(HRESULT, "GetClassInstance", [(LPCSTR, "pClassInstanceName"), (UINT, "InstanceIndex"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
StdMethod(HRESULT, "CreateClassInstance", [(LPCSTR, "pClassTypeName"), (UINT, "ConstantBufferOffset"), (UINT, "ConstantVectorOffset"), (UINT, "TextureOffset"), (UINT, "SamplerOffset"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
]
ID3D11CommandList.methods += [
StdMethod(UINT, "GetContextFlags", []),
]
D3D11_FEATURE_DATA_THREADING = Struct("D3D11_FEATURE_DATA_THREADING", [
(BOOL, "DriverConcurrentCreates"),
(BOOL, "DriverCommandLists"),
])
D3D11_FEATURE_DATA_DOUBLES = Struct("D3D11_FEATURE_DATA_DOUBLES", [
(BOOL, "DoublePrecisionFloatShaderOps"),
])
D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT", [
(DXGI_FORMAT, "InFormat"),
(D3D11_FORMAT_SUPPORT, "OutFormatSupport"),
])
D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT2", [
(DXGI_FORMAT, "InFormat"),
(D3D11_FORMAT_SUPPORT2, "OutFormatSupport2"),
])
D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct("D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS", [
(BOOL, "ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x"),
])
D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic("D3D11_FEATURE", "Feature", [
("D3D11_FEATURE_THREADING", Pointer(D3D11_FEATURE_DATA_THREADING)),
("D3D11_FEATURE_DOUBLES", Pointer(D3D11_FEATURE_DATA_DOUBLES)),
("D3D11_FEATURE_FORMAT_SUPPORT", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)),
("D3D11_FEATURE_FORMAT_SUPPORT2", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)),
("D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)),
], Blob(Void, "FeatureSupportDataSize"), False)
ID3D11DeviceContext.methods += [
StdMethod(Void, "VSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "PSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "PSSetShader", [(ObjPointer(ID3D11PixelShader), "pPixelShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "PSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "VSSetShader", [(ObjPointer(ID3D11VertexShader), "pVertexShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "DrawIndexed", [(UINT, "IndexCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation")]),
StdMethod(Void, "Draw", [(UINT, "VertexCount"), (UINT, "StartVertexLocation")]),
StdMethod(HRESULT, "Map", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource"), (D3D11_MAP, "MapType"), (D3D11_MAP_FLAG, "MapFlags"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), "pMappedResource")]),
StdMethod(Void, "Unmap", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource")]),
StdMethod(Void, "PSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "IASetInputLayout", [(ObjPointer(ID3D11InputLayout), "pInputLayout")]),
StdMethod(Void, "IASetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppVertexBuffers"), (Pointer(Const(UINT)), "pStrides"), (Pointer(Const(UINT)), "pOffsets")]),
StdMethod(Void, "IASetIndexBuffer", [(ObjPointer(ID3D11Buffer), "pIndexBuffer"), (DXGI_FORMAT, "Format"), (UINT, "Offset")]),
StdMethod(Void, "DrawIndexedInstanced", [(UINT, "IndexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation"), (UINT, "StartInstanceLocation")]),
StdMethod(Void, "DrawInstanced", [(UINT, "VertexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartVertexLocation"), (UINT, "StartInstanceLocation")]),
StdMethod(Void, "GSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "GSSetShader", [(ObjPointer(ID3D11GeometryShader), "pShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "IASetPrimitiveTopology", [(D3D11_PRIMITIVE_TOPOLOGY, "Topology")]),
StdMethod(Void, "VSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "VSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "Begin", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
StdMethod(Void, "End", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
StdMethod(HRESULT, "GetData", [(ObjPointer(ID3D11Asynchronous), "pAsync"), Out(OpaqueBlob(Void, "DataSize"), "pData"), (UINT, "DataSize"), (D3D11_ASYNC_GETDATA_FLAG, "GetDataFlags")]),
StdMethod(Void, "SetPredication", [(ObjPointer(ID3D11Predicate), "pPredicate"), (BOOL, "PredicateValue")]),
StdMethod(Void, "GSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "GSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "OMSetRenderTargets", [(UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumViews"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView")]),
StdMethod(Void, "OMSetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumRTVs"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
StdMethod(Void, "OMSetBlendState", [(ObjPointer(ID3D11BlendState), "pBlendState"), (Array(Const(FLOAT), 4), "BlendFactor"), (UINT, "SampleMask")]),
StdMethod(Void, "OMSetDepthStencilState", [(ObjPointer(ID3D11DepthStencilState), "pDepthStencilState"), (UINT, "StencilRef")]),
StdMethod(Void, "SOSetTargets", [(UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppSOTargets"), (Pointer(Const(UINT)), "pOffsets")]),
StdMethod(Void, "DrawAuto", []),
StdMethod(Void, "DrawIndexedInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
StdMethod(Void, "DrawInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
StdMethod(Void, "Dispatch", [(UINT, "ThreadGroupCountX"), (UINT, "ThreadGroupCountY"), (UINT, "ThreadGroupCountZ")]),
StdMethod(Void, "DispatchIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
StdMethod(Void, "RSSetState", [(ObjPointer(ID3D11RasterizerState), "pRasterizerState")]),
StdMethod(Void, "RSSetViewports", [(UINT, "NumViewports"), (Array(Const(D3D11_VIEWPORT), "NumViewports"), "pViewports")]),
StdMethod(Void, "RSSetScissorRects", [(UINT, "NumRects"), (Array(Const(D3D11_RECT), "NumRects"), "pRects")]),
StdMethod(Void, "CopySubresourceRegion", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (UINT, "DstX"), (UINT, "DstY"), (UINT, "DstZ"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (Pointer(Const(D3D11_BOX)), "pSrcBox")]),
StdMethod(Void, "CopyResource", [(ObjPointer(ID3D11Resource), "pDstResource"), (ObjPointer(ID3D11Resource), "pSrcResource")]),
StdMethod(Void, "UpdateSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (Pointer(Const(D3D11_BOX)), "pDstBox"), (OpaquePointer(Const(Void)), "pSrcData"), (UINT, "SrcRowPitch"), (UINT, "SrcDepthPitch")]),
StdMethod(Void, "CopyStructureCount", [(ObjPointer(ID3D11Buffer), "pDstBuffer"), (UINT, "DstAlignedByteOffset"), (ObjPointer(ID3D11UnorderedAccessView), "pSrcView")]),
StdMethod(Void, "ClearRenderTargetView", [(ObjPointer(ID3D11RenderTargetView), "pRenderTargetView"), (Array(Const(FLOAT), 4), "ColorRGBA")]),
StdMethod(Void, "ClearUnorderedAccessViewUint", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(UINT), 4), "Values")]),
StdMethod(Void, "ClearUnorderedAccessViewFloat", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(FLOAT), 4), "Values")]),
StdMethod(Void, "ClearDepthStencilView", [(ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (D3D11_CLEAR_FLAG, "ClearFlags"), (FLOAT, "Depth"), (UINT8, "Stencil")]),
StdMethod(Void, "GenerateMips", [(ObjPointer(ID3D11ShaderResourceView), "pShaderResourceView")]),
StdMethod(Void, "SetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource"), (FLOAT, "MinLOD")]),
StdMethod(FLOAT, "GetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource")]),
StdMethod(Void, "ResolveSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (DXGI_FORMAT, "Format")]),
StdMethod(Void, "ExecuteCommandList", [(ObjPointer(ID3D11CommandList), "pCommandList"), (BOOL, "RestoreContextState")]),
StdMethod(Void, "HSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "HSSetShader", [(ObjPointer(ID3D11HullShader), "pHullShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "HSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "HSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "DSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "DSSetShader", [(ObjPointer(ID3D11DomainShader), "pDomainShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "DSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "DSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "CSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "CSSetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
StdMethod(Void, "CSSetShader", [(ObjPointer(ID3D11ComputeShader), "pComputeShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "CSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "CSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "VSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "PSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "PSGetShader", [Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "PSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "VSGetShader", [Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "PSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "IAGetInputLayout", [Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
StdMethod(Void, "IAGetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppVertexBuffers"), Out(Pointer(UINT), "pStrides"), Out(Pointer(UINT), "pOffsets")]),
StdMethod(Void, "IAGetIndexBuffer", [Out(Pointer(ObjPointer(ID3D11Buffer)), "pIndexBuffer"), Out(Pointer(DXGI_FORMAT), "Format"), Out(Pointer(UINT), "Offset")]),
StdMethod(Void, "GSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "GSGetShader", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "IAGetPrimitiveTopology", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), "pTopology")]),
StdMethod(Void, "VSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "VSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "GetPredication", [Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate"), Out(Pointer(BOOL), "pPredicateValue")]),
StdMethod(Void, "GSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "GSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "OMGetRenderTargets", [(UINT, "NumViews"), (Array(ObjPointer(ID3D11RenderTargetView), "NumViews"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
StdMethod(Void, "OMGetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(ObjPointer(ID3D11RenderTargetView), "NumRTVs"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
StdMethod(Void, "OMGetBlendState", [Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState"), Out(Array(FLOAT, 4), "BlendFactor"), Out(Pointer(UINT), "pSampleMask")]),
StdMethod(Void, "OMGetDepthStencilState", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState"), Out(Pointer(UINT), "pStencilRef")]),
StdMethod(Void, "SOGetTargets", [(UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppSOTargets")]),
StdMethod(Void, "RSGetState", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
StdMethod(Void, "RSGetViewports", [Out(Pointer(UINT), "pNumViewports"), Out(Array(D3D11_VIEWPORT, "*pNumViewports"), "pViewports")]),
StdMethod(Void, "RSGetScissorRects", [Out(Pointer(UINT), "pNumRects"), Out(Array(D3D11_RECT, "*pNumRects"), "pRects")]),
StdMethod(Void, "HSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "HSGetShader", [Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "HSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "HSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "DSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "DSGetShader", [Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "DSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "DSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "CSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "CSGetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
StdMethod(Void, "CSGetShader", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "CSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "CSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "ClearState", []),
StdMethod(Void, "Flush", []),
StdMethod(D3D11_DEVICE_CONTEXT_TYPE, "GetType", []),
StdMethod(UINT, "GetContextFlags", []),
StdMethod(HRESULT, "FinishCommandList", [(BOOL, "RestoreDeferredContextState"), Out(Pointer(ObjPointer(ID3D11CommandList)), "ppCommandList")]),
]
D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [
"D3D11_CREATE_DEVICE_SINGLETHREADED",
"D3D11_CREATE_DEVICE_DEBUG",
"D3D11_CREATE_DEVICE_SWITCH_TO_REF",
"D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS",
"D3D11_CREATE_DEVICE_BGRA_SUPPORT",
])
ID3D11Device.methods += [
StdMethod(HRESULT, "CreateBuffer", [(Pointer(Const(D3D11_BUFFER_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Buffer)), "ppBuffer")]),
StdMethod(HRESULT, "CreateTexture1D", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture1D)), "ppTexture1D")]),
StdMethod(HRESULT, "CreateTexture2D", [(Pointer(Const(D3D11_TEXTURE2D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture2D)), "ppTexture2D")]),
StdMethod(HRESULT, "CreateTexture3D", [(Pointer(Const(D3D11_TEXTURE3D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture3D)), "ppTexture3D")]),
StdMethod(HRESULT, "CreateShaderResourceView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), "ppSRView")]),
StdMethod(HRESULT, "CreateUnorderedAccessView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)), "ppUAView")]),
StdMethod(HRESULT, "CreateRenderTargetView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)), "ppRTView")]),
StdMethod(HRESULT, "CreateDepthStencilView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
StdMethod(HRESULT, "CreateInputLayout", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), "NumElements"), "pInputElementDescs"), (UINT, "NumElements"), (Blob(Const(Void), "BytecodeLength"), "pShaderBytecodeWithInputSignature"), (SIZE_T, "BytecodeLength"), Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
StdMethod(HRESULT, "CreateVertexShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader")]),
StdMethod(HRESULT, "CreateGeometryShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]),
StdMethod(HRESULT, "CreateGeometryShaderWithStreamOutput", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), "NumEntries"), "pSODeclaration"), (UINT, "NumEntries"), (Array(Const(UINT), "NumStrides"), "pBufferStrides"), (UINT, "NumStrides"), (UINT, "RasterizedStream"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]),
StdMethod(HRESULT, "CreatePixelShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader")]),
StdMethod(HRESULT, "CreateHullShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader")]),
StdMethod(HRESULT, "CreateDomainShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader")]),
StdMethod(HRESULT, "CreateComputeShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader")]),
StdMethod(HRESULT, "CreateClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
StdMethod(HRESULT, "CreateBlendState", [(Pointer(Const(D3D11_BLEND_DESC)), "pBlendStateDesc"), Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState")]),
StdMethod(HRESULT, "CreateDepthStencilState", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), "pDepthStencilDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState")]),
StdMethod(HRESULT, "CreateRasterizerState", [(Pointer(Const(D3D11_RASTERIZER_DESC)), "pRasterizerDesc"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
StdMethod(HRESULT, "CreateSamplerState", [(Pointer(Const(D3D11_SAMPLER_DESC)), "pSamplerDesc"), Out(Pointer(ObjPointer(ID3D11SamplerState)), "ppSamplerState")]),
StdMethod(HRESULT, "CreateQuery", [(Pointer(Const(D3D11_QUERY_DESC)), "pQueryDesc"), Out(Pointer(ObjPointer(ID3D11Query)), "ppQuery")]),
StdMethod(HRESULT, "CreatePredicate", [(Pointer(Const(D3D11_QUERY_DESC)), "pPredicateDesc"), Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate")]),
StdMethod(HRESULT, "CreateCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pCounterDesc"), Out(Pointer(ObjPointer(ID3D11Counter)), "ppCounter")]),
StdMethod(HRESULT, "CreateDeferredContext", [(UINT, "ContextFlags"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppDeferredContext")]),
StdMethod(HRESULT, "OpenSharedResource", [(HANDLE, "hResource"), (REFIID, "ReturnedInterface"), Out(Pointer(ObjPointer(Void)), "ppResource")]),
StdMethod(HRESULT, "CheckFormatSupport", [(DXGI_FORMAT, "Format"), Out(Pointer(D3D11_FORMAT_SUPPORT), "pFormatSupport")]),
StdMethod(HRESULT, "CheckMultisampleQualityLevels", [(DXGI_FORMAT, "Format"), (UINT, "SampleCount"), Out(Pointer(UINT), "pNumQualityLevels")]),
StdMethod(Void, "CheckCounterInfo", [Out(Pointer(D3D11_COUNTER_INFO), "pCounterInfo")]),
StdMethod(HRESULT, "CheckCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pDesc"), Out(Pointer(D3D11_COUNTER_TYPE), "pType"), Out(Pointer(UINT), "pActiveCounters"), Out(LPSTR, "szName"), Out(Pointer(UINT), "pNameLength"), Out(LPSTR, "szUnits"), Out(Pointer(UINT), "pUnitsLength"), Out(LPSTR, "szDescription"), Out(Pointer(UINT), "pDescriptionLength")]),
StdMethod(HRESULT, "CheckFeatureSupport", [(D3D11_FEATURE, "Feature"), Out(D3D11_FEATURE_DATA, "pFeatureSupportData"), (UINT, "FeatureSupportDataSize")]),
StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")]),
StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")]),
StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")]),
StdMethod(D3D_FEATURE_LEVEL, "GetFeatureLevel", []),
StdMethod(D3D11_CREATE_DEVICE_FLAG, "GetCreationFlags", []),
StdMethod(HRESULT, "GetDeviceRemovedReason", []),
StdMethod(Void, "GetImmediateContext", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
StdMethod(HRESULT, "SetExceptionMode", [(D3D11_RAISE_FLAG, "RaiseFlags")]),
StdMethod(UINT, "GetExceptionMode", []),
]
d3d11 = API("d3d11")
d3d11.addFunctions([
StdFunction(HRESULT, "D3D11CreateDevice", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
StdFunction(HRESULT, "D3D11CreateDeviceAndSwapChain", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), "pSwapChainDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
# XXX: Undocumented functions, called by d3d11sdklayers.dll when D3D11_CREATE_DEVICE_DEBUG is set
StdFunction(HRESULT, "D3D11CoreRegisterLayers", [LPCVOID, DWORD], internal=True),
StdFunction(SIZE_T, "D3D11CoreGetLayeredDeviceSize", [LPCVOID, DWORD], internal=True),
StdFunction(HRESULT, "D3D11CoreCreateLayeredDevice", [LPCVOID, DWORD, LPCVOID, (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppvObj")], internal=True),
StdFunction(HRESULT, "D3D11CoreCreateDevice", [DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD], internal=True),
])
d3d11.addInterfaces([
IDXGIAdapter1,
IDXGIDevice1,
IDXGIResource,
ID3D11Debug,
ID3D11InfoQueue,
ID3D11SwitchToRef,
])
| 1.375 | 1 |
day08.py | Pil0u/adventofcode2020 | 0 | 323 | <filename>day08.py
from copy import deepcopy
def boot(seq):
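    # simulate the handheld console: returns (True, acc) on normal termination,
    # or (False, acc) as soon as an instruction is about to run a second time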
index = 0
played_indices = set()
acc = 0
while True:
if index == len(seq):
return True, acc
if index in played_indices:
return False, acc
played_indices.add(index)
line = seq[index].split()
op = line[0]
value = int(line[1])
if op == 'nop':
index += 1
if op == 'acc':
acc += value
index += 1
if op == 'jmp':
index += value
def generate_sequences(list_):
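    # build every variant of the program where a single nop is swapped to jmp, or vice versa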
all_seqs = []
for idx, value in enumerate(list_):
if value[:3] == 'nop':
seq = deepcopy(list_)
seq[idx] = 'jmp' + value[3:]
all_seqs.append(seq)
if value[:3] == 'jmp':
seq = deepcopy(list_)
seq[idx] = 'nop' + value[3:]
all_seqs.append(seq)
return all_seqs
def result(input_):
# Part 1
part_one = boot(input_)[1]
# Part 2
all_sequences = generate_sequences(input_)
for sequence in all_sequences:
result = boot(sequence)
if result[0] is not False:
part_two = result[1]
break
return part_one, part_two
| 2.875 | 3 |
train_fcn.py | onlyNata/segModel | 3 | 324 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 16:34:21 2018
@author: LiHongWang
"""
import os
import tensorflow as tf
from model import fcn_vgg
from model import fcn_mobile
from model import fcn_resnet_v2
from data import input_data
slim = tf.contrib.slim
def main():
num_classes=2
tfRecorf_dir= 'D:/dataSet/kitti/road/sub_um_lane_tra66.tfrecord'
train_dir = './fm2/'
if not os.path.exists(train_dir):
os.makedirs(train_dir)
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
tf.logging.set_verbosity(tf.logging.INFO)
with tf.device("/cpu:0"):
samples=input_data.get_images_labels(tfRecorf_dir,num_classes,66,
crop_size=[224,224],
batch_size=4)
batch_queue = slim.prefetch_queue.prefetch_queue(samples,
capacity=128 )
tra_batch = batch_queue.dequeue()
logit,prediction=fcn_mobile.fcn_mobv1(tra_batch['image'],num_classes)
# logit,prediction=fcn_vgg.fcn_vgg16(tra_batch['image'],num_classes)
# logit,prediction=fcn_resnet_v2.fcn_res101(tra_batch['image'],num_classes)
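        # per-pixel cross entropy between the logits and the label map (the label's channel axis is squeezed away)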
cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit,
labels=tf.squeeze(tra_batch['label'], squeeze_dims=[3]),name="entropy")
loss = tf.reduce_mean(cross_entropy,name='loss')
slim.losses.add_loss(loss)
total_loss = slim.losses.get_total_loss()
# print("image", tra_batch['image'])
# print("label", tf.cast(tra_batch['label']*255, tf.uint8))
# print("prediction", tf.cast(prediction*255, tf.uint8))
# Create some summaries to visualize the training process:
tf.summary.scalar('losses/Total_Loss', total_loss)
tf.summary.image("image", tra_batch['image'], max_outputs=4)
tf.summary.image("label", tf.cast(tra_batch['label']*255, tf.uint8), max_outputs=4)
tf.summary.image("prediction", tf.cast(prediction*255, tf.uint8), max_outputs=4)
lr = tf.train.exponential_decay(0.001,
global_step,
10000,
0.8,
staircase=True)
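        # decay the 0.001 base learning rate by a factor of 0.8 every 10000 global steps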
#lr = tf.constant(0.001, tf.float32)
tf.summary.scalar('learning_rate', lr)
for variable in slim.get_model_variables():
tf.summary.histogram(variable.op.name, variable)
# Specify the optimizer and create the train op:
optimizer = tf.train.RMSPropOptimizer(lr,0.9)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
config=tf.ConfigProto(gpu_options=gpu_options)
final_loss = slim.learning.train(train_op,
logdir=train_dir,
log_every_n_steps=100,
save_summaries_secs=20,
save_interval_secs=1800,
init_fn=None,#fcn_mobile.get_init_fn(),
session_config=config,
number_of_steps=65000)
print('Finished training. Last batch loss %f' % final_loss)
if __name__=='__main__':
main() | 2.34375 | 2 |
setup.py | xbabka01/filetype.py | 0 | 325 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from setuptools import find_packages, setup
setup(
name='filetype',
version='1.0.7',
description='Infer file type and MIME type of any file/buffer. '
'No external dependencies.',
long_description=codecs.open('README.rst', 'r',
encoding='utf-8', errors='ignore').read(),
keywords='file libmagic magic infer numbers magicnumbers discovery mime '
'type kind',
url='https://github.com/h2non/filetype.py',
download_url='https://github.com/h2non/filetype.py/tarball/master',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
license_files=['LICENSE'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System',
'Topic :: System :: Filesystems',
'Topic :: Utilities'],
platforms=['any'],
packages=find_packages(exclude=['dist', 'build', 'docs', 'tests',
'examples']),
package_data={'filetype': ['LICENSE', '*.md']},
zip_safe=True)
| 1.28125 | 1 |
demos/netmiko_textfsm.py | ryanaa08/NPA | 4 | 326 | # make sure templates are present and netmiko knows about them
# git clone https://github.com/networktocode/ntc-templates
# export NET_TEXTFSM=/home/ntc/ntc-templates/templates/
# see https://github.com/networktocode/ntc-templates/tree/master/templates
# for list of templates
from netmiko import ConnectHandler
import json
user = 'ntc'
pwd = '<PASSWORD>'
d_type = 'cisco_ios'
csr1 = ConnectHandler(ip='csr1', username=user, password=pwd, device_type=d_type)
sh_ip_int_br = csr1.send_command("show ip int brief", use_textfsm=True)
# [{'status': 'up', 'intf': 'GigabitEthernet1', 'ipaddr': '10.0.0.51', 'proto': 'up'}, {'status': 'up', 'intf': 'GigabitEthernet2', 'ipaddr': 'unassigned', 'proto': 'up'}, {'status': 'up', 'intf': 'GigabitEthernet3', 'ipaddr': 'unassigned', 'proto': 'up'}, {'status': 'up', 'intf': 'GigabitEthernet4', 'ipaddr': '172.16.17.32', 'proto': 'up'}, {'status': 'up', 'intf': 'Loopback100', 'ipaddr': '10.200.1.20', 'proto': 'up'}]
# is type list
print (type(sh_ip_int_br))
# list of dicts
print (type(sh_ip_int_br[0]))
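# walk each interface entry parsed by the TextFSM template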
for each_dict in sh_ip_int_br:
print "\n"
for key in each_dict.keys():
print key
for each_dict in sh_ip_int_br:
print "\n"
for key, value in each_dict.items():
print key + " is " + value
sh_ver_ios = csr1.send_command("show version", use_textfsm=True)
# [{'running_image': 'packages.conf', 'hostname': 'csr1', 'uptime': '6 hours, 59 minutes', 'config_register': '0x2102', 'hardware': ['CSR1000V'], 'version': '16.6.2', 'serial': ['9KIBQAQ3OPE'], 'rommon': 'IOS-XE'}]
# print the json nicely
print (json.dumps(sh_ver_ios, indent=4))
print sh_ver_ios
# list
print type(sh_ver_ios)
# each item is a dict
print type(sh_ver_ios[0])
# list of dicts with some nested lists with the dicts
for each_dict in sh_ver_ios:
print "\n"
for key, value in each_dict.items():
if type(value) is list:
print key + " is "
for list_entry in value:
print list_entry
if type(value) is str:
print key + " is " + value
| 2.21875 | 2 |
iap/validate_jwt.py | spitfire55/python-docs-samples | 4 | 327 | <gh_stars>1-10
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample showing how to validate the Identity-Aware Proxy (IAP) JWT.
This code should be used by applications in Google Compute Engine-based
environments (such as Google App Engine flexible environment, Google
Compute Engine, or Google Container Engine) to provide an extra layer
of assurance that a request was authorized by IAP.
For applications running in the App Engine standard environment, use
App Engine's Users API instead.
"""
# [START iap_validate_jwt]
import jwt
import requests
def validate_iap_jwt_from_app_engine(iap_jwt, cloud_project_number,
cloud_project_id):
"""Validate a JWT passed to your App Engine app by Identity-Aware Proxy.
Args:
iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
cloud_project_number: The project *number* for your Google Cloud project.
This is returned by 'gcloud projects describe $PROJECT_ID', or
in the Project Info card in Cloud Console.
cloud_project_id: The project *ID* for your Google Cloud project.
Returns:
(user_id, user_email, error_str).
"""
expected_audience = '/projects/{}/apps/{}'.format(
cloud_project_number, cloud_project_id)
return _validate_iap_jwt(iap_jwt, expected_audience)
def validate_iap_jwt_from_compute_engine(iap_jwt, cloud_project_number,
backend_service_id):
"""Validate an IAP JWT for your (Compute|Container) Engine service.
Args:
iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
cloud_project_number: The project *number* for your Google Cloud project.
This is returned by 'gcloud projects describe $PROJECT_ID', or
in the Project Info card in Cloud Console.
backend_service_id: The ID of the backend service used to access the
application. See
https://cloud.google.com/iap/docs/signed-headers-howto
for details on how to get this value.
Returns:
(user_id, user_email, error_str).
"""
expected_audience = '/projects/{}/global/backendServices/{}'.format(
cloud_project_number, backend_service_id)
return _validate_iap_jwt(iap_jwt, expected_audience)
def _validate_iap_jwt(iap_jwt, expected_audience):
try:
key_id = jwt.get_unverified_header(iap_jwt).get('kid')
if not key_id:
return (None, None, '**ERROR: no key ID**')
key = get_iap_key(key_id)
decoded_jwt = jwt.decode(
iap_jwt, key,
algorithms=['ES256'],
audience=expected_audience)
return (decoded_jwt['sub'], decoded_jwt['email'], '')
except (jwt.exceptions.InvalidTokenError,
requests.exceptions.RequestException) as e:
return (None, None, '**ERROR: JWT validation error {}**'.format(e))
def get_iap_key(key_id):
"""Retrieves a public key from the list published by Identity-Aware Proxy,
re-fetching the key file if necessary.
"""
key_cache = get_iap_key.key_cache
key = key_cache.get(key_id)
if not key:
# Re-fetch the key file.
resp = requests.get(
'https://www.gstatic.com/iap/verify/public_key')
if resp.status_code != 200:
raise Exception(
'Unable to fetch IAP keys: {} / {} / {}'.format(
resp.status_code, resp.headers, resp.text))
key_cache = resp.json()
get_iap_key.key_cache = key_cache
key = key_cache.get(key_id)
if not key:
raise Exception('Key {!r} not found'.format(key_id))
return key
# Used to cache the Identity-Aware Proxy public keys. This code only
# refetches the file when a JWT is signed with a key not present in
# this cache.
get_iap_key.key_cache = {}
# [END iap_validate_jwt]
| 2.15625 | 2 |
examples/calc.py | manatlan/htag | 1 | 328 | <filename>examples/calc.py<gh_stars>1-10
import os,sys; sys.path.insert(0,os.path.dirname(os.path.dirname(__file__)))
from htag import Tag
"""
This example shows you how to make a "Calc App"
(with physical buttons + keyboard events).
There is no layout-rendering work to do ;-)
It can't get simpler!
"""
class Calc(Tag.div):
statics=[Tag.H.style("""
.mycalc *,button {font-size:2em;font-family: monospace}
""")]
def init(self):
self.txt=""
self.aff = Tag.Div(" ",_style="border:1px solid black")
self["class"]="mycalc"
self <= self.aff
self <= Tag.button("C", _onclick=self.bind( self.clean) )
self <= [Tag.button(i, _onclick=self.bind( self.press, i) ) for i in "0123456789+-x/."]
self <= Tag.button("=", _onclick=self.bind( self.compute ) )
#-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/ with real keyboard
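        # forward the pressed key (the JS event.key value) to presskey()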
self["onkeyup"] = self.bind( self.presskey, b"event.key" )
def presskey(self,key):
if key in "0123456789+-*/.":
self.press(key)
elif key=="Enter":
self.compute()
elif key in ["Delete","Backspace"]:
self.clean()
#-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/
def press(self,val):
self.txt += val
self.aff.set( self.txt )
def compute(self):
try:
self.txt = str(eval(self.txt.replace("x","*")))
self.aff.set( self.txt )
except:
self.txt = ""
self.aff.set( "Error" )
def clean(self):
self.txt=""
self.aff.set(" ")
if __name__=="__main__":
# import logging
# logging.basicConfig(format='[%(levelname)-5s] %(name)s: %(message)s',level=logging.DEBUG)
# logging.getLogger("htag.tag").setLevel( logging.INFO )
# and execute it in a pywebview instance
from htag.runners import *
# here is another runner, in a simple browser (thru ajax calls)
BrowserHTTP( Calc ).run()
# PyWebWiew( Calc ).run()
| 3.265625 | 3 |
res/example1.py | tghira16/Giraphics | 1 | 329 | <filename>res/example1.py
from giraphics.graphing.graph import Graph
def func(x):
return (x-3)*(x+2)*x*0.2
g = Graph(800,600,8,6, 'example1.svg')
g.bg()
g.grid()
g.axes()
g.graph(func)
g.save()
g.display() | 2.890625 | 3 |
tools/data.py | seanys/2D-Irregular-Packing-Algorithm | 29 | 330 | from tools.geofunc import GeoFunc
import pandas as pd
import json
def getData(index):
    '''Datasets that raise errors (hollow polygons): han, jakobs1, jakobs2'''
    '''Datasets with too many shapes, not handled yet: shapes, shirt, swim, trousers'''
name=["ga","albano","blaz1","blaz2","dighe1","dighe2","fu","han","jakobs1","jakobs2","mao","marques","shapes","shirts","swim","trousers"]
print("开始处理",name[index],"数据集")
    '''Width is not taken into account for now; everything is represented via scaling'''
scale=[100,0.5,100,100,20,20,20,10,20,20,0.5,20,50]
print("缩放",scale[index],"倍")
df = pd.read_csv("data/"+name[index]+".csv")
polygons=[]
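    # each CSV row stores one polygon plus how many copies ('num') of it to load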
for i in range(0,df.shape[0]):
for j in range(0,df['num'][i]):
poly=json.loads(df['polygon'][i])
GeoFunc.normData(poly,scale[index])
polygons.append(poly)
return polygons
| 3.109375 | 3 |
src/trw/reporting/__init__.py | civodlu/trw | 3 | 331 | <reponame>civodlu/trw
#from trw.utils import collect_hierarchical_module_name, collect_hierarchical_parameter_name, get_batch_n, to_value, \
# safe_lookup, len_batch
from .export import as_image_ui8, as_rgb_image, export_image, export_sample, export_as_image
from .table_sqlite import TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows
from .reporting_bokeh import report, create_default_reporting_options
from .reporting_bokeh_samples import PanelDataSamplesTabular
| 1.195313 | 1 |
vframe_cli/commands/templates/image-mp.py | julescarbon/vframe | 1 | 332 | #############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 <NAME> and VFRAME
# https://vframe.io
#
#############################################################################
import click
@click.command('')
@click.option('-i', '--input', 'opt_dir_in', required=True)
@click.option('-r', '--recursive', 'opt_recursive', is_flag=True)
@click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=True,
help='Glob extension')
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
help='Slice list of files')
@click.option('-t', '--threads', 'opt_threads', default=None)
@click.pass_context
def cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads):
"""Multiprocessor image template"""
# ------------------------------------------------
# imports
from os.path import join
from pathlib import Path
from dataclasses import asdict
import numpy as np
import cv2 as cv
from tqdm import tqdm
from pathos.multiprocessing import ProcessingPool as Pool
from pathos.multiprocessing import cpu_count
from vframe.settings import app_cfg
from vframe.settings.modelzoo_cfg import modelzoo
from vframe.models.dnn import DNN
from vframe.image.dnn_factory import DNNFactory
from vframe.utils import file_utils
from vframe.utils.video_utils import FileVideoStream, mediainfo
log = app_cfg.LOG
# set N threads
if not opt_threads:
opt_threads = cpu_count() # maximum
# glob items
fp_items = file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive)
if any(opt_slice):
fp_items = fp_items[opt_slice[0]:opt_slice[1]]
log.info(f'Processing: {len(fp_items):,} files')
# -----------------------------------------------------------
# start pool worker
def pool_worker(pool_item):
    # unpack the file path for this work item
fp = pool_item['fp']
result = {'fp': fp}
    # read the image and run a dummy blur workload (placeholder for real per-image processing)
im = cv.imread(fp)
for i in range(20):
im = cv.blur(im, (35,35))
return result
# end pool worker
# -----------------------------------------------------------
# convert file list into object with
pool_items = [{'fp': fp} for fp in fp_items]
# init processing pool iterator
# use imap instead of map via @hkyi Stack Overflow 41920124
desc = f'image-mp x{opt_threads}'
with Pool(opt_threads) as p:
pool_results = list(tqdm(p.imap(pool_worker, pool_items), total=len(fp_items), desc=desc)) | 2.109375 | 2 |
src/learndash/api_resources/user.py | MarkMacDon/learndash-python | 0 | 333 | <filename>src/learndash/api_resources/user.py
import learndash
from learndash.api_resources.abstract import ListableAPIResource
from learndash.api_resources.abstract import RetrievableAPIResource
from learndash.api_resources.abstract import UpdateableAPIResource
from learndash.api_resources.abstract import NestedAPIResource
from learndash.api_resources.typing import UserDict
from learndash.api_resources.typing import UserCourseProgressDict
from learndash.api_resources.typing import UserCourseDict
from learndash.api_resources.typing import UserGroupDict
from learndash.api_resources.typing import UserQuizProgressDict
class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]):
api_path = learndash.path_users
def course_progress(self, id=None):
return UserCourseProgress(id, parent=self)
def courses(self, id=None):
return UserCourse(id, parent=self)
def groups(self, id=None):
return UserGroup(id, parent=self)
def quiz_progress(self, id=None):
return UserQuizProgress(id, parent=self)
class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource):
api_path = learndash.path_user_course_progress
# class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource):
class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource): # also deletable
api_path = learndash.path_user_courses
def instance_url(self):
# This endpoint accepts updates and deletions at it's base endpoint
return self.class_url()
class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource): # also deletable
api_path = learndash.path_user_groups
def instance_url(self):
# This endpoint accepts updates and deletions at it's base endpoint
return self.class_url()
class UserQuizProgress(ListableAPIResource[UserQuizProgressDict], NestedAPIResource):
api_path = learndash.path_user_quiz_progress
| 2.140625 | 2 |
lib/galaxy/tool_util/deps/container_resolvers/__init__.py | sneumann/galaxy | 1 | 334 | <filename>lib/galaxy/tool_util/deps/container_resolvers/__init__.py<gh_stars>1-10
"""The module defines the abstract interface for resolving container images for tool execution."""
from abc import (
ABCMeta,
abstractmethod,
abstractproperty,
)
import six
from galaxy.util.dictifiable import Dictifiable
@six.python_2_unicode_compatible
@six.add_metaclass(ABCMeta)
class ContainerResolver(Dictifiable):
"""Description of a technique for resolving container images for tool execution."""
# Keys for dictification.
dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies']
can_uninstall_dependencies = False
def __init__(self, app_info=None, **kwds):
"""Default initializer for ``ContainerResolver`` subclasses."""
self.app_info = app_info
self.resolver_kwds = kwds
def _get_config_option(self, key, default=None):
"""Look in resolver-specific settings for option and then fallback to
global settings.
"""
if self.app_info and hasattr(self.app_info, key):
return getattr(self.app_info, key)
else:
return default
@abstractmethod
def resolve(self, enabled_container_types, tool_info, **kwds):
"""Find a container matching all supplied requirements for tool.
The supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description
of the tool and its requirements.
"""
@abstractproperty
def resolver_type(self):
"""Short label for the type of container resolution."""
def _container_type_enabled(self, container_description, enabled_container_types):
"""Return a boolean indicating if the specified container type is enabled."""
return container_description.type in enabled_container_types
def __str__(self):
return "%s[]" % self.__class__.__name__
| 2.359375 | 2 |
projects/eyetracking/gen_adhd_sin.py | nirdslab/streaminghub | 0 | 335 | <reponame>nirdslab/streaminghub
#!/usr/bin/env python3
import glob
import os
import pandas as pd
import dfs
SRC_DIR = f"{dfs.get_data_dir()}/adhd_sin_orig"
OUT_DIR = f"{dfs.get_data_dir()}/adhd_sin"
if __name__ == '__main__':
files = glob.glob(f"{SRC_DIR}/*.csv")
file_names = list(map(os.path.basename, files))
for file_name in file_names:
df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[
['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index()
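        # keep the timestamp, gaze coordinates and per-eye pupil diameters, ordered by time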
df.columns = ['t', 'x', 'y', 'dl', 'dr']
# fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill
df = df.apply(lambda x: x.interpolate().fillna(method="bfill").fillna(method="ffill")).fillna(0)
df['x'] = df['x'] / 1920
df['y'] = df['y'] / 1080
df['d'] = (df['dl'] + df['dr']) / 2
# start with t=0, and set unit to ms
df['t'] = (df['t'] - df['t'].min()) / 1000
df = df[['t', 'x', 'y', 'd']].round(6).set_index('t')
df.to_csv(f'{OUT_DIR}/{file_name}')
print(f'Processed: {file_name}')
| 2.171875 | 2 |
dataProcessing.py | TauferLab/PENGUIN | 0 | 336 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import matplotlib.pyplot as plt
import CurveFit
import shutil
#find all DIRECTORIES containing non-hidden files ending in FILENAME
def getDataDirectories(DIRECTORY, FILENAME="valLoss.txt"):
directories=[]
for directory in os.scandir(DIRECTORY):
for item in os.scandir(directory):
if item.name.endswith(FILENAME) and not item.name.startswith("."):
directories.append(directory.path)
return directories
#get all non-hidden data files in DIRECTORY with extension EXT
def getDataFiles(DIRECTORY, EXT='txt'):
datafiles=[]
for item in os.scandir(DIRECTORY):
if item.name.endswith("."+EXT) and not item.name.startswith("."):
datafiles.append(item.path)
return datafiles
#check whether the loss ever stops decreasing for numEpochs epochs in a row.
def stopsDecreasing(loss, epoch, numEpochs):
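    # returns the index at which the stall is detected (or the last index) and the minimum loss seen so far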
minLoss=np.inf
epochMin=0
for i in range(0,loss.size):
if loss[i] < minLoss:
minLoss=loss[i]
epochMin=epoch[i]
elif (epoch[i]-epochMin) >= numEpochs:
return i, minLoss
return i, minLoss
#SEARCHDIR is where the accuracy and loss files are stored; move them into the directory layout expected by grabNNData.
def createFolders(SEARCHDIR, SAVEDIR):
for item in os.scandir(SEARCHDIR):
name=str(item.name)
files=name.split('-')
SAVEFULLDIR=SAVEDIR+str(files[0])
if not os.path.exists(SAVEFULLDIR):
try:
os.makedirs(SAVEFULLDIR)
except FileExistsError:
#directory already exists--must have been created between the if statement & our attempt at making directory
pass
shutil.move(item.path, SAVEFULLDIR+"/"+str(files[1]))
#a function to read in information (e.g. accuracy, loss) stored at FILENAME
def grabNNData(FILENAME, header='infer', sep=' '):
data = pd.read_csv(FILENAME, sep, header=header)
if ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns) and ('batch_size' in data.columns) and ('learning_rate' in data.columns):
sortedData=data.sort_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
valAcc=np.array(sortedData['valAcc'])
batch_size=np.array(sortedData['batch_size'])
learning_rate=np.array(sortedData['learning_rate'])
convKers=np.array(sortedData['convKernels'])
return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers)
elif ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns):
sortedData=data.sort_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
        valAcc=np.array(sortedData['valAcc'])
        return(epoch, trainLoss, valLoss, valAcc)
else:
print("Missing a column in NN datafile")
raise Exception('NN datafile is missing one of the expected columns: epochs trainLoss valLoss valAcc [optional extra columns: batch_size, learning_rate]')
#slice data could be used to test values of E other than E=0.5, which we use by default
def sliceData(xsize, x, y, z=None, w=None):
#we can slice the data to sample less often, but not more often. We verify that we're not being asked for a granularity that is smaller than the frequency of datapoints in the vectors.
if x[0] > xsize:
return x,y,z,w
else:
result=(1.0/x[0])*xsize
#result is how often we should take datapoints if we wish to consider values every xsize
x=x[int(result-1)::int(result)]
y=y[int(result-1)::int(result)]
if z is not None:
z=z[int(result-1)::int(result)]
if w is None:
return x,y,z
else:
return x,y
#if we get to this point in function, it means z and w are both not None.
w=w[int(result-1)::int(result)]
return x,y,z,w
| 2.890625 | 3 |
algo_probs/newcoder/classic/nc52.py | Jackthebighead/recruiment-2022 | 0 | 337 | <filename>algo_probs/newcoder/classic/nc52.py
# Problem: given a string containing only the characters '(', ')', '{', '}', '[' and ']', determine whether it is a valid bracket sequence. Brackets must be closed in the correct order: "()" and "()[]{}" are valid, while "(]" and "([)]" are not.
# @param s string
# @return bool
#
class Solution:
def isValid(self , s ):
# write code here
if not s: return True
stack = []
dic = {'{':'}','[':']','(':')'}
for char in s:
            if char in dic:
                # opening bracket: push it and wait for its partner
                stack.append(char)
            elif not stack or dic.get(stack[-1]) != char:
                # closing bracket with no matching opener on top of the stack
                return False
            else:
                stack.pop()
        # valid only if every opening bracket has been matched
        return not stack
| 3.328125 | 3 |
piecrust/processing/util.py | airbornemint/PieCrust2 | 0 | 338 | import os.path
import time
import logging
import yaml
from piecrust.processing.base import Processor
logger = logging.getLogger(__name__)
class _ConcatInfo(object):
timestamp = 0
files = None
delim = "\n"
class ConcatProcessor(Processor):
PROCESSOR_NAME = 'concat'
def __init__(self):
super(ConcatProcessor, self).__init__()
self._cache = {}
def matches(self, path):
return path.endswith('.concat')
def getDependencies(self, path):
info = self._load(path)
return info.files
def getOutputFilenames(self, filename):
return [filename[:-7]]
def process(self, path, out_dir):
dirname, filename = os.path.split(path)
out_path = os.path.join(out_dir, filename[:-7])
info = self._load(path)
if not info.files:
raise Exception("No files specified in: %s" %
os.path.relpath(path, self.app.root_dir))
logger.debug("Concatenating %d files to: %s" %
(len(info.files), out_path))
encoded_delim = info.delim.encode('utf8')
with open(out_path, 'wb') as ofp:
for p in info.files:
with open(p, 'rb') as ifp:
ofp.write(ifp.read())
if info.delim:
ofp.write(encoded_delim)
return True
def _load(self, path):
cur_time = time.time()
info = self._cache.get(path)
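        # reuse the cached info if it was refreshed within the last second or the file has not changed since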
if (info is not None and
(cur_time - info.timestamp <= 1 or
os.path.getmtime(path) < info.timestamp)):
return info
if info is None:
info = _ConcatInfo()
self._cache[path] = info
with open(path, 'r') as fp:
config = yaml.load(fp)
info.files = config.get('files', [])
info.delim = config.get('delim', "\n")
info.timestamp = cur_time
path_mode = config.get('path_mode', 'relative')
if path_mode == 'relative':
dirname, _ = os.path.split(path)
info.files = [os.path.join(dirname, f) for f in info.files]
elif path_mode == 'absolute':
info.files = [os.path.join(self.app.root_dir, f)
for f in info.files]
else:
raise Exception("Unknown path mode: %s" % path_mode)
return info
| 2.421875 | 2 |
src/events/cell_pressed.py | ArcosJuan/Get-out-of-my-fucking-maze | 2 | 339 | from src.events import Event
class CellPressed(Event):
def __init__(self, position):
self.position = position
def get_position(self):
return self.position | 2.265625 | 2 |
TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMaxSumPtWMass_cfi.py | ckamtsikis/cmssw | 852 | 340 | import FWCore.ParameterSet.Config as cms
#
# module to make the MaxSumPtWMass jet combination
#
findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass",
## jet input
jets = cms.InputTag("selectedPatJets"),
## lepton input
leps = cms.InputTag("selectedPatMuons"),
## maximum number of jets to be considered
maxNJets = cms.int32(4),
## nominal WMass parameter (in GeV)
wMass = cms.double(80.4),
## use b-tagging two distinguish between light and b jets
useBTagging = cms.bool(False),
## choose algorithm for b-tagging
bTagAlgorithm = cms.string("trackCountingHighEffBJetTags"),
## minimum b discriminator value required for b jets and
## maximum b discriminator value allowed for non-b jets
minBDiscBJets = cms.double(1.0),
maxBDiscLightJets = cms.double(3.0)
)
| 1.898438 | 2 |
xortool/__init__.py | runapp/xortool | 14 | 341 | <reponame>runapp/xortool<filename>xortool/__init__.py
#!/usr/bin/env python
#-*- coding:utf-8 -*-
__all__ = ["args", "colors", "libcolors", "routine"]
__version__ = "0.96"
| 1.210938 | 1 |
baopig/ressources/ressources.py | ChreSyr/baopig | 0 | 342 | <gh_stars>0
from baopig.pybao.objectutilities import Object
from baopig.pybao.issomething import *
class RessourcePack:
def config(self, **kwargs):
for name, value in kwargs.items():
self.__setattr__('_'+name, value)
class FontsRessourcePack(RessourcePack):
def __init__(self,
file=None,
height=15,
color=(0, 0, 0),
):
assert is_color(color)
self._file = file
self._height = height
self._color = color
file = property(lambda self: self._file)
color = property(lambda self: self._color)
height = property(lambda self: self._height)
class ScenesRessourcePack(RessourcePack):
def __init__(self,
background_color=(170, 170, 170),
):
assert is_color(background_color)
self._background_color = background_color
background_color = property(lambda self: self._background_color)
# TODO : ButtonRessourcePack.style.create_surface(size)
class _RessourcePack:
def __init__(self):
self.font = FontsRessourcePack()
self.scene = ScenesRessourcePack()
ressources = _RessourcePack()
| 2.4375 | 2 |
bufr_extract_unique_stations.py | glamod/glamod-misc | 0 | 343 | #!/usr/bin/python2.7
"""
Extract unique set of station locations (and names) along with number of obs
RJHD - Exeter - October 2017
"""
# ECMWF import defaults
import traceback
import sys
from eccodes import *
# RJHD imports
import cartopy
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import gc
VERBOSE = 1 # verbose error reporting.
ATTRS = [
'code',
'units',
'scale',
'reference',
'width'
]
INTMDI = 2147483647
#***************************************************
def process_file(infilename, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year):
infile = open(infilename)
year = int(infilename.split(".")[0].split("_")[-1])
cmatch = 0
counter = 0
# loop all messages (with stop statement)
while 1:
"""OPEN MESSAGE"""
# get handle for message
bufr = codes_bufr_new_from_file(infile)
if bufr is None:
break
if counter%100000 == 0:
print "message: {:d}".format(counter)
# we need to instruct ecCodes to expand all the descriptors
# i.e. unpack the data values
codes_set(bufr, 'unpack', 1)
"""ITERATOR TO EXTRACT KEYS"""
these_keys = []
# get BUFR key iterator
iterid = codes_bufr_keys_iterator_new(bufr)
# loop over the keys
while codes_bufr_keys_iterator_next(iterid):
# print key name
keyname = codes_bufr_keys_iterator_get_name(iterid)
# print(" %s" % keyname)
these_keys += [keyname]
# delete the key iterator
codes_bufr_keys_iterator_delete(iterid)
# Use these to select obs from land/marine surface
name_keys = ["#1#shipOrMobileLandStationIdentifier", "#1#stationNumber"]
processed = False
for nk in name_keys:
if nk in these_keys:
try:
name = codes_get(bufr, nk)
lat = codes_get(bufr, "#1#latitude")
lon = codes_get(bufr, "#1#longitude")
sloc = tloc = nloc = [-1]
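                    # -1 sentinels: this name / latitude / longitude has not been seen before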
if name in station_names:
sloc, = np.where(station_names == name)
if lat in latitudes:
tloc, = np.where(latitudes == lat)
if lon in longitudes:
nloc, = np.where(longitudes == lon)
if tloc[0] == -1 and nloc[0] == -1:
# if not in list, then add
station_names = np.append(station_names, name)
latitudes = np.append(latitudes, lat)
longitudes = np.append(longitudes, lon)
observations = np.append(observations, 1)
start_year = np.append(start_year, year)
end_year = np.append(end_year, year)
# allow splitting of land and marine/mobile
if nk == "#1#stationNumber":
fixed_station = np.append(fixed_station, True)
else:
fixed_station = np.append(fixed_station, False)
elif (tloc[0] != -1 or nloc[0] != -1) and tloc[0] != nloc[0]:
# add if one element of position is unique
station_names = np.append(station_names, name)
latitudes = np.append(latitudes, lat)
longitudes = np.append(longitudes, lon)
observations = np.append(observations, 1)
start_year = np.append(start_year, year)
end_year = np.append(end_year, year)
# allow splitting of land and marine/mobile
if nk == "#1#stationNumber":
fixed_station = np.append(fixed_station, True)
else:
fixed_station = np.append(fixed_station, False)
elif tloc[0] != -1 and tloc[0] == nloc[0]:
# if position matches exactly, up observation counter
observations[tloc[0]] += 1
end_year[tloc[0]] = year
# allow splitting of land and marine/mobile
if nk == "#1#stationNumber":
if fixed_station[tloc[0]] != True:
# if listed as land and now marine, take marine
fixed_station[tloc[0]] = False
else:
if fixed_station[tloc[0]] != False:
# easier to leave as mobile/marine than to move
# hopefully will stand out later
pass
else:
cmatch += 1
processed = True
except CodesInternalError:
raw_input("key error?")
# check for new keys which give station ID information
if not processed:
other_keys = ["#1#carrierBalloonOrAircraftIdentifier", "#1#aircraftFlightNumber"]
new_key = True
for ok in other_keys:
if ok in these_keys: new_key = False
if new_key:
raw_input(these_keys)
# if counter > 10000: break
counter += 1
codes_release(bufr)
# print "Number of unique locations in this year: {}".format(len(latitudes))
return station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year # process_file
#***************************************************
def scatter_map(outname, data, lons, lats, cmap, bounds, cb_label, title = "", figtext = "", doText = False):
'''
Standard scatter map
:param str outname: output filename root
:param array data: data to plot
:param array lons: longitudes
:param array lats: latitudes
:param obj cmap: colourmap to use
:param array bounds: bounds for discrete colormap
:param str cb_label: colorbar label
'''
norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N)
fig = plt.figure(figsize =(10,6.5))
plt.clf()
ax = plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson())
ax.gridlines() #draw_labels=True)
ax.add_feature(cartopy.feature.LAND, zorder = 0, facecolor = "0.9", edgecolor = "k")
ax.coastlines()
ext = ax.get_extent() # save the original extent
scatter = plt.scatter(lons, lats, c = data, cmap = cmap, norm = norm, s=10, \
transform = cartopy.crs.Geodetic(), edgecolor = "r", linewidth = 0.1)
cb=plt.colorbar(scatter, orientation = 'horizontal', pad = 0.05, fraction = 0.05, \
aspect = 30, ticks = bounds[1:-1], label = cb_label, drawedges=True)
# thicken border of colorbar and the dividers
# http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib
# cb.set_ticklabels(["{:g}".format(b) for b in bounds[1:-1]])
# cb.outline.set_color('k')
# cb.outline.set_linewidth(2)
cb.dividers.set_color('k')
cb.dividers.set_linewidth(2)
ax.set_extent(ext, ax.projection) # fix the extent change from colormesh
plt.title(title)
if doText: plt.text(0.01, 0.98, "#stations: {}".format(data.shape[0]), transform = ax.transAxes, fontsize = 10)
plt.savefig(outname)
plt.close()
return # scatter_map
#***************************************************
def main(ms = "era40_", year = 1980):
LOCS = "/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/"
print year
station_names = np.array([])
fixed_station = np.array([])
latitudes = np.array([])
longitudes = np.array([])
observations = np.array([])
start_year = np.array([])
end_year = np.array([])
if ms == "erai_" and year < 1979:
return
else:
INFILE = "{}mars_{}{}.bufr".format(LOCS, ms, year)
try:
station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year = \
process_file(INFILE, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year)
except CodesInternalError as err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
sys.stderr.write(err.msg + '\n')
land = np.where(np.array(fixed_station) == True)
marine = np.where(np.array(fixed_station) == False)
bounds = np.linspace(0,max(observations),10).astype(int)
cmap = plt.cm.YlOrRd_r
if ms == "erai_":
title = "MARS - SYNOP - {}".format(year)
else:
title = "MARS - ERA40 - {}".format(year)
scatter_map("mars_{}{}_land_observations.png".format(ms, year), observations[land], longitudes[land], latitudes[land], cmap, bounds, "Number of Observations", title, doText = True)
scatter_map("mars_{}{}_marine_observations.png".format(ms, year), observations[marine], longitudes[marine], latitudes[marine], cmap, bounds, "Number of Observations", title)
station_names = 0
fixed_station = 0
latitudes = 0
longitudes = 0
observations = 0
start_year = 0
end_year = 0
land = 0
marine = 0
gc.collect()
return # main
#***************************************************
if __name__ == "__main__":
import argparse
# set up keyword arguments
parser = argparse.ArgumentParser()
parser.add_argument('--ms', dest='ms', action='store', default = "era40_",
help='Run on ERA40 ["era40_"] (default) or ERA-I ["erai_"] data')
parser.add_argument('--year', dest='year', action='store', default = 1980,
help='Which year to process - default 1980')
args = parser.parse_args()
main(ms = args.ms, year = args.year)
sys.exit()
#***************************************************
# END
#***************************************************
| 2.5625 | 3 |
libsaas/services/twilio/applications.py | MidtownFellowship/libsaas | 155 | 344 | <gh_stars>100-1000
from libsaas import http, parsers
from libsaas.services import base
from libsaas.services.twilio import resource
class ApplicationsBase(resource.TwilioResource):
path = 'Applications'
class Application(ApplicationsBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class Applications(ApplicationsBase):
@base.apimethod
def get(self, FriendlyName=None, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Applications belonging to an account.
        :var FriendlyName: Only return the Application resources with friendly
            names that exactly match this name.
:vartype FriendlyName: str
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class ConnectAppsBase(resource.TwilioResource):
path = 'ConnectApps'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class ConnectApp(ConnectAppsBase):
pass
class ConnectApps(ConnectAppsBase):
@base.apimethod
def get(self, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Connect Apps belonging to an account.
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
class AuthorizedConnectAppsBase(resource.TwilioResource):
path = 'AuthorizedConnectApps'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class AuthorizedConnectApp(AuthorizedConnectAppsBase):
pass
class AuthorizedConnectApps(AuthorizedConnectAppsBase):
@base.apimethod
def get(self, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Authorized Connect Apps belonging to an account.
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
| 2.578125 | 3 |
research/gnn/sgcn/postprocess.py | leelige/mindspore | 1 | 345 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess.
"""
import os
import argparse
import numpy as np
from src.ms_utils import calculate_auc
from mindspore import context, load_checkpoint
def softmax(x):
t_max = np.max(x, axis=1, keepdims=True) # returns max of each row and keeps same dims
e_x = np.exp(x - t_max) # subtracts each row with its max value
t_sum = np.sum(e_x, axis=1, keepdims=True) # returns sum of each row and keeps same dims
f_x = e_x / t_sum
return f_x
def score_model(preds, test_pos, test_neg, weight, bias):
"""
Score the model on the test set edges in each epoch.
    Args:
        preds: Node embeddings produced by the trained network.
        test_pos: Positive test edges.
        test_neg: Negative test edges.
        weight: Weights of the final regression layer.
        bias: Bias of the final regression layer.
Returns:
auc(Float32): AUC result.
f1(Float32): F1-Score result.
"""
score_positive_edges = np.array(test_pos, dtype=np.int32).T
score_negative_edges = np.array(test_neg, dtype=np.int32).T
test_positive_z = np.concatenate((preds[score_positive_edges[0, :], :],
preds[score_positive_edges[1, :], :]), axis=1)
test_negative_z = np.concatenate((preds[score_negative_edges[0, :], :],
preds[score_negative_edges[1, :], :]), axis=1)
    # Concatenate positive and negative edge embeddings and apply the regression layer.
scores = np.dot(np.concatenate((test_positive_z, test_negative_z), axis=0), weight) + bias
probability_scores = np.exp(softmax(scores))
predictions = probability_scores[:, 0]/probability_scores[:, 0:2].sum(1)
# predictions = predictions.asnumpy()
targets = [0]*len(test_pos) + [1]*len(test_neg)
auc, f1 = calculate_auc(targets, predictions)
return auc, f1
def get_acc():
"""get infer Accuracy."""
parser = argparse.ArgumentParser(description='postprocess')
parser.add_argument('--dataset_name', type=str, default='bitcoin-otc', choices=['bitcoin-otc', 'bitcoin-alpha'],
help='dataset name')
parser.add_argument('--result_path', type=str, default='./ascend310_infer/input/', help='result Files')
parser.add_argument('--label_path', type=str, default='', help='y_test npy Files')
parser.add_argument('--mask_path', type=str, default='', help='test_mask npy Files')
parser.add_argument("--checkpoint_file", type=str, default='sgcn_alpha_f1.ckpt', help="Checkpoint file path.")
parser.add_argument("--edge_path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--features-path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--test-size", type=float,
default=0.2, help="Test dataset size. Default is 0.2.")
parser.add_argument("--seed", type=int, default=42,
help="Random seed for sklearn pre-training. Default is 42.")
parser.add_argument("--spectral-features", default=True, dest="spectral_features", action="store_true")
parser.add_argument("--reduction-iterations", type=int,
default=30, help="Number of SVD iterations. Default is 30.")
parser.add_argument("--reduction-dimensions", type=int,
default=64, help="Number of SVD feature extraction dimensions. Default is 64.")
args_opt = parser.parse_args()
# Runtime
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=0)
# Create network
test_pos = np.load(os.path.join(args_opt.result_path, 'pos_test.npy'))
test_neg = np.load(os.path.join(args_opt.result_path, 'neg_test.npy'))
# Load parameters from checkpoint into network
param_dict = load_checkpoint(args_opt.checkpoint_file)
print(type(param_dict))
print(param_dict)
print(type(param_dict['regression_weights']))
print(param_dict['regression_weights'])
# load_param_into_net(net, param_dict)
pred = np.fromfile('./result_Files/repos_0.bin', np.float32)
if args_opt.dataset_name == 'bitcoin-otc':
pred = pred.reshape(5881, 64)
else:
pred = pred.reshape(3783, 64)
auc, f1 = score_model(pred, test_pos, test_neg, param_dict['regression_weights'].asnumpy(),
param_dict['regression_bias'].asnumpy())
print("Test set results:", "auc=", "{:.5f}".format(auc), "f1=", "{:.5f}".format(f1))
if __name__ == '__main__':
get_acc()
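# Illustrative invocation (paths and checkpoint name are placeholders for your
# own exported files), using only the flags defined in get_acc() above:
#   python postprocess.py --dataset_name bitcoin-otc \
#       --result_path ./ascend310_infer/input/ \
#       --checkpoint_file sgcn_otc_f1.ckpt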
| 2.296875 | 2 |
pykeops/common/get_options.py | dvolgyes/keops | 1 | 346 | import re
import numpy as np
from collections import OrderedDict
import pykeops
import pykeops.config
############################################################
# define backend
############################################################
class SetBackend():
"""
    This class is used to centralize the options used in PyKeOps.
"""
dev = OrderedDict([('CPU',0),('GPU',1)])
grid = OrderedDict([('1D',0),('2D',1)])
memtype = OrderedDict([('host',0), ('device',1)])
possible_options_list = ['auto',
'CPU',
'GPU',
'GPU_1D', 'GPU_1D_device', 'GPU_1D_host',
'GPU_2D', 'GPU_2D_device', 'GPU_2D_host'
]
def define_tag_backend(self, backend, variables):
"""
Try to make a good guess for the backend... available methods are: (host means Cpu, device means Gpu)
CPU : computations performed with the host from host arrays
GPU_1D_device : computations performed on the device from device arrays, using the 1D scheme
GPU_2D_device : computations performed on the device from device arrays, using the 2D scheme
GPU_1D_host : computations performed on the device from host arrays, using the 1D scheme
GPU_2D_host : computations performed on the device from host data, using the 2D scheme
        :param backend: (str) one of the entries of possible_options_list
        :param variables: (tuple) input arrays, used to infer the memory location
        :return: (tagCPUGPU, tag1D2D, tagHostDevice)
"""
# check that the option is valid
if (backend not in self.possible_options_list):
raise ValueError('Invalid backend. Should be one of ', self.possible_options_list)
# auto : infer everything
if backend == 'auto':
return int(pykeops.config.gpu_available), self._find_grid(), self._find_mem(variables)
split_backend = re.split('_',backend)
if len(split_backend) == 1: # CPU or GPU
return self.dev[split_backend[0]], self._find_grid(), self._find_mem(variables)
elif len(split_backend) == 2: # GPU_1D or GPU_2D
return self.dev[split_backend[0]], self.grid[split_backend[1]], self._find_mem(variables)
elif len(split_backend) == 3: # the option is known
return self.dev[split_backend[0]], self.grid[split_backend[1]], self.memtype[split_backend[2]]
def define_backend(self, backend, variables):
tagCPUGPU, tag1D2D, tagHostDevice = self.define_tag_backend(backend, variables)
        # define_tag_backend returns integer tags, while self.dev/self.grid/self.memtype
        # are keyed by name; map the integers back to their string names to avoid a KeyError.
        return (list(self.dev.keys())[tagCPUGPU],
                list(self.grid.keys())[tag1D2D],
                list(self.memtype.keys())[tagHostDevice])
@staticmethod
def _find_dev():
return int(pykeops.config.gpu_available)
@staticmethod
def _find_mem(variables):
if all([type(var) is np.ndarray for var in variables ]): # Infer if we're working with numpy arrays or torch tensors:
MemType = 0
elif pykeops.config.torch_found:
import torch
if all([type(var) in [torch.Tensor, torch.nn.parameter.Parameter] for var in variables]):
from pykeops.torch.utils import is_on_device
VarsAreOnGpu = tuple(map(is_on_device, tuple(variables)))
if all(VarsAreOnGpu):
MemType = 1
elif not any(VarsAreOnGpu):
MemType = 0
else:
raise ValueError('At least two input variables have different memory locations (Cpu/Gpu).')
else:
raise TypeError('All variables should either be numpy arrays or torch tensors.')
return MemType
@staticmethod
def _find_grid():
return 0
def get_tag_backend(backend, variables, str = False):
"""
entry point to get the correct backend
"""
res = SetBackend()
if not str:
return res.define_tag_backend(backend, variables)
else:
return res.define_backend(backend, variables)
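# A minimal sketch of how the tags are typically queried for NumPy inputs
# (illustrative, not called anywhere): with the 'auto' backend the grid tag is
# 0 (1D) and the memory tag is 0 (host), while the device tag follows
# pykeops.config.gpu_available.
def _tag_backend_example():
    x = np.random.rand(10, 3)
    y = np.random.rand(20, 3)
    tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend('auto', (x, y))
    return tagCPUGPU, tag1D2D, tagHostDevice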
| 2.703125 | 3 |
prepare_features_vc.py | tkm2261/dnn-voice-changer | 13 | 347 | <filename>prepare_features_vc.py
"""Prepare acoustic features for one-to-one voice conversion.
usage:
prepare_features_vc.py [options] <DATA_ROOT> <source_speaker> <target_speaker>
options:
--max_files=<N> Max num files to be collected. [default: 100]
--dst_dir=<d> Destination directory [default: data/cmu_arctic_vc].
--overwrite Overwrite files.
-h, --help show this help message and exit
"""
from __future__ import division, print_function, absolute_import
from docopt import docopt
import numpy as np
from nnmnkwii.datasets import FileSourceDataset
from nnmnkwii import preprocessing as P
from nnmnkwii.preprocessing.alignment import DTWAligner
from nnmnkwii.datasets import cmu_arctic, voice_statistics, vcc2016
import pysptk
import pyworld
from scipy.io import wavfile
from tqdm import tqdm
from os.path import basename, splitext, exists, expanduser, join, dirname
import os
import sys
from hparams import vc as hp
from hparams import hparams_debug_string
# vcc2016.WavFileDataSource and voice_statistics.WavFileDataSource can be used
# as drop-in replacements. See below for details:
# https://r9y9.github.io/nnmnkwii/latest/references/datasets.html#builtin-data-sources
class MGCSource(cmu_arctic.WavFileDataSource):
def __init__(self, data_root, speakers, max_files=None):
super(MGCSource, self).__init__(data_root, speakers,
max_files=max_files)
self.alpha = None
def collect_features(self, wav_path):
fs, x = wavfile.read(wav_path)
x = x.astype(np.float64)
f0, timeaxis = pyworld.dio(x, fs, frame_period=hp.frame_period)
f0 = pyworld.stonemask(x, f0, timeaxis, fs)
spectrogram = pyworld.cheaptrick(x, f0, timeaxis, fs)
spectrogram = P.trim_zeros_frames(spectrogram)
if self.alpha is None:
self.alpha = pysptk.util.mcepalpha(fs)
mgc = pysptk.sp2mc(spectrogram, order=hp.order, alpha=self.alpha)
# Drop 0-th coefficient
mgc = mgc[:, 1:]
# 50Hz cut-off MS smoothing
hop_length = int(fs * (hp.frame_period * 0.001))
modfs = fs / hop_length
mgc = P.modspec_smoothing(mgc, modfs, cutoff=50)
# Add delta
mgc = P.delta_features(mgc, hp.windows)
return mgc.astype(np.float32)
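# A quick sanity-check sketch of the feature dimensionality produced by
# collect_features above (illustrative helper, not called by the script): after
# dropping the 0-th coefficient the static mgc has hp.order dimensions, and
# appending delta features multiplies that by the number of windows.
def _expected_mgc_dim():
    static_dim = hp.order  # 0-th cepstral coefficient is dropped
    return static_dim * len(hp.windows)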
if __name__ == "__main__":
args = docopt(__doc__)
print("Command line args:\n", args)
DATA_ROOT = args["<DATA_ROOT>"]
source_speaker = args["<source_speaker>"]
target_speaker = args["<target_speaker>"]
max_files = int(args["--max_files"])
dst_dir = args["--dst_dir"]
overwrite = args["--overwrite"]
print(hparams_debug_string(hp))
X_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [source_speaker],
max_files=max_files))
Y_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [target_speaker],
max_files=max_files))
skip_feature_extraction = exists(join(dst_dir, "X")) \
and exists(join(dst_dir, "Y"))
if overwrite:
skip_feature_extraction = False
if skip_feature_extraction:
print("Features seems to be prepared, skipping feature extraction.")
sys.exit(0)
# Create dirs
for speaker, name in [(source_speaker, "X"), (target_speaker, "Y")]:
d = join(dst_dir, name)
print("Destination dir for {}: {}".format(speaker, d))
if not exists(d):
os.makedirs(d)
# Convert to arrays
print("Convert datasets to arrays")
X, Y = X_dataset.asarray(verbose=1), Y_dataset.asarray(verbose=1)
# Alignment
print("Perform alignment")
X, Y = DTWAligner().transform((X, Y))
print("Save features to disk")
for idx, (x, y) in tqdm(enumerate(zip(X, Y))):
# paths
src_name = splitext(basename(X_dataset.collected_files[idx][0]))[0]
tgt_name = splitext(basename(Y_dataset.collected_files[idx][0]))[0]
src_path = join(dst_dir, "X", src_name)
tgt_path = join(dst_dir, "Y", tgt_name)
        # Trim and adjust frame lengths
x = P.trim_zeros_frames(x)
y = P.trim_zeros_frames(y)
x, y = P.adjust_frame_lengths(x, y, pad=True, divisible_by=2)
# Save
np.save(src_path, x)
np.save(tgt_path, y)
| 2.375 | 2 |
lib/tests/streamlit/pydeck_test.py | zgtz/streamlit | 1 | 348 | <filename>lib/tests/streamlit/pydeck_test.py
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pandas as pd
import pydeck as pdk
from tests import testutil
import streamlit as st
import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
df1 = pd.DataFrame({"lat": [1, 2, 3, 4], "lon": [10, 20, 30, 40]})
class PyDeckTest(testutil.DeltaGeneratorTestCase):
def test_basic(self):
"""Test that pydeck object orks."""
st.pydeck_chart(
pdk.Deck(
layers=[
pdk.Layer("ScatterplotLayer", data=df1),
]
)
)
el = self.get_delta_from_queue().new_element
actual = json.loads(el.deck_gl_json_chart.json)
self.assertEqual(actual["layers"][0]["@@type"], "ScatterplotLayer")
self.assertEqual(
actual["layers"][0]["data"],
[
{"lat": 1, "lon": 10},
{"lat": 2, "lon": 20},
{"lat": 3, "lon": 30},
{"lat": 4, "lon": 40},
],
)
def test_no_args(self):
"""Test that it can be called with no args."""
st.pydeck_chart()
el = self.get_delta_from_queue().new_element
actual = json.loads(el.deck_gl_json_chart.json)
self.assertEqual(actual, deck_gl_json_chart.EMPTY_MAP)
| 2.59375 | 3 |
sdks/python/apache_beam/io/gcp/bigquery_tools.py | Doctusoft/beam | 0 | 349 | <filename>sdks/python/apache_beam/io/gcp/bigquery_tools.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tools used by BigQuery sources and sinks.
Classes, constants and functions in this file are experimental and have no
backwards compatibility guarantees.
These tools include wrappers and clients to interact with BigQuery APIs.
NOTHING IN THIS FILE HAS BACKWARDS COMPATIBILITY GUARANTEES.
"""
from __future__ import absolute_import
import datetime
import decimal
import json
import logging
import re
import sys
import time
import uuid
from builtins import object
from future.utils import iteritems
from apache_beam import coders
from apache_beam.internal.gcp import auth
from apache_beam.internal.gcp.json_value import from_json_value
from apache_beam.internal.gcp.json_value import to_json_value
from apache_beam.internal.http_client import get_new_http
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.options import value_provider
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.runners.dataflow.native_io import iobase as dataflow_io
from apache_beam.transforms import DoFn
from apache_beam.utils import retry
# Protect against environments where bigquery library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
pass
# pylint: enable=wrong-import-order, wrong-import-position
MAX_RETRIES = 3
JSON_COMPLIANCE_ERROR = 'NAN, INF and -INF values are not JSON compliant.'
def default_encoder(obj):
if isinstance(obj, decimal.Decimal):
return str(obj)
raise TypeError(
"Object of type '%s' is not JSON serializable" % type(obj).__name__)
def get_hashable_destination(destination):
"""Parses a table reference into a (project, dataset, table) tuple.
Args:
destination: Either a TableReference object from the bigquery API.
The object has the following attributes: projectId, datasetId, and
tableId. Or a string representing the destination containing
'PROJECT:DATASET.TABLE'.
Returns:
A string representing the destination containing
'PROJECT:DATASET.TABLE'.
"""
if isinstance(destination, bigquery.TableReference):
return '%s:%s.%s' % (
destination.projectId, destination.datasetId, destination.tableId)
else:
return destination
def parse_table_schema_from_json(schema_string):
"""Parse the Table Schema provided as string.
Args:
schema_string: String serialized table schema, should be a valid JSON.
Returns:
A TableSchema of the BigQuery export from either the Query or the Table.
"""
json_schema = json.loads(schema_string)
def _parse_schema_field(field):
"""Parse a single schema field from dictionary.
Args:
field: Dictionary object containing serialized schema.
Returns:
A TableFieldSchema for a single column in BigQuery.
"""
schema = bigquery.TableFieldSchema()
schema.name = field['name']
schema.type = field['type']
if 'mode' in field:
schema.mode = field['mode']
else:
schema.mode = 'NULLABLE'
if 'description' in field:
schema.description = field['description']
if 'fields' in field:
schema.fields = [_parse_schema_field(x) for x in field['fields']]
return schema
fields = [_parse_schema_field(f) for f in json_schema['fields']]
return bigquery.TableSchema(fields=fields)
def parse_table_reference(table, dataset=None, project=None):
"""Parses a table reference into a (project, dataset, table) tuple.
Args:
table: The ID of the table. The ID must contain only letters
(a-z, A-Z), numbers (0-9), or underscores (_). If dataset argument is None
then the table argument must contain the entire table reference:
'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This argument can be a
bigquery.TableReference instance in which case dataset and project are
ignored and the reference is returned as a result. Additionally, for date
partitioned tables, appending '$YYYYmmdd' to the table name is supported,
e.g. 'DATASET.TABLE$YYYYmmdd'.
dataset: The ID of the dataset containing this table or null if the table
reference is specified entirely by the table argument.
project: The ID of the project containing this table or null if the table
reference is specified entirely by the table (and possibly dataset)
argument.
Returns:
A TableReference object from the bigquery API. The object has the following
attributes: projectId, datasetId, and tableId.
Raises:
ValueError: if the table reference as a string does not match the expected
format.
"""
if isinstance(table, bigquery.TableReference):
return table
elif callable(table):
return table
elif isinstance(table, value_provider.ValueProvider):
return table
table_reference = bigquery.TableReference()
# If dataset argument is not specified, the expectation is that the
# table argument will contain a full table reference instead of just a
# table name.
if dataset is None:
match = re.match(
r'^((?P<project>.+):)?(?P<dataset>\w+)\.(?P<table>[\w\$]+)$', table)
if not match:
raise ValueError(
'Expected a table reference (PROJECT:DATASET.TABLE or '
'DATASET.TABLE) instead of %s.' % table)
table_reference.projectId = match.group('project')
table_reference.datasetId = match.group('dataset')
table_reference.tableId = match.group('table')
else:
table_reference.projectId = project
table_reference.datasetId = dataset
table_reference.tableId = table
return table_reference
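# A minimal sketch of how parse_table_reference splits a fully qualified table
# string into a TableReference (illustrative helper; the names below are
# placeholders, not real resources).
def _parse_table_reference_example():
  ref = parse_table_reference('my-project:my_dataset.my_table')
  assert (ref.projectId, ref.datasetId, ref.tableId) == (
      'my-project', 'my_dataset', 'my_table')
  return ref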
# -----------------------------------------------------------------------------
# BigQueryWrapper.
class BigQueryWrapper(object):
"""BigQuery client wrapper with utilities for querying.
The wrapper is used to organize all the BigQuery integration points and
offer a common place where retry logic for failures can be controlled.
In addition it offers various functions used both in sources and sinks
(e.g., find and create tables, query a table, etc.).
"""
TEMP_TABLE = 'temp_table_'
TEMP_DATASET = 'temp_dataset_'
def __init__(self, client=None):
self.client = client or bigquery.BigqueryV2(
http=get_new_http(),
credentials=auth.get_service_credentials(),
response_encoding=None if sys.version_info[0] < 3 else 'utf8')
self._unique_row_id = 0
# For testing scenarios where we pass in a client we do not want a
# randomized prefix for row IDs.
self._row_id_prefix = '' if client else uuid.uuid4()
self._temporary_table_suffix = uuid.uuid4().hex
@property
def unique_row_id(self):
"""Returns a unique row ID (str) used to avoid multiple insertions.
If the row ID is provided, BigQuery will make a best effort to not insert
the same row multiple times for fail and retry scenarios in which the insert
request may be issued several times. This comes into play for sinks executed
in a local runner.
Returns:
a unique row ID string
"""
self._unique_row_id += 1
return '%s_%d' % (self._row_id_prefix, self._unique_row_id)
def _get_temp_table(self, project_id):
return parse_table_reference(
table=BigQueryWrapper.TEMP_TABLE + self._temporary_table_suffix,
dataset=BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix,
project=project_id)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_query_location(self, project_id, query, use_legacy_sql):
"""
Get the location of tables referenced in a query.
This method returns the location of the first referenced table in the query
and depends on the BigQuery service to provide error handling for
queries that reference tables in multiple locations.
"""
reference = bigquery.JobReference(jobId=uuid.uuid4().hex,
projectId=project_id)
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
dryRun=True,
query=bigquery.JobConfigurationQuery(
query=query,
useLegacySql=use_legacy_sql,
)),
jobReference=reference))
response = self.client.jobs.Insert(request)
if response.statistics is None:
# This behavior is only expected in tests
logging.warning(
"Unable to get location, missing response.statistics. Query: %s",
query)
return None
referenced_tables = response.statistics.query.referencedTables
if referenced_tables: # Guards against both non-empty and non-None
table = referenced_tables[0]
location = self.get_table_location(
table.projectId,
table.datasetId,
table.tableId)
logging.info("Using location %r from table %r referenced by query %s",
location, table, query)
return location
logging.debug("Query %s does not reference any tables.", query)
return None
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _insert_copy_job(self,
project_id,
job_id,
from_table_reference,
to_table_reference,
create_disposition=None,
write_disposition=None):
reference = bigquery.JobReference()
reference.jobId = job_id
reference.projectId = project_id
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
copy=bigquery.JobConfigurationTableCopy(
destinationTable=to_table_reference,
sourceTable=from_table_reference,
createDisposition=create_disposition,
writeDisposition=write_disposition,
)
),
jobReference=reference,
)
)
logging.info("Inserting job request: %s", request)
response = self.client.jobs.Insert(request)
logging.info("Response was %s", response)
return response.jobReference
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _insert_load_job(self,
project_id,
job_id,
table_reference,
source_uris,
schema=None,
write_disposition=None,
create_disposition=None):
reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
load=bigquery.JobConfigurationLoad(
sourceUris=source_uris,
destinationTable=table_reference,
schema=schema,
writeDisposition=write_disposition,
createDisposition=create_disposition,
sourceFormat='NEWLINE_DELIMITED_JSON',
autodetect=schema is None,
)
),
jobReference=reference,
)
)
response = self.client.jobs.Insert(request)
return response.jobReference
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _start_query_job(self, project_id, query, use_legacy_sql, flatten_results,
job_id, dry_run=False):
reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
dryRun=dry_run,
query=bigquery.JobConfigurationQuery(
query=query,
useLegacySql=use_legacy_sql,
allowLargeResults=True,
destinationTable=self._get_temp_table(project_id),
flattenResults=flatten_results)),
jobReference=reference))
response = self.client.jobs.Insert(request)
return response.jobReference.jobId
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_query_results(self, project_id, job_id,
page_token=None, max_results=10000):
request = bigquery.BigqueryJobsGetQueryResultsRequest(
jobId=job_id, pageToken=page_token, projectId=project_id,
maxResults=max_results)
response = self.client.jobs.GetQueryResults(request)
return response
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_timeout_or_quota_issues_filter)
def _insert_all_rows(self, project_id, dataset_id, table_id, rows,
skip_invalid_rows=False):
"""Calls the insertAll BigQuery API endpoint.
Docs for this BQ call: https://cloud.google.com/bigquery/docs/reference\
/rest/v2/tabledata/insertAll."""
# The rows argument is a list of
# bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as
# required by the InsertAll() method.
request = bigquery.BigqueryTabledataInsertAllRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id,
tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest(
skipInvalidRows=skip_invalid_rows,
# TODO(silviuc): Should have an option for ignoreUnknownValues?
rows=rows))
response = self.client.tabledata.InsertAll(request)
# response.insertErrors is not [] if errors encountered.
return not response.insertErrors, response.insertErrors
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_table(self, project_id, dataset_id, table_id):
"""Lookup a table's metadata object.
Args:
client: bigquery.BigqueryV2 instance
project_id, dataset_id, table_id: table lookup parameters
Returns:
bigquery.Table instance
Raises:
HttpError if lookup failed.
"""
request = bigquery.BigqueryTablesGetRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id)
response = self.client.tables.Get(request)
return response
def _create_table(self, project_id, dataset_id, table_id, schema):
table = bigquery.Table(
tableReference=bigquery.TableReference(
projectId=project_id, datasetId=dataset_id, tableId=table_id),
schema=schema)
request = bigquery.BigqueryTablesInsertRequest(
projectId=project_id, datasetId=dataset_id, table=table)
response = self.client.tables.Insert(request)
logging.debug("Created the table with id %s", table_id)
# The response is a bigquery.Table instance.
return response
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_or_create_dataset(self, project_id, dataset_id, location=None):
# Check if dataset already exists otherwise create it
try:
dataset = self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=dataset_id))
return dataset
except HttpError as exn:
if exn.status_code == 404:
dataset_reference = bigquery.DatasetReference(
projectId=project_id, datasetId=dataset_id)
dataset = bigquery.Dataset(datasetReference=dataset_reference)
if location is not None:
dataset.location = location
request = bigquery.BigqueryDatasetsInsertRequest(
projectId=project_id, dataset=dataset)
response = self.client.datasets.Insert(request)
# The response is a bigquery.Dataset instance.
return response
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _is_table_empty(self, project_id, dataset_id, table_id):
request = bigquery.BigqueryTabledataListRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id,
maxResults=1)
response = self.client.tabledata.List(request)
# The response is a bigquery.TableDataList instance.
return response.totalRows == 0
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _delete_table(self, project_id, dataset_id, table_id):
request = bigquery.BigqueryTablesDeleteRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id)
try:
self.client.tables.Delete(request)
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Table %s:%s.%s does not exist', project_id,
dataset_id, table_id)
return
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _delete_dataset(self, project_id, dataset_id, delete_contents=True):
request = bigquery.BigqueryDatasetsDeleteRequest(
projectId=project_id, datasetId=dataset_id,
deleteContents=delete_contents)
try:
self.client.datasets.Delete(request)
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Dataset %s:%s does not exist', project_id,
dataset_id)
return
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_table_location(self, project_id, dataset_id, table_id):
table = self.get_table(project_id, dataset_id, table_id)
return table.location
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def create_temporary_dataset(self, project_id, location):
dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix
# Check if dataset exists to make sure that the temporary id is unique
try:
self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=dataset_id))
if project_id is not None:
# Unittests don't pass projectIds so they can be run without error
raise RuntimeError(
'Dataset %s:%s already exists so cannot be used as temporary.'
% (project_id, dataset_id))
except HttpError as exn:
if exn.status_code == 404:
logging.warning(
'Dataset %s:%s does not exist so we will create it as temporary '
'with location=%s',
project_id, dataset_id, location)
self.get_or_create_dataset(project_id, dataset_id, location=location)
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def clean_up_temporary_dataset(self, project_id):
temp_table = self._get_temp_table(project_id)
try:
self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=temp_table.datasetId))
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Dataset %s:%s does not exist', project_id,
temp_table.datasetId)
return
else:
raise
self._delete_dataset(temp_table.projectId, temp_table.datasetId, True)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_job(self, project, job_id, location=None):
request = bigquery.BigqueryJobsGetRequest()
request.jobId = job_id
request.projectId = project
request.location = location
return self.client.jobs.Get(request)
def perform_load_job(self,
destination,
files,
job_id,
schema=None,
write_disposition=None,
create_disposition=None):
"""Starts a job to load data into BigQuery.
Returns:
bigquery.JobReference with the information about the job that was started.
"""
return self._insert_load_job(
destination.projectId, job_id, destination, files,
schema=schema,
create_disposition=create_disposition,
write_disposition=write_disposition)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_or_create_table(
self, project_id, dataset_id, table_id, schema,
create_disposition, write_disposition):
"""Gets or creates a table based on create and write dispositions.
The function mimics the behavior of BigQuery import jobs when using the
same create and write dispositions.
Args:
project_id: The project id owning the table.
dataset_id: The dataset id owning the table.
table_id: The table id.
schema: A bigquery.TableSchema instance or None.
create_disposition: CREATE_NEVER or CREATE_IF_NEEDED.
write_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.
Returns:
A bigquery.Table instance if table was found or created.
Raises:
RuntimeError: For various mismatches between the state of the table and
the create/write dispositions passed in. For example if the table is not
empty and WRITE_EMPTY was specified then an error will be raised since
the table was expected to be empty.
"""
from apache_beam.io.gcp.bigquery import BigQueryDisposition
found_table = None
try:
found_table = self.get_table(project_id, dataset_id, table_id)
except HttpError as exn:
if exn.status_code == 404:
if create_disposition == BigQueryDisposition.CREATE_NEVER:
raise RuntimeError(
'Table %s:%s.%s not found but create disposition is CREATE_NEVER.'
% (project_id, dataset_id, table_id))
else:
raise
# If table exists already then handle the semantics for WRITE_EMPTY and
# WRITE_TRUNCATE write dispositions.
if found_table:
table_empty = self._is_table_empty(project_id, dataset_id, table_id)
if (not table_empty and
write_disposition == BigQueryDisposition.WRITE_EMPTY):
raise RuntimeError(
'Table %s:%s.%s is not empty but write disposition is WRITE_EMPTY.'
% (project_id, dataset_id, table_id))
# Delete the table and recreate it (later) if WRITE_TRUNCATE was
# specified.
if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
self._delete_table(project_id, dataset_id, table_id)
# Create a new table potentially reusing the schema from a previously
# found table in case the schema was not specified.
if schema is None and found_table is None:
raise RuntimeError(
'Table %s:%s.%s requires a schema. None can be inferred because the '
'table does not exist.'
% (project_id, dataset_id, table_id))
if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:
return found_table
else:
created_table = self._create_table(project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
schema=schema or found_table.schema)
logging.info('Created table %s.%s.%s with schema %s. Result: %s.',
project_id, dataset_id, table_id,
schema or found_table.schema,
created_table)
# if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete
# the table before this point.
if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
# BigQuery can route data to the old table for 2 mins max so wait
# that much time before creating the table and writing it
logging.warning('Sleeping for 150 seconds before the write as ' +
'BigQuery inserts can be routed to deleted table ' +
'for 2 mins after the delete and create.')
# TODO(BEAM-2673): Remove this sleep by migrating to load api
time.sleep(150)
return created_table
else:
return created_table
def run_query(self, project_id, query, use_legacy_sql, flatten_results,
dry_run=False):
job_id = self._start_query_job(project_id, query, use_legacy_sql,
flatten_results, job_id=uuid.uuid4().hex,
dry_run=dry_run)
if dry_run:
# If this was a dry run then the fact that we get here means the
# query has no errors. The start_query_job would raise an error otherwise.
return
page_token = None
while True:
response = self._get_query_results(project_id, job_id, page_token)
if not response.jobComplete:
# The jobComplete field can be False if the query request times out
# (default is 10 seconds). Note that this is a timeout for the query
# request not for the actual execution of the query in the service. If
# the request times out we keep trying. This situation is quite possible
# if the query will return a large number of rows.
logging.info('Waiting on response from query: %s ...', query)
time.sleep(1.0)
continue
# We got some results. The last page is signalled by a missing pageToken.
yield response.rows, response.schema
if not response.pageToken:
break
page_token = response.pageToken
def insert_rows(self, project_id, dataset_id, table_id, rows,
skip_invalid_rows=False):
"""Inserts rows into the specified table.
Args:
project_id: The project id owning the table.
dataset_id: The dataset id owning the table.
table_id: The table id.
rows: A list of plain Python dictionaries. Each dictionary is a row and
each key in it is the name of a field.
skip_invalid_rows: If there are rows with insertion errors, whether they
should be skipped, and all others should be inserted successfully.
Returns:
      A tuple (bool, errors). If the first element is False then the second
      element will be a list of insert error entries describing the specific
      failures.
"""
# Prepare rows for insertion. Of special note is the row ID that we add to
# each row in order to help BigQuery avoid inserting a row multiple times.
# BigQuery will do a best-effort if unique IDs are provided. This situation
# can happen during retries on failures.
# TODO(silviuc): Must add support to writing TableRow's instead of dicts.
final_rows = []
for row in rows:
json_object = bigquery.JsonObject()
for k, v in iteritems(row):
if isinstance(v, decimal.Decimal):
# decimal values are converted into string because JSON does not
# support the precision that decimal supports. BQ is able to handle
# inserts into NUMERIC columns by receiving JSON with string attrs.
v = str(v)
json_object.additionalProperties.append(
bigquery.JsonObject.AdditionalProperty(
key=k, value=to_json_value(v)))
final_rows.append(
bigquery.TableDataInsertAllRequest.RowsValueListEntry(
insertId=str(self.unique_row_id),
json=json_object))
result, errors = self._insert_all_rows(
project_id, dataset_id, table_id, final_rows, skip_invalid_rows)
return result, errors
def _convert_cell_value_to_dict(self, value, field):
if field.type == 'STRING':
# Input: "XYZ" --> Output: "XYZ"
return value
elif field.type == 'BOOLEAN':
# Input: "true" --> Output: True
return value == 'true'
elif field.type == 'INTEGER':
# Input: "123" --> Output: 123
return int(value)
elif field.type == 'FLOAT':
# Input: "1.23" --> Output: 1.23
return float(value)
elif field.type == 'TIMESTAMP':
# The UTC should come from the timezone library but this is a known
# issue in python 2.7 so we'll just hardcode it as we're reading using
# utcfromtimestamp.
# Input: 1478134176.985864 --> Output: "2016-11-03 00:49:36.985864 UTC"
dt = datetime.datetime.utcfromtimestamp(float(value))
return dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC')
elif field.type == 'BYTES':
# Input: "YmJi" --> Output: "YmJi"
return value
elif field.type == 'DATE':
# Input: "2016-11-03" --> Output: "2016-11-03"
return value
elif field.type == 'DATETIME':
# Input: "2016-11-03T00:49:36" --> Output: "2016-11-03T00:49:36"
return value
elif field.type == 'TIME':
# Input: "00:49:36" --> Output: "00:49:36"
return value
elif field.type == 'RECORD':
# Note that a schema field object supports also a RECORD type. However
# when querying, the repeated and/or record fields are flattened
# unless we pass the flatten_results flag as False to the source
return self.convert_row_to_dict(value, field)
elif field.type == 'NUMERIC':
return decimal.Decimal(value)
elif field.type == 'GEOGRAPHY':
return value
else:
raise RuntimeError('Unexpected field type: %s' % field.type)
def convert_row_to_dict(self, row, schema):
"""Converts a TableRow instance using the schema to a Python dict."""
result = {}
for index, field in enumerate(schema.fields):
value = None
if isinstance(schema, bigquery.TableSchema):
cell = row.f[index]
value = from_json_value(cell.v) if cell.v is not None else None
elif isinstance(schema, bigquery.TableFieldSchema):
cell = row['f'][index]
value = cell['v'] if 'v' in cell else None
if field.mode == 'REPEATED':
if value is None:
# Ideally this should never happen as repeated fields default to
# returning an empty list
result[field.name] = []
else:
result[field.name] = [self._convert_cell_value_to_dict(x['v'], field)
for x in value]
elif value is None:
if not field.mode == 'NULLABLE':
raise ValueError('Received \'None\' as the value for the field %s '
'but the field is not NULLABLE.' % field.name)
result[field.name] = None
else:
result[field.name] = self._convert_cell_value_to_dict(value, field)
return result
# -----------------------------------------------------------------------------
# BigQueryReader, BigQueryWriter.
class BigQueryReader(dataflow_io.NativeSourceReader):
"""A reader for a BigQuery source."""
def __init__(self, source, test_bigquery_client=None, use_legacy_sql=True,
flatten_results=True, kms_key=None):
self.source = source
self.test_bigquery_client = test_bigquery_client
if auth.is_running_in_gce:
self.executing_project = auth.executing_project
elif hasattr(source, 'pipeline_options'):
self.executing_project = (
source.pipeline_options.view_as(GoogleCloudOptions).project)
else:
self.executing_project = None
# TODO(silviuc): Try to automatically get it from gcloud config info.
if not self.executing_project and test_bigquery_client is None:
raise RuntimeError(
'Missing executing project information. Please use the --project '
'command line option to specify it.')
self.row_as_dict = isinstance(self.source.coder, RowAsDictJsonCoder)
# Schema for the rows being read by the reader. It is initialized the
# first time something gets read from the table. It is not required
# for reading the field values in each row but could be useful for
# getting additional details.
self.schema = None
self.use_legacy_sql = use_legacy_sql
self.flatten_results = flatten_results
self.kms_key = kms_key
if self.source.table_reference is not None:
# If table schema did not define a project we default to executing
# project.
project_id = self.source.table_reference.projectId
if not project_id:
project_id = self.executing_project
self.query = 'SELECT * FROM [%s:%s.%s];' % (
project_id,
self.source.table_reference.datasetId,
self.source.table_reference.tableId)
elif self.source.query is not None:
self.query = self.source.query
else:
# Enforce the "modes" enforced by BigQuerySource.__init__.
# If this exception has been raised, the BigQuerySource "modes" have
# changed and this method will need to be updated as well.
raise ValueError("BigQuerySource must have either a table or query")
def _get_source_location(self):
"""
Get the source location (e.g. ``"EU"`` or ``"US"``) from either
- :data:`source.table_reference`
or
- The first referenced table in :data:`source.query`
See Also:
- :meth:`BigQueryWrapper.get_query_location`
- :meth:`BigQueryWrapper.get_table_location`
Returns:
Optional[str]: The source location, if any.
"""
if self.source.table_reference is not None:
tr = self.source.table_reference
return self.client.get_table_location(
tr.projectId if tr.projectId is not None else self.executing_project,
tr.datasetId, tr.tableId)
else: # It's a query source
return self.client.get_query_location(
self.executing_project,
self.source.query,
self.source.use_legacy_sql)
def __enter__(self):
self.client = BigQueryWrapper(client=self.test_bigquery_client)
self.client.create_temporary_dataset(
self.executing_project, location=self._get_source_location())
return self
def __exit__(self, exception_type, exception_value, traceback):
self.client.clean_up_temporary_dataset(self.executing_project)
def __iter__(self):
for rows, schema in self.client.run_query(
project_id=self.executing_project, query=self.query,
use_legacy_sql=self.use_legacy_sql,
flatten_results=self.flatten_results):
if self.schema is None:
self.schema = schema
for row in rows:
if self.row_as_dict:
yield self.client.convert_row_to_dict(row, schema)
else:
yield row
class BigQueryWriter(dataflow_io.NativeSinkWriter):
"""The sink writer for a BigQuerySink."""
def __init__(self, sink, test_bigquery_client=None, buffer_size=None):
self.sink = sink
self.test_bigquery_client = test_bigquery_client
self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder)
# Buffer used to batch written rows so we reduce communication with the
# BigQuery service.
self.rows_buffer = []
self.rows_buffer_flush_threshold = buffer_size or 1000
# Figure out the project, dataset, and table used for the sink.
self.project_id = self.sink.table_reference.projectId
# If table schema did not define a project we default to executing project.
if self.project_id is None and hasattr(sink, 'pipeline_options'):
self.project_id = (
sink.pipeline_options.view_as(GoogleCloudOptions).project)
assert self.project_id is not None
self.dataset_id = self.sink.table_reference.datasetId
self.table_id = self.sink.table_reference.tableId
def _flush_rows_buffer(self):
if self.rows_buffer:
logging.info('Writing %d rows to %s:%s.%s table.', len(self.rows_buffer),
self.project_id, self.dataset_id, self.table_id)
passed, errors = self.client.insert_rows(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=self.rows_buffer)
self.rows_buffer = []
if not passed:
raise RuntimeError('Could not successfully insert rows to BigQuery'
' table [%s:%s.%s]. Errors: %s' %
(self.project_id, self.dataset_id,
self.table_id, errors))
def __enter__(self):
self.client = BigQueryWrapper(client=self.test_bigquery_client)
self.client.get_or_create_table(
self.project_id, self.dataset_id, self.table_id, self.sink.table_schema,
self.sink.create_disposition, self.sink.write_disposition)
return self
def __exit__(self, exception_type, exception_value, traceback):
self._flush_rows_buffer()
def Write(self, row):
self.rows_buffer.append(row)
if len(self.rows_buffer) > self.rows_buffer_flush_threshold:
self._flush_rows_buffer()
class RowAsDictJsonCoder(coders.Coder):
"""A coder for a table row (represented as a dict) to/from a JSON string.
This is the default coder for sources and sinks if the coder argument is not
specified.
"""
def encode(self, table_row):
# The normal error when dumping NAN/INF values is:
# ValueError: Out of range float values are not JSON compliant
# This code will catch this error to emit an error that explains
# to the programmer that they have used NAN/INF values.
try:
return json.dumps(
table_row, allow_nan=False, default=default_encoder).encode('utf-8')
except ValueError as e:
raise ValueError('%s. %s' % (e, JSON_COMPLIANCE_ERROR))
def decode(self, encoded_table_row):
return json.loads(encoded_table_row.decode('utf-8'))
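# A minimal round-trip sketch of the coder above (illustrative helper, not part
# of the module's API): decimal.Decimal values are serialized as strings by
# default_encoder, so they come back as str after decoding.
def _row_coder_example():
  coder = RowAsDictJsonCoder()
  encoded = coder.encode({'price': decimal.Decimal('1.25'), 'qty': 3})
  return coder.decode(encoded)  # {'price': '1.25', 'qty': 3}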
class RetryStrategy(object):
RETRY_ALWAYS = 'RETRY_ALWAYS'
RETRY_NEVER = 'RETRY_NEVER'
RETRY_ON_TRANSIENT_ERROR = 'RETRY_ON_TRANSIENT_ERROR'
_NON_TRANSIENT_ERRORS = {'invalid', 'invalidQuery', 'notImplemented'}
@staticmethod
def should_retry(strategy, error_message):
if strategy == RetryStrategy.RETRY_ALWAYS:
return True
elif strategy == RetryStrategy.RETRY_NEVER:
return False
elif (strategy == RetryStrategy.RETRY_ON_TRANSIENT_ERROR and
error_message not in RetryStrategy._NON_TRANSIENT_ERRORS):
return True
else:
return False
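# A small sketch of how the retry policy above is consulted (illustrative
# helper): transient error reasons are retried under RETRY_ON_TRANSIENT_ERROR,
# while reasons listed in _NON_TRANSIENT_ERRORS (e.g. 'invalid') are not.
def _retry_strategy_example():
  assert RetryStrategy.should_retry(
      RetryStrategy.RETRY_ON_TRANSIENT_ERROR, 'backendError')
  assert not RetryStrategy.should_retry(
      RetryStrategy.RETRY_ON_TRANSIENT_ERROR, 'invalid')
  return True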
class AppendDestinationsFn(DoFn):
"""Adds the destination to an element, making it a KV pair.
Outputs a PCollection of KV-pairs where the key is a TableReference for the
destination, and the value is the record itself.
Experimental; no backwards compatibility guarantees.
"""
def __init__(self, destination):
self.destination = AppendDestinationsFn._get_table_fn(destination)
@staticmethod
def _value_provider_or_static_val(elm):
if isinstance(elm, value_provider.ValueProvider):
return elm
else:
# The type argument is a NoOp, because we assume the argument already has
# the proper formatting.
return value_provider.StaticValueProvider(lambda x: x, value=elm)
@staticmethod
def _get_table_fn(destination):
if callable(destination):
return destination
else:
return lambda x: AppendDestinationsFn._value_provider_or_static_val(
destination).get()
def process(self, element):
yield (self.destination(element), element)
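# A minimal sketch of the DoFn above with a callable destination (illustrative
# helper; the table string is a placeholder): process() emits
# (destination, element) pairs that downstream grouping can key on.
def _append_destinations_example():
  fn = AppendDestinationsFn(lambda row: 'my-project:my_dataset.my_table')
  return list(fn.process({'id': 1}))  # [('my-project:my_dataset.my_table', {'id': 1})]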
| 1.671875 | 2 |
VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt5.py | workingyifei/display-pattern-generator | 3 | 350 | #-----------------------------------------------------------------------------
# Copyright (c) 2005-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import (
get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute,
collect_data_files)
from PyInstaller.compat import getsitepackages, is_darwin, is_win
# On Windows system PATH has to be extended to point to the PyQt5 directory.
# The PySide directory contains Qt dlls. We need to avoid including different
# version of Qt libraries when there is installed another application (e.g. QtCreator)
if is_win:
from PyInstaller.utils.win32.winutils import extend_system_path
extend_system_path([os.path.join(x, 'PyQt5') for x in getsitepackages()])
extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')),
'Qt', 'bin')])
# In the new consolidated mode any PyQt depends on _qt
hiddenimports = ['sip', 'PyQt5.Qt']
# Collect just the qt.conf file.
datas = [x for x in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if
x[0].endswith('qt.conf')]
# For Qt<5.4 to work on Mac OS X it is necessary to include `qt_menu.nib`.
# This directory contains some resource files necessary to run PyQt or PySide
# app.
if is_darwin:
# Version of the currently installed Qt 5.x shared library.
qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR')
if is_module_satisfies('Qt < 5.4', qt_version):
datas = [(qt_menu_nib_dir('PyQt5'), '')]
| 1.890625 | 2 |
tests/ast/nodes/test_from_node.py | upgradvisor/vyper | 1,471 | 351 | from vyper import ast as vy_ast
def test_output_class():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert isinstance(new_node, vy_ast.Int)
def test_source():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.src == new_node.src
assert old_node.node_source_code == new_node.node_source_code
def test_kwargs():
old_node = vy_ast.parse_to_ast("42").body[0].value
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.value == 42
assert new_node.value == 666
def test_compare_nodes():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert not vy_ast.compare_nodes(old_node, new_node)
def test_new_node_has_no_parent():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert new_node._parent is None
assert new_node._depth == 0
| 2.421875 | 2 |
generator/modules/opencv.py | dayta-ai/deepo | 1 | 352 | # -*- coding: utf-8 -*-
from .__module__ import Module, dependency, source, version
from .tools import Tools
from .boost import Boost
from .python import Python
@dependency(Tools, Python, Boost)
@source('git')
@version('4.0.1')
class Opencv(Module):
def build(self):
return r'''
RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \
DEBIAN_FRONTEND=noninteractive \
add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main" && \
apt update && \
$APT_INSTALL \
libatlas-base-dev \
libgflags-dev \
libgoogle-glog-dev \
libhdf5-serial-dev \
libleveldb-dev \
liblmdb-dev \
libprotobuf-dev \
libsnappy-dev \
protobuf-compiler \
libopencv-dev \
yasm \
libjpeg-dev \
libjasper-dev \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
libdc1394-22-dev \
libv4l-dev \
libtbb-dev \
libqt4-dev \
libgtk2.0-dev \
libfaac-dev \
libmp3lame-dev \
libopencore-amrnb-dev \
libopencore-amrwb-dev \
libtheora-dev \
libvorbis-dev \
libxvidcore-dev \
x264 \
v4l-utils \
ffmpeg \
&& \
$GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv && \
$GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \
mkdir -p opencv/build && cd opencv/build && \
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D WITH_IPP=OFF \
-D WITH_CUDA=OFF \
-D WITH_TBB=ON \
-D WITH_V4L=ON \
-D WITH_QT=ON \
-D WITH_OPENCL=ON \
-D WITH_GTK=ON \
-D WITH_LIBV4L=ON \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D WITH_FFMPEG=ON \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \
.. && \
make -j"$(nproc)" install && \
ln -s /usr/local/include/opencv4/opencv2 /usr/local/include/opencv2
'''.format(self.version)
| 1.625 | 2 |
day16/solve16.py | jmacarthur/aoc2017 | 0 | 353 | #!/usr/bin/python
import sys
import copy
stage_length = 16
stage = map(chr, range(ord('a'),ord('a')+stage_length))
def spin(amount):
"""To save time, this function isn't used except at the end.
Normally, a counter marks the start of the stage and this changes
instead. """
global stage
stage = stage[amount:] + stage[:amount]
def swap(pos1, pos2):
global stage
(stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1])
with open(sys.argv[1], 'rt') as f:
program = ",".join(f.readlines()).split(",")
n = 0
pos = 0
arguments_list = [x[1:].strip().split("/") for x in program]
action_list = [x[0] for x in program]
history = []
# Change this to 1 for the solution to part 1.
iterations = 1000000000
while n<iterations:
for s in range(0,len(program)):
arguments = arguments_list[s]
if action_list[s] == 's':
pos += stage_length-int(arguments[0])
elif action_list[s] == 'x':
swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length)
elif action_list[s] == 'p':
pos1 = stage.index(arguments[0])
pos2 = stage.index(arguments[1])
swap(pos1, pos2)
if stage in history:
print("Duplicate found: %r at index %d matches at stage %d"%(stage, history.index(stage), n))
loop_length = n - history.index(stage)
complete_cycles = (iterations - n) / loop_length
n += complete_cycles * loop_length
history.append(copy.copy(stage))
n += 1
spin(pos % stage_length)
print "".join(stage)
| 3.390625 | 3 |
skimage/segmentation/tests/test_felzenszwalb.py | jaberg/scikits-image | 2 | 354 | <reponame>jaberg/scikits-image
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from nose.tools import assert_greater
from skimage.segmentation import felzenszwalb
def test_grey():
# very weak tests. This algorithm is pretty unstable.
img = np.zeros((20, 21))
img[:10, 10:] = 0.2
img[10:, :10] = 0.4
img[10:, 10:] = 0.6
seg = felzenszwalb(img, sigma=0)
# we expect 4 segments:
assert_equal(len(np.unique(seg)), 4)
# that mostly respect the 4 regions:
for i in range(4):
hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]
assert_greater(hist[i], 40)
def test_color():
# very weak tests. This algorithm is pretty unstable.
img = np.zeros((20, 21, 3))
img[:10, :10, 0] = 1
img[10:, :10, 1] = 1
img[10:, 10:, 2] = 1
seg = felzenszwalb(img, sigma=0)
# we expect 4 segments:
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 0)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 1)
assert_array_equal(seg[10:, 10:], 3)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| 2.375 | 2 |
tests/middleware/test_csrf_middleware.py | w3x10e8/core | 0 | 355 | <filename>tests/middleware/test_csrf_middleware.py
from masonite.request import Request
from masonite.view import View
from masonite.auth.Csrf import Csrf
from masonite.app import App
from masonite.middleware import CsrfMiddleware
from masonite.testsuite.TestSuite import generate_wsgi
import pytest
from masonite.exceptions import InvalidCSRFToken
class TestCSRFMiddleware:
def setup_method(self):
self.app = App()
self.request = Request(generate_wsgi())
self.view = View(self.app)
self.app.bind('Request', self.request)
self.request = self.app.make('Request')
self.middleware = CsrfMiddleware(self.request, Csrf(self.request), self.view)
def test_middleware_shares_correct_input(self):
self.middleware.before()
assert 'csrf_field' in self.view.dictionary
assert self.view.dictionary['csrf_field'].startswith("<input type='hidden' name='__token' value='")
def test_middleware_throws_exception_on_post(self):
self.request.environ['REQUEST_METHOD'] = 'POST'
self.middleware.exempt = []
with pytest.raises(InvalidCSRFToken):
self.middleware.before()
def test_incoming_token_does_not_throw_exception_with_token(self):
self.request.environ['REQUEST_METHOD'] = 'POST'
self.request.request_variables.update({'__token': self.request.get_cookie('csrf_token')})
self.middleware.exempt = []
self.middleware.before()
| 2.28125 | 2 |
phoible/views.py | ltxom/phoible | 31 | 356 | <reponame>ltxom/phoible
from pyramid.view import view_config
import os
@view_config(route_name='faq', renderer='faq.mako')
def faq_view(request):
dir_path = os.path.dirname(__file__)
faq_file = os.path.join(dir_path, 'static/faq_with_indexes.html')
with open(faq_file, 'r') as f:
faq_page = f.read()
return {'content': faq_page}
@view_config(route_name='conventions', renderer='conventions.mako')
def conventions_view(request):
dir_path = os.path.dirname(__file__)
conventions_file = os.path.join(dir_path, 'static/conventions.html')
with open(conventions_file, 'r') as file:
conventions_page = file.read().replace('\n', '')
return {'content': conventions_page}
| 2.234375 | 2 |
tests/restapi/test_routes.py | aiace9/aiida-core | 0 | 357 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=too-many-lines
"""Unittests for REST API."""
import tempfile
from flask_cors.core import ACL_ORIGIN
from aiida import orm
from aiida.backends.testbase import AiidaTestCase
from aiida.common import json
from aiida.common.links import LinkType
from aiida.restapi.run_api import configure_api
class RESTApiTestCase(AiidaTestCase):
"""
Setup of the tests for the AiiDA RESTful-api
"""
_url_prefix = '/api/v4'
_dummy_data = {}
_PERPAGE_DEFAULT = 20
_LIMIT_DEFAULT = 400
@classmethod
def setUpClass(cls, *args, **kwargs): # pylint: disable=too-many-locals, too-many-statements
"""
Add objects to the database for different requests/filters/orderings etc.
"""
super().setUpClass()
api = configure_api(catch_internal_server=True)
cls.app = api.app
cls.app.config['TESTING'] = True
# create test inputs
cell = ((2., 0., 0.), (0., 2., 0.), (0., 0., 2.))
structure = orm.StructureData(cell=cell)
structure.append_atom(position=(0., 0., 0.), symbols=['Ba'])
structure.store()
structure.add_comment('This is test comment.')
structure.add_comment('Add another comment.')
cif = orm.CifData(ase=structure.get_ase())
cif.store()
parameter1 = orm.Dict(dict={'a': 1, 'b': 2})
parameter1.store()
parameter2 = orm.Dict(dict={'c': 3, 'd': 4})
parameter2.store()
kpoint = orm.KpointsData()
kpoint.set_kpoints_mesh([4, 4, 4])
kpoint.store()
resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}
calcfunc = orm.CalcFunctionNode(computer=cls.computer)
calcfunc.store()
calc = orm.CalcJobNode(computer=cls.computer)
calc.set_option('resources', resources)
calc.set_attribute('attr1', 'OK')
calc.set_attribute('attr2', 'OK')
calc.set_extra('extra1', False)
calc.set_extra('extra2', 'extra_info')
calc.add_incoming(structure, link_type=LinkType.INPUT_CALC, link_label='link_structure')
calc.add_incoming(parameter1, link_type=LinkType.INPUT_CALC, link_label='link_parameter')
aiida_in = 'The input file\nof the CalcJob node'
# Add the calcjob_inputs folder with the aiida.in file to the CalcJobNode repository
with tempfile.NamedTemporaryFile(mode='w+') as handle:
handle.write(aiida_in)
handle.flush()
handle.seek(0)
calc.put_object_from_filelike(handle, 'calcjob_inputs/aiida.in', force=True)
calc.store()
# create log message for calcjob
import logging
from aiida.common.log import LOG_LEVEL_REPORT
from aiida.common.timezone import now
from aiida.orm import Log
log_record = {
'time': now(),
'loggername': 'loggername',
'levelname': logging.getLevelName(LOG_LEVEL_REPORT),
'dbnode_id': calc.id,
'message': 'This is a template record message',
'metadata': {
'content': 'test'
},
}
Log(**log_record)
aiida_out = 'The output file\nof the CalcJob node'
retrieved_outputs = orm.FolderData()
# Add the calcjob_outputs folder with the aiida.out file to the FolderData node
with tempfile.NamedTemporaryFile(mode='w+') as handle:
handle.write(aiida_out)
handle.flush()
handle.seek(0)
retrieved_outputs.put_object_from_filelike(handle, 'calcjob_outputs/aiida.out', force=True)
retrieved_outputs.store()
retrieved_outputs.add_incoming(calc, link_type=LinkType.CREATE, link_label='retrieved')
kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create')
calc1 = orm.CalcJobNode(computer=cls.computer)
calc1.set_option('resources', resources)
calc1.store()
dummy_computers = [{
'label': 'test1',
'hostname': 'test1.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'pbspro',
}, {
'label': 'test2',
'hostname': 'test2.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'torque',
}, {
'label': 'test3',
'hostname': 'test3.epfl.ch',
'transport_type': 'local',
'scheduler_type': 'slurm',
}, {
'label': 'test4',
'hostname': 'test4.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'slurm',
}]
for dummy_computer in dummy_computers:
computer = orm.Computer(**dummy_computer)
computer.store()
# Prepare typical REST responses
cls.process_dummy_data()
def get_dummy_data(self):
return self._dummy_data
def get_url_prefix(self):
return self._url_prefix
@classmethod
def process_dummy_data(cls):
# pylint: disable=fixme
"""
This functions prepare atomic chunks of typical responses from the
RESTapi and puts them into class attributes
"""
# TODO: Storing the different nodes as lists and accessing them
# by their list index is very fragile and a pain to debug.
# Please change this!
computer_projections = ['id', 'uuid', 'name', 'hostname', 'transport_type', 'scheduler_type']
computers = orm.QueryBuilder().append(orm.Computer, tag='comp', project=computer_projections).order_by({
'comp': [{
'id': {
'order': 'asc'
}
}]
}).dict()
# Cast UUID into a string (e.g. in sqlalchemy it comes as a UUID object)
computers = [_['comp'] for _ in computers]
for comp in computers:
if comp['uuid'] is not None:
comp['uuid'] = str(comp['uuid'])
cls._dummy_data['computers'] = computers
calculation_projections = ['id', 'uuid', 'user_id', 'node_type']
calculations = orm.QueryBuilder().append(orm.CalculationNode, tag='calc',
project=calculation_projections).order_by({
'calc': [{
'id': {
'order': 'desc'
}
}]
}).dict()
calculations = [_['calc'] for _ in calculations]
for calc in calculations:
if calc['uuid'] is not None:
calc['uuid'] = str(calc['uuid'])
cls._dummy_data['calculations'] = calculations
data_projections = ['id', 'uuid', 'user_id', 'node_type']
data_types = {
'cifdata': orm.CifData,
'parameterdata': orm.Dict,
'structuredata': orm.StructureData,
'data': orm.Data,
}
for label, dataclass in data_types.items():
data = orm.QueryBuilder().append(dataclass, tag='data', project=data_projections).order_by({
'data': [{
'id': {
'order': 'desc'
}
}]
}).dict()
data = [_['data'] for _ in data]
for datum in data:
if datum['uuid'] is not None:
datum['uuid'] = str(datum['uuid'])
cls._dummy_data[label] = data
def split_path(self, url):
# pylint: disable=no-self-use
"""
Split the url with "?" to get url path and it's parameters
:param url: Web url
:return: url path and url parameters
"""
parts = url.split('?')
path = ''
query_string = ''
if parts:
path = parts[0]
if len(parts) > 1:
query_string = parts[1]
return path, query_string
def compare_extra_response_data(self, node_type, url, response, uuid=None):
"""
In url response, we pass some extra information/data along with the node
results. e.g. url method, node_type, path, pk, query_string, url,
url_root,
etc.
        :param node_type: url requested for the type of the node
:param url: web url
:param response: url response
:param uuid: url requested for the node pk
"""
path, query_string = self.split_path(url)
self.assertEqual(response['method'], 'GET')
self.assertEqual(response['resource_type'], node_type)
self.assertEqual(response['path'], path)
self.assertEqual(response['id'], uuid)
self.assertEqual(response['query_string'], query_string)
self.assertEqual(response['url'], f'http://localhost{url}')
self.assertEqual(response['url_root'], 'http://localhost/')
# node details and list with limit, offset, page, perpage
def process_test(
self,
entity_type,
url,
full_list=False,
empty_list=False,
expected_list_ids=None,
expected_range=None,
expected_errormsg=None,
uuid=None,
result_node_type=None,
result_name=None
):
# pylint: disable=too-many-arguments
"""
Check whether response matches expected values.
:param entity_type: url requested for the type of the node
:param url: web url
:param full_list: if url is requested to get full list
:param empty_list: if the response list is empty
:param expected_list_ids: list of expected ids from data
:param expected_range: [start, stop] range of expected ids from data
:param expected_errormsg: expected error message in response
:param uuid: url requested for the node pk
:param result_node_type: node type in response data
:param result_name: result name in response e.g. incoming, outgoing
"""
if expected_list_ids is None:
expected_list_ids = []
if expected_range is None:
expected_range = []
if result_node_type is None and result_name is None:
result_node_type = entity_type
result_name = entity_type
url = self._url_prefix + url
with self.app.test_client() as client:
rv_response = client.get(url)
response = json.loads(rv_response.data)
if expected_errormsg:
self.assertEqual(response['message'], expected_errormsg)
else:
if full_list:
expected_data = self._dummy_data[result_node_type]
elif empty_list:
expected_data = []
elif expected_list_ids:
expected_data = [self._dummy_data[result_node_type][i] for i in expected_list_ids]
elif expected_range != []:
expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]]
else:
from aiida.common.exceptions import InputValidationError
raise InputValidationError('Pass the expected range of the dummydata')
expected_node_uuids = [node['uuid'] for node in expected_data]
result_node_uuids = [node['uuid'] for node in response['data'][result_name]]
self.assertEqual(expected_node_uuids, result_node_uuids)
self.compare_extra_response_data(entity_type, url, response, uuid)
class RESTApiTestSuite(RESTApiTestCase):
# pylint: disable=too-many-public-methods
"""
Define unittests for rest api
"""
############### generic endpoints ########################
def test_server(self):
"""
Test that /server endpoint returns AiiDA version
"""
url = f'{self.get_url_prefix()}/server'
from aiida import __version__
with self.app.test_client() as client:
response = client.get(url)
data = json.loads(response.data)['data']
self.assertEqual(__version__, data['AiiDA_version'])
self.assertEqual(self.get_url_prefix(), data['API_prefix'])
def test_base_url(self):
"""
Test that / returns list of endpoints
"""
with self.app.test_client() as client:
data_base = json.loads(client.get(self.get_url_prefix() + '/').data)['data']
data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data']
self.assertTrue(len(data_base['available_endpoints']) > 0)
self.assertDictEqual(data_base, data_server)
def test_cors_headers(self):
"""
Test that REST API sets cross-origin resource sharing headers
"""
url = f'{self.get_url_prefix()}/server'
with self.app.test_client() as client:
response = client.get(url)
headers = response.headers
self.assertEqual(headers.get(ACL_ORIGIN), '*')
############### computers endpoint ########################
def test_computers_details(self):
"""
Requests the details of single computer
"""
node_uuid = self.get_dummy_data()['computers'][1]['uuid']
RESTApiTestCase.process_test(
self, 'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid
)
def test_computers_list(self):
"""
Get the full list of computers from database
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)
def test_computers_list_limit_offset(self):
"""
Get the list of computers from database using limit
and offset parameter.
It should return the no of rows specified in limit from
database starting from the no. specified in offset
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4]
)
def test_computers_list_limit_only(self):
"""
Get the list of computers from database using limit
parameter.
It should return the no of rows specified in limit from
database.
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2])
def test_computers_list_offset_only(self):
"""
Get the list of computers from database using offset
parameter
It should return all the rows from database starting from
the no. specified in offset
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None])
def test_computers_list_limit_offset_perpage(self):
"""
If we pass the limit, offset and perpage at same time, it
would return the error message.
"""
expected_error = 'perpage key is incompatible with limit and offset'
RESTApiTestCase.process_test(
self, 'computers', '/computers?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error
)
def test_computers_list_page_limit_offset(self):
"""
If we use the page, limit and offset at same time, it
would return the error message.
"""
expected_error = 'requesting a specific page is incompatible with ' \
'limit and offset'
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error
)
def test_complist_pagelimitoffset_perpage(self):
"""
If we use the page, limit, offset and perpage at same time, it
would return the error message.
"""
expected_error = 'perpage key is incompatible with limit and offset'
RESTApiTestCase.process_test(
self,
'computers',
'/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id',
expected_errormsg=expected_error
)
def test_computers_list_page_default(self):
"""
it returns the no. of rows defined as default perpage option
from database.
no.of pages = total no. of computers in database / perpage
"/page" acts as "/page/1?perpage=default_value"
"""
RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True)
def test_computers_list_page_perpage(self):
"""
no.of pages = total no. of computers in database / perpage
Using this formula it returns the no. of rows for requested page
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2]
)
def test_computers_list_page_perpage_exceed(self):
"""
no.of pages = total no. of computers in database / perpage
If we request the page which exceeds the total no. of pages then
it would return the error message.
"""
expected_error = 'Non existent page requested. The page range is [1 : ' \
'3]'
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error
)
############### list filters ########################
def test_computers_filter_id1(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id=1)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(self, 'computers', f'/computers?id={str(node_pk)}', expected_list_ids=[1])
def test_computers_filter_id2(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id > 2)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?id>{str(node_pk)}&orderby=+id', expected_range=[2, None]
)
def test_computers_filter_pk(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id=1)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1])
def test_computers_filter_name(self):
"""
Add filter for the name of computer and get the filtered computer
list
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?name="test1"', expected_list_ids=[1])
def test_computers_filter_hostname(self):
"""
Add filter for the hostname of computer and get the filtered computer
list
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?hostname="test1.epfl.ch"', expected_list_ids=[1])
def test_computers_filter_transport_type(self):
"""
Add filter for the transport_type of computer and get the filtered
computer
list
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers?transport_type="local"&name="test3"&orderby=+id', expected_list_ids=[3]
)
############### list orderby ########################
def test_computers_orderby_id_asc(self):
"""
Returns the computers list ordered by "id" in ascending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True)
def test_computers_orderby_id_asc_sign(self):
"""
Returns the computers list ordered by "+id" in ascending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)
def test_computers_orderby_id_desc(self):
"""
Returns the computers list ordered by "id" in descending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4, 3, 2, 1, 0])
def test_computers_orderby_name_asc(self):
"""
Returns the computers list ordered by "name" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=name', expected_list_ids=[1, 2, 3, 4]
)
def test_computers_orderby_name_asc_sign(self):
"""
Returns the computers list ordered by "+name" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3, 4]
)
def test_computers_orderby_name_desc(self):
"""
Returns the computers list ordered by "name" in descending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3, 2, 1]
)
def test_computers_orderby_scheduler_type_asc(self):
"""
Returns the computers list ordered by "scheduler_type" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?transport_type=\"ssh\"&pk>{str(node_pk)}&orderby=scheduler_type",
expected_list_ids=[1, 4, 2]
)
def test_comp_orderby_scheduler_ascsign(self):
"""
Returns the computers list ordered by "+scheduler_type" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?transport_type=\"ssh\"&pk>{str(node_pk)}&orderby=+scheduler_type",
expected_list_ids=[1, 4, 2]
)
def test_computers_orderby_schedulertype_desc(self):
"""
Returns the computers list ordered by "scheduler_type" in descending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?pk>{str(node_pk)}&transport_type=\"ssh\"&orderby=-scheduler_type",
expected_list_ids=[2, 4, 1]
)
############### list orderby combinations #######################
def test_computers_orderby_mixed1(self):
"""
Returns the computers list first order by "transport_type" in
ascending order and if it is having same transport_type, order it
by "id"
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f'/computers?pk>{str(node_pk)}&orderby=transport_type,id',
expected_list_ids=[3, 1, 2, 4]
)
def test_computers_orderby_mixed2(self):
"""
Returns the computers list first order by "scheduler_type" in
descending order and if it is having same scheduler_type, order it
by "name"
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name',
expected_list_ids=[2, 3, 4, 1]
)
def test_computers_orderby_mixed3(self):
"""
Returns the computers list first order by "scheduler_type" in
ascending order and if it is having same scheduler_type, order it
by "hostname" descending order
Response::
test4 slurm
test3 slurm
test2 torque
test1 pbspro
localhost pbspro
==========
Expected::
test1 pbspro
localhost pbspro
test4 slurm
test3 slurm
test2 torque
test1 test4
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=+scheduler_type,
-hostname",
expected_list_ids=[1,0,4,3,2])
"""
############### list filter combinations #######################
def test_computers_filter_mixed1(self):
"""
Add filter for the hostname and id of computer and get the
filtered computer list
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f"/computers?id>{str(node_pk)}&hostname=\"test1.epfl.ch\"", expected_list_ids=[1]
)
def test_computers_filter_mixed2(self):
"""
Add filter for the id, hostname and transport_type of the computer
and get the filtered computer list
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?id>{str(node_pk)}&hostname=\"test3.epfl.ch\"&transport_type=\"ssh\"",
empty_list=True
)
############### list all parameter combinations #######################
def test_computers_mixed1(self):
"""
url parameters: id, limit and offset
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?id>{str(node_pk)}&limit=2&offset=3&orderby=+id', expected_list_ids=[4]
)
def test_computers_mixed2(self):
"""
url parameters: id, page, perpage
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4]
)
def test_computers_mixed3(self):
"""
url parameters: id, transport_type, orderby
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?id>={str(node_pk)}&transport_type=\"ssh\"&orderby=-id&limit=2",
expected_list_ids=[4, 2]
)
########## pass unknown url parameter ###########
def test_computers_unknown_param(self):
"""
url parameters: id, limit and offset
from aiida.common.exceptions import InputValidationError
RESTApiTestCase.node_exception(self, "/computers?aa=bb&id=2", InputValidationError)
"""
############### calculation retrieved_inputs and retrieved_outputs #############
def test_calculation_retrieved_inputs(self):
"""
Get the list of given calculation retrieved_inputs
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/input_files'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}])
def test_calculation_retrieved_outputs(self):
"""
Get the list of given calculation retrieved_outputs
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}])
############### calculation incoming #############
def test_calculation_inputs(self):
"""
        Get the list of incoming links of the given calculation
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
self.process_test(
'nodes',
f'/nodes/{str(node_uuid)}/links/incoming?orderby=id',
expected_list_ids=[5, 3],
uuid=node_uuid,
result_node_type='data',
result_name='incoming'
)
def test_calculation_input_filters(self):
"""
Get filtered incoming list for given calculations
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
self.process_test(
'nodes',
f"/nodes/{str(node_uuid)}/links/incoming?node_type=\"data.dict.Dict.\"",
expected_list_ids=[3],
uuid=node_uuid,
result_node_type='data',
result_name='incoming'
)
def test_calculation_iotree(self):
"""
        Get the incoming and outgoing link tree of the given calculation
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(len(response['data']['nodes']), 1)
self.assertEqual(len(response['data']['nodes'][0]['incoming']), 1)
self.assertEqual(len(response['data']['nodes'][0]['outgoing']), 1)
self.assertEqual(len(response['data']['metadata']), 1)
expected_attr = [
'ctime', 'mtime', 'id', 'node_label', 'node_type', 'uuid', 'description', 'incoming', 'outgoing'
]
received_attr = response['data']['nodes'][0].keys()
for attr in expected_attr:
self.assertIn(attr, received_attr)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
############### calculation attributes #############
def test_calculation_attributes(self):
"""
Get list of calculation attributes
"""
attributes = {
'attr1': 'OK',
'attr2': 'OK',
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(response['data']['attributes'], attributes)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
def test_contents_attributes_filter(self):
"""
Get list of calculation attributes with filter attributes_filter
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter=\"attr1\""
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(response['data']['attributes'], {'attr1': 'OK'})
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
############### calculation node attributes filter #############
def test_calculation_attributes_filter(self):
"""
Get the list of given calculation attributes filtered
"""
attributes = {
'attr1': 'OK',
'attr2': 'OK',
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['nodes'][0]['attributes'], attributes)
############### calculation node extras_filter #############
def test_calculation_extras_filter(self):
"""
Get the list of given calculation extras filtered
"""
extras = {'extra1': False, 'extra2': 'extra_info'}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1'])
self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2'])
############### structure node attributes filter #############
def test_structure_attributes_filter(self):
"""
        Get the attributes of the given structure node, filtered with attributes_filter
"""
cell = [[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertEqual(response['data']['nodes'][0]['attributes']['cell'], cell)
############### node attributes_filter with pagination #############
def test_node_attributes_filter_pagination(self):
"""
Check that node attributes specified in attributes_filter are
returned as a dictionary when pagination is set
"""
expected_attributes = ['resources', 'cell']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertIn('attributes', node)
self.assertNotIn('attributes.resources', node)
self.assertNotIn('attributes.cell', node)
self.assertEqual(len(node['attributes']), len(expected_attributes))
for attr in expected_attributes:
self.assertIn(attr, node['attributes'])
############### node get one attributes_filter with pagination #############
def test_node_single_attributes_filter(self):
"""
Check that when only one node attribute is specified in attributes_filter
only this attribute is returned as a dictionary when pagination is set
"""
expected_attribute = ['resources']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertEqual(list(node['attributes'].keys()), expected_attribute)
############### node extras_filter with pagination #############
def test_node_extras_filter_pagination(self):
"""
Check that node extras specified in extras_filter are
returned as a dictionary when pagination is set
"""
expected_extras = ['extra1', 'extra2']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertIn('extras', node)
self.assertNotIn('extras.extra1', node)
self.assertNotIn('extras.extra2', node)
self.assertEqual(len(node['extras']), len(expected_extras))
for extra in expected_extras:
self.assertIn(extra, node['extras'])
############### node get one extras_filter with pagination #############
def test_node_single_extras_filter(self):
"""
Check that when only one node extra is specified in extras_filter
only this extra is returned as a dictionary when pagination is set
"""
expected_extra = ['extra2']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertEqual(list(node['extras'].keys()), expected_extra)
############### node full_type filter #############
def test_nodes_full_type_filter(self):
"""
Get the list of nodes filtered by full_type
"""
expected_node_uuids = []
for calc in self.get_dummy_data()['calculations']:
if calc['node_type'] == 'process.calculation.calcjob.CalcJobNode.':
expected_node_uuids.append(calc['uuid'])
url = f"{self.get_url_prefix()}/nodes/?full_type=\"process.calculation.calcjob.CalcJobNode.|\""
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
for node in response['data']['nodes']:
self.assertIn(node['uuid'], expected_node_uuids)
############### Structure visualization and download #############
def test_structure_derived_properties(self):
"""
        Get the derived properties of the given structure node
"""
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/derived_properties'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(
response['data']['derived_properties']['dimensionality'], {
'dim': 3,
'value': 8.0,
'label': 'volume'
}
)
self.assertEqual(response['data']['derived_properties']['formula'], 'Ba')
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
def test_structure_download(self):
"""
Test download of structure file
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=xsf'
with self.app.test_client() as client:
rv_obj = client.get(url)
structure_data = load_node(node_uuid)._exportcontent('xsf')[0] # pylint: disable=protected-access
self.assertEqual(rv_obj.data, structure_data)
def test_cif(self):
"""
Test download of cif file
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['cifdata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=cif'
with self.app.test_client() as client:
rv_obj = client.get(url)
cif = load_node(node_uuid)._prepare_cif()[0] # pylint: disable=protected-access
self.assertEqual(rv_obj.data, cif)
############### projectable_properties #############
def test_projectable_properties(self):
"""
test projectable_properties endpoint
"""
for nodetype in ['nodes', 'processes', 'computers', 'users', 'groups']:
url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
expected_keys = ['display_name', 'help_text', 'is_display', 'is_foreign_key', 'type']
# check fields
for _, pinfo in response['data']['fields'].items():
available_keys = pinfo.keys()
for prop in expected_keys:
self.assertIn(prop, available_keys)
# check order
available_properties = response['data']['fields'].keys()
for prop in response['data']['ordering']:
self.assertIn(prop, available_properties)
def test_node_namespace(self):
"""
Test the rest api call to get list of available node namespace
"""
url = f'{self.get_url_prefix()}/nodes/full_types'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
expected_data_keys = ['path', 'namespace', 'subspaces', 'label', 'full_type']
response_keys = response['data'].keys()
        for dkey in expected_data_keys:
            self.assertIn(dkey, response_keys)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response)
def test_comments(self):
"""
Get the node comments
"""
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)['data']['comments']
all_comments = []
for comment in response:
all_comments.append(comment['message'])
self.assertEqual(sorted(all_comments), sorted(['This is test comment.', 'Add another comment.']))
def test_repo(self):
"""
Test to get repo list or repo file contents for given node
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\"calcjob_inputs\""
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['repo_list'], [{'type': 'FILE', 'name': 'aiida.in'}])
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/contents?filename=\"calcjob_inputs/aiida.in\""
with self.app.test_client() as client:
response_obj = client.get(url)
input_file = load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in', mode='rb')
self.assertEqual(response_obj.data, input_file)
def test_process_report(self):
"""
Test process report
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
expected_keys = response['data'].keys()
for key in ['logs']:
self.assertIn(key, expected_keys)
expected_log_keys = response['data']['logs'][0].keys()
for key in ['time', 'loggername', 'levelname', 'dbnode_id', 'message']:
self.assertIn(key, expected_log_keys)
def test_download_formats(self):
"""
test for download format endpoint
"""
url = f'{self.get_url_prefix()}/nodes/download_formats'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
for key in ['data.structure.StructureData.|', 'data.cif.CifData.|']:
self.assertIn(key, response['data'].keys())
for key in ['cif', 'xsf', 'xyz']:
self.assertIn(key, response['data']['data.structure.StructureData.|'])
self.assertIn('cif', response['data']['data.cif.CifData.|'])
| 1.875 | 2 |
processmonitor.py | yletallec/processmonitor | 0 | 358 | <gh_stars>0
"""Process Monitor
Usage:
processmonitor.py <process_name> <overall_duration> [<sampling_interval>]
processmonitor.py -h|--help
processmonitor.py -v|--version
Options:
<process_name> Process name argument.
<overall_duration> Overall duration of the monitoring in seconds.
<sampling_interval> Sampling interval in seconds (optional, default 5).
-h --help Show this screen.
-v --version Show version.
"""
from docopt import docopt
from utils import string_to_integer
from process import Process
from threading import Event, Thread
from datetime import datetime
import os
import sys
import csv
import time
from enum import IntEnum
class ExitStatus(IntEnum):
OK = 0
BAD_DURATION = 1
BAD_INTERVAL = 2
INTERVAL_GT_DURATION = 3
def call_repeatedly(interval, func, *args):
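    # Runs func(*args, iteration) on a background thread every `interval` seconds,
    # aligned to the wall clock; returns a callable that stops the loop.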
stopped = Event()
def loop():
iteration = 1
while not stopped.wait(interval - time.time() % interval):
func(*args, iteration)
iteration = iteration + 1
Thread(target=loop).start()
return stopped.set
def print_average():
cpu_avg, mem_avg, files_avg = Process.metrics_average()
    if cpu_avg is not None and mem_avg is not None and files_avg is not None:
print(f"Metrics Avg.: %CPU: {cpu_avg}, MEMORY(B): {mem_avg}, OPEN FILES: {files_avg}")
return True
return False
def generate_report(name, duration, interval):
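    # Writes the collected per-iteration metrics to a timestamped CSV report in the
    # current directory; returns False when no metrics were gathered.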
if len(Process.metrics) == 0:
return False
ts = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
filename = f"{ts}_process-metrics-report_{name}_{duration}_{interval}.csv"
with open(f"{filename}", mode='w') as report:
writer = csv.writer(report, delimiter=',')
writer.writerow(['ITERATION', '%CPU', 'MEMORY(B)', 'OPEN FILES'])
iteration = 1
for metric in Process.metrics:
writer.writerow([
iteration,
metric.cpu,
metric.mem,
metric.files])
iteration = iteration + 1
reportpath = f"./{filename}"
print(f"Metrics report: {reportpath}")
return True
def raise_memory_leak_warning(name):
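    # Delegates the heuristic to Process.has_memory_leaks() and prints a warning if it triggers.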
if (Process.has_memory_leaks(name)):
print(f"WARNING: possible memory leaks detected for process \'{name}\'")
return True
return False
def main():
args = docopt(__doc__, version='Process Monitor 1.0')
if not args['<sampling_interval>']:
args['<sampling_interval>'] = 5
name = args['<process_name>']
try:
duration = string_to_integer(args['<overall_duration>'])
except:
print("duration parameter is not an integer")
return ExitStatus.BAD_DURATION
try:
interval = string_to_integer(args['<sampling_interval>'])
except:
print("interval parameter is not an integer")
return ExitStatus.BAD_INTERVAL
if interval > duration:
print("interval parameter is greater than duration parameter")
return ExitStatus.INTERVAL_GT_DURATION
print("---------------------------------------------")
print(" Process Monitor")
print("---------------------------------------------")
print(f"Monitoring process \'{name}\' every {interval} sec for {duration} sec")
cancel_future_calls = call_repeatedly(interval, Process.monitor, name)
time.sleep(duration)
cancel_future_calls()
print_average()
generate_report(name, duration, interval)
raise_memory_leak_warning(name)
return ExitStatus.OK
def init():
if __name__ == '__main__':
if len(sys.argv) == 1:
sys.argv.append('-h')
sys.exit(main())
init()
| 2.8125 | 3 |
Projects/DeepLearningTechniques/MobileNet_v2/tiny_imagenet/data_loader.py | Tim232/Python-Things | 2 | 359 | <gh_stars>1-10
import os
import re
import numpy as np
from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import *
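# NOTE: `tf` and `flags` are assumed to be provided (re-exported) by the wildcard import above.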
class DataLoader:
    # todo: train/test/validation split => (500/50/50 samples per class)
def __init__(self):
self.image_width = flags.FLAGS.image_width
self.image_height = flags.FLAGS.image_height
self.batch_size = flags.FLAGS.batch_size
self.data_path = flags.FLAGS.data_path
self.img_reg = re.compile('.*\\.jpeg', re.IGNORECASE)
self.init_class()
self.init_annotation()
def init_class(self):
self.cls = {}
for idx, dir in enumerate(os.listdir(os.path.join(self.data_path, 'train'))):
self.cls[dir] = idx
def init_annotation(self):
self.anno = {}
for line in open(os.path.join(self.data_path, 'val', 'val_annotations.txt')):
filename, label, *_ = line.split('\t')
self.anno[filename] = label
def init_train(self):
train_x, train_y = [], []
for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'train')):
for file in files:
if self.img_reg.match(file):
train_x.append(os.path.join(path, file))
train_y.append(self.cls[re.match('(.+)\\_\d+\\.jpeg', file, re.IGNORECASE).group(1)])
self.train_len = len(train_y)
        # todo: shuffle the training data randomly
random_sort = np.random.permutation(self.train_len)
train_x, train_y = np.asarray(train_x, dtype=np.string_)[random_sort], np.asarray(train_y, dtype=np.int64)[random_sort]
        # todo: convert (NumPy array / list) to Tensor
with tf.variable_scope(name_or_scope='data_tensor'):
self.train_x = tf.convert_to_tensor(value=train_x, dtype=tf.string, name='train_x')
self.train_y = tf.convert_to_tensor(value=train_y, dtype=tf.int64, name='train_y')
def init_validation(self):
valid_x, valid_y = [], []
for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'val')):
for file in files:
if self.img_reg.match(file):
valid_x.append(os.path.join(path, file))
valid_y.append(self.cls[self.anno[file]])
self.valid_len = len(valid_y)
        # todo: shuffle the validation data randomly
random_sort = np.random.permutation(self.valid_len)
valid_x, valid_y = np.asarray(valid_x, dtype=np.string_)[random_sort], np.asarray(valid_y, dtype=np.int64)[random_sort]
        # todo: convert (NumPy array / list) to Tensor
with tf.variable_scope(name_or_scope='data_tensor'):
self.valid_x = tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x')
self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y')
def init_test(self):
test_x = []
for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'test')):
for file in files:
test_x.append(os.path.join(path, file))
self.test_len = len(test_x)
        # todo: convert (NumPy array / list) to Tensor
with tf.variable_scope(name_or_scope='data_tensor'):
self.test_x = tf.convert_to_tensor(value=test_x, dtype=tf.string, name='test_x')
def train_normal(self, x, y):
with tf.variable_scope(name_or_scope='train_normal'):
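            # decode the PNG, scale pixels to [0, 1], then standardise with the
            # dataset's per-channel mean (0.4921, 0.4833, 0.4484) and std (0.2465, 0.2431, 0.2610)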
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x, y
def train_random_crop(self, x, y):
with tf.variable_scope(name_or_scope='train_random_crop'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
            x = tf.pad(x, [[4, 4], [4, 4], [0, 0]], name='padding')  # x is (H, W, C) here, so pad only height and width by 4
# x = tf.image.resize_images(images=x, size=(self.image_height+8, self.image_width+8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
x = tf.random_crop(value=x, size=(self.image_height, self.image_width, 3))
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x, y
def valid_normal(self, x, y):
with tf.variable_scope(name_or_scope='valid_normal'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x, y
def test_normal(self, x):
with tf.variable_scope(name_or_scope='test_normal'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x
def dataset_batch_loader(self, dataset, ref_func, name):
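        # maps the preprocessing function over the dataset, batches it, and
        # returns the next-batch tensor of a one-shot iterator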
with tf.variable_scope(name_or_scope=name):
dataset_map = dataset.map(ref_func).batch(self.batch_size)
iterator = dataset_map.make_one_shot_iterator()
batch_input = iterator.get_next()
return batch_input
def train_loader(self):
with tf.variable_scope('train_loader'):
            '''
            repeat(): restarts the dataset from the beginning once it reaches the end.
            shuffle(): randomly shuffles the dataset (if the buffer size is larger than the total number of samples, the whole dataset is shuffled).
            '''
dataset = tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y)).repeat()
normal_batch = self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch')
random_crop_batch = self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch')
return normal_batch, random_crop_batch
def valid_loader(self):
with tf.variable_scope('valid_loader'):
dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat()
normal_batch = self.dataset_batch_loader(dataset, self.valid_normal, name='normal_batch')
return normal_batch
def test_loader(self):
with tf.variable_scope('test_loader'):
dataset = tf.data.Dataset.from_tensor_slices(self.test_x).repeat()
normal_batch = self.dataset_batch_loader(dataset, self.test_normal, name='normal_batch')
return normal_batch | 2.34375 | 2 |
MarkReport/MarkReport.py | dedukun/MarkReport | 0 | 360 | <filename>MarkReport/MarkReport.py
#!/usr/bin/env python3
# Command line flags
import os
import glob
import re
import pyinotify
import subprocess
from sys import stdout, stderr
from time import time, sleep
from tempfile import gettempdir
from distutils.dir_util import copy_tree
from shutil import copyfile
from weasyprint import HTML
import argparse
parser = argparse.ArgumentParser(
description='Converts Markdown to elegant PDF reports')
parser.add_argument('--basic', dest='basic', action='store_true',
help='Do not enrich HTML with LaTeX and syntax highlighting (faster builds)')
parser.add_argument('--watch', dest='watch', action='store_true',
help='Watch the current folder for changes and rebuild automatically')
parser.add_argument('--quiet', dest='quiet', action='store_true',
help='Do not output any information')
parser.add_argument("--timeout", type=int, default=2,
help='Page generation timeout')
parser.add_argument("--base-html", type=str, default="",
help='The path to the base HTML file')
parser.set_defaults(watch=False)
args = parser.parse_args()
# Check directory
ok = False
for file in os.listdir("."):
if file.endswith(".md"):
ok = True
break
if not ok:
stderr.write("No markdown file found in the current folder")
exit(1)
if args.base_html != "":
if not os.path.isfile(args.base_html):
stderr.write("The given base HTML file doesn't exist")
exit(1)
script_path = os.path.dirname(os.path.realpath(__file__))
# Temp dir
timestamp = str(int(time()))
tmp_dir = gettempdir() + "/" + timestamp + "_md-report/"
os.makedirs(tmp_dir, exist_ok=True)
# Headless browser
if not args.basic:
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
options = Options()
options.headless = True
options.log.level = "trace"
d = DesiredCapabilities.FIREFOX
d['loggingPrefs'] = {'browser': 'ALL'}
driver = webdriver.Firefox(options=options, capabilities=d)
driver.set_page_load_timeout(args.timeout)
prev_compile_time = 0
def recompile(notifier):
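    # Debounces rebuilds (at most one per second), copies the sources to the temp dir,
    # runs the markdown parser, optionally lets headless Firefox execute the page's JS,
    # and finally renders output.pdf with WeasyPrint.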
if notifier is not None and (notifier.maskname != "IN_MODIFY" or notifier.pathname.endswith(".pdf")):
return
global prev_compile_time
if time() - prev_compile_time < 1:
return
prev_compile_time = time()
if not args.quiet:
stdout.write("\rBuilding the PDF file...")
stdout.flush()
files = glob.glob(tmp_dir + '/*.md')
for f in files:
os.remove(f)
if args.base_html == "":
copyfile(script_path + "/base.html", tmp_dir + "/base.html")
else:
copyfile(args.base_html, tmp_dir + "/base.html")
if not os.path.islink(tmp_dir + "/src"):
os.symlink(script_path + "/src", tmp_dir + "/src")
copy_tree(".", tmp_dir)
# Markdown parsing
subprocess.check_output(script_path + "/md-parsing " +
tmp_dir, shell=True).decode('utf-8')
html_file_name = tmp_dir + "output.html"
# Interpret JS code
if not args.basic:
driver.get("file:///" + html_file_name)
sleep(2)
elem = driver.find_element_by_xpath("//*")
interpreted_html = elem.get_attribute("outerHTML")
with open(html_file_name, "w") as html_out_file:
html_out_file.write(interpreted_html)
# Create final PDF file
pdf = HTML(html_file_name).write_pdf()
f = open("output.pdf", 'wb')
f.write(pdf)
if not args.quiet:
stdout.write("\rDone. ")
stdout.flush()
recompile(None)
if not args.watch:
if not args.basic:
driver.quit()
exit(0)
watch_manager = pyinotify.WatchManager()
event_notifier = pyinotify.Notifier(watch_manager, recompile)
watch_manager.add_watch(os.path.abspath("."), pyinotify.ALL_EVENTS, rec=True)
event_notifier.loop()
if not args.basic:
driver.quit()
| 2.46875 | 2 |
DFS/13023.py | kjh9267/BOJ_Python | 0 | 361 | # https://www.acmicpc.net/problem/13023
import sys
sys.setrecursionlimit(999999999)
def dfs_all():
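    # tries every node as a start and reports 1 as soon as a simple path of
    # `target_depth` edges (i.e. 5 distinct people) exists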
is_possible = [False]
for node in range(N):
visited = [False for _ in range(N)]
dfs(node, 0, visited, is_possible)
if is_possible[0]:
return 1
return 0
def dfs(cur, depth, visited, is_possible):
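    # classic backtracking DFS: marks `cur` visited, explores neighbours one edge
    # deeper, then unmarks it so other branches can reuse the node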
if visited[cur]:
return
if depth == target_depth:
is_possible[0] = True
return
visited[cur] = True
for nxt in graph[cur]:
dfs(nxt, depth + 1, visited, is_possible)
visited[cur] = False
if __name__ == '__main__':
input = __import__('sys').stdin.readline
target_depth = 4
N, M = map(int, input().split())
graph = [list() for _ in range(N)]
for _ in range(M):
a, b = map(int, input().split())
graph[a].append(b)
graph[b].append(a)
print(dfs_all())
| 3.046875 | 3 |
experiments/bst/setup.py | bigchaindb/privacy-protocols | 68 | 362 | <reponame>bigchaindb/privacy-protocols
"""bst: BigchainDB Sharing Tools"""
from setuptools import setup, find_packages
install_requires = [
'base58~=0.2.2',
'PyNaCl~=1.1.0',
'bigchaindb-driver',
'click==6.7',
'colorama',
]
setup(
name='bst',
version='0.1.0',
description='bst: BigchainDB Sharing Tools',
long_description=(
'A collection of scripts with different patterns to share'
'private data on BigchainDB.'),
url='https://github.com/vrde/bst/',
author='<NAME>',
author_email='<EMAIL>',
license='AGPLv3',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Database',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development',
'Natural Language :: English',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
],
packages=find_packages(),
entry_points={
'console_scripts': [
'bst=bst.cli:main'
],
},
install_requires=install_requires
)
| 1.570313 | 2 |
polyaxon/db/admin/job_resources.py | elyase/polyaxon | 0 | 363 | <gh_stars>0
from django.contrib import admin
from db.models.job_resources import JobResources
admin.site.register(JobResources)
| 1.25 | 1 |
voting_ml/main.py | tommy-waltmann/voting-ml | 0 | 364 | <reponame>tommy-waltmann/voting-ml<filename>voting_ml/main.py
import numpy as np
import sklearn
import subprocess
from sklearn import model_selection, tree
import data
import feature_selection
import model_sel
import os
import matplotlib.pyplot as plt
import seaborn as sns
def main():
#parameter space
list_test_size = [0.1,0.15,0.2] # decide this
list_ftsel_method = ['chi2','mutlinfo','pca','dt']
list_num_features = [10,15,20] # decide this
list_Kfold = [3,5]
list_corr_threshold = [1,0.5,0.6,0.7] # decide this
param_space = {
'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 7],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [2, 5, 10],
'max_leaf_nodes': [2, 4, 6, 8, 10, 12, 15],
}
repeat = 1
    # output dictionary list
list_output_dict = []
# output directory path
outdir = "../results/run1/"
if(not os.path.isdir(outdir)):
os.mkdir(outdir)
o_models_file = open(outdir+"models.csv","w")
o_models_file.write("test size,run num,ftsel method,Kfold,number of features,correlation threshold,best features,criterion,max_depth,max_leaf_nodes,min_samples_leaf,min_samples_split,training accuracy,test accuracy\n")
#splitting data and weights into train, test (refer to optimal_params.py)
poll_data = data.PollDataProxy(remove_nan=False, convert_to_float=False)
acc = []
    '''Refer to optimal_params.py. Functions from that Python script are reused here (get_bad_questions() and separate_weights()).'''
for ts in list_test_size:
for run_num in range(repeat):
all_data, all_data_questions = poll_data.all_data_except(get_bad_questions())
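            # the last column of all_data is the target label; the remaining columns are
            # features and still include the sample-weight column, which is split off below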
X = all_data[:, :-1]
y = all_data[:, -1]
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y,
test_size=ts,
shuffle=True)
X_train, weights_train, questions = separate_weights(X_train, all_data_questions[:-1])
X_test, weights_test, _ = separate_weights(X_test, all_data_questions[:-1])
print("Number of Training Samples:", len(X_train))
print("Number of Testing Samples:", len(X_test))
data_dict = {
'X_train': X_train,
'X_test': X_test,
'y_train': y_train,
'y_test': y_test
}
weights_dict = {
'weights_train': weights_train,
'weights_test': weights_test}
for meth in list_ftsel_method:
'''Create class objects of the current selection method'''
for thres in list_corr_threshold:
data_ranked_dict, ranked_questions = {}, []
                    ftsel_obj = None
if(meth=='chi2'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_chi2"
)
data_ranked_dict, ranked_questions = ftsel_obj.ftsel_chi2(data_dict, thres)
elif(meth=='mutlinfo'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_mutlinfo"
)
data_ranked_dict, ranked_questions = ftsel_obj.ftsel_mutlinfo(data_dict, thres)
elif(meth=='pca'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_pca"
)
data_ranked_dict,_ = ftsel_obj.ftsel_pca(data_dict)
                        fts = data_ranked_dict['X_train'].shape[1]
questions_int = list(map(str, list(range(1,fts+1,1))))
ranked_questions = ["ft_"+x for x in questions_int]
elif(meth=='dt'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_dt"
)
data_ranked_dict, ranked_questions = ftsel_obj.ftsel_decision_tree_method(data_dict, thres)
for num in list_num_features:
data_sel_dict, sel_questions = ftsel_obj.select_num_features(data_ranked_dict, num, ranked_questions)
ftsel_obj.plot_heatmap(data_sel_dict['X_train'], sel_questions)
for K in list_Kfold:
                            '''Here create a class object of "model_sel" and output all the best parameters and values into "list_output_dict". Then we can create a .csv file listing all the models and accuracies.'''
                            model_obj = model_sel.model_sel(ts, run_num, meth, param_space, K, num, thres, data_sel_dict, weights_dict, sel_questions, outdir).select_model()
# intermediate = model_obj.select_model()
acc.append(model_obj['test_acc'])
o_models_file.write(str(ts)+",")
o_models_file.write(str(run_num)+",")
o_models_file.write(meth+",")
o_models_file.write(str(K)+",")
o_models_file.write(str(num)+",")
o_models_file.write(str(thres)+",")
for ii in range(len(model_obj['best_features'])):
o_models_file.write(model_obj['best_features'][ii]+" ")
o_models_file.write(",")
o_models_file.write(model_obj['best_params']['criterion']+",")
o_models_file.write(str(model_obj['best_params']['max_depth'])+",")
o_models_file.write(str(model_obj['best_params']['max_leaf_nodes'])+",")
o_models_file.write(str(model_obj['best_params']['min_samples_leaf'])+",")
o_models_file.write(str(model_obj['best_params']['min_samples_split'])+",")
o_models_file.write(str(model_obj['train_acc'])+",")
o_models_file.write(str(model_obj['test_acc'])+",")
o_models_file.write("\n")
list_output_dict.append(model_obj)
'''Once all the models are run, select the model with best test accuracy and return the output dict for that model.'''
o_models_file.close()
best_index = np.argmax(acc)
best_model_dict = list_output_dict[best_index]
print("The best model parameters:")
print(best_model_dict)
def get_bad_questions():
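    # reads the comma-separated list of columns to drop, but keeps 'weight'
    # because it is needed later for sample weighting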
f = open("../extern/manage_data/list_unnecessary_columns.txt", 'r')
bad_questions = f.readline().split(',')
bad_questions[-1] = bad_questions[-1][:-1] # chop the \n off the end
bad_questions.remove('weight') # need weight for training
return bad_questions
def separate_weights(X_train, column_names):
"""
Removes the column containing weights from X_train, and returns it as
a separate array.
"""
weight_column_idx = column_names.index('weight')
weights = X_train[:, weight_column_idx]
new_X_train = np.delete(X_train, weight_column_idx, axis=1)
new_questions = column_names
new_questions.remove('weight')
return new_X_train, weights, new_questions
if __name__ == "__main__":
main()
| 3 | 3 |
src/the_tale/the_tale/game/heroes/tests/test_logic.py | al-arz/the-tale | 0 | 365 | <reponame>al-arz/the-tale
import smart_imports
smart_imports.all()
class HeroDescriptionTests(utils_testcase.TestCase):
def setUp(self):
super().setUp()
game_logic.create_test_map()
account = self.accounts_factory.create_account(is_fast=True)
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
def test_no_description(self):
self.assertEqual(logic.get_hero_description(self.hero.id), '')
def test_has_description(self):
logic.set_hero_description(self.hero.id, 'bla-bla')
self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla')
def test_update_description(self):
logic.set_hero_description(self.hero.id, 'bla-bla')
logic.set_hero_description(self.hero.id, 'new description')
self.assertEqual(logic.get_hero_description(self.hero.id), 'new description')
class CreateHero(utils_testcase.TestCase):
def setUp(self):
super().setUp()
game_logic.create_test_map()
self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx',
email='<EMAIL>',
is_fast=False)
self.attributes = {'is_fast': False,
'is_bot': False,
'might': 0,
'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3),
'premium_state_end_at': datetime.datetime.fromtimestamp(0),
'ban_state_end_at': datetime.datetime.fromtimestamp(0)}
def test_default(self):
logic.create_hero(account_id=self.account.id, attributes=self.attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.id, self.account.id)
self.assertEqual(hero.account_id, self.account.id)
self.assertIn(hero.gender, (game_relations.GENDER.MALE,
game_relations.GENDER.FEMALE))
self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration)
self.assertEqual(hero.habit_honor.raw_value, 0)
self.assertEqual(hero.habit_peacefulness.raw_value, 0)
self.assertTrue(hero.preferences.archetype.is_NEUTRAL)
self.assertTrue(hero.upbringing.is_PHILISTINE)
self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS)
self.assertTrue(hero.death_age.is_MATURE)
def test_account_attributes_required(self):
for attribute in self.attributes.keys():
with self.assertRaises(exceptions.HeroAttributeRequiredError):
logic.create_hero(account_id=self.account.id,
attributes={key: value for key, value in self.attributes.items() if key != attribute })
def test_account_attributes(self):
attributes = {'is_fast': random.choice((True, False)),
'is_bot': random.choice((True, False)),
'might': random.randint(1, 1000),
'active_state_end_at': datetime.datetime.fromtimestamp(1),
'premium_state_end_at': datetime.datetime.fromtimestamp(2),
'ban_state_end_at': datetime.datetime.fromtimestamp(3)}
logic.create_hero(account_id=self.account.id, attributes=attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.is_fast, attributes['is_fast'])
self.assertEqual(hero.is_bot, attributes['is_bot'])
self.assertEqual(hero.might, attributes['might'])
self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at'])
self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at'])
self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at'])
def test_attributes(self):
self.attributes.update({'race': game_relations.RACE.random(),
'gender': game_relations.GENDER.random(),
'name': game_names.generator().get_name(game_relations.RACE.random(),
game_relations.GENDER.random()),
'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER),
'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER),
'archetype': game_relations.ARCHETYPE.random(),
'upbringing': tt_beings_relations.UPBRINGING.random(),
'first_death': tt_beings_relations.FIRST_DEATH.random(),
'death_age': tt_beings_relations.AGE.random()})
logic.create_hero(account_id=self.account.id, attributes=self.attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.race, self.attributes['race'])
self.assertEqual(hero.gender, self.attributes['gender'])
self.assertEqual(hero.utg_name, self.attributes['name'])
self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness'])
self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor'])
self.assertEqual(hero.preferences.archetype, self.attributes['archetype'])
self.assertEqual(hero.upbringing, self.attributes['upbringing'])
self.assertEqual(hero.first_death, self.attributes['first_death'])
self.assertEqual(hero.death_age, self.attributes['death_age'])
class RegisterSpendingTests(utils_testcase.TestCase):
def setUp(self):
super().setUp()
self.places = game_logic.create_test_map()
account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
self.hero.premium_state_end_at  # attribute accessed without using the value (possibly to trigger lazy initialization before the service reset below)
game_tt_services.debug_clear_service()
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_not_in_place(self):
self.hero.position.set_position(0, 0)
self.assertEqual(self.hero.position.place_id, None)
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(impacts, [])
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: False)
def test_can_not_change_place_power(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(impacts, [])
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_can_change_place_power(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(len(impacts), 1)
self.assertEqual(impacts[0].amount, 100)
self.assertTrue(impacts[0].target_type.is_PLACE)
self.assertEqual(impacts[0].target_id, self.places[0].id)
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_can_change_place_power__below_zero(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
logic.register_spending(self.hero, -50)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(len(impacts), 1)
self.assertEqual(impacts[0].amount, 150)
class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin,
utils_testcase.TestCase):
def setUp(self):
super().setUp()
self.places = game_logic.create_test_map()
account = self.accounts_factory.create_account(is_fast=True)
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
def place_0_cost(self):
return logic.get_places_path_modifiers(self.hero)[self.places[0].id]
def test_every_place_has_modifier(self):
modifiers = logic.get_places_path_modifiers(self.hero)
self.assertEqual(set(modifiers.keys()), {place.id for place in self.places})
def test_race_bonus(self):
self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,))
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA):
self.places[0].race = self.hero.race
def test_modifier_bonus(self):
self.assertFalse(self.places[0].is_modifier_active())
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA):
self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT)
self.create_effect(self.places[0].id,
value=100500,
attribute=places_relations.ATTRIBUTE.MODIFIER_FORT,
delta=0)
self.places[0].refresh_attributes()
self.assertTrue(self.places[0].is_modifier_active())
def test_home_place(self):
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0])
def test_friend(self):
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0])
def test_enemy(self):
with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0])
def test_tax(self):
self.places[0].attrs.size = 10
self.places[0].refresh_attributes()
self.assertEqual(self.places[0].attrs.tax, 0)
with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA):
self.create_effect(self.places[0].id,
value=100,
attribute=places_relations.ATTRIBUTE.TAX,
delta=0)
self.places[0].refresh_attributes()
HABITS_DELTAS = [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA),
(-1, 0, 0),
(-1, +1, +c.PATH_MODIFIER_MINOR_DELTA),
( 0, -1, 0),
( 0, 0, 0),
( 0, +1, 0),
(+1, -1, +c.PATH_MODIFIER_MINOR_DELTA),
(+1, 0, 0),
(+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)]
def test_habits__honor(self):
for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS:
self.places[0].habit_honor.set_habit(0)
self.hero.habit_honor.set_habit(0)
with self.check_almost_delta(self.place_0_cost, expected_delta):
self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER)
self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER)
def test_habits__peacefulness(self):
for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS:
self.places[0].habit_peacefulness.set_habit(0)
self.hero.habit_peacefulness.set_habit(0)
with self.check_almost_delta(self.place_0_cost, expected_delta):
self.places[0].habit_peacefulness.set_habit(place_direction * c.HABITS_BORDER)
self.hero.habit_peacefulness.set_habit(hero_direction * c.HABITS_BORDER)
| 2.390625 | 2 |
tinylinks/tests/test_app/models.py | brad/django-tinylinks | 11 | 366 | <filename>tinylinks/tests/test_app/models.py<gh_stars>10-100
"""Dummy model needed for tests."""
pass
| 1.046875 | 1 |
postcipes/hydraulic_jump.py | timofeymukha/postcipes | 0 | 367 | # This file is part of postcipes
# (c) <NAME>
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
from scipy.interpolate import interp1d
import numpy as np
import h5py
__all__ = ["HydraulicJump"]
class HydraulicJump(Postcipe):
def __init__(self, path):
Postcipe.__init__(self)
self.case = tbl.Case(path)
self.case['alphag'] = 1 - self.case['alpha.waterMean']
self.U = self.case.boundary_data("inlet", sort="y")[1]['UMean'][0, 0]
y_inlet = self.case.boundary_data("inlet", sort="y")[0][:, 1]
inlet_edge_length = tbl.edge_lengths(self.case, "inlet")
self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1]
self.Fr1 = self.U/np.sqrt(9.81*self.d)
self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2
self.Fr2 = self.U/np.sqrt(9.81*self.d2)
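# Added commentary (not in the original source): Fr1 = U / sqrt(g*d) is the upstream
# Froude number and d2 is the conjugate (sequent) depth from the Belanger momentum
# relation, d2/d1 = (sqrt(1 + 8*Fr1**2) - 1) / 2; Fr2 is the downstream Froude number.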
iso05 = tbl.isoline(self.case, "alpha.waterMean", 0.5)
idx = iso05[:, 0].argsort()
self.xfs = iso05[idx, 0]
self.yfs = iso05[idx, 1]
idx_toe = np.argmin(np.abs(self.d*1.1 - self.yfs[:int(self.yfs.size/2)]))
self.xtoe = self.xfs[idx_toe]
| 2.25 | 2 |
main/SimulationSettings/ScreenshotsSteppable/Simulation/screenshots_steppables.py | JulianoGianlupi/nh-cc3d-4x-base-tool | 0 | 368 | from cc3d.core.PySteppables import *
from cc3d import CompuCellSetup
from random import random
class ScreenshotSteppable(SteppableBasePy):
def __init__(self, frequency=10):
SteppableBasePy.__init__(self, frequency)
def step(self, mcs):
if mcs in [3, 5, 19, 20, 23, 29, 31]:
self.request_screenshot(mcs=mcs, screenshot_label='Cell_Field_CellField_2D_XY_0')
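# Added commentary (not in the original steppable): the screenshot label is assumed
# to match a screenshot description previously saved from the CC3D Player; the mcs
# values above are arbitrary example steps at which screenshots are requested.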
| 2.328125 | 2 |
aesara/gpuarray/optdb.py | anirudhacharya/aesara | 1 | 369 | <filename>aesara/gpuarray/optdb.py
from aesara.compile import optdb
from aesara.graph.opt import GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer
from aesara.graph.optdb import (
EquilibriumDB,
LocalGroupDB,
OptimizationDatabase,
SequenceDB,
)
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
# Not used for an EquilibriumOptimizer. It has the "tracks" that we need for GraphToGPUDB.
gpu_optimizer2 = EquilibriumDB()
gpu_seqopt = SequenceDB()
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register(
"gpuarray_opt",
gpu_seqopt,
optdb.__position__.get("add_destroy_handler", 49.5) - 1,
"gpuarray",
)
pool_db = LocalGroupDB()
pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)
pool_db2.__name__ = "pool_db2"
matrix_ops_db = LocalGroupDB()
matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)
matrix_ops_db2.__name__ = "matrix_ops_db2"
abstract_batch_norm_db = LocalGroupDB()
abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)
abstract_batch_norm_db2.__name__ = "abstract_batch_norm_db2"
abstract_batch_norm_groupopt = LocalGroupDB()
abstract_batch_norm_groupopt.__name__ = "gpuarray_batchnorm_opts"
def register_opt(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop("name")) or local_opt.__name__
gpu_optimizer.register(name, local_opt, "fast_run", "gpuarray", *tags)
return local_opt
return f
def register_opt2(tracks, *tags, **kwargs):
"""
Decorator for the new GraphToGPU optimizer.
Takes an extra parameter (Op) compared to the register_opt decorator.
Parameters
----------
tracks : list of Op classes, Op instances, or None
The Node's Op to which optimization is being applied.
tags : String
The optimization tag to which the optimizer will be registered.
"""
def f(local_opt):
name = (kwargs and kwargs.pop("name")) or local_opt.__name__
if isinstance(local_opt, OptimizationDatabase):
opt = local_opt
else:
opt = local_optimizer(tracks)(local_opt)
gpu_optimizer2.register(name, opt, "fast_run", "gpuarray", *tags)
return local_opt
return f
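# Illustrative usage sketch (added commentary, not part of the original module).
# "SomeOp" and the optimizer body are placeholders; they only show how a local
# optimizer is typically registered under the "gpuarray"/"fast_run" tags:
#
#   @register_opt("fast_compile")
#   @local_optimizer([SomeOp])
#   def local_some_op_to_gpu(fgraph, node):
#       ...  # return a list of replacement outputs, or None to leave the node alone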
def register_inplace(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop("name")) or local_opt.__name__
optdb.register(
name,
TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace),
60,
"fast_run",
"inplace",
"gpuarray",
*tags,
)
return local_opt
return f
# Register GPU convolution implementation
# They are tried in a specific order so we can control
# which ones take precedence over others.
abstractconv_groupopt = LocalGroupDB()
abstractconv_groupopt.__name__ = "gpuarray_abstractconv_opts"
register_opt("fast_compile")(abstractconv_groupopt)
class GraphToGPUDB(OptimizationDatabase):
"""
Retrieves the list local optimizers based on the optimizer flag's value
from EquilibriumOptimizer by calling the method query.
"""
def query(self, *tags, **kwtags):
from aesara.gpuarray.opt import GraphToGPU
opt = gpu_optimizer2.query(*tags, **kwtags)
return GraphToGPU(opt.local_optimizers_all, opt.local_optimizers_map)
| 2.109375 | 2 |
jenkinsapi/node.py | imsardine/jenkinsapi | 0 | 370 | <reponame>imsardine/jenkinsapi<gh_stars>0
"""
Module for jenkinsapi Node class
"""
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.custom_exceptions import PostRequired
import logging
try:
from urllib import quote as urlquote
except ImportError:
# Python3
from urllib.parse import quote as urlquote
log = logging.getLogger(__name__)
class Node(JenkinsBase):
"""
Class to hold information on nodes that are attached as slaves
to the master jenkins instance
"""
def __init__(self, baseurl, nodename, jenkins_obj):
"""
Init a node object by providing all relevant pointers to it
:param baseurl: basic url for querying information on a node
:param nodename: hostname of the node
:param jenkins_obj: ref to the jenkins obj
:return: Node obj
"""
self.name = nodename
self.jenkins = jenkins_obj
JenkinsBase.__init__(self, baseurl)
def get_jenkins_obj(self):
return self.jenkins
def __str__(self):
return self.name
def is_online(self):
return not self.poll(tree='offline')['offline']
def is_temporarily_offline(self):
return self.poll(tree='temporarilyOffline')['temporarilyOffline']
def is_jnlpagent(self):
return self._data['jnlpAgent']
def is_idle(self):
return self._data['idle']
def set_online(self):
"""
Set node online.
Before changing state, verify the client state: if the node is marked
'offline' but 'temporarilyOffline' is not set, the client has connection
problems and an AssertionError is raised.
If the node state has not changed after the call, an AssertionError is raised.
"""
self.poll()
# Before change state check if client is connected
if self._data['offline'] and not self._data['temporarilyOffline']:
raise AssertionError("Node is offline and not marked as "
"temporarilyOffline, check client "
"connection: offline = %s, "
"temporarilyOffline = %s" %
(self._data['offline'],
self._data['temporarilyOffline']))
elif self._data['offline'] and self._data['temporarilyOffline']:
self.toggle_temporarily_offline()
if self._data['offline']:
raise AssertionError("The node state is still offline, "
"check client connection:"
" offline = %s, "
"temporarilyOffline = %s" %
(self._data['offline'],
self._data['temporarilyOffline']))
def set_offline(self, message="requested from jenkinsapi"):
"""
Set node offline.
If the node state has not changed after the call, an AssertionError is raised.
:param message: optional string explaining why you are taking this
node offline
"""
if not self._data['offline']:
self.toggle_temporarily_offline(message)
data = self.poll(tree='offline,temporarilyOffline')
if not data['offline']:
raise AssertionError("The node state is still online:" +
"offline = %s , temporarilyOffline = %s" %
(data['offline'],
data['temporarilyOffline']))
def toggle_temporarily_offline(self, message="requested from jenkinsapi"):
"""
Switches the state of the connected node (online/offline) and
sets the 'temporarilyOffline' property (True/False).
Calling the same method again will bring the node status back.
:param message: optional string that can be used to explain why you
are taking this node offline
"""
initial_state = self.is_temporarily_offline()
url = self.baseurl + \
"/toggleOffline?offlineMessage=" + urlquote(message)
try:
html_result = self.jenkins.requester.get_and_confirm_status(url)
except PostRequired:
html_result = self.jenkins.requester.post_and_confirm_status(
url,
data={})
self.poll()
log.debug(html_result)
state = self.is_temporarily_offline()
if initial_state == state:
raise AssertionError(
"The node state has not changed: temporarilyOffline = %s" %
state)
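# Illustrative usage sketch (added commentary, not part of the original module).
# The node name and the jenkins_obj below are placeholders:
#
#   node = jenkins_obj.get_node('build-agent-01')
#   if node.is_temporarily_offline():
#       node.set_online()
#   node.set_offline(message="maintenance window")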
| 2.328125 | 2 |
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/GenericHazards.py | srcarter3/awips2 | 0 | 371 | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 05/07/2015 4027 randerso Migrated A1 OB9.16 code to A2
# 06/17/2015 4027 dgilling Perform case-insensitive
# comparisons in foundCTAs.
# 07/13/2015 4648 randerso Fix bullets in follow up products
# 02/24/2016 5411 randerso Make bullet headers upper case
# 07/15/2016 5749 randerso Replaced ellipses with commas in hazardBodyText
#
##
# This is a base file that is not intended to be overridden.
##
#-------------------------------------------------------------------------
# Description: This product is a template for creating Hazard Products.
#-------------------------------------------------------------------------
# Copying:
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#-------------------------------------------------------------------------
# Standard and Local file names and Locations:
# GenericHazards
#-------------------------------------------------------------------------
# Customization Points:
#
# DEFINITION SECTION
#
# Required Configuration Items:
#
# displayName If not None, defines how product appears in GFE GUI
#
# You must set the following:
#
# productName defines name of product e.g. "Zone Forecast Product"
# fullStationID Full station identifier, 4 letter, such as "KSLC".
# wmoID WMO ID code for product header, such as "FOUS45"
# pil Product pil, such as "SFTBOS"
# areaName (opt.) Area name for product header, such as "Western New York"
# wfoCityState City,state that the WFO is located in, such as "Buffalo NY"
#
# Optional Configuration Items
#
# mapNameForCombinations Name of the map background that is used for
# creating/editing the combinations file. This must
# be defined or the GFE zone combiner
# database Source database for product. Can be "Official",
# "Fcst" or "ISC"
# outputFile Defines the output location of the finished product.
# Product is saved if autoWrite is 1.
# debug If on, debug_print statements will appear.
# textdbPil Defines the awips product identifier
# (e.g., DENCCFDEN) that is used to store the product
# in the AWIPS text database. The product is not
# automatically stored unless autoStore is 1. This
# value is also used for the default GUI entry for
# storage.
# awipsWANPil Defines the awips product identifier
# (e.g., KBOUCCFDEN) that is used to transmit the
# product to the AWIPS WAN. The product is not
# automatically transmitted unless autoSend is 1.
# This value is also used for the default GUI
# entry for storage.
# autoSend If set to 1, then the product will be automatically
# sent on the AWIPS WAN to the "autoSendAddress" with
# the "awipsWANPil after product creation.
# autoStore If set to 1, then the product will be automatically
# stored into the text database using the "textdbPil"
# after product creation.
# autoWrite If set to 1, then the product will be automatically
# written to the "output" named disk file after
# product creation.
#
# lineLength max length of each line
#
# defaultEditAreas defines edit areas, default is Combinations
#
# purgeTime Maximum number of hours past issuance time for the
# expire time.
# includeCities If 1, cities will be included in the area header
# accurateCities If 1, cities are determined from grids
# citiesPhrase "Including the cities of" phrase used when including
# cities
# includeZoneNames If 1, zone names will be included in the area header
# easPhrase Optional EAS phrase to be include in product header
#
# hazardSamplingThreshold Defines the percentage coverage or number of
# grid points in a zone that must contain the hazard
# in order for it to be considered. Tuple (percent, points)
# includeOverviewHeadline If 1, the overview header is templated
# includeOverview If 1, the overview section is templated
# bulletProd If 1, the product will use a bullet format
#-------------------------------------------------------------------------
# Weather Elements Needed:
# Hazards
#-------------------------------------------------------------------------
# Edit Areas Needed: None
#-------------------------------------------------------------------------
# Associated Utilities Files e.g. Combinations file:
# Combinations file
#-------------------------------------------------------------------------
# Component Products:
# Hazards
#-------------------------------------------------------------------------
# Development tasks that are identified and in progress:
#
# To look up tasks and their status, see the Text Product User Guide
# Section on "Tkgnats: Task Reporting System".
#-------------------------------------------------------------------------
# Additional Information:
#-------------------------------------------------------------------------
# Example Output:
#-------------------------------------------------------------------------
import LogStream
import TextRules
import SampleAnalysis
import time, string, types, copy, re
import CallToActions
import AbsTime
class TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis,
CallToActions.CallToActions):
Definition = {
"type": "smart",
"displayName": None,
# Source database for product. Can be "Official", "Fcst" or "ISC"
"database": "Official",
# Defines output location of finished product.
"outputFile": "{prddir}/TEXT/genHaz.txt",
"debug": 0,
# Name of map background for creating Combinations
# Can be:
# Zones_BOU
# FireWxZones_BOU
# Counties
# Marine_Zones_BOU
"mapNameForCombinations": "Zones_<site>",
## Edit Areas: Create Combinations file with edit area combinations.
## Can be:
## EditAreas_PublicZones_BOU
## EditAreas_FireWx_BOU
## EditAreas_FIPS_BOU
## EditAreas_MarineZones_BOU
"defaultEditAreas" : "EditAreas_PublicZones_<site>_<MultiPil>",
# product identifiers
"productName": "Generic Hazard Product", # product name
"fullStationID": "<fullStationID>", # full station identifier (4letter)
"wmoID": "<wmoID>", # WMO ID
"pil": "<pil>", # Product pil
"areaName": "", # Name of state, such as "Georgia" -- optional
"wfoCityState": "<wfoCityState>", # Location of WFO - city,state
"textdbPil": "<textdbPil>", # Product ID for storing to AWIPS text database.
"awipsWANPil": "<awipsWANPil>", # Product ID for transmitting to AWIPS WAN.
"periodCombining" : 0, # If 1, combine periods, if possible
# automatic functions
"autoSend": 0, #set to 1 to automatically transmit product
"autoSendAddress": "000", #transmission address
"autoStore": 0, #set to 1 to automatically store product in textDB
"autoWrite": 0, #set to 1 to automatically write product to file
# Area Dictionary -- Descriptive information about zones
"areaDictionary": "AreaDictionary",
# Language
"language": "english",
"lineLength": 66, #Maximum line length
"purgeTime": 8, # Maximum hours for expireTime
"includeCities": 1 , # Cities included in area header
"accurateCities": 0, # Include all cities in area header
"cityLocation": "CityLocation", # City lat/lon dictionary to use
"cityDescriptor":"Including the cities of",
"includeZoneNames":1, # Zone names will be included in the area header
"easPhrase" :"", # Optional EAS phrase to be include in product header
"includeOverviewHeadline": 1, #include overview header
"includeOverview": 1, #include overview section
"bulletProd": 0, # do not default to bullets
"hazardSamplingThreshold": (10, None), #(%cov, #points)
"callToAction": 1,
}
def __init__(self):
TextRules.TextRules.__init__(self)
SampleAnalysis.SampleAnalysis.__init__(self)
self.__overviewText = ""
self.__procCTA = None
def generateForecast(self, argDict):
# Generate Text Phrases for a list of edit areas
# Get variables
error = self._getVariables(argDict)
if error is not None:
return error
# Get the segments
hazardsC = argDict['hazards']
segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable())
if len(segmentList) == 0:
return "No hazards to report"
# Determine time ranges
error = self._determineTimeRanges(argDict)
if error is not None:
return error
# Initialize the output string
fcst = ""
fcst = self._preProcessProduct(fcst, argDict)
# Generate the product for each segment in the segmentList
fraction = 0
fractionOne = 1.0/float(len(segmentList))
percent = 50.0
self.setProgressPercentage(50)
for segmentAreas in segmentList:
self.progressMessage(fraction, percent, "Making Product for Segment")
fcst = self._preProcessArea(fcst, segmentAreas, self._expireTime, argDict)
fcst = self._makeProduct(fcst, segmentAreas, argDict)
fcst = self._postProcessArea(fcst, segmentAreas, argDict)
fraction = fractionOne
fcst = self._postProcessProduct(fcst, argDict)
return fcst
def _getVariables(self, argDict):
# Make argDict accessible
self.__argDict = argDict
# Get Definition variables
self._definition = argDict["forecastDef"]
for key in self._definition.keys():
exec "self._" + key + "= self._definition[key]"
# Get VariableList
varDict = argDict["varDict"]
for key in varDict.keys():
if type(key) is types.TupleType:
label, variable = key
exec "self._" + variable + "= varDict[key]"
self._language = argDict["language"]
# Set up information for Hazards product
self._hazards = argDict['hazards']
self._combinations = argDict["combinations"]
return None
def _determineTimeRanges(self, argDict):
# Set up the time range for 0-240 hours
self._timeRange = self.createTimeRange(0, 240)
self._ddhhmmTime = self.getCurrentTime(
argDict, "%d%H%M", shiftToLocal=0, stripLeading=0)
self._issueTime = AbsTime.AbsTime(argDict['creationTime'])
self._currentTime = argDict['creationTime']
self._expireTime = self._issueTime + self._purgeTime*3600
self._timeLabel = self.getCurrentTime(
argDict, "%l%M %p %Z %a %b %e %Y", stripLeading=1)
return None
def _preProcessProduct(self, fcst, argDict):
# Product header
if self._areaName != "":
self._areaName = " for " + self._areaName
issuedByString = self.getIssuedByString()
productName = self.checkTestMode(argDict,
self._productName + self._areaName)
if len(self._easPhrase) != 0:
eas = self._easPhrase + '\n'
else:
eas = ''
s = self._wmoID + " " + self._fullStationID + " " + \
self._ddhhmmTime + "\n" + self._pil + "\n\n"
fcst = fcst + s.upper()
s = eas + productName + "\n" +\
"National Weather Service " + self._wfoCityState + \
"\n" + issuedByString + self._timeLabel + "\n\n"
fcst = fcst + s
fcst = fcst + "Default overview section\n"
return fcst
def _preProcessArea(self, fcst, segmentAreas, expireTime, argDict):
# This is the header for an edit area combination
areaHeader = self.makeAreaHeader(
argDict, "", self._issueTime, expireTime,
self._areaDictionary, None, cityDescriptor=self._cityDescriptor,
areaList=segmentAreas, includeCities=self._includeCities,
includeZoneNames = self._includeZoneNames,
accurateCities = self._accurateCities)
fcst = fcst + areaHeader
return fcst
def _makeProduct(self, fcst, segmentAreas, argDict):
argDict["language"] = self._language
# Generate Narrative Forecast for Edit Area
# get the hazards text
# We only need to get headlines for the first edit area
# in the segment since all areas in the segment have
# the same headlines
editArea = segmentAreas[0]
areaLabel = editArea
headlines = self.generateProduct("Hazards", argDict, area = editArea,
areaLabel=areaLabel,
timeRange = self._timeRange)
fcst = fcst + headlines
return fcst
def _postProcessArea(self, fcst, segmentAreas, argDict):
return fcst + "\n\n$$\n\n"
def _postProcessProduct(self, fcst, argDict):
#
# If an overview exists for this product, insert it
#
overview = self.finalOverviewText()
overviewSearch = re.compile(r'Default overview section', re.DOTALL)
fcst = overviewSearch.sub(overview, fcst)
#
# Added to place line feeds in the CAP tags to keep separate from CTAs
fcst = string.replace(fcst, \
r"PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", \
r"\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n")
fcst = string.replace(fcst, "\n ","\n")
fcst = string.replace(fcst, "&&", "\n&&\n")
# Prevent empty Call to Action Tags
fcst = re.sub(r'\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\s*&&\n', \
"", fcst)
fcst = self._indentBulletText(fcst)
#
# Clean up multiple line feeds
#
fixMultiLF = re.compile(r'(\n\n)\n*', re.DOTALL)
fcst = fixMultiLF.sub(r'\1', fcst)
# finish progress meter
self.setProgressPercentage(100)
self.progressMessage(0, 100, self._displayName + " Complete")
return fcst
def allowedHazards(self):
return []
# Added for DR 21194
def _bulletDict(self):
return []
# Added for DR 21309
def _bulletOrder(self):
return []
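# Illustrative override sketch (added commentary, not part of this baseline file).
# Local formatters typically override these two methods; the phenomenon codes and
# bullet names below are hypothetical examples only:
#
#   def _bulletDict(self):
#       return {"WS": "locations,hazard types,timing", "BZ": "locations,timing"}
#
#   def _bulletOrder(self):
#       return ["locations", "hazard types", "timing"]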
## Replaced by 21309 code
## def _getBullets(self, newBulletList, argDict):
##
## ### get the bullet dictionary and split the bullets
## bDict = self._bulletDict()
## bLine = bDict.get(eachHazard['phen'])
## print 20* "*" + (eachHazard['phen'])
## bList = newBulletList.split(",")
##
## ### initialize the bullet output
## bullets = ""
##
## ### loop through the bullets and format the output
## for b in bList:
## bullets = bullets + "* " + b + "...|* Enter bullet text *|\n\n"
## # bullets = bullets + "\n"
## return bullets
def _indentBulletText(self, prevText):
print prevText
### if there is no previous text, return it unchanged
if prevText is None:
return prevText
###
### split the text
###
bullets = []
bullets = string.split(prevText, '\n\n')
if len(bullets) <= 1:
return prevText
###
### process the text
###
outText = ""
for b in bullets:
### if first character is a * we found a bullet
if re.match("\*", b):
### remove line feeds
removeLF = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
bullet = removeLF.sub(r'\1 \2',b)
### indent code
bullet = self.indentText(bullet, indentFirstString = '',
indentNextString = ' ', maxWidth=self._lineLength,
breakStrings=[" ", "..."])
###
### the "-" in the breakStrings line above is causing issues with
### offices that use "-20 degrees" in the text.
###
outText = outText + bullet + "\n\n"
else: ### not a bullet, CTA text
outText = outText + b + "\n\n"
### that's it
print outText
return outText
# The hazardTimePhrases method is passed a hazard key, and returns
# time phrase wording consistent with that generated by the headline
# algorithms in DiscretePhrases.
#
def hazardTimePhrases(self, hazard, argDict, prefixSpace=True):
timeWords = self.getTimingPhrase(hazard, argDict['creationTime'])
if prefixSpace and len(timeWords):
timeWords = " " + timeWords #add a leading space
return timeWords
#
# The method hazardBodyText creates an attribution phrase
#
def hazardBodyText(self, hazardList, argDict):
bulletProd = self._bulletProd
hazardBodyPhrase = ''
#
# First, sort the hazards for this segment by importance
#
sortedHazardList = []
for each in ['W', 'Y', 'A', 'O', 'S']:
for eachHazard in hazardList:
if eachHazard['sig'] == each:
if eachHazard not in sortedHazardList:
sortedHazardList.append(eachHazard)
#
# Next, break them into individual lists based on action
#
newList = []
canList = []
expList = []
extList = []
conList = []
upgList = []
statementList = []
for eachHazard in sortedHazardList:
if eachHazard['sig'] in ['S']and eachHazard['phen'] in ['CF', 'LS']:
statementList.append(eachHazard)
elif eachHazard['act'] in ['NEW', 'EXA', 'EXB']:
newList.append(eachHazard)
elif eachHazard['act'] in ['CAN']:
canList.append(eachHazard)
elif eachHazard['act'] in ['EXP']:
expList.append(eachHazard)
elif eachHazard['act'] in ['EXT']:
extList.append(eachHazard)
elif eachHazard['act'] in ['UPG']:
upgList.append(eachHazard)
else:
conList.append(eachHazard)
#
# Now, go through each list and build the phrases
#
nwsIntroUsed = 0
#
# This is for the new hazards
#
phraseCount = 0
lastHdln = None
for eachHazard in newList:
hdln = eachHazard['hdln']
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
hazNameACap = self.sentence(hazNameA, addPeriod=False)
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if hazName in ["Winter Weather Advisory", "Winter Storm Warning", "Beach Hazards Statement"]:
forPhrase = " for |* Enter hazard type *|"
else:
forPhrase =""
if nwsIntroUsed == 0:
hazardBodyPhrase = "The National Weather Service in " + self._wfoCity
nwsIntroUsed = 1
if phraseCount == 0:
phraseCount = 1
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + " has issued " + \
hazNameA + ". "
else:
hazardBodyPhrase += " has issued " + hazNameA + forPhrase + \
", which is in effect" + endTimePhrase + ". "
elif phraseCount == 1:
phraseCount = 2
if hdln != lastHdln:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued."
else:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued. This " + hazName + forPhrase + \
" is in effect" + endTimePhrase + ". "
else:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued."
else:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + forPhrase + \
" has also been issued" + endTimePhrase + ". "
else:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase += "In addition, " + \
hazNameA + " has been issued."
else:
hazardBodyPhrase += "In addition, " + \
hazNameA + forPhrase + " has been issued. This " + hazName + \
" is in effect" + endTimePhrase + ". "
lastHdln = hdln
#
# This is for the can hazards
#
for eachHazard in canList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if nwsIntroUsed == 0:
hazardBodyPhrase = "The National Weather Service in " +\
self._wfoCity
nwsIntroUsed = 1
hazardBodyPhrase = hazardBodyPhrase + \
" has cancelled the " + hazName + ". "
else:
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" has been cancelled. "
#
# This is for the exp hazards
#
phraseCount = 0
for eachHazard in expList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if eachHazard['endTime'] <= argDict['creationTime']:
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is no longer in effect. "
else:
expTimeCurrent = argDict['creationTime']
timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" will expire " + timeWords + ". "
#
# This is for ext hazards
#
for eachHazard in extList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is now in effect" + endTimePhrase + ". "
#
# This is for upgrade hazards
#
for eachHazard in upgList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is no longer in effect. "
#
# This is for con hazards
#
for eachHazard in conList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
hazardBodyPhrase = hazardBodyPhrase + hazNameA + \
" remains in effect" + endTimePhrase + ". "
#
# This is for statement hazards
#
for eachHazard in statementList:
hazardBodyPhrase = "...|* Add statement headline *|...\n\n"
#
# This adds segment text
#
segmentText = ''
#
# Check that this segment codes to determine capture or not,
# and frame captured text or not
#
incTextFlag, incFramingCodes, skipCTAs, forceCTAList = \
self.useCaptureText(sortedHazardList)
#
#
# Check that the previous text exists
#
foundCTAs = []
for eachHazard in sortedHazardList:
if eachHazard.has_key('prevText'):
prevText = eachHazard['prevText']
if eachHazard['pil'] == 'MWS':
startPara = 0
else:
startPara = 1
segmentText, foundCTAs = self.cleanCapturedText(prevText,
startPara, addFramingCodes = False,
skipCTAs = skipCTAs)
tester = segmentText[0]
if tester == '*':
startPara = 1
else:
startPara = 2
segmentText, foundCTAs = self.cleanCapturedText(prevText,
startPara, addFramingCodes = False,
skipCTAs = skipCTAs)
#
# Check that the segment text isn't very short or blank
#
if len(segmentText) < 6:
incTextFlag = 0
# DR 21309 code addition from Middendorf (BYZ)
#
# Now if there is a new hazard and previous segment Text, then
# we may have to add bullets.
#
if incTextFlag and bulletProd:
for eachHazard in sortedHazardList:
if not eachHazard.has_key('prevText'):
newBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
print "newBullets = ", newBullets
print "segment text is: ", segmentText
for bullet in newBullets:
if re.search("\* " + bullet + "\.\.\.", segmentText, flags=re.IGNORECASE) is None:
print bullet + " not in segmentText"
start = self._bulletOrder().index(bullet) + 1
end = len(self._bulletOrder())
bulletFlag = 1
for i in range(start,end):
if (re.search("\* " + self._bulletOrder()[i] + "\.\.\.", segmentText, flags=re.IGNORECASE) is not None) and bulletFlag:
print "* " + self._bulletOrder()[i] + "... found!"
segmentTextSplit = re.split("\* " + self._bulletOrder()[i] + "\.\.\.", segmentText, flags=re.IGNORECASE)
segmentText = string.join(segmentTextSplit,"* " + bullet.upper() + \
"...|* Enter bullet text *|\n\n* " + self._bulletOrder()[i] + "...")
bulletFlag = 0
if bulletFlag:
print "appending to bottom list of bullets!"
segmentTextSplit = re.split("PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", segmentText, flags=re.IGNORECASE)
segmentText = "\n" + string.join(segmentTextSplit,"* " + bullet.upper() + \
"...|* Enter bullet text *|\n\nPRECAUTIONARY/PREPAREDNESS ACTIONS...")
bulletFlag = 0
#
# Now if there is a can/exp hazard and previous segment Text, then
# we may have to remove bullets.
#
if incTextFlag and bulletProd:
# First make list of bullets that we need to keep.
keepBulletList = []
for eachHazard in sortedHazardList:
if eachHazard['act'] not in ["CAN","EXP"]:
saveBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for saveBullet in saveBullets:
if saveBullet not in keepBulletList:
keepBulletList.append(saveBullet)
# Now determine which bullets we have to remove.
removeBulletList = []
for eachHazard in sortedHazardList:
if eachHazard['act'] in ["CAN","EXP"]:
canBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for canBullet in canBullets:
if canBullet not in keepBulletList and canBullet not in removeBulletList:
removeBulletList.append(canBullet)
print "hazardBodyText info: keepBulletList: ",keepBulletList
print "hazardBodyText info: removeBulletList: ",removeBulletList
# Finally remove the bullets no longer needed.
for bullet in removeBulletList:
if re.search("\* "+ bullet + "\.\.\.", segmentText, flags=re.IGNORECASE) is not None:
segmentTextSplit = re.split("\* " + bullet + "\.\.\.", segmentText, flags=re.IGNORECASE)
print "segmentTextSplit is ", segmentTextSplit
segmentTextSplit2 = string.split(segmentTextSplit[1],"*",1)
if len(segmentTextSplit2) == 2:
segmentTextSplit[1] = "*" + segmentTextSplit2[1]
else:
segmentTextSplit2 = re.split("PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", segmentTextSplit[1], 1, flags=re.IGNORECASE)
if len(segmentTextSplit2) == 2:
segmentTextSplit[1] = "PRECAUTIONARY/PREPAREDNESS ACTIONS..." + segmentTextSplit2[1]
segmentText = string.join(segmentTextSplit,"")
if removeBulletList != []:
segmentText = "|*\n" + segmentText + "*|"
else:
segmentText = segmentText
#
# If segment passes the above checks, add the text
#
print "hazardBodyText info: incTextFlag: ",incTextFlag
if incTextFlag:
print "hazardBodyText info: segmentText: ",segmentText
hazardBodyPhrase = hazardBodyPhrase + "\n\n" + \
segmentText + '\n\n'
elif bulletProd:
bulletFlag = 0
if eachHazard['act'] == 'CAN':
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Wrap-up text goes here *|.\n"
elif eachHazard['act'] == 'EXP':
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Wrap-up text goes here *|.\n"
else:
bulletFlag = 1
## print "bulletFlag is: ",bulletFlag
if bulletFlag:
newBulletList = []
bullets = ""
for eachHazard in sortedHazardList:
### get the default bullets for all hazards from the bullet diction
newBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for newBullet in newBullets:
if newBullet not in newBulletList:
newBulletList.append(newBullet)
print "my bullets are: ", newBulletList
### Determine the correct order for all bullets
bulletOrder = self._bulletOrder()
staticBulletOrder = self._bulletOrder()
for bullet in staticBulletOrder:
print "correct bullet order should be: ", bulletOrder
if bullet not in newBulletList:
bulletOrder.remove(bullet)
print "reordered bullets are: ", bulletOrder
for b in bulletOrder:
bullets = bullets + "* " + b.upper() + "...|* Enter bullet text *|\n\n"
hazardBodyPhrase = hazardBodyPhrase + "\n\n" + bullets
# If segment doesn't pass the checks, put in framing codes
else:
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Statement text goes here *|.\n\n"
# End code for DR 21310
#
# This adds the call to action statements. This is only performed
# if the segment is 'NEW' or if the previous text has been discarded
# due to a CAN/EXP/UPG segment
#
# remove items from forceCTAList if they exist in foundCTAs. Note
# that the formats of these lists are different, thus this code
# is more complicated
for ent in foundCTAs:
#only process CTAs that are vtec phen/sig based
if ent.find('.') == 2:
phensig = (ent[0:2], ent[3]) #phen.sig
if phensig in forceCTAList:
del forceCTAList[forceCTAList.index(phensig)]
hazardBodyPhrase = hazardBodyPhrase + '\n\n'
ctas = []
for (phen,sig) in forceCTAList:
hazardPhenSig = phen + "." + sig
cta = self.defaultCTA(hazardPhenSig)
if cta not in ctas:
ctas.append(cta)
if len(ctas) > 0:
hazardBodyPhrase = hazardBodyPhrase + \
'PRECAUTIONARY/PREPAREDNESS ACTIONS...\n\n'
for c in ctas:
hazardBodyPhrase = hazardBodyPhrase + c + '\n\n'
hazardBodyPhrase = hazardBodyPhrase + '&&\n\n'
# Make sure there is only one CAP tag pairs
hazardBodyPhrase = re.sub(r'&&\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n', \
"", hazardBodyPhrase)
return hazardBodyPhrase
def finalOverviewText(self):
#if didn't calculate any, use the default
if len(self.__overviewText) == 0:
if self._includeOverviewHeadline:
overviewHeadline = "...|*Overview headline (must edit)*|...\n\n"
else:
overviewHeadline = ""
if self._includeOverview:
overviewBody = ".|*Overview (must edit)*|.\n\n"
else:
overviewBody = ""
#assemble the lines
overview = overviewHeadline + overviewBody
return overview
else:
return self.__overviewText
def overviewText(self, hazardList, pil):
#
# This method finds an overview in the previous product
#
overview = ""
for each in hazardList:
if (each.has_key('prevOverviewText') and
each.has_key('pil') and
each.has_key('endTime') and
each.has_key('act')):
if (each['pil'] == pil and
each['endTime'] > self._currentTime and
each['act'] not in ['CAN', 'EXP']):
overview = each['prevOverviewText']
self.__overviewText, dummy = self.cleanCapturedText(
overview, 0)
break
def useCaptureText(self, hazardList):
#Based on the hazardlist, returns a tuple indicating:
# (inc capture text, inc framing codes, skip CTAs, forceCTAList)
#
# For the values to be considered, the 'hdln' value must be
# present in the list, or it needs to be a Statement (sig="S")
cans = ['CAN','UPG','EXP']
acts = ['NEW','EXT','EXA','EXB','CON']
foundACTS = 0
foundCANS = 0
foundSig = []
for eh in hazardList:
if eh['act'] in acts and (len(eh['hdln']) or eh['sig'] == 'S'):
foundACTS = 1
if eh['act'] in cans and (len(eh['hdln']) or eh['sig'] == 'S'):
foundCANS = 1
if eh['sig'] not in foundSig:
foundSig.append(eh['sig'])
includeFrameCodes = 0
includeText = 1
skipCTAs = 0
forceCTAList = []
# all actions are in CAN, UPG, EXP only (don't include text)
if foundCANS and not foundACTS:
if 'S' in foundSig and len(foundSig) == 1: #only S
includeFrameCodes = 1 #capture text, but frame it
else:
includeText = 0 #end of non statement
# something in CANS and something in acts (frame it, include text)
elif foundCANS and foundACTS:
includeFrameCodes = 1
skipCTAs = 1
for eh in hazardList:
if eh['act'] in acts and \
(eh['phen'], eh['sig']) not in forceCTAList and \
len(eh['hdln']):
forceCTAList.append((eh['phen'], eh['sig']))
#everything in active entries, captured text is used, but still
# need to handle the "NEW" entries.
else:
for eh in hazardList:
if eh['act'] in ['NEW'] and len(eh['hdln']):
forceCTAList.append((eh['phen'], eh['sig']))
return (includeText, includeFrameCodes, skipCTAs, forceCTAList)
def cleanCapturedText(self, text, paragraphs, addFramingCodes = False,
skipCTAs = False):
#
# This method takes a block of text, wraps it preserving blank lines,
# then returns the part after 'paragraphs'. So, if paragraphs is 0, it
# returns the whole thing, if it's 2, it returns paragraphs 2 -> end, etc.
# Headlines are always removed.
# Framing codes are added if specified.
#
paras = self.convertSingleParas(text) #single paragraphs
# keep track of any call to actions found
foundCTAs = []
# Process the paragraphs, keep only the interested ones
paraCount = 0
processedText = ''
for eachPara in paras:
if paraCount >= paragraphs:
found = self.ctasFound(eachPara) #get list of ctas found
if skipCTAs and len(found):
pass
else:
processedText = processedText + eachPara + '\n\n'
#keep track of remaining CTAs in processed text
for f in found:
if f not in foundCTAs:
foundCTAs.append(f)
if eachPara.find('...') == 0:
pass #ignore headlines
paraCount = paraCount + 1
# Add framing codes
if addFramingCodes:
processedText = processedText.rstrip()
processedText = "|*\n" + processedText + "*|\n"
# Wrap
processedText = self.endline(processedText,
linelength=self._lineLength, breakStr=[" ", "-", "..."])
return processedText, foundCTAs
def decodeBulletedText(self, prevText):
# returns the bullet paragraph text or None, returns the
# regular text after the bullets. The afterText is text up to
# the next bullet or up to "The National Weather Service". Note
# that this only correctly handles the 1st set of entries in
# a segment, thus double events will only decode the first set
# of bullets and text. The multipleRecords is set to 1 in the
# event that there are multiple sets of bullets. In this case
# only the 1st set was captured/decoded.
# (hazard, time, basis, impact, afterText, multipleRecords)
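# Illustrative input sketch (added commentary): the captured text this method expects
# roughly looks like the hypothetical segment below, with bullets separated by blank
# lines; only the first set of bullets is decoded:
#
#   ...HEADLINE...
#
#   The National Weather Service in Anytown has issued a ...
#
#   * hazard bullet text
#
#   * timing bullet text
#
#   * basis bullet text
#
#   * impact bullet text
#
#   Additional free text, kept up to the next "The National Weather Service" phrase.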
if prevText is None:
return (None, None, None, None, None, None)
# find the bullets
bullets = []
buf = prevText.split('\n\n* ')
if len(buf) <= 1:
return (None, None, None, None, None, None)
multRecords = 0 #indicator of multiple sets of bullets
for x in xrange(len(buf)):
if x == 0:
continue #headlines and text before the bullets
bullets.append(buf[x])
# find only the bulleted text, defined by the double line feed term.
# of the text
regText = "" #regular text after bullets
for x in xrange(1, len(bullets)):
index = bullets[x].find('\n\n')
if index != -1:
regText = bullets[x][index+2:]
bullets[x] = bullets[x][0:index] #eliminate after bullet text
if len(bullets) > x+2: #more bullets are present
multRecords = 1
bullets = bullets[0:x+1] #only interested in these bullets
break
# regular text is the remainder of the text. However we only
# want text from the last in the series of bullets to the
# beginning of any next NWS phrase.
lines = regText.split('\n')
for x in xrange(len(lines)):
if lines[x].find('The National Weather Service') == 0:
lines = lines[0:x] #eliminate following lines
break
regText = ("\n").join(lines)
# now clean up the text
for x in xrange(len(bullets)):
bullets[x] = string.replace(bullets[x],'\n',' ')
removeLF = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
regText = removeLF.sub(r'\1 \2',regText)
# extract out each section for returning the values
if len(bullets) >= 1:
hazard = bullets[0]
else:
hazard = None
if len(bullets) >= 2:
time = bullets[1]
else:
time = None
if len(bullets) >= 3:
basis = bullets[2]
else:
basis = None
if len(bullets) >= 4:
impact = bullets[3]
else:
impact = None
if len(regText) == 0:
regText = None #no regular text after bullets
return (hazard, time, basis, impact, regText, multRecords)
def substituteBulletedText(self, capText, defaultText, frameit="Never"):
#returns a properly formatted bulleted text based on
#the capText variable. If capText is None or 0 length, then
#the default text is used. frameit can be "Never", in which
#nothing is wrapped in framing codes, "Always" in which the
#text (default or cap) is wrapped in framing codes, or
#"DefaultOnly" in which just the default text is wrapped.
if capText is not None and len(capText):
textToUse = capText[0].upper()+capText[1:]
if frameit == "Always":
textToUse = "|* " + textToUse + " *|"
else:
textToUse = defaultText
if frameit == "Always" or frameit == "DefaultOnly":
textToUse = "|* " + textToUse + " *|"
# add bullet codes
textToUse = "* " + textToUse
# format it
return self.indentText(textToUse, indentFirstString = '',
indentNextString = ' ', maxWidth=self._lineLength,
breakStrings=[" ", "-", "..."])
def convertSingleParas(self, text):
#returns a list of paragraphs based on the input text.
lf = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
ptext = lf.sub(r'\1 \2', text)
ptext = ptext.replace('\n\n', '\n')
paragraphs = ptext.split('\n')
return paragraphs
def ctasFound(self, text):
#returns types of ctas found. The identifier is the pil (e.g., ZFP),
#phen/sig (e.g., DU.Y), or GENERIC. Uses the CallToAction definitions.
#convert text to single paragraphs
paragraphs = self.convertSingleParas(text)
for x in xrange(len(paragraphs)):
paragraphs[x] = string.replace(paragraphs[x],' ','')
#make list of call to actions (type, cta text)
if self.__procCTA is None:
self.__procCTA = []
ctao = CallToActions.CallToActions()
d = ctao.ctaDict()
for k in d.keys():
func = d[k]
items = func()
for it in items:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append((k,string.replace(cta,' ','')))
d = ctao.ctaPilDict()
for k in d.keys():
func = d[k]
items = func()
for it in items:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append((k,string.replace(cta,' ','')))
ctas = ctao.genericCTAs()
for it in ctas:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append(("GENERIC",
string.replace(cta,' ','')))
#compare
found = []
for para in paragraphs:
for (ctaType, cta) in self.__procCTA:
## Added following line to account for framing code issues in CTA
cta = re.sub("\|\*.*\*\|","",cta)
# We want this comparison to be case-insensitive just in case
# the site is not transmitting in mixed case yet.
if para.upper() == cta.upper() and ctaType not in found:
found.append(ctaType)
return found
| 1.179688 | 1 |
virtualscreening/vina/spark/buried_areas.py | rodrigofaccioli/drugdesign | 3 | 372 | <filename>virtualscreening/vina/spark/buried_areas.py
from pyspark import SparkContext, SparkConf, SparkFiles
from pyspark.sql import SQLContext, Row
import ConfigParser as configparser
from subprocess import Popen, PIPE
from datetime import datetime
from vina_utils import get_directory_complex_pdb_analysis, get_files_pdb, get_name_model_pdb, get_ligand_from_receptor_ligand_model, get_separator_filename_mode, get_directory_pdb_analysis, loading_pdb_2_list, get_name_receptor_pdb, get_files_pdb_filter
import os, sys
from os_utils import preparing_path
from gromacs_utils import get_value_from_xvg_sasa
from pdb_io import replace_chain_atom_line
from database_io import load_database
def sorting_buried_area(sc, buried_areaRDD):
sqlCtx = SQLContext(sc)
buried_areaRDD = sc.parallelize(buried_areaRDD)
#buried_areaRDD = buried_areaRDD.map(lambda p: Row(receptor=str(p[0]), ligand=str(p[1]), model=int(p[2]), buried_lig_rec=float(p[3]), buried_lig_rec_perc=float(p[4]), buried_lig_lig_perc=float(p[5]) ) )
buried_areaRDD = buried_areaRDD.map(lambda p: Row(pose=str(p[0]), buried_total=float(p[1]) ) )
buried_area_table = sqlCtx.createDataFrame(buried_areaRDD)
buried_area_table.registerTempTable("buried_area")
buried_area_sorted_by_buried_total = sqlCtx.sql("SELECT * FROM buried_area ORDER BY buried_total DESC") #buried_lig_lig_perc
return buried_area_sorted_by_buried_total
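# Illustrative usage sketch (added commentary, not from the original script): the helper
# above expects an iterable of (pose, buried_area_total) tuples; the pose names and
# values below are hypothetical:
#
#   areas = [("compl_receptor_-_lig0001_-_mode_1", 12.3456),
#            ("compl_receptor_-_lig0002_-_mode_1", 10.9876)]
#   sorted_df = sorting_buried_area(sc, areas)  # DataFrame ordered by buried_total DESC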
def save_receptor_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc):
f_buried_area = open(path_file_buried_area,"w")
for area in buried_area_sorted_by_lig_rec_perc:
#splited_line = area[0].split("_-_")
#aux_recep = splited_line[0]
#aux_lig = str(splited_line[1])
#preparing receptor
#receptor = str(str(aux_recep).replace("compl_", " ")).strip()
#preparing ligand
#splited_aux_lig = str(aux_lig).split(get_separator_filename_mode())
#ligand = splited_aux_lig[0]
#model = splited_aux_lig[1]
pose = area[0]
buried_total = "{:.4f}".format(area[1])
#line = receptor+"\t"+ligand+"\t"+model+"\t"+str(buried_lig_rec)+"\t"+str(buried_lig_rec_perc)+"\t"+str(buried_lig_lig_perc)+"\n"
line = pose+"\t"+str(buried_total)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc):
f_buried_area = open(path_file_buried_area,"w")
line = "# buried_area_total[nm2]\tpose"+"\n"
f_buried_area.write(line)
for area in buried_area_sorted_by_lig_rec_perc:
#receptor = area[0]
#ligand = area[1]
#model = area[2]
pose = str(str(area[0]).replace("compl_", " ")).strip()
buried_total = "{:.4f}".format(area[1])
#buried_lig_rec_perc = "{:.4f}".format(area[4])
#buried_lig_lig_perc = "{:.4f}".format(area[5])
#line = receptor+"\t"+ligand+"\t"+str(model)+"\t"+str(buried_lig_rec)+"\t"+str(buried_lig_rec_perc)+"\t"+str(buried_lig_lig_perc)+"\n"
line = str(buried_total)+"\t"+str(pose)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def save_normalized_buried_area(path_file_buried_area, full_dataRDD):
f_buried_area = open(path_file_buried_area,"w")
line = "# normalized_buried_area_total[nm2]\tpose"+"\n"
f_buried_area.write(line)
for area in full_dataRDD.collect():
pose = str(str(area[0]).replace("compl_", " ")).strip()
normalized_buried_total = "{:.4f}".format(area[1])
line = str(normalized_buried_total)+"\t"+str(pose)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def loading_lines_from_area_files(line):
line_splited = str(line).split()
#line_ret = ( str(line_splited[0]), str(line_splited[1]), int(line_splited[2]), float(line_splited[3]), float(line_splited[4]), float(line_splited[5]) )
line_ret = ( str(line_splited[0]), float(line_splited[1]) )
return line_ret
def get_files_area(mypath):
only_mol2_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".area"):
f_path = os.path.join(root,file)
only_mol2_file.append(f_path)
return only_mol2_file
def save_log(finish_time, start_time):
log_file_name = 'vs_buried_areas.log'
current_path = os.getcwd()
path_file = os.path.join(current_path, log_file_name)
log_file = open(path_file, 'w')
diff_time = finish_time - start_time
msg = 'Starting ' + str(start_time) +'\n'
log_file.write(msg)
msg = 'Finishing ' + str(finish_time) +'\n'
log_file.write(msg)
msg = 'Time Execution (seconds): ' + str(diff_time.total_seconds()) +'\n'
log_file.write(msg)
def main():
config = configparser.ConfigParser()
config.read('config.ini')
#Path for Gromacs project
gromacs_path = preparing_path(config.get('DRUGDESIGN', 'gromacs_path'))
#Path where PDB ligand are - They are NOT participated in docking
pdb_ligand_path = config.get('DEFAULT', 'pdb_ligand_path')
#Path that contains all files for analysis
path_analysis = config.get('DEFAULT', 'path_analysis')
#Ligand Database file
ligand_database = config.get('DEFAULT', 'ligand_database_path_file')
#Path where all pdb receptor are
path_receptor_pdb = config.get('DEFAULT', 'pdb_path')
#Path for saving pdb files of models generated by VS
path_analysis_pdb = get_directory_pdb_analysis(path_analysis)
# Create SPARK config
maxResultSize = str(config.get('SPARK', 'maxResultSize'))
conf = (SparkConf().set("spark.driver.maxResultSize", maxResultSize))
# Create context
sc = SparkContext(conf=conf)
sqlCtx = SQLContext(sc)
#Adding Python Source file
#Path for drugdesign project
path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')
sc.addPyFile(os.path.join(path_spark_drugdesign,"vina_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"os_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"gromacs_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"pdb_io.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"database_io.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"json_utils.py"))
#Adding bash scripts
sc.addFile(os.path.join(path_spark_drugdesign,"make_ndx_buried_area_total.sh"))
sc.addFile(os.path.join(path_spark_drugdesign,"make_sasa_rec_buried_area_total.sh"))
#Parameters form command line
#Indicates probe. Example: 0.14
probe = float(sys.argv[1])
#Indicates ndots. Example: 24
ndots = int(sys.argv[2])
#Broadcast
path_analysis_pdb_complex_b = sc.broadcast(path_analysis_pdb)
gromacs_path = sc.broadcast(gromacs_path)
pdb_ligand_path = sc.broadcast(pdb_ligand_path)
probe = sc.broadcast(probe)
ndots = sc.broadcast(ndots)
start_time = datetime.now()
os.environ["GMX_MAXBACKUP"]="-1"
#Loading all PDB receptor files into memory
list_all_pdb_receptor_files_path = []
all_receptor_for_complex = get_files_pdb(path_receptor_pdb)
for receptor in all_receptor_for_complex:
list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor))
#Computing Buried areas
for pdb_receptor_files in list_all_pdb_receptor_files_path:
#Getting receptor name by fully path
base_file_name_receptor = get_name_receptor_pdb(str(pdb_receptor_files[0]))
#PDB file loaded into memory is sent by broadcast
pdb_file_receptor = pdb_receptor_files[1]
pdb_file_receptor = sc.broadcast(pdb_file_receptor)
#Loading PDB model files based on receptor into memory
base_file_name_receptor_for_filter = base_file_name_receptor+"_-_"
all_model_for_complex = get_files_pdb_filter(path_analysis_pdb,base_file_name_receptor_for_filter)
all_model_for_complexRDD = sc.parallelize(all_model_for_complex)
all_model_filesRDD = all_model_for_complexRDD.map(loading_pdb_2_list).collect()
# ********** Starting function **********************************************************
def compute_buried_area(pdb_complex):
chZ = "chZ"
sasa_complex = -1.0
sasa_rec = -1.0
sasa_lig = -1.0
buried_total = -1.0
returned_list = []
try:
base_name = get_name_model_pdb(pdb_complex)
ligand_name = get_ligand_from_receptor_ligand_model(base_name)
f_pdb_ligand_no_docking = os.path.join(pdb_ligand_path.value,ligand_name+".pdb")
f_ndx = os.path.join(path_analysis_pdb_complex_b.value,base_name+".ndx")
f_temp_sasa_complex = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_complex.xvg")
f_temp_sasa_rec = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_rec.xvg")
f_temp_sasa_lig = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_lig.xvg")
# Makes the index file with the ligand (chain z) and the rest (non chain z)
script_make_ndx = SparkFiles.get("make_ndx_buried_area_total.sh") #Getting bash script that was copied by addFile command
command = script_make_ndx + " " + gromacs_path.value + " "+ pdb_complex + " "+ f_ndx
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
command = gromacs_path.value +"gmx sasa -f " + pdb_complex + " -s " + pdb_complex + " -nopbc " + " -n " + f_ndx + " -surface System " + " -output System "+ " -xvg none " + " -o " + f_temp_sasa_complex
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
# Makes f_temp_sasa_rec file
script_make_sasa_rec = SparkFiles.get("make_sasa_rec_buried_area_total.sh") #Getting bash script that was copied by addFile command
command = script_make_sasa_rec + " " + gromacs_path.value + " "+ pdb_complex + " "+ f_ndx + " " + f_temp_sasa_rec
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
command = gromacs_path.value +"gmx sasa -f " + pdb_complex + " -s " + pdb_complex + " -nopbc " + " -n " + f_ndx + " -surface chZ " + " -output chZ "+ " -xvg none " + " -o " + f_temp_sasa_lig
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
sasa_complex = get_value_from_xvg_sasa(f_temp_sasa_complex)
sasa_rec = get_value_from_xvg_sasa(f_temp_sasa_rec)
sasa_lig = get_value_from_xvg_sasa(f_temp_sasa_lig)
buried_total = sasa_rec + sasa_lig - sasa_complex
				#Generating result - see the column order, because the resulting file is written based on this ordering
returned_list = (base_name, buried_total)
except:
returned_list = (base_name, float(0))
#Deleting files
if os.path.exists(f_ndx):
os.remove(f_ndx)
if os.path.exists(f_temp_sasa_complex):
os.remove(f_temp_sasa_complex)
if os.path.exists(f_temp_sasa_rec):
os.remove(f_temp_sasa_rec)
if os.path.exists(f_temp_sasa_lig):
os.remove(f_temp_sasa_lig)
return returned_list
# ********** Finish function **********************************************************
# ********** Starting function **********************************************************
def save_model_receptor(list_receptor_model_file):
receptor_file = pdb_file_receptor.value #Obtained from broadcast
model_file = list_receptor_model_file[0]
full_path_for_save_complex = list_receptor_model_file[1]
#Open file for writting the complex
f_compl = open(full_path_for_save_complex, "w")
#Insert lines of receptor
for item in receptor_file:
f_compl.write(item)
#Insert lines of model and insert Z chain
for item in model_file:
item = replace_chain_atom_line(item,"d","z")
f_compl.write(item)
f_compl.close()
# ********** Finish function **********************************************************
# ********** Starting function **********************************************************
def build_list_model_for_complex(model):
full_path_model = model[0]
model_file = model[1]
path_pdb_complex = path_analysis_pdb_complex_b.value #Obtained from broadcast
#Building complex file based on model file name
base_name_model = get_name_model_pdb(full_path_model)
complex_name = "compl_"+base_name_model+".pdb"
full_path_for_save_complex = os.path.join(path_pdb_complex,complex_name)
list_receptor_model_file = (model_file, full_path_for_save_complex)
save_model_receptor(list_receptor_model_file)
list_ret = compute_buried_area(full_path_for_save_complex)
os.remove(full_path_for_save_complex)
return list_ret
# ********** Finish function **********************************************************
all_model_filesRDD = sc.parallelize(all_model_filesRDD)
all_model_filesRDD = all_model_filesRDD.map(build_list_model_for_complex).collect()
#Saving buried area of receptor
full_area_file = os.path.join(path_analysis,base_file_name_receptor+".area")
save_receptor_buried_area(full_area_file, all_model_filesRDD)
#Loading all area file
all_area_file = os.path.join(path_analysis,"*.area")
buried_areaRDD = sc.textFile(all_area_file).map(loading_lines_from_area_files).collect()
#Sorting by buried_total column
buried_area_sorted_by_buried_total = sorting_buried_area(sc, buried_areaRDD)
buried_area_sorted_by_buried_total.cache()
buried_area_sorted_by_buried_total_LIST = buried_area_sorted_by_buried_total.map(lambda p: (p.pose, p.buried_total) ).collect()
#Saving buried area file
path_file_buried_area = os.path.join(path_analysis, "summary_buried_areas_total.dat")
save_buried_area(path_file_buried_area, buried_area_sorted_by_buried_total_LIST)
#Calculating normalized buried area
#Loading database
rdd_database = load_database(sc, ligand_database)
#Creating Dataframe
database_table = sqlCtx.createDataFrame(rdd_database)
database_table.registerTempTable("database")
number_pose_ligandRDD = buried_area_sorted_by_buried_total.map(lambda p: Row(buried_total=int(p.buried_total), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose) ) ).collect()
number_pose_ligand_table = sqlCtx.createDataFrame(number_pose_ligandRDD)
number_pose_ligand_table.registerTempTable("buried_area_total_sort")
sql = """
SELECT pose, (b.buried_total / a.heavyAtom) as normalized_buried_area
FROM database a
JOIN buried_area_total_sort b ON b.ligand = a.ligand
ORDER BY normalized_buried_area DESC
"""
#Getting all data
full_dataRDD = sqlCtx.sql(sql)
#Saving normalized buried area file
path_file_buried_area = os.path.join(path_analysis, "summary_normalized_buried_areas.dat")
save_normalized_buried_area(path_file_buried_area, full_dataRDD)
#Removing all area files
all_area_files = get_files_area(path_analysis)
for area_file in all_area_files:
os.remove(area_file)
finish_time = datetime.now()
save_log(finish_time, start_time)
if __name__ == '__main__':
    main()
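
# A quick illustration of the buried-area formula used in compute_buried_area()
# above (the numbers are made up for the example, not taken from a real run):
# if the receptor alone has SASA 150.0 nm^2, the free ligand 5.0 nm^2 and the
# complex 148.0 nm^2, then buried_total = 150.0 + 5.0 - 148.0 = 7.0 nm^2 of
# surface buried on binding.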
| 2.28125 | 2 |
oase-root/web_app/views/system/mail/action_mail.py | Masa-Yasuno/oase | 9 | 373 | <gh_stars>1-10
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[概要]
MAILアクション用画面表示補助クラス
"""
import pytz
import datetime
import json
import socket
import traceback
from django.http import HttpResponse
from django.http import HttpResponseServerError
from django.db import transaction
from django.conf import settings
from libs.commonlibs import define as defs
from libs.commonlibs.oase_logger import OaseLogger
from libs.commonlibs.aes_cipher import AESCipher
from web_app.models.models import ActionType
from web_app.models.mail_models import MailDriver
from web_app.templatetags.common import get_message
from web_app.serializers.unicode_check import UnicodeCheck
logger = OaseLogger.get_instance()  # initialize the logger
class mailDriverInfo():
def __init__(self, drv_id, act_id, name, ver, icon_name):
self.drv_id = drv_id
self.act_id = act_id
self.name = name
self.ver = ver
self.icon_name = icon_name
def __str__(self):
return '%s(ver%s)' % (self.name, self.ver)
def get_driver_name(self):
return '%s Driver ver%s' % (self.name, self.ver)
def get_driver_id(self):
return self.drv_id
def get_icon_name(self):
return self.icon_name
@classmethod
def get_template_file(cls):
return 'system/mail/action_mail.html'
@classmethod
def get_info_list(cls, user_groups):
try:
mail_driver_obj_list = MailDriver.objects.all()
except Exception as e:
            # exceptions raised here are handled by the outer caller
raise
protocol_dict = cls.get_define()['dict']
mail_driver_dto_list = []
cipher = AESCipher(settings.AES_KEY)
for mail_obj in mail_driver_obj_list:
mail_info = mail_obj.__dict__
if mail_obj.password:
                mail_info['password'] = cipher.decrypt(mail_obj.password)
mail_info['protocol_str'] = protocol_dict[mail_obj.protocol]
mail_driver_dto_list.append(mail_info)
return mail_driver_dto_list
@classmethod
def get_group_list(cls, user_groups):
"""
[概要]
グループ一覧を取得する(システム管理グループを除く)
"""
return []
@classmethod
def get_define(cls):
protocol_dict = {key_value['v']: key_value['k'] for key_value in defs.SMTP_PROTOCOL.LIST_ALL}
defines = {
'list_all': defs.SMTP_PROTOCOL.LIST_ALL,
'dict': protocol_dict,
}
return defines
def record_lock(self, json_str, request):
logger.logic_log('LOSI00001', 'None', request=request)
driver_id = self.get_driver_id()
        # lock the record before updating
if json_str['json_str']['ope'] in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE):
drvinfo_modify = int(json_str['json_str']['mail_driver_id'])
MailDriver.objects.select_for_update().filter(pk=drvinfo_modify)
logger.logic_log('LOSI00002', 'Record locked.(driver_id=%s)' % driver_id, request=request)
def modify(self, json_str, request):
"""
[メソッド概要]
グループのDB更新処理
"""
logger.logic_log('LOSI00001', 'None', request=request)
error_flag = False
error_msg = {
'mail_disp_name' : '',
'protocol' : '',
'smtp_server' : '',
'port' : '',
'user' : '',
'password' : '',
}
now = datetime.datetime.now(pytz.timezone('UTC'))
emo_chk = UnicodeCheck()
        # data returned on success
response = {"status": "success",}
try:
rq = json_str['json_str']
ope = int(rq['ope'])
            # input validation for operations other than delete
if ope != defs.DABASE_OPECODE.OPE_DELETE:
error_flag = self._validate(rq, error_msg, request)
if error_flag:
raise UserWarning('validation error.')
            # encrypt the password; use an empty string when none is given
cipher = AESCipher(settings.AES_KEY)
if ope == defs.DABASE_OPECODE.OPE_UPDATE:
                encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
driver_info_mod = MailDriver.objects.get(mail_driver_id=rq['mail_driver_id'])
driver_info_mod.mail_disp_name = rq['mail_disp_name']
driver_info_mod.protocol = rq['protocol']
driver_info_mod.smtp_server = rq['smtp_server']
driver_info_mod.port = rq['port']
driver_info_mod.user = rq['user']
                driver_info_mod.password = encrypted_password
driver_info_mod.last_update_user = request.user.user_name
driver_info_mod.last_update_timestamp = now
driver_info_mod.save(force_update=True)
elif ope == defs.DABASE_OPECODE.OPE_DELETE:
MailDriver.objects.filter(pk=rq['mail_driver_id']).delete()
elif ope == defs.DABASE_OPECODE.OPE_INSERT:
                encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
driver_info_reg = MailDriver(
mail_disp_name = rq['mail_disp_name'],
protocol = rq['protocol'],
smtp_server = rq['smtp_server'],
port = rq['port'],
user = rq['user'],
                    password = encrypted_password,
last_update_user = request.user.user_name,
last_update_timestamp = now
).save(force_insert=True)
except MailDriver.DoesNotExist:
            logger.logic_log('LOSM07006', "mail_driver_id", rq['mail_driver_id'], request=request)
except Exception as e:
logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
response = {
'status': 'failure',
                'error_msg': error_msg,  # error details (shown via the error icon)
}
logger.logic_log('LOSI00002', 'response=%s' % response, request=request)
return response
def _validate(self, rq, error_msg, request):
"""
[概要]
入力チェック
[引数]
rq: dict リクエストされた入力データ
error_msg: dict
[戻り値]
"""
logger.logic_log('LOSI00001', 'data: %s, error_msg:%s'%(rq, error_msg))
error_flag = False
emo_chk = UnicodeCheck()
emo_flag = False
emo_flag_ita_disp_name = False
emo_flag_hostname = False
if len(rq['mail_disp_name']) == 0:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27201', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'mail_disp_name', request=request)
if len(rq['mail_disp_name']) > 64:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27202', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'mail_disp_name', 64, rq['mail_disp_name'], request=request)
        # emoji check
value_list = emo_chk.is_emotion(rq['mail_disp_name'])
if len(value_list) > 0:
error_flag = True
emo_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27216', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['protocol']) == 0:
error_flag = True
error_msg['protocol'] += get_message('MOSJA27212', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'protocol', request=request)
if len(rq['protocol']) > 64:
error_flag = True
error_msg['protocol'] += get_message('MOSJA27213', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'protocol', 64, rq['protocol'], request=request)
if len(rq['smtp_server']) == 0:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27203', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'smtp_server', request=request)
if len(rq['smtp_server']) > 128:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27204', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'smtp_server', 64, rq['smtp_server'], request=request)
        # emoji check
value_list = emo_chk.is_emotion(rq['smtp_server'])
if len(value_list) > 0:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27217', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['port']) == 0:
error_flag = True
error_msg['port'] += get_message('MOSJA27205', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'port', request=request)
try:
tmp_port = int(rq['port'])
if 0 > tmp_port or tmp_port > 65535:
error_flag = True
error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07003', 'port', rq['port'], request=request)
except ValueError:
error_flag = True
error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07003', 'port', rq['port'], request=request)
if len(rq['user']) > 64:
error_flag = True
error_msg['user'] += get_message('MOSJA27207', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'user', 64, rq['user'], request=request)
        # emoji check
value_list = emo_chk.is_emotion(rq['user'])
if len(value_list) > 0:
error_flag = True
error_msg['user'] += get_message('MOSJA27218', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['password']) > 64:
error_flag = True
error_msg['password'] += get_message('<PASSWORD>', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'password', 64, rq['password'], request=request)
        # emoji check
value_list = emo_chk.is_emotion(rq['password'])
if len(value_list) > 0:
error_flag = True
error_msg['password'] += get_message('<PASSWORD>', request.user.get_lang_mode(), showMsgId=False) + '\n'
if not emo_flag:
duplication = MailDriver.objects.filter(mail_disp_name=rq['mail_disp_name'])
if len(duplication) == 1 and int(rq['mail_driver_id']) != duplication[0].mail_driver_id:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27209', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07004', 'mail_disp_name', rq['mail_disp_name'], request=request)
if error_flag == False:
            # connectivity check
resp_code = -1
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                    resp_code = sock.connect_ex((rq['smtp_server'], int(rq['port'])))  # the hostname must be resolvable (e.g. via DNS or /etc/hosts)
sock.close()
except Exception as e:
pass
if resp_code != 0:
error_flag = True
                # TODO: provisionally reporting this error under the display-name field
error_msg['mail_disp_name'] += get_message('MOSJA27215', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07005', rq['smtp_server'], rq['port'], request=request)
return error_flag
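
# Hedged usage sketch (illustrative values only, not part of the original module):
#
#   info = mailDriverInfo(drv_id=1, act_id=3, name='mail', ver='1.0', icon_name='mail.png')
#   info.get_driver_name()              # -> 'mail Driver ver1.0'
#   mailDriverInfo.get_template_file()  # -> 'system/mail/action_mail.html'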
| 1.851563 | 2 |
queue/animal_queue.py | cozek/code-practice | 0 | 374 | #!/usr/bin/env python3
from typing import Any, Union
class Animal:
def __init__(self, name: str) -> None:
self.name = name
def set_order(self, order: int) -> None:
self.order = order
def peek_order(self) -> int:
return self.order
def __str__(self) -> str:
return f"{self.name}"
class Node:
def __init__(self, data: Any):
self.data = data
self.next_node = None
class LinkedList:
def __init__(self) -> None:
self.head = None
self.tail = None
def __str__(self) -> str:
current = self.head
string = f""
while current.next_node is not None:
string += f"{current.data} -> "
current = current.next_node
return string + "END"
def is_empty(self) -> bool:
if self.head is None:
return True
else:
return False
def insert(self, item: Any) -> None:
if self.is_empty():
self.head = Node(item)
self.tail = self.head
else:
new_node = Node(item)
self.tail.next_node = new_node
self.tail = self.tail.next_node
def remove(self) -> Any:
if self.head is None:
raise ("Empty LinkedList!")
else:
data = self.head.data
self.head = self.head.next_node
return data
def peak(self):
return self.head.data
class Dog(Animal):
def __init__(self, name: str):
super().__init__(name)
class Cat(Animal):
def __init__(self, name: str):
super().__init__(name)
class AnimalQueue:
def __init__(self) -> None:
self.dogs = LinkedList()
self.cats = LinkedList()
self.order = 0
def enqueue(self, animal: Union[Dog, Cat]) -> None:
if not isinstance(animal, (Dog, Cat)):
raise Exception("Expected Dog or Cat!")
else:
animal.set_order(self.order)
self.order += 1
if isinstance(animal, Dog):
self.dogs.insert(animal)
elif isinstance(animal, Cat):
self.cats.insert(animal)
def dequeAny(self) -> Union[Dog, Cat]:
if self.dogs.is_empty():
return self.dequeCat()
elif self.cats.is_empty():
return self.dequeDog()
if self.dogs.head.data.peek_order() > self.cats.head.data.peek_order():
return self.dequeCat()
else:
return self.dequeDog()
def print_cats(self) -> str:
string = ""
cat = self.cats.head
while cat is not None:
string += f"{cat.data.name} {cat.data.peek_order()} | "
cat = cat.next_node
return string
def dequeDog(self) -> Dog:
return self.dogs.remove()
def dequeCat(self) -> Cat:
return self.cats.remove()
def main():
q = AnimalQueue()
dogs = [Dog("d1"), Dog("d2"), Dog("d3")]
cats = [Cat("c1"), Cat("c2"), Cat("c3")]
both = []
while cats != []:
both.append(cats.pop())
both.append(dogs.pop())
[q.enqueue(animal) for animal in both]
string = ""
for anim in both:
string += f"{anim.name} {anim.order} | "
print(string)
# print(q.print_cats())
get = q.dequeDog()
print(get.order,get.name)
get = q.dequeAny()
print(get.order,get.name)
if __name__ == "__main__":
main()
| 3.734375 | 4 |
ophyd/areadetector/detectors.py | NSLS-II/ophyd | 16 | 375 | # vi: ts=4 sw=4
'''AreaDetector Devices
`areaDetector`_ detector abstractions
.. _areaDetector: https://areadetector.github.io/master/index.html
'''
import warnings
from .base import (ADBase, ADComponent as C)
from . import cam
__all__ = ['DetectorBase',
'AreaDetector',
'AdscDetector',
'Andor3Detector',
'AndorDetector',
'BrukerDetector',
'DexelaDetector',
'EmergentVisionDetector',
'EigerDetector',
'FirewireLinDetector',
'FirewireWinDetector',
'GreatEyesDetector',
'LightFieldDetector',
'Mar345Detector',
'MarCCDDetector',
'PSLDetector',
'PerkinElmerDetector',
'PICamDetector',
'PilatusDetector',
'PixiradDetector',
'PointGreyDetector',
'ProsilicaDetector',
'PvcamDetector',
'RoperDetector',
'SimDetector',
'URLDetector',
'UVCDetector',
'Xspress3Detector'
]
class DetectorBase(ADBase):
"""
The base class for the hardware-specific classes that follow.
Note that Plugin also inherits from ADBase.
This adds some AD-specific methods that are not shared by the plugins.
"""
_default_configuration_attrs = (ADBase._default_configuration_attrs +
('cam', ))
def generate_datum(self, key, timestamp, datum_kwargs=None):
"""
Notify plugins of acquisition being complete.
When a new acquisition is started, this method is called with a
key which is a label like 'light', 'dark', or 'gain8'.
It in turn calls ``generate_datum`` on all of the plugins that have
that method.
File plugins are identified by searching for a
:meth:`~ophyd.areadetector.filestore_mixins.FileStoreBase.generate_datum`
method that must have the signature ::
def generate_datum(key: str, timestamp: float, datum_kwargs: dict):
...
Parameters
----------
key : str
The label for the datum that should be generated
timestamp : float
The time of the trigger
datum_kwargs : Dict[str, Any], optional
Any datum kwargs that should go to all children.
"""
if datum_kwargs is None:
datum_kwargs = {}
file_plugins = [s for s in self._signals.values() if
hasattr(s, 'generate_datum')]
for p in file_plugins:
if p.enable.get():
p.generate_datum(key, timestamp, datum_kwargs)
def dispatch(self, key, timestamp):
warnings.warn(
".dispatch is deprecated, use .generate_datum instead",
stacklevel=2
)
return self.generate_datum(key, timestamp, {})
dispatch.__doc__ = generate_datum.__doc__
def make_data_key(self):
source = 'PV:{}'.format(self.prefix)
# This shape is expected to match arr.shape for the array.
shape = (self.cam.num_images.get(),
self.cam.array_size.array_size_y.get(),
self.cam.array_size.array_size_x.get())
return dict(shape=shape, source=source, dtype='array',
external='FILESTORE:')
def collect_asset_docs(self):
file_plugins = [s for s in self._signals.values() if
hasattr(s, 'collect_asset_docs')]
for p in file_plugins:
yield from p.collect_asset_docs()
class AreaDetector(DetectorBase):
cam = C(cam.AreaDetectorCam, 'cam1:')
class SimDetector(DetectorBase):
_html_docs = ['simDetectorDoc.html']
cam = C(cam.SimDetectorCam, 'cam1:')
class AdscDetector(DetectorBase):
_html_docs = ['adscDoc.html']
cam = C(cam.AdscDetectorCam, 'cam1:')
class AndorDetector(DetectorBase):
_html_docs = ['andorDoc.html']
cam = C(cam.AndorDetectorCam, 'cam1:')
class Andor3Detector(DetectorBase):
_html_docs = ['andor3Doc.html']
cam = C(cam.Andor3DetectorCam, 'cam1:')
class BrukerDetector(DetectorBase):
_html_docs = ['BrukerDoc.html']
cam = C(cam.BrukerDetectorCam, 'cam1:')
class DexelaDetector(DetectorBase):
_html_docs = ['DexelaDoc.html']
cam = C(cam.DexelaDetectorCam, 'cam1:')
class EmergentVisionDetector(DetectorBase):
_html_docs = ['EVTDoc.html']
cam = C(cam.EmergentVisionDetectorCam, 'cam1:')
class EigerDetector(DetectorBase):
_html_docs = ['EigerDoc.html']
cam = C(cam.EigerDetectorCam, 'cam1:')
class FirewireLinDetector(DetectorBase):
_html_docs = ['FirewireWinDoc.html']
cam = C(cam.FirewireLinDetectorCam, 'cam1:')
class FirewireWinDetector(DetectorBase):
_html_docs = ['FirewireWinDoc.html']
cam = C(cam.FirewireWinDetectorCam, 'cam1:')
class GreatEyesDetector(DetectorBase):
_html_docs = [] # the documentation is not public
cam = C(cam.GreatEyesDetectorCam, 'cam1:')
class LightFieldDetector(DetectorBase):
_html_docs = ['LightFieldDoc.html']
cam = C(cam.LightFieldDetectorCam, 'cam1:')
class Mar345Detector(DetectorBase):
_html_docs = ['Mar345Doc.html']
cam = C(cam.Mar345DetectorCam, 'cam1:')
class MarCCDDetector(DetectorBase):
_html_docs = ['MarCCDDoc.html']
cam = C(cam.MarCCDDetectorCam, 'cam1:')
class PerkinElmerDetector(DetectorBase):
_html_docs = ['PerkinElmerDoc.html']
cam = C(cam.PerkinElmerDetectorCam, 'cam1:')
class PSLDetector(DetectorBase):
_html_docs = ['PSLDoc.html']
cam = C(cam.PSLDetectorCam, 'cam1:')
class PICamDetector(DetectorBase):
_html_docs = ['PICamDoc.html']
cam = C(cam.PICamDetectorCam, 'cam1:')
class PilatusDetector(DetectorBase):
_html_docs = ['pilatusDoc.html']
cam = C(cam.PilatusDetectorCam, 'cam1:')
class PixiradDetector(DetectorBase):
_html_docs = ['PixiradDoc.html']
cam = C(cam.PixiradDetectorCam, 'cam1:')
class PointGreyDetector(DetectorBase):
_html_docs = ['PointGreyDoc.html']
cam = C(cam.PointGreyDetectorCam, 'cam1:')
class ProsilicaDetector(DetectorBase):
_html_docs = ['prosilicaDoc.html']
cam = C(cam.ProsilicaDetectorCam, 'cam1:')
class PvcamDetector(DetectorBase):
_html_docs = ['pvcamDoc.html']
cam = C(cam.PvcamDetectorCam, 'cam1:')
class RoperDetector(DetectorBase):
_html_docs = ['RoperDoc.html']
cam = C(cam.RoperDetectorCam, 'cam1:')
class URLDetector(DetectorBase):
_html_docs = ['URLDoc.html']
cam = C(cam.URLDetectorCam, 'cam1:')
class UVCDetector(DetectorBase):
_html_docs = ['UVCDoc.html']
cam = C(cam.UVCDetectorCam, 'cam1:')
class Xspress3Detector(DetectorBase):
_html_docs = ['Xspress3Doc.html']
cam = C(cam.Xspress3DetectorCam, 'det1:')
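
# Illustrative sketch only (not part of ophyd): the duck-typed interface that
# DetectorBase.generate_datum() looks for on its child components. Any signal
# exposing a generate_datum() method with this signature (and a truthy `enable`
# signal) gets called by the loop above. The class name below is hypothetical.
#
#   class ExampleFilePlugin:
#       def generate_datum(self, key, timestamp, datum_kwargs):
#           # a real file plugin would emit Resource/Datum documents here
#           ...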
| 2.0625 | 2 |
python/EXERCICIO 96 - FUNCAO QUE CALCULA A AREA.py | debor4h/exerciciosPython | 1 | 376 | def area(msg):#declaracao da funcao com o parametro msg
print(msg)#aqui msg e a area
print('Controle de Terrenos')
print('-' * 20)
l = float(input('Largura (m): '))
c = float(input('Comprimento (m): '))
area(f'A área do seu terreno {l}X{c} é de {l*c}m².')
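
# A small variant (not part of the original exercise): a function that computes
# and returns the area itself, instead of printing a pre-built message.
def rectangle_area(width, length):
    return width * length

# e.g. rectangle_area(3.0, 2.5) -> 7.5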
| 3.609375 | 4 |
auth_iam/dashboard/auth/routes.py | santiher/dash-auth-example | 11 | 377 | import os
from functools import wraps
from os.path import join as join_path
from dash import Dash
from flask import make_response, render_template_string, redirect
excluded_resources_endpoints = (
'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout',
'/_user', '/auth')
def add_routes(app, authorizer):
"""Adds authentication endpoints to a flask app.
Decorates other endpoints to grant access.
The endpoints are:
* /login
* Method: GET
* /logout
* Method: GET
* Erases cookies
* /auth
* Method: GET
* Validates cookies if present or header authentication
* Header:
'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)'
* Sets cookies on login
* Rejects unauthorized users
Parameters
----------
    app: flask.Flask or dash.Dash
        The flask or dash application.
    authorizer:
        Object exposing ``validate()`` and ``clean_cookie()``, used to grant
        or deny access and to manage the authentication cookies.

    Endpoints listed in the module-level ``excluded_resources_endpoints``
    tuple are never wrapped with the access check.
"""
def login():
ok, _ = authorizer.validate()
if ok:
return make_response(redirect('/'), 307)
return render_template_string(login_template)
def logout():
_, response = authorizer.clean_cookie()
return response
def auth():
_, response = authorizer.validate()
return response
def authorize_endpoint(function):
@wraps(function)
def authorized_function(*args, **kwargs):
ok, response = authorizer.validate()
if ok:
return function(*args, **kwargs)
return response
return authorized_function
if isinstance(app, Dash):
app = app.server
login_template = load_template('login.html')
app.add_url_rule('/auth', '/auth', auth)
app.add_url_rule('/login', '/login', login)
app.add_url_rule('/logout', '/logout', logout)
for endpoint, function in app.view_functions.items():
if endpoint not in excluded_resources_endpoints:
app.view_functions[endpoint] = authorize_endpoint(function)
def load_template(filename):
"""Loads the login html template."""
pyfile_path = os.path.dirname(os.path.abspath(__file__))
path = join_path(pyfile_path, 'templates', filename)
with open(path, 'r') as f:
return f.read().strip()
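
# Hedged usage sketch: wiring these routes into a Dash app. `MyAuthorizer` is a
# hypothetical stand-in for a real authorizer exposing validate() and
# clean_cookie(); it is not defined in this package.
#
#   from dash import Dash
#   from dashboard.auth.routes import add_routes
#
#   app = Dash(__name__)
#   add_routes(app, MyAuthorizer())
#   app.run_server()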
| 2.65625 | 3 |
amazon/model_api/migrations/0005_remove_order_datetimecreated_alter_order__id_and_more.py | gabrielkarras/SOEN341 | 3 | 378 | # Generated by Django 4.0.1 on 2022-04-07 01:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('model_api', '0004_remove_order_created_remove_order_id_and_more'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='dateTimeCreated',
),
migrations.AlterField(
model_name='order',
name='_id',
field=models.AutoField(editable=False, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='orderedproduct',
name='_id',
field=models.AutoField(editable=False, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='orderedproduct',
name='price',
field=models.CharField(blank=True, max_length=20, null=True),
),
]
| 1.640625 | 2 |
items/migrations/0001_initial.py | tony-joseph/livre | 1 | 379 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-21 12:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BookCopy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('book_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])),
('remarks', models.TextField(blank=True, default='')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='BookDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1024)),
('author', models.CharField(default='Unknown', max_length=1024)),
('description', models.TextField(blank=True, default='')),
('publisher', models.CharField(blank=True, default='', max_length=512)),
('published_on', models.DateField(blank=True, null=True)),
('pages', models.PositiveIntegerField(blank=True, default=0, null=True)),
('ddc', models.CharField(blank=True, default='', max_length=1024)),
('llcc', models.CharField(blank=True, default='', max_length=1024)),
('isbn', models.CharField(blank=True, default='', max_length=1024)),
('tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=512)),
('slug', models.SlugField(max_length=128, unique=True)),
('description', models.TextField(blank=True, default='')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512)),
('short_code', models.CharField(db_index=True, max_length=8, unique=True)),
('description', models.TextField(blank=True, default='')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Periodical',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1024)),
('description', models.TextField(blank=True, default='')),
('publisher', models.CharField(blank=True, default='', max_length=512)),
('tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PeriodicalIssue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('issue_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])),
('published_on', models.DateField(blank=True, null=True)),
('volume', models.PositiveIntegerField(blank=True, null=True)),
('issue', models.PositiveIntegerField(blank=True, null=True)),
('remarks', models.TextField(blank=True, default='')),
('tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='bookdetail',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'),
),
migrations.AddField(
model_name='bookdetail',
name='created_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='bookdetail',
name='language',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'),
),
migrations.AddField(
model_name='bookdetail',
name='updated_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='bookcopy',
name='book_detail',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'),
),
migrations.AddField(
model_name='bookcopy',
name='created_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='bookcopy',
name='updated_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_copy_updated_by', to=settings.AUTH_USER_MODEL),
),
]
| 1.671875 | 2 |
compliance_suite/exceptions/user_config_exception.py | alextsaihi/rnaget-compliance-suite | 1 | 380 | <filename>compliance_suite/exceptions/user_config_exception.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Module compliance_suite.exceptions.user_config_exception.py
This module contains class definition for user config file exceptions.
"""
class UserConfigException(Exception):
"""Exception for user config file-related errors"""
    pass
| 2.328125 | 2 |
2021/day15/aoc-2021-d15.py | bbornstein/aoc | 0 | 381 | <reponame>bbornstein/aoc
#!/usr/bin/env python3
# Advent of Code 2021, Day 15 (https://adventofcode.com/2021/day/15)
# Author: <NAME>
import collections
import heapq
Point = collections.namedtuple('Point', ['x', 'y'])
Point.__add__ = lambda self, q: Point(self[0] + q[0], self[1] + q[1])
class RiskMap:
def __init__ (self):
"""Creates a new (empty) risk-level map.
Individual risk-levels as specific positions are accessible via
`RiskMap[Point]`.
See also `RiskMap.load()`
"""
self._factor = 1
self._levels = [ ]
self._nrows = 0
self._ncols = 0
def __getitem__ (self, pos):
"""Returns the risk-level at position `pos`, i.e. `RiskMap[pos]`."""
if self._factor > 1:
risk = self._levels[pos.y % self._nrows][pos.x % self._ncols]
risk += pos.y // self._nrows
risk += pos.x // self._ncols
if risk > 9:
risk = risk % 9
else:
risk = self._levels[pos.y][pos.x]
return risk
@staticmethod
def load (filename):
"""Creates a new risk-level map from `filename`."""
rmap = RiskMap()
with open(filename) as stream:
for line in stream.readlines():
rmap.append([ int(c) for c in line.strip() ])
return rmap
@property
def ncols (self):
"""The number of columns in this `RiskMap`."""
return self._factor * self._ncols
@property
def nrows (self):
"""The number of rows in this `RiskMap`."""
return self._factor * self._nrows
def append (self, row):
"""Appends `row` to this `RiskMap`."""
if len(self._levels) == 0:
self._ncols = len(row)
self._levels.append(row)
self._nrows += 1
def neighbors (self, pos):
"""Iterable 4-neighbors (up, down, left, right) for `pos`ition."""
deltas = (0, -1), (0, 1), (-1, 0), (1, 0)
adjacent = ( pos + Point(*delta) for delta in deltas )
yield from ( p for p in adjacent if self.valid(p) )
def resize (self, factor):
"""Resizes this `RiskMap` by setting its expansion factor to `factor`
copies both horizontally and vertically.
"""
self._factor = factor
def valid (self, pos):
"""Indicates whether or not `pos` is valid (inside this `RiskMap`)."""
return pos.y in range(0, self.nrows) and pos.x in range(0, self.ncols)
def search (rmap, start, end):
"""Searches `RiskMap` `rmap` (breadth-first) to find the least risky
path from `start` to `end`. Returns the total risk of that path.
"""
risk = 0
queue = [ (rmap[p], p) for p in rmap.neighbors(start) ]
visited = { start }
heapq.heapify(queue)
while len(queue) > 0:
risk, current = heapq.heappop(queue)
if current == end:
break
for pos in rmap.neighbors(current):
if pos not in visited:
heapq.heappush( queue, ((rmap[pos] + risk), pos) )
visited.add(pos)
return risk
filename = 'aoc-2021-d15.txt'
rmap = RiskMap.load(filename)
start = Point(0, 0)
end = Point(rmap.ncols - 1, rmap.nrows - 1)
# Part 1
#
# Q: Lowest total risk of any path from the top left to the bottom right?
# A: Total Risk = 755
print(f'Part 1: Total Risk = {search(rmap, start, end):4}')
# Part 2
#
# Q: Lowest total risk of any path from the top left to the bottom right?
# A: Total Risk = 3016
rmap.resize(factor=5)
end = Point(rmap.ncols - 1, rmap.nrows - 1)
print(f'Part 2: Total Risk = {search(rmap, start, end)}')
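
# Quick self-check on a tiny hand-made grid (not the puzzle input): with rows
# 116 / 138 / 213 the cheapest path is down, down, right, right, entering cells
# with risks 1 + 2 + 1 + 3 = 7.
tiny = RiskMap()
for row in ("116", "138", "213"):
    tiny.append([int(c) for c in row])
assert search(tiny, Point(0, 0), Point(2, 2)) == 7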
| 3.328125 | 3 |
indico/modules/events/abstracts/compat.py | aiforrural/Digital-Events-Example | 1 | 382 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import redirect
from indico.modules.events.abstracts.models.abstracts import Abstract
from indico.web.flask.util import url_for
from indico.web.rh import RHSimple
@RHSimple.wrap_function
def compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False):
abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404()
return redirect(url_for('abstracts.' + endpoint, abstract, management=management))
| 1.78125 | 2 |
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/models/database_account_list_keys_result_py3.py | limingu/azure-cli-extensions | 2 | 383 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult
class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult):
"""The access keys for the given database account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar primary_readonly_master_key: Base 64 encoded value of the primary
read-only key.
:vartype primary_readonly_master_key: str
:ivar secondary_readonly_master_key: Base 64 encoded value of the
secondary read-only key.
:vartype secondary_readonly_master_key: str
:ivar primary_master_key: Base 64 encoded value of the primary read-write
key.
:vartype primary_master_key: str
:ivar secondary_master_key: Base 64 encoded value of the secondary
read-write key.
:vartype secondary_master_key: str
"""
_validation = {
'primary_readonly_master_key': {'readonly': True},
'secondary_readonly_master_key': {'readonly': True},
'primary_master_key': {'readonly': True},
'secondary_master_key': {'readonly': True},
}
_attribute_map = {
'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'},
'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'},
'primary_master_key': {'key': 'primaryMasterKey', 'type': 'str'},
'secondary_master_key': {'key': 'secondaryMasterKey', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(DatabaseAccountListKeysResult, self).__init__(**kwargs)
self.primary_master_key = None
self.secondary_master_key = None
| 1.96875 | 2 |
Google/google_books/scrape_google_books.py | dimitryzub/blog-posts-archive | 0 | 384 | from parsel import Selector
import requests, json, re
params = {
"q": "<NAME>",
"tbm": "bks",
"gl": "us",
"hl": "en"
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.87 Safari/537.36",
}
html = requests.get("https://www.google.com/search", params=params, headers=headers, timeout=30)
selector = Selector(text=html.text)
books_results = []
# https://regex101.com/r/mapBs4/1
book_thumbnails = re.findall(r"s=\\'data:image/jpg;base64,(.*?)\\'", str(selector.css("script").getall()), re.DOTALL)
for book_thumbnail, book_result in zip(book_thumbnails, selector.css(".Yr5TG")):
title = book_result.css(".DKV0Md::text").get()
link = book_result.css(".bHexk a::attr(href)").get()
displayed_link = book_result.css(".tjvcx::text").get()
snippet = book_result.css(".cmlJmd span::text").get()
author = book_result.css(".fl span::text").get()
author_link = f'https://www.google.com/search{book_result.css(".N96wpd .fl::attr(href)").get()}'
date_published = book_result.css(".fl+ span::text").get()
preview_link = book_result.css(".R1n8Q a.yKioRe:nth-child(1)::attr(href)").get()
more_editions_link = book_result.css(".R1n8Q a.yKioRe:nth-child(2)::attr(href)").get()
books_results.append({
"title": title,
"link": link,
"displayed_link": displayed_link,
"snippet": snippet,
"author": author,
"author_link": author_link,
"date_published": date_published,
"preview_link": preview_link,
"more_editions_link": f"https://www.google.com{more_editions_link}" if more_editions_link is not None else None,
"thumbnail": bytes(bytes(book_thumbnail, "ascii").decode("unicode-escape"), "ascii").decode("unicode-escape")
})
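
# The `json` import above is otherwise unused; one plausible way to finish the
# script (an addition, not part of the original) is to dump the scraped results:
print(json.dumps(books_results, indent=2, ensure_ascii=False))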
| 2.8125 | 3 |
Python/Higher-Or-Lower/hol/__init__.py | AustinTSchaffer/DailyProgrammer | 1 | 385 | <reponame>AustinTSchaffer/DailyProgrammer<filename>Python/Higher-Or-Lower/hol/__init__.py
r"""
Contains classes and methods that can be used when simulating the game
Higher-or-Lower and performing statistical analysis on different games.
"""
from hol import (
cards,
constants,
)
from hol._hol import (
generate_all_games,
should_pick_higher,
is_a_winning_game,
generate_win_statistics,
)
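
# Optional explicit export list (an addition, not in the original module); it
# simply re-lists the names imported above so that `from hol import *` exposes
# exactly this public API.
__all__ = [
    "cards",
    "constants",
    "generate_all_games",
    "should_pick_higher",
    "is_a_winning_game",
    "generate_win_statistics",
]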
| 2.640625 | 3 |
Lib/hTools2/dialogs/glyphs/slide.py | gferreira/hTools2 | 11 | 386 | # [h] slide selected glyphs
from mojo.roboFont import CurrentFont, CurrentGlyph, version
from vanilla import *
from hTools2 import hDialog
from hTools2.modules.fontutils import get_full_name, get_glyphs
from hTools2.modules.messages import no_font_open, no_glyph_selected
class slideGlyphsDialog(hDialog):
'''A dialog to slide the selected glyphs vertically and/or horizontally.
.. image:: imgs/glyphs/slide.png
'''
_moveX = 0
_moveY = 0
_xMax = 1000
_xMin = -1000
_yMax = 500
_yMin = -500
font = None
font_name = '(no font selected)'
def __init__(self):
# window
self.title = "slide"
self.button_width = 70
self.column_1 = 20
self.column_2 = 240
self.width = self.column_1 + self.column_2 + self.button_width + self.padding_x*3
self.height = self.text_height*3 + self.padding_y*4
self.w = HUDFloatingWindow((self.width, self.height), self.title)
x = self.padding_x
y = self.padding_y
# current font name
self.w.box = Box(
(x, y, self.column_1 + self.column_2, self.text_height))
self.w.box.text = TextBox(
(5, 0, self.column_1 + self.column_2, self.text_height),
self.font_name,
sizeStyle=self.size_style)
x += (self.column_2 + self.column_1 + self.padding_x)
self.w.button_update_font = SquareButton(
(x, y, self.button_width, self.text_height),
"update",
callback=self.update_font_callback,
sizeStyle=self.size_style)
# x slider
x = self.padding_x
y += self.text_height + self.padding_y
self.w.x_label = TextBox(
(x, y + 5, self.column_1, self.text_height),
"x",
sizeStyle=self.size_style)
x += self.column_1
self.w.x_slider = Slider(
(x, y, self.column_2, self.text_height),
value=0,
maxValue=self._xMax,
minValue=self._xMin,
callback=self.slide_callback,
sizeStyle=self.size_style)
x += (self.column_2 + self.padding_x)
self.w.button_restore_x = SquareButton(
(x, y, self.button_width, self.text_height),
"reset x",
callback=self.restore_x_callback,
sizeStyle=self.size_style)
# y slider
x = self.padding_x
y += (self.text_height + self.padding_y)
self.w.y_label = TextBox(
(x, y + 5, self.column_1, self.text_height),
"y",
sizeStyle=self.size_style)
x += self.column_1
self.w.y_slider = Slider(
(x, y, self.column_2, self.text_height),
value=0,
maxValue=self._yMax,
minValue=self._yMin,
callback=self.slide_callback,
sizeStyle=self.size_style)
x += (self.column_2 + self.padding_x)
self.w.button_restore_y = SquareButton(
(x, y, self.button_width, self.text_height),
"reset y",
callback=self.restore_y_callback,
sizeStyle=self.size_style)
# open
self.w.open()
self.update_font()
# callbacks
def restore_x(self):
self._moveX = 0
self.w.x_slider.set(self._moveX)
def restore_y(self):
self._moveY = 0
self.w.y_slider.set(self._moveY)
def restore_x_callback(self, sender):
self.restore_x()
def restore_y_callback(self, sender):
self.restore_y()
def update_font(self):
self.font = CurrentFont()
if self.font is not None:
self.w.box.text.set(get_full_name(self.font))
self.set_defaults()
self.restore_x()
self.restore_y()
else:
print no_font_open
def set_defaults(self):
self._xMax = self.font.info.unitsPerEm
self._yMax = self.font.info.unitsPerEm / 2
self._xMin = -self._xMax
self._yMin = -self._yMax
def update_font_callback(self, sender):
self.update_font()
def slide_callback(self, sender):
xValue = self.w.x_slider.get()
yValue = self.w.y_slider.get()
x = self._moveX - xValue
y = self._moveY - yValue
self._moveX = xValue
self._moveY = yValue
glyph_names = get_glyphs(self.font)
if len(glyph_names) > 0:
for glyph_name in glyph_names:
# RF 2.0
if version[0] == '2':
self.font[glyph_name].moveBy((-x, -y))
# RF 1.8.X
else:
self.font[glyph_name].move((-x, -y))
else:
print no_glyph_selected
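
# Hedged usage note: inside RoboFont this dialog is normally opened from a menu
# script with no arguments, e.g.
#
#   slideGlyphsDialog()
#
# which picks up CurrentFont() and slides the selected glyphs as the x/y sliders move.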
| 2.703125 | 3 |
werobot/utils.py | lilac/WeRobot | 2 | 387 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import io
import json
import os
import random
import re
import string
import time
from functools import wraps
from hashlib import sha1
import six
try:
from secrets import choice
except ImportError:
from random import choice
string_types = (six.string_types, six.text_type, six.binary_type)
re_type = type(re.compile("regex_test"))
def get_signature(token, timestamp, nonce, *args):
sign = [token, timestamp, nonce] + list(args)
sign.sort()
sign = to_binary(''.join(sign))
return sha1(sign).hexdigest()
def check_signature(token, timestamp, nonce, signature):
if not (token and timestamp and nonce and signature):
return False
sign = get_signature(token, timestamp, nonce)
return sign == signature
def check_token(token):
return re.match('^[A-Za-z0-9]{3,32}$', token)
def cached_property(method):
prop_name = '_{}'.format(method.__name__)
@wraps(method)
def wrapped_func(self, *args, **kwargs):
if not hasattr(self, prop_name):
setattr(self, prop_name, method(self, *args, **kwargs))
return getattr(self, prop_name)
return property(wrapped_func)
def to_text(value, encoding="utf-8"):
if isinstance(value, six.text_type):
return value
if isinstance(value, six.binary_type):
return value.decode(encoding)
return six.text_type(value)
def to_binary(value, encoding="utf-8"):
if isinstance(value, six.binary_type):
return value
if isinstance(value, six.text_type):
return value.encode(encoding)
return six.binary_type(value)
def is_string(value):
return isinstance(value, string_types)
def byte2int(s, index=0):
"""Get the ASCII int value of a character in a string.
:param s: a string
:param index: the position of desired character
:return: ASCII int value
"""
if six.PY2:
return ord(s[index])
return s[index]
def generate_token(length=''):
if not length:
length = random.randint(3, 32)
length = int(length)
assert 3 <= length <= 32
letters = string.ascii_letters + string.digits
return ''.join(choice(letters) for _ in range(length))
def json_loads(s):
s = to_text(s)
return json.loads(s)
def json_dumps(d):
return json.dumps(d)
def pay_sign_dict(
appid,
pay_sign_key,
add_noncestr=True,
add_timestamp=True,
add_appid=True,
**kwargs
):
"""
支付参数签名
"""
assert pay_sign_key, "PAY SIGN KEY IS EMPTY"
if add_appid:
kwargs.update({'appid': appid})
if add_noncestr:
kwargs.update({'noncestr': generate_token()})
if add_timestamp:
kwargs.update({'timestamp': int(time.time())})
params = kwargs.items()
_params = [
(k.lower(), v) for k, v in kwargs.items() if k.lower() != "appid"
]
_params += [('appid', appid), ('appkey', pay_sign_key)]
_params.sort()
sign = '&'.join(["%s=%s" % (str(p[0]), str(p[1]))
for p in _params]).encode("utf-8")
sign = sha1(sign).hexdigest()
sign_type = 'SHA1'
return dict(params), sign, sign_type
def make_error_page(url):
with io.open(
os.path.join(os.path.dirname(__file__), 'contrib/error.html'),
'r',
encoding='utf-8'
) as error_page:
return error_page.read().replace('{url}', url)
def is_regex(value):
return isinstance(value, re_type)
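
# Hedged usage sketch of the signature helpers above (values are made up):
#
#   token = generate_token(16)
#   timestamp, nonce = '1409735669', '1320562132'
#   signature = get_signature(token, timestamp, nonce)
#   assert check_signature(token, timestamp, nonce, signature)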
| 2.203125 | 2 |
tensorflow/python/ops/standard_ops.py | ashutom/tensorflow-upstream | 8 | 388 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform as _platform
import sys as _sys
from tensorflow.python import autograph
from tensorflow.python.training.experimental import loss_scaling_gradient_tape
# pylint: disable=g-bad-import-order
# Imports the following modules so that @RegisterGradient get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import cudnn_rnn_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import random_grad
from tensorflow.python.ops import rnn_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.eager import wrap_function
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.batch_ops import *
from tensorflow.python.ops.critical_section_ops import *
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.logging_ops import timestamp
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.manip_ops import *
from tensorflow.python.ops.math_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.proto_ops import *
from tensorflow.python.ops.ragged import ragged_dispatch as _ragged_dispatch
from tensorflow.python.ops.ragged import ragged_operators as _ragged_operators
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sort_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_min
from tensorflow.python.ops.state_ops import scatter_max
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.stateless_random_ops import *
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.variables import *
from tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map
# pylint: disable=g-import-not-at-top
if _platform.system() == "Windows":
from tensorflow.python.compiler.tensorrt import trt_convert_windows as trt
else:
from tensorflow.python.compiler.tensorrt import trt_convert as trt
# pylint: enable=g-import-not-at-top
# pylint: enable=wildcard-import
# pylint: enable=g-bad-import-order
# These modules were imported to set up RaggedTensor operators and dispatchers:
del _ragged_dispatch, _ragged_operators
| 1.632813 | 2 |
src/tango_scaling_test/TestDeviceServer/__main__.py | rtobar/sdp-prototype | 0 | 389 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test Tango device server for use with scaling tests."""
import sys
import time
import argparse
import tango
from tango.server import run
from TestDevice import TestDevice
def init_callback():
"""Report server start up times.
This callback is executed post server initialisation.
"""
# pylint: disable=global-statement
global START_TIME
db = tango.Database()
elapsed = time.time() - START_TIME
list_devices()
exported_devices = list(db.get_device_exported('test/*'))
num_devices = len(exported_devices)
    with open('results.txt', 'a') as results_file:
        results_file.write(',{},{}\n'.format(elapsed, elapsed / num_devices))
print('>> Time taken to start devices: {:.4f} s ({:.4f} s/dev)'
.format(elapsed, elapsed / num_devices))
def delete_server():
"""Delete the TestDeviceServer from the tango db."""
db = tango.Database()
db.set_timeout_millis(50000)
server = 'TestDeviceServer/1'
server_list = list(db.get_server_list(server))
if server in server_list:
start_time = time.time()
db.delete_server('TestDeviceServer/1')
print('- Delete server: {:.4f} s'.format(time.time() - start_time))
def register(num_devices):
"""Register devices in the tango db."""
db = tango.Database()
device_info = tango.DbDevInfo()
device_info.server = 'TestDeviceServer/1'
# pylint: disable=protected-access
device_info._class = 'TestDevice'
start_time = time.time()
for device_id in range(num_devices):
device_info.name = 'test/test_device/{:05d}'.format(device_id)
db.add_device(device_info)
elapsed = time.time() - start_time
    with open('results.txt', 'a') as results_file:
        results_file.write('{},{},{}'.format(num_devices, elapsed, elapsed / num_devices))
print('- Register devices: {:.4f} s ({:.4f} s/device)'
.format(elapsed, elapsed / num_devices))
def list_devices():
"""List tango devices associated with the TestDeviceServer."""
db = tango.Database()
server_instance = 'TestDeviceServer/1'
device_class = 'TestDevice'
devices = list(db.get_device_name(server_instance, device_class))
print('- No. registered devices: {}'.format(len(devices)))
exported_devices = list(db.get_device_exported('test/*'))
print('- No. running devices: {}'.format(len(exported_devices)))
def main(args=None, **kwargs):
"""Run (start) the device server."""
run([TestDevice], verbose=True, msg_stream=sys.stdout,
post_init_callback=init_callback, raises=False,
args=args, **kwargs)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Device registration time.')
PARSER.add_argument('num_devices', metavar='N', type=int,
default=1, nargs='?',
help='Number of devices to start.')
ARGS = PARSER.parse_args()
delete_server()
time.sleep(0.5)
list_devices()
print('* Registering {} devices'.format(ARGS.num_devices))
register(ARGS.num_devices)
list_devices()
print('* Starting server ...')
sys.argv = ['TestDeviceServer', '1', '-v4']
START_TIME = time.time()
main()
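# Editor's note (added for clarity): each run of this script appends one CSV row
# to results.txt. register() writes "num_devices,register_elapsed,register_per_device"
# without a trailing newline, and init_callback() later appends
# ",startup_elapsed,startup_per_device\n", so a complete row reads:
#   num_devices, registration time, registration time per device,
#   startup time, startup time per device   (all times in seconds)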
| 2.484375 | 2 |
test/test_pipeline.py | ParikhKadam/haystack | 1 | 390 | from pathlib import Path
import pytest
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, \
DocumentSearchPipeline, RootNode
from haystack.retriever.dense import DensePassageRetriever
from haystack.retriever.sparse import ElasticsearchRetriever
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_load_yaml(document_store_with_docs):
# test correct load of indexing pipeline from yaml
pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"),
pipeline_name="indexing_pipeline")
pipeline.run(file_path=Path("samples/pdf/sample_pdf_1.pdf"), top_k_retriever=10, top_k_reader=3)
# test correct load of query pipeline from yaml
pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="query_pipeline")
prediction = pipeline.run(query="Who made the PDF specification?", top_k_retriever=10, top_k_reader=3)
assert prediction["query"] == "Who made the PDF specification?"
assert prediction["answers"][0]["answer"] == "Adobe Systems"
# test invalid pipeline name
with pytest.raises(Exception):
Pipeline.load_from_yaml(path=Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="invalid")
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever_with_docs, document_store_with_docs", [("elasticsearch", "elasticsearch")], indirect=True
)
def test_graph_creation(reader, retriever_with_docs, document_store_with_docs):
pipeline = Pipeline()
pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["Query"])
with pytest.raises(AssertionError):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.output_2"])
with pytest.raises(AssertionError):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.wrong_edge_label"])
with pytest.raises(Exception):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["InvalidNode"])
with pytest.raises(Exception):
pipeline = Pipeline()
pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["InvalidNode"])
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=3)
assert prediction is not None
assert prediction["query"] == "Who lives in Berlin?"
assert prediction["answers"][0]["answer"] == "Carla"
assert prediction["answers"][0]["probability"] <= 1
assert prediction["answers"][0]["probability"] >= 0
assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
assert len(prediction["answers"]) == 3
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_offsets(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=5)
assert prediction["answers"][0]["offset_start"] == 11
assert prediction["answers"][0]["offset_end"] == 16
start = prediction["answers"][0]["offset_start"]
end = prediction["answers"][0]["offset_end"]
assert prediction["answers"][0]["context"][start:end] == prediction["answers"][0]["answer"]
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_single_result(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
query = "testing finder"
prediction = pipeline.run(query=query, top_k_retriever=1, top_k_reader=1)
assert prediction is not None
assert len(prediction["answers"]) == 1
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever,document_store",
[("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
indirect=True,
)
def test_faq_pipeline(retriever, document_store):
documents = [
{"text": "How to test module-1?", 'meta': {"source": "wiki1", "answer": "Using tests for module-1"}},
{"text": "How to test module-2?", 'meta': {"source": "wiki2", "answer": "Using tests for module-2"}},
{"text": "How to test module-3?", 'meta': {"source": "wiki3", "answer": "Using tests for module-3"}},
{"text": "How to test module-4?", 'meta': {"source": "wiki4", "answer": "Using tests for module-4"}},
{"text": "How to test module-5?", 'meta': {"source": "wiki5", "answer": "Using tests for module-5"}},
]
document_store.write_documents(documents)
document_store.update_embeddings(retriever)
pipeline = FAQPipeline(retriever=retriever)
output = pipeline.run(query="How to test this?", top_k_retriever=3)
assert len(output["answers"]) == 3
assert output["answers"][0]["query"].startswith("How to")
assert output["answers"][0]["answer"].startswith("Using tests")
if isinstance(document_store, ElasticsearchDocumentStore):
output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
assert len(output["answers"]) == 1
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever,document_store",
[("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
indirect=True,
)
def test_document_search_pipeline(retriever, document_store):
documents = [
{"text": "Sample text for document-1", 'meta': {"source": "wiki1"}},
{"text": "Sample text for document-2", 'meta': {"source": "wiki2"}},
{"text": "Sample text for document-3", 'meta': {"source": "wiki3"}},
{"text": "Sample text for document-4", 'meta': {"source": "wiki4"}},
{"text": "Sample text for document-5", 'meta': {"source": "wiki5"}},
]
document_store.write_documents(documents)
document_store.update_embeddings(retriever)
pipeline = DocumentSearchPipeline(retriever=retriever)
output = pipeline.run(query="How to test this?", top_k_retriever=4)
assert len(output.get('documents', [])) == 4
if isinstance(document_store, ElasticsearchDocumentStore):
output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
assert len(output["documents"]) == 1
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator):
base_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
pipeline = TranslationWrapperPipeline(
input_translator=de_to_en_translator,
output_translator=en_to_de_translator,
pipeline=base_pipeline
)
prediction = pipeline.run(query="Wer lebt in Berlin?", top_k_retriever=10, top_k_reader=3)
assert prediction is not None
assert prediction["query"] == "Wer lebt in Berlin?"
assert "Carla" in prediction["answers"][0]["answer"]
assert prediction["answers"][0]["probability"] <= 1
assert prediction["answers"][0]["probability"] >= 0
assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
def test_join_document_pipeline(document_store_with_docs, reader):
es = ElasticsearchRetriever(document_store=document_store_with_docs)
dpr = DensePassageRetriever(
document_store=document_store_with_docs,
query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
use_gpu=False,
)
document_store_with_docs.update_embeddings(dpr)
query = "Where does Carla lives?"
# test merge without weights
join_node = JoinDocuments(join_mode="merge")
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert len(results["documents"]) == 3
# test merge with weights
join_node = JoinDocuments(join_mode="merge", weights=[1000, 1], top_k_join=2)
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert results["documents"][0].score > 1000
assert len(results["documents"]) == 2
# test concatenate
join_node = JoinDocuments(join_mode="concatenate")
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert len(results["documents"]) == 3
# test join_node with reader
join_node = JoinDocuments()
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
p.add_node(component=reader, name="Reader", inputs=["Join"])
results = p.run(query=query)
assert results["answers"][0]["answer"] == "Berlin"
def test_parallel_paths_in_pipeline_graph():
class A(RootNode):
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_1"
class B(RootNode):
def run(self, **kwargs):
kwargs["output"] += "B"
return kwargs, "output_1"
class C(RootNode):
def run(self, **kwargs):
kwargs["output"] += "C"
return kwargs, "output_1"
class D(RootNode):
def run(self, **kwargs):
kwargs["output"] += "D"
return kwargs, "output_1"
class E(RootNode):
def run(self, **kwargs):
kwargs["output"] += "E"
return kwargs, "output_1"
class JoinNode(RootNode):
def run(self, **kwargs):
kwargs["output"] = kwargs["inputs"][0]["output"] + kwargs["inputs"][1]["output"]
return kwargs, "output_1"
pipeline = Pipeline()
pipeline.add_node(name="A", component=A(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A"])
pipeline.add_node(name="C", component=C(), inputs=["B"])
pipeline.add_node(name="E", component=E(), inputs=["C"])
pipeline.add_node(name="D", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E"])
output = pipeline.run(query="test")
assert output["output"] == "ABDABCE"
pipeline = Pipeline()
pipeline.add_node(name="A", component=A(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A"])
pipeline.add_node(name="C", component=C(), inputs=["B"])
pipeline.add_node(name="D", component=D(), inputs=["B"])
pipeline.add_node(name="E", component=JoinNode(), inputs=["C", "D"])
output = pipeline.run(query="test")
assert output["output"] == "ABCABD"
def test_parallel_paths_in_pipeline_graph_with_branching():
class AWithOutput1(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_1"
class AWithOutput2(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_2"
class AWithOutputAll(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_all"
class B(RootNode):
def run(self, **kwargs):
kwargs["output"] += "B"
return kwargs, "output_1"
class C(RootNode):
def run(self, **kwargs):
kwargs["output"] += "C"
return kwargs, "output_1"
class D(RootNode):
def run(self, **kwargs):
kwargs["output"] += "D"
return kwargs, "output_1"
class E(RootNode):
def run(self, **kwargs):
kwargs["output"] += "E"
return kwargs, "output_1"
class JoinNode(RootNode):
def run(self, **kwargs):
if kwargs.get("inputs"):
kwargs["output"] = ""
for input_dict in kwargs["inputs"]:
kwargs["output"] += (input_dict["output"])
return kwargs, "output_1"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutput1(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "ABEABD"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutput2(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "AC"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutputAll(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "ACABEABD"
| 2.25 | 2 |
src/telr/TELR_assembly.py | dominik-handler/TELR | 22 | 391 | <gh_stars>10-100
import sys
import os
import subprocess
import shutil
import time
import logging
from Bio import SeqIO
from multiprocessing import Pool
import pysam
from telr.TELR_utility import mkdir, check_exist, format_time
def get_local_contigs(
assembler,
polisher,
contig_dir,
vcf_parsed,
out,
sample_name,
bam,
raw_reads,
thread,
presets,
polish_iterations,
):
"""Perform local assembly using reads from parsed VCF file in parallel"""
# Prepare reads used for local assembly and polishing
sv_reads_dir = os.path.join(out, "sv_reads")
try:
prep_assembly_inputs(
vcf_parsed, out, sample_name, bam, raw_reads, sv_reads_dir, read_type="sv"
)
except Exception as e:
print(e)
print("Prepare local assembly input data failed, exiting...")
sys.exit(1)
mkdir(contig_dir)
k = 0
asm_pa_list = []
with open(vcf_parsed, "r") as input:
for line in input:
entry = line.replace("\n", "").split("\t")
contig_name = "_".join([entry[0], entry[1], entry[2]])
# rename variant reads
sv_reads = sv_reads_dir + "/contig" + str(k)
sv_reads_rename = sv_reads_dir + "/" + contig_name + ".reads.fa"
os.rename(sv_reads, sv_reads_rename)
thread_asm = 1
asm_pa = [
sv_reads_rename,
contig_dir,
contig_name,
thread_asm,
presets,
assembler,
polisher,
polish_iterations,
]
asm_pa_list.append(asm_pa)
k = k + 1
# run assembly in parallel
logging.info("Perform local assembly of non-reference TE loci...")
start_time = time.time()
try:
pool = Pool(processes=thread)
contig_list = pool.map(run_assembly_polishing, asm_pa_list)
pool.close()
pool.join()
except Exception as e:
print(e)
print("Local assembly failed, exiting...")
sys.exit(1)
proc_time = time.time() - start_time
# merge all contigs
assembly_passed_loci = set()
merged_contigs = os.path.join(out, sample_name + ".contigs.fa")
with open(merged_contigs, "w") as merged_output_handle:
for contig in contig_list:
if check_exist(contig):
contig_name = os.path.basename(contig).replace(".cns.fa", "")
assembly_passed_loci.add(contig_name)
parsed_contig = os.path.join(contig_dir, contig_name + ".cns.ctg1.fa")
with open(contig, "r") as input:
records = SeqIO.parse(input, "fasta")
for record in records:
if record.id == "ctg1" or record.id == "contig_1":
record.id = contig_name
record.description = "len=" + str(len(record.seq))
SeqIO.write(record, merged_output_handle, "fasta")
with open(parsed_contig, "w") as parsed_output_handle:
SeqIO.write(record, parsed_output_handle, "fasta")
logging.info("Local assembly finished in " + format_time(proc_time))
return merged_contigs, assembly_passed_loci
def run_assembly_polishing(args):
reads = args[0]
asm_dir = args[1]
contig_name = args[2]
thread = args[3]
presets = args[4]
assembler = args[5]
polisher = args[6]
polish_iterations = args[7]
# run assembly
if assembler == "wtdbg2":
asm_cns = run_wtdbg2_assembly(reads, asm_dir, contig_name, thread, presets)
else:
asm_cns = run_flye_assembly(reads, asm_dir, contig_name, thread, presets)
if not check_exist(asm_cns):
print("assembly failed")
return None
# run polishing
if polish_iterations > 0:
if polisher == "wtdbg2":
asm_cns = run_wtdbg2_polishing(
asm_cns, reads, thread, polish_iterations, presets
)
else:
asm_cns = run_flye_polishing(
asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
)
if check_exist(asm_cns):
return asm_cns
else:
return None
def run_flye_polishing(
asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
):
"""Run Flye polishing"""
if presets == "pacbio":
presets_flye = "--pacbio-raw"
else:
presets_flye = "--nano-raw"
tmp_out_dir = os.path.join(asm_dir, contig_name)
mkdir(tmp_out_dir)
try:
subprocess.call(
[
"flye",
"--polish-target",
asm_cns,
presets_flye,
reads,
"--out-dir",
tmp_out_dir,
"--thread",
str(thread),
"--iterations",
str(polish_iterations),
]
)
except Exception as e:
print(e)
print("Polishing failed, exiting...")
return None
# rename contig file
polished_contig = os.path.join(
tmp_out_dir, "polished_" + str(polish_iterations) + ".fasta"
)
if check_exist(polished_contig):
os.rename(polished_contig, asm_cns)
shutil.rmtree(tmp_out_dir)
return asm_cns
else:
return None
def run_wtdbg2_polishing(asm_cns, reads, threads, polish_iterations, presets):
"""Run wtdbg2 polishing"""
if presets == "pacbio":
presets_minimap2 = "map-pb"
else:
presets_minimap2 = "map-ont"
# polish consensus
threads = str(min(threads, 4))
bam = asm_cns + ".bam"
k = 0
while True:
# align reads to contigs
command = (
"minimap2 -t "
+ threads
+ " -ax "
+ presets_minimap2
+ " -r2k "
+ asm_cns
+ " "
+ reads
+ " | samtools sort -@"
+ threads
+ " > "
+ bam
)
try:
subprocess.run(
command,
shell=True,
timeout=300,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
except subprocess.TimeoutExpired:
print("fail to map reads to contig: " + asm_cns)
return
# run wtpoa-cns to get polished contig
cns_tmp = asm_cns + ".tmp"
command = (
"samtools view -F0x900 "
+ bam
+ " | wtpoa-cns -t "
+ threads
+ " -d "
+ asm_cns
+ " -i - -fo "
+ cns_tmp
)
try:
subprocess.run(
command,
shell=True,
timeout=300,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
except subprocess.TimeoutExpired:
print("fail to polish contig: " + asm_cns)
return
if check_exist(cns_tmp):
os.rename(cns_tmp, asm_cns)
os.remove(bam)
else:
break
k = k + 1
if k >= polish_iterations:
break
if check_exist(asm_cns):
return asm_cns
else:
print("polishing failed for " + asm_cns + "\n")
return None
def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets):
"""Run Flye assembly"""
if presets == "pacbio":
presets_flye = "--pacbio-raw"
else:
presets_flye = "--nano-raw"
tmp_out_dir = os.path.join(asm_dir, contig_name)
mkdir(tmp_out_dir)
try:
subprocess.call(
[
"flye",
presets_flye,
sv_reads,
"--out-dir",
tmp_out_dir,
"--thread",
str(thread),
"--iterations",
"0",
]
)
except Exception as e:
print(e)
print("Assembly failed, exiting...")
return
# rename contigs
contig_path = os.path.join(tmp_out_dir, "assembly.fasta")
contig_path_new = os.path.join(asm_dir, contig_name + ".cns.fa")
if check_exist(contig_path):
os.rename(contig_path, contig_path_new)
# remove tmp files
shutil.rmtree(tmp_out_dir)
return contig_path_new
else:
print("assembly failed")
return None
def run_wtdbg2_assembly(sv_reads, asm_dir, contig_name, thread, presets):
"""Run wtdbg2 assembly"""
if presets == "pacbio":
presets_wtdbg2 = "rs"
else:
presets_wtdbg2 = "ont"
prefix = sv_reads.replace(".reads.fa", "")
try:
subprocess.run(
[
"wtdbg2",
"-x",
presets_wtdbg2,
"-q",
"-AS",
"1",
"-g",
"30k",
"-t",
str(thread),
"-i",
sv_reads,
"-fo",
prefix,
],
timeout=300,
)
except subprocess.TimeoutExpired:
print("fail to build contig layout for contig: " + contig_name)
return
except Exception as e:
print(e)
print("wtdbg2 failed, exiting...")
return None
# derive consensus
contig_layout = prefix + ".ctg.lay.gz"
if check_exist(contig_layout):
cns_thread = str(min(thread, 4))
consensus = prefix + ".cns.fa"
try:
subprocess.run(
[
"wtpoa-cns",
"-q",
"-t",
cns_thread,
"-i",
contig_layout,
"-fo",
consensus,
],
timeout=300,
)
except subprocess.TimeoutExpired:
print("fail to assemble contig: " + contig_name)
return None
if check_exist(consensus):
consensus_rename = os.path.join(asm_dir, contig_name + ".cns.fa")
os.rename(consensus, consensus_rename)
return consensus_rename
else:
return None
def prep_assembly_inputs(
vcf_parsed, out, sample_name, bam, raw_reads, reads_dir, read_type="sv"
):
"""Prepare reads for local assembly"""
# logging.info("Prepare reads for local assembly")
if read_type == "sv": # TODO: figure out what this does
# extract read IDs
read_ids = os.path.join(out, sample_name + ".id")
with open(vcf_parsed, "r") as input, open(read_ids, "w") as output:
for line in input:
entry = line.replace("\n", "").split("\t")
read_list = entry[8].split(",")
for read in read_list:
output.write(read + "\n")
else: # TODO: think about using this for assembly, filter for cigar reads
window = 1000
samfile = pysam.AlignmentFile(bam, "rb")
read_ids = os.path.join(out, sample_name + ".id")
vcf_parsed_new = vcf_parsed + ".new"
with open(vcf_parsed, "r") as input, open(read_ids, "w") as output, open(
vcf_parsed_new, "w"
) as VCF:
for line in input:
entry = line.replace("\n", "").split("\t")
# get sniffles read list
read_list = entry[8].split(",")
reads_sniffles = set(read_list)
ins_chr = entry[0]
ins_breakpoint = round((int(entry[1]) + int(entry[2])) / 2)
start = ins_breakpoint - window
end = ins_breakpoint + window
reads = set()
# coverage = 0
for read in samfile.fetch(ins_chr, start, end):
reads.add(read.query_name)
for read in reads:
output.write(read + "\n")
# write
out_line = line.replace("\n", "") + "\t" + str(len(reads))
VCF.write(out_line + "\n")
vcf_parsed = vcf_parsed_new
# generate unique ID list
read_ids_unique = read_ids + ".unique"
command = "cat " + read_ids + " | sort | uniq"
with open(read_ids_unique, "w") as output:
subprocess.call(command, stdout=output, shell=True)
# filter raw reads using read list
subset_fa = os.path.join(out, sample_name + ".subset.fa")
command = "seqtk subseq " + raw_reads + " " + read_ids_unique + " | seqtk seq -a"
with open(subset_fa, "w") as output:
subprocess.call(command, stdout=output, shell=True)
# reorder reads
subset_fa_reorder = out + "/" + sample_name + ".subset.reorder.fa"
extract_reads(subset_fa, read_ids, subset_fa_reorder)
# separate reads into multiple files, using csplit
mkdir(reads_dir)
csplit_prefix = reads_dir + "/contig"
m = []
k = 1
with open(vcf_parsed, "r") as input:
for line in input:
entry = line.replace("\n", "").split("\t")
if read_type == "sv":
k = k + 2 * (len(entry[8].split(",")))
else:
k = k + 2 * int(entry[14])
m.append(k)
if len(m) == 1:
subprocess.call(["cp", subset_fa_reorder, reads_dir + "/contig0"])
elif len(m) == 0:
print("No insertion detected, exiting...")
else:
m = m[:-1]
index = " ".join(str(i) for i in m)
command = (
"csplit -s -f " + csplit_prefix + " -n 1 " + subset_fa_reorder + " " + index
)
subprocess.call(command, shell=True)
# remove tmp files
os.remove(read_ids)
os.remove(read_ids_unique)
os.remove(subset_fa)
os.remove(subset_fa_reorder)
def extract_reads(reads, id_list, out):
    """Extract reads from a fasta file using a read ID list"""
    record_dict = SeqIO.index(reads, "fasta")
    with open(out, "wb") as output_handle, open(id_list, "r") as ID:
        for entry in ID:
            entry = entry.replace("\n", "")
            output_handle.write(record_dict.get_raw(entry))
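# Editor's note (summary of the flow implemented above, added for clarity):
#   get_local_contigs()
#     1. prep_assembly_inputs(): collect SV-supporting reads per locus into
#        per-contig FASTA files (via seqtk and csplit)
#     2. run_assembly_polishing() per locus in a multiprocessing Pool:
#        run_wtdbg2_assembly() or run_flye_assembly(), then optionally
#        run_wtdbg2_polishing() or run_flye_polishing()
#     3. merge the per-locus consensus contigs into <sample_name>.contigs.fa
#        and return the set of loci whose assembly succeeded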
| 1.960938 | 2 |
pygmt/tests/test_clib.py | aliciaha1997/pygmt | 0 | 392 | # pylint: disable=protected-access
"""
Test the wrappers for the C API.
"""
import os
from contextlib import contextmanager
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
import xarray as xr
from packaging.version import Version
from pygmt import Figure, clib
from pygmt.clib.conversion import dataarray_to_matrix
from pygmt.clib.session import FAMILIES, VIAS
from pygmt.exceptions import (
GMTCLibError,
GMTCLibNoSessionError,
GMTInvalidInput,
GMTVersionError,
)
from pygmt.helpers import GMTTempFile
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
with clib.Session() as _lib:
gmt_version = Version(_lib.info["version"])
@contextmanager
def mock(session, func, returns=None, mock_func=None):
"""
Mock a GMT C API function to make it always return a given value.
Used to test that exceptions are raised when API functions fail by
producing a NULL pointer as output or non-zero status codes.
Needed because it's not easy to get some API functions to fail without
inducing a Segmentation Fault (which is a good thing because libgmt usually
only fails with errors).
"""
if mock_func is None:
def mock_api_function(*args): # pylint: disable=unused-argument
"""
A mock GMT API function that always returns a given value.
"""
return returns
mock_func = mock_api_function
get_libgmt_func = session.get_libgmt_func
def mock_get_libgmt_func(name, argtypes=None, restype=None):
"""
Return our mock function.
"""
if name == func:
return mock_func
return get_libgmt_func(name, argtypes, restype)
setattr(session, "get_libgmt_func", mock_get_libgmt_func)
yield
setattr(session, "get_libgmt_func", get_libgmt_func)
def test_getitem():
"""
Test that I can get correct constants from the C lib.
"""
ses = clib.Session()
assert ses["GMT_SESSION_EXTERNAL"] != -99999
assert ses["GMT_MODULE_CMD"] != -99999
assert ses["GMT_PAD_DEFAULT"] != -99999
assert ses["GMT_DOUBLE"] != -99999
with pytest.raises(GMTCLibError):
ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement
def test_create_destroy_session():
"""
Test that create and destroy session are called without errors.
"""
# Create two session and make sure they are not pointing to the same memory
session1 = clib.Session()
session1.create(name="test_session1")
assert session1.session_pointer is not None
session2 = clib.Session()
session2.create(name="test_session2")
assert session2.session_pointer is not None
assert session2.session_pointer != session1.session_pointer
session1.destroy()
session2.destroy()
# Create and destroy a session twice
ses = clib.Session()
for __ in range(2):
with pytest.raises(GMTCLibNoSessionError):
ses.session_pointer # pylint: disable=pointless-statement
ses.create("session1")
assert ses.session_pointer is not None
ses.destroy()
with pytest.raises(GMTCLibNoSessionError):
ses.session_pointer # pylint: disable=pointless-statement
def test_create_session_fails():
"""
Check that an exception is raised when failing to create a session.
"""
ses = clib.Session()
with mock(ses, "GMT_Create_Session", returns=None):
with pytest.raises(GMTCLibError):
ses.create("test-session-name")
# Should fail if trying to create a session before destroying the old one.
ses.create("test1")
with pytest.raises(GMTCLibError):
ses.create("test2")
def test_destroy_session_fails():
"""
Fail to destroy session when given bad input.
"""
ses = clib.Session()
with pytest.raises(GMTCLibNoSessionError):
ses.destroy()
ses.create("test-session")
with mock(ses, "GMT_Destroy_Session", returns=1):
with pytest.raises(GMTCLibError):
ses.destroy()
ses.destroy()
def test_call_module():
"""
Run a command to see if call_module works.
"""
data_fname = os.path.join(TEST_DATA_DIR, "points.txt")
out_fname = "test_call_module.txt"
with clib.Session() as lib:
with GMTTempFile() as out_fname:
lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name))
assert os.path.exists(out_fname.name)
output = out_fname.read().strip()
assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338"
def test_call_module_invalid_arguments():
"""
Fails for invalid module arguments.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.call_module("info", "bogus-data.bla")
def test_call_module_invalid_name():
"""
Fails when given bad input.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.call_module("meh", "")
def test_call_module_error_message():
"""
    Check if the GMT error message was captured.
"""
with clib.Session() as lib:
try:
lib.call_module("info", "bogus-data.bla")
except GMTCLibError as error:
assert "Module 'info' failed with status code" in str(error)
assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error)
def test_method_no_session():
"""
Fails when not in a session.
"""
# Create an instance of Session without "with" so no session is created.
lib = clib.Session()
with pytest.raises(GMTCLibNoSessionError):
lib.call_module("gmtdefaults", "")
with pytest.raises(GMTCLibNoSessionError):
lib.session_pointer # pylint: disable=pointless-statement
def test_parse_constant_single():
"""
Parsing a single family argument correctly.
"""
lib = clib.Session()
for family in FAMILIES:
parsed = lib._parse_constant(family, valid=FAMILIES)
assert parsed == lib[family]
def test_parse_constant_composite():
"""
Parsing a composite constant argument (separated by |) correctly.
"""
lib = clib.Session()
test_cases = ((family, via) for family in FAMILIES for via in VIAS)
for family, via in test_cases:
composite = "|".join([family, via])
expected = lib[family] + lib[via]
parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS)
assert parsed == expected
def test_parse_constant_fails():
"""
Check if the function fails when given bad input.
"""
lib = clib.Session()
test_cases = [
"SOME_random_STRING",
"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR",
"GMT_IS_DATASET|NOT_A_PROPER_VIA",
"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX",
"NOT_A_PROPER_FAMILY|ALSO_INVALID",
]
for test_case in test_cases:
with pytest.raises(GMTInvalidInput):
lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS)
# Should also fail if not given valid modifiers but is using them anyway.
# This should work...
lib._parse_constant(
"GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS
)
# But this shouldn't.
with pytest.raises(GMTInvalidInput):
lib._parse_constant(
"GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None
)
def test_create_data_dataset():
"""
Run the function to make sure it doesn't fail badly.
"""
with clib.Session() as lib:
# Dataset from vectors
data_vector = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_VECTOR",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0], # columns, rows, layers, dtype
)
# Dataset from matrices
data_matrix = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_MATRIX",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0],
)
assert data_vector != data_matrix
def test_create_data_grid_dim():
"""
Create a grid ignoring range and inc.
"""
with clib.Session() as lib:
# Grids from matrices using dim
lib.create_data(
family="GMT_IS_GRID|GMT_VIA_MATRIX",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0],
)
def test_create_data_grid_range():
"""
Create a grid specifying range and inc instead of dim.
"""
with clib.Session() as lib:
# Grids from matrices using range and int
lib.create_data(
family="GMT_IS_GRID|GMT_VIA_MATRIX",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
def test_create_data_fails():
"""
Check that create_data raises exceptions for invalid input and output.
"""
# Passing in invalid mode
with pytest.raises(GMTInvalidInput):
with clib.Session() as lib:
lib.create_data(
family="GMT_IS_DATASET",
geometry="GMT_IS_SURFACE",
mode="Not_a_valid_mode",
dim=[0, 0, 1, 0],
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
# Passing in invalid geometry
with pytest.raises(GMTInvalidInput):
with clib.Session() as lib:
lib.create_data(
family="GMT_IS_GRID",
geometry="Not_a_valid_geometry",
mode="GMT_CONTAINER_ONLY",
dim=[0, 0, 1, 0],
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
# If the data pointer returned is None (NULL pointer)
with pytest.raises(GMTCLibError):
with clib.Session() as lib:
with mock(lib, "GMT_Create_Data", returns=None):
lib.create_data(
family="GMT_IS_DATASET",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
dim=[11, 10, 2, 0],
)
def test_virtual_file():
"""
Test passing in data via a virtual file with a Dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (5, 3)
for dtype in dtypes:
with clib.Session() as lib:
family = "GMT_IS_DATASET|GMT_VIA_MATRIX"
geometry = "GMT_IS_POINT"
dataset = lib.create_data(
family=family,
geometry=geometry,
mode="GMT_CONTAINER_ONLY",
dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype
)
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
lib.put_matrix(dataset, matrix=data)
# Add the dataset to a virtual file and pass it along to gmt info
vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset)
with lib.open_virtual_file(*vfargs) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
assert output == expected
def test_virtual_file_fails():
"""
Check that opening and closing virtual files raises an exception for non-
zero return codes.
"""
vfargs = (
"GMT_IS_DATASET|GMT_VIA_MATRIX",
"GMT_IS_POINT",
"GMT_IN|GMT_IS_REFERENCE",
None,
)
# Mock Open_VirtualFile to test the status check when entering the context.
# If the exception is raised, the code won't get to the closing of the
# virtual file.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1):
with pytest.raises(GMTCLibError):
with lib.open_virtual_file(*vfargs):
print("Should not get to this code")
# Test the status check when closing the virtual file
# Mock the opening to return 0 (success) so that we don't open a file that
# we won't close later.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock(
lib, "GMT_Close_VirtualFile", returns=1
):
with pytest.raises(GMTCLibError):
with lib.open_virtual_file(*vfargs):
pass
print("Shouldn't get to this code either")
def test_virtual_file_bad_direction():
"""
Test passing an invalid direction argument.
"""
with clib.Session() as lib:
vfargs = (
"GMT_IS_DATASET|GMT_VIA_MATRIX",
"GMT_IS_POINT",
"GMT_IS_GRID", # The invalid direction argument
0,
)
with pytest.raises(GMTInvalidInput):
with lib.open_virtual_file(*vfargs):
print("This should have failed")
def test_virtualfile_from_vectors():
"""
Test the automation for transforming vectors to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 10
for dtype in dtypes:
x = np.arange(size, dtype=dtype)
y = np.arange(size, size * 2, 1, dtype=dtype)
z = np.arange(size * 2, size * 3, 1, dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_one_string_or_object_column(dtype):
"""
Test passing in one column with string or object dtype into virtual file
dataset.
"""
size = 5
x = np.arange(size, dtype=np.int32)
y = np.arange(size, size * 2, 1, dtype=np.int32)
strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, strings) as vfile:
with GMTTempFile() as outfile:
lib.call_module("convert", f"{vfile} ->{outfile.name}")
output = outfile.read(keep_tabs=True)
expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings))
assert output == expected
@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_two_string_or_object_columns(dtype):
"""
Test passing in two columns of string or object dtype into virtual file
dataset.
"""
size = 5
x = np.arange(size, dtype=np.int32)
y = np.arange(size, size * 2, 1, dtype=np.int32)
strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype)
strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile:
with GMTTempFile() as outfile:
lib.call_module("convert", f"{vfile} ->{outfile.name}")
output = outfile.read(keep_tabs=True)
expected = "".join(
f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2)
)
assert output == expected
def test_virtualfile_from_vectors_transpose():
"""
Test transforming matrix columns to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(*data.T) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T]
)
expected = "{}\n".format(bounds)
assert output == expected
def test_virtualfile_from_vectors_diff_size():
"""
Test the function fails for arrays of different sizes.
"""
x = np.arange(5)
y = np.arange(6)
with clib.Session() as lib:
with pytest.raises(GMTInvalidInput):
with lib.virtualfile_from_vectors(x, y):
print("This should have failed")
def test_virtualfile_from_matrix():
"""
Test transforming a matrix to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
with clib.Session() as lib:
with lib.virtualfile_from_matrix(data) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
assert output == expected
def test_virtualfile_from_matrix_slice():
"""
Test transforming a slice of a larger array to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (10, 6)
for dtype in dtypes:
full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
rows = 5
cols = 3
data = full_data[:rows, :cols]
with clib.Session() as lib:
with lib.virtualfile_from_matrix(data) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(rows, bounds)
assert output == expected
def test_virtualfile_from_vectors_pandas():
"""
Pass vectors to a dataset using pandas Series.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 13
for dtype in dtypes:
data = pd.DataFrame(
data=dict(
x=np.arange(size, dtype=dtype),
y=np.arange(size, size * 2, 1, dtype=dtype),
z=np.arange(size * 2, size * 3, 1, dtype=dtype),
)
)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
[
"<{:.0f}/{:.0f}>".format(i.min(), i.max())
for i in (data.x, data.y, data.z)
]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
def test_virtualfile_from_vectors_arraylike():
"""
Pass array-like vectors to a dataset.
"""
size = 13
x = list(range(0, size, 1))
y = tuple(range(size, size * 2, 1))
z = range(size * 2, size * 3, 1)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(min(i), max(i)) for i in (x, y, z)]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
def test_extract_region_fails():
"""
Check that extract region fails if nothing has been plotted.
"""
Figure()
with pytest.raises(GMTCLibError):
with clib.Session() as lib:
lib.extract_region()
def test_extract_region_two_figures():
"""
Extract region should handle multiple figures existing at the same time.
"""
# Make two figures before calling extract_region to make sure that it's
# getting from the current figure, not the last figure.
fig1 = Figure()
region1 = np.array([0, 10, -20, -10])
fig1.coast(region=region1, projection="M6i", frame=True, land="black")
fig2 = Figure()
fig2.basemap(region="US.HI+r5", projection="M6i", frame=True)
# Activate the first figure and extract the region from it
# Use in a different session to avoid any memory problems.
with clib.Session() as lib:
lib.call_module("figure", "{} -".format(fig1._name))
with clib.Session() as lib:
wesn1 = lib.extract_region()
npt.assert_allclose(wesn1, region1)
# Now try it with the second one
with clib.Session() as lib:
lib.call_module("figure", "{} -".format(fig2._name))
with clib.Session() as lib:
wesn2 = lib.extract_region()
npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0]))
def test_write_data_fails():
"""
Check that write data raises an exception for non-zero return codes.
"""
# It's hard to make the C API function fail without causing a Segmentation
# Fault. Can't test this if by giving a bad file name because if
# output=='', GMT will just write to stdout and spaces are valid file
# names. Use a mock instead just to exercise this part of the code.
with clib.Session() as lib:
with mock(lib, "GMT_Write_Data", returns=1):
with pytest.raises(GMTCLibError):
lib.write_data(
"GMT_IS_VECTOR",
"GMT_IS_POINT",
"GMT_WRITE_SET",
[1] * 6,
"some-file-name",
None,
)
def test_dataarray_to_matrix_works():
"""
Check that dataarray_to_matrix returns correct output.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=0, stop=4, num=3)
y = np.linspace(start=5, stop=9, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.flipud(data))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1] - y[0]])
def test_dataarray_to_matrix_negative_x_increment():
"""
Check if dataarray_to_matrix returns correct output with flipped x.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=4, stop=0, num=3)
y = np.linspace(start=5, stop=9, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0, 1)))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_negative_y_increment():
"""
Check that dataarray_to_matrix returns correct output with flipped y.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=0, stop=4, num=3)
y = np.linspace(start=9, stop=5, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=data)
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_negative_x_and_y_increment():
"""
Check that dataarray_to_matrix returns correct output with flipped x/y.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=4, stop=0, num=3)
y = np.linspace(start=9, stop=5, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.fliplr(data))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_dims_fails():
"""
Check that it fails for > 2 dims.
"""
# Make a 3D regular grid
data = np.ones((10, 12, 11), dtype="float32")
x = np.arange(11)
y = np.arange(12)
z = np.arange(10)
grid = xr.DataArray(data, coords=[("z", z), ("y", y), ("x", x)])
with pytest.raises(GMTInvalidInput):
dataarray_to_matrix(grid)
def test_dataarray_to_matrix_inc_fails():
"""
Check that it fails for variable increments.
"""
data = np.ones((4, 5), dtype="float64")
x = np.linspace(0, 1, 5)
y = np.logspace(2, 3, 4)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
with pytest.raises(GMTInvalidInput):
dataarray_to_matrix(grid)
def test_get_default():
"""
Make sure get_default works without crashing and gives reasonable results.
"""
with clib.Session() as lib:
assert lib.get_default("API_GRID_LAYOUT") in ["rows", "columns"]
assert int(lib.get_default("API_CORES")) >= 1
assert Version(lib.get_default("API_VERSION")) >= Version("6.2.0")
def test_get_default_fails():
"""
Make sure get_default raises an exception for invalid names.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.get_default("NOT_A_VALID_NAME")
def test_info_dict():
"""
Make sure the clib.Session.info dict is working.
"""
# Check if there are no errors or segfaults from getting all of the
# properties.
with clib.Session() as lib:
assert lib.info
# Mock GMT_Get_Default to return always the same string
def mock_defaults(api, name, value): # pylint: disable=unused-argument
"""
Put 'bla' in the value buffer.
"""
value.value = b"bla"
return 0
ses = clib.Session()
ses.create("test-session")
with mock(ses, "GMT_Get_Default", mock_func=mock_defaults):
# Check for an empty dictionary
assert ses.info
for key in ses.info:
assert ses.info[key] == "bla"
ses.destroy()
def test_fails_for_wrong_version():
"""
Make sure the clib.Session raises an exception if GMT is too old.
"""
# Mock GMT_Get_Default to return an old version
def mock_defaults(api, name, value): # pylint: disable=unused-argument
"""
Return an old version.
"""
if name == b"API_VERSION":
value.value = b"5.4.3"
else:
value.value = b"bla"
return 0
lib = clib.Session()
with mock(lib, "GMT_Get_Default", mock_func=mock_defaults):
with pytest.raises(GMTVersionError):
with lib:
assert lib.info["version"] != "5.4.3"
# Make sure the session is closed when the exception is raised.
with pytest.raises(GMTCLibNoSessionError):
assert lib.session_pointer
| 2.375 | 2 |
stubs/_pytest/_code.py | questioneer-ltd/scrut | 0 | 393 | <reponame>questioneer-ltd/scrut
"""Type stubs for _pytest._code."""
# This class actually has more functions than are specified here.
# We don't use these features, so I don't think its worth including
# them in our type stub. We can always change it later.
class ExceptionInfo:
@property
def value(self) -> Exception: ...
| 1.882813 | 2 |
Prime Factorization/prime_factorization_II.py | rayvantsahni/Let-us-Math | 2 | 394 | def get_primes(n):
    primes = []  # stores the prime numbers within the range of the number
    sieve = [False] * (n + 1)  # sieve[i] is True once i has been marked as composite (not prime)
    sieve[0] = sieve[1] = True  # marking 0 and 1 as not prime
for i in range(2, n + 1): # loops over all the numbers to check for prime numbers
if sieve[i]: # checks whether a number is not prime
continue # skips the loop if the number is not a prime number
primes.append(i) # adds a number into list if it is a prime number
        for j in range(i ** 2, n + 1, i):  # loops over all multiples of the prime number starting from the square of the prime number
sieve[j] = True # marks the multiple of the prime number as not prime
return primes # returns the list containing prime numbers
def get_factorization(n):
prime_factors = [] # stores the prime factorization of the number
for prime in get_primes(n): # looping over all the prime numbers
        while n != 1:  # keeps dividing the number by a certain prime number until the number is 1
            if n % prime == 0:  # checks if the number is divisible by a particular prime number
                prime_factors.append(prime)  # add the prime factor to the list if it divides the number
                n //= prime  # integer division keeps n an int while it is reduced
            else:
                break  # once this prime no longer divides n, move on to the next prime number
return prime_factors # returns the list containing the prime factorization of the number
if __name__ == "__main__":
n = int(input("Enter a number: "))
print(get_factorization(n))
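# --- Editor's note: illustrative self-test, not part of the original script. ---
# 360 = 2**3 * 3**2 * 5, so its factorization is [2, 2, 2, 3, 3, 5].
# Call _self_test() manually to verify the two functions above.
def _self_test():
    assert get_primes(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert get_factorization(360) == [2, 2, 2, 3, 3, 5]
    print("prime factorization self-test passed")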
| 4.21875 | 4 |
pandas/core/indexes/range.py | mujtahidalam/pandas | 2 | 395 | from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
return start.copy(name=name)
elif isinstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
if not len(self._range):
return header
first_val_str = str(self._range[0])
last_val_str = str(self._range[-1])
max_length = max(len(first_val_str), len(last_val_str))
return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
if not is_signed_integer_dtype(target):
# checks/conversions/roundings are delegated to general method
return super()._get_indexer(target, method=method, tolerance=tolerance)
target_array = np.asarray(target)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
# --------------------------------------------------------------------
def repeat(self, repeats, axis=None) -> Int64Index:
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
return self._int64index.take(
indices,
axis=axis,
allow_fill=allow_fill,
fill_value=fill_value,
**kwargs,
)
def tolist(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
@doc(Int64Index.copy)
def copy(
self,
name: Hashable = None,
deep: bool = False,
dtype: Dtype | None = None,
names=None,
):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._rename(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
np.ndarray[np.intp]
See Also
--------
numpy.ndarray.argsort
"""
ascending = kwargs.pop("ascending", True) # EA compat
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
result = np.arange(len(self), dtype=np.intp)
else:
result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)
if not ascending:
result = result[::-1]
return result
def factorize(
self, sort: bool = False, na_sentinel: int | None = -1
) -> tuple[np.ndarray, RangeIndex]:
codes = np.arange(len(self), dtype=np.intp)
uniques = self
if sort and self.step < 0:
codes = codes[::-1]
uniques = uniques[::-1]
return codes, uniques
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
# --------------------------------------------------------------------
# Set Operations
def _intersection(self, other: Index, sort=False):
if not isinstance(other, RangeIndex):
# Int64Index
return super()._intersection(other, sort=sort)
if not len(self) or not len(other):
return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, _ = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
"""
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(x, y)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
def _union(self, other: Index, sort):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (len(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (len(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
def _difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not isinstance(other, RangeIndex):
return super()._difference(other, sort=sort)
res_name = ops.get_op_result_name(self, other)
first = self._range[::-1] if self.step < 0 else self._range
overlap = self.intersection(other)
if overlap.step < 0:
overlap = overlap[::-1]
if len(overlap) == 0:
return self.rename(name=res_name)
if len(overlap) == len(self):
return self[:0].rename(res_name)
if not isinstance(overlap, RangeIndex):
# We won't end up with RangeIndex, so fall back
return super()._difference(other, sort=sort)
if overlap.step != first.step:
# In some cases we might be able to get a RangeIndex back,
# but not worth the effort.
return super()._difference(other, sort=sort)
if overlap[0] == first.start:
# The difference is everything after the intersection
new_rng = range(overlap[-1] + first.step, first.stop, first.step)
elif overlap[-1] == first[-1]:
# The difference is everything before the intersection
new_rng = range(first.start, overlap[0], first.step)
else:
# The difference is not range-like
return super()._difference(other, sort=sort)
new_index = type(self)._simple_new(new_rng, name=res_name)
if first is not self._range:
new_index = new_index[::-1]
return new_index
def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
if not isinstance(other, RangeIndex) or sort is not None:
return super().symmetric_difference(other, result_name, sort)
left = self.difference(other)
right = other.difference(self)
result = left.union(right)
if result_name is not None:
result = result.rename(result_name)
return result
# --------------------------------------------------------------------
def _concat(self, indexes: list[Index], name: Hashable) -> Index:
"""
Overriding parent method for the case of all RangeIndex instances.
When all members of "indexes" are of type RangeIndex: result will be
RangeIndex if possible, Int64Index otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
if not all(isinstance(x, RangeIndex) for x in indexes):
return super()._concat(indexes, name)
elif len(indexes) == 1:
return indexes[0]
rng_indexes = cast(List[RangeIndex], indexes)
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in rng_indexes if len(obj)]
for obj in non_empty_indexes:
rng = obj._range
if start is None:
# This is set by the first non-empty index
start = rng.start
if step is None and len(rng) > 1:
step = rng.step
elif step is None:
# First non-empty index had only one element
if rng.start == start:
values = np.concatenate([x._values for x in rng_indexes])
result = Int64Index(values)
return result.rename(name)
step = rng.start - start
non_consecutive = (step != rng.step and len(rng) > 1) or (
next_ is not None and rng.start != next_
)
if non_consecutive:
result = Int64Index(np.concatenate([x._values for x in rng_indexes]))
return result.rename(name)
if step is not None:
next_ = rng[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step).rename(name)
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0).rename(name)
def __len__(self) -> int:
"""
return the length of the RangeIndex
"""
return len(self._range)
@property
def size(self) -> int:
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
if isinstance(key, slice):
new_range = self._range[key]
return self._simple_new(new_range, name=self._name)
elif is_integer(key):
new_key = int(key)
try:
return self._range[new_key]
except IndexError as err:
raise IndexError(
f"index {key} is out of bounds for axis 0 with size {len(self)}"
) from err
elif is_scalar(key):
raise IndexError(
"only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices"
)
# fall back to Int64Index
return super().__getitem__(key)
def _getitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
res = self._range[slobj]
return type(self)._simple_new(res, name=self._name)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
if is_integer(other) and other != 0:
if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
start = self.start // other
step = self.step // other
stop = start + len(self) * step
new_range = range(start, stop, step or 1)
return self._simple_new(new_range, name=self.name)
if len(self) == 1:
start = self.start // other
new_range = range(start, start + 1, 1)
return self._simple_new(new_range, name=self.name)
return self._int64index // other
# --------------------------------------------------------------------
# Reductions
def all(self, *args, **kwargs) -> bool:
return 0 not in self._range
def any(self, *args, **kwargs) -> bool:
return any(self._range)
# --------------------------------------------------------------------
def _cmp_method(self, other, op):
if isinstance(other, RangeIndex) and self._range == other._range:
# Both are immutable so if ._range attr. are equal, shortcut is possible
return super()._cmp_method(self, op)
return super()._cmp_method(other, op)
def _arith_method(self, other, op):
"""
Parameters
----------
other : Any
op : callable that accepts 2 params
perform the binary op
"""
if isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
if op in [
operator.pow,
ops.rpow,
operator.mod,
ops.rmod,
ops.rfloordiv,
divmod,
ops.rdivmod,
]:
return op(self._int64index, other)
step: Callable | None = None
if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
step = op
# TODO: if other is a RangeIndex we may have more efficient options
other = extract_array(other, extract_numpy=True, extract_range=True)
attrs = self._get_attributes_dict()
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all="ignore"):
rstep = step(left.step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left.step
with np.errstate(all="ignore"):
rstart = op(left.start, right)
rstop = op(left.stop, right)
result = type(self)(rstart, rstop, rstep, **attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in [rstart, rstop, rstep]):
result = result.astype("float64")
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
| 2 | 2 |
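A minimal usage sketch for the RangeIndex class defined above (assuming a standard pandas installation; not part of the original source). It illustrates why the class exists: only start/stop/step are stored, so memory use does not grow with length.

import pandas as pd

idx = pd.RangeIndex(start=0, stop=10, step=2)
print(list(idx))           # [0, 2, 4, 6, 8]
print(idx.get_loc(6))      # 3, resolved arithmetically via range.index rather than a hash table
print(idx.memory_usage())  # a handful of bytes, independent of the index length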
model.py | Hasanweight/pytorch-chatbot-master | 0 | 396 | import torch
import torch.nn as nn
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, hidden_size)
self.l3 = nn.Linear(hidden_size, hidden_size)
self.l4 = nn.Linear(hidden_size, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3(out)
out = self.relu(out)
out = self.l4(out)
# no activation and no softmax at the end
return out | 3.484375 | 3 |
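A small usage sketch for the NeuralNet module above; the sizes are made up for illustration, and the raw outputs are meant to be fed to a softmax or CrossEntropyLoss outside the model.

import torch

# hypothetical sizes: 54-dimensional bag-of-words input, 8 hidden units, 7 classes
model = NeuralNet(input_size=54, hidden_size=8, num_classes=7)
x = torch.randn(1, 54)   # one sample
logits = model(x)        # raw scores, no softmax applied inside forward()
print(logits.shape)      # torch.Size([1, 7])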
jwql/utils/logging_functions.py | hover2pi/jwql | 0 | 397 |
""" Logging functions for the ``jwql`` automation platform.
This module provides decorators to log the execution of modules. Log
files are written to the ``logs/`` directory in the ``jwql`` central
storage area, named by module name and timestamp, e.g.
``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``
Authors
-------
- <NAME> 2018
- <NAME>, 2013 (WFC3 QL Version)
Use
---
To log the execution of a module, use:
::
import os
import logging
from jwql.logging.logging_functions import configure_logging
from jwql.logging.logging_functions import log_info
from jwql.logging.logging_functions import log_fail
@log_info
@log_fail
def my_main_function():
pass
if __name__ == '__main__':
module = os.path.basename(__file__).replace('.py', '')
configure_logging(module)
my_main_function()
Dependencies
------------
The user must have a configuration file named ``config.json``
placed in the ``utils`` directory.
References
----------
    This code is adapted and updated from the Python routine
``logging_functions.py`` written by Alex Viana, 2013 for the WFC3
Quicklook automation platform.
"""
import datetime
import getpass
import importlib
import logging
import os
import pwd
import socket
import sys
import time
import traceback
from functools import wraps
from jwql.utils.permissions import set_permissions
from jwql.utils.utils import get_config, ensure_dir_exists
LOG_FILE_LOC = ''
PRODUCTION_BOOL = ''
def configure_logging(module, production_mode=True, path='./'):
"""Configure the log file with a standard logging format.
Parameters
----------
module : str
The name of the module being logged.
production_mode : bool
Whether or not the output should be written to the production
        environment.
path : str
Where to write the log if user-supplied path; default to working dir.
"""
# Determine log file location
if production_mode:
log_file = make_log_file(module)
else:
log_file = make_log_file(module, production_mode=False, path=path)
global LOG_FILE_LOC
global PRODUCTION_BOOL
LOG_FILE_LOC = log_file
PRODUCTION_BOOL = production_mode
# Create the log file and set the permissions
logging.basicConfig(filename=log_file,
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S %p',
level=logging.INFO)
set_permissions(log_file)
def make_log_file(module, production_mode=True, path='./'):
"""Create the log file name based on the module name.
The name of the ``log_file`` is a combination of the name of the
module being logged and the current datetime.
Parameters
----------
module : str
The name of the module being logged.
production_mode : bool
Whether or not the output should be written to the production
environment.
path : str
Where to write the log if user-supplied path; default to
working dir.
Returns
-------
log_file : str
The full path to where the log file will be written to.
"""
timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
filename = '{0}_{1}.log'.format(module, timestamp)
user = pwd.getpwuid(os.getuid()).pw_name
settings = get_config()
admin_account = settings['admin_account']
log_path = settings['log_dir']
exempt_modules = []
if user != admin_account and module not in exempt_modules and production_mode:
module = os.path.join('dev', module)
if production_mode:
log_file = os.path.join(log_path, module, filename)
else:
log_file = os.path.join(path, filename)
ensure_dir_exists(os.path.dirname(log_file))
return log_file
def log_info(func):
"""Decorator to log useful system information.
This function can be used as a decorator to log user environment
and system information. Future packages we want to track can be
added or removed as necessary.
Parameters
----------
func : func
The function to decorate.
Returns
-------
wrapped : func
The wrapped function.
"""
@wraps(func)
def wrapped(*a, **kw):
# Log environment information
logging.info('User: ' + getpass.getuser())
logging.info('System: ' + socket.gethostname())
logging.info('Python Version: ' + sys.version.replace('\n', ''))
logging.info('Python Executable Path: ' + sys.executable)
# Read in setup.py file to build list of required modules
settings = get_config()
setup_file_name = settings['setup_file']
with open(setup_file_name) as setup:
for line in setup:
if line[0:8] == "REQUIRES":
module_required = line[12:-2]
module_list = module_required.split(',')
# Clean up the module list
module_list = [module.replace('"', '').replace("'", '').replace(' ', '') for module in module_list]
module_list = [module.split('=')[0] for module in module_list]
# Log common module version information
for module in module_list:
try:
mod = importlib.import_module(module)
logging.info(module + ' Version: ' + mod.__version__)
logging.info(module + ' Path: ' + mod.__path__[0])
except ImportError as err:
logging.warning(err)
        # Call the function and time it (time.clock() was removed in Python 3.8, so use process_time)
        t1_cpu = time.process_time()
        t1_time = time.time()
        func(*a, **kw)
        t2_cpu = time.process_time()
        t2_time = time.time()
# Log execution time
hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)
minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)
hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)
minutes_time, seconds_time = divmod(remainder_time, 60)
logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time))
logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu))
return wrapped
def log_fail(func):
"""Decorator to log crashes in the decorated code.
Parameters
----------
func : func
The function to decorate.
Returns
-------
wrapped : func
The wrapped function.
"""
@wraps(func)
def wrapped(*a, **kw):
try:
# Run the function
func(*a, **kw)
logging.info('Completed Successfully')
except Exception:
logging.critical(traceback.format_exc())
logging.critical('CRASHED')
return wrapped
| 2.84375 | 3 |
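Beyond the docstring example above, a non-production setup might look like this sketch (module name and path are illustrative, and a valid config.json is still required per the Dependencies section):

from jwql.utils.logging_functions import configure_logging, log_info, log_fail

@log_info
@log_fail
def process():
    pass

if __name__ == '__main__':
    # write the log file into the current directory instead of the central storage area
    configure_logging('my_module', production_mode=False, path='./')
    process()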
api/services/http.py | takos22/API-1 | 0 | 398 | from aiohttp import ClientSession
from typing import Optional
session: Optional[ClientSession] = None
__all__ = ("session",)
| 1.476563 | 1 |
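A sketch of how the module-level session above might be created and torn down by the application; the import path api.services.http is inferred from the file path and the URL is a placeholder.

import asyncio
from aiohttp import ClientSession

from api.services import http  # hypothetical import path based on the repo layout

async def main():
    http.session = ClientSession()  # create the shared session at startup
    try:
        async with http.session.get("https://example.com") as resp:
            print(resp.status)
    finally:
        await http.session.close()  # always close the shared session on shutdown

asyncio.run(main())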
bcloud-snap/bcloud-3.9.1/bcloud/hasher.py | jiaxiaolei/my_snap_demo | 0 | 399 | <gh_stars>0
# Copyright (C) 2014-2015 LiuLang <<EMAIL>>
# Use of this source code is governed by GPLv3 license that can be found
# in http://www.gnu.org/licenses/gpl-3.0.html
import hashlib
import os
import zlib
CHUNK = 2 ** 20
def crc(path):
_crc = 0
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_crc = zlib.crc32(chunk, _crc)
fh.close()
return '%X' % (_crc & 0xFFFFFFFF)
def md5(path, start=0, stop=-1):
_md5 = hashlib.md5()
fh = open(path, 'rb')
if start > 0:
fh.seek(start)
if stop == -1:
stop = os.path.getsize(path)
pos = start
while pos < stop:
size = min(CHUNK, stop - pos)
chunk = fh.read(size)
if not chunk:
break
pos += len(chunk)
_md5.update(chunk)
fh.close()
return _md5.hexdigest()
def sha1(path):
_sha1 = hashlib.sha1()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha1.update(chunk)
fh.close()
return _sha1.hexdigest()
def sha224(path):
_sha224 = hashlib.sha224()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha224.update(chunk)
fh.close()
return _sha224.hexdigest()
def sha256(path):
_sha256 = hashlib.sha256()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha256.update(chunk)
fh.close()
return _sha256.hexdigest()
def sha384(path):
_sha384 = hashlib.sha384()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha384.update(chunk)
fh.close()
return _sha384.hexdigest()
def sha512(path):
_sha512 = hashlib.sha512()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha512.update(chunk)
fh.close()
return _sha512.hexdigest()
| 2.0625 | 2 |
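A brief usage sketch for the chunked hashing helpers above (the module is assumed to be importable as hasher, and the file path is illustrative). Each helper reads the file in 1 MiB chunks, so even large files are hashed without loading them fully into memory.

import hasher

path = "/tmp/example.bin"            # hypothetical file
print(hasher.crc(path))              # CRC32 as an uppercase hex string
print(hasher.md5(path))              # MD5 of the whole file
print(hasher.md5(path, 0, 2 ** 20))  # MD5 of just the first mebibyte
print(hasher.sha256(path))           # SHA-256 of the whole file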