max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---
stringlengths 3-269 | stringlengths 4-119 | int64 0-191k | stringlengths 1-7 | stringlengths 6-1.05M | float64 0.23-5.13 | int64 0-5
Chapter__7/unittest/test_cap.py | nil1729/python__noob | 0 | 12799951 | import unittest
import cap
class TestCap(unittest.TestCase):
def test_single_word(self):
text = 'python'
result = cap.cap_text(text)
self.assertEqual(result, 'Python')
def test_multiple_word(self):
text = 'python django'
result = cap.cap_text(text)
self.assertEqual(result, 'Python Django')
if __name__ == '__main__':
unittest.main() | 3.21875 | 3 |
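The cap module exercised by these tests is not included in this row. A minimal sketch that satisfies both assertions is shown below; the use of str.title() is an assumption, since any per-word capitalization would pass.

# cap.py - hypothetical module assumed by the tests above
def cap_text(text):
    """Capitalize the first letter of every word in text."""
    return text.title()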
top_secret/_vault.py | trym-inc/top-secret | 0 | 12799952 | from typing import List, Dict, Callable
from .cast_handlers import bool_cast_handler
from .exceptions import CastHandlerMissingError
from .exceptions import SecretMissingError
from .exceptions import SecretSourceMissing
from .secret_sources import BaseSecretSource
from .secret_sources import EnvironmentVariableSecretSource
class NoDefault:
pass
DEFAULT_SECRET_SOURCES = [
EnvironmentVariableSecretSource()
]
DEFAULT_CAST_HANDLERS = {
bool: bool_cast_handler,
}
class Vault:
_cache = {}
cast_handlers: 'Dict[Callable, Callable]' = {}
secret_sources: 'List[BaseSecretSource]' = []
preprocessors: 'List[Callable[[str], str]]' = []
def __init__(self, secret_sources=None, cast_handlers=None, preprocessors=None):
if cast_handlers is None:
cast_handlers = {}
if secret_sources is None:
secret_sources = []
if preprocessors is None:
preprocessors = []
self.default_secret_sources = secret_sources
self.default_cast_handlers = cast_handlers
self.default_preprocessors = preprocessors
self.reset()
def add_secret_source(self, source: 'BaseSecretSource'):
if source in self.secret_sources:
return
self.secret_sources.append(source)
def clear_secret_sources(self):
self.secret_sources = []
def reset_secret_sources(self):
self.secret_sources = list(self.default_secret_sources)
def add_cast_handler(self, handler_key, handler):
self.cast_handlers[handler_key] = handler
def clear_cast_handlers(self):
self.cast_handlers = {}
def reset_cast_handlers(self):
self.cast_handlers = {**self.default_cast_handlers}
def add_preprocessor(self, fn):
self.preprocessors.append(fn)
def clear_preprocessors(self):
self.preprocessors = []
def reset_preprocessors(self):
self.preprocessors = list(self.default_preprocessors)
def clear_cache(self):
self._cache = {}
def reset(self):
self.reset_secret_sources()
self.reset_cast_handlers()
self.reset_preprocessors()
self.clear_cache()
def get(
self,
name,
default=NoDefault,
*,
source=None,
preprocessors=None,
cast_to=None,
no_cache=False,
cache_result=True
):
if no_cache is False and name in self._cache:
return self._cache[name]
value = self._get_from_source(name, default, source)
value = self._preprocess(value, preprocessors)
value = self._cast_to(value, cast_to, default)
if cache_result:
self._cache[name] = value
return value
def _get_from_source(self, name, default, source):
if source is not None:
return source.get(name)
if not self.secret_sources:
raise SecretSourceMissing
for source in self.secret_sources:
try:
value = source.get(name)
break
except SecretMissingError:
pass
else:
if default is NoDefault:
raise SecretMissingError(name)
else:
value = default
return value
def _preprocess(self, value, preprocessors):
if preprocessors is None:
preprocessors = self.preprocessors
for preprocessor in preprocessors:
value = preprocessor(value)
return value
def _cast_to(self, value, cast_to, default):
if value is default:
return value
if cast_to is not None:
handler = self.cast_handlers.get(cast_to, cast_to)
if not callable(handler):
raise CastHandlerMissingError(
f'Cast handler: {handler!r}, is not registered.'
)
value = handler(value)
return value
vault = Vault(DEFAULT_SECRET_SOURCES, DEFAULT_CAST_HANDLERS)
| 2.21875 | 2 |
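A brief usage sketch of the Vault API defined above. The secret names are illustrative, and it is assumed that EnvironmentVariableSecretSource looks keys up verbatim in os.environ at lookup time; bool is the only cast handler registered by default.

# Hypothetical usage of the module-level vault defined above.
import os

os.environ['DATABASE_URL'] = 'postgres://localhost/app'  # example values, assumed lookup scheme
os.environ['DEBUG'] = 'true'

url = vault.get('DATABASE_URL')               # resolved by the environment source
debug = vault.get('DEBUG', cast_to=bool)      # routed through bool_cast_handler
missing = vault.get('API_KEY', default=None)  # no source has it, falls back to the default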
pycode/demo_km.py | Skielex/InSegt | 6 | 12799953 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Demo showing how km_dict and insegtannotator may be used together for
interactive segmentation.
@author: vand and abda
"""
import sys
import insegtannotator
import skimage.io
import skimage.data
import km_dict
import numpy as np
#%% EXAMPLE 1: glass fibres
## loading image
print('Loading image')
filename = '../data/glass.png'
image = skimage.io.imread(filename)
#%% EXAMPLE 2: nerve fibres
## loading image
print('Loading image')
filename = '../data/nerve_im_scale.png'
image = skimage.io.imread(filename)
#%% COMMON PART
patch_size = 11
branching_factor = 5
number_layers = 5
number_training_patches = 35000
normalization = False
image_float = image.astype(float)/255  # np.float was removed in newer NumPy; plain float is equivalent
# Build tree
T = km_dict.build_km_tree(image_float, patch_size, branching_factor, number_training_patches, number_layers, normalization)
# Search km-tree and get assignment
A, number_nodes = km_dict.search_km_tree(image_float, T, branching_factor, normalization)
# number of repetitions for updating the segmentation
number_repetitions = 2
def processing_function(labels):
r,c = labels.shape
l = np.max(labels)+1
label_image = np.zeros((r,c,l))
for k in range(number_repetitions):
for i in range(1,l):
label_image[:,:,i] = (labels == i).astype(float)
D = km_dict.improb_to_dictprob(A, label_image, number_nodes, patch_size) # Dictionary
P = km_dict.dictprob_to_improb(A, D, patch_size) # Probability map
labels = np.argmax(P,axis=2) # Segmentation
return labels
print('Showtime')
# showtime
app = insegtannotator.PyQt5.QtWidgets.QApplication([])
ex = insegtannotator.InSegtAnnotator(image, processing_function)
app.exec()
sys.exit()
| 3.109375 | 3 |
src/eltetrado/analysis.py | tzok/el_tetrado | 0 | 12799954 |
import itertools
import logging
import math
import os
import string
import subprocess
import tempfile
from collections import defaultdict, Counter
from dataclasses import dataclass, field
from typing import Dict, Iterable, List, Tuple, Optional, Set
import numpy
from eltetrado.model import Atom3D, Structure3D, Structure2D, BasePair3D, Residue3D, GlycosidicBond, ONZ, \
GbaTetradClassification, Ion, Direction, LoopType, ONZM, GbaQuadruplexClassification, LoopClassification
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
@dataclass(order=True)
class Tetrad:
@staticmethod
def is_valid(nt1: Residue3D, nt2: Residue3D, nt3: Residue3D, nt4: Residue3D,
pair_dictionary: Dict[Tuple[Residue3D, Residue3D], BasePair3D]) -> bool:
lw1 = pair_dictionary[(nt1, nt2)].lw
lw2 = pair_dictionary[(nt2, nt3)].lw
lw3 = pair_dictionary[(nt3, nt4)].lw
lw4 = pair_dictionary[(nt4, nt1)].lw
for lw_i, lw_j in ((lw1, lw4), (lw2, lw1), (lw3, lw2), (lw4, lw3)):
if lw_i.name[1] == lw_j.name[2]:
return False
return True
nt1: Residue3D
nt2: Residue3D
nt3: Residue3D
nt4: Residue3D
pair_12: BasePair3D
pair_23: BasePair3D
pair_34: BasePair3D
pair_41: BasePair3D
onz: ONZ = field(init=False)
gba_class: Optional[GbaTetradClassification] = field(init=False)
planarity_deviation: float = field(init=False)
ions_channel: List[Atom3D] = field(default_factory=list)
ions_outside: Dict[Residue3D, List[Atom3D]] = field(default_factory=dict)
def __post_init__(self):
self.reorder_to_match_5p_3p()
self.planarity_deviation = self.__calculate_planarity_deviation()
def reorder_to_match_5p_3p(self):
# transform into (0, 1, 2, 3)
ni, nj, nk, nl = map(lambda nt: nt.index, self.nucleotides)
indices = sorted((ni, nj, nk, nl))
ni, nj, nk, nl = (indices.index(x) for x in (ni, nj, nk, nl))
nmin = min(ni, nj, nk, nl)
if nmin == ni:
pass
elif nmin == nj:
self.nt1, self.nt2, self.nt3, self.nt4 = self.nt2, self.nt3, self.nt4, self.nt1
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_23, self.pair_34, self.pair_41, self.pair_12
elif nmin == nk:
self.nt1, self.nt2, self.nt3, self.nt4 = self.nt3, self.nt4, self.nt1, self.nt2
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_34, self.pair_41, self.pair_12, self.pair_23
else:
self.nt1, self.nt2, self.nt3, self.nt4 = self.nt4, self.nt1, self.nt2, self.nt3
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_41, self.pair_12, self.pair_23, self.pair_34
# flip order if necessary
if self.pair_12.score() > self.pair_41.reverse().score():
self.nt1, self.nt2, self.nt3, self.nt4 = self.nt1, self.nt4, self.nt3, self.nt2
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_41.reverse(), self.pair_34.reverse(), self.pair_23.reverse(), self.pair_12.reverse()
# ONZ and da Silva's classification are valid in 5'-3' order
self.onz = self.__classify_onz()
self.gba_class = self.__classify_by_gba()
def reorder_to_match_other_tetrad(self, order: Tuple[Residue3D, Residue3D, Residue3D, Residue3D]):
if order == (self.nt1, self.nt2, self.nt3, self.nt4):
pass
elif order == (self.nt2, self.nt3, self.nt4, self.nt1):
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_23, self.pair_34, self.pair_41, self.pair_12
elif order == (self.nt3, self.nt4, self.nt1, self.nt2):
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_34, self.pair_41, self.pair_12, self.pair_23
elif order == (self.nt4, self.nt1, self.nt2, self.nt3):
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_41, self.pair_12, self.pair_23, self.pair_34
elif order == (self.nt4, self.nt3, self.nt2, self.nt1):
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_34.reverse(), self.pair_23.reverse(), self.pair_12.reverse(), self.pair_41.reverse()
elif order == (self.nt3, self.nt2, self.nt1, self.nt4):
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_23.reverse(), self.pair_12.reverse(), self.pair_41.reverse(), self.pair_34.reverse()
elif order == (self.nt2, self.nt1, self.nt4, self.nt3):
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_12.reverse(), self.pair_41.reverse(), self.pair_34.reverse(), self.pair_23.reverse()
elif order == (self.nt1, self.nt4, self.nt3, self.nt2):
self.pair_12, self.pair_23, self.pair_34, self.pair_41 = self.pair_41.reverse(), self.pair_34.reverse(), self.pair_23.reverse(), self.pair_12.reverse()
else:
raise RuntimeError(f'Cannot apply order: {order}')
self.nt1, self.nt2, self.nt3, self.nt4 = order
def __classify_onz(self) -> ONZ:
# transform into (0, 1, 2, 3)
ni, nj, nk, nl = (nt.index for nt in self.nucleotides)
indices = sorted((ni, nj, nk, nl))
ni, nj, nk, nl = (indices.index(x) for x in (ni, nj, nk, nl))
while ni != 0:
ni, nj, nk, nl = nl, ni, nj, nk
order = (nj, nk, nl)
if order == (1, 2, 3):
return ONZ.O_PLUS
elif order == (3, 2, 1):
return ONZ.O_MINUS
elif order == (1, 3, 2):
return ONZ.N_PLUS
elif order == (2, 3, 1):
return ONZ.N_MINUS
elif order == (2, 1, 3):
return ONZ.Z_PLUS
elif order == (3, 1, 2):
return ONZ.Z_MINUS
raise RuntimeError(f'Impossible combination: {ni} {nj} {nk} {nl}')
def __classify_by_gba(self) -> Optional[GbaTetradClassification]:
"""
See: Webba da Silva, M. (2007). Geometric Formalism for DNA Quadruplex Folding.
Chemistry - A European Journal, 13(35), 9738–9745. https://doi.org/10.1002/chem.200701255
:return: Classification according to Webba da Silva or n/a
"""
# without all nucleotides having a valid syn/anti, this classification is impossible
if not all([nt.chi_class in (GlycosidicBond.syn, GlycosidicBond.anti) for nt in self.nucleotides]):
return None
# this will create a 4-letter string made of 's' for syn or 'a' for anti
fingerprint = ''.join([nt.chi_class.value[0] for nt in self.nucleotides])
# this dict has all classes mapped to fingerprints
gba_classes = {
'aass': GbaTetradClassification.Ia,
'ssaa': GbaTetradClassification.Ib,
'asas': GbaTetradClassification.IIa,
'sasa': GbaTetradClassification.IIb,
'asaa': GbaTetradClassification.IIIa,
'sass': GbaTetradClassification.IIIb,
'aaas': GbaTetradClassification.IVa,
'sssa': GbaTetradClassification.IVb,
'aasa': GbaTetradClassification.Va,
'ssas': GbaTetradClassification.Vb,
'assa': GbaTetradClassification.VIa,
'saas': GbaTetradClassification.VIb,
'asss': GbaTetradClassification.VIIa,
'saaa': GbaTetradClassification.VIIb,
'aaaa': GbaTetradClassification.VIIIa,
'ssss': GbaTetradClassification.VIIIb
}
if fingerprint not in gba_classes:
logging.error(f'Impossible combination of syn/anti: {[nt.chi_class for nt in self.nucleotides]}')
return None
return gba_classes[fingerprint]
def __calculate_planarity_deviation(self) -> float:
outer = [nt.outermost_atom for nt in self.nucleotides]
inner = [nt.innermost_atom for nt in self.nucleotides]
return numpy.linalg.norm(center_of_mass(outer) - center_of_mass(inner))
@property
def nucleotides(self) -> Tuple[Residue3D, Residue3D, Residue3D, Residue3D]:
return self.nt1, self.nt2, self.nt3, self.nt4
def __hash__(self):
return hash(frozenset([self.nt1, self.nt2, self.nt3, self.nt4]))
def __str__(self):
return f' ' \
f'{self.nt1.full_name} {self.nt2.full_name} {self.nt3.full_name} {self.nt4.full_name} ' \
f'{self.pair_12.lw.value} {self.pair_23.lw.value} {self.pair_34.lw.value} {self.pair_41.lw.value} ' \
f'{self.onz.value} {self.gba_class.value} ' \
f'planarity={round(self.planarity_deviation, 2)} ' \
f'{self.__ions_channel_str()} ' \
f'{self.__ions_outside_str()}\n'
def chains(self) -> Set[str]:
return set([nt.chain for nt in self.nucleotides])
def is_disjoint(self, other) -> bool:
return frozenset(self.nucleotides).isdisjoint(frozenset(other.nucleotides))
def center(self) -> numpy.ndarray:
return center_of_mass(self.outer_and_inner_atoms())
def outer_and_inner_atoms(self) -> List[Atom3D]:
return list(map(lambda residue: residue.outermost_atom, self.nucleotides)) + \
list(map(lambda residue: residue.innermost_atom, self.nucleotides))
def __ions_channel_str(self) -> str:
if self.ions_channel:
return 'ions_channel=' + ','.join([atom.atomName for atom in self.ions_channel])
return ''
def __ions_outside_str(self) -> str:
if self.ions_outside:
result = []
for residue, ions in self.ions_outside.items():
result.append(f'{residue.full_name}: [{",".join([ion.atomName for ion in ions])}]')
return 'ions_outside=' + ' '.join(result)
return ''
@dataclass
class TetradPair:
tetrad1: Tetrad
tetrad2: Tetrad
stacked: Dict[Residue3D, Residue3D]
tetrad2_nts_best_order: Tuple[Residue3D, Residue3D, Residue3D, Residue3D] = field(init=False)
direction: Direction = field(init=False)
rise: float = field(init=False)
twist: float = field(init=False)
def __post_init__(self):
self.tetrad2_nts_best_order = (
self.stacked[self.tetrad1.nt1], self.stacked[self.tetrad1.nt2],
self.stacked[self.tetrad1.nt3], self.stacked[self.tetrad1.nt4]
)
self.direction = self.__determine_direction()
self.rise = self.__calculate_rise()
self.twist = self.__calculate_twist()
def __determine_direction(self) -> Direction:
indices1 = list(map(lambda nt: nt.index, self.tetrad1.nucleotides))
indices2 = list(map(lambda nt: nt.index, self.tetrad2_nts_best_order))
# count directions 5' -> 3' as +1 or -1
counter = Counter(1 if j - i > 0 else -1 for i, j in zip(indices1, indices2))
direction, count = counter.most_common()[0]
if count == 4:
# all in the same direction
return Direction.parallel
elif count == 2:
# two in one direction, two in the other
return Direction.antiparallel
return Direction.hybrid
def __calculate_rise(self) -> float:
t1 = self.tetrad1.outer_and_inner_atoms()
t2 = self.tetrad2.outer_and_inner_atoms()
return numpy.linalg.norm(center_of_mass(t1) - center_of_mass(t2))
def __calculate_twist(self) -> float:
nt1_1, nt1_2, _, _ = self.tetrad1.nucleotides
nt2_1, nt2_2, _, _ = self.tetrad2_nts_best_order
v1 = nt1_1.find_atom("C1'").coordinates() - nt1_2.find_atom("C1'").coordinates()
v1 = v1 / numpy.linalg.norm(v1)
v2 = nt2_1.find_atom("C1'").coordinates() - nt2_2.find_atom("C1'").coordinates()
v2 = v2 / numpy.linalg.norm(v2)
return math.degrees(numpy.arccos(numpy.clip(numpy.dot(v1, v2), -1.0, 1.0)))
def __str__(self):
return f' direction={self.direction.value} rise={round(self.rise, 2)} twist={round(self.twist, 2)}\n'
@dataclass
class Tract:
nucleotides: List[Residue3D]
def __str__(self):
return f' {", ".join(map(lambda nt: nt.full_name, self.nucleotides))}'
@dataclass
class Loop:
nucleotides: List[Residue3D]
loop_type: Optional[LoopType]
def __str__(self):
return f' {self.loop_type.value if self.loop_type else "n/a"} ' \
f'{", ".join(map(lambda nt: nt.full_name, self.nucleotides))}'
@dataclass
class Quadruplex:
tetrads: List[Tetrad]
tetrad_pairs: List[TetradPair]
structure3d: Structure3D
onzm: Optional[ONZM] = field(init=False)
gba_classes: List[GbaQuadruplexClassification] = field(init=False)
tracts: List[Tract] = field(init=False)
loops: List[Loop] = field(init=False)
loop_class: Optional[LoopClassification] = field(init=False)
def __post_init__(self):
self.onzm = self.__classify_onzm()
self.gba_classes = self.__classify_by_gba()
self.tracts = self.__find_tracts()
self.loops = self.__find_loops()
self.loop_class = self.__classify_by_loops()
def __classify_onzm(self) -> Optional[ONZM]:
if len(self.tetrads) == 1:
return None
if any([t.onz is None for t in self.tetrads]):
return None
counter = Counter([t.onz.value[0] for t in self.tetrads])
onz, support = counter.most_common()[0]
if support != len(self.tetrads):
onz = 'M'
counter = Counter([tp.direction.value[0] for tp in self.tetrad_pairs])
direction, support = counter.most_common()[0]
if support != len(self.tetrad_pairs):
direction = 'h'
counter = Counter([t.onz.value[1] for t in self.tetrads])
plus_minus, support = counter.most_common()[0]
if support != len(self.tetrads):
plus_minus = '*'
return ONZM.from_value(f'{onz}{direction}{plus_minus}')
def __classify_by_gba(self) -> List[GbaQuadruplexClassification]:
gbas = set()
for t in self.tetrads:
gba = t.gba_class
if gba is not None:
gbas.add(gba.value[:-1]) # discard 'a' or 'b' subvariant
roman_numerals = {'I': 1, 'II': 2, 'III': 3, 'IV': 4, 'V': 5, 'VI': 6, 'VII': 7, 'VIII': 8}
gbas = sorted(gbas, key=lambda gba: roman_numerals.get(gba, 100))
return list(map(lambda x: GbaQuadruplexClassification[x], gbas))
def __find_tracts(self) -> List[Tract]:
tracts = [[self.tetrads[0].nt1], [self.tetrads[0].nt2], [self.tetrads[0].nt3], [self.tetrads[0].nt4]]
if len(self.tetrad_pairs) > 0:
for tetrad_pair in self.tetrad_pairs:
nt_dict = {
tetrad_pair.tetrad1.nt1: tetrad_pair.tetrad2_nts_best_order[0],
tetrad_pair.tetrad1.nt2: tetrad_pair.tetrad2_nts_best_order[1],
tetrad_pair.tetrad1.nt3: tetrad_pair.tetrad2_nts_best_order[2],
tetrad_pair.tetrad1.nt4: tetrad_pair.tetrad2_nts_best_order[3],
}
for i in range(4):
tracts[i].append(nt_dict[tracts[i][-1]])
return [Tract(nts) for nts in tracts]
def __find_loops(self) -> List[Loop]:
if len(self.tetrads) == 1:
return []
loops = []
tetrad_nucleotides = sorted([nt for tetrad in self.tetrads for nt in tetrad.nucleotides],
key=lambda nt: nt.index)
for i in range(1, len(tetrad_nucleotides)):
nprev = tetrad_nucleotides[i - 1]
ncur = tetrad_nucleotides[i]
if ncur.index - nprev.index > 1 and ncur.chain == nprev.chain:
for tract in self.tracts:
if nprev in tract.nucleotides and ncur in tract.nucleotides:
break
else:
nts = list(filter(lambda nt: nprev.index < nt.index < ncur.index, self.structure3d.residues))
loop_type = self.__detect_loop_type(nprev, ncur)
loops.append(Loop(nts, loop_type))
return loops
def __detect_loop_type(self, nt_first: Residue3D, nt_last: Residue3D) -> Optional[LoopType]:
tetrad_with_first = self.__find_tetrad_with_nt(nt_first)
tetrad_with_last = self.__find_tetrad_with_nt(nt_last)
if tetrad_with_first is None or tetrad_with_last is None:
logging.warning(f'Failed to classify the loop between {nt_first} and {nt_last}')
return None
if tetrad_with_first == tetrad_with_last:
# diagonal or laterals happen when first and last nt of a loop is in the same tetrad
sign = self.__detect_loop_sign(nt_first, nt_last, tetrad_with_first)
if sign is not None:
return LoopType.from_value(f'lateral{sign}')
return LoopType.diagonal
tract_with_last = self.__find_tract_with_nt(nt_last)
if tract_with_last is not None:
# search along the tract to check what pairs with nt_first
for nt in tract_with_last.nucleotides:
if nt in tetrad_with_first.nucleotides:
sign = self.__detect_loop_sign(nt_first, nt, tetrad_with_first)
if sign is not None:
return LoopType.from_value(f'propeller{sign}')
logging.warning(f'Failed to classify the loop between {nt_first} and {nt_last}')
return None
def __find_tetrad_with_nt(self, nt: Residue3D) -> Optional[Tetrad]:
for tetrad in self.tetrads:
if nt in tetrad.nucleotides:
return tetrad
return None
def __find_tract_with_nt(self, nt: Residue3D) -> Optional[Tract]:
for tract in self.tracts:
if nt in tract.nucleotides:
return tract
return None
def __detect_loop_sign(self, first: Residue3D, last: Residue3D, tetrad: Tetrad) -> Optional[str]:
for pair in [tetrad.pair_12, tetrad.pair_23, tetrad.pair_34, tetrad.pair_41]:
# main check
if pair.nt1 == first and pair.nt2 == last:
if pair.score() < pair.reverse().score():
return '-'
return '+'
# reverse check
if pair.nt1 == last and pair.nt2 == first:
if pair.score() < pair.reverse().score():
return '+'
return '-'
return None
def __classify_by_loops(self) -> Optional[LoopClassification]:
if len(self.loops) != 3 or any([loop.loop_type is None for loop in self.loops]):
return None
loop_classes = {
'ppp': '1',
'ppl': '2',
'plp': '3',
'lpp': '4',
'pdp': '5',
'lll': '6',
'llp': '7',
'lpl': '8',
'pll': '9',
'pdl': '10',
'ldl': '11',
'dpd': '12',
'ldp': '13'
}
fingerprint = ''.join([loop.loop_type.value[0] for loop in self.loops])
if fingerprint not in loop_classes:
logging.error(f'Unknown loop classification: {fingerprint}')
return None
subtype = 'a' if self.loops[0 if fingerprint != 'dpd' else 1].loop_type.value[-1] == '-' else 'b'
return LoopClassification.from_value(f'{loop_classes[fingerprint]}{subtype}')
def __str__(self):
builder = ''
if len(self.tetrads) == 1:
builder += ' single tetrad\n'
builder += str(self.tetrads[0])
else:
builder += f' {self.onzm.value if self.onzm is not None else "R"}'
builder += f' {",".join(map(lambda gba: gba.value, self.gba_classes))}'
if self.loop_class:
builder += f' {self.loop_class.value} {self.loop_class.loop_progression()}'
else:
builder += f' n/a'
builder += f' quadruplex with {len(self.tetrads)} tetrads\n'
builder += str(self.tetrad_pairs[0].tetrad1)
for tetrad_pair in self.tetrad_pairs:
builder += str(tetrad_pair)
builder += str(tetrad_pair.tetrad2)
if self.tracts:
builder += '\n Tracts:\n'
for tract in self.tracts:
builder += f'{tract}\n'
if self.loops:
builder += '\n Loops:\n'
for loop in self.loops:
builder += f'{loop}\n'
builder += '\n'
return builder
@dataclass
class Helix:
tetrads: List[Tetrad]
tetrad_pairs: List[TetradPair]
structure3d: Structure3D
quadruplexes: List[Quadruplex] = field(init=False)
def __post_init__(self):
self.quadruplexes = self.__find_quadruplexes()
def __find_quadruplexes(self):
if len(self.tetrad_pairs) == 0:
return [Quadruplex(self.tetrads, [], self.structure3d)]
quadruplexes = list()
tetrads = list()
for tetrad in [self.tetrad_pairs[0].tetrad1] + [tetrad_pair.tetrad2 for tetrad_pair in self.tetrad_pairs]:
if tetrads:
if tetrad.chains().isdisjoint(tetrads[-1].chains()):
quadruplexes.append(Quadruplex(tetrads, self.__filter_tetrad_pairs(tetrads), self.structure3d))
tetrads = list()
tetrads.append(tetrad)
quadruplexes.append(Quadruplex(tetrads, self.__filter_tetrad_pairs(tetrads), self.structure3d))
return quadruplexes
def __filter_tetrad_pairs(self, tetrads: List[Tetrad]) -> List[TetradPair]:
chains = set()
for tetrad in tetrads:
chains.update(tetrad.chains())
def check_tetrad(t: Tetrad) -> bool:
return not t.chains().isdisjoint(chains)
def check_pair(tp: TetradPair) -> bool:
return check_tetrad(tp.tetrad1) and check_tetrad(tp.tetrad2)
return list(filter(check_pair, self.tetrad_pairs))
def __str__(self):
builder = ''
if len(self.tetrads) > 1:
builder += f'n4-helix with {len(self.tetrads)} tetrads\n'
for quadruplex in self.quadruplexes:
builder += str(quadruplex)
elif len(self.tetrads) == 1:
builder += 'single tetrad without stacking\n'
builder += str(self.tetrads[0])
return builder
@dataclass
class Analysis:
structure2d: Structure2D
structure3d: Structure3D
strict: bool
no_reorder: bool
stacking_mismatch: int
base_pairs: List[BasePair3D] = field(init=False)
base_pair_graph: Dict[Residue3D, List[Residue3D]] = field(init=False)
base_pair_dict: Dict[Tuple[Residue3D, Residue3D], BasePair3D] = field(init=False)
stacking_graph: Dict[Residue3D, List[Residue3D]] = field(init=False)
tetrads: List[Tetrad] = field(init=False)
tetrad_scores: Dict[Tetrad, Dict[Tetrad, Tuple[int, Tuple, Tuple]]] = field(init=False)
tetrad_pairs: List[TetradPair] = field(init=False)
helices: List[Helix] = field(init=False)
ions: List[Atom3D] = field(init=False)
sequence: str = field(init=False)
line1: str = field(init=False)
line2: str = field(init=False)
shifts: Dict[Residue3D, int] = field(init=False)
def __post_init__(self):
self.base_pairs = self.structure3d.base_pairs(self.structure2d)
self.base_pair_graph = self.structure3d.base_pair_graph(self.structure2d, self.strict)
self.base_pair_dict = self.structure3d.base_pair_dict(self.structure2d, self.strict)
self.stacking_graph = self.structure3d.stacking_graph(self.structure2d)
self.tetrads = self.__find_tetrads(self.no_reorder)
self.tetrad_scores = self.__calculate_tetrad_scores()
self.tetrad_pairs = self.__find_tetrad_pairs(self.stacking_mismatch)
self.helices = self.__find_helices()
if not self.no_reorder:
self.__find_best_chain_order()
self.sequence, self.line1, self.line2, self.shifts = self.__generate_twoline_dotbracket()
self.ions = self.__find_ions()
self.__assign_ions_to_tetrads()
def __find_tetrads(self, no_reorder=False) -> List[Tetrad]:
# search for a tetrad: i -> j -> k -> l
# ^--------------^
tetrads = []
for i in self.base_pair_graph:
for j in filter(lambda x: x != i, self.base_pair_graph[i]):
for k in filter(lambda x: x not in (i, j), self.base_pair_graph[j]):
for l in filter(lambda x: x not in (i, j, k) and i in self.base_pair_graph[x],
self.base_pair_graph[k]):
if Tetrad.is_valid(i, j, k, l, self.base_pair_dict):
pair_12 = self.base_pair_dict[(i, j)]
pair_23 = self.base_pair_dict[(j, k)]
pair_34 = self.base_pair_dict[(k, l)]
pair_41 = self.base_pair_dict[(l, i)]
tetrads.append(Tetrad(i, j, k, l, pair_12, pair_23, pair_34, pair_41))
# build graph of tetrads
while tetrads:
graph = defaultdict(list)
for (ti, tj) in itertools.combinations(tetrads, 2):
if not ti.is_disjoint(tj):
graph[ti].append(tj)
graph[tj].append(ti)
# remove tetrad which conflicts the most with others
# in case of a tie, remove one which has the worst planarity deviation
candidates = sorted(tetrads, key=lambda t: (len(graph[t]), t.planarity_deviation),
reverse=True)
if len(graph[candidates[0]]) > 0:
tetrads.remove(candidates[0])
else:
break
return sorted(tetrads, key=lambda t: min(map(lambda nt: nt.index, t.nucleotides)))
def __calculate_tetrad_scores(self) \
-> Dict[Tetrad, Dict[Tetrad, Tuple[int, Tuple, Tuple]]]:
def is_next_by_stacking(nt1: Residue3D, nt2: Residue3D) -> bool:
return nt2 in self.stacking_graph.get(nt1, [])
def is_next_sequentially(nt1: Residue3D, nt2: Residue3D) -> bool:
return nt1.chain == nt2.chain and abs(nt1.index - nt2.index) == 1
tetrad_scores = defaultdict(dict)
for ti, tj in itertools.combinations(self.tetrads, 2):
nts1 = ti.nucleotides
best_score = 0
best_score_sequential = 0
best_score_stacking = 0
best_order = tj.nucleotides
n1, n2, n3, n4 = tj.nucleotides
viable_permutations = [(n1, n2, n3, n4), (n2, n3, n4, n1), (n3, n4, n1, n2), (n4, n1, n2, n3),
(n1, n4, n3, n2), (n4, n3, n2, n1), (n3, n2, n1, n4), (n2, n1, n4, n3)]
for nts2 in viable_permutations:
score_stacking = [1 if is_next_by_stacking(nts1[i], nts2[i]) else 0 for i in range(4)]
score_sequential = [1 if is_next_sequentially(nts1[i], nts2[i]) else 0 for i in range(4)]
score = sum([max(score_stacking[i], score_sequential[i]) for i in range(4)])
score_sequential = sum(score_sequential)
score_stacking = sum(score_stacking)
if (score, score_sequential, score_stacking) > (best_score, best_score_sequential, best_score_stacking):
best_score, best_score_sequential, best_score_stacking = score, score_sequential, score_stacking
best_order = nts2
if best_score == 4:
break
tetrad_scores[ti][tj] = (best_score, nts1, best_order)
tetrad_scores[tj][ti] = (best_score, best_order, nts1)
return tetrad_scores
def __find_tetrad_pairs(self, stacking_mismatch: int) -> List[TetradPair]:
tetrads = list(self.tetrads)
best_score = 0
best_order = tetrads
for ti in tetrads:
score = 0
order = [ti]
candidates = set(self.tetrads) - {ti}
while candidates:
tj = max([tj for tj in candidates], key=lambda tk: self.tetrad_scores[ti][tk][0])
score += self.tetrad_scores[ti][tj][0]
order.append(tj)
candidates.remove(tj)
ti = tj
if score > best_score:
best_score = score
best_order = order
if best_score == (len(self.tetrads) - 1) * 4:
break
tetrad_pairs = []
for i in range(1, len(best_order)):
ti, tj = best_order[i - 1], best_order[i]
score = self.tetrad_scores[ti][tj][0]
if score >= (4 - stacking_mismatch):
nts1, nts2 = self.tetrad_scores[ti][tj][1:]
stacked = {nts1[i]: nts2[i] for i in range(4)}
stacked.update({v: k for k, v in stacked.items()})
tetrad_pairs.append(TetradPair(ti, tj, stacked))
order = (stacked[ti.nt1], stacked[ti.nt2], stacked[ti.nt3], stacked[ti.nt4])
tj.reorder_to_match_other_tetrad(order)
return tetrad_pairs
def __find_helices(self):
helices = []
helix_tetrads = []
helix_tetrad_pairs = []
for tp in self.tetrad_pairs:
ti, tj = tp.tetrad1, tp.tetrad2
if not helix_tetrads:
helix_tetrads.append(ti)
score = self.tetrad_scores[helix_tetrads[-1]][tj][0]
if score >= (4 - self.stacking_mismatch):
helix_tetrads.append(tj)
helix_tetrad_pairs.append(tp)
else:
helices.append(Helix(helix_tetrads, helix_tetrad_pairs, self.structure3d))
helix_tetrads = [ti, tj]
helix_tetrad_pairs = [tp]
if helix_tetrads:
helices.append(Helix(helix_tetrads, helix_tetrad_pairs, self.structure3d))
for tetrad in self.tetrads:
if not any([tetrad in helix.tetrads for helix in helices]):
helices.append(Helix([tetrad], [], self.structure3d))
return helices
def __find_best_chain_order(self):
chain_groups = self.__group_related_chains()
final_order = []
for chains in chain_groups:
best_permutation, best_score = chains, (1e10, 1e10)
if len(chains) > 1:
for permutation in itertools.permutations(chains):
self.__reorder_chains(permutation)
classifications = [t.onz for h in self.helices for t in h.tetrads]
logging.debug(
f'Checking reorder: {" ".join(permutation)} {" ".join(map(lambda c: c.value, classifications))}')
onz_score = sum(c.score() for c in classifications)
chain_order_score = self.__chain_order_score(permutation)
score = (onz_score, chain_order_score)
if score < best_score:
best_score = score
best_permutation = permutation
elif score == best_score:
# in case of a tie, pick permutation earlier in lexicographical sense
if permutation < best_permutation:
best_permutation = permutation
final_order.extend(best_permutation)
if len(final_order) > 1:
self.__reorder_chains(final_order)
classifications = [t.onz for h in self.helices for t in h.tetrads]
logging.debug(f'Selected chain order: {" ".join(final_order)} '
f'{" ".join(map(lambda onz: onz.value, classifications))}')
self.tetrads = self.__find_tetrads(True)
self.tetrad_scores = self.__calculate_tetrad_scores()
self.tetrad_pairs = self.__find_tetrad_pairs(self.stacking_mismatch)
self.helices = self.__find_helices()
def __group_related_chains(self) -> List[List[str]]:
candidates = set()
for h in self.helices:
for t in h.tetrads:
candidates.add(frozenset([t.nt1.chain, t.nt2.chain, t.nt3.chain, t.nt4.chain]))
candidates = [set(c) for c in candidates]
changed = True
while changed:
changed = False
for i, j in itertools.combinations(range(len(candidates)), 2):
qi, qj = candidates[i], candidates[j]
if not qi.isdisjoint(qj):
qi.update(qj)
del candidates[j]
changed = True
break
candidates = sorted(candidates, key=lambda x: len(x), reverse=True)
groups = []
for candidate in candidates:
if any([group.issuperset(candidate) for group in groups]):
continue
groups.append(candidate)
return sorted([sorted(group) for group in groups], key=lambda x: x[0])
def __reorder_chains(self, chain_order: Iterable[str]):
i = 1
for chain in chain_order:
for nt in self.structure3d.residues:
if nt.chain == chain:
nt.index = i
i += 1
for nt in self.structure3d.residues:
if nt.chain not in chain_order:
nt.index = i
i += 1
if len(self.tetrad_pairs) > 0:
self.tetrad_pairs[0].tetrad1.reorder_to_match_5p_3p()
for tp in self.tetrad_pairs:
order = (tp.stacked[tp.tetrad1.nt1], tp.stacked[tp.tetrad1.nt2],
tp.stacked[tp.tetrad1.nt3], tp.stacked[tp.tetrad1.nt4])
tp.tetrad2.reorder_to_match_5p_3p() # this is required to recalculate ONZ
tp.tetrad2.reorder_to_match_other_tetrad(order)
def __chain_order_score(self, chain_order: Tuple[str, ...]) -> int:
chain_pairs = []
for h in self.helices:
for t in h.tetrads:
for p in [t.pair_12, t.pair_23, t.pair_34, t.pair_41]:
c1 = p.nt1.chain
c2 = p.nt2.chain
if c1 != c2 and c1 in chain_order and c2 in chain_order:
chain_pairs.append([c1, c2])
sum_sq = 0
for c1, c2 in chain_pairs:
sum_sq += (chain_order.index(c1) - chain_order.index(c2)) ** 2
return sum_sq
def __find_ions(self) -> List[Atom3D]:
metal_atom_names = set([ion.value.upper() for ion in Ion])
ions = []
used = set()
for residue in self.structure3d.residues:
for atom in residue.atoms:
if atom.atomName.upper() in metal_atom_names:
coordinates = tuple(atom.coordinates())
if coordinates not in used:
ions.append(atom)
used.add(coordinates)
return ions
def __assign_ions_to_tetrads(self) -> None:
if len(self.tetrads) == 0:
return
ions_channel = defaultdict(list)
ions_outside = defaultdict(list)
for ion in self.ions:
min_distance = math.inf
min_tetrad = self.tetrads[0]
for tetrad in self.tetrads:
distance = numpy.linalg.norm(ion.coordinates() - tetrad.center())
if distance < min_distance:
min_distance = distance
min_tetrad = tetrad
# TODO: verify threshold of 6A between an ion and tetrad channel
if min_distance < 6.0:
ions_channel[min_tetrad].append(ion)
continue
min_distance = math.inf
min_tetrad = self.tetrads[0]
min_nt = min_tetrad.nt1
for tetrad in self.tetrads:
for nt in tetrad.nucleotides:
for atom in nt.atoms:
distance = numpy.linalg.norm(ion.coordinates() - atom.coordinates())
if distance < min_distance:
min_distance = distance
min_tetrad = tetrad
min_nt = nt
# TODO: verify threshold of 3A between an ion and an atom
if min_distance < 3.0:
ions_outside[(min_tetrad, min_nt)].append(ion)
continue
logging.debug(f'Skipping an ion, because it is too far from any tetrad (distance={min_distance})')
for tetrad, ions in ions_channel.items():
tetrad.ions_channel = ions
for pair, ions in ions_outside.items():
tetrad, residue = pair
tetrad.ions_outside[residue] = ions
def __generate_twoline_dotbracket(self) -> Tuple[str, str, str, Dict[Residue3D, int]]:
layer1, layer2 = [], []
for tetrad in self.tetrads:
layer1.extend([tetrad.pair_12, tetrad.pair_34])
layer2.extend([tetrad.pair_23, tetrad.pair_41])
sequence, line1, shifts = self.__elimination_conflicts(layer1)
_, line2, _ = self.__elimination_conflicts(layer2)
return sequence, line1, line2, shifts
def __elimination_conflicts(self, pairs: List[BasePair3D]) -> Tuple[str, str, Dict[Residue3D, int]]:
orders = dict()
order = 0
queue = list(pairs)
removed = []
while queue:
conflicts = defaultdict(list)
for pi, pj in itertools.combinations(queue, 2):
if pi.conflicts_with(pj):
conflicts[pi].append(pj)
conflicts[pj].append(pi)
if conflicts:
pair, _ = max(conflicts.items(), key=lambda x: (len(x[1]), x[0].nt1))
removed.append(pair)
queue.remove(pair)
else:
orders.update({pair: order for pair in queue})
queue, removed = removed, []
order += 1
opening = '([{<' + string.ascii_uppercase
closing = ')]}>' + string.ascii_lowercase
dotbracket = dict()
for pair, order in orders.items():
nt1, nt2 = sorted([pair.nt1, pair.nt2])
dotbracket[nt1] = opening[order]
dotbracket[nt2] = closing[order]
sequence = ''
structure = ''
shifts = dict()
shift_value = 0
chain = None
for nt in sorted(filter(lambda nt: nt.is_nucleotide, self.structure3d.residues), key=lambda nt: nt.index):
if chain and chain != nt.chain:
sequence += '-'
structure += '-'
shift_value += 1
sequence += nt.one_letter_name
structure += dotbracket.get(nt, '.')
shifts[nt] = shift_value
chain = nt.chain
return sequence, structure, shifts
def __str__(self):
builder = f'Chain order: {" ".join(self.__chain_order())}\n'
for helix in self.helices:
builder += str(helix)
builder += f'{self.sequence}\n{self.line1}\n{self.line2}'
return builder
def __chain_order(self) -> List[str]:
only_nucleic_acids = filter(lambda nt: nt.is_nucleotide, self.structure3d.residues)
return list({nt.chain: 0 for nt in sorted(only_nucleic_acids, key=lambda nt: nt.index)}.keys())
def canonical(self) -> List[BasePair3D]:
return [base_pair for base_pair in self.base_pairs if base_pair.is_canonical()]
@dataclass
class Visualizer:
analysis: Analysis
tetrads: List[Tetrad]
complete2d: bool
onz_dict: Dict[BasePair3D, ONZ] = field(init=False)
def __post_init__(self):
self.onz_dict = {pair: tetrad.onz for tetrad in self.tetrads for pair in
[tetrad.pair_12, tetrad.pair_23, tetrad.pair_34, tetrad.pair_41]}
def visualize(self, prefix: str, suffix: str):
fasta = tempfile.NamedTemporaryFile('w+', suffix='.fasta')
fasta.write(f'>{prefix}-{suffix}\n')
fasta.write(self.analysis.sequence)
fasta.flush()
layer1, layer2 = [], []
for tetrad in self.tetrads:
layer1.extend([tetrad.pair_12, tetrad.pair_34])
layer2.extend([tetrad.pair_23, tetrad.pair_41])
helix1 = self.__to_helix(layer1, self.analysis.canonical() if self.complete2d else [])
helix2 = self.__to_helix(layer2)
currdir = os.path.dirname(os.path.realpath(__file__))
output_pdf = f'{prefix}-{suffix}.pdf'
run = subprocess.run([os.path.join(currdir, 'quadraw.R'), fasta.name, helix1.name, helix2.name, output_pdf],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if run.returncode == 0:
print('\nPlot:', output_pdf)
else:
logging.error(f'Failed to prepare visualization, reason:\n {run.stderr.decode()}')
def __to_helix(self, layer: List[BasePair3D],
canonical: Optional[List[BasePair3D]] = None) -> 'tempfile.NamedTemporaryFile':
onz_value = {ONZ.O_PLUS: 1, ONZ.O_MINUS: 2, ONZ.N_PLUS: 3, ONZ.N_MINUS: 4, ONZ.Z_PLUS: 5, ONZ.Z_MINUS: 6}
nucleotides = self.analysis.structure3d.residues
shifts = self.analysis.shifts
helix = tempfile.NamedTemporaryFile('w+', suffix='.helix')
helix.write(f'#{len(self.analysis.sequence) + 1}\n')
helix.write('i\tj\tlength\tvalue\n')
for pair in layer:
x, y = pair.nt1, pair.nt2
x, y = nucleotides.index(x) + 1 + shifts[x], nucleotides.index(y) + 1 + shifts[y]
onz = self.onz_dict[pair]
helix.write(f'{x}\t{y}\t1\t{onz_value.get(onz, 7)}\n')
if canonical:
for pair in canonical:
x, y = pair.nt1, pair.nt2
x, y = nucleotides.index(x) + 1 + shifts[x], nucleotides.index(y) + 1 + shifts[y]
helix.write(f'{x}\t{y}\t1\t8\n')
helix.flush()
return helix
class AnalysisSimple:
def __init__(self, structure2d: Structure2D, structure3d: Structure3D):
self.pairs: List[BasePair3D] = structure3d.base_pairs(structure2d)
self.graph: Dict[Residue3D, List[Residue3D]] = structure3d.base_pair_graph(structure2d)
self.pair_dict: Dict[Tuple[Residue3D, Residue3D], BasePair3D] = structure3d.base_pair_dict(structure2d)
def has_tetrads(self):
tetrads = set()
for i in self.graph:
for j in filter(lambda x: x != i, self.graph[i]):
for k in filter(lambda x: x not in (i, j), self.graph[j]):
for l in filter(lambda x: x not in (i, j, k) and x in self.graph[i], self.graph[k]):
if Tetrad.is_valid(i, j, k, l, self.pair_dict):
tetrads.add(frozenset([i, j, k, l]))
if len(tetrads) > 1:
return True
return False
def center_of_mass(atoms):
coords = [atom.coordinates() for atom in atoms]
xs = (coord[0] for coord in coords)
ys = (coord[1] for coord in coords)
zs = (coord[2] for coord in coords)
return numpy.array((sum(xs) / len(coords), sum(ys) / len(coords), sum(zs) / len(coords)))
def eltetrado(structure2d: Structure2D, structure3d: Structure3D, strict: bool, no_reorder: bool,
stacking_mismatch: int) -> Analysis:
return Analysis(structure2d, structure3d, strict, no_reorder, stacking_mismatch)
def has_tetrad(structure2d: Structure2D, structure3d: Structure3D) -> bool:
structure = AnalysisSimple(structure2d, structure3d)
return structure.has_tetrads()
| 1.867188 | 2 |
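A hedged sketch of how the public entry points at the bottom of analysis.py (eltetrado, has_tetrad, Visualizer) might be driven. Building Structure2D/Structure3D depends on eltetrado.model and is not shown here, and the argument values (strict=True, stacking_mismatch=2) and prefix/suffix strings are illustrative assumptions rather than documented defaults.

# Hypothetical driver for the analysis module above.
from eltetrado.analysis import Visualizer, eltetrado, has_tetrad
from eltetrado.model import Structure2D, Structure3D

def run_analysis(structure2d: Structure2D, structure3d: Structure3D) -> None:
    if not has_tetrad(structure2d, structure3d):
        print('No tetrads detected')
        return
    analysis = eltetrado(structure2d, structure3d,
                         strict=True, no_reorder=False, stacking_mismatch=2)
    print(analysis)  # chain order, helices/quadruplexes and the two-line dot-bracket
    # Visualizer.visualize renders a PDF by shelling out to the bundled quadraw.R script
    Visualizer(analysis, analysis.tetrads, complete2d=True).visualize('structure', 'layers')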
cubes/__init__.py | lmjohns3/cube-experiment | 0 | 12799955 | from .database import Experiment, Movement, Trial
from . import plots
from . import utils
| 1.023438 | 1 |
tests/test_ndvisummarytimeseries.py | tmilliman/python-vegindex | 11 | 12799956 |
# -*- coding: utf-8 -*-
"""
test_ndvisummarytimeseries
--------------------------
Tests for `vegindex.ndvi_summary_timeseries` module.
"""
import os
import numpy as np
from PIL import Image
from pkg_resources import Requirement
from pkg_resources import resource_filename
from vegindex import config
from vegindex import ndvi_summary_timeseries
from vegindex import vegindex as vi
SAMPLE_DATA_DIR = os.path.join(os.path.dirname(__file__), "sample_data")
config.archive_dir = SAMPLE_DATA_DIR
def test_reading_ndvits_summary_file():
"""
test reading in existing ndvits summary timeseries
"""
sitename = "dukehw"
roiname = "DB_1000"
ndvi_file = "{}_{}_ndvi_3day.csv".format(sitename, roiname)
# set up path to roistats file
ndvi_path = os.path.join(SAMPLE_DATA_DIR, sitename, "ROI", ndvi_file)
ndvits = ndvi_summary_timeseries.NDVISummaryTimeSeries(
site=sitename, ROIListID=roiname
)
ndvits.readCSV(ndvi_path)
first_row = ndvits.rows[0]
last_row = ndvits.rows[-1]
# test that we're getting header metadata correctly
np.testing.assert_equal(ndvits.site, "dukehw")
np.testing.assert_equal(ndvits.nday, 3)
np.testing.assert_equal(ndvits.roitype, "DB")
np.testing.assert_equal(ndvits.sequence_number, "1000")
# spot check a couple of rows
np.testing.assert_equal(
last_row["midday_rgb_filename"], "dukehw_2020_07_15_115405.jpg"
)
np.testing.assert_equal(
last_row["midday_ir_filename"], "dukehw_IR_2020_07_15_115405.jpg"
)
np.testing.assert_equal(first_row["ndvi_mean"], 0.22027)
np.testing.assert_equal(first_row["ndvi_std"], 0.16966)
np.testing.assert_equal(first_row["max_solar_elev"], 75.9963)
np.testing.assert_equal(len(ndvits.rows), 870)
def test_get_ndvi_summary_file():
"""
test reading in existing ndvits summary timeseries using the
helper function.
"""
sitename = "dukehw"
roiname = "DB_1000"
ndvits = vi.get_ndvi_summary(sitename, roiname, nday=3)
first_row = ndvits.rows[0]
last_row = ndvits.rows[-1]
# test that we're getting header metadata correctly
np.testing.assert_equal(ndvits.site, "dukehw")
np.testing.assert_equal(ndvits.nday, 3)
np.testing.assert_equal(ndvits.roitype, "DB")
np.testing.assert_equal(ndvits.sequence_number, "1000")
# spot check a couple of rows
np.testing.assert_equal(
last_row["midday_rgb_filename"], "dukehw_2020_07_15_115405.jpg"
)
np.testing.assert_equal(
last_row["midday_ir_filename"], "dukehw_IR_2020_07_15_115405.jpg"
)
np.testing.assert_equal(first_row["ndvi_mean"], 0.22027)
np.testing.assert_equal(first_row["ndvi_std"], 0.16966)
np.testing.assert_equal(first_row["max_solar_elev"], 75.9963)
np.testing.assert_equal(len(ndvits.rows), 870)
| 2.375 | 2 |
GreenPonik_Atlas_Scientific_OEM_i2c/CommonsI2c.py | GreenPonik/GreenPonik_Atlas_Scientific_OEM_i2c | 0 | 12799957 | #! /usr/bin/python3
"""
Description
-----------
Class to communicate with Atlas Scientific OEM sensors in I2C mode.
Atlas Scientific i2c by GreenPonik
Source code is based on Atlas Scientific documentations:
https://www.atlas-scientific.com/files/EC_oem_datasheet.pdf
https://atlas-scientific.com/files/oem_pH_datasheet.pdf
"""
import time
from GreenPonik_Atlas_Scientific_OEM_i2c.AtlasOEMI2c import _AtlasOEMI2c
class _CommonsI2c(_AtlasOEMI2c):
"""
commons methods for EC and PH OEM circuits
"""
def _convert_raw_hex_to_float(self, byte_array):
"""
convert bytearray response to float result
return float converted value
"""
hexstr = byte_array.hex()
float_from_hexa = float.fromhex(byte_array.hex())
converted = float_from_hexa
if self.debug:
print("Byte Array to decode: ", byte_array)
print("Byte Array decoded to hexa string: %s" % hexstr)
print("float from hexa: %.3f" % float_from_hexa)
return converted
def _check_calibration_confirm(self, confirm):
"""
check the response of calibration confirm register
"""
if self.debug:
if hex(0x00) == hex(confirm):
print("Calibration applied")
else:
raise Exception("Cannot confirm the operation was correctly executed")
# ----- Getters ----- ########
def get_device_info(self):
"""
Get device information
@return string module type, firmware version
"""
if "EC" == self.moduletype or "PH" == self.moduletype:
info = self.read(
self.OEM_EC_REGISTERS["device_type"],
self.TWO_BYTE_READ,
)
return "SUCCESS: %s, module type: %s and firmware is: %s" % (
self.moduletype,
info[0],
info[1],
)
def get_type(self):
"""
Read sensor type
@return int the sensor type (1=EC, 4=PH)
"""
if "EC" == self.moduletype or "PH" == self.moduletype:
device_type = self.read(
self.OEM_EC_REGISTERS["device_type"],
self.ONE_BYTE_READ,
)
if self.debug:
print("Device type is: %s" % device_type)
return device_type
def get_firmware(self):
"""
Read sensor firmware
@return int the firmware revision
"""
if "EC" == self.moduletype or "PH" == self.moduletype:
firmware = self.read(
self.OEM_EC_REGISTERS["device_firmware"],
self.ONE_BYTE_READ,
)
if self.debug:
print("Firmware type is: %s" % firmware)
return firmware
def get_new_read_available(self):
"""
New Read is available
@return int 1 if new read available, 0 if not
"""
is_new_read = self.read(
self.OEM_EC_REGISTERS["device_new_reading"],
self.ONE_BYTE_READ,
)
return is_new_read
def get_read(self):
"""
Read sensor value
@return float the sensor value
"""
# self.set_wakeup_sleep_mode(0x01) # wake device before read
time.sleep(self._long_timeout)
if "EC" == self.moduletype:
rawhex = self.read(
self.OEM_EC_REGISTERS["device_ec_msb"],
self.FOUR_BYTE_READ,
)
value = self._convert_raw_hex_to_float(rawhex) / 100
elif "PH" == self.moduletype:
rawhex = self.read(
self.OEM_PH_REGISTERS["device_ph_msb"],
self.FOUR_BYTE_READ,
)
value = self._convert_raw_hex_to_float(rawhex) / 1000
if self.debug:
print(
"%s: %s%s"
% (
self.moduletype,
value,
"µs" if "EC" == self.moduletype else "",
)
)
# self.set_wakeup_sleep_mode(0x00) # sleep device after read
return value
def get_temperature(self):
"""
Get current compensation temperature
@return float temperature value
"""
if "EC" == self.moduletype:
rawhex = self.read(
self.OEM_EC_REGISTERS["device_temperature_comp_msb"],
self.FOUR_BYTE_READ,
)
elif "PH" == self.moduletype:
rawhex = self.read(
self.OEM_PH_REGISTERS["device_temperature_comp_msb"],
self.FOUR_BYTE_READ,
)
value = self._convert_raw_hex_to_float(rawhex) / 100
if self.debug:
print("%s compensation Temperature: %s°c" % (self.moduletype, value))
return value
def get_calibration(self):
"""
Get current calibrations data
:return: string with current points calibrated
:rtype:
"""
if "EC" == self.moduletype:
register = self.OEM_EC_REGISTERS["device_calibration_confirm"]
""" bits
- "dry": 0,
- "single": 1,
- "low": 2,
- "high": 3,
"""
binary_calib_status = self.EC_BINARY_CALIB_STATUS
elif "PH" == self.moduletype:
register = self.OEM_PH_REGISTERS["device_calibration_confirm"]
""" bits
- "low": 1,
- "mid": 2,
- "high": 3,
"""
binary_calib_status = self.PH_BINARY_CALIB_STATUS
r = self.read(register)
if self.debug:
print("Binary result from OEM", r)
print("Who is calibrated? >", binary_calib_status[r])
return binary_calib_status[r]
def get_led(self) -> int:
"""
Get led state
register is the same for EC and PH OEM circuit
:return: int 0x00 = OFF or 0x01 = ON
:rtype: int
"""
register = self.OEM_EC_REGISTERS["device_led"]
led_status = self.read(register)
if self.debug:
print("Led status is currently: %s" % hex(led_status))
return led_status
def get_wakeup_sleep_mode(self) -> int:
"""
get Active or Hibernate device mode
register is the same for EC and PH OEM circuit
:return: int 0x01 = WakeUp or 0x00 = Hibernate
:rtype: int
"""
register = self.OEM_EC_REGISTERS["device_sleep"]
mode = self.read(register)
if self.debug:
print(
"Device is currently in mode: %s"
% ("wakeup" if hex(0x01) == hex(mode) else "sleep")
)
return mode
# ----- Setters ----- ########
def set_temperature(self, t=25.0):
"""Set the compensation temperature
:param t: float temperature value
"""
self.set_wakeup_sleep_mode(0x01) # wake device before set temperature
time.sleep(self._long_timeout)
if "EC" == self.moduletype:
register = self.OEM_EC_REGISTERS["device_temperature_comp_msb"]
elif "PH" == self.moduletype:
register = self.OEM_PH_REGISTERS["device_temperature_comp_msb"]
byte_array = int(round(t * 100)).to_bytes(4, "big")
if self.debug:
print("Temperature to set: %.2f" % t)
print(
"%s sent converted temp to bytes: " % (self.moduletype),
byte_array,
)
time.sleep(self.short_timeout)
self.write(register, byte_array)
self.set_wakeup_sleep_mode(0x00) # sleep device after set temperature
def _set_calibration_registers(self, value):
"""calibration registers
do not use alone because calibration is apply by using set_calibration_apply
/!in float micro siemens µS for EC/!
/! in float for pH/!
"""
if "EC" == self.moduletype:
register = self.OEM_EC_REGISTERS["device_calibration_msb"]
# ec calibration wait for µSiemens
byte_array = int(round(value * 100)).to_bytes(4, "big")
elif "PH" == self.moduletype:
register = self.OEM_PH_REGISTERS["device_calibration_msb"]
byte_array = int(round(value * 1000)).to_bytes(4, "big")
self.write(register, byte_array)
if self.debug:
print("Value to send: %.2f" % value)
print(
"%s sent converted value to bytes: " % (self.moduletype),
byte_array,
)
def set_calibration_apply(self, value, point=""):
"""apply the calibration
:param value: float solution calibration value converted in float. EC waiting for µS e.g. 1.413 = > 1413.0
:param point: string "dry", "single", "low", "mid", "high" only
"""
if point not in ("dry", "single", "low", "mid", "high"):
raise Exception(
'missing string point argument, \
can only be "dry", "single", "low", "mid", "high"'
)
if "EC" == self.moduletype:
points = {"dry": 0x02, "single": 0x03, "low": 0x04, "high": 0x05}
register = self.OEM_EC_REGISTERS["device_calibration_request"]
elif "PH" == self.moduletype:
points = {"low": 0x02, "mid": 0x03, "high": 0x04}
register = self.OEM_PH_REGISTERS["device_calibration_request"]
self._set_calibration_registers(value)
time.sleep(self.long_timeout)
self.write(register, points[point]) # apply point calibration data
time.sleep(self.short_timeout) # wait before read register to get confirmation
conf = self.read(register)
self._check_calibration_confirm(conf)
return conf
def set_calibration_clear(self):
"""clear calibration data
"""
if "EC" == self.moduletype:
register = self.OEM_EC_REGISTERS["device_calibration_request"]
elif "PH" == self.moduletype:
register = self.OEM_PH_REGISTERS["device_calibration_request"]
self.write(register, 0x01) # send 0x01 to clear calibration data
time.sleep(self.short_timeout) # wait before read register to get confirmation
conf = self.read(register)
self._check_calibration_confirm(conf)
return conf
def set_i2c_addr(self, addr):
"""Change the device i2c address
:param addr: int = new i2c add
"""
if addr not in self.ADDR_OEM_HEXA and addr not in self.ADDR_OEM_DECIMAL:
raise Exception(
"only decimal address expected, convert hexa by using \
AtlasI2c.ADDR_OEM_DECIMAL or AtlasI2c.ADDR_EZO_DECIMAL"
)
else:
"""
write workflow to change physical i2c address
"""
self.address(addr)
raise NotImplementedError("write workflow to change physical i2c address")
def set_led(self, state=0x01):
"""Change Led state
:param state: byte state => 0x01 = ON or 0x00 = OFF
"""
register = self.OEM_EC_REGISTERS["device_led"]
self.write(register, state)
if self.debug:
print(
"Led status change to: %s"
% ("On" if hex(0x01) == hex(state) else "OFF")
)
def set_wakeup_sleep_mode(self, action=0x01):
"""change device mode to Active or Hibernate
register is the same for EC and PH OEM circuit
:param byte: action => 0x01 = WakeUp or 0x00 = Hibernate
"""
register = self.OEM_EC_REGISTERS["device_sleep"]
self.write(register, action)
if self.debug:
print(
"Device is now: %s"
% ("wakeup" if hex(0x01) == hex(action) else "sleep")
)
def set_ack_new_read_available(self):
"""Ack new Read available
"""
register = self.OEM_EC_REGISTERS["device_new_reading"]
ack = 0x00
self.write(register, ack)
if self.debug:
print("ack new reading available register %s to %s" % (register, ack))
| 2.875 | 3 |
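A sketch of how the getters and setters above are typically chained for a single reading. The device object is assumed to be an instance of a concrete EC subclass of _CommonsI2c; how it is constructed depends on _AtlasOEMI2c, which is not shown in this file.

# Hypothetical read sequence; `device` is an assumed concrete EC instance of _CommonsI2c.
def read_ec_once(device):
    print(device.get_device_info())     # module type and firmware
    device.set_temperature(25.0)        # compensation temperature in Celsius
    device.set_wakeup_sleep_mode(0x01)  # wake the circuit
    if device.get_new_read_available():
        value = device.get_read()       # conductivity, already scaled to microsiemens
        device.set_ack_new_read_available()
        print("EC: %.2f uS" % value)
    device.set_wakeup_sleep_mode(0x00)  # back to hibernate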
tools/loadKar.py | hidura/sugelico | 0 | 12799958 |
from datetime import datetime
import os
class core:
def __init__(self, environ = None, location = None):
self.response = None
if environ == None and location == None:
# If neither environ nor location is provided, respond with an error message.
self.response = """<h1>Petition doesn't have <u>Environ</u> or <u>Location</u></h1>"""
elif environ != None:
self.environ = environ
from tools.Utilities import buildRq
buildReq = buildRq()
from tools.main import main
request = buildReq.extrctEnv(environ, environ['DOCUMENT_ROOT'])
# try:
# self.response = main(request, environ).getResult()
# except Exception as ex:
# error = {"error": str(ex.args[0])}
# self.response = {"status": 200, "value": error, "type": "application/json"}
self.response = main(request, environ).getResult()
def result(self):
if self.response != None:
return self.response
else:
return ["plain", "Problem with the communication with the core..."]
def logs(self, logData):
logdir = os.listdir(self.environ['DOCUMENT_ROOT']+'logs/')
a = datetime.now()
logtime = ''
for piece in a.timetuple()[:3]:
logtime += str(piece)+'-'
logtime = logtime[:-1]+" "
for piece in a.timetuple()[3:]:
logtime += str(piece)+':'
logtime = logtime[:-3]
if len(logdir) < 1:
log = open(self.environ['DOCUMENT_ROOT']+'logs/error.log', 'w')
for piece in str(logData).split('\n'):
log.write('['+logtime+']'+str(piece)+'\n')
log.close()
else:
log = open(self.environ['DOCUMENT_ROOT']+'logs/error.log', 'r')
if len(log.readlines()) > 500:
self.cleanLogs(self.environ['DOCUMENT_ROOT']+'logs')
log = open(self.environ['DOCUMENT_ROOT']+'logs/error.log', 'w')
else:
log = open(self.environ['DOCUMENT_ROOT']+'logs/error.log', 'a')
for piece in str(logData).split('\n'):
log.write('['+logtime+']'+str(piece)+'\n')
log.close()
return str(logData)
def cleanLogs(self, location):
logfiles = os.listdir(location)
if len(logfiles) == 9:
os.remove(logfiles[-1])
logfiles = logfiles[:-1]
cont = 1
for log in logfiles:
os.rename(self.environ['DOCUMENT_ROOT']+'logs/'+log, self.environ['DOCUMENT_ROOT']+'logs/error_'+str(cont)+'.log')
else:
cont = 1
for log in logfiles:
os.rename(self.environ['DOCUMENT_ROOT']+'logs/'+log, self.environ['DOCUMENT_ROOT']+'logs/error_'+str(cont)+'.log')
| 2.90625 | 3 |
IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/scripts/__init__.py | timkrentz/SunTracker | 4 | 12799959 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Subpackage containing the modules that implement the command line tools.
Note that these are imported by top-level scripts which are intended to be
invoked directly from a shell.
"""
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
deprecatedModuleAttribute(
Version("Twisted", 11, 1, 0),
"Seek unzipping software outside of Twisted.",
__name__,
"tkunzip")
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"tapconvert has been deprecated.",
__name__,
"tapconvert")
del Version, deprecatedModuleAttribute
| 1.34375 | 1 |
API/controller/routes.py | GeoscienceAustralia/FSDF-Roads | 1 | 12799960 | from flask import Blueprint, request, Response, render_template
from model.roads import Roads
from pyldapi import ContainerRenderer
import conf
import ast
import folium
print(__name__)
routes = Blueprint('controller', __name__)
DEFAULT_ITEMS_PER_PAGE=50
@routes.route('/', strict_slashes=True)
def home():
return render_template('home.html')
@routes.route('/rds/')
def roads():
# Search specific items using keywords
search_string = request.values.get('search')
try:
# get the register length from the online DB
sql = 'SELECT COUNT(*) FROM "transportroads"'
if search_string:
sql += ''' WHERE UPPER(cast("id" as text)) LIKE '%{search_string}%' OR UPPER("name") LIKE '%{search_string}%';
'''.format(search_string=search_string.strip().upper())
no_of_items = conf.db_select(sql)[0][0]
page = int(request.values.get('page')) if request.values.get('page') is not None else 1
per_page = int(request.values.get('per_page')) \
if request.values.get('per_page') is not None else DEFAULT_ITEMS_PER_PAGE
offset = (page - 1) * per_page
# get the id and name for each record in the database
sql = '''SELECT "id", "name" FROM "transportroads"'''
if search_string:
sql += ''' WHERE UPPER(cast("id" as text)) LIKE '%{search_string}%' OR UPPER("name") LIKE '%{search_string}%'
'''.format(search_string=search_string.strip().upper())
        sql += ''' ORDER BY "name"
OFFSET {} LIMIT {}'''.format(offset, per_page)
items = []
for item in conf.db_select(sql):
items.append(
(item[0], item[1])
)
except Exception as e:
print(e)
return Response('The Roads database is offline', mimetype='text/plain', status=500)
return ContainerRenderer(request=request,
instance_uri=request.url,
label='Roads Register',
comment='A register of Roads',
parent_container_uri='http://linked.data.gov.au/def/placenames/PlaceName',
parent_container_label='QLD_Roads',
members=items,
members_total_count=no_of_items,
profiles=None,
default_profile_token=None,
super_register=None,
page_size_max=1000,
register_template=None,
per_page=per_page,
search_query=search_string,
search_enabled=True
).render()
@routes.route('/map')
def show_map():
'''
Function to render a map around the specified line
'''
name = request.values.get('name')
coords_list = ast.literal_eval(request.values.get('coords'))[0]
# swap x & y for mapping
points = []
for coords in coords_list:
points.append(tuple([coords[1], coords[0]]))
ave_lat = sum(p[0] for p in points) / len(points)
ave_lon = sum(p[1] for p in points) / len(points)
# create a new map object
folium_map = folium.Map(location=[ave_lat, ave_lon], zoom_start=15)
tooltip = 'Click for more information'
folium.PolyLine(points, color="red", weight=2.5, opacity=1, popup = name, tooltip=tooltip).add_to(folium_map)
return folium_map.get_root().render()
@routes.route('/rds/<string:roads_id>')
def road(roads_id):
roads = Roads(request, request.base_url)
return roads.render()
| 2.609375 | 3 |
backend/chat/routing.py | CSCI34284/group4_project | 1 | 12799961 | """
Prepared by Backend/Server Team - Sheldon, Martin, Brian, Sarah, Veronica.
"""
from django.urls import re_path
from .consumers import ChatConsumer
# URL pattern that routes websocket connections for a chat room to the ChatConsumer
websocket_urlpatterns = [
re_path(r'^ws/chat/(?P<room_name>[^/]+)/$', ChatConsumer),
]
| 1.9375 | 2 |
ebs_snatcher/main.py | Cobliteam/ebs_snatcher | 3 | 12799962 | <reponame>Cobliteam/ebs_snatcher
from __future__ import unicode_literals
from builtins import str, bytes
import argparse
import json
import logging
import random
from . import ebs
logger = logging.getLogger('ebs-snatcher.main')
def get_args(): # pragma: no cover
argp = argparse.ArgumentParser(
'ebs-snatcher',
description='Automatically provision AWS EBS volumes from snapshots')
argp.add_argument(
'--instance-id', metavar='ID', required=True,
help='Instance ID to attach volumes to')
argp.add_argument(
'--volume-id-tag', metavar='KEY=VALUE', type=key_tag_pair,
required=True, action='append',
help='Tag used to identify desired volumes. Will be used to search '
'currently attached volumes to determine if a new one is needed '
'and applied to new volumes. Can be provided multiple times, in '
'which case tags will be combined as an AND condition.')
argp.add_argument(
'--volume-size', metavar='GB', type=positive_int, required=True,
help='Size to assign to newly created volumes, in GBs.')
argp.add_argument(
'--snapshot-search-tag', metavar='KEY=VALUE', type=key_tag_pair,
required=True, action='append',
help='Tag used to identify snapshots to create new volumes from.'
'Can be provided multiple times, in which case tags will be '
'combined as an AND condition.')
argp.add_argument(
'--attach-device', metavar='PATH|auto', required=True,
help='Name of device to use when attaching a volume, such as '
'"/dev/sdb". Can be set to "auto" to use a safe default. '
'Device names found to be already in use will be skipped, and the '
'next name in alphabetical order will be tried until attachment '
'succeeds')
argp.add_argument(
'--volume-extra-tag', metavar='KEY=VALUE', type=key_tag_pair,
action='append',
        help='Extra tags to be applied to newly created volumes, but which are '
'not used for identification')
argp.add_argument(
'--encrypt-kms-key-id', metavar='KEY-ID', default=None,
help='Enable encryption and use the given KMS key ID for newly created '
'volumes')
argp.add_argument(
'--volume-type', metavar='TYPE', choices=ebs.VOLUME_TYPES,
default='gp2',
help='Volume type to use for newly created volumes')
argp.add_argument(
'--volume-iops', metavar='COUNT', type=positive_int, default=None,
help='Number of provisioned I/O operations to assign to newly created '
'volumes. Make sure to choose an appropriate volume type to '
'match.')
argp.add_argument(
'--move-to-current-az', action='store_true', default=False,
help="If there is a volume available in a different AZ than the "
"current one, instead of skipping it and looking for snapshots "
"by tag, try to move it to the current AZ, by cloning it and "
"deleting the original.")
return argp.parse_args()
def positive_int(s):
n = int(s)
if n <= 0:
raise ValueError('Value must be positive: {}'.format(n))
return n
def key_tag_pair(s):
if isinstance(s, bytes):
s = str(s, 'utf-8')
elif not isinstance(s, str):
raise TypeError('Input must be a string')
try:
key, value = s.split('=', 1)
except ValueError:
raise ValueError('Missing tag value: {}'.format(s))
return key, value
class ResourceState(object):
def __init__(self, args, instance_info):
self.args = args
self.instance_info = instance_info
self.state = None
self.volume_id = None
self.old_volume_id = None
self.snapshot_id = None
self.attached_device = None
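        # `state` is filled in by survey(): 'present' when a matching volume is
        # already attached, 'attached' when an available volume in the current
        # AZ will simply be attached, and 'created' when a new volume has to be
        # created (from a snapshot, or as a copy of a volume in another AZ).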
def survey(self):
logger.debug('Looking up currently attached volumes')
attached_volumes = \
ebs.find_attached_volumes(self.args.volume_id_tag,
self.instance_info)
if attached_volumes:
volume_id = attached_volumes[0]['VolumeId']
attached_device = attached_volumes[0]['Attachments'][0]['Device']
logger.info(
'Found volume already attached to instance: %s', volume_id)
self.state = 'present'
self.volume_id = volume_id
self.attached_device = attached_device
return
logger.debug('Looking up existing available volumes in AZ')
volumes = \
ebs.find_available_volumes(self.args.volume_id_tag,
self.instance_info, current_az=True)
if volumes:
logger.info(
'Found available volumes with given specifications in current '
'AZ: %s',
', '.join(map(lambda v: v['VolumeId'], volumes)))
self.state = 'attached'
self.volume_id = random.choice(volumes)['VolumeId']
return
if self.args.move_to_current_az:
logger.info('Did not find any available volumes in current AZ. '
                        'Searching for available volumes to move from other AZs')
other_az_volumes = \
ebs.find_available_volumes(self.args.volume_id_tag,
self.instance_info,
current_az=False)
for old_volume in other_az_volumes:
old_volume_id = old_volume['VolumeId']
old_az = old_volume['AvailabilityZone']
new_az = self.instance_info['Placement']['AvailabilityZone']
filters = [{'Name': 'volume-id', 'Values': [old_volume_id]}]
snapshot = ebs.find_existing_snapshot(filters=filters)
if snapshot:
snapshot_id = snapshot['SnapshotId']
logger.info(
'Found volume %s in AZ %s, will attempt to move '
'it to current AZ %s. Using snapshot %s.',
old_volume_id, old_az, new_az, snapshot_id)
self.state = 'created'
self.snapshot_id = snapshot_id
self.old_volume_id = old_volume_id
break
else:
                logger.info('Did not find any available volumes to move from '
                            'other AZs. Creating new volume from scratch.')
self.state = 'created'
else:
logger.info('Did not find any available volumes. Searching for a '
'suitable snapshot instead')
snapshot = ebs.find_existing_snapshot(
search_tags=self.args.snapshot_search_tag)
self.state = 'created'
self.snapshot_id = snapshot and snapshot['SnapshotId']
def converge(self):
if not self.volume_id:
availability_zone = \
self.instance_info['Placement']['AvailabilityZone']
logger.info('About to create volume in AZ %s', availability_zone)
if not self.snapshot_id:
logger.info('Creating volume from scratch')
else:
logger.info('Creating volume from snapshot %s',
self.snapshot_id)
new_volume = ebs.create_volume(
id_tags=self.args.volume_id_tag,
extra_tags=self.args.volume_extra_tag,
availability_zone=availability_zone,
volume_type=self.args.volume_type,
size=self.args.volume_size,
iops=self.args.volume_iops,
kms_key_id=self.args.encrypt_kms_key_id,
src_snapshot_id=self.snapshot_id)
self.volume_id = new_volume['VolumeId']
if not self.attached_device:
self.attached_device = ebs.attach_volume(
volume_id=self.volume_id,
instance_info=self.instance_info,
device_name=self.args.attach_device)
self.attached_device = \
ebs.find_system_block_device(self.volume_id, self.attached_device)
if self.old_volume_id:
ebs.delete_volume(volume_id=self.old_volume_id)
def to_json(self):
return {'volume_id': self.volume_id,
'attached_device': self.attached_device,
'result': self.state,
'src_snapshot_id': self.snapshot_id}
def main():
logging.basicConfig(level=logging.DEBUG)
args = get_args()
instance_info = ebs.get_instance_info(args.instance_id)
resource_state = ResourceState(args, instance_info)
resource_state.survey()
resource_state.converge()
print(json.dumps(resource_state.to_json()))
return 0
if __name__ == '__main__':
main() # pragma: no cover
| 2.609375 | 3 |
manager/integration/tests/test_ha.py | JacieChao/longhorn-tests | 0 | 12799963 | import pytest
import common
import time
from common import client, volume_name # NOQA
from common import SIZE, DEV_PATH
from common import check_volume_data, get_self_host_id, get_volume_endpoint
from common import write_volume_random_data
from common import RETRY_COUNTS, RETRY_ITERVAL
@pytest.mark.coretest # NOQA
def test_ha_simple_recovery(client, volume_name): # NOQA
ha_simple_recovery_test(client, volume_name, SIZE)
def ha_simple_recovery_test(client, volume_name, size, base_image=""): # NOQA
volume = client.create_volume(name=volume_name, size=size,
numberOfReplicas=2, baseImage=base_image)
volume = common.wait_for_volume_detached(client, volume_name)
assert volume["name"] == volume_name
assert volume["size"] == size
assert volume["numberOfReplicas"] == 2
assert volume["state"] == "detached"
assert volume["created"] != ""
assert volume["baseImage"] == base_image
host_id = get_self_host_id()
volume = volume.attach(hostId=host_id)
volume = common.wait_for_volume_healthy(client, volume_name)
volume = client.by_id_volume(volume_name)
assert get_volume_endpoint(volume) == DEV_PATH + volume_name
assert len(volume["replicas"]) == 2
replica0 = volume["replicas"][0]
assert replica0["name"] != ""
replica1 = volume["replicas"][1]
assert replica1["name"] != ""
data = write_volume_random_data(volume)
volume = volume.replicaRemove(name=replica0["name"])
# wait until we saw a replica starts rebuilding
new_replica_found = False
for i in range(RETRY_COUNTS):
v = client.by_id_volume(volume_name)
for r in v["replicas"]:
if r["name"] != replica0["name"] and \
r["name"] != replica1["name"]:
new_replica_found = True
break
if new_replica_found:
break
time.sleep(RETRY_ITERVAL)
assert new_replica_found
volume = common.wait_for_volume_healthy(client, volume_name)
volume = client.by_id_volume(volume_name)
assert volume["state"] == common.VOLUME_STATE_ATTACHED
assert volume["robustness"] == common.VOLUME_ROBUSTNESS_HEALTHY
assert len(volume["replicas"]) >= 2
found = False
for replica in volume["replicas"]:
if replica["name"] == replica1["name"]:
found = True
break
assert found
check_volume_data(volume, data)
volume = volume.detach()
volume = common.wait_for_volume_detached(client, volume_name)
client.delete(volume)
common.wait_for_volume_delete(client, volume_name)
volumes = client.list_volume()
assert len(volumes) == 0
@pytest.mark.coretest # NOQA
def test_ha_salvage(client, volume_name): # NOQA
ha_salvage_test(client, volume_name)
def ha_salvage_test(client, volume_name, base_image=""): # NOQA
volume = client.create_volume(name=volume_name, size=SIZE,
numberOfReplicas=2, baseImage=base_image)
volume = common.wait_for_volume_detached(client, volume_name)
assert volume["name"] == volume_name
assert volume["size"] == SIZE
assert volume["numberOfReplicas"] == 2
assert volume["state"] == "detached"
assert volume["created"] != ""
assert volume["baseImage"] == base_image
host_id = get_self_host_id()
volume = volume.attach(hostId=host_id)
volume = common.wait_for_volume_healthy(client, volume_name)
assert len(volume["replicas"]) == 2
replica0_name = volume["replicas"][0]["name"]
replica1_name = volume["replicas"][1]["name"]
data = write_volume_random_data(volume)
common.k8s_delete_replica_pods_for_volume(volume_name)
volume = common.wait_for_volume_faulted(client, volume_name)
assert len(volume["replicas"]) == 2
assert volume["replicas"][0]["failedAt"] != ""
assert volume["replicas"][1]["failedAt"] != ""
volume.salvage(names=[replica0_name, replica1_name])
volume = common.wait_for_volume_detached(client, volume_name)
assert len(volume["replicas"]) == 2
assert volume["replicas"][0]["failedAt"] == ""
assert volume["replicas"][1]["failedAt"] == ""
volume = volume.attach(hostId=host_id)
volume = common.wait_for_volume_healthy(client, volume_name)
check_volume_data(volume, data)
volume = volume.detach()
volume = common.wait_for_volume_detached(client, volume_name)
client.delete(volume)
common.wait_for_volume_delete(client, volume_name)
volumes = client.list_volume()
assert len(volumes) == 0
| 1.945313 | 2 |
neural_cdes/F.py | jb-c/dissertation | 0 | 12799964 | <reponame>jb-c/dissertation<gh_stars>0
import torch
class F(torch.nn.Module):
'''
Defines the neural network denoted f_{\theta} in our neural CDE model
'''
def __init__(self, input_channels, hidden_channels, width = 128):
'''
:param input_channels: the number of input channels in the data X.
:param hidden_channels: the number of channels for z_t. (We use h = 32)
'''
#torch.manual_seed(3)
super(F, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.linear1 = torch.nn.Linear(hidden_channels, width)
self.linear2 = torch.nn.Linear(width, width)
self.linear3 = torch.nn.Linear(width, input_channels * hidden_channels)
def forward(self, t, z):
'''
:param t: t is normally embedded in the data
:param z: input to the network & has shape (batch, hidden_channels)
:return: F(z)
'''
z = self.linear1(z)
z = z.tanh()
z = self.linear2(z)
z = z.tanh()
z = self.linear3(z)
# A final tanh non-linearity.
z = z.tanh()
# Ignoring the batch dimension, the shape of the output tensor must be a matrix,
# because we need it to represent a linear map from R^input_channels to R^hidden_channels.
z = z.view(z.size(0), self.hidden_channels, self.input_channels)
return z
| 3.796875 | 4 |
UD/test.py | Anon-LeoH/UncleDaLearn | 1 | 12799965 | from LinearRegression.HugeScaleLR import hugeScaleLR as hlr
from LinearRegression.GradientDescent import gradientDescent as glr
from LinearRegression.regularization import regularization as rg
import numpy as np
from copy import deepcopy as dp
import random
import math
from Function import *
from Distribution import NormalDistribution as nd
import matplotlib.pyplot as plt
from NeuralNetwork import BPnetwork as bpn
fig = plt.figure()
fig.suptitle(u'Informations gragh paramed by level', fontsize=14, fontweight='bold')
ax = fig.add_subplot(111)
set11 = nd(3, 2.5)
set12 = nd(3, 2.5)
set13 = nd(3, 2.5)
set21 = nd(-6, 1.0)
set22 = nd(-6, 1.0)
set23 = nd(-6, 1.0)
class testFunc(functionObject):
def __init__(self):
pass
def cal(self, x):
return ( 0.7 * x - 7 * (x ** 2) + 0.1 * (x ** 3) + 7 )
tf = testFunc()
x = []
y = []
y2 = []
for i in xrange(1000):
x.append([set11.val(), set12.val(), set13.val()])
y.append([1, 0])
for i in xrange(1000):
x.append([set21.val(), set22.val(), set23.val()])
y.append([0, 1])
y2 = [[item] for item in y2]
jg = bpn([3, 4, 4, 2])
jg.train(x, y, 0.03, 0.0005)
#for i in xrange(10):
# tmp = random.uniform(0.1, 25.0)
# ty1 = tf.cal(tmp)
# tmp = [tmp * 1, tmp ** 2, tmp ** 3]
# tmp1 = dp(tmp)
# ty2 = jg1.cal(tmp)[0]
# ty3 = jg2.cal(tmp1)
# ty2 = ty2 * param[1] + param[0]
# rlt = (ty1, ty2, ty3)
# print rlt
#x = np.arange(-20.0, 20.0, 0.01)
#y1 = [tf.cal(item) for item in x]
#y2 = [jg1.cal([item, item ** 2, item ** 3])[0] * param[1] + param[0] for item in x]
#y3 = [jg2.cal([item, item ** 2, item ** 3]) for item in x]
#ax.plot(x, y1, 'r', x, y2, 'b', x, y3, 'y')
#fig.savefig('level.png', dpi=100)
#fig.show()
tr = 0
fl = 0
for i in xrange(100):
p = random.random()
if p >= 0.5:
tmpx = [set11.val(), set12.val(), set13.val()]
tmpy = 1
else:
tmpx = [set21.val(), set22.val(), set23.val()]
tmpy = 2
rlt = jg.cal(tmpx)
if rlt[0] > rlt[1]:
rlt = 1
else:
rlt = 2
if rlt == tmpy:
tr += 1
else:
fl += 1
print "result: " + str(float(tr) / (tr + fl))
| 2.671875 | 3 |
Ex81.py | ErickTeixeira777/Python-3 | 0 | 12799966 | <filename>Ex81.py
'''Write a program that reads several numbers into a list and then shows:
A) How many numbers were entered.
B) The list of values, sorted in descending order.
C) Whether the value 5 was entered and is in the list.'''
valores = []
while True:
valores.append(int(input('Digite um valor: ')))
resp = str(input('Gostaria de continuar? [S/N] '))
if resp in 'Nn':
break
print('='*30)
print(f'Você digitou {len(valores)} elementos. ')
valores.sort(reverse=True)
print(f'Os valores em ordem decrescente são {valores}')
if 5 in valores:
print('O valor 5 faz parte da lista! ')
else:
print('O valor 5 não foi encontrado na lista! ') | 4.15625 | 4 |
CreateAccount.py | manijamali2003/Nava | 1 | 12799967 | <reponame>manijamali2003/Nava
#!/usr/bin/env python3
import os
import socket, random,hashlib
from Nava import *
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 65433 # Port to listen on (non-privileged ports are > 1023)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
conn, addr = s.accept()
with conn:
print('Connected by', addr)
while True:
data = conn.recv(1024).decode('utf-8')
if data:
try:
split = data.split(',')
username = split[0] # manijamali2003
if not os.path.isfile (f'Etc/Users Account/{username}'):
fullname = split[1] # <NAME>
gender = split[2] # 0: Male, 1: Female
birthday = split[3] # yyyy/mm/dd
countryc = split[4] # IR
city = split[5] # Mashhad
zipcode = split[6] # 11111111
                        hashcode = split[7] # hash of password (sha3_512)
f = open(f'Etc/Users Account/{username}','wb')
f.write(f'{fullname},{gender},{birthday},{countryc},{city},{zipcode},{hashcode}'.encode())
f.close()
key = KeyCreator()
f = open(f'Etc/Users Account/Public Keys/{username}.pem','wb')
f.write(key.public) # you should create it
f.close()
conn.sendall(key.private)
conn.sendall(key.public)
else:
conn.sendall(b'e: account exists')
except:
conn.sendall(b'e: some errors') | 2.609375 | 3 |
diventi/landing/migrations/0009_auto_20180220_0745.py | flavoi/diven | 2 | 12799968 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-20 06:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('landing', '0008_remove_featurescover_active'),
]
operations = [
migrations.CreateModel(
name='PresentationCover',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.URLField(verbose_name='image')),
('label', models.CharField(blank=True, max_length=50, verbose_name='label')),
('label_it', models.CharField(blank=True, max_length=50, null=True, verbose_name='label')),
('label_en', models.CharField(blank=True, max_length=50, null=True, verbose_name='label')),
('section', models.CharField(choices=[('DES', 'description'), ('FEA', 'features')], default='DES', max_length=3)),
('default', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Presentation Cover',
'verbose_name_plural': 'Presentation Covers',
},
),
migrations.RemoveField(
model_name='presentation',
name='features_cover',
),
migrations.DeleteModel(
name='FeaturesCover',
),
migrations.AddField(
model_name='presentation',
name='presentation_covers',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='landing.PresentationCover', verbose_name='presentation cover'),
),
]
| 1.695313 | 2 |
FAUSTPy/__main__.py | mathandy/faust_python | 25 | 12799969 | import argparse
import numpy as np
import matplotlib.pyplot as plt
from FAUSTPy import *
#######################################################
# set up command line arguments
#######################################################
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--faustfloat',
dest="faustfloat",
default="float",
help="The value of FAUSTFLOAT.")
parser.add_argument('-p', '--path',
dest="faust_path",
default="",
help="The path to the FAUST compiler.")
parser.add_argument('-c', '--cflags',
dest="cflags",
default=[],
type=str.split,
help="Extra compiler flags")
parser.add_argument('-s', '--fs',
dest="fs",
default=48000,
type=int,
help="The sampling frequency")
args = parser.parse_args()
#######################################################
# initialise the FAUST object and get the default parameters
#######################################################
wrapper.FAUST_PATH = args.faust_path
dattorro = FAUST("dattorro_notch_cut_regalia.dsp", args.fs, args.faustfloat,
extra_compile_args=args.cflags)
def_Q = dattorro.dsp.ui.p_Q
def_Gain = dattorro.dsp.ui.p_Gain
def_Freq = dattorro.dsp.ui.p_Center_Freq
#######################################################
# plot the frequency response with the default settings
#######################################################
audio = np.zeros((dattorro.dsp.num_in, args.fs), dtype=dattorro.dsp.dtype)
audio[:, 0] = 1
out = dattorro.compute(audio)
print(audio)
print(out)
spec = np.fft.fft(out)[:, :args.fs//2]
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response with the default settings\n"
"(Q={}, F={:.2f} Hz, G={:.0f} dB FS)".format(
def_Q.zone, def_Freq.zone, 20*np.log10(def_Gain.zone+1e-8)
),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
p.plot(20*np.log10(np.absolute(spec.T)+1e-8))
p.legend(("Left channel", "Right channel"), loc="best")
#######################################################
# plot the frequency response with varying Q
#######################################################
Q = np.linspace(def_Q.min, def_Q.max, 10)
dattorro.dsp.ui.p_Center_Freq = 1e2
dattorro.dsp.ui.p_Gain = 10**(-0.5) # -10 dB
cur_G = dattorro.dsp.ui.p_Gain.zone
cur_F = dattorro.dsp.ui.p_Center_Freq.zone
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response "
"(G={:.0f} dB FS, F={} Hz)".format(20*np.log10(cur_G+1e-8), cur_F),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
for q in Q:
dattorro.dsp.ui.p_Q = q
out = dattorro.compute(audio)
    spec = np.fft.fft(out)[0, :args.fs//2]
p.plot(20*np.log10(np.absolute(spec.T)+1e-8),
label="Q={}".format(q))
p.legend(loc="best")
#######################################################
# plot the frequency response with varying gain
#######################################################
# start at -60 dB because the minimum is at an extremely low -160 dB
G = np.logspace(-3, np.log10(def_Gain.max), 10)
dattorro.dsp.ui.p_Q = 2
cur_Q = dattorro.dsp.ui.p_Q.zone
cur_F = dattorro.dsp.ui.p_Center_Freq.zone
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response (Q={}, F={} Hz)".format(cur_Q, cur_F),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
for g in G:
dattorro.dsp.ui.p_Gain = g
out = dattorro.compute(audio)
    spec = np.fft.fft(out)[0, :args.fs//2]
p.plot(20*np.log10(np.absolute(spec.T)+1e-8),
label="G={:.3g} dB FS".format(20*np.log10(g+1e-8)))
p.legend(loc="best")
###########################################################
# plot the frequency response with varying center frequency
###########################################################
F = np.logspace(np.log10(def_Freq.min), np.log10(def_Freq.max), 10)
dattorro.dsp.ui.p_Q = def_Q.default
dattorro.dsp.ui.p_Gain = 10**(-0.5) # -10 dB
cur_Q = dattorro.dsp.ui.p_Q.zone
cur_G = dattorro.dsp.ui.p_Gain.zone
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response "
"(Q={}, G={:.0f} dB FS)".format(cur_Q, 20*np.log10(cur_G+1e-8)),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
for f in F:
dattorro.dsp.ui.p_Center_Freq = f
out = dattorro.compute(audio)
    spec = np.fft.fft(out)[0, :args.fs//2]
p.plot(20*np.log10(np.absolute(spec.T)+1e-8),
label="F={:.2f} Hz".format(f))
p.legend(loc="best")
################
# show the plots
################
plt.show()
print("everything passes!")
| 2.703125 | 3 |
src/schemathesis/runner/impl/__init__.py | gluhar2006/schemathesis | 659 | 12799970 | <gh_stars>100-1000
from .core import BaseRunner
from .solo import SingleThreadASGIRunner, SingleThreadRunner, SingleThreadWSGIRunner
from .threadpool import ThreadPoolASGIRunner, ThreadPoolRunner, ThreadPoolWSGIRunner
| 0.976563 | 1 |
setup.py | sqxccdy/tcp-tunnel | 1 | 12799971 | <gh_stars>1-10
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from setuptools import setup, find_packages
import tcp_tunnel
setup(name="tcp-tunnel",
version=tcp_tunnel.VERSION,
packages=find_packages(include=['tcp_tunnel', 'tcp_tunnel.*',]),
author='<NAME>',
author_email='<EMAIL>',
long_description=open("README.md", "r").read(),
long_description_content_type="text/markdown",
include_package_data=True,
      exclude_package_data={'': ['.gitignore']},
keywords='tcp, tunnel, tcp-tunnel, tcptunnel',
license='MIT License',
url='https://github.com/sqxccdy/tcp-tunnel.git',
entry_points={
'console_scripts': [
'aiomq_server=aiomq.server:run'
]
},
install_requires=[],
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Natural Language :: Chinese (Simplified)',
'Programming Language :: Python',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
],
)
| 1.46875 | 1 |
pertemuan_8/7_HTTP_API_With_Sensor/app/forms/_action.py | Muhammad-Yunus/Flask-Web-Development | 0 | 12799972 | <filename>pertemuan_8/7_HTTP_API_With_Sensor/app/forms/_action.py
from . import FlaskForm
from . import SubmitField
class ActionTable(FlaskForm):
activate = SubmitField('Activate')
deactivate = SubmitField('Deactivate')
delete = SubmitField('Delete') | 1.820313 | 2 |
examples/html_test/static/generate.py | bitterfly/kuho | 0 | 12799973 | #!/usr/bin/python3
import os
import sys
import http.server
import socketserver
import socket
import shutil
from base64 import b64encode
from urllib.parse import quote
from os.path import basename, splitext, join, isfile
from collections import defaultdict
from subprocess import run
from distutils.dir_util import copy_tree
from distutils.file_util import copy_file
build_dir = 'build'
source_dir = 'source'
dest_dir = 'built_static'
css_dir = join(build_dir, 'css')
images_dir = join(build_dir, 'images')
class TemporaryTCPServer(socketserver.TCPServer):
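    # Set SO_REUSEADDR before binding so the dev server can be restarted
    # immediately without waiting for the old socket to leave TIME_WAIT.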
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
def serve(port):
os.chdir(dest_dir)
handler = http.server.SimpleHTTPRequestHandler
httpd = TemporaryTCPServer(("", port), handler)
print("[serve] serving on port " + str(port))
httpd.serve_forever()
def clean():
shutil.rmtree(build_dir)
shutil.rmtree(dest_dir)
def build():
copy_tree(source_dir, build_dir, update=1)
make_fallback_images(images_dir)
print('[create] _images.scss ', end='')
save_images_css(images_dir, join(css_dir, '_images.scss'))
print('[ok]')
run_sass(css_dir, join(dest_dir, 'css'))
print('[update] asis ', end='')
copy_tree(join(source_dir, 'asis'), join(dest_dir, 'asis'), update=1)
print('[ok]')
def run_sass(css_source_dir, css_dest_dir):
os.makedirs(css_dest_dir, exist_ok=True)
for (dirpath, dirnames, filenames) in os.walk(css_source_dir):
for f in filenames:
name, ext = splitext(f)
if ext == '.scss' and name[0] != '_':
print("[sass] " + f + ' ', end='')
run([
'sass',
join(css_source_dir, f),
join(css_dest_dir, name + '.css')
], check = True)
print("[ok]")
elif ext == '.css':
print("[copy] " + f + ' ', end='')
copy_file(join(css_source_dir, f), join(css_dest_dir, f), update=1)
print("[ok]")
break
def make_fallback_images(images_dir):
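    # For every image base name that has no .png variant, shell out to
    # ImageMagick's `convert` to generate one as a raster fallback.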
images = find_built_images(images_dir)
for image, files in images.items():
f = files[0]
pngimage = image + '.png'
if pngimage not in files:
print("[create] " + pngimage + ' ', end='')
run([
'convert',
'-background', 'none',
join(images_dir, f),
join(images_dir, pngimage)
], check = True)
print("[ok]")
def images_in_dir(dir):
vectors = []
rasters = []
dumb_rasters = []
lossy = []
for (dirpath, dirnames, filenames) in os.walk(dir):
for f in filenames:
name, ext = splitext(basename(f))
if ext in ['.svg']:
vectors += [f]
if ext in ['.png']:
rasters += [f]
if ext in ['.gif']:
dumb_rasters += [f]
if ext in ['.jpg', '.jpeg']:
lossy += [f]
break
return vectors + rasters + dumb_rasters + lossy
def find_built_images(images_dir):
images = defaultdict(list)
for image in images_in_dir(images_dir):
name, _ = splitext(basename(image))
images[name] += [image]
return dict(images)
def images_to_css(images_dir):
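    # Emit one `.image-<name>` rule per image. Raster formats are inlined as
    # base64 data URIs; SVGs are inlined as URL-encoded XML data URIs, paired
    # with a transparent linear-gradient (a common trick so browsers without
    # gradient/SVG support keep the raster background declared above).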
images = find_built_images(images_dir)
csseses = []
for name, files in images.items():
css = '.image-' + name + " {\n"
files_and_extensions = [(f, splitext(f)[1][1:]) for f in files]
for image, ext in [(f, ext) for f, ext in files_and_extensions if ext != 'svg']:
data = raster_data(join(images_dir, image), ext)
css += 'background-image: url(' + data + ");\n"
for svg, ext in [(f, ext) for f, ext in files_and_extensions if ext == 'svg']:
data = xml_data(join(images_dir, svg), ext)
css += 'background-image: url(' + data + "), linear-gradient(transparent, transparent);\n"
css += "}\n"
csseses += [css]
return "\n".join(csseses)
def save_images_css(images_dir, css_file):
with open(css_file, 'w') as f:
f.write(images_to_css(images_dir))
def raster_data(image_filename, ext):
with open(image_filename, 'rb') as f:
data = b64encode(f.read()).decode('utf-8')
return 'data:image/' + ext + ';base64,' + data
def xml_data(image_filename, ext):
with open(image_filename, 'r') as f:
data = quote(f.read())
return 'data:image/' + ext + '+xml;charset=US-ASCII,' + data
def image_data(image_filename):
    _, ext = splitext(image_filename)
    ext = ext[1:]  # drop the leading dot so the MIME type reads e.g. "image/png"
    if ext == 'svg':
        return xml_data(image_filename, ext)
    else:
        return raster_data(image_filename, ext)
if __name__ == '__main__':
try:
arg = sys.argv[1]
except IndexError:
arg = None
if arg == 'build':
build()
elif arg == 'clean':
clean()
elif arg == 'serve':
try:
port = int(sys.argv[2])
except IndexError:
port = 8000
build()
serve(port)
else:
print('please use "build", "clean" or "serve" as a first argument.')
| 2.40625 | 2 |
py_utils/readFile.py | mrrgeresez/my-python-approach | 0 | 12799974 | # -*- coding: utf-8 -*-
"""
Usage:
    $ python readFile.py data.txt
Prints the contents of data.txt to standard output.
"""
import sys
if __name__ == "__main__":
with open(sys.argv[1],'r',encoding = 'utf8') as f:
        # sys.argv[1] is the file name passed as the first command-line argument
for line in f:
print(line[:-1]) | 3.71875 | 4 |
apps/operations/urls.py | bopopescu/diandian_online | 3 | 12799975 | # _*_ coding: utf-8 _*_
__author__ = 'nick'
__date__ = '2019/2/25 18:14'
from django.urls import path
from . import views
app_name = 'operations'
urlpatterns = [
    # User profile (personal center)
path('user_center_information/', views.UserCenterInformation.as_view(), name='user_center_information'),
    # Courses the user is studying
path('user_center_my_courses/', views.UserStudyCourse.as_view(), name='user_center_my_courses'),
    # User messages
path('user_center_messages/', views.UserMessageView.as_view(), name='user_center_messages'),
    # Teachers the user has favourited
path('user_center_fav_teachers/', views.UserCollectedTeacher.as_view(), name='user_center_fav_teachers'),
    # Courses the user has favourited
path('user_center_fav_courses/', views.UserCollectedCourse.as_view(), name='user_center_fav_courses'),
    # Organizations the user has favourited
path('user_center_fav_organizations/', views.UserCollectedOrganization.as_view(), name='user_center_fav_organizations'),
    # Update the user's avatar
path('reset_head_portrait/', views.ResetUserHeaderPortraitView.as_view(), name='reset_head_portrait'),
    # Change the user's password
path('reset_user_password/', views.ResetUserPasswordView.as_view(), name='reset_user_password'),
    # Update the user's profile information
path('reset_user_information/', views.ResetUserInformationView.as_view(), name='reset_user_information'),
    # Verification code sent when changing the email address
path('send_email_verify_record/', views.SendEmailView.as_view(), name='send_email_verify_record'),
    # Change the email address
path('reset_user_email/', views.ResetUserEmailView.as_view(), name='reset_user_email'),
    # Mark a user message as read
path('read_message/', views.ReadMessageView.as_view(), name='read_message'),
] | 1.8125 | 2 |
utils/ImageProcesser.py | LLRukia/kkrbot | 0 | 12799976 | import os
import re
import uuid
import globals
from PIL import Image, ImageDraw, ImageFont
from utils.Asset import ImageAsset
SPACING = 5
back_regex = re.compile(r'back_([0-9]*)\.jpg')
BACK_PIC_UNIT_WIDTH, BACK_PIC_UNIT_HEIGHT = 140, 130
BACK_PIC_NUM_EACH_LINE = 5
def bg_image_gen(back_number, s):
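    # Render the template `bg/back_<back_number>.jpg` with the caption `s`.
    # The font size is derived from the caption's display width (wide chars
    # count as two), and a few template numbers get special caption placement
    # (overlaid on the image instead of appended below it).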
def half_en_len(s):
return (len(s) + (len(s.encode(encoding='utf-8')) - len(s)) // 2) // 2
back_number = f'back_{back_number}'
img_path = os.path.join(globals.staticpath, f'bg/{back_number}.jpg')
im_src = Image.open(img_path)
if back_number in [f'back_{n}' for n in [38, 46, 47, 51, 52, 53]]:
real_width = max(3, im_src.width // max(6, half_en_len(s)) * 4 // 5)
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = im_src.height - real_height
draw.text((x, y), s, fill=(245, 255, 250), font=font)
elif back_number in [f'back_{n}' for n in [33]]:
real_width = max(3, im_src.width // max(6, half_en_len(s)) * 4 // 5)
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = im_src.height - 2 * real_height
draw.text((x, y), s, fill=(245, 255, 250), font=font)
elif back_number in [f'back_{n}' for n in [50]]:
real_width = max(3, im_src.width // max(6, half_en_len(s)) * 4 // 5)
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = 5
draw.text((x, y), s, fill=(23, 0, 0), font=font)
else:
real_width = max(3, im_src.width // max(6, half_en_len(s)))
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height + real_height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = im_src.height
draw.text((x, y), s, fill=(23, 0, 0), font=font)
return im
def get_back_pics():
raw = ImageAsset.get('back_catalogue')
if raw:
return raw
back_pic_set = set()
for _, _, files in os.walk(os.path.join(globals.staticpath, 'bg')):
for f in files:
if f.startswith('back_') and f.endswith('.jpg'):
num = int(back_regex.findall(f)[0])
back_pic_set.add(num)
cur_back_pic_nums = len(back_pic_set)
if cur_back_pic_nums == 0:
return
im = Image.new('RGB', (BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_WIDTH, BACK_PIC_UNIT_HEIGHT * (((cur_back_pic_nums - 1) // BACK_PIC_NUM_EACH_LINE) + 1)), (255, 255, 255))
for i, num in enumerate(back_pic_set):
im_o = bg_image_gen(num, f'底图 {num}')
im_o = im_o.resize((BACK_PIC_UNIT_WIDTH, BACK_PIC_UNIT_HEIGHT))
box = (i % BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_WIDTH, i // BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_HEIGHT)
im.paste(im_o, box)
return ImageAsset.image_raw(im, 'back_catalogue')
def merge_image(rsn, rarity, attribute, band_id, thumbnail=True, trained=False, return_fn=False):
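    # Compose a card image for asset `rsn`: overlay the rarity frame, band and
    # attribute icons, and one star per rarity. `thumbnail` selects the small
    # variant, `trained` the after-training art; with `return_fn` (or for
    # full-size cards) the result is cached to disk and its file path returned.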
if thumbnail:
try:
if return_fn:
fn = os.path.join(globals.datapath, 'image', f'auto_reply/cards/thumb/m_{rsn}_{"normal" if not trained else "after_training"}.png')
if os.access(fn, os.R_OK):
return fn
attribute_icon = Image.open(os.path.join(globals.asset_resource_path, f'{attribute}.png'))
band_icon = Image.open(os.path.join(globals.asset_resource_path, f'band_{band_id}.png'))
if not trained:
back_image = Image.open(f'{os.path.join(globals.asset_card_thumb_path, f"{rsn}_normal.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star.png')).resize((32, 32), Image.ANTIALIAS)
else:
back_image = Image.open(f'{os.path.join(globals.asset_card_thumb_path, f"{rsn}_after_training.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star_trained.png')).resize((32, 32), Image.ANTIALIAS)
if rarity == 1:
frame = Image.open(os.path.join(globals.asset_resource_path, f'card-1-{attribute}.png'))
else:
frame = Image.open(os.path.join(globals.asset_resource_path, f'card-{rarity}.png'))
back_image.paste(frame, (0, 0), mask=frame)
back_image.paste(band_icon, (0, 0), mask=band_icon)
back_image.paste(attribute_icon, (180 - 50, 0), mask=attribute_icon)
for i in range(rarity):
back_image.paste(star, (2, 170 - 27 * (i + 1)), mask=star)
if return_fn:
fn = os.path.join(globals.datapath, 'image', f'auto_reply/cards/thumb/m_{rsn}_{"normal" if not trained else "after_training"}.png')
back_image.save(fn)
return fn
return back_image
except:
import sys
sys.excepthook(*sys.exc_info())
return None
else:
fn = os.path.join(globals.datapath, 'image', f'auto_reply/cards/m_{rsn}_{"normal" if not trained else "after_training"}.png')
if os.access(fn, os.R_OK):
return fn
try:
OUT_WIDTH, OUT_HEIGHT = 1364, 1020
INNER_WIDTH, INNER_HEIGHT = 1334, 1002
STAR_SIZE, ICON_SIZE = 100, 150
TOP_OFFSET, RIGHT_OFFSET, BOTTOM_OFFSET, LEFT_OFFSET = 22, 165, 20, 10
STAT_STEP = 95
back_image = Image.new('RGB', (OUT_WIDTH, OUT_HEIGHT))
attribute_icon = Image.open(os.path.join(globals.asset_resource_path, f'{attribute}.png')).resize((ICON_SIZE, ICON_SIZE), Image.ANTIALIAS)
band_icon = Image.open(os.path.join(globals.asset_resource_path, f'band_{band_id}.png')).resize((ICON_SIZE, ICON_SIZE), Image.ANTIALIAS)
if not trained:
card = Image.open(f'{os.path.join(globals.asset_card_path, f"{rsn}_card_normal.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star.png')).resize((STAR_SIZE, STAR_SIZE), Image.ANTIALIAS)
else:
card = Image.open(f'{os.path.join(globals.asset_card_path, f"{rsn}_card_after_training.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star_trained.png')).resize((STAR_SIZE, STAR_SIZE), Image.ANTIALIAS)
if rarity == 1:
frame = Image.open(os.path.join(globals.asset_resource_path, f'frame-1-{attribute}.png')).resize((OUT_WIDTH, OUT_HEIGHT), Image.ANTIALIAS)
else:
frame = Image.open(os.path.join(globals.asset_resource_path, f'frame-{rarity}.png')).resize((OUT_WIDTH, OUT_HEIGHT), Image.ANTIALIAS)
back_image.paste(card, ((OUT_WIDTH - INNER_WIDTH) // 2, (OUT_HEIGHT - INNER_HEIGHT) // 2), mask=card)
back_image.paste(frame, (0, 0), mask=frame)
back_image.paste(band_icon, (LEFT_OFFSET, TOP_OFFSET), mask=band_icon)
back_image.paste(attribute_icon, (OUT_WIDTH - RIGHT_OFFSET, TOP_OFFSET), mask=attribute_icon)
for i in range(rarity):
back_image.paste(star, (LEFT_OFFSET, OUT_HEIGHT - BOTTOM_OFFSET - STAT_STEP * (i + 1)), mask=star)
back_image.save(fn)
return fn
except:
return ''
def white_padding(width, height):
return Image.new('RGB', (width, height), (255, 255, 255))
def thumbnail(**options):
# images: a list of Image objects, or a list of lists(tuples) of Image objects
# labels: a list of strings shown at the bottom
# image_style: if not assigned, take the params of the first image; if both assigned, will be forced to resize
# width: width of each image, if not assigned, will be min(scaled value by height, 180)
# height: height of each image, if not assigned, will be min(scaled value by width, 180)
# label_style:
# font_size: font_size of each label
# col_num (images are arranged row by row)
# col_space: (space between two columns)
# row_space (space between two rows, if labels exist, it means the space between the label of row1 and the image of row2)
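    # Example usage (a sketch with hypothetical Image objects and labels):
    #   thumbnail(images=[im_a, im_b, im_c], labels=['A', 'B', 'C'],
    #             image_style={'width': 180}, col_num=3, row_space=10)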
images = options['images']
first_image = images[0]
if not isinstance(first_image, Image.Image):
if isinstance(first_image, (list, tuple)):
first_image = first_image[0]
if not isinstance(first_image, Image.Image):
raise Exception('images must be a list of Image objects, or a list of lists(tuples) of Image objects')
else:
raise Exception('images must be a list of Image objects, or a list of lists(tuples) of Image objects')
else:
images = [[im] for im in images]
if not options.get('image_style'):
box_width, box_height = first_image.size
else:
if options['image_style'].get('width') and options['image_style'].get('height'):
box_width, box_height = options['image_style']['width'], options['image_style']['height']
images = [[im.resize((box_width, box_height)) for im in im_list] for im_list in images]
elif options['image_style'].get('width') and not options['image_style'].get('height'):
images = [[im.resize((options['image_style']['width'], options['image_style']['width'] * im.size[1] // im.size[0])) for im in im_list] for im_list in images]
box_width, box_height = options['image_style']['width'], max([im.size[1] for im_list in images for im in im_list])
elif not options['image_style'].get('width') and options['image_style'].get('height'):
images = [[im.resize((options['image_style']['height'] * im.size[0] // im.size[1], options['image_style']['height'])) for im in im_list] for im_list in images]
box_width, box_height = max([im.size[0] for im_list in images for im in im_list]), options['image_style']['height']
col_num = options.get('col_num', 4)
row_num = (len(images) - 1) // col_num + 1
col_space = options.get('col_space', 0)
row_space = options.get('row_space', 0)
if options.get('labels'):
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), options.get('label_style', {}).get('font_size', 20))
all_chars = set()
max_label_width = 0
for label in options['labels']:
max_label_width = max(max_label_width, ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize(label, font=font)[0])
all_chars |= set(label)
label_height = ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize(''.join(all_chars), font=font)[1]
box_width = max(box_width * len(images[0]), max_label_width) // len(images[0])
back_image = Image.new('RGB', (
col_num * len(images[0]) * box_width + (col_num - 1) * col_space,
(box_height + label_height) * row_num + row_num * row_space,
), (255, 255, 255))
draw = ImageDraw.Draw(back_image)
labels = options['labels']
for r in range(row_num):
for c in range(col_num):
if r * col_num + c >= len(images):
break
image_group = images[r * col_num + c]
for i, im in enumerate(image_group):
back_image.paste(im, (
(len(image_group) * c + i) * box_width + (box_width - im.size[0]) // 2 + col_space * c,
r * (box_height + label_height + row_space)
))
sz = draw.textsize(labels[r * col_num + c], font=font)
draw.text((
len(image_group) * c * box_width + (len(image_group) * box_width - sz[0]) // 2 + c * col_space, r * (box_height + label_height + row_space) + box_height
), labels[r * col_num + c], fill=(0, 0, 0), font=font)
else:
back_image = Image.new('RGB', (
col_num * len(images[0]) * box_width + (col_num - 1) * col_space,
box_height * row_num + (row_num - 1) * row_space
), (255, 255, 255))
draw = ImageDraw.Draw(back_image)
for r in range(row_num):
for c in range(col_num):
if r * col_num + c >= len(images):
break
image_group = images[r * col_num + c]
for i, im in enumerate(image_group):
back_image.paste(im, (
(len(image_group) * c + i) * box_width + (box_width - im.size[0]) // 2 + c * col_space * int(i == len(image_group) - 1),
r * (box_height + row_space)
))
return ImageAsset.image_raw(back_image)
def open_nontransparent(filename):
try:
image = Image.open(filename).convert('RGBA')
new_image = Image.new('RGBA', image.size, (255, 255, 255, 255))
new_image.paste(image, (0, 0), image)
return new_image
except:
pass
def manual():
raw = ImageAsset.get('manual')
if raw:
return raw
row_space = 20
col_space = 50
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), 20)
lines = [
'ycm/有车吗: 查询车牌(来源: https://bandoristation.com/)',
'底图目录: 查询底图目录(是的,不仅功能一样,连图都盗过来了,虽然还没更新。底图31,Tsugu!.jpg)',
'底图+数字: 切换底图',
'xx.jpg: 图片合成',
'',
'以下查询功能数据来源Bestdori',
'查卡 [稀有度] [颜色] [人物] [乐团] [技能类型]: 按条件筛选符合要求的卡片,同类条件取并集,不同类条件取交集。例如: 查卡 4x pure ksm 分',
'查卡+数字: 按id查询单卡信息',
'无框+数字: 按id查询单卡无框卡面',
'活动列表 [活动类型]: 按条件筛选符合要求的活动,活动类型包括“一般活动”,“竞演LIVE”或“对邦”,“挑战LIVE”或“CP”,“LIVE试炼”,“任务LIVE”',
'活动+数字 [服务器]: 按id查询单活动信息,默认国服,可选“日服”,“国际服”,“台服”,“国服”,“韩服”',
'卡池列表 [卡池类型]: 按条件筛选符合要求的卡池,卡池类型包括“常驻”或“无期限”,“限时”或“限定”或“期间限定”,“特殊”(该条件慎加,因为没啥特别的卡池),“必4”',
'卡池+数字 [服务器]: 按id查询单卡池信息,默认国服,可选“日服”,“国际服”,“台服”,“国服”,“韩服”',
'',
'以下查询功能数据来源bilibili开放的豹跳接口,慎用',
'查抽卡名字 名字: 查用户名称包含该名字的玩家出的4星',
]
line_height = ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize('底图目录', font=font)[1]
image = Image.new('RGB', (ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize(max(lines, key=lambda line: len(line)),
font=font)[0] + 2 * col_space, (line_height + row_space) * len(lines)), (255, 255, 255))
draw = ImageDraw.Draw(image)
line_pos = row_space
for i, line in enumerate(lines):
sz = draw.textsize(line, font=font)
draw.text((col_space, line_pos), line, fill=(0, 0, 0), font=font)
line_pos += sz[1] + row_space
return ImageAsset.image_raw(image, 'manual')
def compress(infile, mb=None, step=10, quality=80, isabs=False):
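    # Re-encode `infile` as a JPEG named "<name>-c.jpg" under the image data
    # directory. If `mb` (a size limit in KB) is given and the source already
    # fits, return it unchanged; otherwise keep lowering the JPEG quality by
    # `step` until the output fits or quality reaches zero.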
if not isabs:
absinfile = os.path.join(globals.datapath, 'image', infile)
else:
absinfile = infile
outfile = infile[infile.rfind('/') + 1:infile.rfind('.')] + '-c.jpg'
absoutfile = os.path.join(globals.datapath, 'image', outfile)
if os.path.exists(absoutfile):
return outfile
if mb is None:
im = Image.open(absinfile)
im = im.convert('RGB')
im.save(absoutfile, quality=quality)
return absoutfile
o_size = os.path.getsize(absinfile) / 1024
if o_size <= mb:
return infile
while o_size > mb:
im = Image.open(absinfile)
im = im.convert('RGB')
im.save(absoutfile, quality=quality)
if quality - step < 0:
break
quality -= step
o_size = os.path.getsize(absoutfile) / 1024
return absoutfile
| 2.40625 | 2 |
Maths/7. Reverse Integer.py | thewires2/Leetcode | 1 | 12799977 | class Solution:
def reverse(self, x: int) -> int:
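        # Reverse the decimal digits by reversing the string form of |x|,
        # restore the sign, and return 0 if the result falls outside the
        # signed 32-bit range [-2**31, 2**31 - 1].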
f=False
if x<0:
f=True
y=str(abs(x))
y=y[::-1]
x=int(y)
        if -2**31 <= x <= 2**31 - 1:
if f==False:
return x
return -x
return 0
| 3.25 | 3 |
rockpaperscissors.py | tarellmorris/RockPaperScissors | 0 | 12799978 | <filename>rockpaperscissors.py
from random import randint
options = {1: 'ROCK', 2: 'PAPER', 3: 'SCISSORS'}
def cpu_move():
cpu_rand = randint(1, 3)
return cpu_rand
def player_move():
player = input("Choose a move: ('ROCK', 'PAPER', or 'SCISSORS'): ")
# if player.upper() != 'ROCK' or player.upper() != 'PAPER' or player.upper() != 'SCISSORS':
# print("Syntax error. Please re-enter your move; either 'ROCK', 'PAPER', or 'SCISSORS'...")
# player_move()
# else:
return player.upper()
def results(cpu, player):
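    # Compare the player's move (string) against the CPU's move (int key into
    # `options`: 1=ROCK, 2=PAPER, 3=SCISSORS) and return the round outcome.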
while player == 'ROCK':
if cpu == 2:
return 'You lose!'
elif cpu == 3:
return 'You win!'
else:
return 'Draw!'
while player == 'PAPER':
if cpu == 3:
return 'You lose!'
elif cpu == 1:
return 'You win!'
else:
return 'Draw!'
while player == 'SCISSORS':
if cpu == 1:
return 'You lose!'
elif cpu == 2:
return 'You win!'
else:
return 'Draw!'
def main():
print("This game will be played best two out of three.")
match = 1
cpu_tally = 0
player_tally = 0
while cpu_tally != 2 and player_tally != 2:
print("Round {}...".format(match))
player = player_move()
cpu = cpu_move()
print("CPU chose {}!".format(options[cpu]))
score = results(cpu, player)
if score == 'You win!':
print('You win!')
match += 1
player_tally += 1
elif score == 'You lose!':
print('You lose!')
match += 1
cpu_tally += 1
else:
print('Draw!')
match += 1
if cpu_tally == 2:
print("Oh no! You lose!")
if player_tally == 2:
print("Congratulations! You win!")
def play_again():
confirm = input("Would you like to play again?: ('YES' or 'NO') ")
if confirm.upper() == 'YES':
main()
else:
        raise SystemExit()
print("Are you ready to play Rock Paper Scissors?")
confirm = input("Enter 'YES' to continue or 'NO' to exit program: ")
if confirm.upper() == 'YES':
main()
else:
    raise SystemExit()
play_again()
| 4.1875 | 4 |
src/control_node/control_node/errors.py | tessia-project/tessia-mesh | 5 | 12799979 | # Copyright 2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Custom service layer errors that should be differentiated
"""
#
# IMPORTS
#
#
# CONSTANTS AND DEFINITIONS
#
#
# CODE
#
class StartInstanceError(RuntimeError):
"""Instance could not be started"""
# StartInstanceError
class ComponentProbeError(RuntimeError):
"""Component returned an erroneous response"""
# ComponentProbeError
class ConfigurationError(ValueError):
"""Invalid configuration values"""
# ConfigurationError
class ValidationError(ValueError):
"""JSON Schema validation error"""
def __init__(self, error_iterator) -> None:
errors = [f'{"/".join(map(str, item.path))}: {item.message}'
for item in error_iterator]
super().__init__(f'Task validation failed: {", ".join(errors)}')
# __init__()
# ValidationError
| 1.945313 | 2 |
code/route_betweenness.py | tlarock/shipping | 0 | 12799980 | <gh_stars>0
import numpy as np
import networkx as nx
def route_node_betweenness_from_paths(G, filtered_paths):
'''
Computes route betweenness for nodes starting from set of paths filtered_paths.
Uses G only to get the number of nodes; could be done by iterating
over pairs of nodes in filtered_paths or given as input parameter.
Uses dense numpy arrays for computations.
'''
## Zero array of dimensions len(G.nodes()) by len(filtered_paths)
node_to_idx = {node:idx for idx, node in enumerate(G.nodes())}
pair_to_idx = {pair:idx for idx, pair in enumerate(filtered_paths.keys())}
numerator = np.zeros((len(G.nodes()), len(filtered_paths)))
denominator = []
for pair in filtered_paths:
denominator.append(len(filtered_paths[pair]))
for path in filtered_paths[pair]:
for node in path:
numerator[node_to_idx[node], pair_to_idx[pair]] += 1
denominator = np.array(denominator)
normalized_counts = numerator / denominator
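    ## normalized_counts[i, j] is the fraction of the stored paths for OD pair j
    ## that pass through node i; summing over all pairs gives each node's route betweenness.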
total_betweenness = normalized_counts.sum(axis=1)
route_betweenness = {node:total_betweenness[idx] for node, idx in node_to_idx.items()}
return route_betweenness
def route_node_betweenness_from_file(filename):
'''
Computes route betweenness for nodes by reading file filename.
Uses dictionaries for computations.
'''
pair_counter = 0
total_pairs = 0
first = True
node_to_pair_dict = dict()
prev_pair = (-1, -1)
filtered_paths = dict()
with open(filename, 'r') as fin:
for line in fin:
path, *_ = line.strip().split('|')
path = path.strip().split(',')
pair = (path[0], path[-1])
filtered_paths.setdefault(pair, list())
filtered_paths[pair].append(path)
if pair != prev_pair and not first:
nodes_to_norm = set()
for path in filtered_paths[prev_pair]:
for node in path:
node_to_pair_dict.setdefault(node, dict())
node_to_pair_dict[node].setdefault(prev_pair, 0)
node_to_pair_dict[node][prev_pair] += 1
nodes_to_norm.add(node)
## Normalize
for node in nodes_to_norm:
node_to_pair_dict[node][prev_pair] /= len(filtered_paths[prev_pair])
pair_counter += 1
total_pairs += 1
if pair_counter == 150_000:
print(f"{total_pairs} processed.", flush=True)
pair_counter = 0
prev_pair = pair
if first: first = False
## Handle the last pair
for path in filtered_paths[prev_pair]:
nodes_to_norm = set()
for node in path:
node_to_pair_dict.setdefault(node, dict())
node_to_pair_dict[node].setdefault(prev_pair, 0)
node_to_pair_dict[node][prev_pair] += 1
nodes_to_norm.add(node)
## Normalize
for node in nodes_to_norm:
node_to_pair_dict[node][prev_pair] /= len(filtered_paths[prev_pair])
## Compute betweenness by summing over all pairs for each node
route_betweenness = {node:sum(node_to_pair_dict[node].values()) for node in node_to_pair_dict}
return route_betweenness
def route_edge_betweenness_from_paths(G, filtered_paths):
'''
Computes route betweenness for edges starting from set of paths filtered_paths.
Uses G only to get the number of nodes; could be done by iterating
over pairs of nodes in filtered_paths or given as input parameter.
Uses dense numpy arrays for computations.
'''
## Zero array of dimensions len(G.edges()) by len(filtered_paths)
edge_to_idx = {edge:idx for idx, edge in enumerate(G.edges())}
pair_to_idx = {pair:idx for idx, pair in enumerate(filtered_paths.keys())}
numerator = np.zeros((len(G.edges()), len(filtered_paths)))
denominator = []
for pair in filtered_paths:
denominator.append(len(filtered_paths[pair]))
for path in filtered_paths[pair]:
for i in range(1, len(path)):
numerator[edge_to_idx[(path[i-1], path[i])], pair_to_idx[pair]] += 1
denominator = np.array(denominator)
normalized_counts = numerator / denominator
total_betweenness = normalized_counts.sum(axis=1)
route_betweenness = {edge:total_betweenness[idx] for edge, idx in edge_to_idx.items()}
return route_betweenness
def route_edge_betweenness_from_file(filename):
'''
Computes route betweenness for edges by reading file filename.
Uses dictionaries for computations.
'''
pair_counter = 0
total_pairs = 0
first = True
edge_to_pair_dict = dict()
prev_pair = (-1, -1)
filtered_paths = dict()
with open(filename, 'r') as fin:
for line in fin:
path, *_ = line.strip().split('|')
path = path.strip().split(',')
pair = (path[0], path[-1])
filtered_paths.setdefault(pair, list())
filtered_paths[pair].append(path)
if pair != prev_pair and not first:
edges_to_norm = set()
for path in filtered_paths[prev_pair]:
for i in range(1, len(path)):
edge = path[i-1], path[i]
edge_to_pair_dict.setdefault(edge, dict())
edge_to_pair_dict[edge].setdefault(prev_pair, 0)
edge_to_pair_dict[edge][prev_pair] += 1
edges_to_norm.add(edge)
## Normalize
for edge in edges_to_norm:
edge_to_pair_dict[edge][prev_pair] /= len(filtered_paths[prev_pair])
pair_counter += 1
total_pairs += 1
if pair_counter == 150_000:
print(f"{total_pairs} processed.", flush=True)
pair_counter = 0
prev_pair = pair
if first: first = False
## Handle the last pair
for path in filtered_paths[prev_pair]:
edges_to_norm = set()
for i in range(1, len(path)):
edge = path[i-1], path[i]
edge_to_pair_dict.setdefault(edge, dict())
edge_to_pair_dict[edge].setdefault(prev_pair, 0)
edge_to_pair_dict[edge][prev_pair] += 1
edges_to_norm.add(edge)
## Normalize
for edge in edges_to_norm:
edge_to_pair_dict[edge][prev_pair] /= len(filtered_paths[prev_pair])
## Compute betweenness by summing over all pairs for each edge
route_betweenness = {edge:sum(edge_to_pair_dict[edge].values()) for edge in edge_to_pair_dict}
return route_betweenness
def route_path_betweenness_from_file(filename, k):
'''
Computes route betweenness for paths by reading file filename.
Uses dictionaries for computations.
'''
pair_counter = 0
total_pairs = 0
first = True
path_to_pair_dict = dict()
prev_pair = (-1, -1)
filtered_paths = dict()
with open(filename, 'r') as fin:
for line in fin:
path, *_ = line.strip().split('|')
path = path.strip().split(',')
pair = (path[0], path[-1])
filtered_paths.setdefault(pair, list())
filtered_paths[pair].append(path)
if pair != prev_pair and not first:
paths_to_norm = set()
for path in filtered_paths[prev_pair]:
for i in range(0, len(path)-k):
kpath = tuple(path[i:i+k+1])
path_to_pair_dict.setdefault(kpath, dict())
path_to_pair_dict[kpath].setdefault(prev_pair, 0)
path_to_pair_dict[kpath][prev_pair] += 1
paths_to_norm.add(kpath)
## Normalize
for path in paths_to_norm:
path_to_pair_dict[path][prev_pair] /= len(filtered_paths[prev_pair])
pair_counter += 1
total_pairs += 1
if pair_counter == 150_000:
print(f"{total_pairs} processed.", flush=True)
pair_counter = 0
prev_pair = pair
if first: first = False
## Handle the last pair
for path in filtered_paths[prev_pair]:
paths_to_norm = set()
for i in range(0, len(path)-k):
kpath = tuple(path[i:i+k+1])
path_to_pair_dict.setdefault(kpath, dict())
path_to_pair_dict[kpath].setdefault(prev_pair, 0)
path_to_pair_dict[kpath][prev_pair] += 1
paths_to_norm.add(kpath)
## Normalize
for path in paths_to_norm:
path_to_pair_dict[path][prev_pair] /= len(filtered_paths[prev_pair])
## Compute betweenness by summing over all pairs for each path
route_betweenness = {path:sum(path_to_pair_dict[path].values()) for path in path_to_pair_dict}
return route_betweenness
| 3.1875 | 3 |
language/python/pocket-primer/CH5 - Files Input and Output/planets.py | pendraic/learn | 0 | 12799981 | <filename>language/python/pocket-primer/CH5 - Files Input and Output/planets.py
# This program prints the names of planets having fewer than ten (10) moons
# Open the file
try:
infile = open ("planets.txt", "r")
# Read (skip over) the header line
s = infile.readline()
# For each planet
for i in range (0, 8):
# Read a line as string s
s = infile.readline()
# Break s into components based on commas giving list P
p = s.split (",")
# If p[10] < 10 print the planet name, which is p[0]
if int(p[10])<10:
print (p[0], "has fewer than 10 moons.")
except FileNotFoundError:
print ("There is no file named 'planets.txt'. Please try again")
| 3.921875 | 4 |
runtests.py | tushargoel97/WebAdmin | 0 | 12799982 | <gh_stars>0
from fabric import Connection
class OperationsUtil():
def __init__(self):
self.rootUser="ubuntu20"
self.rootPass="<PASSWORD>!"
self.conn=Connection(host='[email protected]',connect_kwargs={"password": self.rootPass})
def createUser(self, username, password, dirname):
if not dirname:
dirname = username
command = "useradd -p "+password+" -m -d /home/"+dirname+"/ -g users -s /bin/bash "+username
try:
val = self.conn.sudo(command,password=self.rootPass,hide=True).stdout.strip()
return ('User Created: '+username,0)
except Exception as e:
return ('Cannot create user',1)
def viewUser(self):
command = "awk -F: '{ print $1}' /etc/passwd"
return self.conn.run(command,hide=True).stdout.strip()
def deluser(self,username):
command = "userdel -f "+username
try:
            val = self.conn.sudo(command,password=self.rootPass,hide=True).stdout.strip()
return ('User Deleted: '+username,0)
except Exception as e:
return ('Cannot delete user',1)
def updatePriv(self,username):
command = "usermod -aG sudo "+username
try:
val = self.conn.sudo(command,password=self.rootPass).stdout.strip()
return ('User Privilege Granted',0)
except Exception as e:
return ('Cannot Grant user Privileges',1)
op = OperationsUtil()
# print(op.createUser("test", "test")[0])
# print(op.viewUser())
# print(op.updatePriv("test")[0])
print(op.deluser("test1")[0]) | 2.453125 | 2 |
src/tfchain/types/PrimitiveTypes.py | GlenDC/threefold-wallet-electron | 0 | 12799983 | <reponame>GlenDC/threefold-wallet-electron
import tfchain.errors as tferrors
import tfchain.polyfill.encoding.base64 as jsbase64
import tfchain.polyfill.encoding.hex as jshex
import tfchain.polyfill.encoding.str as jsstr
import tfchain.polyfill.encoding.decimal as jsdec
import tfchain.polyfill.array as jsarray
from tfchain.types.BaseDataType import BaseDataTypeClass
class BinaryData(BaseDataTypeClass):
"""
BinaryData is the data type used for any binary data used in tfchain.
"""
def __init__(self, value=None, fixed_size=None, strencoding=None):
# define string encoding
if strencoding != None and not isinstance(strencoding, str):
raise TypeError(
"strencoding should be None or a str, not be of type {}".format(strencoding))
if strencoding == None or jsstr.String(strencoding).lower().strip().__eq__('hex'):
self._from_str = lambda s: jshex.bytes_from_hex(s)
self._to_str = lambda value: jshex.bytes_to_hex(value)
elif jsstr.String(strencoding).lower().strip().__eq__('base64'):
self._from_str = lambda s: jsbase64.bytes_from_b64(s)
self._to_str = lambda value: jsbase64.bytes_to_b64(value)
elif jsstr.String(strencoding).lower().strip().__eq__('hexprefix'):
self._from_str = lambda s: jshex.bytes_from_hex(
s[2:] if (s.startswith("0x") or s.startswith("0X")) else s)
self._to_str = lambda value: '0x' + jshex.bytes_to_hex(value)
else:
raise TypeError(
"{} is not a valid string encoding".format(strencoding))
self._strencoding = strencoding
# define fixed size
if fixed_size != None:
if not isinstance(fixed_size, int):
raise TypeError(
"fixed size should be None or int, not be of type {}".format(type(fixed_size)))
if fixed_size < 0:
raise TypeError(
"fixed size should be at least 0, {} is not allowed".format(fixed_size))
if fixed_size != 0:
self._fixed_size = fixed_size
else:
self._fixed_size = None # for now use no fixed size
# define the value (finally)
self._value = None
self.value = value
if fixed_size == 0:
# define the fixed size now, if the fixed_size was 0
# based on the binary length of the value
self._fixed_size = len(self.value)
@classmethod
def from_json(cls, obj, fixed_size=None, strencoding=None):
if obj != None and not isinstance(obj, str):
raise TypeError(
"binary data is expected to be an encoded string when part of a JSON object")
if obj == '':
obj = None
return cls(value=obj, fixed_size=fixed_size, strencoding=strencoding)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
# normalize the value
if isinstance(value, BinaryData):
value = value.value
elif value == None:
if self._fixed_size != None:
value = bytes(jsarray.new_array(self._fixed_size))
else:
value = bytes(jsarray.new_array(0))
elif isinstance(value, str):
value = self._from_str(value)
elif isinstance(value, bytearray):
value = bytes(value)
elif not isinstance(value, bytes) and not jsarray.is_uint8_array(value):
raise TypeError(
"binary data can only be set to a BinaryData, str, bytes or bytearray, not {}".format(type(value)))
# if fixed size, check this now
lvalue = len(value)
if self._fixed_size != None and lvalue != 0 and lvalue != self._fixed_size:
raise ValueError(
"binary data was expected to be of fixed size {}, length {} is not allowed".format(
self._fixed_size, len(value)))
# all good, assign the bytearray value
self._value = value
def __len__(self):
return len(self.value)
def __str__(self):
return self._to_str(self._value)
def str(self):
return self.__str__()
def __repr__(self):
return self.__str__()
def json(self):
return self.__str__()
def __eq__(self, other):
other = self._op_other_as_binary_data(other)
return self.value == other.value
def __ne__(self, other):
other = self._op_other_as_binary_data(other)
return self.value != other.value
def _op_other_as_binary_data(self, other):
if isinstance(other, (str, bytes, bytearray)):
other = BinaryData(
value=other, fixed_size=self._fixed_size, strencoding=self._strencoding)
elif not isinstance(other, BinaryData):
raise TypeError(
"Binary data of type {} is not supported".format(type(other)))
if self._fixed_size != other._fixed_size:
raise TypeError(
"Cannot compare binary data with different fixed size: self({}) != other({})".format(
self._fixed_size, other._fixed_size))
if self._strencoding != other._strencoding:
raise TypeError(
"Cannot compare binary data with different strencoding: self({}) != other({})".format(
self._strencoding, other._strencoding))
return other
def __hash__(self):
return hash(self.__str__())
def sia_binary_encode(self, encoder):
"""
Encode this binary data according to the Sia Binary Encoding format.
Either encoded as a slice or an array, depending on whether or not it is fixed sized.
"""
if self._fixed_size == None:
encoder.add_slice(self._value)
else:
encoder.add_array(self._value)
def rivine_binary_encode(self, encoder):
"""
Encode this binary data according to the Rivine Binary Encoding format.
Either encoded as a slice or an array, depending on whether or not it is fixed sized.
"""
if self._fixed_size == None:
encoder.add_slice(self._value)
else:
encoder.add_array(self._value)
class Hash(BinaryData):
SIZE = 32
"""
TFChain Hash Object, a special type of BinaryData
"""
def __init__(self, value=None):
super().__init__(value, fixed_size=Hash.SIZE, strencoding='hex')
@classmethod
def from_json(cls, obj):
if obj != None and not isinstance(obj, str):
raise TypeError(
"hash is expected to be an encoded string when part of a JSON object, not {}".format(type(obj)))
if obj == '':
obj = None
return cls(value=obj)
def __str__(self):
s = super().__str__()
if jsstr.isempty(s):
return jsstr.repeat('0', Hash.SIZE*2)
return s
class Currency(BaseDataTypeClass):
"""
TFChain Currency Object.
"""
def __init__(self, value=None):
self._value = None
self.value = value
@classmethod
def sum(cls, *values):
s = cls()
for value in values:
s.__iadd__(value)
return s
@classmethod
def from_str(cls, obj, lowest_unit=False):
if obj != None and not isinstance(obj, str):
raise TypeError(
"currency is expected to be a string , not type {}".format(type(obj)))
if obj == '':
obj = None
c = cls()
c.value = jsdec.Decimal(obj)
if lowest_unit:
c.value.__imul__(jsdec.Decimal('0.000000001'))
return c
@classmethod
def from_json(_, obj):
return Currency.from_str(obj, lowest_unit=True)
@property
def value(self):
return self._value
def plus(self, other):
return self.__add__(other)
def minus(self, other):
return self.__sub__(other)
def times(self, other):
return self.__mul__(other)
def divided_by(self, other):
return self.__truediv__(other)
def equal_to(self, other):
return self.__eq__(other)
def not_equal_to(self, other):
return self.__ne__(other)
def less_than(self, other):
return self.__lt__(other)
def greater_than(self, other):
return self.__gt__(other)
def less_than_or_equal_to(self, other):
return self.__le__(other)
def greater_than_or_equal_to(self, other):
return self.__ge__(other)
def negate(self):
return Currency(self.value.negate())
@value.setter
def value(self, value):
if value == None:
self._value = jsdec.Decimal()
return
if isinstance(value, Currency):
self._value = value.value
return
if isinstance(value, (int, str, jsdec.Decimal)):
inner_value = value
if isinstance(inner_value, str):
inner_value = jsstr.String(inner_value).upper().strip().value
if len(inner_value) >= 4 and inner_value[-3:] == 'TFT':
inner_value = jsstr.rstrip(inner_value[:-3])
d = jsdec.Decimal(inner_value)
_, _, exp = d.as_tuple() # sign is first return value
if exp < -9:
raise tferrors.CurrencyPrecisionOverflow(d.__str__())
# if sign != 0: # allow negative values for intermediate computations
# raise tferrors.CurrencyNegativeValue(d.__str__())
self._value = d
return
raise TypeError(
"cannot set value of type {} as Currency (invalid type): {}".format(type(value), value))
# operator overloading to allow currencies to be summed
def __add__(self, other):
if not isinstance(other, Currency):
return self.__add__(Currency(other))
return Currency(self.value.__add__(other.value))
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
if not isinstance(other, Currency):
return self.__iadd__(Currency(other))
self._value.__iadd__(other.value)
return self
# operator overloading to allow currencies to be multiplied
def __mul__(self, other):
if not isinstance(other, Currency):
return self.__mul__(Currency(other))
return Currency(self.value.__mul__(other.value).to_nearest(9))
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if not isinstance(other, Currency):
return self.__imul__(Currency(other))
self._value.__imul__(other.value)
return self
# operator overloading to allow currencies to be divided
def __truediv__(self, other):
if not isinstance(other, Currency):
return self.__truediv__(Currency(other))
return Currency(self.value.__truediv__(other.value).to_nearest(9))
# operator overloading to allow currencies to be subtracted
def __sub__(self, other):
if not isinstance(other, Currency):
return self.__sub__(Currency(other))
return Currency(self.value.__sub__(other.value))
def __rsub__(self, other):
return self.__sub__(other)
def __isub__(self, other):
if not isinstance(other, Currency):
return self.__isub__(Currency(other))
self._value.__isub__(other.value)
return self
# operator overloading to allow currencies to be compared
def __lt__(self, other):
if not isinstance(other, Currency):
return self.__lt__(Currency(other))
return self.value.__lt__(other.value)
def __le__(self, other):
if not isinstance(other, Currency):
return self.__le__(Currency(other))
return self.value.__le__(other.value)
def __eq__(self, other):
if not isinstance(other, Currency):
return self.__eq__(Currency(other))
return self.value.__eq__(other.value)
def __ne__(self, other):
if not isinstance(other, Currency):
return self.__ne__(Currency(other))
return self.value.__ne__(other.value)
def __gt__(self, other):
if not isinstance(other, Currency):
return self.__gt__(Currency(other))
return self.value.__gt__(other.value)
def __ge__(self, other):
if not isinstance(other, Currency):
return self.__ge__(Currency(other))
return self.value.__ge__(other.value)
@staticmethod
def _op_other_as_currency(other):
if isinstance(other, (int, str)):
other = Currency(value=other)
elif isinstance(other, float):
other = Currency(value=jsdec.Decimal(str(other)))
elif not isinstance(other, Currency):
raise TypeError(
"currency of type {} is not supported".format(type(other)))
return other
# allow our currency to be turned into an int
def __int__(self):
return jsstr.to_int(self.str(lowest_unit=True))
def bytes(self):
return self.value.bytes(prec=9)
def __str__(self):
return self.str()
def str(self, with_unit=False, lowest_unit=False, precision=9):
"""
Turn this Currency value into a str TFT unit-based value,
optionally with the currency notation.
@param with_unit: include the TFT currency suffix unit with the str
"""
s = self.value.str(precision)
if lowest_unit:
s = jsstr.lstrip(jsstr.replace(s, ".", ""), "0")
elif jsstr.contains(s, "."):
s = jsstr.rstrip(jsstr.rstrip(s, "0 "), '.')
if jsstr.isempty(s):
s = "0"
if with_unit:
s += " TFT"
return s
def __repr__(self):
return self.str(with_unit=True)
def json(self):
return self.str(lowest_unit=True)
def sia_binary_encode(self, encoder):
"""
Encode this currency according to the Sia Binary Encoding format.
"""
b = self.bytes()
encoder.add_int(len(b))
encoder.add_array(b)
def rivine_binary_encode(self, encoder):
"""
Encode this currency according to the Rivine Binary Encoding format.
"""
b = self.bytes()
encoder.add_slice(b)
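# Hedged usage sketch (illustrative, based on the methods above): Currency("2.5 TFT") strips the
# unit suffix while parsing, arithmetic such as Currency.sum(a, b) or a + b returns new Currency
# objects, str(with_unit=True) renders "2.5 TFT", and json() serializes the amount in the lowest
# unit (1 TFT == 10**9 lowest units), so Currency("2.5").json() is expected to be "2500000000".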
class Blockstake(BaseDataTypeClass):
"""
TFChain Blockstake Object.
"""
def __init__(self, value=None):
self._value = Currency(value)
@classmethod
def from_json(cls, obj):
if obj != None and not isinstance(obj, str):
raise TypeError(
"block stake is expected to be a string when part of a JSON object, not type {}".format(type(obj)))
if obj == '':
obj = None
return cls(value=obj)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
        self._value = Currency(value=value)
# allow our block stake to be turned into an int
def __int__(self):
return jsstr.to_int(self.value.str(lowest_unit=False))
def str(self):
return jsstr.from_int(self.__int__())
def __str__(self):
return self.str()
def __repr__(self):
return self.__str__()
def json(self):
return self.__str__()
def bytes(self):
return self.value.bytes()
def sia_binary_encode(self, encoder):
"""
Encode this block stake (==Currency) according to the Sia Binary Encoding format.
"""
b = self.bytes()
encoder.add_int(len(b))
encoder.add_array(b)
def rivine_binary_encode(self, encoder):
"""
Encode this block stake (==Currency) according to the Rivine Binary Encoding format.
"""
b = self.bytes()
encoder.add_slice(b)
| 2.015625 | 2 |
dimensionality_reduction/LDA.py | jonathangouvea/PatternRecognition | 0 | 12799984 | import numpy as np
from numpy import linalg as LA
class LDA():
def __init__(self, dim = 2):
self.dim = dim
self.matrixTransf = None
def fit_transform(self, X, labels):
positive = []
negative = []
for i in range(len(labels)):
if labels[i] == 1:
positive.append(X[i])
else:
negative.append(X[i])
positive = np.array(positive)
negative = np.array(negative)
media_pos = np.mean(positive, axis = 0)
media_neg = np.mean(negative, axis = 0)
cov_pos = np.cov(positive.T)
cov_neg = np.cov(negative.T)
SW = cov_pos + cov_neg
sub = (media_pos - media_neg)
print(SW.shape)
print(sub.shape)
wLDA = np.matmul(LA.pinv(SW), sub)
self.matrixTransf = np.array(wLDA)
print("Matriz de transformação")
print(self.matrixTransf)
res = np.matmul(X, self.matrixTransf.T)
return res
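# Hedged usage sketch (not part of the original file): given a feature matrix X of shape
# (n_samples, n_features) and binary labels where the positive class is 1, the projection onto
# the single Fisher discriminant direction could be obtained with:
#   lda = LDA()
#   projected = lda.fit_transform(X, labels)   # shape (n_samples,)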
| 2.921875 | 3 |
wowgic/wowgic_flask/lib/tweepy/__init__.py | chelladurai89/wowgicbackend2.0 | 0 | 12799985 | # Tweepy
# Copyright 2009-2010 <NAME>
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.5.0'
__author__ = '<NAME>'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError, RateLimitError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.limit import RateLimitHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
from six.moves.http_client import HTTPConnection
HTTPConnection.debuglevel = level
def chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
class Twitter(object):
"""
Twitter API wrapper based on Tweepy using the RateLimitHandler
with multiple access tokens (see https://github.com/svven/tweepy).
It also handles API method cursors and splits input param lists in
chunks if neccessary.
"""
def __init__(self,
consumer_key, consumer_secret, access_tokens=None):
"""
Initialize params for RateLimitHandler to pass to Tweepy API.
Param `access_tokens` must be a dictionary but it can be loaded
later just before the first API method call, and has to be like
{user_id: (access_token_key, access_token_secret)}.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_tokens = access_tokens
_api = None
def _get_api(self):
"Initialize Tweepy API object with RateLimitHandler auth."
auth = RateLimitHandler(self.consumer_key, self.consumer_secret)
for key, secret in self.access_tokens.values():
auth.add_access_token(key, secret)
# print 'Token pool size: %d' % len(auth.tokens)
return API(auth)
# retry_count=2, retry_delay=3,
# wait_on_rate_limit=True, wait_on_rate_limit_notify=True
@property
def api(self):
"Lazy loaded Tweepy API object."
if not self._api:
self._api = self._get_api()
return self._api
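# Hedged usage sketch (illustrative; the ids and token values are placeholders): the wrapper is
# meant to be constructed with app credentials plus a pool of user tokens, after which the lazily
# created Tweepy API object rotates tokens through RateLimitHandler:
#   twitter = Twitter(CONSUMER_KEY, CONSUMER_SECRET,
#                     access_tokens={user_id: (token_key, token_secret)})
#   user = twitter.api.get_user('twitter')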
| 2.515625 | 3 |
Section 2 - Data (variables, assignments and expressions)/Breakouts/Breakout 2.1 - Turtle Graphics/Q5 - random walk solution.py | gitjot/python-for-lccs | 10 | 12799986 | <gh_stars>1-10
# Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: <NAME>, PDST
# eMail: <EMAIL>
# Purpose: Solution to Q5 page 53
from turtle import *
from random import *
angle = randint(0, 360)
lineLen = randint(50, 100)
left(angle)
forward(lineLen)
angle = randint(0, 360)
lineLen = randint(50, 100)
left(angle)
forward(lineLen)
angle = randint(0, 360)
lineLen = randint(50, 100)
left(angle)
forward(lineLen)
angle = randint(0, 360)
lineLen = randint(50, 100)
left(angle)
forward(lineLen)
angle = randint(0, 360)
lineLen = randint(50, 100)
left(angle)
forward(lineLen)
| 3.34375 | 3 |
QianProj.py | Bachery/sqlova | 0 | 12799987 | from datetime import timedelta
import numpy as np
import pandas as pd
import argparse
import torch
import json
import os
from add_csv import csv_to_sqlite, csv_to_json
from sqlnet.dbengine import DBEngine
from sqlova.utils.utils_wikisql import *
from train import construct_hyper_param, get_models
#### prediction ####################
def get_args():
parser = argparse.ArgumentParser()
# parser.add_argument("--model_file", required=True, help='model file to use (e.g. model_best.pt)')
# parser.add_argument("--bert_model_file", required=True, help='bert model file to use (e.g. model_bert_best.pt)')
# parser.add_argument("--bert_path", required=True, help='path to bert files (bert_config*.json etc)')
# parser.add_argument("--data_path", required=True, help='path to *.jsonl and *.db files')
# parser.add_argument("--split", required=True, help='prefix of jsonl and db files (e.g. dev)')
# parser.add_argument("--result_path", required=True, help='directory in which to place results')
args = construct_hyper_param(parser)
return args
def load_models(args):
BERT_PT_PATH = './data_and_model'
path_model_bert = './model_bert_best.pt'
path_model = './model_best.pt'
model, model_bert, tokenizer, bert_config = get_models(args, BERT_PT_PATH, True, path_model_bert, path_model)
return model, model_bert, tokenizer, bert_config
def my_get_fields(t, data_tables):
### t: list of dict
### data_tables: dict
nlu, nlu_t, tb, hds = [], [], [], []
for t1 in t:
nlu.append( t1['question'])
nlu_t.append( t1['question_tok'])
tbid = t1['table_id']
tb.append(data_tables[tbid])
hds.append(data_tables[tbid]['header'])
return nlu, nlu_t, tb, hds
def my_predict( data_loader, data_table,
model, model_bert, bert_config, tokenizer,
max_seq_length,
num_target_layers, path_db, dset_name,
EG=False, beam_size=4):
model.eval()
model_bert.eval()
# engine = DBEngine(os.path.join(path_db, f"{dset_name}.db"))
engine = DBEngine(path_db)
results = []
for _, t in enumerate(data_loader):
nlu, nlu_t, tb, hds = my_get_fields(t, data_table)
wemb_n, wemb_h, l_n, l_hpu, l_hs, \
nlu_tt, t_to_tt_idx, tt_to_t_idx \
= get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length,
num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)
if not EG:
# No Execution guided decoding
s_sc, s_sa, s_wn, s_wc, s_wo, s_wv = model(wemb_n, l_n, wemb_h, l_hpu, l_hs)
pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi = pred_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, )
pr_wv_str, pr_wv_str_wp = convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_tt, tt_to_t_idx, nlu)
pr_sql_i = generate_sql_i(pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, nlu)
else:
# Execution guided decoding
prob_sca, prob_w, prob_wn_w, \
pr_sc, pr_sa, pr_wn, pr_sql_i \
= model.beam_forward(wemb_n, l_n, wemb_h, l_hpu,
l_hs, engine, tb,
nlu_t, nlu_tt,
tt_to_t_idx, nlu,
beam_size=beam_size)
# sort and generate
pr_wc, pr_wo, pr_wv, pr_sql_i = sort_and_generate_pr_w(pr_sql_i)
# Following variables are just for consistency with no-EG case.
pr_wvi = None # not used
pr_wv_str=None
pr_wv_str_wp=None
pr_sql_q = generate_sql_q(pr_sql_i, tb)
for b, (pr_sql_i1, pr_sql_q1) in enumerate(zip(pr_sql_i, pr_sql_q)):
results1 = {}
results1["query"] = pr_sql_i1
results1["table_id"] = tb[b]["id"]
results1["nlu"] = nlu[b]
results1["sql"] = pr_sql_q1
results.append(results1)
return results
#### deal with data ###################
## No longer needed
def read_csv_to_table(csv_path):
# file_name as table_id
table_id = csv_path.split('/')[-1][:-4]
df = pd.read_csv(csv_path)
headers = df.columns.tolist()
rows = []
for _, row in df.iterrows():
rows.append(row.tolist())
print(rows)
## TODO: add_csv
def create_table_and_db():
pass
def read_scripts(txt_path):
nlu = []
with open(txt_path, 'r') as f:
line = f.readline()
while line:
if line.endswith('\n'):
nlu.append(line[:-1])
else:
nlu.append(line)
line = f.readline()
return nlu
## TODO: with tools in annotate_ws.py
def split_scripts(nlu):
nlu_t = []
for nlu1 in nlu:
nlu_t.append(nlu1.split(' '))
return nlu_t
def get_tables(tb_path):
table = {}
with open(tb_path) as f:
for _, line in enumerate(f):
t1 = json.loads(line.strip())
table[t1['id']] = t1
return table
def prepare_data():
sc_paths = [ './Qian_data/company_script.txt',
'./Qian_data/product_script.txt',]
sc_tableids = [ 'company_table',
'product_table',]
nlu = []
nlu_t = []
tbid = []
for i in range(len(sc_paths)):
nlu_i = read_scripts(sc_paths[i])
nlu_t_i = split_scripts(nlu_i)
nlu.extend(nlu_i)
nlu_t.extend(nlu_t_i)
tbid.extend([sc_tableids[i]] * len(nlu_i))
data = []
for i in range(len(nlu)):
data.append({
'question': nlu[i],
'question_tok': nlu_t[i],
'table_id': tbid[i],
})
return data
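# Note (summary of the structures built above): prepare_data() yields one dict per natural
# language question, e.g. {'question': '...', 'question_tok': ['...'], 'table_id': 'company_table'},
# while get_tables() maps each table_id to the schema row loaded from the *.tables.jsonl file.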
if __name__ == '__main__':
dset_name = 'qian'
save_path = './Qian_data/'
### model
args = get_args()
model, model_bert, tokenizer, bert_config = load_models(args)
### data
db_path = './Qian_data/qian.db'
tb_path = './Qian_data/qian.tables.jsonl'
data_table = get_tables(tb_path)
data = prepare_data()
data_loader = torch.utils.data.DataLoader(
batch_size=args.bS,
dataset=data,
shuffle=False,
num_workers=1,
collate_fn=lambda x: x # now dictionary values are not merged!
)
### predict
with torch.no_grad():
results = my_predict(data_loader,
data_table,
model,
model_bert,
bert_config,
tokenizer,
max_seq_length=args.max_seq_length,
num_target_layers=args.num_target_layers,
path_db=db_path,
dset_name=dset_name,
EG=False, #args.EG,
)
# save results
save_for_evaluation(save_path, results, dset_name)
| 2.34375 | 2 |
scalyr_agent/third_party_tls/tlslite/defragmenter.py | zak905/scalyr-agent-2 | 0 | 12799988 | <gh_stars>0
# Copyright (c) 2015, <NAME>
#
# See the LICENSE file for legal information regarding use of this file.
""" Helper package for handling fragmentation of messages """
from __future__ import generators
from .utils.codec import Parser
class Defragmenter(object):
"""
Class for demultiplexing TLS messages.
    Since messages of different types can be interleaved and fragmented,
    we need to cache incomplete ones and return complete messages in order of urgency.
Supports messages with given size (like Alerts) or with a length header
in specific place (like Handshake messages).
:ivar priorities: order in which messages from given types should be
returned.
:ivar buffers: data buffers for message types
:ivar decoders: functions which check buffers if a message of given type
is complete
"""
def __init__(self):
"""Set up empty defregmenter"""
self.priorities = []
self.buffers = {}
self.decoders = {}
def addStaticSize(self, msgType, size):
"""Add a message type which all messages are of same length"""
if msgType in self.priorities:
raise ValueError("Message type already defined")
if size < 1:
raise ValueError("Message size must be positive integer")
self.priorities += [msgType]
self.buffers[msgType] = bytearray(0)
def sizeHandler(data):
"""
Size of message in parameter
If complete message is present in parameter returns its size,
None otherwise.
"""
if len(data) < size:
return None
else:
return size
self.decoders[msgType] = sizeHandler
def addDynamicSize(self, msgType, sizeOffset, sizeOfSize):
"""Add a message type which has a dynamic size set in a header"""
if msgType in self.priorities:
raise ValueError("Message type already defined")
if sizeOfSize < 1:
raise ValueError("Size of size must be positive integer")
if sizeOffset < 0:
raise ValueError("Offset can't be negative")
self.priorities += [msgType]
self.buffers[msgType] = bytearray(0)
def sizeHandler(data):
"""
Size of message in parameter
If complete message is present in parameter returns its size,
None otherwise.
"""
if len(data) < sizeOffset+sizeOfSize:
return None
else:
parser = Parser(data)
# skip the header
parser.getFixBytes(sizeOffset)
payloadLength = parser.get(sizeOfSize)
if parser.getRemainingLength() < payloadLength:
# not enough bytes in buffer
return None
return sizeOffset + sizeOfSize + payloadLength
self.decoders[msgType] = sizeHandler
def addData(self, msgType, data):
"""Adds data to buffers"""
if msgType not in self.priorities:
raise ValueError("Message type not defined")
self.buffers[msgType] += data
def getMessage(self):
"""Extract the highest priority complete message from buffer"""
for msgType in self.priorities:
length = self.decoders[msgType](self.buffers[msgType])
if length is None:
continue
# extract message
data = self.buffers[msgType][:length]
# remove it from buffer
self.buffers[msgType] = self.buffers[msgType][length:]
return (msgType, data)
return None
def clearBuffers(self):
"""Remove all data from buffers"""
for key in self.buffers.keys():
self.buffers[key] = bytearray(0)
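# Hedged usage sketch (illustrative, not part of the module): TLS alerts are fixed 2-byte
# messages and handshake messages carry a 3-byte length at offset 1, so a typical setup is
# assumed to look like:
#   defragmenter = Defragmenter()
#   defragmenter.addStaticSize(21, 2)         # ContentType.alert
#   defragmenter.addDynamicSize(22, 1, 3)     # ContentType.handshake
#   defragmenter.addData(22, record_payload)  # feed fragments as records arrive
#   msg = defragmenter.getMessage()           # None until a complete message is buffered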
| 2.609375 | 3 |
tfx/orchestration/portable/execution_watcher.py | avelez93/tfx | 1,813 | 12799989 | <filename>tfx/orchestration/portable/execution_watcher.py
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a gRPC service for updating remote job info to MLMD."""
from concurrent import futures
from typing import Optional
from absl import logging
import grpc
from tfx.orchestration import metadata
from tfx.proto.orchestration import execution_watcher_pb2
from tfx.proto.orchestration import execution_watcher_pb2_grpc
from ml_metadata.proto import metadata_store_pb2
def generate_service_stub(
address: str,
creds: Optional[grpc.ChannelCredentials] = None,
) -> execution_watcher_pb2_grpc.ExecutionWatcherServiceStub:
"""Generates a gRPC service stub for a given server address."""
channel = grpc.secure_channel(
address, creds) if creds else grpc.insecure_channel(address)
return execution_watcher_pb2_grpc.ExecutionWatcherServiceStub(channel)
class ExecutionWatcher(
execution_watcher_pb2_grpc.ExecutionWatcherServiceServicer):
"""A gRPC service server for updating remote job info to MLMD.
Attributes:
local_address: Local network address to the server.
address: Remote network address to the server, same as local_address if not
configured.
"""
def __init__(self,
port: int,
mlmd_connection: metadata.Metadata,
execution: metadata_store_pb2.Execution,
address: Optional[str] = None,
creds: Optional[grpc.ServerCredentials] = None):
"""Initializes the gRPC server.
Args:
port: Which port the service will be using.
mlmd_connection: ML metadata connection.
execution: The MLMD Execution to keep track of.
address: Remote address used to contact the server. Should be formatted as
an ipv4 or ipv6 address in the format `address:port`. If left as
None, server will use local address.
creds: gRPC server credentials. If left as None, server will use an
insecure port.
"""
super().__init__()
self._port = port
self._address = address
self._creds = creds
self._mlmd_connection = mlmd_connection
self._server = self._create_server()
if not execution.HasField('id'):
raise ValueError(
'execution id must be set to be tracked by ExecutionWatcher.')
self._execution = execution
def UpdateExecutionInfo(
self, request: execution_watcher_pb2.UpdateExecutionInfoRequest,
context: grpc.ServicerContext
) -> execution_watcher_pb2.UpdateExecutionInfoResponse:
"""Updates the `custom_properties` field of Execution object in MLMD."""
logging.info('Received request to update execution info: updates %s, '
'execution_id %s', request.updates, request.execution_id)
if request.execution_id != self._execution.id:
context.set_code(grpc.StatusCode.NOT_FOUND)
context.set_details(
'Execution with given execution_id not tracked by server: '
f'{request.execution_id}')
return execution_watcher_pb2.UpdateExecutionInfoResponse()
for key, value in request.updates.items():
self._execution.custom_properties[key].CopyFrom(
value)
# Only the execution is needed
with self._mlmd_connection as m:
m.store.put_executions((self._execution,))
return execution_watcher_pb2.UpdateExecutionInfoResponse()
def _create_server(self):
"""Creates a gRPC server and add `self` on to it."""
result = grpc.server(futures.ThreadPoolExecutor())
execution_watcher_pb2_grpc.add_ExecutionWatcherServiceServicer_to_server(
self, result)
if self._creds is None:
result.add_insecure_port(self.local_address)
else:
result.add_secure_port(self.local_address, self._creds)
return result
@property
def local_address(self) -> str:
# Local network address to the server.
return f'localhost:{self._port}'
@property
def address(self) -> str:
return self._address or self.local_address
def start(self):
"""Starts the server."""
self._server.start()
def stop(self):
"""Stops the server."""
self._server.stop(grace=None)
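# Hedged usage sketch (illustrative; names are placeholders): the orchestrator starts a watcher
# for a tracked MLMD execution, and a remote job later pushes custom-property updates through the
# generated stub:
#   watcher = ExecutionWatcher(port=50051, mlmd_connection=mlmd_conn, execution=execution)
#   watcher.start()
#   stub = generate_service_stub(watcher.address)
#   stub.UpdateExecutionInfo(
#       execution_watcher_pb2.UpdateExecutionInfoRequest(execution_id=execution.id))
#   watcher.stop()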
| 2.03125 | 2 |
main.py | xiangsheng1325/CPGAN | 0 | 12799990 | <filename>main.py
import warnings
import pprint, os, random, torch
from CPGAN.data_utils import *
import numpy as np
from CPGAN.options import *
from CPGAN.train import *
warnings.filterwarnings("ignore")
def get_options():
opt = Options()
opt = opt.initialize()
return opt
def seed_torch(seed=1029):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
# torch.backends.cudnn.benchmark = False
# torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
opt = get_options()
    ##{ Temporary hyperparameter overrides
opt.gpu = '1'
opt.batch_size = 1
opt.max_epochs = 160
opt.method = 'cpgan'
opt.graph_type = "yeast"
opt.data_dir = "./data"
    ## Remove these overrides for the formal training run }
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
seed = int(time.time()*opt.random_seed) % (2**32)
seed_torch(seed=seed)
print('=========== OPTIONS ===========')
pprint.pprint(vars(opt))
print(' ======== END OPTIONS ========\n\n')
graphs = train_adj_mats, test_adj_mats, train_attr_vecs, test_attr_vecs = load_data(
data_filepath=opt.data_dir,
)
modeldict, logdict = train_cpgan(graphs, train_adj_mats, train_attr_vecs, opt) | 2.3125 | 2 |
src/drone_control/mavros/sender.py | Adrien4193/drone_control | 0 | 12799991 | <reponame>Adrien4193/drone_control<gh_stars>0
import rospy
from tf.transformations import quaternion_from_euler
from geometry_msgs.msg import Point, PoseStamped, Quaternion
from mavros_msgs.msg import Thrust
class Sender(object):
def __init__(self):
self._pubs = {}
self._pubs['position'] = rospy.Publisher(
'/mavros/setpoint_position/local',
PoseStamped, queue_size=10
)
self._pubs['attitude'] = rospy.Publisher(
'/mavros/setpoint_attitude/attitude',
PoseStamped, queue_size=10
)
self._pubs['velocity'] = rospy.Publisher(
'/mavros/setpoint_attitude/velocity',
PoseStamped, queue_size=10
)
self._pubs['thrust'] = rospy.Publisher(
'/mavros/setpoint_attitude/thrust',
Thrust, queue_size=10
)
self._pubs['mocap'] = rospy.Publisher(
'/mavros/mocap/pose',
PoseStamped, queue_size=10
)
def __del__(self):
for pub in self._pubs.values():
            pass  # pub.unregister()
def send_attitude(self, attitude):
self._pubs['attitude'].publish(attitude.get_message())
self._pubs['thrust'].publish(Thrust(thrust=attitude.thrust))
def send_velocity(self, attitude):
self._pubs['velocity'].publish(attitude.get_message())
self._pubs['thrust'].publish(Thrust(thrust=attitude.thrust))
def send_position(self, pose):
self._pubs['position'].publish(pose.get_message())
def send_mocap(self, pose):
self._pubs['mocap'].publish(pose.get_message())
| 2.3125 | 2 |
introducing-python-answers/chapter10.py | DailyYu/python-study | 1 | 12799992 | <reponame>DailyYu/python-study
# Q1
from datetime import date
now = date.today()
now_string = now.isoformat()
with open('today.txt', 'w') as file:
print(now, file=file)
# Q2
today_string = None
with open('today.txt') as file:
today_string = file.read()
print(today_string)
# Q3
from datetime import datetime
format = '%Y-%m-%d\n'
print(datetime.strptime(today_string, format))
# Q4
import os
print(os.listdir('.'))
# Q5
print(os.listdir('..'))
# Q6
import multiprocessing
def print_current_time(seconds):
from time import sleep
sleep(seconds)
print(f'Wait for {seconds} seconds, Current time is {datetime.today().time()}')
import random
# On Windows, multiprocessing re-imports and executes the whole module in each child process,
# which would spawn processes recursively; the __main__ guard below avoids that.
if __name__ == '__main__':
for n in range(3):
seconds = random.random()
process = multiprocessing.Process(target=print_current_time, args=(seconds,))
process.start()
# Q7
my_birthday = date(1993, 8, 13)
print(my_birthday)
# Q8
# Weekday counted starting from zero (Monday == 0)
print(my_birthday.weekday())
# Weekday counted starting from one (Monday == 1)
print(my_birthday.isoweekday())
# Q9
from datetime import timedelta
ten_thousand_day_after_my_birthday = my_birthday + timedelta(days=10000)
print(ten_thousand_day_after_my_birthday)
| 3.84375 | 4 |
things.py | racinmat/depth-voxelmap-estimation | 2 | 12799993 | import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from PIL import Image
import Network
import dataset
from Network import BATCH_SIZE
from dataset import DataSet
def output_predict(depths, images, depths_discretized, depths_reconstructed, output_dir):
print("output predict into %s" % output_dir)
if not tf.gfile.Exists(output_dir):
tf.gfile.MakeDirs(output_dir)
for i, _ in enumerate(images):
image, depth, depth_discretized, depth_reconstructed = images[i], depths[i], depths_discretized[i], \
depths_reconstructed[i]
pilimg = Image.fromarray(np.uint8(image))
image_name = "%s/%03d_org.png" % (output_dir, i)
pilimg.save(image_name)
depth = depth.transpose(2, 0, 1)
if np.max(depth) != 0:
ra_depth = (depth / np.max(depth)) * 255.0
else:
ra_depth = depth * 255.0
depth_pil = Image.fromarray(np.uint8(ra_depth[0]), mode="L")
depth_name = "%s/%03d.png" % (output_dir, i)
depth_pil.save(depth_name)
for j in range(dataset.DEPTH_DIM):
ra_depth = depth_discretized[:, :, j] * 255.0
depth_discr_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
depth_discr_name = "%s/%03d_%03d_discr.png" % (output_dir, i, j)
depth_discr_pil.save(depth_discr_name)
# for j in range(DEPTH_DIM):
# ra_depth = mask[:, :, j]
# depth_discr_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
# depth_discr_name = "%s/%03d_%03d_discr_m.png" % (output_dir, i, j)
# depth_discr_pil.save(depth_discr_name)
#
# for j in range(DEPTH_DIM):
# ra_depth = mask_lower[:, :, j]
# depth_discr_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
# depth_discr_name = "%s/%03d_%03d_discr_ml.png" % (output_dir, i, j)
# depth_discr_pil.save(depth_discr_name)
depth = depth_reconstructed[:, :, 0]
if np.max(depth) != 0:
ra_depth = (depth / np.max(depth)) * 255.0
else:
ra_depth = depth * 255.0
depth_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
depth_name = "%s/%03d_reconstructed.png" % (output_dir, i)
depth_pil.save(depth_name)
def playground_loss_function(labels, logits):
# in rank 2, [elements, classes]
# tf.nn.weighted_cross_entropy_with_logits(labels, logits, weights)
losses = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
return losses
def prob_to_logit(probs):
return np.log(probs / (1 - probs))
def softmax(x):
"""Same behaviour as tf.nn.softmax in tensorflow"""
e_x = np.exp(x)
sum_per_row = np.tile(e_x.sum(axis=1), (x.shape[1], 1)).T
print('e_x', '\n', e_x)
print('sum_per_row', '\n', sum_per_row)
return e_x / sum_per_row
def softmax_cross_entropy_loss(labels, logits):
"""Same behaviour as tf.nn.softmax_cross_entropy_with_logits in tensorflow"""
loss_per_row = - np.sum(labels * np.log(softmax(logits)), axis=1)
return loss_per_row
def labels_to_info_gain(labels, logits, alpha=0.2):
last_axis = len(logits.shape) - 1
label_idx = np.tile(np.argmax(labels, axis=last_axis), (labels.shape[last_axis], 1)).T
prob_bin_idx = np.tile(range(logits.shape[last_axis]), (labels.shape[0], 1))
# print('label_idx', '\n', label_idx)
# print('probs_idx', '\n', prob_bin_idx)
info_gain = np.exp(-alpha * (label_idx - prob_bin_idx)**2)
print('info gain', '\n', info_gain)
return info_gain
def tf_labels_to_info_gain(labels, logits, alpha=0.2):
last_axis = len(logits.shape) - 1
label_idx = tf.expand_dims(tf.argmax(labels, axis=last_axis), 0)
label_idx = tf.cast(label_idx, dtype=tf.int32)
label_idx = tf.tile(label_idx, [labels.shape[last_axis], 1])
label_idx = tf.transpose(label_idx)
prob_bin_idx = tf.expand_dims(tf.range(logits.shape[last_axis], dtype=tf.int32), last_axis)
prob_bin_idx = tf.transpose(prob_bin_idx)
prob_bin_idx = tf.tile(prob_bin_idx, [labels.shape[0], 1])
difference = (label_idx - prob_bin_idx)**2
difference = tf.cast(difference, dtype=tf.float32)
info_gain = tf.exp(-alpha * difference)
return info_gain
def informed_cross_entropy_loss(labels, logits):
"""Same behaviour as tf.nn.weighted_cross_entropy_with_logits in tensorflow"""
probs = softmax(logits)
print('probs', '\n', probs)
logged_probs = np.log(probs)
print('logged probs', '\n', logged_probs)
loss_per_row = - np.sum(labels_to_info_gain(labels, logits) * logged_probs, axis=1)
return loss_per_row
def playing_with_losses():
labels = np.array([
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
# [0, 1, 0, 0, 0],
# [0, 0, 1, 0, 0],
# [0, 0, 0, 1, 0],
# [0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0],
])
logits = np.array([
[0, 20, 0, 0, 0],
[0, 10, 0, 0, 0],
[0, 2, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 0, 0, 1],
[0, 1, 0, 0, 0],
# [3, 1, 1, 1, 1],
# [0, 10, 0, 0, 0],
# [1, 5, 1, 1, 1],
# [0, 0, 1, 0, 0],
# [1, 1, 4, 1, 1],
# [1, 1, 1, 4, 1],
# [1, 1, 1, 1, 4],
# [4, 1, 1, 1, 1],
])
probs = softmax(logits)
loss = softmax_cross_entropy_loss(labels=labels, logits=logits)
new_loss = informed_cross_entropy_loss(labels=labels, logits=logits)
with tf.Graph().as_default():
with tf.Session() as sess:
logits_tf = tf.constant(logits, dtype=tf.float32)
labels_tf = tf.constant(labels, dtype=tf.float32)
probs_tf = sess.run(tf.nn.softmax(logits_tf))
loss_tf = sess.run(tf.nn.softmax_cross_entropy_with_logits(labels=labels_tf, logits=logits_tf))
new_loss_tf = sess.run(tf.nn.softmax_cross_entropy_with_logits(labels=tf_labels_to_info_gain(labels, logits_tf), logits=logits_tf))
# print('labels', '\n', labels)
# print('logits', '\n', logits)
# print('probs', '\n', probs)
# print('probs diff', '\n', probs - probs_tf)
print('loss', '\n', loss)
print('loss_tf', '\n', loss_tf)
print('loss diff', '\n', loss - loss_tf)
print('new_loss', '\n', new_loss)
print('new_loss_tf', '\n', new_loss_tf)
print('new loss diff', '\n', new_loss - new_loss_tf)
# f, axarr = plt.subplots(2, 3)
# axarr[0, 0].set_title('sample 1')
# axarr[0, 0].plot(probs[0, :])
# axarr[0, 1].set_title('sample 2')
# axarr[0, 1].plot(probs[1, :])
# axarr[1, 0].set_title('sample 3')
# axarr[1, 0].plot(probs[2, :])
# axarr[1, 1].set_title('sample 4')
# axarr[1, 1].plot(probs[3, :])
plt.plot(probs[0, :], color='r')
plt.plot(probs[1, :], color='g')
plt.plot(probs[2, :], color='b')
plt.plot(probs[3, :], color='y')
plt.show()
def input_parser(filename):
assert tf.get_default_session() is sess
tf.logging.warning(('filename', filename))
channel_data = tf.data.TextLineDataset(filename).map(lambda line: tf.decode_csv(line, [["path"], ["annotation"]]))
return channel_data
def filenames_to_data(rgb_filename, voxelmap_filename):
tf.logging.warning(('rgb_filename', rgb_filename))
rgb_image = dataset.DataSet.filename_to_input_image(rgb_filename)
voxelmap = tf.py_func(dataset.DataSet.filename_to_target_voxelmap, [voxelmap_filename], tf.int32)
voxelmap.set_shape([dataset.TARGET_WIDTH, dataset.TARGET_HEIGHT, dataset.DEPTH_DIM])
# voxelmap = dataset.DataSet.filename_to_target_voxelmap(voxelmap_filename)
depth_reconstructed = dataset.DataSet.tf_voxelmap_to_depth(voxelmap)
return rgb_image, voxelmap, depth_reconstructed
def tf_new_data_api_experiments():
# global sess
batch_size = 4
with sess.as_default():
tf.logging.set_verbosity(tf.logging.INFO)
# dataset = tf.data.TFRecordDataset(['train-voxel-gta.csv', 'test-voxel-gta.csv'])
train_imgs = tf.constant(['train-voxel-gta.csv'])
filename_list = tf.data.Dataset.from_tensor_slices(train_imgs)
filename_pairs = filename_list.flat_map(input_parser)
data_pairs = filename_pairs.map(filenames_to_data)
data_pairs = data_pairs.batch(batch_size)
#
# # input
# image = dataset.DataSet.filename_to_input_image(filename)
# # target
# voxelmap = dataset.DataSet.filename_to_target_voxelmap(voxelmap_filename)
# depth_reconstructed = dataset.DataSet.tf_voxelmap_to_depth(voxelmap)
iterator = data_pairs.make_one_shot_iterator()
batch_images, batch_voxels, batch_depths = iterator.get_next()
for i in range(1):
images_values, voxels_values, depths_values = sess.run([batch_images, batch_voxels, batch_depths])
for j in range(batch_size):
plt.figure(figsize=(10, 6))
plt.axis('off')
plt.imshow(images_values[j, :, :, :].astype(dtype=np.uint8))
plt.savefig('inspections/out-{}-rgb.png'.format(j), bbox_inches='tight')
plt.figure(figsize=(10, 6))
plt.axis('off')
plt.imshow(depths_values[j, :, :].T, cmap='gray')
plt.savefig('inspections/out-{}-depth.png'.format(j), bbox_inches='tight')
# pure numpy calculation of depth image from voxelmap
occupied_ndc_grid = voxels_values[j, :, :, :]
occupied_ndc_grid = np.flip(occupied_ndc_grid, axis=2)
depth_size = occupied_ndc_grid.shape[2]
new_depth = np.argmax(occupied_ndc_grid, axis=2)
new_depth = new_depth.T
new_depth *= int(255/depth_size)
plt.figure(figsize=(10, 7))
plt.axis('off')
plt.imshow(new_depth, cmap='gray')
plt.savefig('inspections/out-{}-depth-np.png'.format(j), bbox_inches='tight')
def load_numpy_bin():
# name = 'inspections/2018-03-07--17-57-32--527.bin'
name = 'inspections/2018-03-07--17-57-32--527.npy'
# numpy_voxelmap = np.fromfile(name, sep=';')
numpy_voxelmap = np.load(name)
print(numpy_voxelmap.shape)
# numpy_voxelmap = numpy_voxelmap.reshape([240, 160, 100])
numpy_voxelmap = np.flip(numpy_voxelmap, axis=2)
# now I have just boolean for each value
# so I create mask to assign higher value to booleans in higher index
depth_size = numpy_voxelmap.shape[2]
new_depth = np.argmax(numpy_voxelmap, axis=2)
new_depth = new_depth.T
new_depth *= int(255 / depth_size)
plt.figure(figsize=(10, 6))
plt.axis('off')
plt.imshow(new_depth, cmap='gray')
plt.savefig('inspections/2018-03-07--17-57-32--527.png', bbox_inches='tight')
sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
if __name__ == '__main__':
# playing_with_losses()
# tf_dataset_experiments()
# load_numpy_bin()
tf_new_data_api_experiments()
# arr = np.array([
# [1, 1, 1, 2],
# [2, 2, 2, 4],
# [4, 4, 4, 8],
# ])
# with tf.Graph().as_default():
# with tf.Session() as sess:
# logits_tf = tf.constant(arr, dtype=tf.float32)
# tf_mean = sess.run(tf.reduce_mean(logits_tf))
# print('tf_mean\n', tf_mean)
#
# print('mean\n', np.mean(arr))
# print('sum_per_row\n', np.sum(arr, axis=1))
# print('mean_of_sum\n', np.mean(np.sum(arr, axis=1), axis=0))
# ds = DataSet(8)
# ds.load_params('train.csv')
#
# d = list(range(1, 100))
# d_min = np.min(d)
# d_max = 20
# num_bins = 10
# q_calc = (np.log(np.max(d)) - np.log(d_min)) / (num_bins - 1)
# # q = 0.5 # width of quantization bin
# l = np.round((np.log(d) - np.log(d_min)) / q_calc)
#
# print(d)
# print(l)
#
# print('q_calc', q_calc)
#
# f, axarr = plt.subplots(2, 2)
# axarr[0, 0].plot(d)
# axarr[0, 1].plot(np.log(d))
# axarr[1, 0].plot(np.log(d) - np.log(d_min))
# axarr[1, 1].plot((np.log(d) - np.log(d_min)) / q_calc)
# plt.show()
# with tf.Graph().as_default():
# with tf.Session() as sess:
# x = tf.constant(d)
#
# # for i in range(500):
# # if i % 500 == 0:
# # print('hi', i)
#
# IMAGE_HEIGHT = 240
# IMAGE_WIDTH = 320
# TARGET_HEIGHT = 120
# TARGET_WIDTH = 160
# DEPTH_DIM = 10
#
# filename_queue = tf.train.string_input_producer(['train.csv'], shuffle=True)
# reader = tf.TextLineReader()
# _, serialized_example = reader.read(filename_queue)
# filename, depth_filename = tf.decode_csv(serialized_example, [["path"], ["annotation"]])
# # input
# jpg = tf.read_file(filename)
# image = tf.image.decode_jpeg(jpg, channels=3)
# image = tf.cast(image, tf.float32)
# # target
# depth_png = tf.read_file(depth_filename)
# depth = tf.image.decode_png(depth_png, channels=1)
# depth = tf.cast(depth, tf.float32)
# depth = depth / 255.0
# # depth = tf.cast(depth, tf.int64)
# # resize
# image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
# depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
#
# depth_discretized = dataset.DataSet.discretize_depth(depth)
#
# invalid_depth = tf.sign(depth)
#
# batch_size = 8
# # generate batch
# images, depths, depths_discretized, invalid_depths = tf.train.batch(
# [image, depth, depth_discretized, invalid_depth],
# batch_size=batch_size,
# num_threads=4,
# capacity=40)
#
# depth_reconstructed, weights, mask, mask_multiplied, mask_multiplied_sum = Network.Network.bins_to_depth(depths_discretized)
#
# print('weights: ', weights)
#
# coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#
# images_val, depths_val, depths_discretized_val, invalid_depths_val, depth_reconstructed_val, mask_val, mask_multiplied_val, mask_multiplied_sum_val = sess.run(
# [images, depths, depths_discretized, invalid_depths, depth_reconstructed, mask, mask_multiplied, mask_multiplied_sum])
# sess.run(images)
#
# output_predict(depths_val, images_val, depths_discretized_val,
# depth_reconstructed_val, 'kunda')
#
# depth_reconstructed_val = depth_reconstructed_val[:, :, :, 0]
# coord.request_stop()
# coord.join(threads)
#
# layer = 2
# f, axarr = plt.subplots(2, 3)
# axarr[0, 0].set_title('masks_val')
# axarr[0, 0].imshow(mask_val[0, :, :, layer])
# axarr[0, 1].set_title('mask_multiplied_val')
# axarr[0, 1].imshow(mask_multiplied_val[0, :, :, layer])
# axarr[1, 0].set_title('depths_val')
# axarr[1, 0].imshow(depths_val[0, :, :, 0])
# axarr[1, 1].set_title('depths_discretized_val')
# axarr[1, 1].imshow(depths_discretized_val[0, :, :, layer])
# axarr[0, 2].set_title('mask_multiplied_sum_val')
# axarr[0, 2].imshow(mask_multiplied_sum_val[0, :, :])
# axarr[1, 2].set_title('depth_reconstructed_val')
# axarr[1, 2].imshow(depth_reconstructed_val[0, :, :])
# plt.show()
# network = Network.Network()
# network.prepare()
# total_vars = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
# print('trainable vars: ', total_vars)
# for output bins = 200: 73 696 786
# for output bins = 100: 65 312 586 | 2.46875 | 2 |
src/python/twitter/pants/tasks/cache_manager.py | wfarner/commons | 1 | 12799994 | # ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
try:
import cPickle as pickle
except ImportError:
import pickle
from twitter.pants import has_sources
from twitter.pants.base.build_invalidator import (
BuildInvalidator,
CacheKeyGenerator,
NO_SOURCES,
TARGET_SOURCES)
from twitter.pants.base.target import Target
from twitter.pants.targets import TargetWithSources
from twitter.pants.targets.external_dependency import ExternalDependency
from twitter.pants.targets.internal import InternalTarget
class VersionedTargetSet(object):
"""Represents a list of targets, a corresponding CacheKey, and a flag determining whether the
list of targets is currently valid.
When invalidating a single target, this can be used to represent that target as a singleton.
When checking the artifact cache, this can also be used to represent a list of targets that are
built together into a single artifact.
"""
@classmethod
def from_versioned_targets(cls, versioned_targets):
first_target = versioned_targets[0]
cache_manager = first_target._cache_manager
# Quick sanity check; all the versioned targets should have the same cache manager.
# TODO(ryan): the way VersionedTargets store their own links to a single CacheManager instance
# feels hacky; see if there's a cleaner way for callers to handle awareness of the CacheManager.
for versioned_target in versioned_targets:
if versioned_target._cache_manager != cache_manager:
raise ValueError("Attempting to combine versioned targets %s and %s with different"
" CacheManager instances: %s and %s" % (first_target, versioned_target,
cache_manager,
versioned_target._cache_manager))
return cls(cache_manager, versioned_targets)
def __init__(self, cache_manager, versioned_targets):
self._cache_manager = cache_manager
self.versioned_targets = versioned_targets
self.targets = [vt.target for vt in versioned_targets]
# The following line is a no-op if cache_key was set in the VersionedTarget __init__ method.
self.cache_key = CacheKeyGenerator.combine_cache_keys([vt.cache_key
for vt in versioned_targets])
self.num_sources = self.cache_key.num_sources
self.valid = not cache_manager.needs_update(self.cache_key)
def update(self):
self._cache_manager.update(self)
def force_invalidate(self):
self._cache_manager.force_invalidate(self)
def __repr__(self):
return "VTS(%s. %d)" % (','.join(target.id for target in self.targets), 1 if self.valid else 0)
class VersionedTarget(VersionedTargetSet):
"""This class represents a singleton VersionedTargetSet, and has links to VersionedTargets that
  the wrapped target depends on (after having resolved through any "alias" targets).
"""
def __init__(self, cache_manager, target, cache_key):
if not isinstance(target, TargetWithSources):
raise ValueError("The target %s must support sources and does not." % target.id)
self.target = target
self.cache_key = cache_key
# Must come after the assignments above, as they are used in the parent's __init__.
VersionedTargetSet.__init__(self, cache_manager, [self])
self.id = target.id
self.dependencies = set()
# The result of calling check() on a CacheManager.
# Each member is a list of VersionedTargetSet objects in topological order.
# Tasks may need to perform no, some or all operations on either of these, depending on how they
# are implemented.
class InvalidationCheck(object):
@classmethod
def _partition_versioned_targets(cls, versioned_targets, partition_size_hint):
"""Groups versioned targets so that each group has roughly the same number of sources.
versioned_targets is a list of VersionedTarget objects [vt1, vt2, vt3, vt4, vt5, vt6, ...].
Returns a list of VersionedTargetSet objects, e.g., [VT1, VT2, VT3, ...] representing the
same underlying targets. E.g., VT1 is the combination of [vt1, vt2, vt3], VT2 is the combination
of [vt4, vt5] and VT3 is [vt6].
The new versioned targets are chosen to have roughly partition_size_hint sources.
This is useful as a compromise between flat mode, where we build all targets in a
single compiler invocation, and non-flat mode, where we invoke a compiler for each target,
which may lead to lots of compiler startup overhead. A task can choose instead to build one
group at a time.
"""
res = []
# Hack around the python outer scope problem.
class VtGroup(object):
def __init__(self):
self.vts = []
self.total_sources = 0
current_group = VtGroup()
def add_to_current_group(vt):
current_group.vts.append(vt)
current_group.total_sources += vt.num_sources
def close_current_group():
if len(current_group.vts) > 0:
new_vt = VersionedTargetSet.from_versioned_targets(current_group.vts)
res.append(new_vt)
current_group.vts = []
current_group.total_sources = 0
for vt in versioned_targets:
add_to_current_group(vt)
if current_group.total_sources > 1.5 * partition_size_hint and len(current_group.vts) > 1:
# Too big. Close the current group without this vt and add it to the next one.
current_group.vts.pop()
close_current_group()
add_to_current_group(vt)
elif current_group.total_sources > partition_size_hint:
close_current_group()
close_current_group() # Close the last group, if any.
return res
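  # Worked example (illustrative, not from the original tests): with partition_size_hint=20 and
  # versioned targets whose source counts are [10, 8, 15, 5], adding the third target pushes the
  # running total to 33 (> 1.5 * 20), so it is popped into a fresh group; the result is two
  # VersionedTargetSets covering 18 and 20 sources respectively.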
def __init__(self, all_vts, invalid_vts, partition_size_hint=None):
# All the targets, valid and invalid.
self.all_vts = all_vts
# All the targets, partitioned if so requested.
self.all_vts_partitioned = self._partition_versioned_targets(
all_vts, partition_size_hint) if partition_size_hint else all_vts
# Just the invalid targets.
self.invalid_vts = invalid_vts
# Just the invalid targets, partitioned if so requested.
self.invalid_vts_partitioned = self._partition_versioned_targets(
invalid_vts, partition_size_hint) if partition_size_hint else invalid_vts
class CacheManager(object):
"""Manages cache checks, updates and invalidation keeping track of basic change
and invalidation statistics.
Note that this is distinct from the ArtifactCache concept, and should probably be renamed.
"""
def __init__(self, cache_key_generator, build_invalidator_dir,
invalidate_dependents, extra_data, only_externaldeps):
self._cache_key_generator = cache_key_generator
self._invalidate_dependents = invalidate_dependents
self._extra_data = pickle.dumps(extra_data) # extra_data may be None.
self._sources = NO_SOURCES if only_externaldeps else TARGET_SOURCES
self._invalidator = BuildInvalidator(build_invalidator_dir)
def update(self, vts):
"""Mark a changed or invalidated VersionedTargetSet as successfully processed."""
for vt in vts.versioned_targets:
self._invalidator.update(vt.cache_key)
vt.valid = True
self._invalidator.update(vts.cache_key)
vts.valid = True
def force_invalidate(self, vts):
"""Force invalidation of a VersionedTargetSet."""
for vt in vts.versioned_targets:
self._invalidator.force_invalidate(vt.cache_key)
vt.valid = False
self._invalidator.force_invalidate(vts.cache_key)
vts.valid = False
def check(self, targets, partition_size_hint=None):
"""Checks whether each of the targets has changed and invalidates it if so.
Returns a list of VersionedTargetSet objects (either valid or invalid). The returned sets
'cover' the input targets, possibly partitioning them, and are in topological order.
The caller can inspect these in order and, e.g., rebuild the invalid ones.
"""
all_vts = self._sort_and_validate_targets(targets)
invalid_vts = filter(lambda vt: not vt.valid, all_vts)
return InvalidationCheck(all_vts, invalid_vts, partition_size_hint)
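  # Typical caller pattern (a sketch, not part of the original file): a task calls
  # check(targets), walks invalidation_check.invalid_vts_partitioned in order,
  # rebuilds each VersionedTargetSet and then calls update(vts) so the underlying
  # cache keys are marked valid for subsequent runs.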
def _sort_and_validate_targets(self, targets):
"""Validate each target.
Returns a topologically ordered set of VersionedTargets, each representing one input target.
"""
# We must check the targets in this order, to ensure correctness if invalidate_dependents=True,
# since we use earlier cache keys to compute later cache keys in this case.
ordered_targets = self._order_target_list(targets)
# This will be a list of VersionedTargets that correspond to @targets.
versioned_targets = []
# This will be a mapping from each target to its corresponding VersionedTarget.
versioned_targets_by_target = {}
# Map from id to current fingerprint of the target with that id. We update this as we iterate,
# in topological order, so when handling a target, this will already contain all its deps (in
# this round).
id_to_hash = {}
for target in ordered_targets:
dependency_keys = set()
if self._invalidate_dependents and hasattr(target, 'dependencies'):
# Note that we only need to do this for the immediate deps, because those will already
# reflect changes in their own deps.
for dep in target.dependencies:
# We rely on the fact that any deps have already been processed, either in an earlier
# round or because they came first in ordered_targets.
if isinstance(dep, ExternalDependency):
dependency_keys.add(dep.cache_key())
elif isinstance(dep, Target):
fprint = id_to_hash.get(dep.id, None)
if fprint is None:
# It may have been processed in a prior round, and therefore the fprint should
# have been written out by the invalidator.
fprint = self._invalidator.existing_hash(dep.id)
# Note that fprint may be None here, indicating that the dependency will not be
# processed until a later phase. For example, if a codegen target depends on a
# library target (because the generated code needs that library).
if fprint is not None:
dependency_keys.add(fprint)
else:
raise ValueError('Cannot calculate a cache_key for a dependency: %s' % dep)
cache_key = self._key_for(target, dependency_keys)
id_to_hash[target.id] = cache_key.hash
# Create a VersionedTarget corresponding to @target.
versioned_target = VersionedTarget(self, target, cache_key)
# Add the new VersionedTarget to the list of computed VersionedTargets.
versioned_targets.append(versioned_target)
# Add to the mapping from Targets to VersionedTargets, for use in hooking up VersionedTarget
# dependencies below.
versioned_targets_by_target[target] = versioned_target
# Having created all applicable VersionedTargets, now we build the VersionedTarget dependency
# graph, looking through targets that don't correspond to VersionedTargets themselves.
versioned_target_deps_by_target = {}
def get_versioned_target_deps_for_target(target):
# For every dependency of @target, we will store its corresponding VersionedTarget here. For
# dependencies that don't correspond to a VersionedTarget (e.g. pass-through dependency
# wrappers), we will resolve their actual dependencies and find VersionedTargets for them.
versioned_target_deps = set([])
if hasattr(target, 'dependencies'):
for dep in target.dependencies:
for dependency in dep.resolve():
if dependency in versioned_targets_by_target:
# If there exists a VersionedTarget corresponding to this Target, store it and
# continue.
versioned_target_deps.add(versioned_targets_by_target[dependency])
elif dependency in versioned_target_deps_by_target:
# Otherwise, see if we've already resolved this dependency to the VersionedTargets it
# depends on, and use those.
versioned_target_deps.update(versioned_target_deps_by_target[dependency])
else:
# Otherwise, compute the VersionedTargets that correspond to this dependency's
# dependencies, cache and use the computed result.
versioned_target_deps_by_target[dependency] = get_versioned_target_deps_for_target(
dependency)
versioned_target_deps.update(versioned_target_deps_by_target[dependency])
# Return the VersionedTarget dependencies that this target's VersionedTarget should depend on.
return versioned_target_deps
# Initialize all VersionedTargets to point to the VersionedTargets they depend on.
for versioned_target in versioned_targets:
versioned_target.dependencies = get_versioned_target_deps_for_target(versioned_target.target)
return versioned_targets
def needs_update(self, cache_key):
return self._invalidator.needs_update(cache_key)
def _order_target_list(self, targets):
"""Orders the targets topologically, from least to most dependent."""
targets = set(filter(has_sources, targets))
return filter(targets.__contains__, reversed(InternalTarget.sort_targets(targets)))
def _key_for(self, target, dependency_keys):
def fingerprint_extra(sha):
sha.update(self._extra_data)
for key in sorted(dependency_keys): # Sort to ensure hashing in a consistent order.
sha.update(key)
return self._cache_key_generator.key_for_target(
target,
sources=self._sources,
fingerprint_extra=fingerprint_extra
)
| 1.78125 | 2 |
configman/tests/test_val_for_json.py | peterbe/configman | 0 | 12799995 | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# <NAME>, <EMAIL>
# <NAME>, <EMAIL>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import unittest
import os
import json
import tempfile
from cStringIO import StringIO
import configman.config_manager as config_manager
import configman.datetime_util as dtu
from configman.value_sources.for_json import ValueSource
#from ..value_sources.for_json import ValueSource
def bbb_minus_one(config, local_config, args):
return config.bbb - 1
class TestCase(unittest.TestCase):
def test_for_json_basics(self):
tmp_filename = os.path.join(tempfile.gettempdir(), 'test.json')
j = {'fred': 'wilma',
'number': 23,
}
with open(tmp_filename, 'w') as f:
json.dump(j, f)
try:
jvs = ValueSource(tmp_filename)
vals = jvs.get_values(None, True)
self.assertEqual(vals['fred'], 'wilma')
self.assertEqual(vals['number'], 23)
finally:
if os.path.isfile(tmp_filename):
os.remove(tmp_filename)
def test_write_json(self):
n = config_manager.Namespace(doc='top')
n.add_option('aaa', '2011-05-04T15:10:00', 'the a',
short_form='a',
from_string_converter=dtu.datetime_from_ISO_string
)
def value_iter():
yield 'aaa', 'aaa', n.aaa
s = StringIO()
ValueSource.write(value_iter, output_stream=s)
received = s.getvalue()
s.close()
jrec = json.loads(received)
expect_to_find = {
"short_form": "a",
"default": "2011-05-04T15:10:00",
"doc": "the a",
"value": "2011-05-04T15:10:00",
"from_string_converter":
"configman.datetime_util.datetime_from_ISO_string",
"name": "aaa"
}
for key, value in expect_to_find.items():
self.assertEqual(jrec['aaa'][key], value)
def test_json_round_trip(self):
n = config_manager.Namespace(doc='top')
n.add_option('aaa', '2011-05-04T15:10:00', 'the a',
short_form='a',
from_string_converter=dtu.datetime_from_ISO_string
)
expected_date = dtu.datetime_from_ISO_string('2011-05-04T15:10:00')
n.add_option('bbb', '37', 'the a',
short_form='a',
from_string_converter=int
)
n.add_option('write', 'json')
n.add_aggregation('bbb_minus_one', bbb_minus_one)
#t = tempfile.NamedTemporaryFile('w', suffix='.json', delete=False)
name = '/tmp/test.json'
import functools
opener = functools.partial(open, name, 'w')
c1 = config_manager.ConfigurationManager([n], [],
use_admin_controls=True,
use_auto_help=False,
app_name='/tmp/test',
app_version='0',
app_description='',
argv_source=[])
c1.write_conf('json', opener)
d1 = {'bbb': 88}
d2 = {'bbb': '-99'}
try:
with open(name) as jfp:
j = json.load(jfp)
c2 = config_manager.ConfigurationManager((j,), (d1, d2),
use_admin_controls=True,
use_auto_help=False,
argv_source=[])
config = c2.get_config()
self.assertEqual(config.aaa, expected_date)
self.assertEqual(config.bbb, -99)
self.assertEqual(config.bbb_minus_one, -100)
finally:
os.unlink(name)
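# The suite can be run directly with the standard unittest runner, e.g.
# `python -m unittest configman.tests.test_val_for_json` (module path assumed
# from the file location).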
| 1.507813 | 2 |
Python/Programming Basics/Simple Conditional Statements/09. Password Guess.py | teodoramilcheva/softuni-software-engineering | 0 | 12799996 | <gh_stars>0
a = input()
b = '<PASSWORD>'
if a == b:
print('Welcome')
else:
print('Wrong password!')
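# Example behaviour: prints 'Welcome' when the typed value matches the stored
# password, and 'Wrong password!' otherwise.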
| 3.1875 | 3 |
scripts/addons/uvpackmaster2/operator.py | Tilapiatsu/blender-custom_conf | 2 | 12799997 | <gh_stars>1-10
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import subprocess
import queue
import threading
import signal
import webbrowser
from .utils import *
from .pack_context import *
from .connection import *
from .prefs import *
from .os_iface import *
from .island_params import *
from .labels import UvpLabels
from .register import check_uvp, unregister_uvp
import bmesh
import bpy
import mathutils
import tempfile
class InvalidIslandsError(Exception):
pass
class NoUvFaceError(Exception):
pass
class UVP2_OT_PackOperatorGeneric(bpy.types.Operator):
bl_options = {'UNDO'}
MODAL_INTERVAL_S = 0.1
interactive = False
@classmethod
def poll(cls, context):
prefs = get_prefs()
return prefs.uvp_initialized and context.active_object is not None and context.active_object.mode == 'EDIT'
def check_uvp_retcode(self, retcode):
self.prefs.uvp_retcode = retcode
if retcode in {UvPackerErrorCode.SUCCESS,
UvPackerErrorCode.INVALID_ISLANDS,
UvPackerErrorCode.NO_SPACE,
UvPackerErrorCode.PRE_VALIDATION_FAILED}:
return
if retcode == UvPackerErrorCode.CANCELLED:
raise OpCancelledException()
if retcode == UvPackerErrorCode.NO_VALID_STATIC_ISLAND:
raise RuntimeError("'Pack To Others' option enabled, but no unselected island found in the packing box")
if retcode == UvPackerErrorCode.MAX_GROUP_COUNT_EXCEEDED:
raise RuntimeError("Maximal group count exceeded")
if retcode == UvPackerErrorCode.DEVICE_NOT_SUPPORTED:
raise RuntimeError("Selected device is not supported")
if retcode == UvPackerErrorCode.DEVICE_DOESNT_SUPPORT_GROUPS_TOGETHER:
raise RuntimeError("Selected device doesn't support packing groups together")
raise RuntimeError('Pack process returned an error')
def raiseUnexpectedOutputError(self):
raise RuntimeError('Unexpected output from the pack process')
def set_status(self, status_type, status):
self.op_status_type = status_type
self.op_status = status
def add_warning(self, warn_msg):
self.op_warnings.append(warn_msg)
def report_status(self):
if self.op_status is not None:
self.prefs['op_status'] = self.op_status
op_status_type = self.op_status_type if self.op_status_type is not None else 'INFO'
op_status = self.op_status
if len(self.op_warnings) > 0:
if op_status_type == 'INFO':
op_status_type = 'WARNING'
op_status += '. (WARNINGS were reported - check the UVP tab for details)'
self.report({op_status_type}, op_status)
self.prefs['op_warnings'] = self.op_warnings
# self.prefs.stats_op_warnings.add(warning_msg)
def exit_common(self):
if self.interactive:
wm = self.p_context.context.window_manager
wm.event_timer_remove(self._timer)
self.p_context.update_meshes()
self.report_status()
if in_debug_mode():
print('UVP operation time: ' + str(time.time() - self.start_time))
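    # The islands message parsed below is laid out as: total island count,
    # selected island count, then one array of face indices per island.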
def read_islands(self, islands_msg):
islands = []
island_cnt = force_read_int(islands_msg)
selected_cnt = force_read_int(islands_msg)
for i in range(island_cnt):
islands.append(read_int_array(islands_msg))
self.p_context.set_islands(selected_cnt, islands)
def process_invalid_islands(self):
if self.uvp_proc.returncode != UvPackerErrorCode.INVALID_ISLANDS:
return
if self.invalid_islands_msg is None:
self.raiseUnexpectedOutputError()
code = force_read_int(self.invalid_islands_msg)
subcode = force_read_int(self.invalid_islands_msg)
invalid_islands = read_int_array(self.invalid_islands_msg)
if len(invalid_islands) == 0:
self.raiseUnexpectedOutputError()
self.p_context.handle_invalid_islands(invalid_islands)
if code == UvInvalidIslandCode.TOPOLOGY:
error_msg = "Invalid topology encountered in the selected islands. Check the Help panel to learn more"
elif code == UvInvalidIslandCode.INT_PARAM:
param_array = IslandParamInfo.get_param_info_array()
error_msg = "Faces with inconsistent {} values found in the selected islands".format(param_array[subcode].NAME)
else:
self.raiseUnexpectedOutputError()
raise InvalidIslandsError(error_msg)
def require_selection(self):
return True
def finish_after_op_done(self):
return True
def handle_op_done(self):
self.op_done = True
send_finish_confirmation(self.uvp_proc)
try:
self.uvp_proc.wait(5)
except:
raise RuntimeError('The UVP process wait timeout reached')
self.connection_thread.join()
self.check_uvp_retcode(self.uvp_proc.returncode)
if not self.p_context.islands_received():
self.raiseUnexpectedOutputError()
self.process_invalid_islands()
self.process_result()
if self.finish_after_op_done():
raise OpFinishedException()
def finish(self, context):
self.exit_common()
return {'FINISHED', 'PASS_THROUGH'}
def cancel(self, context):
self.uvp_proc.terminate()
# self.progress_thread.terminate()
self.exit_common()
return {'FINISHED'}
def get_progress_msg_spec(self):
return False
def get_progress_msg(self):
if self.hang_detected:
            return 'Packer process has not responded for a while (press ESC to abort)'
if self.curr_phase is None:
return False
progress_msg_spec = self.get_progress_msg_spec()
if progress_msg_spec:
return progress_msg_spec
if self.curr_phase == UvPackingPhaseCode.INITIALIZATION:
return 'Initialization (press ESC to cancel)'
if self.curr_phase == UvPackingPhaseCode.TOPOLOGY_ANALYSIS:
return "Topology analysis: {:3}% (press ESC to cancel)".format(self.progress_array[0])
if self.curr_phase == UvPackingPhaseCode.OVERLAP_CHECK:
return 'Overlap check in progress (press ESC to cancel)'
if self.curr_phase == UvPackingPhaseCode.AREA_MEASUREMENT:
return 'Area measurement in progress (press ESC to cancel)'
if self.curr_phase == UvPackingPhaseCode.SIMILAR_SELECTION:
return 'Searching for similar islands (press ESC to cancel)'
if self.curr_phase == UvPackingPhaseCode.SIMILAR_ALIGNING:
return 'Similar islands aligning (press ESC to cancel)'
if self.curr_phase == UvPackingPhaseCode.RENDER_PRESENTATION:
return 'Close the demo window to finish'
if self.curr_phase == UvPackingPhaseCode.TOPOLOGY_VALIDATION:
return "Topology validation: {:3}% (press ESC to cancel)".format(self.progress_array[0])
if self.curr_phase == UvPackingPhaseCode.VALIDATION:
return "Per-face overlap check: {:3}% (press ESC to cancel)".format(self.progress_array[0])
raise RuntimeError('Unexpected packing phase encountered')
def handle_uvp_msg_spec(self, msg_code, msg):
return False
def handle_event_spec(self, event):
return False
def handle_progress_msg(self):
if self.op_done:
return
msg_refresh_interval = 2.0
new_progress_msg = self.get_progress_msg()
if not new_progress_msg:
return
now = time.time()
if now - self.progress_last_update_time > msg_refresh_interval or new_progress_msg != self.progress_msg:
self.progress_last_update_time = now
self.progress_msg = new_progress_msg
self.report({'INFO'}, self.progress_msg)
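    # Dispatch a single message read from the packer process: progress reports
    # update the phase/progress state (and trigger handle_op_done() when the
    # DONE phase arrives), while the remaining message codes stash their payload
    # streams (invalid islands, island flags, pack solution, area, invalid faces,
    # similar islands, islands metadata) for processing after the operation is done.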
def handle_uvp_msg(self, msg):
msg_code = force_read_int(msg)
if self.handle_uvp_msg_spec(msg_code, msg):
return
if msg_code == UvPackMessageCode.PROGRESS_REPORT:
self.curr_phase = force_read_int(msg)
progress_size = force_read_int(msg)
if progress_size > len(self.progress_array):
self.progress_array = [0] * (progress_size)
for i in range(progress_size):
self.progress_array[i] = force_read_int(msg)
self.progress_sec_left = force_read_int(msg)
self.progress_iter_done = force_read_int(msg)
            # Inform the upper layer whether it should finish
if self.curr_phase == UvPackingPhaseCode.DONE:
self.handle_op_done()
elif msg_code == UvPackMessageCode.INVALID_ISLANDS:
if self.invalid_islands_msg is not None:
self.raiseUnexpectedOutputError()
self.invalid_islands_msg = msg
elif msg_code == UvPackMessageCode.ISLAND_FLAGS:
if self.island_flags_msg is not None:
self.raiseUnexpectedOutputError()
self.island_flags_msg = msg
elif msg_code == UvPackMessageCode.PACK_SOLUTION:
if self.pack_solution_msg is not None:
self.raiseUnexpectedOutputError()
self.pack_solution_msg = msg
elif msg_code == UvPackMessageCode.AREA:
if self.area_msg is not None:
self.raiseUnexpectedOutputError()
self.area_msg = msg
elif msg_code == UvPackMessageCode.INVALID_FACES:
if self.invalid_faces_msg is not None:
self.raiseUnexpectedOutputError()
self.invalid_faces_msg = msg
elif msg_code == UvPackMessageCode.SIMILAR_ISLANDS:
if self.similar_islands_msg is not None:
self.raiseUnexpectedOutputError()
self.similar_islands_msg = msg
elif msg_code == UvPackMessageCode.ISLANDS:
self.read_islands(msg)
elif msg_code == UvPackMessageCode.ISLANDS_METADATA:
if self.islands_metadata_msg is not None:
self.raiseUnexpectedOutputError()
self.islands_metadata_msg = msg
else:
self.raiseUnexpectedOutputError()
def handle_communication(self):
if self.op_done:
return
msg_received = 0
while True:
try:
item = self.progress_queue.get_nowait()
except queue.Empty as ex:
break
if isinstance(item, str):
raise RuntimeError(item)
elif isinstance(item, io.BytesIO):
self.handle_uvp_msg(item)
else:
raise RuntimeError('Unexpected output from the connection thread')
msg_received += 1
curr_time = time.time()
if msg_received > 0:
self.last_msg_time = curr_time
self.hang_detected = False
else:
if self.curr_phase != UvPackingPhaseCode.RENDER_PRESENTATION and curr_time - self.last_msg_time > self.hang_timeout:
self.hang_detected = True
def handle_event(self, event):
# Kill the UVP process unconditionally if a hang was detected
if self.hang_detected and event.type == 'ESC':
raise OpAbortedException()
if self.handle_event_spec(event):
return
# Generic event processing code
if event.type == 'ESC':
raise OpCancelledException()
elif event.type == 'TIMER':
self.handle_communication()
def modal(self, context, event):
cancel = False
finish = False
try:
try:
self.handle_event(event)
# Check whether the uvp process is alive
if not self.op_done and self.uvp_proc.poll() is not None:
# It should not be required to but check once again to be on the safe side
self.handle_communication()
if not self.op_done:
# Special value indicating a crash
self.prefs.uvp_retcode = -1
raise RuntimeError('Packer process died unexpectedly')
self.handle_progress_msg()
except OpFinishedException:
finish = True
except:
raise
if finish:
return self.finish(context)
except OpAbortedException:
self.set_status('INFO', 'Packer process killed')
cancel = True
except OpCancelledException:
self.set_status('INFO', 'Operation cancelled by the user')
cancel = True
except InvalidIslandsError as err:
self.set_status('ERROR', str(err))
cancel = True
except RuntimeError as ex:
if in_debug_mode():
print_backtrace(ex)
self.set_status('ERROR', str(ex))
cancel = True
except Exception as ex:
if in_debug_mode():
print_backtrace(ex)
self.set_status('ERROR', 'Unexpected error')
cancel = True
if cancel:
return self.cancel(context)
return {'RUNNING_MODAL'} if not self.op_done else {'PASS_THROUGH'}
def pre_op_initialize(self):
pass
def execute(self, context):
cancel = False
self.op_done = False
self.uvp_proc = None
self.prefs = get_prefs()
self.scene_props = context.scene.uvp2_props
self.p_context = None
self.pack_ratio = 1.0
self.target_box = None
self.op_status_type = None
self.op_status = None
self.op_warnings = []
try:
if not check_uvp():
unregister_uvp()
redraw_ui(context)
raise RuntimeError("UVP engine broken")
reset_stats(self.prefs)
self.p_context = PackContext(context)
self.pre_op_initialize()
send_unselected = self.send_unselected_islands()
send_rot_step = self.send_rot_step()
send_groups = self.grouping_enabled() and (to_uvp_group_method(self.get_group_method()) == UvGroupingMethodUvp.EXTERNAL)
send_lock_groups = self.lock_groups_enabled()
send_verts_3d = self.send_verts_3d()
selected_cnt, unselected_cnt = self.p_context.serialize_uv_maps(send_unselected, send_groups, send_rot_step, send_lock_groups, send_verts_3d, self.get_group_method() if send_groups else None)
if self.require_selection():
if selected_cnt == 0:
raise NoUvFaceError('No UV face selected')
else:
if selected_cnt + unselected_cnt == 0:
raise NoUvFaceError('No UV face visible')
self.validate_pack_params()
if self.prefs.write_to_file:
out_filepath = os.path.join(tempfile.gettempdir(), 'uv_islands.data')
out_file = open(out_filepath, 'wb')
out_file.write(self.p_context.serialized_maps)
out_file.close()
uvp_args_final = [get_uvp_execpath(), '-E', '-e', str(UvTopoAnalysisLevel.FORCE_EXTENDED), '-t', str(self.prefs.thread_count)] + self.get_uvp_args()
if send_unselected:
uvp_args_final.append('-s')
if self.grouping_enabled():
uvp_args_final += ['-a', str(to_uvp_group_method(self.get_group_method()))]
if self.send_rot_step():
uvp_args_final += ['-R']
if self.lock_groups_enabled():
uvp_args_final += ['-Q']
if in_debug_mode():
if self.prefs.seed > 0:
uvp_args_final += ['-S', str(self.prefs.seed)]
if self.prefs.wait_for_debugger:
uvp_args_final.append('-G')
uvp_args_final += ['-T', str(self.prefs.test_param)]
                print('Packer args: ' + ' '.join(x for x in uvp_args_final))
creation_flags = os_uvp_creation_flags()
popen_args = dict()
if creation_flags is not None:
popen_args['creationflags'] = creation_flags
self.uvp_proc = subprocess.Popen(uvp_args_final,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
**popen_args)
out_stream = self.uvp_proc.stdin
out_stream.write(self.p_context.serialized_maps)
out_stream.flush()
self.start_time = time.time()
self.last_msg_time = self.start_time
self.hang_detected = False
self.hang_timeout = 10.0
# Start progress monitor thread
self.progress_queue = queue.Queue()
self.connection_thread = threading.Thread(target=connection_thread_func,
args=(self.uvp_proc.stdout, self.progress_queue))
self.connection_thread.daemon = True
self.connection_thread.start()
self.progress_array = [0]
self.progress_msg = ''
self.progress_sec_left = -1
self.progress_iter_done = -1
self.progress_last_update_time = 0.0
self.curr_phase = UvPackingPhaseCode.INITIALIZATION
self.invalid_islands_msg = None
self.island_flags_msg = None
self.pack_solution_msg = None
self.area_msg = None
self.invalid_faces_msg = None
self.similar_islands_msg = None
self.islands_metadata_msg = None
except NoUvFaceError as ex:
self.set_status('WARNING', str(ex))
cancel = True
except RuntimeError as ex:
if in_debug_mode():
print_backtrace(ex)
self.set_status('ERROR', str(ex))
cancel = True
except Exception as ex:
if in_debug_mode():
print_backtrace(ex)
self.set_status('ERROR', 'Unexpected error')
cancel = True
if self.p_context is not None:
self.p_context.update_meshes()
if cancel:
if self.uvp_proc is not None:
self.uvp_proc.terminate()
self.report_status()
return {'FINISHED'}
if self.interactive:
wm = context.window_manager
self._timer = wm.event_timer_add(self.MODAL_INTERVAL_S, window=context.window)
wm.modal_handler_add(self)
return {'RUNNING_MODAL'}
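        # In non-interactive mode there is no window-manager timer: the loop below
        # feeds synthetic TIMER events into modal() until it returns FINISHED or
        # CANCELLED.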
class FakeTimerEvent:
def __init__(self):
self.type = 'TIMER'
self.value = 'NOTHING'
self.ctrl = False
while True:
event = FakeTimerEvent()
ret = self.modal(context, event)
if ret.intersection({'FINISHED', 'CANCELLED'}):
return ret
time.sleep(self.MODAL_INTERVAL_S)
def invoke(self, context, event):
self.interactive = True
self.prefs = get_prefs()
self.scene_props = context.scene.uvp2_props
self.confirmation_msg = self.get_confirmation_msg()
wm = context.window_manager
if self.confirmation_msg != '':
pix_per_char = 5
dialog_width = pix_per_char * len(self.confirmation_msg) + 50
return wm.invoke_props_dialog(self, width=dialog_width)
return self.execute(context)
def draw(self, context):
layout = self.layout
col = layout.column()
col.label(text=self.confirmation_msg)
def get_confirmation_msg(self):
return ''
def send_unselected_islands(self):
return False
def grouping_enabled(self):
return False
def get_group_method(self):
raise RuntimeError('Unexpected grouping requested')
def send_rot_step(self):
return False
def lock_groups_enabled(self):
return False
def send_verts_3d(self):
return False
def read_area(self, area_msg):
return round(force_read_float(area_msg) / self.pack_ratio, 3)
class UVP2_OT_PackOperator(UVP2_OT_PackOperatorGeneric):
bl_idname = 'uvpackmaster2.uv_pack'
bl_label = 'Pack'
bl_description = 'Pack selected UV islands'
def __init__(self):
self.cancel_sig_sent = False
self.area = None
def get_confirmation_msg(self):
if platform.system() == 'Darwin':
active_dev = self.prefs.dev_array[self.prefs.sel_dev_idx] if self.prefs.sel_dev_idx < len(self.prefs.dev_array) else None
if active_dev is not None and active_dev.id.startswith('cuda'):
return UvpLabels.CUDA_MACOS_CONFIRM_MSG
if self.prefs.pack_groups_together(self.scene_props) and not self.prefs.heuristic_enabled(self.scene_props):
return UvpLabels.GROUPS_TOGETHER_CONFIRM_MSG
return ''
def send_unselected_islands(self):
return self.prefs.pack_to_others_enabled(self.scene_props)
def grouping_enabled(self):
return self.prefs.grouping_enabled(self.scene_props)
def get_group_method(self):
return self.scene_props.group_method
def send_rot_step(self):
return self.prefs.FEATURE_island_rotation_step and self.scene_props.rot_enable and self.scene_props.island_rot_step_enable
def lock_groups_enabled(self):
return self.prefs.FEATURE_lock_overlapping and self.scene_props.lock_groups_enable
def send_verts_3d(self):
return self.scene_props.normalize_islands
def get_progress_msg_spec(self):
if self.curr_phase in { UvPackingPhaseCode.PACKING, UvPackingPhaseCode.PIXEL_MARGIN_ADJUSTMENT }:
if self.curr_phase == UvPackingPhaseCode.PIXEL_MARGIN_ADJUSTMENT:
header_str = 'Pixel margin adjustment. '
elif self.prefs.heuristic_enabled(self.scene_props):
header_str = 'Current area: {}. '.format(self.area if self.area is not None else 'none')
else:
header_str = ''
if self.progress_iter_done >= 0:
iter_str = 'Iter. done: {}. '.format(self.progress_iter_done)
else:
iter_str = ''
if self.progress_sec_left >= 0:
time_left_str = "Time left: {} sec. ".format(self.progress_sec_left)
else:
time_left_str = ''
percent_progress_str = ''
for prog in self.progress_array:
percent_progress_str += str(prog).rjust(3, ' ') + '%, '
percent_progress_str = percent_progress_str[:-2]
progress_str = 'Pack progress: {} '.format(percent_progress_str)
if self.area is not None:
end_str = '(press ESC to apply result) '
else:
end_str = '(press ESC to cancel) '
return header_str + iter_str + time_left_str + progress_str + end_str
return False
def handle_uvp_msg_spec(self, msg_code, msg):
if msg_code == UvPackMessageCode.AREA:
self.area = self.read_area(msg)
return True
elif msg_code == UvPackMessageCode.PACK_SOLUTION:
pack_solution = read_pack_solution(msg)
self.p_context.apply_pack_solution(self.pack_ratio, pack_solution)
return True
elif msg_code == UvPackMessageCode.BENCHMARK:
stats = self.prefs.stats_array.add()
dev_name_len = force_read_int(msg)
stats.dev_name = msg.read(dev_name_len).decode('ascii')
stats.iter_count = force_read_int(msg)
stats.total_time = force_read_int(msg)
stats.avg_time = force_read_int(msg)
return True
return False
def handle_event_spec(self, event):
if event.type == 'ESC':
if not self.cancel_sig_sent:
self.uvp_proc.send_signal(os_cancel_sig())
self.cancel_sig_sent = True
return True
return False
def process_result(self):
overlap_detected = False
outside_detected = False
if self.invalid_faces_msg is not None:
invalid_face_count = force_read_int(self.invalid_faces_msg)
invalid_faces = read_int_array(self.invalid_faces_msg)
if not self.prefs.FEATURE_demo:
if len(invalid_faces) != invalid_face_count:
self.raiseUnexpectedOutputError()
if invalid_face_count > 0:
# Switch to the face selection mode
if self.p_context.context.tool_settings.use_uv_select_sync:
self.p_context.context.tool_settings.mesh_select_mode = (False, False, True)
else:
self.p_context.context.tool_settings.uv_select_mode = 'FACE'
self.p_context.select_all_faces(False)
self.p_context.select_faces(list(invalid_faces), True)
if invalid_face_count > 0:
self.set_status('WARNING', 'Pre-validation failed. Number of invalid faces found: ' + str(invalid_face_count) + '. Packing aborted')
return
if not self.prefs.FEATURE_demo:
if self.island_flags_msg is None:
self.raiseUnexpectedOutputError()
island_flags = read_int_array(self.island_flags_msg)
overlap_detected, outside_detected = self.p_context.handle_island_flags(island_flags)
if self.area is not None:
self.prefs.stats_area = self.area
if self.uvp_proc.returncode == UvPackerErrorCode.NO_SPACE:
op_status = 'Packing stopped - no space to pack all islands'
self.add_warning("Overlap check was performed only on the islands which were packed")
else:
op_status = 'Packing done'
if self.area is not None:
op_status += ', packed islands area: ' + str(self.area)
self.set_status('INFO', op_status)
if overlap_detected:
self.add_warning("Overlapping islands were detected after packing (check the selected islands). Consider increasing the 'Precision' parameter. Sometimes increasing the 'Adjustment Time' may solve the problem (if used in the operation).")
if outside_detected:
self.add_warning("Some islands are outside their packing box after packing (check the selected islands). This usually happens when 'Pixel Padding' is set to a small value and the 'Adjustment Time' is not long enough.")
def validate_pack_params(self):
active_dev = self.prefs.dev_array[self.prefs.sel_dev_idx] if self.prefs.sel_dev_idx < len(self.prefs.dev_array) else None
if active_dev is None:
raise RuntimeError('Could not find a packing device')
if not active_dev.supported:
raise RuntimeError('Selected packing device is not supported in this engine edition')
# Validate pack mode
pack_mode = UvPackingMode.get_mode(self.scene_props.pack_mode)
if pack_mode.req_feature != '' and not getattr(self.prefs, 'FEATURE_' + pack_mode.req_feature):
raise RuntimeError('Selected packing mode is not supported in this engine edition')
if self.grouping_enabled():
if self.get_group_method() == UvGroupingMethod.SIMILARITY.code:
if self.prefs.pack_to_others_enabled(self.scene_props):
raise RuntimeError("'Pack To Others' is not supported with grouping by similarity")
if not self.scene_props.rot_enable:
raise RuntimeError("Island rotations must be enabled in order to group by similarity")
if self.scene_props.prerot_disable:
raise RuntimeError("'Pre-Rotation Disable' option must be off in order to group by similarity")
if self.prefs.FEATURE_target_box and self.prefs.target_box_enable:
validate_target_box(self.scene_props)
def get_target_box_string(self, target_box):
prec = 4
return "{}:{}:{}:{}".format(
round(target_box[0].x, prec),
round(target_box[0].y, prec),
round(target_box[1].x, prec),
round(target_box[1].y, prec))
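    # The method below assembles the packer's command-line options; an illustrative
    # (hypothetical, values are placeholders) result could look like:
    #   ['-o', <PACK opcode>, '-i', <precision>, '-m', <margin>, '-d', <device id>,
    #    '-r', <rotation step>, '-g', <pack mode>, ..., '-b']
    # with the optional flags appended only when the corresponding features are enabled.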
def get_uvp_args(self):
uvp_args = ['-o', str(UvPackerOpcode.PACK), '-i', str(self.scene_props.precision), '-m',
str(self.scene_props.margin)]
uvp_args += ['-d', self.prefs.dev_array[self.prefs.sel_dev_idx].id]
if self.prefs.pixel_margin_enabled(self.scene_props):
uvp_args += ['-M', str(self.scene_props.pixel_margin)]
uvp_args += ['-y', str(self.prefs.pixel_margin_tex_size(self.scene_props, self.p_context.context))]
if self.prefs.pixel_padding_enabled(self.scene_props):
uvp_args += ['-N', str(self.scene_props.pixel_padding)]
uvp_args += ['-W', self.scene_props.pixel_margin_method]
uvp_args += ['-Y', str(self.scene_props.pixel_margin_adjust_time)]
if self.prefs.fixed_scale_enabled(self.scene_props):
uvp_args += ['-O']
uvp_args += ['-F', self.scene_props.fixed_scale_strategy]
if self.prefs.FEATURE_island_rotation:
if self.scene_props.rot_enable:
rot_step_value = self.scene_props.rot_step
if self.scene_props.prerot_disable:
uvp_args += ['-w']
else:
rot_step_value = -1
uvp_args += ['-r', str(rot_step_value)]
if self.prefs.heuristic_enabled(self.scene_props):
uvp_args += ['-h', str(self.scene_props.heuristic_search_time), '-j', str(self.scene_props.heuristic_max_wait_time)]
if self.prefs.FEATURE_advanced_heuristic and self.scene_props.advanced_heuristic:
uvp_args.append('-H')
uvp_args += ['-g', self.scene_props.pack_mode]
tile_count, tiles_in_row = self.prefs.tile_grid_config(self.scene_props, self.p_context.context)
if self.prefs.pack_to_tiles(self.scene_props):
uvp_args += ['-V', str(tile_count)]
if self.prefs.tiles_enabled(self.scene_props):
uvp_args += ['-C', str(tiles_in_row)]
if self.grouping_enabled():
if to_uvp_group_method(self.get_group_method()) == UvGroupingMethodUvp.SIMILARITY:
uvp_args += ['-I', str(self.scene_props.similarity_threshold)]
if self.prefs.pack_groups_together(self.scene_props):
uvp_args += ['-U', str(self.scene_props.group_compactness)]
if self.prefs.multi_device_enabled(self.scene_props):
uvp_args.append('-u')
if self.prefs.lock_overlap_enabled(self.scene_props):
uvp_args += ['-l', self.scene_props.lock_overlapping_mode]
if self.prefs.pack_to_others_enabled(self.scene_props):
uvp_args += ['-x']
if self.prefs.FEATURE_validation and self.scene_props.pre_validate:
uvp_args.append('-v')
if self.prefs.normalize_islands_enabled(self.scene_props):
uvp_args.append('-L')
if self.prefs.FEATURE_target_box and self.prefs.target_box_enable:
self.target_box = self.prefs.target_box(self.scene_props)
if self.prefs.pack_ratio_enabled(self.scene_props):
self.pack_ratio = get_active_image_ratio(self.p_context.context)
if self.pack_ratio != 1.0:
uvp_args += ['-q', str(self.pack_ratio)]
if self.target_box is not None:
self.target_box[0].x *= self.pack_ratio
self.target_box[1].x *= self.pack_ratio
else:
self.target_box = (Vector((0.0, 0.0)),
Vector((self.pack_ratio, 1.0)))
if self.target_box is not None:
uvp_args += ['-B', self.get_target_box_string(self.target_box)]
uvp_args.append('-b')
return uvp_args
class UVP2_OT_OverlapCheckOperator(UVP2_OT_PackOperatorGeneric):
bl_idname = 'uvpackmaster2.uv_overlap_check'
bl_label = 'Overlap Check'
    bl_description = 'Check whether selected UV islands overlap each other'
def process_result(self):
if self.island_flags_msg is None:
self.raiseUnexpectedOutputError()
island_flags = read_int_array(self.island_flags_msg)
overlap_detected, outside_detected = self.p_context.handle_island_flags(island_flags)
if overlap_detected:
self.set_status('WARNING', 'Overlapping islands detected')
else:
self.set_status('INFO', 'No overlapping islands detected')
def validate_pack_params(self):
pass
def get_uvp_args(self):
uvp_args = ['-o', str(UvPackerOpcode.OVERLAP_CHECK)]
return uvp_args
class UVP2_OT_MeasureAreaOperator(UVP2_OT_PackOperatorGeneric):
bl_idname = 'uvpackmaster2.uv_measure_area'
bl_label = 'Measure Area'
bl_description = 'Measure area of selected UV islands'
def process_result(self):
if self.area_msg is None:
self.raiseUnexpectedOutputError()
area = self.read_area(self.area_msg)
self.prefs.stats_area = area
self.set_status('INFO', 'Islands area: ' + str(area))
def validate_pack_params(self):
pass
def get_uvp_args(self):
uvp_args = ['-o', str(UvPackerOpcode.MEASURE_AREA)]
return uvp_args
class UVP2_OT_ValidateOperator(UVP2_OT_PackOperatorGeneric):
bl_idname = 'uvpackmaster2.uv_validate'
bl_label = 'Validate UVs'
    bl_description = 'Validate selected UV faces. The validation procedure looks for invalid UV faces, i.e. faces with an area close to 0, self-intersecting faces, and faces overlapping each other'
def get_confirmation_msg(self):
if self.prefs.FEATURE_demo:
return 'WARNING: in the demo mode only the number of invalid faces found is reported, invalid faces will not be selected. Click OK to continue'
return ''
def process_result(self):
if self.invalid_faces_msg is None:
self.raiseUnexpectedOutputError()
invalid_face_count = force_read_int(self.invalid_faces_msg)
invalid_faces = read_int_array(self.invalid_faces_msg)
if not self.prefs.FEATURE_demo:
if len(invalid_faces) != invalid_face_count:
self.raiseUnexpectedOutputError()
if invalid_face_count > 0:
# Switch to the face selection mode
if self.p_context.context.tool_settings.use_uv_select_sync:
self.p_context.context.tool_settings.mesh_select_mode = (False, False, True)
else:
self.p_context.context.tool_settings.uv_select_mode = 'FACE'
self.p_context.select_all_faces(False)
self.p_context.select_faces(list(invalid_faces), True)
else:
if len(invalid_faces) > 0:
self.raiseUnexpectedOutputError()
if invalid_face_count > 0:
self.set_status('WARNING', 'Number of invalid faces found: ' + str(invalid_face_count))
else:
self.set_status('INFO', 'No invalid faces found')
def validate_pack_params(self):
pass
def get_uvp_args(self):
uvp_args = ['-o', str(UvPackerOpcode.VALIDATE_UVS)]
return uvp_args
class UVP2_OT_ProcessSimilar(UVP2_OT_PackOperatorGeneric):
def validate_pack_params(self):
pass
def get_uvp_args(self):
uvp_args = ['-o', str(self.get_uvp_opcode()), '-I', str(self.scene_props.similarity_threshold)]
uvp_args += ['-i', str(self.scene_props.precision)]
uvp_args += ['-r', str(90)]
if self.prefs.pack_ratio_enabled(self.scene_props):
self.pack_ratio = get_active_image_ratio(self.p_context.context)
uvp_args += ['-q', str(self.pack_ratio)]
return uvp_args
class UVP2_OT_SelectSimilar(UVP2_OT_ProcessSimilar):
bl_idname = 'uvpackmaster2.uv_select_similar'
bl_label = 'Select Similar'
bl_description = "Selects all islands which have similar shape to islands which are already selected. For more info regarding similarity detection click the help button"
def get_confirmation_msg(self):
if self.prefs.FEATURE_demo:
return 'WARNING: in the demo mode only the number of similar islands found is reported, islands will not be selected. Click OK to continue'
return ''
def send_unselected_islands(self):
return True
def get_uvp_opcode(self):
return UvPackerOpcode.SELECT_SIMILAR
def process_result(self):
if self.similar_islands_msg is None:
self.raiseUnexpectedOutputError()
similar_island_count = force_read_int(self.similar_islands_msg)
similar_islands = read_int_array(self.similar_islands_msg)
if not self.prefs.FEATURE_demo:
if len(similar_islands) != similar_island_count:
self.raiseUnexpectedOutputError()
for island_idx in similar_islands:
self.p_context.select_island_faces(island_idx, self.p_context.uv_island_faces_list[island_idx], True)
else:
if len(similar_islands) > 0:
self.raiseUnexpectedOutputError()
self.set_status('INFO', 'Similar islands found: ' + str(similar_island_count))
class UVP2_OT_AlignSimilar(UVP2_OT_ProcessSimilar):
bl_idname = 'uvpackmaster2.uv_align_similar'
bl_label = 'Align Similar'
bl_description = "Align selected islands, so islands which are similar are placed on top of each other. For more info regarding similarity detection click the help button"
def get_uvp_opcode(self):
return UvPackerOpcode.ALIGN_SIMILAR
def process_result(self):
if self.prefs.FEATURE_demo:
return
if self.pack_solution_msg is None:
self.raiseUnexpectedOutputError()
pack_solution = read_pack_solution(self.pack_solution_msg)
self.p_context.apply_pack_solution(self.pack_ratio, pack_solution)
self.set_status('INFO', 'Islands aligned')
class UVP2_OT_ScaleIslands(bpy.types.Operator):
bl_options = {'UNDO'}
@classmethod
def poll(cls, context):
return context.active_object is not None and context.active_object.mode == 'EDIT'
def execute(self, context):
try:
self.p_context = PackContext(context)
ratio = get_active_image_ratio(self.p_context.context)
self.p_context.scale_selected_faces(self.get_scale_factors())
except RuntimeError as ex:
if in_debug_mode():
print_backtrace(ex)
self.report({'ERROR'}, str(ex))
except Exception as ex:
if in_debug_mode():
print_backtrace(ex)
self.report({'ERROR'}, 'Unexpected error')
self.p_context.update_meshes()
return {'FINISHED'}
def get_scale_factors(self):
return (1.0, 1.0)
class UVP2_OT_AdjustIslandsToTexture(UVP2_OT_ScaleIslands):
bl_idname = 'uvpackmaster2.uv_adjust_islands_to_texture'
bl_label = 'Adjust Islands To Texture'
bl_description = "Adjust scale of selected islands so they are suitable for packing into the active texture. CAUTION: this operator should be used only when packing to a non-square texture. For for info regarding non-square packing click the help icon"
def get_scale_factors(self):
ratio = get_active_image_ratio(self.p_context.context)
return (1.0 / ratio, 1.0)
class UVP2_OT_UndoIslandsAdjustemntToTexture(UVP2_OT_ScaleIslands):
bl_idname = 'uvpackmaster2.uv_undo_islands_adjustment_to_texture'
bl_label = 'Undo Islands Adjustment'
bl_description = "Undo adjustment performed by the 'Adjust Islands To Texture' operator so islands are again suitable for packing into a square texture. For for info regarding non-square packing read the documentation"
def get_scale_factors(self):
ratio = get_active_image_ratio(self.p_context.context)
return (ratio, 1.0)
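    # Note: 'Adjust Islands To Texture' scales U by 1.0/ratio and this operator
    # scales U back by ratio (ratio as returned by get_active_image_ratio), so
    # running both leaves the selected islands unchanged.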
class UVP2_OT_Help(bpy.types.Operator):
bl_label = 'Help'
def execute(self, context):
webbrowser.open(UvpLabels.HELP_BASEURL + self.URL_SUFFIX)
return {'FINISHED'}
class UVP2_OT_UvpSetupHelp(UVP2_OT_Help):
bl_label = 'UVP Setup Help'
bl_idname = 'uvpackmaster2.uv_uvp_setup_help'
bl_description = "Show help for UVP setup"
URL_SUFFIX = "uvp-setup"
class UVP2_OT_HeuristicSearchHelp(UVP2_OT_Help):
bl_label = 'Non-Square Packing Help'
bl_idname = 'uvpackmaster2.uv_heuristic_search_help'
bl_description = "Show help for heuristic search"
URL_SUFFIX = "heuristic-search"
class UVP2_OT_NonSquarePackingHelp(UVP2_OT_Help):
bl_label = 'Non-Square Packing Help'
bl_idname = 'uvpackmaster2.uv_nonsquare_packing_help'
bl_description = "Show help for non-square packing"
URL_SUFFIX = "non-square-packing"
class UVP2_OT_SimilarityDetectionHelp(UVP2_OT_Help):
bl_label = 'Similarity Detection Help'
bl_idname = 'uvpackmaster2.uv_similarity_detection_help'
bl_description = "Show help for similarity detection"
URL_SUFFIX = "similarity-detection"
class UVP2_OT_InvalidTopologyHelp(UVP2_OT_Help):
bl_label = 'Invalid Topology Help'
bl_idname = 'uvpackmaster2.uv_invalid_topology_help'
bl_description = "Show help for handling invalid topology errors"
URL_SUFFIX = "invalid-topology-issues"
class UVP2_OT_PixelMarginHelp(UVP2_OT_Help):
bl_label = 'Pixel Margin Help'
bl_idname = 'uvpackmaster2.uv_pixel_margin_help'
bl_description = "Show help for setting margin in pixels"
URL_SUFFIX = "pixel-margin"
class UVP2_OT_IslandRotStepHelp(UVP2_OT_Help):
bl_label = 'Island Rotation Step Help'
bl_idname = 'uvpackmaster2.uv_island_rot_step_help'
bl_description = "Show help for setting rotation step on per-island level"
URL_SUFFIX = "island-rotation-step"
class UVP2_OT_UdimSupportHelp(UVP2_OT_Help):
bl_label = 'UDIM Support Help'
bl_idname = 'uvpackmaster2.uv_udim_support_help'
bl_description = "Show help for UDIM support"
URL_SUFFIX = "udim-support"
class UVP2_OT_ManualGroupingHelp(UVP2_OT_Help):
bl_label = 'Manual Grouping Help'
bl_idname = 'uvpackmaster2.uv_manual_grouping_help'
bl_description = "Show help for manual grouping"
URL_SUFFIX = "udim-support#manual-grouping"
| 1.953125 | 2 |
src/data_gen/train_tokenizer.py | spacemanidol/Image2smiles | 6 | 12799998 | <filename>src/data_gen/train_tokenizer.py
import os
import argparse
from tokenizers.decoders import ByteLevel as ByteLevelDecoder
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase, NFKC, Sequence
from tokenizers.pre_tokenizers import ByteLevel
from tokenizers.trainers import BpeTrainer
from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors
def get_smi_files(directory):
files = []
for filename in os.listdir(directory):
if filename.endswith(".smi"):
files.append(os.path.join(directory, filename))
else:
continue
return files
def main(args):
if args.do_train:
# Initialize a tokenizer
files = get_smi_files(args.training_files)
print("Training BPE tokenizer using the following files:{}".format(files))
tokenizer = Tokenizer(models.BPE(unk_token="<unk>"))
tokenizer.enable_padding(pad_id=args.vocab_size+2, pad_token="<pad>", length=args.pad_len)
tokenizer.enable_truncation(max_length=args.pad_len,strategy='only_first')
tokenizer.normalizer = Sequence([NFKC()])
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=True)
# Train the tokenizer
trainer = trainers.BpeTrainer(show_progress=True, vocab_size=args.vocab_size, min_frequency=args.min_frequency)
tokenizer.train(files, trainer=trainer)
tokenizer.add_tokens(["<start>", "<end>" ])
tokenizer.save(os.path.join('tokenizers',args.tokenizer_name), pretty=True)
print("Trained vocab size: {}".format(tokenizer.get_vocab_size()))
if args.do_test:
# Test the tokenizer
tokenizer = Tokenizer.from_file(os.path.join('tokenizers',args.tokenizer_name))
print("Testing with SMILES String: {}".format(args.test_string))
encoding = tokenizer.encode(args.test_string)
print("Encoded string: {}".format(encoding.tokens))
print(encoding.ids)
decoded = tokenizer.decode(encoding.ids)
print("Decoded string: {}".format(decoded))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate a smiles tokenizer given candidate files and target configs.')
parser.add_argument('--training_files', type=str, default='data/', help='source of input smiles data')
parser.add_argument('--pad_len', type=int, default=150, help='how much to pad tokenized input')
parser.add_argument('--do_train', action='store_true', help='Train a tokenizer' )
parser.add_argument('--do_test', action='store_true', help='Test the tokenizer found in tokenizer dir file')
parser.add_argument('--test_string', type=str, default='CC(C)CCNc1cnnc(NCCc2ccc(S(N)(=O)=O)cc2)n1', help='a SMILES string to test tokenizer with')
parser.add_argument('--tokenizer_name', type=str, default='tokenizer_vocab_2000.json')
    parser.add_argument('--vocab_size', type=int, default=2000, help='Size of the vocabulary to train')
parser.add_argument('--min_frequency', type=int, default=2, help='min fequency of word in corpus')
args = parser.parse_args()
main(args)
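# Example invocations (paths and values are illustrative, not taken from the repo):
#   python src/data_gen/train_tokenizer.py --do_train --training_files data/ \
#       --vocab_size 2000 --min_frequency 2 --tokenizer_name tokenizer_vocab_2000.json
#   python src/data_gen/train_tokenizer.py --do_test --tokenizer_name tokenizer_vocab_2000.json \
#       --test_string 'CC(C)CCNc1cnnc(NCCc2ccc(S(N)(=O)=O)cc2)n1'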
| 2.421875 | 2 |
hub/tests/test_models_custom_link.py | yevgenykuz/dev-team-hub | 2 | 12799999 | <reponame>yevgenykuz/dev-team-hub
from django.test import TestCase
from ..models import CustomLink
class CustomLinkModelTests(TestCase):
def setUp(self):
self.custom_link = CustomLink.objects.create(name='Google', url='https://www.google.com', order_id=1)
def test_new_object(self):
self.assertEquals(self.custom_link.name, 'Google')
self.assertEquals(self.custom_link.url, 'https://www.google.com')
self.assertEquals(self.custom_link.order_id, 1)
def test_field_name(self):
field_label = self.custom_link._meta.get_field('name').verbose_name
max_length = self.custom_link._meta.get_field('name').max_length
unique = self.custom_link._meta.get_field('name').unique
self.assertEquals(field_label, 'name')
self.assertEquals(max_length, 50)
self.assertEquals(unique, True)
def test_field_url(self):
field_label = self.custom_link._meta.get_field('url').verbose_name
max_length = self.custom_link._meta.get_field('url').max_length
self.assertEquals(field_label, 'url')
self.assertEquals(max_length, 255)
def test_field_order_id(self):
field_label = self.custom_link._meta.get_field('order_id').verbose_name
        default_value = self.custom_link._meta.get_field('order_id').default
        self.assertEquals(field_label, 'order id')
        self.assertEquals(default_value, 0)
def test_object_presentation(self):
expected_presentation = f"[{self.custom_link.name}]: {self.custom_link.url}"
self.assertEquals(expected_presentation, str(self.custom_link))
def test_object_ordering(self):
self.assertEquals(self.custom_link._meta.ordering, ['order_id'])
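# These tests are picked up by Django's test runner, e.g. `python manage.py test hub`
# (command assumed from the app path; adjust to the project layout).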
| 2.546875 | 3 |