# pylint:disable=unsupported-membership-test
# pylint:disable=unsubscriptable-object
# pylint:disable=unsupported-assignment-operation
"""Antenna Circuit
This module supports:
1. Changing the affinity values of each of the odorant-receptor pairs
characterizing the input of the Odorant Transduction Process.
2. Changing parameter values of the Biological Spike Generators (BSGs)
associated with each OSN.
3. Changing the number of OSNs expressing the same Odorant Receptor (OR) type.
"""
import copy
import typing as tp
from dataclasses import dataclass, field
import numpy as np
import networkx as nx
from olftrans.olftrans import estimate_resting_spike_rate, estimate_sigma
from ..basecircuit import Config, Circuit, EOSCircuitException
from . import model as NDModel
from . import NDComponents as ndcomp
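# Hypothetical usage sketch: how the three capabilities listed in the module docstring
# map onto the classes below. The argument values are illustrative assumptions only;
# fields required by the `Config` base class may differ in practice.
#
#     cfg = ANTConfig(NO=5, affs=[1e-2, 1e-3], resting=8.0)
#     circuit = ANTCircuit.create_from_config(cfg)
#     circuit.set_affinities([2e-2], receptors=["0"])  # 1. change odorant-receptor affinities
#     circuit.set_bsg_params("sigma", 0.005)           # 2. change BSG parameters
#     circuit.set_NO(10, receptor="1")                 # 3. change number of OSNs per OR type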
class ANTException(EOSCircuitException):
"""Base Antenna Exception"""
@dataclass
class ANTConfig(Config):
"""Configuration for Antenna Circuits"""
NO: tp.Iterable[tp.Iterable[int]]
"""Number of OSNs per Receptor Type"""
affs: tp.Iterable[float]
"""Affinity Values"""
receptors: tp.Iterable[str] = None
"""Name of receptors of length NR"""
resting: float = None
"""Resting OSN Spike Rates [Hz]"""
node_params: dict = field(default_factory=lambda: dict(osn_bsgs=dict(sigma=0.0025)))
"""Parameters for each neuron type"""
osns: tp.Iterable[tp.Iterable[str]] = field(repr=False, default=None)
"""Ids of OSNs for each channel
This is a list of lists, where the outer list corresponds to the receptor
types (channels) and each inner list holds the OSN ids of that type.
"""
def __post_init__(self):
for n in self.node_types:
if n not in self.node_params:
self.node_params[n] = dict()
self.affs = np.asarray(self.affs)
# set receptor names
self.receptors = self.set_or_assert(
self.receptors, [f"{r}" for r in range(self.NR)], self.NR
)
# set osn names
if np.isscalar(self.NO):
self.NO = np.full((self.NR,), self.NO, dtype=int)
else:
if len(self.NO) != self.NR:
raise ANTException(
f"If `NO` is iterable, it has to have length same as affs."
)
self.osns = self.set_or_assert(
self.osns,
[
[f"OSN/{_or}/{o}" for o in range(self.NO[r])]
for r, _or in enumerate(self.receptors)
],
self.NO,
)
if self.drs is None:
self.drs = np.full((self.NR,), 10.0)
elif np.isscalar(self.drs):
self.drs = np.full((self.NR,), self.drs)
else:
self.drs = np.asarray(self.drs)
if len(self.drs) != self.NR:
raise ANTException(
"If Dissociation rate (dr) is specified as iterable, "
"it needs to have length the same as affs."
)
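# binding rates follow from the affinities: br = dr * aff (cf. set_affs/set_affinities below)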
self.node_params["osn_otps"]["br"] = self.drs * self.affs
if all([v is None for v in [self.resting, self.sigma]]):
raise ANTException("Resting and Sigma cannot both be None")
if self.resting is not None:
self.sigma = estimate_sigma(self.resting)
def set_or_assert(
self, var: "Config.Attribute", new_var: "Config.Attribute", N: np.ndarray
) -> "Config.Attribute":
"""Set Variable or Check Dimensionality
Set :code:`var` to :code:`new_var` if it is None; otherwise perform dimensionality checks.
Arguments:
var: old variable value
new_var: new variable value
N: dimensionality for the variable, could be multi-dimensional
"""
if var is None:
if hasattr(N, "__len__"):
assert len(new_var) == len(N)
assert all([len(v) == n for v, n in zip(new_var, N)])
var = new_var
else:
if hasattr(N, "__len__"):
assert len(new_var) == len(N)
assert all([len(v) == n for v, n in zip(var, N)])
else:
assert len(var) == N
return var
def set_affs(self, new_affs):
self.affs = np.asarray(new_affs)
# `brs` is a read-only property, so write the recomputed binding rates into node_params
self.node_params["osn_otps"]["br"] = self.drs * self.affs
@property
def node_types(self) -> tp.List[str]:
return ["osn_otps", "osn_bsgs"]
@property
def osn_otps(self):
return [[f"{name}/OTP" for name in names] for names in self.osns]
@property
def osn_bsgs(self):
return [[f"{name}/BSG" for name in names] for names in self.osns]
@property
def NR(self) -> int:
"""Number of Receptors"""
return len(self.affs)
@property
def sigma(self) -> float:
"""Noisy Connor Stevens model Noise Level"""
return self.node_params["osn_bsgs"]["sigma"]
@sigma.setter
def sigma(self, new_sigma) -> float:
self.node_params["osn_bsgs"]["sigma"] = new_sigma
@property
def brs(self) -> float:
"""Binding Rates of the OTPs"""
if "br" in self.node_params["osn_otps"]:
return self.node_params["osn_otps"]["br"]
return None
@property
def drs(self) -> float:
"""Binding Rates of the OTPs"""
if "dr" in self.node_params["osn_otps"]:
return self.node_params["osn_otps"]["dr"]
return None
@drs.setter
def drs(self, new_drs) -> float:
new_drs = np.atleast_1d(new_drs)
if len(new_drs) != self.NR:
raise ANTException(
f"dr values length mismatch, expected {self.NR}, " f"got {len(new_drs)}"
)
self.node_params["osn_otps"]["dr"] = new_drs
@dataclass(repr=False)
class ANTCircuit(Circuit):
"""Antenna Circuit"""
config: ANTConfig
extra_comps: tp.List["NDComponent"] = field(
init=False, default_factory=lambda: [ndcomp.NoisyConnorStevens, ndcomp.OTP]
)
@classmethod
def create_graph(cls, cfg) -> nx.MultiDiGraph:
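"""Build a networkx graph with one OTP node and one BSG (NoisyConnorStevens) node per OSN, connected OTP -> BSG via the current variable `I`"""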
G = nx.MultiDiGraph()
for r, (_otp_ids, _bsg_ids) in enumerate(zip(cfg.osn_otps, cfg.osn_bsgs)):
bsg_params = copy.deepcopy(NDModel.NoisyConnorStevens.params)
bsg_params.update(
{
key: val
for key, val in cfg.node_params["osn_bsgs"].items()
if not hasattr(val, "__len__")
}
)
otp_params = copy.deepcopy(NDModel.OTP.params)
otp_params.update({"br": cfg.brs[r], "dr": cfg.drs[r]})
otp_params.update(
{
key: val
for key, val in cfg.node_params["osn_otps"].items()
if key not in ["br", "dr"] and not hasattr(val, "__len__")
}
)
for _o_id, _b_id in zip(_otp_ids, _bsg_ids):
G.add_node(_o_id, **{"class": "OTP"}, **otp_params)
G.add_node(_b_id, **{"class": "NoisyConnorStevens"}, **bsg_params)
G.add_edge(_o_id, _b_id, variable="I")
return G
@classmethod
def create_from_config(cls, cfg) -> "ANTCircuit":
"""Create Instance from Config
Arguments:
cfg: Config instance that specifies the configuration of the module
Returns:
A new ANTCircuit instance
"""
return cls(graph=cls.create_graph(cfg), config=cfg)
def set_affinities(self, value, receptors=None) -> None:
"""Set Affinity values.
.. note::
Because binding rates are computed from the affinities :code:`config.affs`
and the dissociation rates :code:`config.drs`, changing affinities
has the effect of changing the binding rates but not the dissociation
rates.
"""
if receptors is None:
receptors = list(self.config.receptors)
else:
receptors = list(np.atleast_1d(receptors))
value = np.atleast_1d(value)
if len(value) != len(receptors):
raise ANTException(
f"Attempting to set values of length {len(value)} into "
f"{len(receptors)} receptors"
)
for i, r in enumerate(receptors):
r_idx = list(self.config.receptors).index(r)
# `value` is aligned with the `receptors` argument, not with config.receptors
new_aff = value[i]
self.config.affs[r_idx] = new_aff
otp_nodes = self.config.osn_otps[r_idx]
update_dct = {
n: {"br": self.graph.nodes[n]["dr"] * new_aff} for n in otp_nodes
}
nx.set_node_attributes(self.graph, update_dct)
def set_bsg_params(self, key: str, value: float) -> None:
"""Set parameter value of BSG nodes"""
if key == "sigma":
self.config.sigma = value
update_dict = {n: {key: value} for n in sum(self.config.osn_bsgs, [])}
nx.set_node_attributes(self.graph, update_dict)
def set_NO(
self, NO: tp.Union[int, tp.Iterable[int]], receptor=None, aff_noise_std=0.0
) -> None:
"""Change number of OSNs expressing each receptor type"""
if receptor is None:
receptor = list(self.config.receptors)
else:
receptor = list(np.atleast_1d(receptor))
if any([r not in self.config.receptors for r in receptor]):
raise ANTException("Receptors not found in list of names")
# broadcast a scalar NO to every requested receptor type
NO = np.atleast_1d(NO).astype(int)
if len(NO) == 1:
NO = np.full((len(receptor),), NO[0], dtype=int)
if len(NO) != len(receptor):
raise ANTException(
"If `NO` is iterable, it must have the same length as `receptor`."
)
for _no, r in zip(NO, receptor):
r_idx = list(self.config.receptors).index(r)
self.config.NO[r_idx] = int(_no)
self.config.osns[r_idx] = [f"OSN/{r}/{n}" for n in range(_no)]
self.graph = self.create_graph(self.config)
def get_node_ids(
self,
node_type: "ANTConfig.node_types",
receptor: tp.Union[str, tp.Iterable[str]] = None,
) -> list:
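"""Return node ids of the given type ("osn_otps" or "osn_bsgs"), grouped per receptor; all receptors are returned if `receptor` is None"""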
if receptor is None:
receptor = self.config.receptors
else:
receptor = np.atleast_1d(receptor)
for r in receptor:
if r not in self.config.receptors:
raise ANTException(f"Receptors {r} not found in list of receptor names")
if node_type not in self.config.node_types:
raise ANTException(
f"node_type {node_type} not recognized, "
f"must be one of {self.config.node_types}"
)
node_ids = getattr(self.config, node_type)
return [node_ids[list(self.config.receptors).index(r)] for r in receptor]
@property
def inputs(self) -> dict:
"""Output OTP Nodes IDs and the Variables"""
return {"conc": sum(self.config.osn_otps, [])}
@property
def outputs(self) -> dict:
"""Output BSG Nodes IDs and the Variables"""
bsg_ids = sum(self.config.osn_bsgs, [])
return {"V": bsg_ids, "spike_state": bsg_ids}
| {"hexsha": "5ed04b3cb7ca416a5519c2b54058aafcbd446445", "size": 10735, "ext": "py", "lang": "Python", "max_stars_repo_path": "eoscircuits/antcircuits/circuit.py", "max_stars_repo_name": "FlyBrainLab/EOScircuits", "max_stars_repo_head_hexsha": "2ade33db402997f5001f1707f136370c660dce33", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "eoscircuits/antcircuits/circuit.py", "max_issues_repo_name": "FlyBrainLab/EOScircuits", "max_issues_repo_head_hexsha": "2ade33db402997f5001f1707f136370c660dce33", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eoscircuits/antcircuits/circuit.py", "max_forks_repo_name": "FlyBrainLab/EOScircuits", "max_forks_repo_head_hexsha": "2ade33db402997f5001f1707f136370c660dce33", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6520376176, "max_line_length": 88, "alphanum_fraction": 0.5800652073, "include": true, "reason": "import numpy,import networkx", "num_tokens": 2620} |
import sys
import os
sys.path.insert(0, os.path.abspath("../tstcommon"))
import commondata2d as cd
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from utils import to_cpp
inp = cd.inp.reshape(cd.inp.shape[1:])
inp.requires_grad_()
in_feat = 8
out_feat = 4
batch_size = 3
fc = nn.Linear(in_feat, out_feat, bias=False)
fc.weight.data = cd.weights
optimizer = optim.SGD(fc.parameters(), lr=0.01, momentum=0.9)
loss_fn = nn.CrossEntropyLoss()
output = fc(inp)
output.requires_grad_()
output.retain_grad()
loss = loss_fn(output, cd.target_nonbinary)
loss.retain_grad()
loss.backward()
optimizer.step()
##################################################
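# Reference math for the manual re-implementation below: with one-hot targets y and
# logits o over N samples,
#   L = (1/N) * sum_i ( -o_i[y_i] + log(sum_c exp(o_i[c])) )
# and the gradient with respect to the logits is
#   dL/do = (softmax(o) - y) / N,
# which cross_entropy_grad assembles term by term (dxi = -y/N, dsumexp = softmax(o)/N).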
def cross_entropy(o, target):
# `target` holds class indices; select each sample's correct-class logit directly
return (-o[torch.arange(o.shape[0]), target] + torch.log(torch.exp(o).sum(axis=1))).mean()
cross_entropy(output, cd.target_nonbinary)
def cross_entropy_grad(o):
mask = cd.target
N, C = o.shape
dxi = -1. / N * mask
dsum = 1./N * torch.from_numpy(np.ones((N)))
dlog = 1. / torch.exp(o).sum(axis=1) * dsum
dsumexp = torch.exp(o) * dlog.reshape((N, 1))
dL = dxi + dsumexp
return dL
cross_entropy_grad(output)
| {"hexsha": "a07646559562b92f62c19689d098e227d9c0bcb2", "size": 1137, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/losses/crossentropy.py", "max_stars_repo_name": "fierval/EigenSiNN", "max_stars_repo_head_hexsha": "4ed01b47d4b13b9c9e29622475d821868499942d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/losses/crossentropy.py", "max_issues_repo_name": "fierval/EigenSiNN", "max_issues_repo_head_hexsha": "4ed01b47d4b13b9c9e29622475d821868499942d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/losses/crossentropy.py", "max_forks_repo_name": "fierval/EigenSiNN", "max_forks_repo_head_hexsha": "4ed01b47d4b13b9c9e29622475d821868499942d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.2711864407, "max_line_length": 74, "alphanum_fraction": 0.671064204, "include": true, "reason": "import numpy", "num_tokens": 327} |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import zip, range
from future.utils import viewkeys, viewitems
from collections import Counter, defaultdict, OrderedDict
from warnings import warn
import numpy as np
from scipy.stats import entropy
from skbio.stats.distance import DistanceMatrix
from skbio.io.util import open_file
from ._exception import SequenceCollectionError, StockholmParseError
class SequenceCollection(object):
"""Class for storing collections of biological sequences.
Parameters
----------
seqs : list of `skbio.sequence.BiologicalSequence` objects
The `skbio.sequence.BiologicalSequence` objects to load into
a new `SequenceCollection` object.
validate : bool, optional
If True, runs the `is_valid` method after construction and raises
`SequenceCollectionError` if ``is_valid == False``.
Raises
------
skbio.alignment.SequenceCollectionError
If ``validate == True`` and ``is_valid == False``.
See Also
--------
skbio.sequence.BiologicalSequence
skbio.sequence.NucleotideSequence
skbio.sequence.DNASequence
skbio.sequence.RNASequence
Alignment
skbio.parse.sequences
skbio.parse.sequences.parse_fasta
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s1
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
"""
@classmethod
def from_fasta_records(cls, fasta_records, seq_constructor,
validate=False):
r"""Initialize a `SequenceCollection` object
Parameters
----------
fasta_records : iterator of tuples
The records to load into a new `SequenceCollection` object. These
should be tuples of ``(sequence_id, sequence)``.
seq_constructor : skbio.sequence.BiologicalSequence
validate : bool, optional
If True, runs the `is_valid` method after construction and raises
`SequenceCollectionError` if ``is_valid == False``.
Returns
-------
SequenceCollection (or a derived class)
The new `SequenceCollection` object.
Raises
------
skbio.alignment.SequenceCollectionError
If ``validate == True`` and ``is_valid == False``.
See Also
--------
skbio.sequence.BiologicalSequence
skbio.sequence.NucleotideSequence
skbio.sequence.DNASequence
skbio.sequence.RNASequence
Alignment
skbio.parse.sequences
skbio.parse.sequences.parse_fasta
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.parse.sequences import parse_fasta
>>> from StringIO import StringIO
>>> from skbio.sequence import DNA
>>> fasta_f = StringIO('>seq1\nACCGT\n>seq2\nAACCGGT\n')
>>> s1 = SequenceCollection.from_fasta_records(
... parse_fasta(fasta_f), DNA)
>>> s1
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
>>> records = [('seq1', 'ACCGT'), ('seq2', 'AACCGGT')]
>>> s1 = SequenceCollection.from_fasta_records(records, DNA)
>>> s1
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
"""
data = []
for seq_id, seq in fasta_records:
try:
id, description = seq_id.split(None, 1)
except ValueError:
id = seq_id.strip()
description = None
data.append(seq_constructor(seq, id=id,
description=description))
return cls(data, validate=validate)
def __init__(self, seqs, validate=False):
self._data = seqs
self._id_to_index = {}
for i, seq in enumerate(self._data):
id = seq.id
if id in self:
raise SequenceCollectionError(
"All sequence ids must be unique, but "
"id %s is present multiple times." % id)
else:
self._id_to_index[seq.id] = i
# This is bad because we're making a second pass through the sequence
# collection to validate. We'll want to avoid this, but it's tricky
# because different subclasses will want to define their own is_valid
# methods.
if validate and not self.is_valid():
raise SequenceCollectionError(
"%s failed to validate." % self.__class__.__name__)
def __contains__(self, id):
r"""The in operator.
Parameters
----------
id : str
The id to look up in the `SequenceCollection`.
Returns
-------
bool
Indicates whether `id` corresponds to a sequence id
in the `SequenceCollection`.
.. shownumpydoc
"""
return id in self._id_to_index
def __eq__(self, other):
r"""The equality operator.
Parameters
----------
other : `SequenceCollection`
The `SequenceCollection` to test for equality against.
Returns
-------
bool
Indicates whether `self` and `other` are equal.
Notes
-----
`SequenceCollection` objects are equal if they are the same type,
contain the same number of sequences, and if each of the
`skbio.sequence.BiologicalSequence` objects, in order, are equal.
.. shownumpydoc
"""
if self.__class__ != other.__class__:
return False
elif len(self) != len(other):
return False
else:
for self_seq, other_seq in zip(self, other):
if self_seq != other_seq:
return False
return True
def __getitem__(self, index):
r"""The indexing operator.
Parameters
----------
index : int, str
The position or sequence id of the
`skbio.sequence.BiologicalSequence` to return from the
`SequenceCollection`.
Returns
-------
`skbio.sequence.BiologicalSequence`
The `skbio.sequence.BiologicalSequence` at the specified
index in the `SequenceCollection`.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s1[0]
<DNASequence: ACCGT (length: 5)>
>>> s1["seq1"]
<DNASequence: ACCGT (length: 5)>
.. shownumpydoc
"""
if isinstance(index, str):
return self.get_seq(index)
else:
return self._data[index]
def __iter__(self):
r"""The iter operator.
Returns
-------
iterator
`skbio.sequence.BiologicalSequence` iterator for the
`SequenceCollection`.
.. shownumpydoc
"""
return iter(self._data)
def __len__(self):
r"""The len operator.
Returns
-------
int
The number of sequences in the `SequenceCollection`.
.. shownumpydoc
"""
return self.sequence_count()
def __ne__(self, other):
r"""The inequality operator.
Parameters
----------
other : `SequenceCollection`
Returns
-------
bool
Indicates whether self and other are not equal.
Notes
-----
See `SequenceCollection.__eq__` for a description of what it means for
a pair of `SequenceCollection` objects to be equal.
.. shownumpydoc
"""
return not self.__eq__(other)
def __repr__(self):
r"""The repr method.
Returns
-------
str
Returns a string representation of the object.
Notes
-----
String representation contains the class name, the number of sequences
in the `SequenceCollection` (n), and the mean and standard deviation
sequence length.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(repr(s1))
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
.. shownumpydoc
"""
cn = self.__class__.__name__
count, center, spread = self.distribution_stats()
return "<%s: n=%d; mean +/- std length=%.2f +/- %.2f>" \
% (cn, count, center, spread)
def __reversed__(self):
"""The reversed method.
Returns
-------
iterator
`skbio.sequence.BiologicalSequence` iterator for the
`SequenceCollection` in reverse order.
.. shownumpydoc
"""
return reversed(self._data)
def __str__(self):
r"""The str method.
Returns
-------
str
Fasta-formatted string of all sequences in the object.
.. shownumpydoc
"""
return self.to_fasta()
def distances(self, distance_fn):
"""Compute distances between all pairs of sequences
Parameters
----------
distance_fn : function
Function for computing the distance between a pair of sequences.
This must take two sequences as input (as
`skbio.sequence.BiologicalSequence` objects) and return a
single integer or float value.
Returns
-------
skbio.DistanceMatrix
Matrix containing the distances between all pairs of sequences.
Raises
------
skbio.util.exception.BiologicalSequenceError
If ``len(self) != len(other)`` and ``distance_fn`` ==
``scipy.spatial.distance.hamming``.
See Also
--------
skbio.DistanceMatrix
scipy.spatial.distance.hamming
Examples
--------
>>> from scipy.spatial.distance import hamming
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> seqs = [DNA("ACCGGGTT", id="s1"),
... DNA("ACTTGGTT", id="s2"),
... DNA("ACTAGGTT", id="s3")]
>>> a1 = SequenceCollection(seqs)
>>> print(a1.distances(hamming))
3x3 distance matrix
IDs:
s1, s2, s3
Data:
[[ 0. 0.25 0.25 ]
[ 0.25 0. 0.125]
[ 0.25 0.125 0. ]]
"""
sequence_count = self.sequence_count()
dm = np.zeros((sequence_count, sequence_count))
ids = []
for i in range(sequence_count):
self_i = self[i]
ids.append(self_i.id)
for j in range(i):
dm[i, j] = dm[j, i] = self_i.distance(self[j], distance_fn)
return DistanceMatrix(dm, ids)
def distribution_stats(self, center_f=np.mean, spread_f=np.std):
r"""Return sequence count, and center and spread of sequence lengths
Parameters
----------
center_f : function
Should take a list-like object and return a single value
representing the center of the distribution.
spread_f : function
Should take a list-like object and return a single value
representing the spread of the distribution.
Returns
-------
tuple of (int, float, float)
The sequence count, center of length distribution, spread of length
distribution.
Notes
-----
Alternatives for `center_f` and `spread_f` could be median and median
absolute deviation.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s1.distribution_stats()
(2, 6.0, 1.0)
"""
if self.is_empty():
return (0, 0.0, 0.0)
else:
sequence_count = self.sequence_count()
sequence_lengths = self.sequence_lengths()
return (sequence_count, center_f(sequence_lengths),
spread_f(sequence_lengths))
def degap(self):
r"""Return a new `SequenceCollection` with all gap characters removed.
Returns
-------
SequenceCollection
A new `SequenceCollection` where
`skbio.sequence.BiologicalSequence.degap` has been called on
each sequence.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A--CCGT.', id="seq1"),
... DNA('.AACCG-GT.', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> s2 = s1.degap()
>>> s2
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
"""
return SequenceCollection([seq.degap() for seq in self])
def get_seq(self, id):
r"""Return a sequence from the `SequenceCollection` by its id.
Parameters
----------
id, str
The id of the sequence to return.
Returns
-------
skbio.sequence.BiologicalSequence
The `skbio.sequence.BiologicalSequence` with `id`.
Raises
------
KeyError
If `id` is not in the `SequenceCollection` object.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A--CCGT.', id="seq1"),
... DNA('.AACCG-GT.', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(s1['seq1'])
A--CCGT.
"""
return self[self._id_to_index[id]]
def ids(self):
"""Returns the `BiologicalSequence` ids
Returns
-------
list
The ordered list of ids for the
`skbio.sequence.BiologicalSequence` objects in the
`SequenceCollection`.
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A--CCGT.', id="seq1"),
... DNA('.AACCG-GT.', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(s1.ids())
['seq1', 'seq2']
"""
return [seq.id for seq in self]
def int_map(self, prefix=""):
"""Create an integer-based mapping of sequence ids
Parameters
----------
prefix : str
String prefix for new integer-based ids.
Returns
-------
dict
Mapping of new ids to sequences.
dict
Mapping of new ids to old ids.
Notes
-----
This is useful when writing sequences out for use with programs that
are picky about their sequence ids (e.g., RAxML).
The integer-based ids will be strings, for consistency (e.g., if prefix
is passed) and begin at 1.
References
----------
RAxML Version 8: A tool for Phylogenetic Analysis and Post-Analysis of
Large Phylogenies". In Bioinformatics, 2014
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> new_id_to_seqs, new_id_to_old_ids = s1.int_map()
>>> print(repr(new_id_to_seqs['1']))
<DNASequence: ACCGT (length: 5)>
>>> print(repr(new_id_to_seqs['2']))
<DNASequence: AACCGGT (length: 7)>
>>> print(new_id_to_old_ids['1'])
seq1
>>> print(new_id_to_old_ids['2'])
seq2
"""
int_keys = []
int_map = []
for i, seq in enumerate(self):
k = ("%s%d" % (prefix, i+1))
int_map.append((k, seq))
int_keys.append((k, seq.id))
return dict(int_map), dict(int_keys)
def is_empty(self):
"""Return True if the SequenceCollection is empty
Returns
-------
bool
``True`` if `self` contains zero sequences, and ``False``
otherwise.
"""
return self.sequence_count() == 0
def is_valid(self):
"""Return True if the SequenceCollection is valid
Returns
-------
bool
``True`` if `self` is valid, and ``False`` otherwise.
Notes
-----
Validity is defined as having no sequences containing characters
outside of their valid character sets.
See Also
--------
skbio.alignment.BiologicalSequence.is_valid
Examples
--------
>>> from skbio.alignment import SequenceCollection
>>> from skbio.sequence import DNA, RNA
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(s1.is_valid())
True
>>> sequences = [RNA('ACCGT', id="seq1"),
... RNA('AACCGGT', id="seq2")]
>>> s1 = SequenceCollection(sequences)
>>> print(s1.is_valid())
False
"""
return self._validate_character_set()
def iteritems(self):
"""Generator of id, sequence tuples
Returns
-------
generator of tuples
Each tuple contains ordered
(`skbio.sequence.BiologicalSequence.id`,
`skbio.sequence.BiologicalSequence`) pairs.
"""
for seq in self:
yield seq.id, seq
def lower(self):
"""Converts all sequences to lowercase
Returns
-------
SequenceCollection
New `SequenceCollection` object where
`skbio.sequence.BiologicalSequence.lower()` has been called
on each sequence.
See Also
--------
skbio.sequence.BiologicalSequence.lower
upper
"""
return self.__class__([seq.lower() for seq in self])
def sequence_count(self):
"""Return the count of sequences in the `SequenceCollection`
Returns
-------
int
The number of sequences in the `SequenceCollection`.
See Also
--------
sequence_lengths
Alignment.sequence_length
"""
return len(self._data)
def k_word_frequencies(self, k, overlapping=True, constructor=str):
"""Return frequencies of length k words for sequences in Alignment
Parameters
----------
k : int
The word length.
overlapping : bool, optional
Defines whether the k-words should be overlapping or not
overlapping. This is only relevant when k > 1.
constructor : type, optional
The constructor for the returned k-words.
Returns
-------
list
List of ``collections.defaultdict`` objects, one for each sequence
in the `SequenceCollection`, representing the frequency of each
k-word in that sequence.
See Also
--------
position_frequencies
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A', id="seq1"),
... DNA('AT', id="seq2"),
... DNA('TTTT', id="seq3")]
>>> s1 = SequenceCollection(sequences)
>>> for freqs in s1.k_word_frequencies(1):
... print(freqs)
defaultdict(<type 'int'>, {'A': 1.0})
defaultdict(<type 'int'>, {'A': 0.5, 'T': 0.5})
defaultdict(<type 'int'>, {'T': 1.0})
>>> for freqs in s1.k_word_frequencies(2):
... print(freqs)
defaultdict(<type 'int'>, {})
defaultdict(<type 'int'>, {'AT': 1.0})
defaultdict(<type 'int'>, {'TT': 1.0})
"""
result = []
for s in self:
result.append(s.k_word_frequencies(k, overlapping, constructor))
return result
def sequence_lengths(self):
"""Return lengths of the sequences in the `SequenceCollection`
Returns
-------
list
The ordered list of sequence lengths.
See Also
--------
sequence_count
"""
return [len(seq) for seq in self]
def to_fasta(self):
"""Return fasta-formatted string representing the `SequenceCollection`
Returns
-------
str
A fasta-formatted string representing the `SequenceCollection`.
See Also
--------
skbio.parse.sequences.parse_fasta
"""
return ''.join([seq.to_fasta() for seq in self._data])
def toFasta(self):
"""Return fasta-formatted string representing the `SequenceCollection`
.. note:: Deprecated in skbio 0.3.0
`SequenceCollection.toFasta` will be removed in skbio 0.2.0,
it is replaced by `SequenceCollection.to_fasta` as the latter
adheres to PEP8 naming conventions. This is necessary to keep
in place now as these objects are sometimes passed into
code that expects a `cogent.alignment.Alignment` object
(e.g., PyNAST), so we need to support the method with this
name.
Returns
-------
str
A fasta-formatted string representing the `SequenceCollection`.
"""
warn("SequenceCollection.toFasta() is deprecated. You should use "
"SequenceCollection.to_fasta().")
return self.to_fasta()
def upper(self):
"""Converts all sequences to uppercase
Returns
-------
SequenceCollection
New `SequenceCollection` object where `BiologicalSequence.upper()`
has been called on each sequence.
See Also
--------
BiologicalSequence.upper
lower
"""
return self.__class__([seq.upper() for seq in self])
def _validate_character_set(self):
"""Return ``True`` if all sequences are valid, ``False`` otherwise
"""
for seq in self:
if not seq.is_valid():
return False
return True
class Alignment(SequenceCollection):
"""Class for storing alignments of biological sequences.
The ``Alignment`` class adds convenience methods to the
``SequenceCollection`` class to make it easy to work with alignments of
biological sequences.
Parameters
----------
seqs : list of `skbio.sequence.BiologicalSequence` objects
The `skbio.sequence.BiologicalSequence` objects to load into
a new `Alignment` object.
validate : bool, optional
If True, runs the `is_valid` method after construction and raises
`SequenceCollectionError` if ``is_valid == False``.
score : float, optional
The score of the alignment, if applicable (usually only if the
alignment was just constructed).
start_end_positions : iterable of two-item tuples, optional
The start and end positions of each input sequence in the alignment,
if applicable (usually only if the alignment was just constructed using
a local alignment algorithm). Note that these should be indexes into
the unaligned sequences, though the `Alignment` object itself doesn't
know about these.
Raises
------
skbio.alignment.SequenceCollectionError
If ``validate == True`` and ``is_valid == False``.
Notes
-----
By definition, all of the sequences in an alignment must be of the same
length. For this reason, an alignment can be thought of as a matrix of
sequences (rows) by positions (columns).
See Also
--------
skbio.sequence.BiologicalSequence
skbio.sequence.NucleotideSequence
skbio.sequence.DNASequence
skbio.sequence.RNASequence
SequenceCollection
skbio.parse.sequences
skbio.parse.sequences.parse_fasta
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('A--CCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> a1 = Alignment(sequences)
>>> a1
<Alignment: n=2; mean +/- std length=7.00 +/- 0.00>
"""
def __init__(self, seqs, validate=False, score=None,
start_end_positions=None):
super(Alignment, self).__init__(seqs, validate)
if score is not None:
self._score = float(score)
self._start_end_positions = start_end_positions
def distances(self, distance_fn=None):
"""Compute distances between all pairs of sequences
Parameters
----------
distance_fn : function, optional
Function for computing the distance between a pair of sequences.
This must take two sequences as input (as
`skbio.sequence.BiologicalSequence` objects) and return a
single integer or float value. Defaults to
`scipy.spatial.distance.hamming`.
Returns
-------
skbio.DistanceMatrix
Matrix containing the distances between all pairs of sequences.
Raises
------
skbio.util.exception.BiologicalSequenceError
If ``len(self) != len(other)`` and ``distance_fn`` ==
``scipy.spatial.distance.hamming``.
See Also
--------
skbio.DistanceMatrix
scipy.spatial.distance.hamming
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> seqs = [DNA("A-CCGGG", id="s1"),
... DNA("ATCC--G", id="s2"),
... DNA("ATCCGGA", id="s3")]
>>> a1 = Alignment(seqs)
>>> print(a1.distances())
3x3 distance matrix
IDs:
s1, s2, s3
Data:
[[ 0. 0.42857143 0.28571429]
[ 0.42857143 0. 0.42857143]
[ 0.28571429 0.42857143 0. ]]
"""
return super(Alignment, self).distances(distance_fn)
def score(self):
"""Returns the score of the alignment.
Returns
-------
float, None
The score of the alignment, or ``None`` if this was not provided on
object construction.
Notes
-----
This value will often be ``None``, as it is generally only going to be
provided on construction if the alignment itself was built within
scikit-bio.
"""
return self._score
def start_end_positions(self):
"""Returns the (start, end) positions for each aligned sequence.
Returns
-------
list, None
The list of sequence start/end positions, or ``None`` if this was
not provided on object construction.
Notes
-----
The start/end positions indicate the range of the unaligned sequences
in the alignment. For example, if local alignment were performed on the
sequences ACA and TACAT, depending on the specific algorithm that was
used to perform the alignment, the start/end positions would likely be:
``[(0,2), (1,3)]``. This indicates that the first and last positions of
the second sequence were not included in the alignment, and the
aligned sequences were therefore:
ACA
ACA
This value will often be ``None``, as it is generally only going to be
provided on construction if the alignment itself was built within
scikit-bio.
"""
return self._start_end_positions
def subalignment(self, seqs_to_keep=None, positions_to_keep=None,
invert_seqs_to_keep=False,
invert_positions_to_keep=False):
"""Returns new `Alignment` that is a subset of the current `Alignment`
Parameters
----------
seqs_to_keep : list, optional
A list of sequence ids to be retained in the resulting
`Alignment`. If this is not passed, the default will be to retain
all sequences.
positions_to_keep : list, optional
A list of position ids to be retained in the resulting
`Alignment`. If this is not passed, the default will be to retain
all positions.
invert_seqs_to_keep : bool, optional
If `True`, the sequences identified in `seqs_to_keep` will be
discarded, rather than retained.
invert_positions_to_keep : bool, optional
If `True`, the sequences identified in `positions_to_keep` will be
discarded, rather than retained.
Returns
-------
Alignment
The specified subalignment.
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> seqs = [DNA("A-CCGGG", id="s1"),
... DNA("ATCC--G", id="s2"),
... DNA("ATCCGGA", id="s3")]
>>> a1 = Alignment(seqs)
>>> a1
<Alignment: n=3; mean +/- std length=7.00 +/- 0.00>
>>> a1.subalignment(seqs_to_keep=["s1", "s2"])
<Alignment: n=2; mean +/- std length=7.00 +/- 0.00>
>>> a1.subalignment(seqs_to_keep=["s1", "s2"],
... invert_seqs_to_keep=True)
<Alignment: n=1; mean +/- std length=7.00 +/- 0.00>
>>> a1.subalignment(positions_to_keep=[0, 2, 3, 5])
<Alignment: n=3; mean +/- std length=4.00 +/- 0.00>
>>> a1.subalignment(positions_to_keep=[0, 2, 3, 5],
... invert_positions_to_keep=True)
<Alignment: n=3; mean +/- std length=3.00 +/- 0.00>
>>> a1.subalignment(seqs_to_keep=["s1", "s2"],
... positions_to_keep=[0, 2, 3, 5])
<Alignment: n=2; mean +/- std length=4.00 +/- 0.00>
"""
# if seqs_to_keep was not passed
if seqs_to_keep is None:
# and invert_seqs_to_keep is True
if invert_seqs_to_keep:
# return an empty alignment (because we're inverting the
# default of keeping all sequences)
return self.__class__([])
# else if invert_seqs_to_keep is False
else:
# default to returning all sequences
def keep_seq(i, id):
return True
# else, if seqs_to_keep was passed
else:
seqs_to_keep = set(seqs_to_keep)
# and invert_seqs_to_keep is True
if invert_seqs_to_keep:
# keep only sequences that were not listed in seqs_to_keep
def keep_seq(i, id):
return not (id in seqs_to_keep or
i in seqs_to_keep)
# else if invert_seqs_to_keep is False
else:
# keep only sequences that were listed in seqs_to_keep
def keep_seq(i, id):
return (id in seqs_to_keep or
i in seqs_to_keep)
# if positions_to_keep was not passed
if positions_to_keep is None:
# and invert_positions_to_keep is True
if invert_positions_to_keep:
# return an empty alignment (because we're inverting the
# default of keeping all positions)
return self.__class__([])
# else if invert_positions_to_keep is False
else:
# default to returning all positions
def keep_position(pos):
return True
# else, if positions_to_keep was passed
else:
positions_to_keep = set(positions_to_keep)
# and invert_positions_to_keep is True
if invert_positions_to_keep:
# keep only positions that were not listed in
# positions_to_keep
def keep_position(pos):
return pos not in positions_to_keep
# else if invert_positions_to_keep is False
else:
# keep only sequences that were listed in positions_to_keep
def keep_position(pos):
return pos in positions_to_keep
# prep the result object
result = []
# iterate over sequences
for sequence_index, seq in enumerate(self):
# determine if we're keeping the current sequence
if keep_seq(sequence_index, seq.id):
# if so, iterate over the positions to determine which we're
# keeping, and store them in a new list
new_seq = [c for i, c in enumerate(seq) if keep_position(i)]
# and then pack the resulting sequence into a new
# BiologicalSequence object, of the same type as the current
# object.
# Note: This is bad, we are calling join too much. This
# should be addressed in issue #194.
result.append(seq.__class__(''.join(new_seq),
id=seq.id,
description=seq.description))
# if we're not keeping the current sequence, move on to the next
else:
continue
# pack the result up in the same type of object as the current object
# and return it
return self.__class__(result)
def is_valid(self):
"""Return True if the Alignment is valid
Returns
-------
bool
``True`` if `self` is valid, and ``False`` otherwise.
Notes
-----
Validity is defined as having no sequences containing characters
outside of their valid character sets, and all sequences being of equal
length.
See Also
--------
skbio.alignment.BiologicalSequence.is_valid
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA, RNA
>>> sequences = [DNA('ACCGT--', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> a1 = Alignment(sequences)
>>> a1.is_valid()
True
>>> sequences = [DNA('ACCGT', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> a1 = Alignment(sequences)
>>> print(a1.is_valid())
False
>>> sequences = [RNA('ACCGT--', id="seq1"),
... RNA('AACCGGT', id="seq2")]
>>> a1 = Alignment(sequences)
>>> print(a1.is_valid())
False
"""
return super(Alignment, self).is_valid() and self._validate_lengths()
def iter_positions(self, constructor=None):
"""Generator of Alignment positions (i.e., columns)
Parameters
----------
constructor : type, optional
Constructor function for creating the positional values. By
default, these will be the same type as corresponding
`skbio.sequence.BiologicalSequence` in the
`SequenceCollection` object, but you can pass a
`skbio.sequence.BiologicalSequence` class here to ensure
that they are all of consistent type, or ``str`` to have them
returned as strings.
Returns
-------
GeneratorType
Generator of lists of positional values in the
`SequenceCollection` (effectively the transpose of the alignment).
See Also
--------
iter
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('ACCGT--', id="seq1"),
... DNA('AACCGGT', id="seq2")]
>>> a1 = Alignment(sequences)
>>> for position in a1.iter_positions():
... print(position)
[<DNASequence: A (length: 1)>, <DNASequence: A (length: 1)>]
[<DNASequence: C (length: 1)>, <DNASequence: A (length: 1)>]
[<DNASequence: C (length: 1)>, <DNASequence: C (length: 1)>]
[<DNASequence: G (length: 1)>, <DNASequence: C (length: 1)>]
[<DNASequence: T (length: 1)>, <DNASequence: G (length: 1)>]
[<DNASequence: - (length: 1)>, <DNASequence: G (length: 1)>]
[<DNASequence: - (length: 1)>, <DNASequence: T (length: 1)>]
>>> for position in a1.iter_positions(constructor=str):
... print(position)
['A', 'A']
['C', 'A']
['C', 'C']
['G', 'C']
['T', 'G']
['-', 'G']
['-', 'T']
"""
if constructor is None:
def constructor(s):
return s
for i in range(self.sequence_length()):
position = [constructor(seq[i]) for seq in self]
yield position
def majority_consensus(self, constructor=None):
"""Return the majority consensus sequence for the `Alignment`
Parameters
----------
constructor : function, optional
Constructor function for creating the consensus sequence. By
default, this will be the same type as the first sequence in the
`Alignment`.
Returns
-------
skbio.sequence.BiologicalSequence
The consensus sequence of the `Alignment`. In other words, at each
position the most common character is chosen, and those characters
are combined to create a new sequence.
Notes
-----
If there are two characters that are equally abundant in the sequence
at a given position, the choice of which of those characters will be
present at that position in the result is arbitrary.
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> a1.majority_consensus()
<DNASequence: AT-C (length: 4)>
>>> a1.majority_consensus(constructor=str)
'AT-C'
"""
# handle empty Alignment case
if self.is_empty():
return ''
if constructor is None:
constructor = self[0].__class__
result = []
for c in self.position_counters():
# Counter.most_common returns an ordered list of the
# n most common (sequence, count) items in Counter. Here
# we set n=1, and take only the character, not the count.
result.append(c.most_common(1)[0][0])
result = ''.join(result)
return constructor(result)
def omit_gap_positions(self, maximum_gap_frequency):
"""Returns Alignment with positions filtered based on gap frequency
Parameters
----------
maximum_gap_frequency : float
The maximum fraction of the sequences that can contain a gap at a
given position for that position to be retained in the resulting
`Alignment`.
Returns
-------
Alignment
The subalignment containing only the positions with gaps in fewer
than `maximum_gap_frequency` fraction of the sequences.
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> a2 = a1.omit_gap_positions(0.50)
>>> a2
<Alignment: n=3; mean +/- std length=3.00 +/- 0.00>
>>> print(a2[0])
AC-
>>> print(a2[1])
ATC
>>> print(a2[2])
TTC
"""
# handle empty Alignment case
if self.is_empty():
return self.__class__([])
position_frequencies = self.position_frequencies()
gap_alphabet = self[0].gap_alphabet()
positions_to_keep = []
for i, f in enumerate(position_frequencies):
gap_frequency = sum([f[c] for c in gap_alphabet])
if gap_frequency <= maximum_gap_frequency:
positions_to_keep.append(i)
return self.subalignment(positions_to_keep=positions_to_keep)
def omit_gap_sequences(self, maximum_gap_frequency):
"""Returns Alignment with sequences filtered based on gap frequency
Parameters
----------
maximum_gap_frequency : float
The maximum fraction of the positions that can contain a gap in a
given sequence for that sequence to be retained in the resulting
`Alignment`.
Returns
-------
Alignment
The subalignment containing only the sequences with gaps in fewer
than `maximum_gap_frequency` fraction of the positions.
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> a2 = a1.omit_gap_sequences(0.49)
>>> a2
<Alignment: n=2; mean +/- std length=4.00 +/- 0.00>
>>> print(a2[0])
AT-C
>>> print(a2[1])
TT-C
"""
# handle empty Alignment case
if self.is_empty():
return self.__class__([])
base_frequencies = self.k_word_frequencies(k=1)
gap_alphabet = self[0].gap_alphabet()
seqs_to_keep = []
for seq, f in zip(self, base_frequencies):
gap_frequency = sum([f[c] for c in gap_alphabet])
if gap_frequency <= maximum_gap_frequency:
seqs_to_keep.append(seq.id)
return self.subalignment(seqs_to_keep=seqs_to_keep)
def position_counters(self):
"""Return collection.Counter object for positions in Alignment
Returns
-------
list
List of ``collection.Counter`` objects, one for each position in
the `Alignment`.
See Also
--------
position_frequencies
position_entropies
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> for counter in a1.position_counters():
... print(counter)
Counter({'A': 2, 'T': 1})
Counter({'T': 2, 'C': 1})
Counter({'-': 3})
Counter({'C': 2, '-': 1})
"""
return [Counter(p) for p in self.iter_positions(constructor=str)]
def position_frequencies(self):
"""Return frequencies of characters for positions in Alignment
Returns
-------
list
List of ``collection.defaultdict`` objects, one for each position
in the `Alignment`, representing the frequency of each character in
the `Alignment` at that position.
See Also
--------
position_counters
position_entropies
k_word_frequencies
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> position_freqs = a1.position_frequencies()
>>> print(round(position_freqs[0]['A'],3))
0.667
>>> print(round(position_freqs[1]['A'],3))
0.0
"""
result = []
# handle the empty Alignment case
if self.is_empty():
return result
count = 1 / self.sequence_count()
for p in self.iter_positions(constructor=str):
current_freqs = defaultdict(float)
for c in p:
current_freqs[c] += count
result.append(current_freqs)
return result
def position_entropies(self, base=None,
nan_on_non_standard_chars=True):
"""Return Shannon entropy of positions in Alignment
Parameters
----------
base : float, optional
log base for entropy calculation. If not passed, default will be e
(i.e., natural log will be computed).
nan_on_non_standard_chars : bool, optional
if True, the entropy at positions containing characters outside of
the first sequence's `iupac_standard_characters` will be `np.nan`.
This is useful, and the default behavior, as it's not clear how a
gap or degenerate character should contribute to a positional
entropy. This issue was described in [1]_.
Returns
-------
list
List of floats of Shannon entropy at `Alignment` positions. Shannon
entropy is defined in [2]_.
See Also
--------
position_counters
position_frequencies
References
----------
.. [1] Identifying DNA and protein patterns with statistically
significant alignments of multiple sequences.
Hertz GZ, Stormo GD.
Bioinformatics. 1999 Jul-Aug;15(7-8):563-77.
.. [2] A Mathematical Theory of Communication
CE Shannon
The Bell System Technical Journal (1948).
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> print(a1.position_entropies())
[0.63651416829481278, 0.63651416829481278, nan, nan]
"""
result = []
# handle empty Alignment case
if self.is_empty():
return result
iupac_standard_characters = self[0].iupac_standard_characters()
for f in self.position_frequencies():
if (nan_on_non_standard_chars and
len(viewkeys(f) - iupac_standard_characters) > 0):
result.append(np.nan)
else:
result.append(entropy(list(f.values()), base=base))
return result
def sequence_length(self):
"""Return the number of positions in Alignment
Returns
-------
int
The number of positions in `Alignment`.
See Also
--------
sequence_lengths
sequence_count
Examples
--------
>>> from skbio.alignment import Alignment
>>> from skbio.sequence import DNA
>>> sequences = [DNA('AC--', id="seq1"),
... DNA('AT-C', id="seq2"),
... DNA('TT-C', id="seq3")]
>>> a1 = Alignment(sequences)
>>> a1.sequence_length()
4
"""
# handle the empty Alignment case
if self.is_empty():
return 0
else:
return len(self._data[0])
def to_phylip(self, map_labels=False, label_prefix=""):
"""Return phylip-formatted string representing the `SequenceCollection`
Returns
-------
tuple of (str, dict)
A phylip-formatted string representing the `SequenceCollection`, and a
dict mapping the ids used in that string back to the original sequence ids.
"""
if not self._validate_lengths():
raise SequenceCollectionError("PHYLIP-formatted string can only "
"be generated if all sequences are "
"of equal length.")
if self.is_empty():
raise SequenceCollectionError("PHYLIP-formatted string can only "
"be generated if there is at least "
"one sequence in the Alignment.")
sequence_length = self.sequence_length()
if sequence_length == 0:
raise SequenceCollectionError("PHYLIP-formatted string can only "
"be generated if there is at least "
"one position in the Alignment.")
ids = self.ids()
sequence_count = self.sequence_count()
result = ["%d %d" % (sequence_count, sequence_length)]
if map_labels:
_, new_id_to_old_id = self.int_map(prefix=label_prefix)
old_id_to_new_id = {v: k for k, v in new_id_to_old_id.items()}
else:
new_id_to_old_id = {seq_id: seq_id for seq_id in ids}
old_id_to_new_id = new_id_to_old_id
for seq_id in ids:
new_id = old_id_to_new_id[seq_id]
seq = self[seq_id]
result.append("%s %s" % (new_id, str(seq)))
return '\n'.join(result), new_id_to_old_id
def _validate_lengths(self):
"""Return ``True`` if all sequences same length, ``False`` otherwise
"""
seq1_length = self.sequence_length()
for seq in self:
if seq1_length != len(seq):
return False
return True
class StockholmAlignment(Alignment):
"""Contains the metadata information in a Stockholm file alignment
Parameters
----------
seqs : list of `skbio.sequence.BiologicalSequence` objects
The `skbio.sequence.BiologicalSequence` objects to load.
gf : dict, optional
GF info in the format {feature: info}
gs : dict of dicts, optional
GS info in the format {feature: {seqlabel: info}}
gr : dict of dicts, optional
GR info in the format {feature: {seqlabel: info}}
gc : dict, optional
GC info in the format {feature: info}
Notes
-----
The Stockholm format is described in [1]_ and [2]_.
If there are multiple references, include information for each R* line
as a list, with reference 0 information in position 0 for all lists,
etc. This list will be broken up into the appropriate bits for each
reference on string formatting.
If there are multiple trees included, use a list to store identifiers
and trees, with position 0 holding identifier for tree in position 0,
etc.
References
----------
.. [1] http://sonnhammer.sbc.su.se/Stockholm.html
.. [2] http://en.wikipedia.org/wiki/Stockholm_format
Examples
--------
Assume we have a basic stockholm file with the following contents::
# STOCKHOLM 1.0
seq1 ACC--G-GGGU
seq2 TCC--G-GGGA
#=GC SS_cons (((.....)))
//
>>> from skbio.sequence import RNA
>>> from skbio.alignment import StockholmAlignment
>>> from StringIO import StringIO
>>> sto_in = StringIO("# STOCKHOLM 1.0\\n"
... "seq1 ACC--G-GGGU\\nseq2 TCC--G-GGGA\\n"
... "#=GC SS_cons (((.....)))\\n//")
>>> sto_records = StockholmAlignment.from_file(sto_in, RNA)
>>> sto = next(sto_records)
>>> print(sto)
# STOCKHOLM 1.0
seq1 ACC--G-GGGU
seq2 TCC--G-GGGA
#=GC SS_cons (((.....)))
//
>>> sto.gc
{'SS_cons': '(((.....)))'}
We can also write out information by instantiating the StockholmAlignment
object and then printing it.
>>> from skbio.sequence import RNA
>>> from skbio.alignment import StockholmAlignment
>>> seqs = [RNA("ACC--G-GGGU", id="seq1"),
... RNA("TCC--G-GGGA", id="seq2")]
>>> gf = {
... "RT": ["TITLE1", "TITLE2"],
... "RA": ["Auth1;", "Auth2;"],
... "RL": ["J Mol Biol", "Cell"],
... "RM": ["11469857", "12007400"]}
>>> sto = StockholmAlignment(seqs, gf=gf)
>>> print(sto)
# STOCKHOLM 1.0
#=GF RN [1]
#=GF RM 11469857
#=GF RT TITLE1
#=GF RA Auth1;
#=GF RL J Mol Biol
#=GF RN [2]
#=GF RM 12007400
#=GF RT TITLE2
#=GF RA Auth2;
#=GF RL Cell
seq1 ACC--G-GGGU
seq2 TCC--G-GGGA
//
"""
def __init__(self, seqs, gf=None, gs=None, gr=None, gc=None,
validate=False):
self.gf = gf if gf else {}
self.gs = gs if gs else {}
self.gr = gr if gr else {}
self.gc = gc if gc else {}
super(StockholmAlignment, self).__init__(seqs, validate)
def __str__(self):
"""Parses StockholmAlignment into a string with stockholm format
Returns
-------
str
Stockholm formatted string containing all information in the object
Notes
-----
If references are included in GF data, the RN lines are automatically
generated if not provided.
"""
# find length of leader info needed to make file pretty
# 10 comes from the characters for '#=GF ' and the feature after label
infolen = max(len(seq.id) for seq in self._data) + 10
GF_lines = []
GS_lines = []
GC_lines = []
# NOTE: EVERYTHING MUST BE COERCED TO STR in case int or float passed
# add GF information if applicable
if self.gf:
skipfeatures = set(("NH", "RC", "RM", "RN", "RA", "RL"))
for feature, value in self.gf.items():
# list of features to skip and parse special later
if feature in skipfeatures:
continue
# list of features to parse special
elif feature == "TN":
# trees must be in proper order of identifier then tree
ident = value if isinstance(value, list) else [value]
tree = self.gf["NH"] if isinstance(self.gf["NH"], list) \
else [self.gf["NH"]]
for ident, tree in zip(ident, tree):
GF_lines.append(' '.join(["#=GF", "TN", str(ident)]))
GF_lines.append(' '.join(["#=GF", "NH", str(tree)]))
elif feature == "RT":
# make sure each reference block stays together
# set up lists to zip in case some bits are missing
# create rn list if needed
default_none = [0]*len(value)
rn = self.gf.get("RN", ["[%i]" % x for x in
range(1, len(value)+1)])
rm = self.gf.get("RM", default_none)
rt = self.gf.get("RT", default_none)
ra = self.gf.get("RA", default_none)
rl = self.gf.get("RL", default_none)
rc = self.gf.get("RC", default_none)
# order: RN, RM, RT, RA, RL, RC
for n, m, t, a, l, c in zip(rn, rm, rt, ra, rl, rc):
GF_lines.append(' '.join(["#=GF", "RN", n]))
if m:
GF_lines.append(' '.join(["#=GF", "RM", str(m)]))
if t:
GF_lines.append(' '.join(["#=GF", "RT", str(t)]))
if a:
GF_lines.append(' '.join(["#=GF", "RA", str(a)]))
if l:
GF_lines.append(' '.join(["#=GF", "RL", str(l)]))
if c:
GF_lines.append(' '.join(["#=GF", "RC", str(c)]))
else:
# normal addition for everything else
if not isinstance(value, list):
value = [value]
for val in value:
GF_lines.append(' '.join(["#=GF", feature, str(val)]))
# add GS information if applicable
if self.gs:
for feature in self.gs:
for seqname in self.gs[feature]:
GS_lines.append(' '.join(["#=GS", seqname, feature,
str(self.gs[feature][seqname])]))
# add GC information if applicable
if self.gc:
for feature, value in viewitems(self.gc):
leaderinfo = ' '.join(["#=GC", feature])
spacer = ' ' * (infolen - len(leaderinfo))
GC_lines.append(spacer.join([leaderinfo,
str(self.gc[feature])]))
sto_lines = ["# STOCKHOLM 1.0"] + GF_lines + GS_lines
# create seq output along with GR info if applicable
for label, seq in self.iteritems():
spacer = ' ' * (infolen - len(label))
sto_lines.append(spacer.join([label, str(seq)]))
# GR info added for sequence
for feature in viewkeys(self.gr):
value = self.gr[feature][label]
leaderinfo = ' '.join(['#=GR', label, feature])
spacer = ' ' * (infolen - len(leaderinfo))
sto_lines.append(spacer.join([leaderinfo, value]))
sto_lines.extend(GC_lines)
# add final slashes to end of file
sto_lines.append('//')
return '\n'.join(sto_lines)
def to_file(self, out_f):
r"""Save the alignment to file in text format.
Parameters
----------
out_f : file-like object or filename
File-like object to write serialized data to, or name of
file. If it's a file-like object, it must have a ``write``
method, and it won't be closed. Else, it is opened and
closed after writing.
See Also
--------
from_file
"""
with open_file(out_f, 'w') as out_f:
out_f.write(self.__str__())
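    # Illustrative usage sketch (assumes an existing alignment `aln` and a
    # sequence constructor such as skbio's DNA class; the filename is made up):
    #
    #     aln.to_file('family.sto')                     # write Stockholm text
    #     with open('family.sto') as fh:
    #         for aln2 in StockholmAlignment.from_file(fh, DNA):
    #             print(aln2.gf.get('AC'))              # parsed GF metadata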
@staticmethod
def _parse_gf_info(lines):
"""Takes care of parsing GF lines in stockholm plus special cases"""
parsed = defaultdict(list)
# needed for making each multi-line RT and NH one string
rt = []
nh = []
lastline = ""
for line in lines:
try:
init, feature, content = line.split(None, 2)
except ValueError:
raise StockholmParseError("Malformed GF line encountered!"
"\n%s" % line.split(None, 2))
if init != "#=GF":
raise StockholmParseError("Non-GF line encountered!")
# take care of adding multiline RT to the parsed information
if lastline == "RT" and feature != "RT":
# add rt line to the parsed dictionary
rtline = " ".join(rt)
rt = []
parsed["RT"].append(rtline)
elif feature == "RT":
rt.append(content)
lastline = feature
continue
# Take care of adding multiline NH to the parsed dictionary
elif lastline == "NH" and feature != "NH":
nhline = " ".join(nh)
nh = []
parsed["NH"].append(nhline)
elif feature == "NH":
nh.append(content)
lastline = feature
continue
# add current feature to the parsed information
parsed[feature].append(content)
lastline = feature
        # removing unnecessary lists from parsed. Use .items() for py3 support
for feature, value in parsed.items():
# list of multi-line features to join into single string if needed
if feature in ["CC"]:
parsed[feature] = ' '.join(value)
elif len(parsed[feature]) == 1:
parsed[feature] = value[0]
return parsed
@staticmethod
def _parse_gc_info(lines, strict=False, seqlen=-1):
"""Takes care of parsing GC lines in stockholm format"""
parsed = {}
for line in lines:
try:
init, feature, content = line.split(None, 2)
except ValueError:
raise StockholmParseError("Malformed GC line encountered!\n%s"
% line.split(None, 2))
if init != "#=GC":
raise StockholmParseError("Non-GC line encountered!")
# add current feature to the parsed information
            if feature in parsed:
                if strict:
                    raise StockholmParseError("Should not have multiple lines "
                                              "with the same feature: %s" %
                                              feature)
                parsed[feature].append(content)
            else:
                parsed[feature] = [content]
        # removing unnecessary lists from parsed. Use .items() for py3 support
        for feature, value in parsed.items():
            parsed[feature] = ''.join(value)
            if strict:
                if len(parsed[feature]) != seqlen:
                    raise StockholmParseError("GC must have exactly one char "
                                              "per position in alignment!")
return parsed
@staticmethod
def _parse_gs_gr_info(lines, strict=False, seqlen=-1):
"""Takes care of parsing GS and GR lines in stockholm format"""
parsed = {}
parsetype = ""
for line in lines:
try:
init, label, feature, content = line.split(None, 3)
except ValueError:
raise StockholmParseError("Malformed GS/GR line encountered!"
"\n%s" % line.split(None, 3))
if parsetype == "":
parsetype = init
elif init != parsetype:
raise StockholmParseError("Non-GS/GR line encountered!")
# parse each line, taking into account interleaved format
            if feature not in parsed:
                parsed[feature] = {}
            if label in parsed[feature]:
                # interleaved format, so need list of content
                parsed[feature][label].append(content)
            else:
                parsed[feature][label] = [content]
# join all the crazy lists created during parsing
for feature in parsed:
for label, content in parsed[feature].items():
parsed[feature][label] = ''.join(content)
if strict:
if len(parsed[feature][label]) != seqlen:
raise StockholmParseError("GR must have exactly one "
"char per position in the "
"alignment!")
return parsed
@classmethod
def from_file(cls, infile, seq_constructor, strict=False):
r"""yields StockholmAlignment objects from a stockholm file.
Parameters
----------
infile : open file object
An open stockholm file.
        seq_constructor : BiologicalSequence object
            The BiologicalSequence class that corresponds to what the
            stockholm file holds. See skbio.sequence
strict : bool (optional)
Turns on strict parsing of GR and GC lines to ensure one char per
position. Default: False
Returns
-------
Iterator of StockholmAlignment objects
Raises
------
skbio.alignment.StockholmParseError
If any lines are found that don't conform to stockholm format
"""
        # make sure the first line is correct
line = infile.readline()
if not line.startswith("# STOCKHOLM 1.0"):
raise StockholmParseError("Incorrect header found")
gs_lines = []
gf_lines = []
gr_lines = []
gc_lines = []
# OrderedDict used so sequences maintain same order as in file
seqs = OrderedDict()
for line in infile:
line = line.strip()
if line == "" or line.startswith("# S"):
# skip blank lines or secondary headers
continue
elif line == "//":
# parse the record since we are at its end
                # build the sequence list for alignment construction
seqs = [seq_constructor(seq, id=_id) for _id, seq in
viewitems(seqs)]
# get length of sequences in the alignment
                seqlen = len(seqs[0])
# parse information lines
gf = cls._parse_gf_info(gf_lines)
gs = cls._parse_gs_gr_info(gs_lines)
gr = cls._parse_gs_gr_info(gr_lines, strict, seqlen)
gc = cls._parse_gc_info(gc_lines, strict, seqlen)
# yield the actual stockholm object
yield cls(seqs, gf, gs, gr, gc)
# reset all storage variables
gs_lines = []
gf_lines = []
gr_lines = []
gc_lines = []
seqs = OrderedDict()
# add the metadata lines to the proper lists
elif line.startswith("#=GF"):
gf_lines.append(line)
elif line.startswith("#=GS"):
gs_lines.append(line)
elif line.startswith("#=GR"):
gr_lines.append(line)
elif line.startswith("#=GC"):
gc_lines.append(line)
else:
lineinfo = line.split()
# assume sequence since nothing else in format is left
# in case of interleaved format, need to do check
if lineinfo[0] in seqs:
sequence = seqs[lineinfo[0]]
seqs[lineinfo[0]] = ''.join([sequence, lineinfo[1]])
else:
seqs[lineinfo[0]] = lineinfo[1]
| {"hexsha": "e75666617e245f40ed0b014fde0b345852730ec5", "size": 67569, "ext": "py", "lang": "Python", "max_stars_repo_path": "skbio/alignment/_alignment.py", "max_stars_repo_name": "JWDebelius/scikit-bio", "max_stars_repo_head_hexsha": "9df3edb46eb728f6efbd4f2db74529200ad40a77", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skbio/alignment/_alignment.py", "max_issues_repo_name": "JWDebelius/scikit-bio", "max_issues_repo_head_hexsha": "9df3edb46eb728f6efbd4f2db74529200ad40a77", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skbio/alignment/_alignment.py", "max_forks_repo_name": "JWDebelius/scikit-bio", "max_forks_repo_head_hexsha": "9df3edb46eb728f6efbd4f2db74529200ad40a77", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2121518987, "max_line_length": 79, "alphanum_fraction": 0.5394485637, "include": true, "reason": "import numpy,from scipy", "num_tokens": 14607} |
from configs import cfg
from src.utils.record_log import _logger
import tensorflow as tf
import numpy as np
from abc import ABCMeta, abstractmethod
class ModelTemplate(metaclass=ABCMeta):
def __init__(self, token_emb_mat, glove_emb_mat, tds, tel, hn, scope):
self.scope = scope
self.global_step = tf.get_variable('global_step', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
self.token_emb_mat, self.glove_emb_mat = token_emb_mat, glove_emb_mat
# ---------place holders-------------
self.context_token = tf.placeholder(tf.int32, [None, None, None], name='context_token')
self.question_token = tf.placeholder(tf.int32, [None, None], name='question_token')
self.sent_label = tf.placeholder(tf.int32, [None], 'sent_label')
self.is_train = tf.placeholder(tf.bool, [], name='is_train')
# -------- Lengths -------
self.tds, self.tel = tds, tel
self.hn = hn
self.bs = tf.shape(self.context_token)[0]
self.sn, self.sl = tf.shape(self.context_token)[1], tf.shape(self.context_token)[2]
self.ql = tf.shape(self.question_token)[1]
# ------other ------
self.context_token_mask = tf.cast(self.context_token, tf.bool)
self.question_token_mask = tf.cast(self.question_token, tf.bool)
self.context_token_len = tf.reduce_sum(tf.cast(self.context_token_mask, tf.int32), -1)
self.question_token_len = tf.reduce_sum(tf.cast(self.question_token_mask, tf.int32), -1)
self.context_sent_mask = tf.cast(tf.reduce_sum(tf.cast(self.context_token_mask, tf.int32), -1), tf.bool)
self.context_sent_len = tf.reduce_sum(tf.cast(self.context_sent_mask, tf.int32), -1)
self.tensor_dict = {}
# ----- start ------
self.logits = None
self.loss = None
self.accuracy = None
self.var_ema = None
self.ema = None
self.summary = None
self.opt = None
self.train_op = None
@abstractmethod
def build_network(self):
pass
def build_loss(self):
# weight_decay
with tf.name_scope("weight_decay"):
for var in set(tf.get_collection('reg_vars', self.scope)):
weight_decay = tf.multiply(tf.nn.l2_loss(var), cfg.wd,
name="{}-wd".format('-'.join(str(var.op.name).split('/'))))
tf.add_to_collection('losses', weight_decay)
reg_vars = tf.get_collection('losses', self.scope)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
_logger.add('regularization var num: %d' % len(reg_vars))
_logger.add('trainable var num: %d' % len(trainable_vars))
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.sent_label,
logits=self.logits
)
tf.add_to_collection('losses', tf.reduce_mean(losses, name='xentropy_loss_mean'))
loss = tf.add_n(tf.get_collection('losses', self.scope), name='loss')
tf.summary.scalar(loss.op.name, loss)
tf.add_to_collection('ema/scalar', loss)
return loss
def build_accuracy(self):
correct = tf.equal(
tf.cast(tf.argmax(self.logits, -1), tf.int32),
self.sent_label
) # [bs]
return tf.cast(correct, tf.float32)
def update_tensor_add_ema_and_opt(self):
self.logits = self.build_network()
self.loss = self.build_loss()
self.accuracy = self.build_accuracy()
# ------------ema-------------
if True:
self.var_ema = tf.train.ExponentialMovingAverage(cfg.var_decay)
self.build_var_ema()
if cfg.mode == 'train':
self.ema = tf.train.ExponentialMovingAverage(cfg.decay)
self.build_ema()
self.summary = tf.summary.merge_all()
# ---------- optimization ---------
if cfg.optimizer.lower() == 'adadelta':
assert cfg.learning_rate > 0.1 and cfg.learning_rate <= 1.
self.opt = tf.train.AdadeltaOptimizer(cfg.learning_rate)
elif cfg.optimizer.lower() == 'adam':
assert cfg.learning_rate < 0.1
self.opt = tf.train.AdamOptimizer(cfg.learning_rate)
elif cfg.optimizer.lower() == 'rmsprop':
assert cfg.learning_rate < 0.1
self.opt = tf.train.RMSPropOptimizer(cfg.learning_rate)
elif cfg.optimizer.lower() == 'test':
self.opt = tf.train.RMSPropOptimizer(0.001, 0.75)
# self.opt = tf.contrib.keras.optimizers.Nadam()
else:
raise AttributeError('no optimizer named as \'%s\'' % cfg.optimizer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
# trainable param num:
# print params num
all_params_num = 0
for elem in trainable_vars:
# elem.name
var_name = elem.name.split(':')[0]
if var_name.endswith('emb_mat'): continue
params_num = 1
for l in elem.get_shape().as_list(): params_num *= l
all_params_num += params_num
_logger.add('Trainable Parameters Number: %d' % all_params_num)
self.train_op = self.opt.minimize(self.loss, self.global_step,
var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope))
def build_var_ema(self):
ema_op = self.var_ema.apply(tf.trainable_variables(),)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def build_ema(self):
tensors = tf.get_collection("ema/scalar", scope=self.scope) + \
tf.get_collection("ema/vector", scope=self.scope)
ema_op = self.ema.apply(tensors)
for var in tf.get_collection("ema/scalar", scope=self.scope):
ema_var = self.ema.average(var)
tf.summary.scalar(ema_var.op.name, ema_var)
for var in tf.get_collection("ema/vector", scope=self.scope):
ema_var = self.ema.average(var)
tf.summary.histogram(ema_var.op.name, ema_var)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def get_feed_dict(self, sample_batch, data_type='train'):
max_sn, max_sl, max_ql = 0, 0, 0
for sample in sample_batch:
max_ql = max(max_ql, len(sample['question_token_digital']))
max_sn = max(max_sn, len(sample['context_token_digital']))
for sent_token in sample['context_token_digital']:
max_sl = max(max_sl, len(sent_token))
# -----------
context_token_b = []
question_token_b = []
# tokens
for sample in sample_batch:
context_token = np.zeros([max_sn, max_sl], cfg.intX)
for idx_s, sent_token in enumerate(sample['context_token_digital']):
for idx_t, token in enumerate(sent_token):
context_token[idx_s, idx_t] = token
context_token_b.append(context_token)
question_token = np.zeros([max_ql], cfg.intX)
for idx_qt, qtoken in enumerate(sample['question_token_digital']):
question_token[idx_qt] = qtoken
question_token_b.append(question_token)
context_token_b = np.stack(context_token_b)
question_token_b = np.stack(question_token_b)
feed_dict = {
self.context_token: context_token_b, self.question_token: question_token_b,
self.is_train: True if data_type == 'train' else False
}
# labels
if data_type in ['train', 'dev']:
sent_label_b = []
for sample in sample_batch:
sent_label_b.append(sample['answers'][0]['sent_label'])
sent_label_b = np.stack(sent_label_b).astype(cfg.intX)
feed_dict[self.sent_label] = sent_label_b
return feed_dict
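    # Shape note (illustrative, not from the original source): get_feed_dict pads
    # every sample to the largest sentence count / sentence length / question
    # length in the current batch. E.g. two samples with 3 and 5 sentences of at
    # most 12 tokens both become [5, 12] int arrays, so context_token_b stacks to
    # [batch_size, 5, 12] and question_token_b to [batch_size, max_ql].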
def step(self, sess, batch_samples, get_summary=False):
assert isinstance(sess, tf.Session)
feed_dict = self.get_feed_dict(batch_samples, 'train')
cfg.time_counter.add_start()
if get_summary:
loss, summary, train_op = sess.run([self.loss, self.summary, self.train_op], feed_dict=feed_dict)
else:
loss, train_op = sess.run([self.loss, self.train_op], feed_dict=feed_dict)
summary = None
cfg.time_counter.add_stop()
return loss, summary, train_op
| {"hexsha": "748ac366be4939554a7da0e390d70e4e67a98317", "size": 8609, "ext": "py", "lang": "Python", "max_stars_repo_path": "BiBloSA/exp_SQuAD_sim/src/model/model_template.py", "max_stars_repo_name": "mikimaus78/ml_monorepo", "max_stars_repo_head_hexsha": "b2c2627ff0e86e27f6829170d0dac168d8e5783b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 116, "max_stars_repo_stars_event_min_datetime": "2018-02-01T08:33:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-04T05:28:04.000Z", "max_issues_repo_path": "BiBloSA/exp_SQuAD_sim/src/model/model_template.py", "max_issues_repo_name": "mikimaus78/ml_monorepo", "max_issues_repo_head_hexsha": "b2c2627ff0e86e27f6829170d0dac168d8e5783b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-02-23T18:54:22.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-09T01:30:32.000Z", "max_forks_repo_path": "BiBloSA/exp_SQuAD_sim/src/model/model_template.py", "max_forks_repo_name": "mikimaus78/ml_monorepo", "max_forks_repo_head_hexsha": "b2c2627ff0e86e27f6829170d0dac168d8e5783b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2019-02-08T02:00:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T23:17:00.000Z", "avg_line_length": 41.7912621359, "max_line_length": 115, "alphanum_fraction": 0.6102915553, "include": true, "reason": "import numpy", "num_tokens": 1923} |
"""Calculate dynamic aperture."""
import numpy as _np
import pyaccel.naff as _pynaff
from ..utils import DataBaseClass as _BaseClass
class BaseClass(_BaseClass):
"""."""
COLORS = ('k', 'b', 'r', 'g', 'm', 'c')
def __str__(self):
"""."""
return str(self.params)
# class methods
@classmethod
def calc_resonances_for_bounds(cls, bounds, orders=3, symmetry=1):
"""."""
orders = _np.asarray(orders)
if not orders.shape:
orders = _np.arange(1, orders+1)
axis = _np.asarray(bounds).reshape(2, -1)
points = _np.zeros((2, 4))
points[:2, :2] = axis
points[0, 2:] = _np.flip(axis[0])
points[1, 2:] = axis[1]
resons = []
for order in orders:
resons.extend(
cls._calc_resonances_fixed_order(points, order, symmetry))
# Unique resonances:
ress = set()
for reson in resons:
gcd = _np.gcd.reduce(reson)
if gcd > 1:
reson = (reson[0]//gcd, reson[1]//gcd, reson[2]//gcd)
ress.add(reson)
resons = list(ress)
return resons
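    # Worked example (illustrative, not from the original source): each tuple
    # (nx, ny, c) returned above encodes the resonance line nx*nu_x + ny*nu_y = c
    # clipped to the rectangle spanned by `bounds`; e.g. (1, 2, 1) is the
    # third-order coupling resonance nu_x + 2*nu_y = 1, and the gcd step folds
    # duplicates such as (2, 0, 2) into (1, 0, 1).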
@staticmethod
def _calc_resonances_fixed_order(points, order=3, symmetry=1):
"""."""
points = _np.asarray(points)
if points.shape[0] != 2:
if points.shape[1] == 2:
points = points.T
else:
raise TypeError('wrong number of dimensions for points.')
ang_coeffs = _np.zeros((2*order + 1, 2), dtype=int)
ang_coeffs[:, 0] = _np.arange(-order, order + 1)
ang_coeffs[:, 1] = order - _np.abs(ang_coeffs[:, 0])
consts = _np.dot(ang_coeffs, points)
consts_min = _np.array(_np.ceil(consts.min(axis=1)), dtype=int)
consts_max = _np.array(_np.floor(consts.max(axis=1)), dtype=int)
resons = []
for ang_i, c_min, c_max in zip(ang_coeffs, consts_min, consts_max):
cons = _np.arange(c_min, c_max+1)
for c_i in cons:
if not c_i % symmetry:
resons.append((ang_i[0], ang_i[1], c_i))
return resons
@classmethod
def add_resonances_to_axis(cls, axes, resons=None, orders=3, symmetry=1):
"""."""
if resons is None:
bounds = axes.axis()
resons = cls.calc_resonances_for_bounds(
bounds, orders=orders, symmetry=symmetry)
for coeffx, coeffy, coeffc in resons:
order = int(_np.abs(coeffx) + _np.abs(coeffy))
idx = order - 1
cor = cls.COLORS[idx % len(cls.COLORS)]
lwid = max(3-idx, 1)
if coeffy:
line = cls.add_reson_line(
axes, const=coeffc/coeffy, slope=-coeffx/coeffy,
color=cor, linewidth=lwid)
else:
line = cls.add_reson_line(
axes, const=coeffc/coeffx, slope=None,
color=cor, linewidth=lwid)
if not coeffx:
line.name = f'reson: {coeffy}y = {coeffc}'
elif not coeffy:
line.name = f'reson: {coeffx}x = {coeffc}'
else:
sig = '+' if coeffy > 0 else '-'
line.name = \
f'reson: {coeffx}x {sig} {abs(coeffy)}y = {coeffc}'
@staticmethod
def add_reson_line(axes, const=0, slope=None, **kwargs):
"""."""
axis = axes.axis()
if slope is not None:
x11, x22 = axis[:2]
y11, y22 = slope*x11 + const, slope*x22 + const
else:
x11, x22 = const, const
y11, y22 = axis[2:]
line = axes.plot([x11, x22], [y11, y22], **kwargs)[0]
axes.set_xlim(axis[:2])
axes.set_ylim(axis[2:])
return line
@staticmethod
def _calc_dynap(x_in, y_in, lost_plane):
"""."""
shape = x_in.shape
nlost = _np.array([l is None for l in lost_plane], dtype=bool)
nlost = nlost.reshape(shape)
r_sqr = x_in*x_in + y_in*y_in
idx = _np.unravel_index(_np.argmin(r_sqr), r_sqr.shape)
tolook = set()
tolook.add(idx)
        inner = {idx}
border = set()
looked = set()
neigbs = [
(-1, 0), (0, -1), (0, 1), (1, 0),
            # uncomment these lines to include diagonal neighbors:
# (-1, -1), (-1, 1), (1, -1), (1, 1),
]
while tolook:
idx = tolook.pop()
isborder = False
for nei in neigbs:
idxn = idx[0] + nei[0], idx[1] + nei[1]
if 0 <= idxn[0] < shape[0] and 0 <= idxn[1] < shape[1]:
if nlost[idxn]:
if idxn not in looked:
tolook.add(idxn)
inner.add(idxn)
else:
isborder = True
if isborder:
border.add(idx)
looked.add(idx)
# tuple(zip(*border)) transforms:
# ((x1, y1), ..., (xn, yn)) --> ((x1, ..., xn), (y1, ..., yn))
border = tuple(zip(*border))
x_dyn = x_in[border]
y_dyn = y_in[border]
r_sqr = x_dyn*x_dyn + y_dyn*y_dyn
theta = _np.arctan2(y_dyn, x_dyn)
ind = _np.argsort(theta)
return x_dyn[ind], y_dyn[ind]
@staticmethod
def _calc_frequencies(rout, lost_plane):
"""."""
if not rout.size or len(rout.shape) < 3:
return None, None
nmult = rout.shape[2] // 6
left = rout.shape[2] % 6
if nmult < 5:
return None, None
if left < 1:
nmult -= 1
nlost = _np.array([l is None for l in lost_plane], dtype=bool)
nt_ini = nmult * 6 + 1
x_ini = rout[0, :, :nt_ini]
y_ini = rout[2, :, :nt_ini]
x_freq = _np.full(x_ini.shape[0], _np.nan, dtype=float)
y_freq = _np.full(x_ini.shape[0], _np.nan, dtype=float)
x_ini = x_ini[nlost, :]
y_ini = y_ini[nlost, :]
x_ini -= x_ini.mean(axis=1)[:, None]
y_ini -= y_ini.mean(axis=1)[:, None]
fx1, _ = _pynaff.naff_general(x_ini, nr_ff=1)
fy1, _ = _pynaff.naff_general(y_ini, nr_ff=1)
fx1 = _np.abs(fx1)
fy1 = _np.abs(fy1)
x_freq[nlost] = fx1
y_freq[nlost] = fy1
return x_freq, y_freq
@staticmethod
def _calc_fmap(rout, lost_plane):
"""."""
if not rout.size or len(rout.shape) < 3:
return 7*[None, ]
nmult = rout.shape[2] // 12
left = rout.shape[2] % 12
if nmult < 5:
return 7*[None, ]
if left < 2:
nmult -= 1
nlost = _np.array([l is None for l in lost_plane], dtype=bool)
nt_ini = nmult * 6 + 1
nt_fin = nmult * 12 + 2
x_ini = rout[0, :, :nt_ini]
x_fin = rout[0, :, nt_ini:nt_fin]
y_ini = rout[2, :, :nt_ini]
y_fin = rout[2, :, nt_ini:nt_fin]
x_freq_ini = _np.full(x_ini.shape[0], _np.nan, dtype=float)
x_freq_fin = _np.full(x_ini.shape[0], _np.nan, dtype=float)
y_freq_ini = _np.full(x_ini.shape[0], _np.nan, dtype=float)
y_freq_fin = _np.full(x_ini.shape[0], _np.nan, dtype=float)
x_diffusion = _np.full(x_ini.shape[0], _np.nan, dtype=float)
y_diffusion = _np.full(x_ini.shape[0], _np.nan, dtype=float)
diffusion = _np.full(x_ini.shape[0], _np.nan, dtype=float)
x_ini = x_ini[nlost, :]
x_fin = x_fin[nlost, :]
y_ini = y_ini[nlost, :]
y_fin = y_fin[nlost, :]
x_ini -= x_ini.mean(axis=1)[:, None]
x_fin -= x_fin.mean(axis=1)[:, None]
y_ini -= y_ini.mean(axis=1)[:, None]
y_fin -= y_fin.mean(axis=1)[:, None]
fx1, _ = _pynaff.naff_general(x_ini, nr_ff=1)
fx2, _ = _pynaff.naff_general(x_fin, nr_ff=1)
fy1, _ = _pynaff.naff_general(y_ini, nr_ff=1)
fy2, _ = _pynaff.naff_general(y_fin, nr_ff=1)
fx1 = _np.abs(fx1)
fx2 = _np.abs(fx2)
fy1 = _np.abs(fy1)
fy2 = _np.abs(fy2)
diffx = _np.abs(fx1 - fx2)
diffy = _np.abs(fy1 - fy2)
diff = _np.sqrt(diffx*diffx + diffy*diffy)
x_freq_ini[nlost] = fx1
x_freq_fin[nlost] = fx2
y_freq_ini[nlost] = fy1
y_freq_fin[nlost] = fy2
x_diffusion[nlost] = diffx
y_diffusion[nlost] = diffy
diffusion[nlost] = diff
return x_freq_ini, x_freq_fin, y_freq_ini, y_freq_fin,\
x_diffusion, y_diffusion, diffusion
@staticmethod
def _map_resons2real_plane(
freqx, freqy, diff, resons, maxdist=1e-5, mindiff=1e-3):
"""."""
indcs = []
if maxdist is None or mindiff is None:
return indcs
ind = ~_np.isnan(freqx)
ind1 = diff[ind] > mindiff
freqx = freqx[ind]
freqy = freqy[ind]
idcs = ind.nonzero()[0]
for coefx, coefy, coefc in resons:
if coefy == 0:
dist_to_reson = _np.abs(coefc/coefx - freqx)
else:
tan_theta = -coefx/coefy
reson_y0 = coefc/coefy
parallel_y0 = freqy - tan_theta*freqx
delta_y0 = reson_y0 - parallel_y0
dist_to_reson = _np.abs(delta_y0) / _np.sqrt(1 + tan_theta**2)
ind2 = _np.logical_and(ind1, dist_to_reson < maxdist).nonzero()
indcs.append(idcs[ind2])
return indcs
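    # Geometry note (added for clarity, not part of the original source): for a
    # resonance nx*nu_x + ny*nu_y = c with ny != 0, the line has slope
    # tan_theta = -nx/ny and intercept c/ny. The quantity
    # |delta_y0| / sqrt(1 + tan_theta**2) computed above is therefore the
    # perpendicular distance from the working point (freqx, freqy) to the
    # resonance line, and a point is flagged when this distance is below
    # `maxdist` while its tune diffusion exceeds `mindiff`.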
| {"hexsha": "e73b538111b60395fe8088304e1635f6c76f2c8c", "size": 9550, "ext": "py", "lang": "Python", "max_stars_repo_path": "apsuite/dynap/base.py", "max_stars_repo_name": "carneirofc/apsuite", "max_stars_repo_head_hexsha": "1bbaa44ec6b89f50201790d6fab05c32729db6e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-02-25T01:48:49.000Z", "max_stars_repo_stars_event_max_datetime": "2016-02-25T01:48:49.000Z", "max_issues_repo_path": "apsuite/dynap/base.py", "max_issues_repo_name": "carneirofc/apsuite", "max_issues_repo_head_hexsha": "1bbaa44ec6b89f50201790d6fab05c32729db6e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2015-09-25T12:46:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T12:04:03.000Z", "max_forks_repo_path": "apsuite/dynap/base.py", "max_forks_repo_name": "carneirofc/apsuite", "max_forks_repo_head_hexsha": "1bbaa44ec6b89f50201790d6fab05c32729db6e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-08T13:12:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T17:38:11.000Z", "avg_line_length": 32.8178694158, "max_line_length": 78, "alphanum_fraction": 0.5136125654, "include": true, "reason": "import numpy", "num_tokens": 2783} |
# Defining Custom Display Logic for Your Own Objects
## Overview
In Python, objects can declare their textual representation using the `__repr__` method. IPython expands on this idea and allows objects to declare other, richer representations including:
* HTML
* JSON
* PNG
* JPEG
* SVG
* LaTeX
This Notebook shows how you can add custom display logic to your own classes, so that they can be displayed using these rich representations. There are two ways of accomplishing this:
1. Implementing special display methods such as `_repr_html_`.
2. Registering a display function for a particular type.
In this Notebook we show how both approaches work.
Before we get started, we will import the various display functions for displaying the different formats we will create.
```python
from IPython.display import display
from IPython.display import (
display_html, display_jpeg, display_png,
display_javascript, display_svg, display_latex
)
```
## Implementing special display methods
The main idea of the first approach is that you have to implement special display methods, one for each representation you want to use. Here is a list of the names of the special methods and the values they must return:
* `_repr_html_`: return raw HTML as a string
* `_repr_json_`: return raw JSON as a string
* `_repr_jpeg_`: return raw JPEG data
* `_repr_png_`: return raw PNG data
* `_repr_svg_`: return raw SVG data as a string
* `_repr_latex_`: return LaTeX commands in a string surrounded by "$".
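As a quick illustration (a minimal sketch, not part of the original notebook), any plain Python class can opt in by defining one or more of these methods; rich frontends pick one of the declared representations, while a plain REPL falls back to `__repr__`:
```python
class Fraction(object):
    """Minimal example: a value with LaTeX and HTML representations."""
    def __init__(self, num, den):
        self.num, self.den = num, den
    def _repr_latex_(self):
        # rendered by MathJax-capable frontends such as the notebook
        return r"$\frac{%d}{%d}$" % (self.num, self.den)
    def _repr_html_(self):
        # used by HTML frontends
        return "<sup>%d</sup>&frasl;<sub>%d</sub>" % (self.num, self.den)
Fraction(3, 4)  # the notebook renders this richly; a terminal shows the default __repr__
```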
### Model Citizen: pandas
A prominent example of a package that has IPython-aware rich representations of its objects is [pandas](http://pandas.pydata.org/).
A pandas DataFrame has a rich HTML table representation,
using `_repr_html_`.
```python
import io
import pandas
```
```python
%%writefile data.csv
Date,Open,High,Low,Close,Volume,Adj Close
2012-06-01,569.16,590.00,548.50,584.00,14077000,581.50
2012-05-01,584.90,596.76,522.18,577.73,18827900,575.26
2012-04-02,601.83,644.00,555.00,583.98,28759100,581.48
2012-03-01,548.17,621.45,516.22,599.55,26486000,596.99
2012-02-01,458.41,547.61,453.98,542.44,22001000,540.12
2012-01-03,409.40,458.24,409.00,456.48,12949100,454.53
```
```python
df = pandas.read_csv("data.csv")
pandas.set_option('display.notebook_repr_html', False)
df
```
Rich HTML can be activated again via `pandas.set_option`.
```python
pandas.set_option('display.notebook_repr_html', True)
df
```
```python
lines = df._repr_html_().splitlines()
print("\n".join(lines[:20]))
```
### Exercise
Write a simple `Circle` Python class. Don't even worry about properties such as radius, position, colors, etc. To help you out use the following representations (remember to wrap them in Python strings):
For HTML:
○
For SVG:
<svg width="100px" height="100px">
<circle cx="50" cy="50" r="20" stroke="black" stroke-width="1" fill="white"/>
</svg>
For LaTeX (wrap with `$` and use a raw Python string):
\bigcirc
After you write the class, create an instance and then use `display_html`, `display_svg` and `display_latex` to display those representations.
Tip: you can slightly tweak each representation so you can tell which `_repr_*_` method it came from.
For example, in my solution the SVG representation is blue, and the HTML one shows "`html`" between brackets.
### Solution
Here is my simple `MyCircle` class:
```python
# %load ../../exercises/IPython Kernel/soln/mycircle.py
class MyCircle(object):
def __init__(self, center=(0.0,0.0), radius=1.0, color='blue'):
self.center = center
self.radius = radius
self.color = color
def _repr_html_(self):
return "○ (<b>html</b>)"
def _repr_svg_(self):
return """<svg width="100px" height="100px">
<circle cx="50" cy="50" r="20" stroke="black" stroke-width="1" fill="blue"/>
</svg>"""
def _repr_latex_(self):
return r"$\bigcirc \LaTeX$"
def _repr_javascript_(self):
return "alert('I am a circle!');"
```
Now create an instance and use the display methods:
```python
c = MyCircle()
```
```python
display_html(c)
```
```python
display_svg(c)
```
```python
display_latex(c)
```
```python
display_javascript(c)
```
## Adding IPython display support to existing objects
When you are directly writing your own classes, you can adapt them for display in IPython by following the above example. But in practice, we often need to work with existing code we can't modify. We now illustrate how to add these kinds of extended display capabilities to existing objects. To continue with our example above, we will add a PNG representation to our `Circle` class using Matplotlib.
### Model citizen: sympy
[SymPy](http://sympy.org) is another model citizen that defines rich representations of its object.
Unlike pandas above, sympy registers display formatters via IPython's display formatter API, rather than declaring `_repr_mime_` methods.
```python
from sympy import Rational, pi, exp, I, symbols
x, y, z = symbols("x y z")
```
```python
r = Rational(3,2)*pi + exp(I*x) / (x**2 + y)
r
```
SymPy provides an `init_printing` function that sets up advanced $\LaTeX$
representations of its objects.
```python
from sympy.interactive.printing import init_printing
init_printing()
r
```
To add a display method to an existing class, we must use IPython's display formatter API. Here we show all of the available formatters:
```python
ip = get_ipython()
for mime, formatter in ip.display_formatter.formatters.items():
print('%24s : %s' % (mime, formatter.__class__.__name__))
```
Let's grab the PNG formatter:
```python
png_f = ip.display_formatter.formatters['image/png']
```
We will use the `for_type` method to register our display function.
```python
png_f.for_type?
```
As the docstring describes, we need to define a function that takes the object as a parameter and returns the raw PNG data.
```python
%matplotlib inline
import matplotlib.pyplot as plt
```
```python
class AnotherCircle(object):
def __init__(self, radius=1, center=(0,0), color='r'):
self.radius = radius
self.center = center
self.color = color
def __repr__(self):
return "<%s Circle with r=%s at %s>" % (
self.color,
self.radius,
self.center,
)
c = AnotherCircle()
c
```
```python
from IPython.core.pylabtools import print_figure
def png_circle(circle):
"""Render AnotherCircle to png data using matplotlib"""
fig, ax = plt.subplots()
patch = plt.Circle(circle.center,
radius=circle.radius,
fc=circle.color,
)
ax.add_patch(patch)
plt.axis('scaled')
data = print_figure(fig, 'png')
# We MUST close the figure, otherwise IPython's display machinery
# will pick it up and send it as output, resulting in a double display
plt.close(fig)
return data
```
```python
c = AnotherCircle()
print(repr(png_circle(c)[:10]))
```
Now we register the display function for the type:
```python
png_f.for_type(AnotherCircle, png_circle)
```
Now all `AnotherCircle` instances have PNG representations!
```python
c2 = AnotherCircle(radius=2, center=(1,0), color='g')
c2
```
```python
display_png(c2)
```
## Returning the object
```python
# for demonstration purposes, I do the same with a circle that has no _repr_javascript_ method
class MyNoJSCircle(MyCircle):
def _repr_javascript_(self):
return
cNoJS = MyNoJSCircle()
```
Of course you can still return the object directly, and this will compute all the representations, store them in the notebook and show you the appropriate one.
```python
cNoJS
```
Or just use `display(object)` if you are in the middle of a loop
```python
for i in range(3):
display(cNoJS)
```
The advantage of using `display()` over `display_*()` is that all representations are stored in the notebook document and notebook file, so they are available to other frontends or post-processing tools like `nbconvert`.
Let's compare `display()` vs `display_html()` for our circle in the notebook web app; we'll see the difference in nbconvert later.
```python
print("I should see a nice html circle in web-app, but")
print("nothing if the format I'm viewing the notebook in")
print("does not support html")
display_html(cNoJS)
```
```python
print("Whatever the format I will see a representation")
print("of my circle")
display(cNoJS)
```
```python
print("Same if I return the object")
cNoJS
```
```python
print("But not if I print it")
print(cNoJS)
```
## Cleanup
```python
!rm -f data.csv
```
```python
```
| {"hexsha": "3e9eba577a70ce0fc1f025f6983e8cc93696f065", "size": 18408, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "001-Jupyter/001-Tutorials/003-IPython-in-Depth/examples/IPython Kernel/Old Custom Display Logic.ipynb", "max_stars_repo_name": "jhgoebbert/jupyter-jsc-notebooks", "max_stars_repo_head_hexsha": "bcd08ced04db00e7a66473b146f8f31f2e657539", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "001-Jupyter/001-Tutorials/003-IPython-in-Depth/examples/IPython Kernel/Old Custom Display Logic.ipynb", "max_issues_repo_name": "jhgoebbert/jupyter-jsc-notebooks", "max_issues_repo_head_hexsha": "bcd08ced04db00e7a66473b146f8f31f2e657539", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "001-Jupyter/001-Tutorials/003-IPython-in-Depth/examples/IPython Kernel/Old Custom Display Logic.ipynb", "max_forks_repo_name": "jhgoebbert/jupyter-jsc-notebooks", "max_forks_repo_head_hexsha": "bcd08ced04db00e7a66473b146f8f31f2e657539", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-13T18:49:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T18:49:12.000Z", "avg_line_length": 22.7821782178, "max_line_length": 408, "alphanum_fraction": 0.5373750543, "converted": true, "num_tokens": 2238} |
# Core Pkgs
import streamlit as st
# NLP Pkgs
import spacy_streamlit
import spacy
#nlp = spacy.load('en')
import os
from PIL import Image
from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords
import trafilatura
#import pdfplumber
import en_core_web_md
#import zipfile
#!python -m spacy download en_core_web_lg
##################################################################################################
# '''
# import torch
# from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
# import numpy as np
# import contextlib
# import plotly.express as px
# import pandas as pd
# from PIL import Image
# import datetime
# import os
# import psutil
# MODEL_DESC = {
# 'Bart MNLI': """Bart with a classification head trained on MNLI.\n\nSequences are posed as NLI premises and topic labels are turned into premises, i.e. `business` -> `This text is about business.`""",
# 'Bart MNLI + Yahoo Answers': """Bart with a classification head trained on MNLI and then further fine-tuned on Yahoo Answers topic classification.\n\nSequences are posed as NLI premises and topic labels are turned into premises, i.e. `business` -> `This text is about business.`""",
# 'XLM Roberta XNLI (cross-lingual)': """XLM Roberta, a cross-lingual model, with a classification head trained on XNLI. Supported languages include: _English, French, Spanish, German, Greek, Bulgarian, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, Hindi, Swahili, and Urdu_.
# Note that this model seems to be less reliable than the English-only models when classifying longer sequences.
# Examples were automatically translated and may contain grammatical mistakes.
# Sequences are posed as NLI premises and topic labels are turned into premises, i.e. `business` -> `This text is about business.`""",
# }
# ZSL_DESC = """Recently, the NLP science community has begun to pay increasing attention to zero-shot and few-shot applications, such as in the [paper from OpenAI](https://arxiv.org/abs/2005.14165) introducing GPT-3. This demo shows how 🤗 Transformers can be used for zero-shot topic classification, the task of predicting a topic that the model has not been trained on."""
# CODE_DESC = """```python
# from transformers import pipeline
# classifier = pipeline('zero-shot-classification',
# model='{}')
# hypothesis_template = 'This text is about {{}}.' # the template used in this demo
# classifier(sequence, labels,
# hypothesis_template=hypothesis_template,
# multi_class=multi_class)
#{{'sequence' ..., 'labels': ..., 'scores': ...}}
# ```"""
# model_ids = {
# 'Bart MNLI': 'facebook/bart-large-mnli',
# 'Bart MNLI + Yahoo Answers': 'joeddav/bart-large-mnli-yahoo-answers',
# 'XLM Roberta XNLI (cross-lingual)': 'joeddav/xlm-roberta-large-xnli'
# }
# device = 0 if torch.cuda.is_available() else -1
# @st.cache(allow_output_mutation=True)
# def load_models():
# return {id: AutoModelForSequenceClassification.from_pretrained(id) for id in model_ids.values()}
# models = load_models()
# @st.cache(allow_output_mutation=True, show_spinner=False)
# def load_tokenizer(tok_id):
# return AutoTokenizer.from_pretrained(tok_id)
# @st.cache(allow_output_mutation=True, show_spinner=False)
# def get_most_likely(nli_model_id, sequence, labels, hypothesis_template, multi_class, do_print_code):
# classifier = pipeline('zero-shot-classification', model=models[nli_model_id], tokenizer=load_tokenizer(nli_model_id), device=device)
# outputs = classifier(sequence, labels, hypothesis_template, multi_class)
# return outputs['labels'], outputs['scores']
# def load_examples(model_id):
# model_id_stripped = model_id.split('/')[-1]
# df = pd.read_json(f'texts-{model_id_stripped}.json')
# names = df.name.values.tolist()
# mapping = {df['name'].iloc[i]: (df['text'].iloc[i], df['labels'].iloc[i]) for i in range(len(names))}
# names.append('Custom')
# mapping['Custom'] = ('', '')
# return names, mapping
# def plot_result(top_topics, scores):
# top_topics = np.array(top_topics)
# scores = np.array(scores)
# scores *= 100
# fig = px.bar(x=scores, y=top_topics, orientation='h',
# labels={'x': 'Confidence', 'y': 'Label'},
# text=scores,
# range_x=(0,115),
# title='Top Predictions',
# color=np.linspace(0,1,len(scores)),
# color_continuous_scale='GnBu')
# fig.update(layout_coloraxis_showscale=False)
# fig.update_traces(texttemplate='%{text:0.1f}%', textposition='outside')
# st.plotly_chart(fig)
# '''
#################################################################################################
@st.cache(suppress_st_warning=True)
def model_loader(link,foldername):
"""
    Download a zipped, trained spaCy model, extract it and return the path of the extracted folder.
"""
import requests
import zipfile
import tempfile
import spacy
dir=tempfile.gettempdir()
#link= "https://github.com/fm1320/IC_NLP/releases/download/V3/V3-20210203T001829Z-001.zip"
results = requests.get(link)
#with open(dir, 'wb') as f:
fp = tempfile.TemporaryFile()
fp.write(results.content)
file = zipfile.ZipFile(fp)
with tempfile.TemporaryDirectory() as tmpdirname:
file.extractall(path=dir)
#print(dir)
end_path=os.path.join(dir, foldername)
files = os.listdir(end_path)
#for file in files:
#print(file)
return end_path
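# Illustrative sketch (not called anywhere in the app): how model_loader and
# spacy.load are meant to compose. The release URL and folder name below are the
# same ones used in main(); any zip that contains a trained spaCy model folder
# should work the same way.
def _example_load_custom_model():
    demo_link = "https://github.com/fm1320/IC_NLP/releases/download/V3/V3-20210203T001829Z-001.zip"
    model_path = model_loader(demo_link, "V3")  # download + extract to a temp dir
    return spacy.load(model_path)  # the returned folder path loads like any installed spaCy model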
def finder(text,user_assay):
import re
import pandas as pd
file_path="./all_assays.csv"
assay=[]
df = pd.read_csv(file_path,dtype= str, encoding='latin1') # READ CSV AS STRING !!!
assay = df['1H NMR'].values.tolist()
assay = list(map(''.join, assay)) # convert list of lists to list of strings
nuovo=[]
pattern1=r'[^.?!]*(?<=[.?\s!])%s(?=[\s.?!])[^.?!]*[.?!]' #extracts full sentences that contain a word
pattern2=r'\b%s\b' #extracts a given word
index=[]
sentc=[]
for i in range(len(assay)):
tmp=re.findall(pattern2 %assay[i],text, flags=re.IGNORECASE)
if (len(tmp)>0):
index.append(i)
nuovo.append(tmp)
tmp1=re.findall(pattern1 %assay[i],text, flags=re.IGNORECASE)
#st.write("Sentences that have the assay:" ,tmp1)
if (len(tmp1)>0):
for k in range(len(tmp1)-1):
sentc.append(tmp1[k])
res_list = [assay[j] for j in index]
#print("Nuovo:", nuovo)
res_list=list(set(res_list))
st.write("The assays mentioned are: \n ", res_list)
#sentc=list(set(sentc))
#st.write("Some sentences that mention an assay:", sentc)
#st.write("Here are some sentences that mention an assay:")
return sentc
def main():
"""A Simple NLP app with Spacy-Streamlit"""
st.title("Text processing app for biological scientific papers")
menu = ["Home","NER","Summarization","Zero shot learning"]
choice = st.sidebar.selectbox("Menu",menu)
if choice == "Home":
link = '[GitHub page](https://github.com/fm1320/IC_NLP)'
st.write("""This application was made as part of a postgradute program at Imeprial College London. The details about the traning of the models, data and the techniques can be found at my personal github page provided below.""")
st.markdown(link, unsafe_allow_html=True)
st.write("""<---- Choose and try out one of the NLP tasks available from the drop down menu on the left""")
st.markdown("![Alt Text](https://upload.wikimedia.org/wikipedia/en/thumb/5/5f/Imperial_College_London_monotone_logo.jpg/320px-Imperial_College_London_monotone_logo.jpg)")
st.write("*Text examples source: Garcia-Perez, I., Posma, J.M., Serrano-Contreras, J.I. et al. Identifying unknown metabolites using NMR-based metabolic profiling techniques. Nat Protoc 15, 2538–2567 (2020). https://doi.org/10.1038/s41596-020-0343-3")
#st.subheader("Tokenization")
#raw_text = st.text_area("Your Text","Enter Text Here")
#docx = nlp(raw_text)
#if st.button("Tokenize"):
# spacy_streamlit.visualize_tokens(docx,attrs=['text','pos_','dep_','ent_type_'])
elif choice == "NER":
st.subheader("Named Entity Recognition")
# Add a selectbox to the sidebar:
sel = st.sidebar.selectbox("Which NER model would you like to use ?", ["SpaCy Bloom embedding DL","Spacy core en default","String/Regex matching"])
# if sel== "SciSpacy":
#import scispacy
# nlp = spacy.load("en_core_sci_sm")
# elif sel=="DL small":
# nlp = spacy.load('./BiA') #Location of directory of spacy model
if sel=="SpaCy Bloom embedding DL":
path=model_loader("https://github.com/fm1320/IC_NLP/releases/download/V3/V3-20210203T001829Z-001.zip", "V3")
nlp = spacy.load(path)
elif sel=="Spacy core en default":
import en_core_web_sm
nlp = en_core_web_sm.load()
st.write("*This is an example of a default model with general entities. Choose one of the other two to see assay recognition.")
elif sel=="String/Regex matching":
#r_text = st.text_area("Enter text for entity recognition with Regex","Text here")
r_text = st.text_area("Enter text for entity recognition with Regex","However, it is very challenging to elucidate the structure of all metabolites present in biofluid samples. The large number of unknown or unidentified metabolites with high dynamic concentration range, extensive chemical diversity and different physical properties poses a substantial analytical challenge. Metabolic profiling studies are often geared toward finding differences in the levels of metabolites that are statistically correlated with a clinical outcome, dietary intervention or toxic exposure when compared to a control group. The chemical assignment of this reduced panel of biologically relevant metabolites is possible using statistical spectroscopic tools9–11, two-dimensional (2D) NMR spectroscopic analysis12–14, separation and pre-concentration techniques11, various chromatographic and mass spectroscopy (MS)-based analytical platforms.")
iz=finder(r_text,"")
######################################
# '''
# model_id = model_ids[model_desc]
# ex_names, ex_map = load_examples(model_id)
# st.title('Zero Shot Topic Classification')
# sequence = st.text_area('Text', ex_map[example][0], key='sequence', height=height)
# labels = st.text_input('Possible topics (separated by `,`)', ex_map[example][1], max_chars=1000)
# multi_class = st.checkbox('Allow multiple correct topics', value=True)
# hypothesis_template = "This text is about {}."
# labels = list(set([x.strip() for x in labels.strip().split(',') if len(x.strip()) > 0]))
# if len(labels) == 0 or len(sequence) == 0:
# st.write('Enter some text and at least one possible topic to see predictions.')
# return
# if do_print_code:
# st.markdown(CODE_DESC.format(model_id))
# with st.spinner('Classifying...'):
# top_topics, scores = get_most_likely(model_id, sequence, labels, hypothesis_template, multi_class, do_print_code)
# plot_result(top_topics[::-1][-10:], scores[::-1][-10:])
# if "socat" not in [p.name() for p in psutil.process_iter()]:
# os.system('socat tcp-listen:8000,reuseaddr,fork tcp:localhost:8001 &')
# '''
##########################################
method = st.sidebar.selectbox("Choose input method (recommended:text box)", ["Text box", "URL"])
if method == "Text box" and sel !="String/Regex matching":
raw_text = st.text_area("Enter text for entity recognition","However, it is very challenging to elucidate the structure of all metabolites present in biofluid samples. The large number of unknown or unidentified metabolites with high dynamic concentration range, extensive chemical diversity and different physical properties poses a substantial analytical challenge. Metabolic profiling studies are often geared toward finding differences in the levels of metabolites that are statistically correlated with a clinical outcome, dietary intervention or toxic exposure when compared to a control group. The chemical assignment of this reduced panel of biologically relevant metabolites is possible using statistical spectroscopic tools9–11, two-dimensional (2D) NMR spectroscopic analysis12–14, separation and pre-concentration techniques11, various chromatographic and mass spectroscopy (MS)-based analytical platforms.")
docx = nlp(raw_text)
spacy_streamlit.visualize_ner(docx,labels=nlp.get_pipe('ner').labels)
if method == "URL" and sel !="String/Regex matching":
user_input = st.text_input("Enter page URL of an HTML file")
if user_input is not None:
downloaded = trafilatura.fetch_url(user_input)
raw_text=trafilatura.extract(downloaded)
raw_text=str(raw_text)
docx = nlp(raw_text)
spacy_streamlit.visualize_ner(docx,labels=nlp.get_pipe('ner').labels)
elif choice == "Summarization":
#Textbox for text user is entering
st.subheader("Enter the text you'd like to summarize (Here is an example that can be pasted in the text box!)")
raw_text = st.text_area('''
For over three decades, NMR spectroscopy has been widely applied in metabolic profiling and phenotyping1,2,3. The technology allows for accurate high-throughput screening of thousands of metabolites (small molecular species <1 kDa) present in a biological sample4,5,6,7, such as urine, plasma, feces, saliva and multiple types of tissues, as well as food8 and plant extracts. NMR spectroscopy provides robust multi-metabolite fingerprints of hundreds of metabolites in many biofluids, many of which are listed in spectral databases, particularly for common biofluids in urine and blood.
However, it is very challenging to elucidate the structure of all metabolites present in biofluid samples. The large number of unknown or unidentified metabolites with high dynamic concentration range, extensive chemical diversity and different physical properties poses a substantial analytical challenge. Metabolic profiling studies are often geared toward finding differences in the levels of metabolites that are statistically correlated with a clinical outcome, dietary intervention or toxic exposure when compared to a control group. The chemical assignment of this reduced panel of biologically relevant metabolites is possible using statistical spectroscopic tools9,10,11, two-dimensional (2D) NMR spectroscopic analysis12,13,14, separation and pre-concentration techniques11, various chromatographic and mass spectroscopy
(MS)-based analytical platforms15,16 and existing spectral databases. However, the structural elucidation of NMR resonances relating to unknown molecules remains a major bottleneck in metabolic profiling studies. As a result, many published NMR-based metabolic profiling studies still continue to include putatively identified metabolites and unknown features without providing unequivocal proof of assignment, or they simply label peaks as ‘unknown’, thereby potentially missing key mechanistic information.
To avoid the problem of multiple entries for the same compound in databases under different names, a community-wide effort is underway to develop better, faster and more standardized metabolite identification strategies, such as implementing standard nomenclature for newly identified metabolites using the International Chemical Identifier (InChI)17. Sumner et al. proposed a four-level system18 for assigning a confidence level to newly identified metabolites in metabolic profiling studies: 1) positively identified compounds (with a name, a known structure, a CAS number or an InChI); 2) putatively annotated compounds using spectral similarity with databases but without chemical reference standard; 3) putatively identified chemicals within a compound class; and 4) unknown compounds. Wishart et al. proposed a further distinction for those metabolites: the ‘known unknowns’ and the ‘unknown unknowns’19.
A ‘known unknown’ corresponds to a metabolite that has not yet been identified in the sample of interest but that has been previously described in a database or in the literature, whereas a truly new compound, an ‘unknown unknown’, has never been described or formally identified.
Commercial packages, such as Bruker’s AMIX TM software, and open-source software20, such as COLMAR (http://spinportal.magnet.fsu.edu/), can help with identifying these ‘known unknowns’, and some of these software applications are capable of automatically or semi-automatically annotating a limited number of compounds in a biological sample. However, even with automated annotation, the software still requires manual revision and can be prone to inconsistent interpretation and assignment by different individuals19. Most software packages and databases do not support identification of ‘unknown unknowns’, although a few platforms, such as AMIX, include prediction software to aid the identification of new compounds.
Open-access databases have been created for researchers to deposit information relating to newly identified compounds. Most of the available databases, such as the Human Metabolome Database (HMDB)21, the BioMagResBank (BMRB)22, PRIMe server23, COLMAR 1H(13C)-TOCCATA and Bruker-AMIX (http://www.bruker-biospin.com/amix.html), contain chemical shift values, relative intensity and peak shape information for 1H-NMR and often 13C-NMR data to support metabolite identification. However, all databases contain inherent errors, such as incorrect structures for the metabolites, incorrect names and incorrect assigments. This problem is compounded further by the effect that experimental conditions, such as the pH or ionic content of the sample, can have on the chemical shift of a metabolite.
Some of these databases, such as HMDB, provide complementary information, including MS assignments, which can be useful for checking potential errors in assignments of NMR peaks. However, although there are resources available to aid assignment of candidate biomarkers, there is no panacea for accurate metabolite identification, and there remains a clear unmet need for improved strategies for metabolite identification and curation for NMR spectral profiling.
''') #text is stored in this variable
summWords = summarize(raw_text)
st.subheader("Summary")
st.write(summWords)
elif choice == "Zero shot learning":
st.write("""Due to resource constraints, this demo is moved to the link below:""")
link = '[Zero shot learning for NER demo](https://colab.research.google.com/drive/1zKDbjLo9vyEuSRotSSVwFLyaA61o1ceG#scrollTo=hkfE6NRA0Dzy)'
st.markdown(link, unsafe_allow_html=True)
st.write("*Thanks to Hugging face's wonderful model repository and inspired by Joe Davison (researcher at hugging face)")
hug = '[Hugging face](https://huggingface.co/)'
st.markdown(hug, unsafe_allow_html=True)
if __name__ == '__main__':
main() | {"hexsha": "51f6a99a412388a572a907fa704c072b216c585e", "size": 19352, "ext": "py", "lang": "Python", "max_stars_repo_path": "streamlit_appv1.py", "max_stars_repo_name": "fm1320/ICL", "max_stars_repo_head_hexsha": "ce43739b202ee1253e6c3b80debc0652d712d4c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-27T15:07:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-27T15:07:24.000Z", "max_issues_repo_path": "streamlit_appv1.py", "max_issues_repo_name": "fm1320/ICL", "max_issues_repo_head_hexsha": "ce43739b202ee1253e6c3b80debc0652d712d4c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-17T00:37:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-17T00:37:27.000Z", "max_forks_repo_path": "streamlit_appv1.py", "max_forks_repo_name": "fm1320/ICL", "max_forks_repo_head_hexsha": "ce43739b202ee1253e6c3b80debc0652d712d4c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 68.3816254417, "max_line_length": 938, "alphanum_fraction": 0.7165150889, "include": true, "reason": "import numpy", "num_tokens": 4446} |
import tensorflow as tf
import numpy as np
import argparse
import socket
import importlib
import time
import os
import scipy.misc
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import pc_util
import pdb
#importing the libraries
TEST_FILES = provider.getDataFiles(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/kittiTest.txt'))
MODEL = importlib.import_module('pointnet_cls')
#Importing the test and the model
PC_SIZE = 128
BATCH_SIZE = 1
NUM_CLASSES = 3
def evaluate(is_training):
    #Build the graph nodes (placeholders, model and loss) on the GPU
with tf.device('/gpu:0'):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, PC_SIZE)
is_training_pl = tf.placeholder(tf.bool, shape=())
        #Build the classification model and its end points
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
loss = MODEL.get_loss(pred, labels_pl, end_points)
saver = tf.train.Saver()
    #Compute the loss, fetch the end points and create a saver for the trained weights
    #THE MODEL CALL IS ABOVE
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = True
sess = tf.Session(config=config)
#Preparing the session
error_cnt = 0
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
#fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
#preparing the estimation
for PC in range(len(TEST_FILES)):
current_data, current_label = provider.loadDataFile(TEST_FILES[PC])
current_data = current_data[:, 0:PC_SIZE, :]
current_label = np.squeeze(current_label)
print(current_label)
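        # Note (added for clarity; the original file stops short of this): a full
        # evaluation would restore the trained weights with saver.restore(sess, <checkpoint path>),
        # then feed current_data/current_label through pointclouds_pl and labels_pl
        # in sess.run([pred, loss], ...) and accumulate total_correct / total_seen per class.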
if __name__ == '__main__':
with tf.Graph().as_default():
is_training = False
evaluate(is_training)
| {"hexsha": "28a1b541d33114d422c59b7eb8bb6b0e9fd5ab56", "size": 2113, "ext": "py", "lang": "Python", "max_stars_repo_path": "pointnet/evaluatorOne.py", "max_stars_repo_name": "ErikNoren92/Exjobb", "max_stars_repo_head_hexsha": "eb4b36d2241043a7b81f6bf9ff5596176aebcd27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pointnet/evaluatorOne.py", "max_issues_repo_name": "ErikNoren92/Exjobb", "max_issues_repo_head_hexsha": "eb4b36d2241043a7b81f6bf9ff5596176aebcd27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pointnet/evaluatorOne.py", "max_forks_repo_name": "ErikNoren92/Exjobb", "max_forks_repo_head_hexsha": "eb4b36d2241043a7b81f6bf9ff5596176aebcd27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1733333333, "max_line_length": 105, "alphanum_fraction": 0.6819687648, "include": true, "reason": "import numpy,import scipy", "num_tokens": 498} |
import pygame
import numpy as np
from Source import UI_functions as UI
from Source import battleships_functions_bot as bfb
from Source import battleships_functions_check as bfc
def Play_Game(screen, bg, cfg):
#Init
screen, bg = UI.Update_Screen_Values(screen, bg)
pygame.time.Clock().tick(cfg["Basic"].getint("FPS"))
#Resources (Images, Icons, Fonts)
font = pygame.font.Font("Assets/Font/impact.ttf", 26)
font_b = pygame.font.Font("Assets/Font/impact.ttf", 40)
font_c = pygame.font.SysFont('segoeuisymbol', 34)
square = pygame.image.load("Assets/Images/Square.png")
rectangle = pygame.image.load("Assets/Images/Rectangle.png")
grid = pygame.image.load("Assets/Images/WhiteGrid.png")
#Initial Values
RUNNING = True
SHOW_HAHA = True
CANT_GENERATE1 = False
CANT_GENERATE2 = False
CLICK = False
bfb.load_config_file(cfg)
Bmap1 = np.zeros((cfg["Rules"].getint("Y_RANGE"),cfg["Rules"].getint("X_RANGE")), dtype = np.int32)
Bmap2 = np.zeros((cfg["Rules"].getint("Y_RANGE"),cfg["Rules"].getint("X_RANGE")), dtype = np.int32)
Bmap1, CANT_GENERATE1 = bfb.generate_bot_ships(Bmap1)
Bmap2, CANT_GENERATE2 = bfb.generate_bot_ships(Bmap2)
rects, images_pos, text_pos = UI.Rect_AI_AI_Set()
rect_map = UI.Rect_Player_AI_Map()
images = [rectangle, square, rectangle, rectangle]
texts = [font.render("PLAY", True, (52, 52, 54)),
font.render("χ", True, (52, 52, 54)),
font.render("AI 1", True, (52, 52, 54)),
font.render("AI 2", True, (52, 52, 54)),
font_c.render("⟳", True, (52, 52, 54)),
font_c.render("⟳", True, (52, 52, 54)),
font_b.render(cfg["Text"]["AI1"], True, (255, 255, 255)),
font_b.render(cfg["Text"]["AI2"], True, (255, 255, 255)),
font_b.render(cfg["Text"]["SCORE"], True, (255, 255, 255)),
font_b.render(str(cfg["Points"].getint("AI1_PTS")) + " - " + str(cfg["Points"].getint("AI2_PTS")), True, (255, 255, 255))]
#InGame
while RUNNING:
#Screen properties per update
mx, my = pygame.mouse.get_pos()
screen.blit(bg,(0,0))
#Draw functions
UI.Draw_Left_Map_Set(screen, Bmap1, grid)
UI.Draw_Right_Map_Set(screen, Bmap2, grid)
UI.Draw_Pos(screen, images, images_pos)
UI.Draw_Pos(screen, texts, text_pos)
#Clickable buttons
if CLICK:
if rects[0].collidepoint((mx,my)):
return Bmap1, Bmap2, True
if CLICK:
if rects[1].collidepoint((mx,my)):
return None, None, False
if rects[2].collidepoint((mx,my)):
Bmap1 = np.zeros((cfg["Rules"].getint("Y_RANGE"),cfg["Rules"].getint("X_RANGE")), dtype = np.int32)
Bmap1, CANT_GENERATE1 = bfb.generate_bot_ships(Bmap1)
if rects[3].collidepoint((mx,my)):
Bmap2 = np.zeros((cfg["Rules"].getint("Y_RANGE"),cfg["Rules"].getint("X_RANGE")), dtype = np.int32)
Bmap2, CANT_GENERATE2 = bfb.generate_bot_ships(Bmap2)
#Events and update
pygame.display.update()
CLICK = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
RUNNING = False
pygame.quit()
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
CLICK = True | {"hexsha": "ac46915ed60f6f977d96ec5919d4614d21f262fd", "size": 3517, "ext": "py", "lang": "Python", "max_stars_repo_path": "Battleships/Source/Set_Ai_ai_game.py", "max_stars_repo_name": "Dorthion/Python-Minigames", "max_stars_repo_head_hexsha": "91ba20d42ac7376ccaad60cd948a576800085623", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Battleships/Source/Set_Ai_ai_game.py", "max_issues_repo_name": "Dorthion/Python-Minigames", "max_issues_repo_head_hexsha": "91ba20d42ac7376ccaad60cd948a576800085623", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Battleships/Source/Set_Ai_ai_game.py", "max_forks_repo_name": "Dorthion/Python-Minigames", "max_forks_repo_head_hexsha": "91ba20d42ac7376ccaad60cd948a576800085623", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8953488372, "max_line_length": 135, "alphanum_fraction": 0.5862951379, "include": true, "reason": "import numpy", "num_tokens": 963} |
# -*- coding: utf-8 -*-
#http://www.cnblogs.com/huadongw/p/6159408.html
# Data resampling
#python SampleData.py -s 0 trainJ/train.txt 64 trainJ/trainSample.txt
#
# Calling a shell script from Python
# !/usr/bin/python
# import sys
# import os
# print "start call sh file"
# os.system('./fromsh.sh')
# print "end call sh file"
#
# Calling Python from a shell script
# !/bin/bash
# echo 'start call py'
# ./frompy.py
# echo 'end call py'
#
#
import numpy as np
from sklearn.utils import check_random_state
import os, sys, math, random
from collections import defaultdict
if sys.version_info[0] >= 3:
xrange = range
def exit_with_help(argv):
print("""\
Usage: {0} [options] dataset subclass_size [output]
options:
-s method : method of selection (default 0)
0 -- over-sampling & under-sampling given subclass_size
1 -- over-sampling (subclass_size: any value)
2 -- under-sampling(subclass_size: any value)
output : balance set file (optional)
If output is omitted, the subset will be printed on the screen.""".format(argv[0]))
exit(1)
def process_options(argv):
argc = len(argv)
if argc < 3:
exit_with_help(argv)
# default method is over-sampling & under-sampling
method = 0
BalanceSet_file = sys.stdout
i = 1
while i < argc:
if argv[i][0] != "-":
break
if argv[i] == "-s":
i = i + 1
method = int(argv[i])
if method not in [0,1,2]:
print("Unknown selection method {0}".format(method))
exit_with_help(argv)
i = i + 1
dataset = argv[i]
BalanceSet_size = int(argv[i+1])
if i+2 < argc:
BalanceSet_file = open(argv[i+2],'w')
return dataset, BalanceSet_size, method, BalanceSet_file
def stratified_selection(dataset, subset_size, method):
labels = [line.split(None,1)[0] for line in open(dataset)]
label_linenums = defaultdict(list)
for i, label in enumerate(labels):
label_linenums[label] += [i]
l = len(labels)
remaining = subset_size
ret = []
# classes with fewer data are sampled first;
label_list = sorted(label_linenums, key=lambda x: len(label_linenums[x]))
min_class = label_list[0]
maj_class = label_list[-1]
min_class_num = len(label_linenums[min_class])
maj_class_num = len(label_linenums[maj_class])
random_state = check_random_state(42)
for label in label_list:
linenums = label_linenums[label]
label_size = len(linenums)
if method == 0:
if label_size<subset_size:
ret += linenums
subnum = subset_size-label_size
else:
subnum = subset_size
ret += [linenums[i] for i in random_state.randint(low=0, high=label_size,size=subnum)]
elif method == 1:
if label == maj_class:
ret += linenums
continue
else:
ret += linenums
subnum = maj_class_num-label_size
ret += [linenums[i] for i in random_state.randint(low=0, high=label_size,size=subnum)]
elif method == 2:
if label == min_class:
ret += linenums
continue
else:
subnum = min_class_num
ret += [linenums[i] for i in random_state.randint(low=0, high=label_size,size=subnum)]
random.shuffle(ret)
return ret
def sampledata(dataset, subset_size, method, subset):
selected_lines = []
selected_lines = stratified_selection(dataset, subset_size,method)
#select instances based on selected_lines
subset_file = open(subset,'w')
dataset = open(dataset,'r')
datalist = dataset.readlines()
for i in selected_lines:
subset_file.write(datalist[i])
subset_file.close()
dataset.close()
def main(argv=sys.argv):
dataset, subset_size, method, subset_file = process_options(argv)
selected_lines = []
selected_lines = stratified_selection(dataset, subset_size,method)
#select instances based on selected_lines
dataset = open(dataset,'r')
datalist = dataset.readlines()
for i in selected_lines:
subset_file.write(datalist[i])
subset_file.close()
dataset.close()
if __name__ == '__main__':
main(sys.argv) | {"hexsha": "0da62838e6709f0bca5a9f36d5156e4155288716", "size": 4314, "ext": "py", "lang": "Python", "max_stars_repo_path": "SampleData.py", "max_stars_repo_name": "xyj77/dataLoader", "max_stars_repo_head_hexsha": "c80dcdb355ff07c1e9e1029e4765ac9546b31522", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2018-04-25T01:52:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-04T04:25:17.000Z", "max_issues_repo_path": "SampleData.py", "max_issues_repo_name": "xyj77/dataLoader", "max_issues_repo_head_hexsha": "c80dcdb355ff07c1e9e1029e4765ac9546b31522", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-05-29T01:40:48.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-13T02:07:26.000Z", "max_forks_repo_path": "SampleData.py", "max_forks_repo_name": "xyj77/dataLoader", "max_forks_repo_head_hexsha": "c80dcdb355ff07c1e9e1029e4765ac9546b31522", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2018-04-25T01:52:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-27T15:26:09.000Z", "avg_line_length": 29.5479452055, "max_line_length": 102, "alphanum_fraction": 0.6214649977, "include": true, "reason": "import numpy", "num_tokens": 1089} |
[STATEMENT]
lemma (in normal) oVeblen_oLimit:
"oVeblen F (oLimit f) = ordering (\<Inter>n. range (oVeblen F (f n)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. oVeblen F (oLimit f) = OrdinalVeblen.ordering (\<Inter>n. range (oVeblen F (f n)))
[PROOF STEP]
apply (unfold oVeblen_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. OrdinalVeblen.ordering (critical_set (range F) (oLimit f)) = OrdinalVeblen.ordering (\<Inter>n. range (OrdinalVeblen.ordering (critical_set (range F) (f n))))
[PROOF STEP]
apply (subst critical_set_oLimit)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. OrdinalVeblen.ordering (\<Inter>n. critical_set (range F) (f n)) = OrdinalVeblen.ordering (\<Inter>n. range (OrdinalVeblen.ordering (critical_set (range F) (f n))))
[PROOF STEP]
apply (cut_tac normal_set_range)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. normal_set (range F) \<Longrightarrow> OrdinalVeblen.ordering (\<Inter>n. critical_set (range F) (f n)) = OrdinalVeblen.ordering (\<Inter>n. range (OrdinalVeblen.ordering (critical_set (range F) (f n))))
[PROOF STEP]
apply (simp add: normal_set.range_ordering[OF normal_set_critical_set])
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done | {"llama_tokens": 495, "file": "Ordinal_OrdinalVeblen", "length": 5} |
[STATEMENT]
lemma mset_le_single_iff[iff]: "{#x#} \<le> {#y#} \<longleftrightarrow> x \<le> y" for x y :: "'a::order"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ({#x#} \<le> {#y#}) = (x \<le> y)
[PROOF STEP]
unfolding less_eq_multiset\<^sub>H\<^sub>O
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>ya. count {#y#} ya < count {#x#} ya \<longrightarrow> (\<exists>xa>ya. count {#x#} xa < count {#y#} xa)) = (x \<le> y)
[PROOF STEP]
by force | {"llama_tokens": 213, "file": null, "length": 2} |
SUBROUTINE PIECHT(XORIG,YORIG,RADIUS,VALUES,NSECS)
C
C ------------------------------------------------
C ROUTINE NO. ( 86) VERSION (A8.3) 21:MAR:86
C ------------------------------------------------
C
C THIS DRAWS THE PIECHART SECTOR BY SECTOR.
C
C THE ARGUMENTS ARE AS FOLLOWS:
C
C <XORIG> IS THE X-ORIGIN OF THE PIECHART,
C <YORIG> IS THE Y-ORIGIN OF THE PIECHART,
C <RADIUS> IS THE RADIUS OF THE PIECHART,
C [VALUES] IS AN ARRAY CONTAINING THE SIZES OF THE SECTORS
C IF > 0, THE SECTOR IS CENTRED
C IF < 0, THE SECTOR IS SHIFTED OUT,
C <NSECS> THE NUMBER OF SECTORS.
C
REAL VALUES(NSECS),XAUTO(2),YAUTO(2)
C
COMMON /T0ACON/ ANGCON
COMMON /T0KFIL/ KOLFL0
COMMON /T0KLST/ LSTCL0(100),LENLST
COMMON /T0MAPP/ X1MAP0,X2MAP0,Y1MAP0,Y2MAP0
COMMON /T0PIAN/ ANGPIE
COMMON /T0PIE/ XCPIE,YCPIE,PIEVAL(50),NPVALS,PIERAD,PIETOT,SIZLIM
COMMON /T0PPOS/ XPLOT0,YPLOT0
COMMON /T0TRAC/ IPRINT
COMMON /T3CONS/ PI
C
C
IF (IPRINT.EQ.1) CALL G0MESG(136,0)
C
NPVALS= 0
IF (NSECS.LE.0) RETURN
C
IPRSAV= IPRINT
IPRINT= 0
ABSRAD= ABS(RADIUS)
RAD= ABSRAD*1.1
XAUTO(1)= XORIG-RAD
XAUTO(2)= XORIG+RAD
YSCALE= (Y2MAP0-Y1MAP0)/(X2MAP0-X1MAP0)
YAUTO(1)= YORIG-RAD*YSCALE
YAUTO(2)= YORIG+RAD*YSCALE
CALL G0AUTO(XAUTO,YAUTO,1,2,1,2,1)
XSAVE= XPLOT0
YSAVE= YPLOT0
CALL POSITN(XORIG,YORIG)
XCPIE= XORIG
YCPIE= YORIG
NPVALS= NSECS
PIERAD= ABSRAD
TOTAL= 0.0
SECMIN= ABS(VALUES(1))
C
DO 100 IADD= 1,NSECS
ABSVAL= ABS(VALUES(IADD))
TOTAL= TOTAL+ABSVAL
IF (SECMIN.GT.ABSVAL) SECMIN= ABSVAL
IF (IADD.GT.50) GO TO 100
C
PIEVAL(IADD)= VALUES(IADD)
100 CONTINUE
C
PIETOT= 2.0*PI/TOTAL
SIZLIM= 0.72*SQRT(SECMIN/TOTAL)
C
C FIND THE ANGLE OF EACH SECTOR THEN DRAW IT.
C <OFFSET> IS NON-ZERO WHEN THE SECTOR IS SHIFTED-OUT.
C
KOLSAV= KOLFL0
KOLFL0= 0
KOLIND= 1
ADANGL= ANGPIE
C
DO 200 N= 1,NSECS
IF (LENLST.LE.0) GO TO 1
C
KOLFL0= LSTCL0(KOLIND)
KOLIND= KOLIND+1
IF (KOLIND.GT.LENLST) KOLIND= 1
C
1 ANGLE= ABS(VALUES(N))*PIETOT
OFFSET= 0.0
IF (VALUES(N).LT.0.0) OFFSET= 0.1
C
RAD= ABSRAD*OFFSET
ANGLAB= ANGLE*0.5+ADANGL
RX= RAD*COS(ANGLAB)+XORIG
RY= RAD*YSCALE*SIN(ANGLAB)+YORIG
CALL POSITN(RX,RY)
SX= ABSRAD*COS(ADANGL)+RX
SY= ABSRAD*YSCALE*SIN(ADANGL)+RY
CALL SECCIR(SX,SY,ANGLE/ANGCON)
ADANGL= ADANGL+ANGLE
200 CONTINUE
C
CALL POSITN(XSAVE,YSAVE)
KOLFL0= KOLSAV
IPRINT= IPRSAV
C
RETURN
END
| {"hexsha": "a71a80f12161ffeecc57fdacea172ae78890ad90", "size": 2852, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/lib/piecht.f", "max_stars_repo_name": "ZedThree/GHOST", "max_stars_repo_head_hexsha": "cba30b43bdcc73fb87cff0724337a7d3a1bd7812", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lib/piecht.f", "max_issues_repo_name": "ZedThree/GHOST", "max_issues_repo_head_hexsha": "cba30b43bdcc73fb87cff0724337a7d3a1bd7812", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/piecht.f", "max_forks_repo_name": "ZedThree/GHOST", "max_forks_repo_head_hexsha": "cba30b43bdcc73fb87cff0724337a7d3a1bd7812", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1619047619, "max_line_length": 72, "alphanum_fraction": 0.5596072931, "num_tokens": 1139} |
-- Andreas, 2015-09-09 Issue 1643
-- {-# OPTIONS -v tc.mod.apply:20 #-}
-- {-# OPTIONS -v tc.signature:30 #-}
-- {-# OPTIONS -v tc.display:100 #-}
-- {-# OPTIONS -v scope:50 -v scope.inverse:100 -v interactive.meta:20 #-}
module _ where
module M where
postulate A : Set
module N = M -- This alias used to introduce a display form M.A --> N.A
open N
postulate
a : A
test : Set
test = a
-- ERROR SHOULD BE: A !=< Set of type Set
| {"hexsha": "bead4e2ca66a959c5be170734001edadcd6715fa", "size": 442, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/Fail/Issue1643a.agda", "max_stars_repo_name": "shlevy/agda", "max_stars_repo_head_hexsha": "ed8ac6f4062ea8a20fa0f62d5db82d4e68278338", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1989, "max_stars_repo_stars_event_min_datetime": "2015-01-09T23:51:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:20:48.000Z", "max_issues_repo_path": "test/Fail/Issue1643a.agda", "max_issues_repo_name": "shlevy/agda", "max_issues_repo_head_hexsha": "ed8ac6f4062ea8a20fa0f62d5db82d4e68278338", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4066, "max_issues_repo_issues_event_min_datetime": "2015-01-10T11:24:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:14:49.000Z", "max_forks_repo_path": "test/Fail/Issue1643a.agda", "max_forks_repo_name": "Agda-zh/agda", "max_forks_repo_head_hexsha": "231d6ad8e77b67ff8c4b1cb35a6c31ccd988c3e9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 371, "max_forks_repo_forks_event_min_datetime": "2015-01-03T14:04:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T19:00:30.000Z", "avg_line_length": 19.2173913043, "max_line_length": 74, "alphanum_fraction": 0.6176470588, "num_tokens": 140} |
#!/usr/bin/env python
import numpy as np
import pickle
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter,ScalarFormatter
'''
This script generates the average trace over 10 different initial conditions.
GCG
04.02.2020
'''
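# Expected input layout (inferred from the loops below): one directory per seed,
#   ./random/react_data/param1m_0000<seed>/{D.om,D.im,D.cm,T.om,T.im,T.cm}.dat
# where each .dat file holds two columns: time and the variable's value.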
seed_number = 10 #number of seeds
nv = 6 #number of variables
npoints = int(10e4)#int(1e5) #time points available
arr = np.zeros((nv,npoints,seed_number))
av_ar = np.zeros((nv,npoints)) #average traces
ar_std = np.zeros((nv,npoints))
time = np.zeros((nv, npoints)) #time
li = ['/D.om.dat','/D.im.dat','/D.cm.dat','/T.om.dat','/T.im.dat','/T.cm.dat']
for i in range(seed_number):
    print(i)
if i>8:
for s,j in enumerate(li):
var = np.genfromtxt("./random/react_data/param1m_000"+str(i+1)+j,dtype = float)
print "./random/react_data/param1m_0000"+str(i+1)
arr[s,:,i]= var[:npoints,1]
time[s,:] = var[:npoints,0]
else:
for s,j in enumerate(li):
var = np.genfromtxt("./random/react_data/param1m_0000"+str(i+1)+j, dtype = float)
#print "./random/seed_0000"+str(i+1)+"_1e9"+j
arr[s,:,i]= var[:npoints,1]
time[s,:] = var[:npoints,0]
#arr[29,:,:] = arr[29,:,:]-arr[3,:,:]
#arr[0,:,:] = arr[0,:,:]-arr[1,:,:]
#arr[3,:,:] = arr[3,:,:]-arr[2,:,:]
#arr[21,:,:] = arr[21,:,:]-arr[22,:,:]
#arr[23,:,:] = arr[23,:,:]-arr[24,:,:]
#arr[27,:,:] = arr[27,:,:]-arr[28,:,:]
av_ar = np.average(arr, axis=2)
ar_std = np.std(arr, axis=2)
the_filename = 'av_10r_1e9_conc'
with open(the_filename, 'wb') as f:#
pickle.dump(av_ar, f)
np.savetxt('time_10r_1e9_conc',time)
| {"hexsha": "9ad9e509be147302c7206c0a70b183cc43060576", "size": 1629, "ext": "py", "lang": "Python", "max_stars_repo_path": "odes_and_figures/non_equilibrium_induced_gradients/generate_av_hits.py", "max_stars_repo_name": "guadagar/Mitochondrial_morphology", "max_stars_repo_head_hexsha": "39c29b909002f975bab5fe436165d63f31f5a878", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "odes_and_figures/non_equilibrium_induced_gradients/generate_av_hits.py", "max_issues_repo_name": "guadagar/Mitochondrial_morphology", "max_issues_repo_head_hexsha": "39c29b909002f975bab5fe436165d63f31f5a878", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "odes_and_figures/non_equilibrium_induced_gradients/generate_av_hits.py", "max_forks_repo_name": "guadagar/Mitochondrial_morphology", "max_forks_repo_head_hexsha": "39c29b909002f975bab5fe436165d63f31f5a878", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0892857143, "max_line_length": 93, "alphanum_fraction": 0.6022099448, "include": true, "reason": "import numpy", "num_tokens": 540} |
import pygame.camera
import sys
import PIL
from PIL import Image
import numpy
from threading import Thread
import SocketServer
import json
import whistler
whistler.im = Image.new("RGB", (1024, 640), "white")
draw_target = whistler.draw_and_compare
import pointillism
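# Example JSON messages accepted by the config server below (illustrative values;
# the keys match those read in JsonConfigHandler.handle):
#   {"type": "beziers", "min_x": 16, "max_x": 4}
#   {"type": "pointillism", "colors": "<palette spec passed to pointillism.load_colors>"}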
class JsonConfigServer(SocketServer.ThreadingTCPServer):
allow_reuse_address = True
class JsonConfigHandler(SocketServer.BaseRequestHandler):
def handle(self):
global draw_target
config = json.loads(self.request.recv(1024).strip())
print config
if config['type'] == 'beziers':
print 'Changing to beziers'
whistler.min_width = whistler.im.size[0]/config['min_x']
whistler.max_width = whistler.im.size[0]/config['max_x']
whistler.min_height = whistler.im.size[0]/config['min_x']
whistler.max_height =whistler.im.size[0]/config['max_x']
draw_target = whistler.draw_and_compare
elif config['type'] == 'pointillism':
print 'Changing to Pointillism'
pointillism.colors = pointillism.load_colors(config['colors'])
draw_target = pointillism.draw_and_compare
self.request.sendall(json.dumps({'return':'ok'}))
server = JsonConfigServer(('127.0.0.1', 8000), JsonConfigHandler)
th = Thread(target=server.serve_forever)
th.daemon = True
th.start()
pygame.init()
pygame.camera.init()
cameras = pygame.camera.list_cameras()
camera = pygame.camera.Camera(cameras[0])
camera.start()
img = camera.get_image()
cam_width = img.get_width()
cam_height = img.get_height()
cam_im_array = pygame.surfarray.array3d(img)#pygame.image.load('sometest.png'))
average = map(int,map(round, cam_im_array.mean(1).mean(0)))
temp = numpy.array([[[0,0,0]]*cam_height]*cam_width,dtype=numpy.uint8)
#temp = numpy.array([[[0,0,0]]*cam_width]*cam_height, dtype=numpy.uint8)
whistler.save_im = Image.fromarray(temp)
whistler.im = Image.fromarray(cam_im_array)
whistler.width, whistler.height = cam_height, cam_width#, cam_height
#some defaults
whistler.min_width = cam_width/16
whistler.max_width = cam_width/4
whistler.min_height = cam_height/16
whistler.max_height = cam_height/4
whistler.p_alpha = 0.0125
whistler.remaining = whistler.abs_diff(whistler.save_im, whistler.im)
old_remaining = whistler.remaining
screen = pygame.display.set_mode([cam_width, cam_height])# pygame.FULLSCREEN)
pygame.mouse.set_visible(0)
pygame.display.set_caption("Live Painting")
c = 0
while True :
for e in pygame.event.get() :
if e.type == pygame.QUIT\
or e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
pygame.camera.quit()
sys.exit()
# try to draw something
while whistler.fail_count < 1 and whistler.remaining <= old_remaining:
dc_thread = Thread(target=draw_target)
dc_thread.start()
dc_thread.join()
whistler.fail_count = 0
old_remaining = whistler.remaining
if c % 60 == 0:
img = camera.get_image()
        whistler.im = Image.fromarray(pygame.surfarray.array3d(img))
whistler.remaining = whistler.abs_diff(whistler.save_im, whistler.im)
c += 1
# draw frame
pygame.surfarray.blit_array(screen, numpy.asarray(whistler.save_im))#.rotate(90)))
#pygame.surfarray.blit_array(screen, pygame.surfarray.array3d(img))
#screen.blit(pygame.image.frombuffer(whistler.save_im.tostring(), whistler.save_im.size, whistler.save_im.mode), (0,0))
pygame.display.flip()
| {"hexsha": "8a8940ec6103c4a8d36d71d7c31f6b8f73cf5e6f", "size": 3536, "ext": "py", "lang": "Python", "max_stars_repo_path": "project_code/live_painter.py", "max_stars_repo_name": "BlainMaguire/devart-template", "max_stars_repo_head_hexsha": "f295f91866deb6935b46191bd406e22cf44620ca", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-06-27T17:44:37.000Z", "max_stars_repo_stars_event_max_datetime": "2016-06-27T17:44:37.000Z", "max_issues_repo_path": "project_code/live_painter.py", "max_issues_repo_name": "BlainMaguire/devart-template", "max_issues_repo_head_hexsha": "f295f91866deb6935b46191bd406e22cf44620ca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project_code/live_painter.py", "max_forks_repo_name": "BlainMaguire/devart-template", "max_forks_repo_head_hexsha": "f295f91866deb6935b46191bd406e22cf44620ca", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3300970874, "max_line_length": 123, "alphanum_fraction": 0.7010746606, "include": true, "reason": "import numpy", "num_tokens": 923} |
import rosbag
import roslib
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import std_msgs
import os
import glob
import numpy as np
import argparse
import yaml
'''
@author: Pushyami Kaveti
This is a tool to convert images into a bag file. Each directory represents a topic.
It uses a config file to specify various options for selection like topics,
time range, frequency etc.
'''
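# Example config.yaml (illustrative topic names; the keys match what __main__ reads below):
#   topics: [/cam0/image_raw, /cam1/image_raw]
#   info_topics: [/cam0/camera_info, /cam1/camera_info]
#   frequency: 10
#   img_type: bmp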
class Image2Bag:
def __init__(self,topic_n, info_topics, freq, op_file, data_p="/home/auv/calib" ,img_t="bmp"):
rospy.init_node('image_converter', anonymous=True)
self.data_path=data_p
self.topic_names = topic_n
self.info_topic_names = info_topics
self.num_topics = len(topic_n)
self.img_type = img_t
self.pubs = []
self.cam_info_pubs = []
self.im_list = []
self.bridge = CvBridge()
self.init_publishers()
self.get_imgs()
self.to_bag= True
if self.to_bag :
self.write_bag = rosbag.Bag(op_file, 'w')
self.frequency = freq
def init_publishers(self):
for top_ind in range(self.num_topics):
image_pub = rospy.Publisher(self.topic_names[top_ind], Image, queue_size=1)
cam_info_pub = rospy.Publisher(self.info_topic_names[top_ind], CameraInfo, queue_size=1)
self.pubs.append(image_pub)
self.cam_info_pubs.append(cam_info_pub)
def get_imgs(self):
li_single = glob.glob(os.path.join(self.data_path ,"cam0/*."+self.img_type))
#print(os.path.join(self.data_path ,"cam0/*.", self.img_type))
self.num_imgs = len(li_single)
        # check that all cam directories have the same number of images
        # check that the number of directories equals the number of topics being published
dirs = [ name for name in os.listdir(self.data_path) if os.path.isdir(os.path.join(self.data_path, name)) ]
#print(dirs)
dirs.sort()
if(len(dirs) != self.num_topics):
print("ERROR: number of image directories is not equal to number of topics")
exit()
self.im_list = glob.glob(os.path.join(self.data_path ,"cam*/*."+self.img_type))
self.im_list.sort()
def run(self):
r = rospy.Rate(self.frequency) # 10hz
i=0
print(self.num_imgs)
while not rospy.is_shutdown():
if ( i < self.num_imgs):
h = std_msgs.msg.Header()
h.stamp = rospy.Time.now()
h.seq = i
print("--------------------------------")
imgs = []
info_msgs = []
for j in range(self.num_topics):
img = cv2.imread(self.im_list[i + j*self.num_imgs], cv2.IMREAD_GRAYSCALE)
print(self.im_list[i + j*self.num_imgs])
ros_img = self.bridge.cv2_to_imgmsg(img, "mono8")
ros_img.header = h
imgs.append(ros_img)
cam_info_msg = CameraInfo()
cam_info_msg.header = h
info_msgs.append(cam_info_msg)
for k in range(self.num_topics):
if self.to_bag:
print("writing"+str(i))
self.write_bag.write(self.topic_names[k], imgs[k], imgs[k].header.stamp)
self.write_bag.write(self.info_topic_names[k], info_msgs[k], info_msgs[k].header.stamp)
else:
self.pubs[k].publish(imgs[k])
self.cam_info_pubs[k].publish(info_msgs[k])
i = i+1
r.sleep()
else:
print(i)
break
r.sleep()
if self.to_bag:
self.write_bag.close()
if __name__=="__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
description='Reads the images from directories in the path and \
saves them to individual topics in a bag file.\n\n')
parser.add_argument('-i','--input_dir', help='Input directory path containing images', required=True)
parser.add_argument('-o','--output_dir', help='Output dir for output bag file', default='.')
parser.add_argument('-of','--output_filename', help='Output file name for output bag file' , default = 'output.bag')
parser.add_argument('-c','--config_file', help='Yaml file which specifies the topic names, frequency of selection and time range',
default = 'config/config.yaml')
args = parser.parse_args()
with open(args.config_file, 'r') as f:
config = yaml.safe_load(f)
op_file = os.path.join(args.output_dir, args.output_filename)
bs = Image2Bag(config['topics'], config['info_topics'], config['frequency'], op_file, args.input_dir , config['img_type'])
bs.run()
| {"hexsha": "2c8bfc06c0bd99b09e80a63f70f19ec4853082b0", "size": 5043, "ext": "py", "lang": "Python", "max_stars_repo_path": "image2bag/image2bag.py", "max_stars_repo_name": "neufieldrobotics/rosbag_toolk", "max_stars_repo_head_hexsha": "b8317af1f03ab0e7afe023d7cd884b3418286da9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-03-18T04:29:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T08:28:29.000Z", "max_issues_repo_path": "image2bag/image2bag.py", "max_issues_repo_name": "neufieldrobotics/rosbag_toolk", "max_issues_repo_head_hexsha": "b8317af1f03ab0e7afe023d7cd884b3418286da9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-19T13:24:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-19T18:01:35.000Z", "max_forks_repo_path": "image2bag/image2bag.py", "max_forks_repo_name": "neufieldrobotics/rosbag_toolk", "max_forks_repo_head_hexsha": "b8317af1f03ab0e7afe023d7cd884b3418286da9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-02-19T13:18:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T08:33:12.000Z", "avg_line_length": 41.3360655738, "max_line_length": 134, "alphanum_fraction": 0.5921078723, "include": true, "reason": "import numpy", "num_tokens": 1093} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: agent.py
# Author: Yuxin Wu <[email protected]>
import numpy as np
import cv2
import tensorflow as tf
#assert int(tf.__version__.split('.')[1]) == 9
assert int(np.__version__.split('.')[1]) >= 11
from collections import deque, Counter
import random
import time
from tensorpack import *
from tensorpack.RL import *
from tensorpack.utils.rect import Rect
from history import HistoryPlayerWithVar
NUM_ACTIONS = 6
IMAGE_SIZE = (120, 120)
CHANNEL = 4 * 3 * 2
IMAGE_SHAPE3 = IMAGE_SIZE + (CHANNEL,)
class EOGError(Exception):
    """ end of game"""
    pass
class FinalEnv(RLEnvironment):
def __init__(self, game):
self.game = game
self._image_shape = IMAGE_SIZE
self._frame_skip = 2
center_patch = 0.22
frac = center_patch / 2
W, H = 512, 384
self._center_rect = Rect(*map(int,
[W/2 - W*frac, H/2 - H*frac, W*frac*2, H*frac*2]))
self.last_history = deque(maxlen=60)
self.current_ammo = 10
self.timer = 0
def dead(self):
self.last_history.clear()
self.current_ammo = 10
self.timer = 0
def parse_state(self, s):
img = s.screen_buffer
if img is None:
raise EOGError()
img = np.transpose(img, (1,2,0))
center_patch = self._center_rect.roi(img)
center_patch = cv2.resize(center_patch, self._image_shape[::-1])
img = cv2.resize(img, self._image_shape[::-1])
img = np.concatenate((img, center_patch), axis=2)
v = s.game_variables
v = [(v[0] - 50) * 0.01, # health
(v[1] - 50) * 0.01, # ammo
0.5 if v[0] < 10 else -0.5, # dying
0.5 if v[1] < 3 else -0.5, # short of ammo
0.5 if v[1] == 0 else -0.5 # no ammo
]
self.current_ammo = v[1]
return img, v
def current_state(self):
return self.parse_state(self.game.get_state())
def repeat_action(self, act):
cnt = 0
while len(self.last_history) > cnt + 1 \
and self.last_history[-(cnt+1)] == act:
cnt += 1
return cnt
def stuck_detect(self):
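        # Heuristic stuck detection: the agent is considered stuck if the last 60
        # actions were all identical, or if the last 40 actions contain only left/right
        # turns (actions 0 and 1) in nearly equal numbers.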
if len(self.last_history) < 60:
return False
c = Counter(self.last_history)
if len(c) == 1:
return True
c = Counter(list(self.last_history)[-40:])
if len(c) == 2:
keys = sorted(c.keys())
if keys == [0,1]:
if abs(c[0] - c[1]) < 2:
return True
return False
def action(self, act):
# act: a distribution
self.timer += 1
act_distrib = act
act = act.argmax()
if act == 6: act = 4
extra_attack = act != 2 and act_distrib[2] > 1e-2
self.last_history.append(act)
action = [False] * 8
if self.current_ammo == 0 and act == 2:
act_distrib[2] = 0
action[7] = True # turn180
act = 3
elif extra_attack:
action[2] = True
elif self.stuck_detect():
action[7] = True # turn180
self.last_history.append(-1)
if self.timer < 10:
action[2] = True
repeat = self._frame_skip
is_attacking = extra_attack or (2 in list(self.last_history)[-3:])
if is_attacking:
repeat = 1
if act in [0, 1, 2, 4, 5]:
action[act] = True
if act == 2:
self.game.make_action(action, 1)
else:
if act in [0, 1]:
if self.repeat_action(act) > 3:
action[act] = False
action[6] = -9 if act == 0 else 9
self.game.make_action(action, repeat)
else:
action[3] = 37 if not is_attacking else 25
self.game.make_action(action, repeat)
if self.game.is_player_dead():
self.dead()
return 0, False
class Model(ModelDesc):
def _get_input_vars(self):
return [InputVar(tf.float32, (None,) + IMAGE_SHAPE3, 'image'),
InputVar(tf.float32, (None, 5), 'vars')]
def _get_NN_prediction(self, state, is_training):
""" image: [0,255]"""
image, vars = state
image = image / 255.0
with argscope(Conv2D, nl=PReLU.f):
l = (LinearWrap(image)
.Conv2D('conv0', out_channel=32, kernel_shape=7, stride=2)
# 60
.Conv2D('conv1', out_channel=64, kernel_shape=7, stride=2)
.MaxPooling('pool1', 3, 2)
# 15
.Conv2D('conv3', out_channel=128, kernel_shape=3)
.MaxPooling('pool3', 3, 2, padding='SAME')
# 7
.Conv2D('conv4', out_channel=192, kernel_shape=3, padding='VALID')
# 5
.FullyConnected('fcimage', 1024, nl=PReLU.f)())
vars = tf.tile(vars, [1, 10], name='tiled_vars')
feat = tf.concat(1, [l, vars])
policy = FullyConnected('fc-pi-m', feat, out_dim=NUM_ACTIONS, nl=tf.identity) * 0.1
return policy
def _build_graph(self, inputs, is_training):
policy = self._get_NN_prediction(inputs, is_training)
self.logits = tf.nn.softmax(policy, name='logits')
class Model2(ModelDesc):
def _get_input_vars(self):
return [InputVar(tf.float32, (None,) + IMAGE_SHAPE3, 'image'),
InputVar(tf.float32, (None, 5), 'vars')]
def _get_NN_prediction(self, state, is_training):
""" image: [0,255]"""
image, vars = state
image = image / 255.0
with argscope(Conv2D, nl=PReLU.f):
l = (LinearWrap(image)
.Conv2D('conv0', out_channel=32, kernel_shape=7, stride=2)
# 60
.Conv2D('conv1', out_channel=64, kernel_shape=7, stride=2)
.MaxPooling('pool1', 3, 2)
# 15
.Conv2D('conv3', out_channel=128, kernel_shape=3)
.MaxPooling('pool3', 3, 2, padding='SAME')
# 7
.Conv2D('conv4', out_channel=192, kernel_shape=3, padding='VALID')
# 5
.FullyConnected('fcimage', 1024, nl=PReLU.f)())
vars = tf.tile(vars, [1, 10], name='tiled_vars')
feat = tf.concat(1, [l, vars])
policy = FullyConnected('fc-pi-m', feat, out_dim=NUM_ACTIONS+1, nl=tf.identity)
return policy
def _build_graph(self, inputs, is_training):
policy = self._get_NN_prediction(inputs, is_training)
self.logits = tf.nn.softmax(policy, name='logits')
class Runner(object):
def __init__(self, game):
p = FinalEnv(game)
self.player = HistoryPlayerWithVar(p, 4)
cfg = PredictConfig(
model=Model(),
session_init=SaverRestore('model.tfmodel'),
input_var_names=['image', 'vars'],
output_var_names=['logits'])
self._pred_func = get_predict_func(cfg)
def action_func(self, inputs):
f = self._pred_func
act = f([[inputs[0]], [inputs[1]]])[0][0]
return act
def step(self):
try:
s = self.player.current_state()
except EOGError:
return
act = self.action_func(s)
self.player.action(act)
| {"hexsha": "a71784285e1103f855a40656cf76344c4fbfb761", "size": 7374, "ext": "py", "lang": "Python", "max_stars_repo_path": "f1/F1_track1/agent.py", "max_stars_repo_name": "ebonyclock/vizdoom_cig2017", "max_stars_repo_head_hexsha": "42baffa7c6ee43db618605838ea6f9e0547001d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2017-03-27T14:04:52.000Z", "max_stars_repo_stars_event_max_datetime": "2017-09-20T09:07:37.000Z", "max_issues_repo_path": "f1/F1_track1/agent.py", "max_issues_repo_name": "mihahauke/vizdoom_cig2017", "max_issues_repo_head_hexsha": "42baffa7c6ee43db618605838ea6f9e0547001d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2017-04-07T14:16:06.000Z", "max_issues_repo_issues_event_max_datetime": "2017-07-21T16:22:37.000Z", "max_forks_repo_path": "f1/F1_track1/agent.py", "max_forks_repo_name": "ebonyclock/vizdoom_cig2017", "max_forks_repo_head_hexsha": "42baffa7c6ee43db618605838ea6f9e0547001d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2017-03-29T13:53:56.000Z", "max_forks_repo_forks_event_max_datetime": "2017-08-24T06:38:29.000Z", "avg_line_length": 31.7844827586, "max_line_length": 91, "alphanum_fraction": 0.5379712503, "include": true, "reason": "import numpy", "num_tokens": 2026} |
# cd; conda activate food; cd food; python 0_food_app.py &>>$HOME/app1.log & disown
from tendo import singleton
me = singleton.SingleInstance()
import pandas as pd
import numpy as np
from food.tools import *
from food.psql import *
from food.paths import *
from time import sleep
######default_exp psql
from food.tools import docker_container
import os
os.environ['MKL_THREADING_LAYER'] = 'GNU'
import schedule
from pathlib import Path
import os
from functools import partial
bash_command = lambda x : os.system(f'cd $HOME/food; conda run -n food python "{x}".py &>>$HOME/"{x}".log')
kill_command = lambda x : os.system(f'pkill -f {x}')
start_docker = lambda x : docker_container(x).start()
scheduler = schedule.Scheduler()
constant_procs = ['bot']
[scheduler.every(5).seconds.do(partial(bash_command,p)) for p in constant_procs]
[scheduler.every(5).seconds.do(partial(start_docker,p)) for p in ['dima_re_postgres','qdrant_prod']]
while True:
scheduler.run_pending()
sleep(5)
| {"hexsha": "1b14b2a59f0bd02212d3ba357eb52740ec8e29b0", "size": 1015, "ext": "py", "lang": "Python", "max_stars_repo_path": "0_food_app.py", "max_stars_repo_name": "DmitriyG228/food", "max_stars_repo_head_hexsha": "dc2375b06a47e742573e53b86d87f3ecd5b160bf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "0_food_app.py", "max_issues_repo_name": "DmitriyG228/food", "max_issues_repo_head_hexsha": "dc2375b06a47e742573e53b86d87f3ecd5b160bf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "0_food_app.py", "max_forks_repo_name": "DmitriyG228/food", "max_forks_repo_head_hexsha": "dc2375b06a47e742573e53b86d87f3ecd5b160bf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0256410256, "max_line_length": 113, "alphanum_fraction": 0.7251231527, "include": true, "reason": "import numpy", "num_tokens": 265} |
\documentclass[11pt]{article}
\usepackage{doc}
\usepackage{fullpage}
\usepackage{fancyvrb}
\usepackage{pdfpages}
\usepackage{url}
\usepackage{color}
\usepackage{hyperref}
\hypersetup{
bookmarks=true, % show bookmarks bar?
colorlinks=true, % false: boxed links; true: colored links
linkcolor=red, % color of internal links
citecolor=green, % color of links to bibliography
filecolor=magenta, % color of file links
urlcolor=cyan, % color of external links
pdftitle={SHOC Manual}, % title
pdfauthor={The Dakar Team}, % author
pdfnewwindow=true, % links in new window
}
\begin{document}
\title{SHOC: The Scalable HeterOgeneous Computing Benchmark Suite}
\author{Dakar Team\\Future Technologies Group\\Oak Ridge National Laboratory}
\date{Version 1.1.4, May 2012}
\maketitle
%\tableofcontents
\section{Introduction}
The Scalable HeterOgeneous Computing benchmark suite (SHOC) is a collection of
benchmark programs that tests the performance and stability of systems using
computing devices with non-traditional architectures for general purpose
computing, and the software used to program them. Its initial focus is on
systems containing Graphics Processing Units (GPUs) and multi-core
processors, and on the OpenCL\,\cite{openclspec} programming standard.
It can be used on clusters as well as individual hosts.
OpenCL is an open standard for programming a variety of types of computing
devices. The OpenCL specification describes a language for programming
\emph{kernels} to run on an OpenCL-capable device, and an
Application Programming Interface (API) for transferring data to such
devices and executing kernels on them.
%The OpenCL specification was ratified
%by The Khronos Group in late 2008. At the time of this writing, OpenCL
%implementations are just becoming publicly available. These early OpenCL
%implementations support running OpenCL kernels on GPUs and commodity multi-core
%processors, though not all implementations support both device types.
In addition to OpenCL-based benchmark programs, SHOC also includes
Compute Unified Device Architecture (CUDA)\cite{cuda} versions
of its benchmarks for comparison.
%CUDA, developed by
%NVIDIA, is an approach for programming NVIDIA GPUs for general purpose
%computing that predates OpenCL. Like OpenCL, CUDA-based programs use
%a host program running on the system's CPU to run kernels on an
%accelerator device (in this case, a GPU).
%This document describes how to build and use SHOC.
%We first detail the supported platforms for using SHOC
%(Section\,\ref{sec:supported}),
%followed by an overview of the SHOC source code (Section\,\ref{sec:source}),
%how to configure it (Section\,\ref{sec:configuring}), build it
%(Section\,\ref{sec:building}), and run
%it (Section\,\ref{sec:running}).
% System hardware and software platforms
\section{Supported Platforms}\label{sec:supported}
The Dakar team intends SHOC to be useful on any platform with an
OpenCL implementation. However, the Dakar team develops and tests
SHOC primarily on UNIX-like platforms, specifically Linux and
Mac OS X.
This section lists software versions used for much of the SHOC development
on these platforms.
\subsection{Linux}
\begin{itemize}
\item A recent RedHat-family OS distribution (Fedora or RHEL).\footnote{
Some Linux distributions may include a more recent GCC toolchain that is
not yet supported by NVIDIA CUDA. On such platforms, an earlier version of GCC
must be used to compile SHOC, and the SHOC configuration files must be
modified so that the --compiler-bindir switch is passed to nvcc to
indicate to nvcc the location of the GCC compiler binaries it should use.}
\item A working OpenCL implementation. The Dakar team has tested SHOC
using the following versions:
\begin{itemize}
\item NVIDIA GPU Computing SDK version 4.0 or later
\item AMD Accelerated Parallel Processing (APP) SDK version 2.5 or later
\end{itemize}
\item (Optional) CUDA 4.0 or later
\end{itemize}
SHOC may work on other platforms with other OpenCL and CUDA versions
than those listed here, but will most likely require modifications for
differing OpenCL header and library paths, differing system library versions,
and differing compiler versions/vendors.
\subsection{Mac OS X}
\begin{itemize}
\item Mac OS X 10.6 (''Snow Leopard''). The Dakar team has not yet tested SHOC on a Mac OS X 10.7 (``Lion'') system due to lack of availability.
\item Xcode 3.2 or later.
\item (Optional) CUDA 4.0 or later
\end{itemize}
\subsection{Clusters}
In addition to individual systems, SHOC can also build parallel benchmark
programs for clusters. Each cluster node must meet the requirements described
earlier in this section for the OS distribution used on that node.
Also, the cluster must have a working implementation of the
Message Passing Interface
(MPI)\,\cite{gropp-lusk-skjellum:using-mpi2nd,gropp-lusk-thakur:usingmpi2}
library such as OpenMPI \url{www.open-mpi.org} or MPICH2
\url{www.mcs.anl.gov/mpi/mpich}.
An OS X 10.6 system with Xcode installed contains an OpenMPI implementation
sufficient for use by SHOC without the need to install additional software.
\section{Configuring}\label{sec:configuring}
Unlike previous SHOC versions, this version of SHOC uses a configuration script
generated by GNU autoconf.
This script is located in the SHOC distribution's root directory.
In the rest of this document, we presume this directory is called
\verb+$SHOC_ROOT+.
By default, the configure script will try to determine whether the
target system has usable OpenCL, CUDA, and MPI installations.
The configure script depends on the PATH environment variable to find necessary
binary programs like CUDA's \verb+nvcc+, so the PATH must be set before the
configure script is run.
Similarly, the configure script uses CPPFLAGS and LDFLAGS to find needed
headers and libraries for OpenCL and CUDA.
For instance, on a system with the NVIDIA CUDA/OpenCL software installed
in \verb+/opt/cuda+ (a non-default location), the PATH should be updated to include
\verb+/opt/cuda/bin+ and the configure script should be run as follows so that it can
find the OpenCL headers:
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT
$ sh ./configure CPPFLAGS="-I/opt/cuda/include"
\end{Verbatim}
SHOC uses MPI compiler drivers (e.g., \verb+mpicxx+) to compile and link code
that uses MPI functionality.
The SHOC configure script depends on the PATH environment variable to find
the MPI compiler drivers for the MPI installation it will use, so it is often
helpful to use a command like \verb+which mpicxx+ before configuring SHOC
to ensure that the PATH is set to find the desired MPI installation.
Note that, unlike previous versions of SHOC that also required configuration
script options to specify the location of MPI headers and libraries, current
versions of SHOC do not support such options because the MPI compiler drivers
automatically add the appropriate compiler and linker flags.
If you desire not to use SHOC's OpenCL, CUDA, or MPI support (e.g., because
no MPI implementation is available), use the \verb+--without-opencl+,
\verb+--without-cuda+,
and/or \verb+--without-mpi+ configure script flags.
Note, however, that support for at least one of OpenCL and CUDA must be
enabled to use SHOC.
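For example, a hypothetical configuration for a system that has OpenCL and CUDA
but no usable MPI installation might be:
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT
$ sh ./configure --without-mpi
\end{Verbatim}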
By default, SHOC does not explicitly specify a flag to the compiler and
linker to specify whether to build 32-bit or 64-bit executables.
Thus, your SHOC executables will have the default bit-ness of the compiler
you used to build SHOC.
If you want to specify a different bit-ness (e.g., 64-bit executables on
Mac OS X with the GNU compiler) you must specify the appropriate flag
for your compiler in CPPFLAGS and LDFLAGS when configuring
SHOC.\footnote{Earlier versions of SHOC supported an --enable-m64 option
that automatically added the appropriate flag for the GNU compiler to generate
64-bit executables. Because this flag was specific to the GNU compiler, we have
deprecated that configure option.}
For example,
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT
$ sh ./configure \
CXXFLAGS="-m64" \
CFLAGS="-m64" \
NVCXXFLAGS="-m64"
\end{Verbatim}
\noindent gives equivalent behavior to the \verb+--enable-m64+ configure flag
supported by earlier versions of SHOC if the GNU compilers are being used.
By default, SHOC builds a CUDA-based stability test.
If you desire not to build the SHOC stability test (e.g., because CUDA is
not available), use the \verb+--disable-stability+ configuration flag.
See the output of \verb+sh ./configure --help+ for a full list of configuration
options.
Also, see the example configuration scripts in \verb+$SHOC_ROOT\config+ for
examples of configuring the SHOC benchmark suite.
\subsection{Cross compiling}
{\bf
Cross compilation support is experimental.
The SHOC development team has had very limited opportunity to test the
functionality for a wide variety of build and host systems, and has had
virtually no opportunity to thoroughly test the executables produced through
cross compiling.
}
SHOC has {\bf experimental} support for building executables that can run on
a different type of system than was used to build them.
For instance, on an x86 Linux system SHOC can be configured to build
executables that run on an ARM Linux system.
This technique is commonly called {\em cross compiling}, the system
used to build the software is called the {\em build} system, and the system
that will run the executables is called the {\em host}.
Note that SHOC only enables cross compiling, it does not provide
the compilers, linkers, libraries, and support tools needed to produce
executables for the host system.
You will need to obtain, install, and preferably test a cross-compilation
toolchain on the build system before configuring SHOC.
To configure SHOC for cross compiling, pass a description of the target
system using the \verb+--host=+{\em hostspec} option to SHOC's configure
script.
In its simplest form, this looks something like:
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT
$ sh ./configure \
--host=arm-none-linux-gnueabi
\end{Verbatim}
\noindent See the {\tt config/conf-crossarm.sh} file for an example of
configuring SHOC to build ARM Linux executables on an x86\_64 Linux system
using the Sourcery CodeBench Lite ARM toolchain.\footnote{Sourcery CodeBench
Lite Edition is available from Mentor Graphics at
{\tt http://www.mentor.com/embedded-software/sourcery-tools/sourcery-codebench/editions/lite-edition}.}
Note that in that example configure script we are only configuring to build
serial OpenCL benchmark programs.
\subsection{Regenerating the SHOC configure script}
If desired, the SHOC configure script can be regenerated on the target system.
Make sure that GNU autoconf and GNU automake (for aclocal) can be found with
your PATH environment variable, and do the following:
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT
$ sh ./build-aux/bootstrap.sh
\end{Verbatim}
Once this command has finished building a new configure script, follow the
instructions given earlier in this section to configure SHOC.
% Building
\section{Building}\label{sec:building}
Once the SHOC benchmark suite has been configured, it is built using:
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT
$ make
\end{Verbatim}
Unlike previous versions of SHOC, control over whether to build the
OpenCL, CUDA, OpenCL+MPI, and CUDA+MPI versions of the benchmarks is
exercised at configure time instead of build time.
Therefore, commands like 'make cuda' are no longer supported.
% Running
\section{Running}\label{sec:running}
SHOC includes a driver script for running either the CUDA or OpenCL versions
of the benchmarks. The driver script assumes MPI is in your current path,
so be sure to set the appropriate environement variables.
\begin{Verbatim}[frame=single]
$ export PATH=$PATH:/path/to/mpi/bin/dir
$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/mpi/lib/dir
\end{Verbatim}
To run the benchmarks on a single device, execute the following. Be sure
to specify \verb+-cuda+ or \verb+-opencl+ and a device number \verb+-d n+
where \verb+n+ is the device you want to test. The example below shows how
to test device zero with a small problem using CUDA.
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT/tools
$ perl driver.pl -s 1 -cuda -d 0
\end{Verbatim}
To run on more than one node, supply the script with the number of nodes and
a comma separated list of the devices that you want to use on each node. For
example, if running on 4 nodes that each have 2 devices, execute the following:
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT/tools
$ perl driver.pl -cuda -n 4 -d 0,1 -s 1 -cuda
\end{Verbatim}
Or, if on those same four nodes, you only wanted to test device one on each node:
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT/tools
$ perl driver.pl -cuda -n 4 -d 1 -s 1 -cuda
\end{Verbatim}
These scripts output benchmark results to a file in comma separated value (CSV) format.
After building SHOC, individual benchmark programs will be left in the
directory tree rooted at \verb+$SHOC_ROOT/bin+.
Run single-process benchmark programs with commands like:
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT/bin
$ ./Serial/OpenCL/Scan
\end{Verbatim}
and parallel benchmark programs with commands like:
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT/bin
$ mpirun -np 8 ./EP/Scan
\end{Verbatim}
Use 1 MPI rank per GPU.
\section{Benchmark Programs}\label{sec:programs}
% list and short description of benchmarks
The SHOC benchmark suite currently contains benchmark programs, categorized
based on complexity. Some measure low-level ``feeds and speeds'' behavior
(Level 0), some measure the performance of a higher-level operation such
as a Fast Fourier Transform (FFT) (Level 1), and the others measure
real application kernels (Level 2).
\newpage
\begin{itemize}
\item Level 0
\begin{itemize}
\item {\bf BusSpeedDownload}: measures bandwidth of transferring data
across the PCIe bus to a device.
\item {\bf BusSpeedReadback}: measures bandwidth of reading data back
from a device.
\item {\bf DeviceMemory}: measures bandwidth of memory accesses to
various types of device memory including global, local, and image
memories.
\item {\bf KernelCompile}: measures compile time for several OpenCL
kernels, which range in complexity
\item {\bf MaxFlops}: measures maximum achievable floating point
performance using a combination of auto-generated and hand coded
kernels.
\item {\bf QueueDelay}: measures the overhead of using the OpenCL
command queue.
\end{itemize}
\item Level 1
\begin{itemize}
    \item {\bf BFS}: a breadth-first search, a common graph traversal. Requires
       a device which supports atomic operations. (CC $>$ 1.2)
    \item {\bf FFT}: forward and reverse 1D FFT.
    \item {\bf MD}: computation of the Lennard-Jones potential from molecular dynamics.
\item {\bf Reduction}: reduction operation on an array of single
or double precision floating point values.
\item {\bf SGEMM}: matrix-matrix multiply.
\item {\bf Scan}: scan (also known as parallel prefix sum) on an array
of single or double precision floating point values.
\item {\bf Sort}: sorts an array of key-value pairs using a radix sort
algorithm
\item {\bf Spmv}: sparse matrix-vector multiplication
\item {\bf Stencil2D}: a 9-point stencil operation applied to a 2D data
set. In the MPI version, data is distributed across MPI processes
organized in a 2D Cartesian topology, with periodic halo exchanges.
\item {\bf Triad}: a version of the STREAM Triad benchmark, implemented
in OpenCL and CUDA. This version includes PCIe transfer time.
\end{itemize}
\item{Level 2}
\begin{itemize}
\item {\bf QTC}: quality threshold clustering
\item {\bf S3D}: A computationally-intensive kernel from the
S3D turbulent combustion simulation program\cite{s3d}.
\end{itemize}
\end{itemize}
To see the options each program supports and their default values, run
{\it program} \verb+--help+ for serial versions and \verb+mpirun -np 1+ {\it program} \verb+--help+
for parallel versions.
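For example, for the serial OpenCL Scan benchmark:
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT/bin
$ ./Serial/OpenCL/Scan --help
\end{Verbatim}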
Many SHOC benchmark programs test both single precision and double precision
arithmetic.
For programs that support both precisions, the program first runs the
single precision benchmark test, then attempts to determine if the
OpenCL or CUDA device being used supports double precision arithmetic.
If so, the program runs the double precision test.
If the target device does not support double precision arithmetic, the
driver script reports ``NoResult'' as the result.
If some error occurred when running the benchmark, the driver script reports
``BenchmarkError.''
In that case, please see the benchmark log files in
{\tt\$SHOC\_ROOT/tools/Logs} for more information about the error.
Please also report such situations (including the contents of the appropriate
log file) to the SHOC developers at the following email address:
\verb+[email protected]+.
Benchmarks are built not only as serial programs (S) but also as
embarrassingly parallel (EP) or true parallel (TP) programs.
The following table indicates which versions of each program that
SHOC builds.
\begin{table}
\centering
\begin{tabular}{|l|c|c|c|c|c|c|}
\hline
& \multicolumn{3}{c|}{\em OpenCL} & \multicolumn{3}{c|}{\em CUDA} \\
\hline
{\em Program} & {\em S} & {\em EP} & {\em TP} & {\em S} & {\em EP} & {\em TP} \\
\hline\hline
BusSpeedDownload & x & x & & x & x & \\
BusSpeedReadback & x & x & & x & x & \\
DeviceMemory & x & x & & x & x & \\
KernelCompile & x & x & & & & \\
MaxFlops & x & x & & x & x & \\
QueueDelay & x & x & & & & \\
BFS & x & x & & x & x & \\
FFT & x & x & & x & x & \\
MD & x & x & & x & x & \\
Reduction & x & x & x & x & x & x \\
QTC & & & & x & & x \\
S3D & x & x & & x & x & \\
SGEMM & x & x & & x & x & \\
Scan & x & x & x & x & x & x \\
Sort & x & x & & x & x & \\
Spmv & x & x & & x & x & \\
Stencil2D & x & & x & x & & x \\
Triad & x & x & & x & x & \\
BusCont & & x & & & x & \\
MTBusCont & & x & & & x & \\
\hline
\end{tabular}
\caption{Programming APIs and parallelism models of SHOC programs}
\end{table}
% Source code structure
\section{Source Tree}\label{sec:source}
SHOC is distributed as a compressed tar archive.
Let \verb+$SHOC_ROOT+ represent the directory that will hold the SHOC source
tree.
The SHOC archive can be uncompressed and extracted using
\begin{Verbatim}[frame=single]
$ cd $SHOC_ROOT
$ tar xvzf shoc-x.y.tar.gz
\end{Verbatim}
\pagebreak
The SHOC source tree directory structure is as follows:
\begin{Verbatim}[frame=single]
$SHOC_ROOT
bin # benchmark executables are built here
EP # "embarrassingly parallel" benchmarks
CUDA
OpenCL
TP # true parallel benchmarks
CUDA
OpenCL
Serial # single-node benchmarks
CUDA
OpenCL
config # SHOC configuration files
doc # SHOC documentation files
lib # SHOC auxiliary libraries are built here
src # SHOC source files
common # programming-model independent helper code
cuda # CUDA-based benchmarks
level0 # low-level CUDA benchmarks
level1 # higher-level CUDA benchmarks
level2 # application level CUDA benchmarks
mpi # MPI-specific benchmarks
common # code needed by programs using MPI
contention # a contention benchmark
contention-mt # a multithreaded version of the contention benchmark
opencl # OpenCL benchmarks
common # code needed for all OpenCL benchmarks
level0 # low-level OpenCL benchmarks
level1 # higher-level OpenCL benchmarks
level2 # application-level OpenCL benchmarks
stability # a CUDA stability test
\end{Verbatim}
\section{Support}\label{sec:support}
Support for SHOC is provided on a best-effort basis by the Dakar team members
and eventually by its user community via several mailing lists.
\begin{itemize}
\item \verb+[email protected]+: mailing list for announcements
regarding new versions or important updates.
\item \[email protected]+: email address for requesting
help in building or using SHOC, or for providing feedback about the benchmark
suite.
\item \[email protected]+: mailing list for internal
development discussions by the SHOC development team.
\end{itemize}
\section*{Revision History}
\begin{itemize}
\item 0.1 September 2009
\item 0.2 December 2009
\item 0.3 June 2010
\item 0.4 September 2010
\item 0.5 October 2010
\item 1.0 November 2010
\item 1.01 January 2011
\item 1.0.2 March 2011
\item 1.0.3 March 2011
\item 1.1.0 June 2011
\item 1.1.1 July 2011
\item 1.1.2 November 2011
\item 1.1.3 December 2011
\item 1.1.4 May 2012
\end{itemize}
% References
\bibliographystyle{plain}
\bibliography{shoc}
\end{document}
| {"hexsha": "ec352df792a7ce08fe2ef1e25a877029394f52c3", "size": 21545, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/shoc-manual.tex", "max_stars_repo_name": "ashwinma/mpiacc-contention-tests", "max_stars_repo_head_hexsha": "6246ad9b177095220aa9cea5c85def7effd78222", "max_stars_repo_licenses": ["BSD-3-Clause-Clear", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/shoc-manual.tex", "max_issues_repo_name": "ashwinma/mpiacc-contention-tests", "max_issues_repo_head_hexsha": "6246ad9b177095220aa9cea5c85def7effd78222", "max_issues_repo_licenses": ["BSD-3-Clause-Clear", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/shoc-manual.tex", "max_forks_repo_name": "ashwinma/mpiacc-contention-tests", "max_forks_repo_head_hexsha": "6246ad9b177095220aa9cea5c85def7effd78222", "max_forks_repo_licenses": ["BSD-3-Clause-Clear", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6509433962, "max_line_length": 145, "alphanum_fraction": 0.7312601532, "num_tokens": 5424} |
import numpy as np
import torch
import torch.nn as nn,torch.nn.functional as F,torch.optim as optim
from loader import dataReader
######### 2. Define the convolutional neural network
class MnistNet(nn.Module):
    def __init__(self):
        super(MnistNet, self).__init__()
        # NOTE: the original scaffold left __init__ and forward empty; the layers
        # below are a minimal example architecture (an assumption, not the original design).
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=2)
        self.fc1 = nn.Linear(16 * 14 * 14, 128)
        self.fc2 = nn.Linear(128, 10)
    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)  # N x 16 x 14 x 14
        x = x.view(x.size(0), -1)
        return self.fc2(F.relu(self.fc1(x)))
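# Shape walk-through for the example net above (illustrative only; assumes the
# standard 28x28 single-channel MNIST images produced by the loader):
#   input  N x 1 x 28 x 28  --conv1 + pool-->  N x 16 x 14 x 14
#   flatten --> N x 3136  --fc1--> N x 128  --fc2--> N x 10 class logits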
# 3. Train the network
def train(loader):
model = MnistNet()
    ######## Use cross-entropy classification loss and SGD with momentum.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
for epoch in range(1):
running_loss = 0.0
for i, data in enumerate(loader):
inputs, labels = zip(*data)
inputs = np.array(inputs).astype('float32')
labels = np.array(labels).astype('int64')
            inputs = torch.from_numpy(inputs).unsqueeze(1)  # add the channel dimension (NCHW)
labels = torch.from_numpy(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 100 == 99:
last_loss = running_loss / 100 # loss per batch
print(' batch {} loss: {}'.format(i + 1, last_loss))
running_loss = 0.
if i==199:
break
print('Finished Training')
return model
# 4. Test the network
def test(PATH,loader):
    # reload the saved model
model = MnistNet()
model.load_state_dict(torch.load(PATH))
correct = 0
total = 0
with torch.no_grad():
for data in loader:
images, labels = zip(*data)
images = np.array(images).astype('float32')
labels = np.array(labels).astype('int64')
images = torch.from_numpy(images).unsqueeze(1)
labels = torch.from_numpy(labels)
outputs = model(images)
_, predicted = torch.max(outputs, 1)#torch.argmax
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the {:d} test images: {:f}%'.format(total,100 * correct / total))
return model
if __name__ == '__main__':
BATCH_SIZE = 16
PATH = '/data/workspace/myshixun/mnist/model/mnist_model.pth'
train_loader = dataReader('/data/workspace/myshixun/mnist/data/train-images-idx3-ubyte', '/data/workspace/myshixun/mnist/data/train-labels-idx1-ubyte', BATCH_SIZE, True)
test_loader = dataReader('/data/workspace/myshixun/mnist/data/t10k-images-idx3-ubyte', '/data/workspace/myshixun/mnist/data/t10k-labels-idx1-ubyte', BATCH_SIZE, False)
model = train(train_loader)
    # quickly save the trained model:
torch.save(model.state_dict(), PATH)
test(PATH,test_loader) | {"hexsha": "02686ebcda271d30521c3290be24f6ec956c336b", "size": 2982, "ext": "py", "lang": "Python", "max_stars_repo_path": "task_2/src/mnist_net.py", "max_stars_repo_name": "Hickey3197/educoder", "max_stars_repo_head_hexsha": "bf45cef420c7b1f1d052cb108e9be8e14a724068", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "task_2/src/mnist_net.py", "max_issues_repo_name": "Hickey3197/educoder", "max_issues_repo_head_hexsha": "bf45cef420c7b1f1d052cb108e9be8e14a724068", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "task_2/src/mnist_net.py", "max_forks_repo_name": "Hickey3197/educoder", "max_forks_repo_head_hexsha": "bf45cef420c7b1f1d052cb108e9be8e14a724068", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.275, "max_line_length": 174, "alphanum_fraction": 0.5757880617, "include": true, "reason": "import numpy", "num_tokens": 734} |
#! /usr/bin/env python
"""
Author: Jeremy M. Stober
Program: MDP.PY
Date: Monday, January 11 2010
Description: Basic MDP framework for Markov Decision Problems.
"""
import os, sys, getopt, pdb, string
import functools
import random as pr
import numpy as np
import numpy.random as npr
import scipy.sparse as sp
from utils import sp_create, sp_create_data,flip
class Features( object ):
"""
Mixin for specific feature functions. Default is tabular feature
representation.
"""
def __init__(self, nstates, nactions):
self.nstates = nstates
self.nactions = nactions
self.feature_cnt = nstates * nactions + 1
def nfeatures(self):
return self.feature_cnt
def phi(self, s, a, sparse=False, format="csr"):
if sparse:
cols = np.array([0,0])
rows = np.array([s + (a * self.nstates), self.feature_cnt - 1])
data = np.array([1.0,1.0])
sparse_features = sp_create_data(data,rows,cols,self.feature_cnt,1,format)
return sparse_features
else:
features = np.zeros(self.feature_cnt)
features[s + (a * self.nstates)] = 1.0
features[-1] = 1.0
return features
def get_sparsity(self):
return -1.0 # not implemented
def vphi(self, s):
features = np.zeros(self.nstates + 1)
features[s] = 1.0
features[-1] = 1.0
return features
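# Illustrative sketch of the tabular layout above (assumed values, not taken from
# the original code): with nstates=4 and nactions=2, phi(s=1, a=1) is a vector of
# length 4*2 + 1 = 9 with index 1 + 1*4 = 5 set to 1.0 and the trailing bias entry
# set to 1.0; vphi(s) works the same way over nstates + 1 entries.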
class AliasFeatures(Features):
"""
A feature class that also has a certain subset of states that are aliased (produce identical feature vectors).
"""
def __init__(self, nstates, nactions, aliases):
super(AliasFeatures, self).__init__(nstates,nactions)
        # aliases is a dictionary where the keys are canonical state names and values are sets of aliased states.
self.aliases = aliases
def find_canonical(self, s):
for canonical, aliases in self.aliases.items():
if s in aliases:
return canonical
return s
def phi(self, s, a, sparse=False, format="csr"):
s = self.find_canonical(s)
return super(AliasFeatures,self).phi(s, a, sparse = sparse, format = format)
def vphi(self, s):
s = self.find_canonical(s)
return super(AliasFeatures,self).vphi(s)
class PolyFeatures( Features ):
"""
Polynomial feature representation.
"""
def __init__(self, nstates, nactions, maxexp = 9 ):
self.nstates = nstates
self.nactions = nactions
self.maxexp = maxexp
self.feature_cnt = nstates * maxexp + 1
def phi(self, s, a):
features = np.zeros(self.feature_cnt)
for i in range(self.maxexp):
features[a * self.maxexp + i] = s ** i
features[-1] = 1.0
return features
def vphi(self, s):
features = np.zeros(self.maxexp + 1)
for i in range(self.maxexp):
features[i] = s ** i
features[-1] = 1.0
return features
class IndicatorFeatures( Features ):
"""
Indicate the state and the action.
"""
def __init__(self, nstates, nactions):
self.nstates = nstates
self.nactions = nactions
self.feature_cnt = nstates + nactions + 1
def phi(self, s, a):
features = np.zeros(self.feature_cnt)
features[:] = 0
features[s] = 1.0
        features[self.nstates + a] = 1.0
features[-1] = 1.0
return features
def vphi(self, s):
features = np.zeros(self.nstates + 1)
features[s] = 1.0
features[-1] = 1.0
return features
class HashFeatures( Features ):
"""
Hash features.
"""
def __init__(self, nstates, nactions, nfeatures = 15 ):
self.nstates = nstates
self.nactions = nactions
self.feature_cnt = nfeatures
self.prime = 191
self.a = 36
self.b = 105
def phi(self, s, a):
features = np.zeros(self.feature_cnt)
x = (s + 1) * (a + 1)
f = (self.a * x + self.b) % self.prime
g = f % self.feature_cnt
features[g] = 1.0
return features
def vphi(self, s):
features = np.zeros(self.feature_cnt)
x = (s + 1)
        f = (self.a * x + self.b) % self.prime
g = f % self.feature_cnt
features[g] = 1.0
return features
class MDP( Features ):
def __init__(self, nstates = 32, nactions = 4):
self.nstates = nstates
self.sindices = range(self.nstates)
self.nactions = nactions
self.actions = range(self.nactions)
self.vcnts = np.zeros(self.nstates)
self.gamma = 0.9
# possible start states for reset problems
if not hasattr(self, 'startindices'):
self.startindices = range(self.nstates)
# possible end states that initiate resets
if not hasattr(self, 'endstates'):
self.endstates = []
# model tensor is initialized from a function that needs to be overwritten in subclasses
indices = [(a,i,j) for a in self.actions for i in self.sindices for j in self.sindices]
shape = (self.nactions,self.nstates,self.nstates)
self.model = np.array([self.initialize_model(*idx) for idx in indices]).reshape(*shape)
self.rewards = np.array([self.initialize_rewards(*idx) for idx in indices]).reshape(*shape)
self.reset()
Features.__init__(self, self.nstates, self.nactions) # feature mixin supplies phi and related methods
# Both the transition model and rewards are tensors. For a
# particular action, the model tensor resolves to a stochastic
# matrix. The reward tensor should be sparse (unlike the value
# function tensor).
# In particular, if generating random problem instances, care
# must be taken to make sure that the distribution over
# transition models and rewards "makes sense."
# The default form of the random model will be that of a
# "gridworld." Actions are "left","up","right","down" and
# result in non-zero transition probabilities for the
# appropriate neighboring states in particular cases.
def value_iteration(self):
"""
The optimal value function (and optimal policy) can be computed from the model directly using dynamic programming.
"""
# compute values over states
values = np.zeros(self.nstates)
rtol = 1e-4
# compute the value function
condition = True
i = 0
while condition:
delta = 0
for s in self.sindices:
v = values[s]
sums = np.zeros(self.nactions)
for a in self.actions:
for t in self.sindices:
sums[a] += self.model[a,s,t] * (self.rewards[a,s,t] + self.gamma * values[t])
values[s] = np.max(sums)
delta = max(delta, abs(v - values[s]))
            print(i, delta)
i += 1
if delta < rtol:
break
# compute the optimal policy
policy = np.zeros(self.nstates, dtype=int) # record the best action for each state
for s in self.sindices:
sums = np.zeros(self.nactions)
for a in self.actions:
for t in self.sindices:
sums[a] += self.model[a,s,t] * (self.rewards[a,s,t] + self.gamma * values[t])
policy[s] = np.argmax(sums)
return values,policy
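    # The loop above repeats the Bellman optimality backup (written here only as
    # a reference comment, using the tensors defined in __init__):
    #   V(s) <- max_a sum_t model[a,s,t] * (rewards[a,s,t] + gamma * V(t))
    # and stops once the largest per-state change (delta) falls below rtol.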
def reset_counts(self):
self.vcnts = np.zeros(self.nstates)
def set_state(self, state):
self.current = state
self.vcnts[self.current] += 1
def initial_policy(self):
return np.zeros(self.nfeatures())
    def initialize_rewards(self, a, i, j):
if j == 31:
return 1.0
else:
return 0.0
    def is_markov_process(self):
        return self.nactions == 1
def initialize_model(self, a, i, j):
"""
This function fills in the model tensor for each pair of
states and action. This needs to be overwritten when
subclassing MDP.
"""
if a == 0:
# left
# left edge
if i in (0,8,16,24):
if i == j:
return 1.0
else:
return 0.0
elif j == i - 1:
return 1.0
else:
return 0.0
elif a == 1:
# up
# top edge
if 0 <= i <= 7:
if i == j:
return 1.0
else:
return 0.0
elif j == i - 8:
return 1.0
else:
return 0.0
elif a == 2:
# right
# right edge
if i in (7,15,23,31):
if i == j:
return 1.0
else:
return 0.0
elif j == i + 1:
return 1.0
else:
return 0.0
elif a == 3:
# down
# bottom edge
if 24 <= i <= 31:
if i == j:
return 1.0
else:
return 0.0
elif j == i + 8:
return 1.0
else:
return 0.0
def failure(self):
if self.current in self.endstates:
return True
else:
return False
def state(self):
return self.current
def observe(self, s):
pass
def sample_move(self, s, a, obs=False):
# What might the next state after (s,a) be?
transition_distribution = self.model[a,s]
assert np.sum(transition_distribution) == 1.0, "a: %d, s: %d, p: %f" % (a, s, np.sum(transition_distribution))
choice = npr.multinomial(1,transition_distribution)
nonzero = np.nonzero(choice)[0]
assert len(nonzero) == 1
next = nonzero[0]
reward = self.rewards[a,s,next]
if obs:
return (self.observe(s), a, reward, self.observe(next))
else:
return (s, a, reward, next)
def move(self, a, obs=False):
self.previous = self.current
transition_distribution = self.model[a,self.current]
assert np.sum(transition_distribution) == 1.0, "a: %d, s: %d, p: %f" % (a, self.current, np.sum(transition_distribution))
choice = npr.multinomial(1,transition_distribution)
nonzero = np.nonzero(choice)[0]
assert len(nonzero) == 1
self.current = nonzero[0]
self.reward = self.rewards[a,self.previous,self.current]
if obs:
return (self.observe(self.previous), a, self.reward, self.observe(self.current))
else:
return (self.previous, a, self.reward, self.current)
def linear_policy(self, w,s):
# note that phi should be overridden (or learned in some way)
return np.argmax([np.dot(w,self.phi(s,a)) for a in range(self.nactions)])
def epsilon_linear_policy(self, epsilon, w, s):
best = np.argmax([np.dot(w,self.phi(s,a)) for a in range(self.nactions)])
if flip(epsilon):
return pr.choice(self.actions)
else:
return best
def evaluate_func_policy(self,policy):
traces = []
for s in self.startindices:
traces.append(self.single_episode(policy,start=s,tlimit=25))
return traces
def evaluate_policy(self, w):
policy = functools.partial(self.linear_policy, w)
traces = []
for s in self.startindices:
traces.append(self.single_episode(policy,start=s,tlimit=25))
# need to evaluate traces
return traces
def terminal(self):
# override in environments that have or need a terminal state
pass
def biased_choice(self, choices, pvalues):
markers = []
acc = 0
for val in pvalues:
acc += val
markers.append(acc)
r = pr.random()
        print(markers, r)
return np.searchsorted(markers, r)
def softmax(self, values):
e = np.exp(values)
d = np.sum(e)
p = e / d
        print(p)
return self.biased_choice(self.actions, p)
def soft_linear_policy(self, w, s):
qa = []
for a in range(self.nactions):
f = self.phi(s, a)
qa.append(np.dot(w, f))
return self.softmax(qa)
def callback(self, *args, **kwargs):
pass
def generate_value_function(self, w):
qfunc = {}
vfunc = {}
for s in range(self.nstates):
maxq = -1e6
for a in range(self.nactions):
value = np.dot(w, self.phi(s, a))
qfunc[(s,a)] = value
if value > maxq:
maxq = value
vfunc[s] = value
return qfunc,vfunc
def generate_policy(self, w):
policy = {}
for s in range(self.nstates):
a = self.linear_policy(w,s)
policy[s] = a
return policy
def generate_all_policies(self, w, threshold = 1e-6):
"""
Enumerate all (equivalent) policies given the current weights.
"""
all_policies = {}
for s in range(self.nstates):
values = np.array([np.dot(w, self.phi(s,a)) for a in range(self.nactions)])
# this is basically like selecting all indices within a tolerance of the max value
actions = np.nonzero(np.max(values) - values < threshold)
all_policies[s] = actions[0]
return all_policies
def random_policy(self, *args):
# random policy
return pr.choice(self.actions)
def generate_wrandom_policy(self, w):
# accumulate the weights for all the actions
bins = np.cumsum(w)
def policy(*args):
value = pr.random()
ind = np.searchsorted(bins,value)
return self.actions[ind]
return policy
def reset(self, method = "random"):
if method == "random":
self.current = pr.choice(self.startindices)
else: # choose least visited state
self.current = np.argmin(self.vcnts)
self.previous = -1
self.action = -1
self.reward = 0
self.vcnts[self.current] += 1
def identity(self, arg):
return arg
def single_episode(self, policy = None, obs = False, tlimit=1000, start=None):
        if start is not None:
self.current = start
else:
self.reset()
if policy is None: policy = self.random_policy
if obs:
observe = self.observe
else:
observe = self.identity
trace = []
# initialize the actions
next_action = policy(observe(self.current))
while not self.current in self.endstates and len(trace) < tlimit:
pstate, paction, reward, state = self.move(next_action, obs=obs)
next_action = policy(observe(self.current)) # next state
trace.append((pstate, paction, reward, state, next_action))
return trace
def complete_trace(self,policy=None,obs=False,mult=1):
"""
Generate a trace with 'mult' of each possible transition.
"""
trace = []
if policy is None: policy = self.random_policy
if obs:
observe = self.observe
else:
observe = self.identity
        for s in self.sindices:
for a in self.actions:
for i in range(mult):
self.current = s
pstate, paction, reward, state = self.move(a, obs=obs)
next_action = policy(observe(self.current))
trace.append((pstate, paction, reward, state, next_action))
return trace
def test(self, policy, obs = False):
if obs:
observe = self.observe
else:
observe = self.identity
result = {}
for s in range(self.nstates): #self.states does get all the states?
self.current = s
trace = [s]
while True:
next_action = policy(observe(self.current))
self.move(next_action)
                if self.current in result and result[self.current]:
                    # add additional trace states to the result set with positive outcome
for i in trace:
result[i] = 1
break
                elif self.current in result and not result[self.current]:
# add additional trace states to the result set with negative outcome
for i in trace:
result[i] = 0
break
elif self.current in self.endstates:
result[self.current] = 1
for i in trace:
result[i] = 1
break
elif self.current in trace:
# cycle detected and no endstate - fail
result[self.current] = 0
for i in trace:
result[i] = 0
break
else:
trace.append(self.current)
return result
def trace(self, tlen = 1000, policy=None, obs = False, additional_endstates = None, reset_on_cycle = False, reset_on_endstate = False, stop_on_cycle = False):
# generate a trace using whatever policy is currently implemented
if policy is None: policy = self.random_policy
if obs:
observe = self.observe
else:
observe = self.identity
if additional_endstates: # can choose states that cause a reset
endstates = self.endstates + additional_endstates
else:
endstates = self.endstates
trace = []
next_action = policy(observe(self.current))
for i in range(tlen):
pstate, paction, reward, state = self.move(next_action, obs=obs)
next_action = policy(observe(self.current))
trace.append((pstate, paction, reward, state, next_action))
if reset_on_endstate and self.current in endstates:
self.reset()
if reset_on_cycle and (pstate, paction, reward, state, next_action) in trace[:-1]:
self.reset()
if stop_on_cycle and (pstate, paction, reward, state, next_action) in trace[:-1]:
break
return trace
class FastMDP(MDP):
"""
There is a useful subcase of MDPs with large state spaces and
deterministic actions which we may wish to model. In these cases
it does not make sense to compute the entire model in advance.
"""
def __init__(self, nstates = 32, nactions = 4):
self.nstates = nstates
self.sindices = range(self.nstates)
self.nactions = nactions
self.actions = range(self.nactions)
self.vcnts = np.zeros(self.nstates)
self.gamma = 0.9
if not hasattr(self, 'startindices'):
self.startindices = range(self.nstates)
# possible end states that initiate resets
if not hasattr(self, 'endstates'):
self.endstates = []
self.model = np.zeros((nactions,nstates),dtype='int')
for a in self.actions:
for i in self.sindices:
self.model[a,i] = self.get_next_state(a,i)
self.rewards = np.zeros(nstates)
for i in self.sindices:
self.rewards[i] = self.get_reward(i)
self.reset()
Features.__init__(self, self.nstates, self.nactions) # feature mixin supplies phi and related methods
@staticmethod
def load(filename):
import re
fp = open(filename)
nactions = -1
nstates = -1
endstates = []
model = None
rewards = None
state = 'nostate'
for line in fp.readlines():
if state == 'nostate':
if re.match(r'nactions', line):
_,nactions = line.split()
nactions = int(nactions)
elif re.match(r'nstates', line):
_,nstates = line.split()
nstates = int(nstates)
elif re.match(r'endstates', line):
endstates = [int(i) for i in line.split()[1:]]
elif re.match(r'rewards', line):
state = 'rewards'
rewards = np.zeros(nstates)
elif re.match(r'model', line):
state = 'model'
model = np.zeros((nactions,nstates))
elif state == 'model':
s,a,n = line.split()
model[int(a),int(s)] = int(n)
elif state == 'rewards':
s,r = line.split()
rewards[int(s)] = r
        instance = FastMDP(nstates=nstates, nactions=nactions)
instance.endstates = endstates
instance.model = model
instance.rewards = rewards
return instance
def save(self, filename):
"""
Save MDP in custom format for easy inspection.
"""
fp = open(filename, 'w')
fp.write('FastMDP\n\n')
fp.write("nactions {0}\n".format(self.nactions))
fp.write("nstates {0}\n\n".format(self.nstates))
fp.write("endstates ")
for i in self.endstates:
fp.write("{0} ".format(i))
fp.write("\n\nrewards\n")
for i in self.sindices:
fp.write("{0} {1}\n".format(i,self.rewards[i]))
fp.write("\nmodel\n")
for i in self.sindices:
for a in self.actions:
fp.write("{0} {1} {2}\n".format(i, a, self.model[a,i]))
def sample_move(self, s, a, obs=False):
next = self.model[a,s]
reward = self.rewards[next]
if obs:
return (self.observe(s), a, reward, self.observe(next))
else:
return (s, a, reward, next)
def move(self, a, obs=False):
self.previous = self.current
self.current = self.model[a,self.previous]
self.reward = self.rewards[self.current]
self.vcnts[self.current] += 1
if obs:
return (self.observe(self.previous), a, self.reward, self.observe(self.current))
else:
return (self.previous, a, self.reward, self.current)
class SparseMDP( MDP ):
"""
Reward and model transition distributions are often sparse
(esp. for big problems) and so it makes sense to use sparse
matrices instead of dense matrices for these.
"""
def __init__(self, nstates = 32, nactions = 4):
self.nstates = nstates
self.sindices = range(self.nstates)
self.nactions = nactions
self.actions = range(self.nactions)
self.vcnts = np.zeros(self.nstates)
self.gamma = 0.9
# possible start states for reset problems
if not hasattr(self, 'startindices'):
self.startindices = range(self.nstates)
# possible end states that initiate resets
if not hasattr(self, 'endstates'):
self.endstates = []
self.model = {}
for a in self.actions:
for i in self.sindices:
self.model[a,i] = self.initialize_model(a,i)
self.rewards = self.initialize_rewards()
self.reset()
Features.__init__(self, self.nstates, self.nactions) # feature mixin supplies phi and related methods
def sample_move(self, s, a, obs=False):
distribution = self.model[a,s]
states = [x[0] for x in distribution]
probs = [x[1] for x in distribution]
assert np.sum(probs) == 1.0, "a: %d, s: %d, p: %f" % (a, s, np.sum(probs))
choice = npr.multinomial(1,probs)
nonzero = np.nonzero(choice)[0]
assert len(nonzero) == 1
next = states[nonzero[0]]
reward = self.rewards[next]
if obs:
return (self.observe(s), a, reward, self.observe(next))
else:
return (s, a, reward, next)
def neighbors(self, actions = None):
"""
Return a matrix of size S x S where entry s_ij is 1 if there is an action that takes the agent from state i to state j with nonzero probability.
"""
if actions is None:
actions = self.actions
matrix = np.zeros((self.nstates, self.nstates))
        for s in self.sindices:
for a in actions:
distribution = self.model[a, s]
for (t, p) in distribution:
                    print(t)
if p > 0.0 and s != t:
matrix[s, t] = 1.0
return matrix
def move(self, a, obs=False):
self.previous = self.current
distribution = self.model[a,self.current] # list of (state, prob) pairs
states = [x[0] for x in distribution]
probs = [x[1] for x in distribution]
assert np.sum(probs) == 1.0, "a: %d, s: %d, p: %f" % (a, self.current, np.sum(probs))
choice = npr.multinomial(1,probs)
nonzero = np.nonzero(choice)[0]
assert len(nonzero) == 1
self.current = states[nonzero[0]]
self.reward = self.rewards[self.current]
if obs:
return (self.observe(self.previous), a, self.reward, self.observe(self.current))
else:
return (self.previous, a, self.reward, self.current)
def value_iteration(self):
"""
The optimal value function (and optimal policy) can be computed from the model directly using dynamic programming.
"""
# compute values over states
values = np.zeros(self.nstates)
rtol = 1e-4
# compute the value function
condition = True
i = 0
while condition:
delta = 0
for s in self.sindices:
v = values[s]
sums = np.zeros(self.nactions)
for a in self.actions:
dist = self.model[a,s]
for (t,p) in dist:
sums[a] += p * (self.rewards[t] + self.gamma * values[t])
values[s] = np.max(sums)
delta = max(delta, abs(v - values[s]))
            print(i, delta)
i += 1
if delta < rtol:
break
# compute the optimal policy
policy = np.zeros(self.nstates, dtype=int) # record the best action for each state
for s in self.sindices:
sums = np.zeros(self.nactions)
for a in self.actions:
dist = self.model[a,s]
for (t,p) in dist:
sums[a] += p * (self.rewards[t] + self.gamma * values[t])
policy[s] = np.argmax(sums)
return values,policy
class MP( MDP ):
"""
Sort of strange for a Markov Process to inherit from a MDP, but
since a MP is an MDP with one action, this method seems to
work. The idea is just to implement simulate instead of move. and
to limit the number of possible actions to 1.
"""
def __init__(self, nstates = 32):
MDP.__init__(self, nstates = nstates, nactions = 1)
def simulate(self):
"""
Simulate a single step in the Markov process.
"""
previous, action, reward, next = self.move(0)
return previous, reward, next # action == 0
| {"hexsha": "03d80be79905970ef8e085fd1c68bef24c69a995", "size": 27692, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/markovdp.py", "max_stars_repo_name": "stober/gridworld", "max_stars_repo_head_hexsha": "58762295687f890a66f1bfff0a22b05d62044d80", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2015-05-10T13:10:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-07T11:49:49.000Z", "max_issues_repo_path": "src/markovdp.py", "max_issues_repo_name": "stober/gridworld", "max_issues_repo_head_hexsha": "58762295687f890a66f1bfff0a22b05d62044d80", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/markovdp.py", "max_forks_repo_name": "stober/gridworld", "max_forks_repo_head_hexsha": "58762295687f890a66f1bfff0a22b05d62044d80", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2015-06-08T20:00:14.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-03T18:50:36.000Z", "avg_line_length": 30.8374164811, "max_line_length": 162, "alphanum_fraction": 0.5465116279, "include": true, "reason": "import numpy,import scipy", "num_tokens": 6469} |
from skimage.exposure import rescale_intensity
import numpy as np
import cv2
import argparse
def conv(image,kernal):
iH,iW=image.shape[:2]
kH,kW=kernal.shape[:2]
pad=(kH-1) // 2
image=cv2.copyMakeBorder(image,pad,pad,pad,pad,cv2.BORDER_REPLICATE)
out=np.zeros((iH,iW),dtype="float")
for y in np.arange(pad,iH+pad):
for x in np.arange(pad,iW+pad):
window=image[y-pad:y+pad+1,x-pad:x+pad+1]
k=(window*kernal).sum()
out[(y-pad),(x-pad)]=k
out=rescale_intensity(out, in_range=(0,255))
out=(out*255).astype("uint8")
return out
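# Quick sanity check for conv() (illustrative; `gray` is assumed to be any
# single-channel uint8 image): a 3x3 identity kernel should return the input
# essentially unchanged, up to the rescale_intensity/uint8 round trip inside.
#   identity = np.zeros((3, 3), dtype="float"); identity[1, 1] = 1.0
#   assert conv(gray, identity).shape == gray.shape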
ap=argparse.ArgumentParser()
ap.add_argument("-i","--input",required=True,help="path to input")
args=vars(ap.parse_args())
smallblur=np.ones((7,7),dtype="float")*(1/(7*7))
largeblur=np.ones((21,21),dtype="float")*(1/(21*21))
sharpen=np.array(([0,-1,0],
[-1,5,-1],
[0,-1,0]),dtype="int")
laplacian=np.array(([0,1,0],
[1,-4,1],
[0,1,0]),dtype="int")
sobelX=np.array((
    [-1,0,1],
    [-2,0,2],
    [-1,0,1]), dtype="int")
sobelY=np.array((
    [-1,-2,-1],
    [0,0,0],
    [1,2,1]), dtype="int")
emboss=np.array((
[-2,-1,0],
[-1,0,1],
[0,1,2]),dtype="int")
kernalbank=(("smallblur",smallblur),
("largeblur",largeblur),
("sharpen",sharpen),
("laplacian",laplacian),
("sobelX",sobelX),
("sobelY",sobelY),
("emboss",emboss))
image=cv2.imread(args["input"])
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
for (kernalname,k) in kernalbank:
print("[INFO] applying {} kernal".format(kernalname))
convolveout=conv(gray,k)
opencvout=cv2.filter2D(gray,-1,k)
cv2.imshow("original",gray)
cv2.imshow("{}opencv".format(kernalname),opencvout)
cv2.imshow("{}convolution".format(kernalname),convolveout)
    cv2.waitKey(0)
cv2.destroyAllWindows()
| {"hexsha": "bd1c659fb5e18a8efcd9746d29a7d5e185359ced", "size": 1970, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/convolution.py", "max_stars_repo_name": "suhaneshivam/backprop", "max_stars_repo_head_hexsha": "2539948fccf21d4c515476144d8d93b5094f9e09", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/convolution.py", "max_issues_repo_name": "suhaneshivam/backprop", "max_issues_repo_head_hexsha": "2539948fccf21d4c515476144d8d93b5094f9e09", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-03T08:29:02.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-03T08:29:22.000Z", "max_forks_repo_path": "code/convolution.py", "max_forks_repo_name": "suhaneshivam/backprop", "max_forks_repo_head_hexsha": "2539948fccf21d4c515476144d8d93b5094f9e09", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2564102564, "max_line_length": 74, "alphanum_fraction": 0.5654822335, "include": true, "reason": "import numpy", "num_tokens": 631} |
import os
import glob
import trimesh
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from . import utils
def plot_csv_column(pInFolder, pOutFolder, pCSV, pColName, pClip = 1000, label_size=5) :
cwd = os.getcwd()
inFolder = cwd + pInFolder
csv = inFolder + pCSV
models_df = pd.read_csv(csv)
column = models_df[pColName]
labels = dict()
for c in column :
if c in labels :
labels[c] += 1
else :
labels[c] = 1
for key in labels.keys() :
if labels[key] > pClip :
labels[key] = pClip
fig = plt.figure()
plt.barh(list(labels.keys()), list(labels.values()))
plt.tick_params(axis='y', which='major', labelsize=label_size)
plt.tight_layout()
plt.grid(True)
plt.title('{0} classes'.format(len(labels.keys())))
fig.savefig(cwd + pOutFolder + pColName + '.pdf', bbox_inches='tight')
def plot_csv_one_hot(pInFolder, pOutFolder, pCSV) :
cwd = os.getcwd()
inFolder = cwd + pInFolder
csv = inFolder + pCSV
models_df = pd.read_csv(csv)
models_df = models_df.iloc[:, 1:]
labels = { label : 0 for label in models_df.columns.values }
for key, value in labels.items() :
labels[key] = (models_df[key] == 1.0).sum()
fig = plt.figure()
plt.barh(list(labels.keys()), list(labels.values()))
plt.tick_params(axis='y', which='major', labelsize=10)
plt.tight_layout()
plt.grid(True)
plt.title('{0} classes'.format(len(labels.keys())))
fig.savefig(cwd + pOutFolder + pCSV[:pCSV.rfind('.')] + '_distr.pdf', bbox_inches='tight')
def plot_elements_from_classes(pInfolder, pOutFolder, pCSV, pColumn, pFraction, pClasses) :
fig = plt.figure()
utils.clear_make_dir(pOutFolder)
if pCSV is not None :
elem_df = pd.read_csv(pCSV)
if pClasses is None :
pClasses = set()
for sample in elem_df.loc[:, pColumn] :
pClasses.add(sample)
for class_name in pClasses :
class_name = class_name.replace('\\', '_')
class_name = class_name.replace('/', '_')
class_name = class_name.replace('//', '_')
class_name = class_name.rstrip()
utils.clear_make_dir(os.path.join(pOutFolder, class_name))
samples = elem_df.loc[elem_df[pColumn] == class_name].sample(frac=pFraction).reset_index(drop=True)
for i, sample in samples.iterrows() :
mesh_file = sample['Model']
model = trimesh.load(os.path.join(pInfolder, mesh_file))
model = utils.align_model_obb(model)
model = utils.normalizeModel_unorm(model)
plt.clf()
ax = fig.gca(projection='3d')
plot_model(model, ax)
mesh_name = os.path.basename(os.path.splitext(mesh_file)[0] + '.jpg')
fig.savefig(os.path.join(pOutFolder, class_name, mesh_name), bbox_inches='tight')
else :
_, folders, _ = next(os.walk(pInfolder))
for folder in folders :
plot_elements_from_class(
os.path.join(pInfolder, folder),
os.path.join(pOutFolder, folder))
def plot_elements_from_class(pInfolder, pOutFolder) :
utils.clear_make_dir(pOutFolder)
fig = plt.figure()
for model_file in glob.glob(os.path.join(pInfolder, '*.obj')) :
model = trimesh.load(model_file)
model = utils.align_model_obb(model)
model = utils.normalizeModel_unorm(model)
plt.clf()
ax = fig.gca(projection='3d')
plot_model(model, ax)
mesh_name = os.path.basename(os.path.splitext(model_file)[0] + '.jpg')
fig.savefig(os.path.join(pOutFolder, mesh_name), bbox_inches='tight')
def visualize_sample(pInfolder, pFile) :
cwd = os.getcwd()
infolder = cwd + pInfolder
voxDim = 32
model = trimesh.load(infolder + pFile)
    model = utils.normalizeModel(model)
    samples = model.sample(5000)
    voxel_grid = utils.binary_voxels(samples, (voxDim - 2, voxDim - 2, voxDim - 2))
    dim = np.ones(shape=(3,), dtype=int) * voxDim - (voxDim - 2)
voxel_grid = np.pad(voxel_grid, [
(math.floor(dim[0]/2), math.ceil(dim[0]/2)),
(math.floor(dim[1]/2), math.ceil(dim[1]/2)),
(math.floor(dim[2]/2), math.ceil(dim[2]/2))], mode='constant')
fig = plt.figure()
axs = fig.add_subplot(311, projection='3d')
axs.plot_trisurf(model.vertices[:, 0], model.vertices[:, 1], model.vertices[:, 2], triangles=model.faces, alpha=0.5, cmap=cm.cool)
axs.set_xlabel('x')
axs.set_ylabel('y')
axs.set_zlabel('z')
axs.set_xlim([0, 1])
axs.set_ylim([0, 1])
axs.set_zlim([0, 1])
axs = fig.add_subplot(312, projection='3d')
x, y, z = np.indices((voxDim + 1, voxDim + 1, voxDim + 1))
axs.voxels(x, y, z, voxel_grid, alpha=0.5)
axs.set_xlabel('x')
axs.set_ylabel('y')
axs.set_zlabel('z')
axs.set_xlim([0, 31])
axs.set_ylim([0, 31])
axs.set_zlim([0, 31])
axs = fig.add_subplot(313, projection='3d')
axs.scatter(samples[:, 0], samples[:, 1], samples[:, 2], s=2)
axs.set_xlabel('x')
axs.set_ylabel('y')
axs.set_zlabel('z')
axs.set_xlim([0, 1])
axs.set_ylim([0, 1])
axs.set_zlim([0, 1])
plt.tight_layout()
plt.show()
def plot_voxels_from_classes(pInFolder, pOutFolder) :
utils.clear_make_dir(pOutFolder)
_, folders, _ = next(os.walk(pInFolder))
for folder in folders :
outFolder = os.path.join(pOutFolder, folder)
plot_voxels_from_class(os.path.join(pInFolder, folder), outFolder)
def plot_voxels_from_class(pInFolder, pOutFolder) :
utils.clear_make_dir(pOutFolder)
fig = plt.figure()
axs = fig.add_subplot(111, projection='3d')
for model_file in glob.glob(os.path.join(pInFolder, '*.npz')) :
with np.load(model_file, allow_pickle=True) as grid :
voxel_grid = grid['a']
plt.cla()
plot_voxelgrid(voxel_grid, axs)
mesh_name = os.path.basename(os.path.splitext(model_file)[0] + '.jpg')
plt.savefig(os.path.join(pOutFolder, mesh_name), bbox_inches='tight')
def plot_voxelgrid_file(pInFile, ax=None) :
with np.load(pInFile, allow_pickle=True) as grid :
voxel_grid = grid['a']
plot_voxelgrid(voxel_grid, ax)
def plot_voxelgrid(pGrid, ax=None) :
plotGrid = True if ax is None else False
if plotGrid :
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
voxDim = pGrid.shape[0]
ax.voxels(pGrid, alpha=0.5)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim([0, voxDim])
ax.set_ylim([0, voxDim])
ax.set_zlim([0, voxDim])
if plotGrid :
plt.show()
def plot_model_file(pFile, ax=None) :
model = trimesh.load(pFile)
plot_model(model)
def plot_model(pModel, ax=None) :
plotModel = True if ax is None else False
if plotModel :
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_trisurf(
pModel.vertices[:, 0],
pModel.vertices[:, 1],
pModel.vertices[:, 2],
triangles=pModel.faces, alpha=0.5, cmap=cm.cool)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.set_zlim([0, 1])
if plotModel :
plt.show() | {"hexsha": "77834090f86dfeb577ea81b23436622b72147c96", "size": 7462, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/src/data_analysis.py", "max_stars_repo_name": "cgaueb/deep_bim", "max_stars_repo_head_hexsha": "d34c4fdefd921a11a26e56b5823ab2a0b64ea311", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-08-31T12:24:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T07:05:18.000Z", "max_issues_repo_path": "python/src/data_analysis.py", "max_issues_repo_name": "cgaueb/deep_bim", "max_issues_repo_head_hexsha": "d34c4fdefd921a11a26e56b5823ab2a0b64ea311", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/src/data_analysis.py", "max_forks_repo_name": "cgaueb/deep_bim", "max_forks_repo_head_hexsha": "d34c4fdefd921a11a26e56b5823ab2a0b64ea311", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6186440678, "max_line_length": 134, "alphanum_fraction": 0.6141785044, "include": true, "reason": "import numpy", "num_tokens": 2034} |
import matplotlib.pyplot as plt
import numpy as np
from itertools import combinations, product
from scipy.spatial import KDTree
# GEOMETRY
# --------
def ang(x, y):
"angle between two point x, y with respect to x axis"
return np.arctan(1/np.divide(*(y-x)))
def reorganize(a, b, c, d, return_idxs=False):
"""
    order the coordinates so the most distant pair comes first, then the remaining points by decreasing distance from the first point
"""
x = np.vstack([a, b, c, d])
distances = np.linalg.norm(x[:, :, None] - x.T[None, :, :], axis=1)
i = np.min(np.unravel_index(np.argmax(distances), distances.shape))
A = x[i]
x = np.delete(x, i, axis=0)
distances_from_A = np.linalg.norm(x - A, axis=1)
idxs = np.argsort(1 / distances_from_A)
if return_idxs:
return [0, *idxs]
else:
return [A, *x[idxs]]
def rotate_point(point, angle, pivot, norm=False):
"""
rotate point around pivot of certain angle
"""
co = np.cos(angle); si = np.sin(angle)
r = np.array([
[co, -si],
[si, co]
])
x = point - pivot
x = r@x
if norm:
x /= np.linalg.norm(x)
x *= np.linalg.norm(point - pivot)*co
x += pivot
return x
def XY(a, b, norm=False):
"""
coordinates of the x, y axis as defined in Lang2009
"""
if norm:
norm = np.linalg.norm(b-a)
x = rotate_point(b, -np.pi/4, a, norm=norm)
y = rotate_point(b, np.pi/4, a, norm=norm)
return x, y
def proj(p, origin, axe, norm=False):
"""
projection of a point p on a segment from origin to axe
"""
n = axe - origin
n /= np.linalg.norm(n, 2)
if norm:
return np.dot(p - origin, n)
else:
return origin + n*np.dot(p - origin, n)
# QUAD
# ----
def quad_hash(a, b, c, d):
"""
    from 4 coordinates produce the quad hash code
"""
x, y = XY(a,b)
h = np.linalg.norm(b-a)
xd = proj(d, a, x, norm=True)/h; yd = proj(d, a, y, norm=True)/h
xc = proj(c, a, x, norm=True)/h; yc = proj(c, a, y, norm=True)/h
return xc, xd, yc, yd
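# Usage sketch (the coordinates are made-up values for illustration): the returned
# 4-tuple is the geometric hash of the quad and is invariant to translation,
# rotation and scale of the four points.
#   a, b = np.array([0., 0.]), np.array([1., 1.])
#   c, d = np.array([0.3, 0.6]), np.array([0.7, 0.4])
#   code = quad_hash(a, b, c, d)   # (xc, xd, yc, yd)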
def good_quad(a, b, c, d):
"""
whether all points are contained in a circle (see Lang2009)
"""
r = np.linalg.norm(b-a)/2
center = a + (b-a)/2
# check distance from center
in_circle = np.linalg.norm(center - np.vstack([a, b, c, d]), axis=1) <= r*1.01
return np.all(in_circle)
def clean(xy, tolerance=20):
distances_to_others = np.array([np.linalg.norm(p-xy, axis=1) for p in xy])
return xy[np.argwhere(np.sum(distances_to_others < tolerance, axis=0) == 1).flatten()]
# AFFINE TRANSFORM
# ----------------
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:,:-1]
def _find_transform(s1, s2):
"""
Strict finding matrix transform between registered points
least square to find the affine transform matrix between 2 set of points
"""
S1 = pad(s1)
S2 = pad(s2)
A, res, rank, s = np.linalg.lstsq(S1, S2, rcond=None)
return A.T
def affine_transform(M):
return lambda x: unpad(np.dot(pad(x), M.T))
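# Minimal check of the two helpers above (illustrative values): recover a pure
# translation and apply it back to the source points.
#   src = np.array([[0., 0.], [1., 0.], [0., 1.]])
#   dst = src + np.array([2., 3.])
#   M = _find_transform(src, dst)
#   np.allclose(affine_transform(M)(src), dst)   # -> True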
# PLOTTING
# -------
def plot(*args, color="k", offset=5, label=None, alpha=1, **kwargs):
"""
    Convenient plot of point sources
"""
for i, a in enumerate(args):
plt.plot(*a, "o", fillstyle="none", c=color, label=label if i==0 else None, alpha=alpha)
for i, (name, a) in enumerate(kwargs.items()):
plt.plot(*a, "o", fillstyle="none", c=color, label=label if i==0 else None, alpha=alpha)
plt.text(a[0], a[1] + offset, name, ha='center', color=color)
plt.gca().set_aspect("equal")
if label is not None:
plt.legend()
def plot_quad(a, b, c, d):
"""
Plot to visualize quad when making hash code, as in Lang2009
"""
x, y = XY(a, b, norm=True)
xd = proj(d, a, x); yd = proj(d, a, y)
xc = proj(c, a, x); yc = proj(c, a, y)
plot(a=a, b=b, c=c, d=d)
plot(x=x, y=y, color="C0")
plot(xd=xd, yd=yd, xc=xc, yc=yc, color="C0")
plt.plot(*np.array([d, xd]).T, "--", color="C0", alpha=0.2)
plt.plot(*np.array([d, yd]).T, "--", color="C0", alpha=0.2)
plt.plot(*np.array([c, xc]).T, "--", color="C0", alpha=0.2)
plt.plot(*np.array([c, yc]).T, "--", color="C0", alpha=0.2)
plt.plot(*np.array([a, x]).T, color="C0", alpha=0.2)
plt.plot(*np.array([a, y]).T, color="C0", alpha=0.2)
plt.gca().add_patch((plt.Circle((b-a)/2 + a, radius=np.linalg.norm(b-a)/2, fill=False)))
plt.gca().add_patch((plt.Polygon(np.array([a, c, b, d]), facecolor="k", alpha=0.05)))
# Full match
# ----------
def count_cross_match(s1, s2, tolerance=2):
"""
    count pairs of points whose distance is less than tolerance
"""
c = 0
for i, s in enumerate(s1):
distances = np.linalg.norm(s - s2, axis=1)
closest = np.argmin(distances)
if distances[closest] < tolerance:
c += 1
return c
def quads_stars(xy, n=15):
"""
    build the quad hash codes and the corresponding 4-point asterisms from the first n points
"""
xy = xy.copy()
xy = xy[0:n]
quads_idxs = list(combinations(np.arange(xy.shape[0]), 4))
quads = []
stars = []
for qi in quads_idxs:
_quad = reorganize(*xy[qi, :])
if good_quad(*_quad):
quads.append(quad_hash(*_quad))
stars.append(_quad)
if len(quads) == 0:
print(len(quads))
return np.array(quads), np.array(stars)
def cross_match(s1, s2, tolerance=10, return_ixds=False):
matches = []
for i, s in enumerate(s1):
distances = np.linalg.norm(s - s2, axis=1)
closest = np.argmin(distances)
if distances[closest] < tolerance:
matches.append([i, closest])
matches = np.array(matches)
if return_ixds:
return matches
else:
if len(matches) > 0:
return s1[matches[:, 0]], s2[matches[:, 1]]
else:
return np.array([]), np.array([])
def find_transform(s1, s2, tolerance=10, n=15, show=False):
quads1, stars1 = quads_stars(s1, n=n)
quads2, stars2 = quads_stars(s2, n=n)
# KDTree
kdt = KDTree(quads1)
dist, indices = kdt.query(quads2)
    # We pick the pair of asterisms (one from each set) leading to the highest number of matching stars
closeness = []
for i, m in enumerate(indices):
M = _find_transform(stars1[m], stars2[i])
new_s1 = affine_transform(M)(s1)
closeness.append(count_cross_match(s2, new_s1, tolerance=tolerance))
i = np.argmax(closeness)
m = indices[i]
S1 = stars1[m]
S2 = stars2[i]
M = _find_transform(S1, S2)
new_s1 = affine_transform(M)(s1)
if show:
rs1, rs2 = cross_match(new_s1, s2, tolerance=tolerance)
plot(*rs1)
plot(*rs2, color="C3")
i, j = cross_match(new_s1, s2, tolerance=tolerance, return_ixds=True).T
return _find_transform(s1[i], s2[j])
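# Usage sketch (assumes s1 and s2 are (N, 2) float arrays of detected source
# positions in two images): estimate the affine transform from the first frame to
# the second and express the first set of points in the second frame.
#   M = find_transform(s1, s2, tolerance=10)
#   s1_in_2 = affine_transform(M)(s1)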
# Some optimized methods
def _reorganize(Q):
distances = np.linalg.norm((Q[:, :, :, None] - np.swapaxes(Q, 1, 2)[:, None, :, :]), axis=2)
return np.array(
[Q[i][np.argsort(distances[i, m])[[0, 3, 2, 1]]] for i, m in enumerate(np.argmax(distances, 1)[:, 0])])
def _count_cross_match(s1, s2, tolerance=2):
"""
    count pairs of points whose distance is less than tolerance
"""
return np.count_nonzero(np.linalg.norm(s1[None, :] - s2[:, None], axis=2) < tolerance)
def _good_quad(a, b, c, d, max_distance=1000):
"""
whether all points are contained in a circle (see Lang2009)
"""
x = np.vstack([a, b, c, d])
r = np.linalg.norm(b - a) / 2
center = a + (b - a) / 2
# check distance from center
in_circle = np.linalg.norm(center - x, axis=1) <= r * 1.01
max_distance = np.max(np.linalg.norm(x[:, :, None] - x.T[None, :, :], axis=1)) < max_distance
return np.all(in_circle) and max_distance
def _quad_hash(a, b, c, d):
"""
    from 4 coordinates produce the quad hash code
"""
x, y = XY(a, b)
h = np.linalg.norm(b - a)
n = [x, y] - a
n = (n / np.linalg.norm(n, 2)).T
xd, yd = np.dot(d - a, n) / h
xc, yc = np.dot(c - a, n) / h
return xc, xd, yc, yd
def _quads_stars(xy, n=15):
"""
    build the quad hash codes and the corresponding 4-point asterisms from the first n points
"""
xy = xy.copy()
xy = xy[0:n]
quads_idxs = np.array(list(combinations(np.arange(xy.shape[0]), 4)))
Q = xy[quads_idxs]
Q = _reorganize(Q)
quads = []
stars = []
for q in Q:
if _good_quad(*q):
quads.append(_quad_hash(*q))
stars.append(q)
return quads, stars | {"hexsha": "7b54ae5ad1a13e81581b87ee202ebb9ff3d5be30", "size": 8504, "ext": "py", "lang": "Python", "max_stars_repo_path": "prose/twirl/utils.py", "max_stars_repo_name": "lgrcia/prose", "max_stars_repo_head_hexsha": "bf5482f775eb8cfee261620901cebafb6edb650a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2021-03-12T15:11:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T21:43:02.000Z", "max_issues_repo_path": "prose/twirl/utils.py", "max_issues_repo_name": "lgrcia/prose", "max_issues_repo_head_hexsha": "bf5482f775eb8cfee261620901cebafb6edb650a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2020-12-08T10:55:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-13T15:44:21.000Z", "max_forks_repo_path": "prose/twirl/utils.py", "max_forks_repo_name": "lgrcia/prose", "max_forks_repo_head_hexsha": "bf5482f775eb8cfee261620901cebafb6edb650a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-11T17:00:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-11T17:00:22.000Z", "avg_line_length": 26.0858895706, "max_line_length": 111, "alphanum_fraction": 0.5697318909, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2663} |
# -*- coding: utf-8 -*-
"""Test of Fancy module
This module test the various functions present in the Fancy module.
"""
import datetime
import unittest
import unittest.mock
import sys
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from pandas.api.types import CategoricalDtype
import pandas.util.testing as tm
from sklearn.preprocessing import StandardScaler
from bff.fancy import (avg_dicts, cast_to_category_pd, concat_with_categories, dates_split,
get_peaks, idict, kwargs_2_list, log_df, mem_usage_pd, normalization_pd,
parse_date, pipe_multiprocessing_pd, size_2_square,
sliding_window, value_2_list)
def df_dummy_func_one(df, i=1):
"""Dummy function for multiprocessing on DataFrame.
This function uses `itertuples`."""
df = df.assign(d=None)
for row in df.itertuples():
df.at[row.Index, 'd'] = df.at[row.Index, 'a'] + i
return df
def df_dummy_func_two(df, i=2):
"""Dummy function for multiprocessing on DataFrame."""
return df.assign(d=lambda x: x['a'] ** i)
class TestFancy(unittest.TestCase):
"""
Unittest of Fancy module.
"""
# Variables used for multiple tests.
columns = ['name', 'age', 'country']
df = pd.DataFrame([['John', 24, 'China'],
['Mary', 20, 'China'],
['Jane', 25, 'Switzerland'],
['Greg', 23, 'China'],
['James', 28, 'China']],
columns=columns)
def test_avg_dicts(self):
"""
Test of the `avg_dicts` function.
"""
# Test the standard case, all dicts with numerical values.
dic_std_a = {'a': 0.8, 'b': 0.3}
dic_std_b = {'a': 2, 'b': 0.8}
dic_std_c = {'a': 0.01, 'b': 1.3}
res_std = avg_dicts(dic_std_a, dic_std_b, dic_std_c)
self.assertEqual(res_std['a'],
(dic_std_a['a'] + dic_std_b['a'] + dic_std_c['a']) / 3)
self.assertEqual(res_std['b'],
(dic_std_a['b'] + dic_std_b['b'] + dic_std_c['b']) / 3)
# Test with dicts not having the same keys.
dic_miss_d = {'a': 3.4, 'c': 0.4}
dic_miss_e = {'d': 0.2, 'c': 3.9}
res_missing_key = avg_dicts(dic_std_a, dic_miss_d, dic_miss_e)
self.assertEqual(res_missing_key['a'],
(dic_std_a['a'] + dic_miss_d['a']) / 3)
self.assertEqual(res_missing_key['c'],
(dic_miss_d['c'] + dic_miss_e['c']) / 3)
self.assertEqual(res_missing_key['d'],
(dic_miss_e['d']) / 3)
# Test with dicts having other types. Should raise exception.
# Test with string.
dic_str_f = {'a': 3, 'b': 'str'}
# Test with list.
dic_str_g = {'a': 3, 'b': [1, 2, 3]}
self.assertRaises(TypeError, avg_dicts, dic_std_a, dic_str_f)
self.assertRaises(TypeError, avg_dicts, dic_std_a, dic_std_b, dic_str_g)
def test_cast_to_category_pd(self):
"""
Test of the `cast_to_category_pd` function.
"""
original_types = {'name': np.dtype('O'), 'age': np.dtype('int64'),
'country': np.dtype('O')}
self.assertDictEqual(self.df.dtypes.to_dict(), original_types)
df_optimized = cast_to_category_pd(self.df)
tm.assert_frame_equal(self.df, df_optimized, check_dtype=False, check_categorical=False)
country_type = CategoricalDtype(categories=['China', 'Switzerland'], ordered=False)
optimized_types = {'name': np.dtype('O'), 'age': np.dtype('int64'),
'country': country_type}
self.assertDictEqual(df_optimized.dtypes.to_dict(), optimized_types)
        # Unhashable types should not be cast and should not raise an exception.
df_unhashable = self.df.copy().assign(dummy=None)
for row in df_unhashable.itertuples():
df_unhashable.at[row.Index, 'dummy'] = np.array([1, 2, 3])
df_unhashable_optimized = cast_to_category_pd(df_unhashable)
tm.assert_frame_equal(df_unhashable, df_unhashable_optimized,
check_dtype=False, check_categorical=False)
# Check the types.
optimized_types['dummy'] = np.dtype('O')
self.assertDictEqual(df_unhashable_optimized.dtypes.to_dict(),
optimized_types)
def test_concat_with_categories(self):
"""
Test of the `concat_with_categories` function.
"""
column_types = {'name': 'object',
'color': 'category',
'country': 'category'}
columns = list(column_types.keys())
df_left = pd.DataFrame([['John', 'red', 'China'],
['Jane', 'blue', 'Switzerland']],
columns=columns).astype(column_types)
df_right = pd.DataFrame([['Mary', 'yellow', 'France'],
['Fred', 'blue', 'Italy']],
columns=columns).astype(column_types)
df_res = pd.DataFrame([['John', 'red', 'China'],
['Jane', 'blue', 'Switzerland'],
['Mary', 'yellow', 'France'],
['Fred', 'blue', 'Italy']],
columns=columns).astype(column_types)
df_concat = concat_with_categories(df_left, df_right,
ignore_index=True)
# Check the content of the DataFrame.
tm.assert_frame_equal(df_res, df_concat)
# Check the types of the DataFrame's columns.
self.assertTrue(pd.api.types.is_object_dtype(df_concat['name']))
self.assertTrue(pd.api.types.is_categorical_dtype(df_concat['color']))
self.assertTrue(pd.api.types.is_categorical_dtype(
df_concat['country']))
# Check assertion if columns don't match.
df_left_wrong = pd.DataFrame([['John', 'XXL', 'China']],
columns=['name', 'size', 'country'])
self.assertRaises(AssertionError, concat_with_categories, df_left_wrong, df_right)
def test_dates_split(self):
"""
Test of the `dates_split` function.
"""
# Check when all sub-periods have the same size.
# In this example, split 30 days into 3 ranges of 10 days.
d_start = datetime.datetime(2020, 5, 1)
d_end = datetime.datetime(2020, 5, 31)
ranges_1 = dates_split(d_start, d_end, 3)
res_1 = [(datetime.datetime(2020, 5, 1, 0, 0), datetime.datetime(2020, 5, 11, 0, 0)),
(datetime.datetime(2020, 5, 11, 0, 0), datetime.datetime(2020, 5, 21, 0, 0)),
(datetime.datetime(2020, 5, 21, 0, 0), datetime.datetime(2020, 5, 31, 0, 0))]
self.assertListEqual(ranges_1, res_1)
        # Check when the last sub-period is bigger than the others.
# In this example, the last period is bigger by 1 second.
d_start = datetime.datetime(2020, 5, 1)
d_end = datetime.datetime(2020, 5, 11, 0, 0, 3)
ranges_2 = dates_split(d_start, d_end, 2)
res_2 = [(datetime.datetime(2020, 5, 1, 0, 0), datetime.datetime(2020, 5, 6, 0, 0, 1)),
(datetime.datetime(2020, 5, 6, 0, 0, 1), datetime.datetime(2020, 5, 11, 0, 0, 3))]
self.assertListEqual(ranges_2, res_2)
def test_get_peaks(self):
"""
Test of the `get_peaks` function.
"""
# Creation of a serie with peaks 9 and 12.
values = [4, 5, 9, 3, 2, 1, 2, 1, 3, 4, 12, 9, 6, 3, 2, 4, 5]
dates = pd.date_range('2019-06-20', periods=len(values), freq='T')
s = pd.Series(values, index=dates)
# Compute the peaks.
peak_dates, peak_values = get_peaks(s)
peak_dates_res = [np.datetime64('2019-06-20T00:02'),
np.datetime64('2019-06-20T00:10')]
peak_values_res = [9., 12.]
assert_array_equal(peak_dates, peak_dates_res)
assert_array_equal(peak_values, peak_values_res)
# Check assertion if index is not of type `datetime`.
self.assertRaises(AssertionError, get_peaks, pd.Series(values, index=range(len(values))))
def test_idict(self):
"""
Test of the `idict` function.
"""
valid_dict = {1: 4, 2: 5, 3: 6}
another_valid_dict = {'1': 4, 2: '5', 3: '6'}
dataloss_dict = {1: 4, 2: 4, 3: 6}
invalid_dict = {1: [1], 2: [2], 3: [3]}
self.assertEqual(idict(valid_dict), {4: 1, 5: 2, 6: 3})
self.assertEqual(idict(another_valid_dict), {4: '1', '5': 2, '6': 3})
self.assertEqual(idict(dataloss_dict), {4: 2, 6: 3})
self.assertRaises(TypeError, idict, invalid_dict)
def test_kwargs_2_list(self):
"""
Test of the `kwargs_2_list` function.
"""
# A list should remain a list.
self.assertEqual(kwargs_2_list(seq=[1, 2, 3]), {'seq': [1, 2, 3]})
# A single integer should result in a list with one integer.
self.assertEqual(kwargs_2_list(age=42), {'age': [42]})
# A single string should result in a list with one string.
self.assertEqual(kwargs_2_list(name='John Doe'), {'name': ['John Doe']})
# A tuple should remain a tuple.
self.assertEqual(kwargs_2_list(children=('Jane Doe', 14)),
{'children': ('Jane Doe', 14)})
# A dictionary should result in a list with a dictionary.
self.assertEqual(kwargs_2_list(info={'name': 'John Doe', 'age': 42}),
{'info': [{'name': 'John Doe', 'age': 42}]})
# Passing a non-keyword argument should raise an exception.
self.assertRaises(TypeError, kwargs_2_list, [1, 2, 3])
# Passing multiple keyword arguments should work.
self.assertEqual(kwargs_2_list(name='John Doe', age=42,
children=('Jane Doe', 14)),
{'name': ['John Doe'], 'age': [42],
'children': ('Jane Doe', 14)})
def test_log_df(self):
"""
Test of the `log_df` function.
All tests of logger are done using a mock.
"""
# Should work directly on a DataFrame.
with unittest.mock.patch('logging.Logger.info') as mock_logging:
log_df(self.df)
mock_logging.assert_called_with(f'{self.df.shape}')
# Should work with the `pipe` function.
df = tm.makeDataFrame().head()
with unittest.mock.patch('logging.Logger.info') as mock_logging:
df_res = (df
.assign(E=2)
.pipe(log_df, lambda x: x.shape, 'New shape=')
)
mock_logging.assert_called_with(f'New shape={df_res.shape}')
# Should work with another function to log.
with unittest.mock.patch('logging.Logger.info') as mock_logging:
df_res = (df
.assign(F=3)
.pipe(log_df, lambda x: x.shape, 'My df: \n')
)
mock_logging.assert_called_with(f'My df: \n{df_res.shape}')
def test_mem_usage_pd(self):
"""
Test of the `mem_usage_pd` function.
"""
df = pd.DataFrame({'A': [f'value{i}' for i in range(100000)],
'B': range(100000),
'C': [float(i) for i in range(100000)]}).set_index('A')
test_1 = mem_usage_pd(df, details=False)
res_1 = {'total': '7.90 MB'}
self.assertDictEqual(test_1, res_1)
test_2 = mem_usage_pd(df)
res_2 = {'Index': {'6.38 MB', 'Index type'},
'B': {'0.76 MB', np.dtype('int64')},
'C': {'0.76 MB', np.dtype('float64')},
'total': '7.90 MB'}
self.assertDictEqual(test_2, res_2)
serie = df.reset_index()['B']
test_3 = mem_usage_pd(serie, details=False)
res_3 = {'total': '0.76 MB'}
self.assertDictEqual(test_3, res_3)
# Check the warning message using a mock.
with unittest.mock.patch('logging.Logger.warning') as mock_logging:
mem_usage_pd(serie, details=True)
mock_logging.assert_called_with('Details is only available for DataFrames.')
# Check for exception if not a pandas object.
self.assertRaises(AttributeError, mem_usage_pd, {'a': 1, 'b': 2})
def test_normalization_pd(self):
"""
Test of the `normalization_pd` function.
"""
data = {'x': [123, 27, 38, 45, 67],
'y': [456, 45.4, 32, 34, 90],
'color': ['r', 'b', 'g', 'g', 'b']}
df = pd.DataFrame(data)
        # Check the function in a pipe on a subset of the columns.
df_std = df.pipe(normalization_pd, columns=['x'], scaler=StandardScaler)
data_std = data.copy()
data_std['x'] = [1.847198, -0.967580, -0.645053, -0.439809, 0.205244]
df_std_res = pd.DataFrame(data_std).astype({'x': np.float32})
tm.assert_frame_equal(df_std, df_std_res, check_dtype=True, check_categorical=False)
# Check with a suffix and a keyword argument for the scaler.
df_min_max = normalization_pd(df, suffix='_norm', feature_range=(0, 2), new_type=np.float64)
data_min_max = data.copy()
data_min_max['x_norm'] = [2.000000, 0.000000, 0.229167, 0.375000, 0.833333]
data_min_max['y_norm'] = [2.000000, 0.06320755, 0.000000, 0.009434, 0.273585]
df_min_max_res = pd.DataFrame(data_min_max)
tm.assert_frame_equal(df_min_max, df_min_max_res, check_dtype=True, check_categorical=False)
def test_parse_date(self):
"""
Test of the `parse_date` decorator.
"""
# Creation of a dummy function to apply the decorator on.
@parse_date
def dummy_function(**kwargs):
return kwargs
list_parses = ['20190325',
'Mon, 21 March, 2015',
'2019-03-09 08:03:01',
'March 27 2019']
list_results = [datetime.datetime(2019, 3, 25, 0, 0),
datetime.datetime(2015, 3, 21, 0, 0),
datetime.datetime(2019, 3, 9, 8, 3, 1),
datetime.datetime(2019, 3, 27, 0, 0)]
for parse, result in zip(list_parses, list_results):
self.assertEqual(dummy_function(date=parse)['date'], result)
# Should work with custom fields for date.
# Creation of a dummy function with custom fields for the date.
@parse_date(date_fields=['date_start', 'date_end'])
def dummy_function_custom(**kwargs):
return kwargs
parse_1 = dummy_function_custom(date_start='20181008',
date_end='2019-03-09')
self.assertEqual(parse_1['date_start'], datetime.datetime(2018, 10, 8))
self.assertEqual(parse_1['date_end'], datetime.datetime(2019, 3, 9))
# Should not parse if wrong format
self.assertEqual(dummy_function(date='wrong format')['date'],
'wrong format')
def test_pipe_multiprocessing_pd_one(self):
"""
Test of the `pipe_multiprocessing_pd` function.
If one of the tests fails, tests might hang and need to be killed.
"""
df_a = pd.DataFrame({'a': [1, 2, 3]})
tm.assert_frame_equal(pipe_multiprocessing_pd(df_a, df_dummy_func_one, nb_proc=2),
pd.DataFrame({'a': [1, 2, 3], 'd': [2, 3, 4]}),
check_dtype=False, check_categorical=False)
tm.assert_frame_equal(pipe_multiprocessing_pd(df_a, df_dummy_func_one, i=4, nb_proc=2),
pd.DataFrame({'a': [1, 2, 3], 'd': [5, 6, 7]}),
check_dtype=False, check_categorical=False)
def test_pipe_multiprocessing_pd_two(self):
"""
Test of the `pipe_multiprocessing_pd` function.
If one of the tests fails, tests might hang and need to be killed.
"""
df_a = pd.DataFrame({'a': [1, 2, 3]})
tm.assert_frame_equal(pipe_multiprocessing_pd(df_a, df_dummy_func_two, nb_proc=2),
pd.DataFrame({'a': [1, 2, 3], 'd': [1, 4, 9]}),
check_dtype=False, check_categorical=False)
tm.assert_frame_equal(pipe_multiprocessing_pd(df_a, df_dummy_func_two, i=3, nb_proc=2),
pd.DataFrame({'a': [1, 2, 3], 'd': [1, 8, 27]}),
check_dtype=False, check_categorical=False)
def test_size_2_square(self):
"""
Test of the `size_2_square` function.
"""
# Test when result is a square.
self.assertEqual(size_2_square(9), (3, 3))
# Test when not a square.
self.assertEqual(size_2_square(10), (4, 4))
def test_sliding_window(self):
"""
Test of the `sliding_window` function.
"""
# Should work with step of 1.
self.assertEqual(list(sliding_window('abcdef', 2, 1)),
['ab', 'bc', 'cd', 'de', 'ef'])
# Should work with numpy arrays.
res_np_1 = list(sliding_window(np.array([1, 2, 3, 4, 5, 6]), 5, 5))
np.testing.assert_array_equal(res_np_1[0], np.array([1, 2, 3, 4, 5]))
np.testing.assert_array_equal(res_np_1[1], np.array([6]))
# Should work when step and windows size are the same.
# In this case, each element will only be present once.
self.assertEqual(list(sliding_window('abcdef', 2, 2)),
['ab', 'cd', 'ef'])
        # Should work with an odd number of elements.
self.assertEqual(list(sliding_window('abcdefg', 1, 1)),
['a', 'b', 'c', 'd', 'e', 'f', 'g'])
self.assertEqual(list(sliding_window('abcdefg', 2, 2)),
['ab', 'cd', 'ef', 'g'])
        # Should work if the length of the sequence is the same as the window size.
self.assertEqual(list(sliding_window('abcdefg', 7, 3)),
['abcdefg'])
# Should work if last chunk is not full.
self.assertEqual(list(sliding_window('abcdefgh', 6, 4)),
['abcdef', 'efgh'])
self.assertEqual(list(sliding_window('abcdefgh', 6, 5)),
['abcdef', 'fgh'])
self.assertEqual(list(sliding_window('abcdefghi', 6, 5)),
['abcdef', 'fghi'])
# Should work with longer sequence.
seq_1 = 'abcdefghijklmnopqrstuvwxyz'
res_1 = ['abcdef', 'ghijkl', 'mnopqr', 'stuvwx', 'yz']
self.assertEqual(list(sliding_window(seq_1, 6, 6)), res_1)
res_2 = ['abcdef', 'defghi', 'ghijkl', 'jklmno', 'mnopqr', 'pqrstu',
'stuvwx', 'vwxyz']
self.assertEqual(list(sliding_window(seq_1, 6, 3)), res_2)
# Check for exceptions.
# Should raise an exception if the sequence is not iterable.
with self.assertRaises(TypeError):
list(sliding_window(3, 2, 1))
# Should raise an exception if step is not an integer.
with self.assertRaises(TypeError):
list(sliding_window(seq_1, 2, 1.0))
with self.assertRaises(TypeError):
list(sliding_window(seq_1, 2, '1'))
# Should raise an exception if window size is not an integer.
with self.assertRaises(TypeError):
list(sliding_window(seq_1, 2.0, 1))
with self.assertRaises(TypeError):
list(sliding_window(seq_1, '2', 1))
# Should raise an exception if window size is smaller
# than step or <= 0.
with self.assertRaises(ValueError):
list(sliding_window(seq_1, 2, 3))
with self.assertRaises(ValueError):
list(sliding_window(seq_1, -1, -1))
        # Should raise an exception if the step is smaller than or equal to 0.
with self.assertRaises(ValueError):
list(sliding_window(seq_1, 2, 0))
with self.assertRaises(ValueError):
list(sliding_window(seq_1, 2, -1))
# Should raise an exception if length of sequence
# is smaller than the window size.
with self.assertRaises(ValueError):
list(sliding_window('abc', 4, 1))
def test_value_2_list(self):
"""
Test of the `value_2_list` function.
"""
# A list should remain a list.
self.assertEqual(value_2_list([1, 2, 3]), [1, 2, 3])
# A single integer should result in a list with one integer.
self.assertEqual(value_2_list(42), [42])
# A single string should result in a list with one string.
self.assertEqual(value_2_list('John Doe'), ['John Doe'])
# A tuple should remain a tuple.
self.assertEqual(value_2_list(('Jane Doe', 14)), ('Jane Doe', 14))
# A dictionary should result in a list with the dictionary.
self.assertEqual(value_2_list({'name': 'John Doe', 'age': 42}),
[{'name': 'John Doe', 'age': 42}])
# A single axis should be put in a list.
__, axes_a = plt.subplots(nrows=1, ncols=1)
self.assertEqual(len(value_2_list(axes_a)), 1)
# A list of axis (`np.ndarray`) should not change.
__, axes_b = plt.subplots(nrows=2, ncols=1)
self.assertEqual(len(value_2_list(axes_b)), 2)
if __name__ == '__main__':
from pkg_resources import load_entry_point
sys.exit(load_entry_point('pytest', 'console_scripts', 'py.test')()) # type: ignore
| {"hexsha": "d5c793d1224c4425d8525e1c70d71304fa01a31c", "size": 21775, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_fancy.py", "max_stars_repo_name": "axelfahy/FancyPythonThings", "max_stars_repo_head_hexsha": "5df204973f94dab5e7081c59d25554165db955c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-22T07:45:13.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-22T07:45:13.000Z", "max_issues_repo_path": "tests/test_fancy.py", "max_issues_repo_name": "axelfahy/FancyPythonThings", "max_issues_repo_head_hexsha": "5df204973f94dab5e7081c59d25554165db955c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2019-06-20T14:47:09.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-12T13:03:50.000Z", "max_forks_repo_path": "tests/test_fancy.py", "max_forks_repo_name": "axelfahy/FancyPythonThings", "max_forks_repo_head_hexsha": "5df204973f94dab5e7081c59d25554165db955c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-21T09:13:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-28T19:34:48.000Z", "avg_line_length": 44.5296523517, "max_line_length": 100, "alphanum_fraction": 0.5671641791, "include": true, "reason": "import numpy,from numpy", "num_tokens": 5521} |
"""
Methods to load data, analyze customer churn,
train models and plot training results
"""
import os
import logging
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
sns.set()
logging.basicConfig(
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO)
def map_dtypes(categorical=[], string=[], numeric=[]):
"""Create dtype mapper for pd.read_csv to parse columns as specified dtypes
Args:
categorical, string, numeric (list): Column names to parse as type
Usage:
        >>> dtype_mapper = map_dtypes(categorical=['gender', 'mood'])
        >>> df = pd.read_csv('csv_file.csv', dtype=dtype_mapper)
"""
dtype_categorical = dict(zip(categorical, ["category" for i in range(len(categorical))]))
dtype_numeric = dict(zip(numeric, ["float" for i in range(len(numeric))]))
dtype_str = dict(zip(string, ["str" for i in range(len(string))]))
dtype_mapper = {**dtype_categorical, **dtype_numeric, **dtype_str}
return dtype_mapper
def import_data(
filepath, index_col=0, categorical_col=[], numeric_col=[], string_col=[]
):
"""
    returns dataframe for the csv found at filepath
    input:
            filepath (str): path to the csv file
            index_col (int): use specified column as index
            categorical_col, numeric_col, string_col (list): columns to parse as the given dtype
    output:
            df: pandas dataframe with an added `Churn` column
"""
dtype_mapper = map_dtypes(
categorical=categorical_col, numeric=numeric_col, string=string_col
)
try:
df = pd.read_csv(filepath, index_col=index_col, dtype=dtype_mapper)
# add feature
df['Churn'] = df['Attrition_Flag'].apply(lambda val: 0 if val == "Existing Customer" else 1)
return df
except FileNotFoundError:
logging.error(f"Could not find CSV file in {filepath}")
def plot_histograms(df, column_list):
"""Show histograms for specified columns"""
f, axes = plt.subplots(len(column_list), 1, figsize=(12,6*len(column_list)))
f.suptitle("Distributions", fontsize=18)
for row, column in enumerate(column_list):
try:
sns.histplot(df[column], ax=axes[row])
# older seaborn versions do not have histplot
# use distplot instead
except AttributeError:
logging.warning("Using distplot instead of histplot. Consider updating seaborn")
sns.distplot(df[column], ax=axes[row])
def plot_relative_count(df, column_list):
"""Relative count for specified columns in ascending order"""
f, axes = plt.subplots(len(column_list), 1, figsize=(12, 6*len(column_list)))
f.suptitle("Relative counts", fontsize=18)
for plot_row, column in enumerate(column_list):
relative_count = (df[column]
.value_counts(normalize=True)
.mul(100)
.reset_index()
.rename(columns={'index': column, column: '%'}))
        # with only 1 column, 'axes' is not a list and axes[plot_row] would raise a TypeError
if len(column_list) == 1:
sns.barplot(x=column, y="%", data=relative_count, order=relative_count[column], ax=axes)
elif len(column_list) > 1:
sns.barplot(x=column, y="%", data=relative_count, order=relative_count[column], ax=axes[plot_row])
def plot_correlation_heatmap(df):
"""Return correlation heatmap"""
fig = plt.figure(figsize=(20, 10))
fig.suptitle("Relative counts", fontsize=18)
sns.heatmap(df.corr(), annot=False, cmap='Dark2_r', linewidths = 2, square=True, annot_kws={"size": 14})
def perform_eda(df, describe=False, filepath=None, export_as='pdf'):
'''
perform eda on df and save figures to filepath as pdf
input:
df: pandas dataframe
describe (bool): whether to print df.describe()
filepath (str): directory to store plots
export_as (str): filetype of saved plots in filepath
output:
None
'''
print(f"Dimension: {df.shape}")
print(f"Missings:\n--------------\n{df.isnull().sum()}")
if describe: print(f"{df.describe()}")
plot_histograms(df=df, column_list=['Customer_Age', "Total_Trans_Ct"])
if filepath: plt.savefig(os.path.join(filepath, f'histograms.{export_as}'))
plot_relative_count(df=df, column_list=['Churn', 'Marital_Status'])
if filepath: plt.savefig(os.path.join(filepath,f'relative_counts.{export_as}'))
plot_correlation_heatmap(df=df)
if filepath: plt.savefig(os.path.join(filepath, f'correlation_heatmap.{export_as}'))
def encoder_helper(df, column_mapper, target_feature):
'''
helper function to turn each categorical column into a new column with
    proportion of churn for each category - associated with cell 15 from the notebook
input:
df: pandas dataframe
column_mapper (dict): dict of columns that contain categorical features and names for created features
target_feature (str): Feature on which to calculate mean
output:
            df: pandas dataframe with new columns for each encoded categorical feature
'''
    if not isinstance(column_mapper, dict): raise TypeError("column_mapper has to be a dict")
for column, new_feature in column_mapper.items():
df[new_feature] = df.groupby(column)[target_feature].transform("mean")
return df
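# Illustrative usage sketch for `encoder_helper` (the column names below are
# hypothetical, not part of this module):
#   df = encoder_helper(df,
#                       column_mapper={'Gender': 'Gender_Churn',
#                                      'Marital_Status': 'Marital_Status_Churn'},
#                       target_feature='Churn')
#   # each new column holds the mean churn rate of the row's category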
def perform_feature_engineering(df, target_variable, ignore_features=[], test_size=0.3, random_state=42):
'''Assign target variable (y), select features (x) and return train-test split
input:
df: pandas dataframe
target_variable (str): specify column name of the target variable (y)
        ignore_features (list): features appearing in df that should be ignored
random_state (int): Random nr. seed for train test split
test_size (float): Fraction of test set
output:
X_train: X training data
X_test: X testing data
y_train: y training data
        y_test: y testing data
raises:
ValueError: When target_variable not among df.columns
TypeError: Features to ignore must be a list
'''
if target_variable not in df.columns:
raise ValueError(f"Target variable {target_variable} not among df columns.")
if not isinstance(ignore_features, list):
raise TypeError(f"ignore_features must be of list type. It is of type {type(ignore_features)}")
X = df[df.columns.difference(ignore_features+[target_variable])]
y = df.loc[:,target_variable]
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=test_size,
random_state=random_state)
return X_train, X_test, y_train, y_test
def train_models(X_train, y_train):
'''
train, store model results: images + scores, and store models
input:
X_train: X training data
y_train: y training data
output:
        tuple: trained random forest model, logistic regression model
'''
# random forest
rfc = RandomForestClassifier(random_state=42)
param_grid = {
'n_estimators': [200, 500],
'max_features': ['auto', 'sqrt'],
'max_depth': [4,5,100],
'criterion': ['gini', 'entropy']
}
logging.info("Start CV for random forest")
cv_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5)
cv_rfc.fit(X_train, y_train)
# logistic regressor
lrc = LogisticRegression(max_iter=300)
logging.info("Train linear classifier")
lrc.fit(X_train, y_train)
return cv_rfc, lrc
def classification_report_image(X_train,
y_train,
X_test,
y_test,
rf_model,
lr_model,
filepath,
export_as='pdf'):
'''
produces classification report for train and test, stores report as image
in images folder
input:
X_train: training features
y_train: training response values
X_test: test features
y_test: test response values
rf_model: trained random forest model
lr_model: trained linear classifier model
        filepath: directory in which the report images are stored
export_as: file type to export classification_report
output:
None
'''
# random forest
y_train_preds_rf = rf_model.best_estimator_.predict(X_train)
y_test_preds_rf = rf_model.best_estimator_.predict(X_test)
# linear classifier
y_train_preds_lr = lr_model.predict(X_train)
y_test_preds_lr = lr_model.predict(X_test)
preds = {
'rf': {
'test': y_test_preds_rf,
'train': y_train_preds_rf
},
'lr': {
'test': y_test_preds_lr,
'train': y_train_preds_lr
},
}
# create classification reports
for model, pred in preds.items():
clf_report_test = classification_report(
y_test,
pred['test'],
output_dict=True)
clf_report_train = classification_report(
y_train,
pred['train'],
output_dict=True)
# create subplots
f, axes = plt.subplots(2, 1, figsize=(12, 6*2))
f.suptitle(f"Classification report: {model.upper()}", fontsize=18)
axes[0].set_title("Test")
sns.heatmap(
pd.DataFrame(clf_report_test).iloc[:-1, :].T,
annot=True, ax=axes[0])
axes[1].set_title("Train")
sns.heatmap(
pd.DataFrame(clf_report_train).iloc[:-1, :].T,
annot=True,
ax=axes[1])
plt.savefig(
os.path.join(
filepath,
f'{model}_classification_report.{export_as}'))
def feature_importance_plot(model, X_train, filepath='./images', export_as='pdf'):
'''
creates and stores the feature importances in pth
input:
model: model object containing feature_importances_
        X_train: pandas dataframe of X training features
filepath: path to store the figure
output:
None
'''
# Calculate feature importances
importances = model.best_estimator_.feature_importances_
# Sort feature importances in descending order
indices = np.argsort(importances)[::-1]
# Rearrange feature names so they match the sorted feature importances
names = [X_train.columns[i] for i in indices]
# Create plot
plt.figure(figsize=(20, 5))
# Create plot title
plt.title("Feature Importance")
plt.ylabel('Importance')
# Add bars
plt.bar(range(X_train.shape[1]), importances[indices])
# Add feature names as x-axis labels
plt.xticks(range(X_train.shape[1]), names, rotation=90)
plt.savefig(os.path.join(filepath, f'feature_importance.{export_as}'))
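# End-to-end usage sketch (illustrative; the CSV path, column mapper and
# ignored features are assumptions, not part of this module):
#   df = import_data('./data/bank_data.csv')
#   perform_eda(df, filepath='./images/eda')
#   df = encoder_helper(df, {'Gender': 'Gender_Churn'}, 'Churn')
#   X_train, X_test, y_train, y_test = perform_feature_engineering(
#       df, target_variable='Churn',
#       ignore_features=['Attrition_Flag', 'Gender', 'Marital_Status'])
#   rf_model, lr_model = train_models(X_train, y_train)
#   classification_report_image(X_train, y_train, X_test, y_test,
#                               rf_model, lr_model, filepath='./images/results')
#   feature_importance_plot(rf_model, X_train, filepath='./images/results')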
| {"hexsha": "26fb73760c2950f14e64ed05d6fd880409df16c4", "size": 11333, "ext": "py", "lang": "Python", "max_stars_repo_path": "1 clean code/Project Predict customer churn with clean code/churn_library.py", "max_stars_repo_name": "philippschmalen/ml-devops-engineer", "max_stars_repo_head_hexsha": "98c4c94b807215e2a909905235bde4a8d022477f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "1 clean code/Project Predict customer churn with clean code/churn_library.py", "max_issues_repo_name": "philippschmalen/ml-devops-engineer", "max_issues_repo_head_hexsha": "98c4c94b807215e2a909905235bde4a8d022477f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "1 clean code/Project Predict customer churn with clean code/churn_library.py", "max_forks_repo_name": "philippschmalen/ml-devops-engineer", "max_forks_repo_head_hexsha": "98c4c94b807215e2a909905235bde4a8d022477f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7291666667, "max_line_length": 114, "alphanum_fraction": 0.6317832877, "include": true, "reason": "import numpy", "num_tokens": 2501} |
from __future__ import annotations
__all__ = [
'coroutine', 'lock_seed', 'summary', 'trace', 'trace_module', 'whereami'
]
import functools
import gc
import inspect
import os
import random
import threading
import types
from collections import Counter
from collections.abc import Callable, Generator, Hashable, Iterator
from contextlib import suppress
from itertools import islice
from types import FrameType
from typing import TypeVar, cast
import numpy as np
import wrapt
from ._import_hook import register_post_import_hook
_F = TypeVar('_F', bound=Callable[..., Generator])
def _get_module(frame: FrameType) -> str:
if (module := inspect.getmodule(frame)) and module.__spec__:
return module.__spec__.name
return '__main__'
def _get_function(frame: FrameType) -> str:
function = frame.f_code.co_name
function = next(
(f.__qualname__
for f in gc.get_referrers(frame.f_code) if inspect.isfunction(f)),
function)
return '' if function == '<module>' else function
def _stack(frame: FrameType | None) -> Iterator[str]:
while frame:
yield f'{_get_module(frame)}:{_get_function(frame)}:{frame.f_lineno}'
if frame.f_code.co_name == '<module>': # Stop on module-level scope
return
frame = frame.f_back
def stack(skip: int = 0, limit: int | None = None) -> Iterator[str]:
"""Returns iterator of FrameInfos, stopping on module-level scope"""
frame = inspect.currentframe()
calls = _stack(frame)
    calls = islice(calls, skip + 1, None)  # Skip the `skip` + 1 innermost frames
if not limit:
return calls
return islice(calls, limit) # Keep at most `limit` outer frames
def whereami(skip: int = 0, limit: int | None = None) -> str:
calls = stack(skip + 1, limit)
return ' -> '.join(reversed([*calls]))
@wrapt.decorator
def trace(fn, _, args, kwargs):
print(
f'<({whereami(3)})> : {fn.__module__ or ""}.{fn.__qualname__}',
flush=True)
return fn(*args, **kwargs)
def _set_trace(obj, seen=None, prefix=None, module=None):
# TODO: rewrite using unittest.mock
if isinstance(obj, types.ModuleType):
if seen is None:
seen = set()
prefix = obj.__name__
if not obj.__name__.startswith(prefix) or obj.__name__ in seen:
return
seen.add(obj.__name__)
for name in dir(obj):
_set_trace(
getattr(obj, name), module=obj, seen=seen, prefix=prefix)
if not callable(obj):
return
if not hasattr(obj, '__dict__'):
setattr(module, obj.__qualname__, trace(obj))
print(f'wraps "{module.__name__}:{obj.__qualname__}"')
return
for name in obj.__dict__:
with suppress(AttributeError, TypeError):
member = getattr(obj, name)
if not callable(member):
continue
decorated = trace(member)
for m in (decorated, member, obj):
with suppress(AttributeError):
decorated.__module__ = m.__module__
break
else:
decorated.__module__ = getattr(module, '__name__', '')
setattr(obj, name, decorated)
print(f'wraps "{module.__name__}:{obj.__qualname__}.{name}"')
def trace_module(name):
"""Enables call logging for each callable inside module name"""
register_post_import_hook(_set_trace, name)
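# Usage sketch (illustrative): wrap every callable of a module in the call
# logger before that module is imported.
#   trace_module('json')
#   import json       # the post-import hook decorates json's callables
#   json.dumps({})    # each call is now printed together with its call stack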
# ---------------------------------------------------------------------------
@wrapt.decorator
def threadsafe_coroutine(fn, _, args, kwargs):
coro = fn(*args, **kwargs)
coro.send(None)
lock = threading.RLock()
class Synchronized(wrapt.ObjectProxy):
def send(self, item):
with lock:
return self.__wrapped__.send(item)
def __next__(self):
return self.send(None)
return Synchronized(coro)
@threadsafe_coroutine
def summary() -> Generator[None, Hashable, None]:
state: Counter[Hashable] = Counter()
while True:
key = yield
if key is None:
state.clear()
continue
state[key] += 1
print(dict(state), flush=True, end='\r')
def coroutine(fn: _F) -> _F:
def wrapper(*args, **kwargs):
coro = fn(*args, **kwargs)
coro.send(None)
return coro
return cast(_F, functools.update_wrapper(wrapper, fn))
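# Usage sketch (illustrative): `coroutine` primes the generator so the first
# `send` can already carry a value.
#   @coroutine
#   def accumulate():
#       total = 0
#       while True:
#           total += yield total
#   acc = accumulate()   # already advanced to the first `yield`
#   acc.send(2)          # -> 2
#   acc.send(3)          # -> 5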
# ---------------------------------------------------------------------------
def lock_seed(seed: int) -> None:
"""Set seed for all modules: random/numpy/torch"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
def _torch_seed(torch):
import torch
import torch.backends.cudnn
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
register_post_import_hook(_torch_seed, 'torch')
| {"hexsha": "8a66e6fa3f6769fcccf3f0692ff22df82ec0c4e8", "size": 4949, "ext": "py", "lang": "Python", "max_stars_repo_path": "glow/core/debug.py", "max_stars_repo_name": "arquolo/glow", "max_stars_repo_head_hexsha": "c4c63e36551cd1eec2e34129dbc0f06c788099de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "glow/core/debug.py", "max_issues_repo_name": "arquolo/glow", "max_issues_repo_head_hexsha": "c4c63e36551cd1eec2e34129dbc0f06c788099de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "glow/core/debug.py", "max_forks_repo_name": "arquolo/glow", "max_forks_repo_head_hexsha": "c4c63e36551cd1eec2e34129dbc0f06c788099de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9604519774, "max_line_length": 77, "alphanum_fraction": 0.6106284098, "include": true, "reason": "import numpy", "num_tokens": 1156} |
# ---
# title: 1311. Get Watched Videos by Your Friends
# id: problem1311
# author: Tian Jun
# date: 2020-10-31
# difficulty: Medium
# categories: Hash Table, String, Breadth-first Search
# link: <https://leetcode.com/problems/get-watched-videos-by-your-friends/description/>
# hidden: true
# ---
#
# There are `n` people, each person has a unique _id_ between `0` and `n-1`.
# Given the arrays `watchedVideos` and `friends`, where `watchedVideos[i]` and
# `friends[i]` contain the list of watched videos and the list of friends
# respectively for the person with `id = i`.
#
# Level **1** of videos are all watched videos by your friends, level **2** of
# videos are all watched videos by the friends of your friends and so on. In
# general, the level `k` of videos are all watched videos by people with the
# shortest path **exactly** equal to `k` with you. Given your `id` and the
# `level` of videos, return the list of videos ordered by their frequencies
# (increasing). For videos with the same frequency order them alphabetically
# from least to greatest.
#
#
#
# **Example 1:**
#
# **![](https://assets.leetcode.com/uploads/2020/01/02/leetcode_friends_1.png)**
#
#
#
# Input: watchedVideos = [["A","B"],["C"],["B","C"],["D"]], friends = [[1,2],[0,3],[0,3],[1,2]], id = 0, level = 1
# Output: ["B","C"]
# Explanation:
# You have id = 0 (green color in the figure) and your friends are (yellow color in the figure):
# Person with id = 1 -> watchedVideos = ["C"]
# Person with id = 2 -> watchedVideos = ["B","C"]
# The frequencies of watchedVideos by your friends are:
# B -> 1
# C -> 2
#
#
# **Example 2:**
#
# **![](https://assets.leetcode.com/uploads/2020/01/02/leetcode_friends_2.png)**
#
#
#
# Input: watchedVideos = [["A","B"],["C"],["B","C"],["D"]], friends = [[1,2],[0,3],[0,3],[1,2]], id = 0, level = 2
# Output: ["D"]
# Explanation:
# You have id = 0 (green color in the figure) and the only friend of your friends is the person with id = 3 (yellow color in the figure).
#
#
#
#
# **Constraints:**
#
# * `n == watchedVideos.length == friends.length`
# * `2 <= n <= 100`
# * `1 <= watchedVideos[i].length <= 100`
# * `1 <= watchedVideos[i][j].length <= 8`
# * `0 <= friends[i].length < n`
# * `0 <= friends[i][j] < n`
# * `0 <= id < n`
# * `1 <= level < n`
# * if `friends[i]` contains `j`, then `friends[j]` contains `i`
#
#
## @lc code=start
using LeetCode
## add your code here:
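## A possible BFS sketch (not the package's official solution); it works on
## plain Vectors and uses the problem's 0-based person ids.
function watched_videos_by_friends(watched_videos::Vector{Vector{String}},
                                   friends::Vector{Vector{Int}},
                                   id::Int, level::Int)
    n = length(friends)
    visited = falses(n)
    visited[id + 1] = true
    frontier = [id]
    for _ in 1:level                       # BFS, one level per iteration
        nxt = Int[]
        for p in frontier, q in friends[p + 1]
            if !visited[q + 1]
                visited[q + 1] = true
                push!(nxt, q)
            end
        end
        frontier = nxt
    end
    freq = Dict{String,Int}()              # video -> watch count at `level`
    for p in frontier, v in watched_videos[p + 1]
        freq[v] = get(freq, v, 0) + 1
    end
    videos = collect(keys(freq))
    sort!(videos, by = v -> (freq[v], v))  # by frequency, then alphabetically
    return videos
end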
## @lc code=end
| {"hexsha": "0fc282de3d27dc27d90b46199aa6e8303c16b825", "size": 2536, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/unresolved/1311.get-watched-videos-by-your-friends.jl", "max_stars_repo_name": "jmmshn/LeetCode.jl", "max_stars_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-10-27T18:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T13:27:49.000Z", "max_issues_repo_path": "src/unresolved/1311.get-watched-videos-by-your-friends.jl", "max_issues_repo_name": "jmmshn/LeetCode.jl", "max_issues_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 57, "max_issues_repo_issues_event_min_datetime": "2020-11-01T07:26:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T11:57:53.000Z", "max_forks_repo_path": "src/unresolved/1311.get-watched-videos-by-your-friends.jl", "max_forks_repo_name": "jmmshn/LeetCode.jl", "max_forks_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2020-10-30T11:52:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-13T10:35:11.000Z", "avg_line_length": 33.3684210526, "max_line_length": 141, "alphanum_fraction": 0.6048895899, "num_tokens": 799} |
"""
module FEIO
All file input/output functionality. Includes 3DG interoperability
"""
module FEIO
using FELinearAlgebra
export dgtraits, read_array, write_array, read_solution, write_solution
"""
dgtraits(a::Array{T,N}) where {T,N}
Compatibility of types with 3DG
"""
dgtraits(a::Array{T,N}) where {T,N} = dgtraits(eltype(a))
function dgtraits(T::Type)
println(T)
if T <: Bool
return 2
elseif T <: Integer
return 0
elseif T <: AbstractFloat # 3DG technically assumes C++ double
return 1
else
error("Unknown DG Type")
end
end
function dgtraits(val::Integer)
if val == 0
return Int64
elseif val == 1
return Float64
elseif val == 2
return Bool
else
error("Unknown DG Type")
end
end
"""
read_array(filename::AbstractString)
Read file created by `write_array()` and return the array represented by data
"""
function read_array(filename::AbstractString)
file = open(filename, "r")
array = read_array(file)
close(file)
return array
end
function read_array(io::IOStream)
dim = read(io, Int64)
size = Array{Int64,1}(undef, dim)
read!(io, size)
type = read(io, Int64)
array = Array{dgtraits(type), dim}(undef, size...)
read!(io, array)
return array
end
"""
write_array(filename::AbstractString, a)
Write the array to a specified filename in 3DG compatible format
"""
function write_array(filename::AbstractString, a)
file = open(filename, "w")
write_array(file, a)
close(file)
end
function write_array(io::IOStream, a)
if typeof(a) == SolutionVector
a = a.data
end
sz = size(a)
write(io, length(sz))
write(io, sz...)
write(io, dgtraits(a))
write(io, a)
end
"""
read_solution(filename, mesh)
Read a file created by `write_solution()` and return the SolutionVector defined
on the given mesh, in 3DG compatible format
"""
function read_solution(filename::AbstractString, mesh)
# TODO: Will need to use mesh for parallel solutions
data = read_array(filename)
return SolutionVector(data)
end
"""
write_solution(filename, u, mesh)
Write the solution defined on a given mesh to a specified filename in
3DG compatible format
"""
function write_solution(filename::AbstractString, u, mesh)
# TODO: Will need to use mesh for parallel solutions
write_array(filename, u.data)
end
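# Round-trip usage sketch (illustrative, not executed at load time):
#   a = rand(Float64, 4, 3)
#   write_array("tmp_array.dat", a)   # 3DG layout: ndims, size, type tag, data
#   b = read_array("tmp_array.dat")
#   a == b                            # true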
end # module FEIO
| {"hexsha": "ff4516895ff64451742b703bb75cef6f498bd706", "size": 2415, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/FEIO.jl", "max_stars_repo_name": "NoseKnowsAll/DGToolkit", "max_stars_repo_head_hexsha": "e029ed96f337b187876a52a3f63b7636336374c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-22T03:23:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-22T03:23:48.000Z", "max_issues_repo_path": "src/FEIO.jl", "max_issues_repo_name": "NoseKnowsAll/DGToolkit", "max_issues_repo_head_hexsha": "e029ed96f337b187876a52a3f63b7636336374c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/FEIO.jl", "max_forks_repo_name": "NoseKnowsAll/DGToolkit", "max_forks_repo_head_hexsha": "e029ed96f337b187876a52a3f63b7636336374c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-22T03:23:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-22T03:23:50.000Z", "avg_line_length": 23.4466019417, "max_line_length": 77, "alphanum_fraction": 0.6753623188, "num_tokens": 613} |
import sympy
class Variables:
# lists of sympy variables
x = []
y = []
z = []
def __init__(self, nr_x, nr_y, nr_z):
self.load_variables("x", nr_x)
self.load_variables("y", nr_y)
self.load_variables("z", nr_z)
"""
This generates <code> to be used with exec(<code>)
in order to define global indeterminates
    --- uses a prefix and a number of symbols
"""
@staticmethod
def com_variables(pref, sym_nr):
sym_name = [pref + str(i) + ", " for i in range(1, sym_nr)]
sym_name.append(pref + str(sym_nr))
sym_name = "".join(sym_name)
com1 = "(" + sym_name + ")=sympy.symbols(\"" + sym_name + "\")"
com2 = "self." + pref + "=[" + sym_name + "]"
return [com1, com2]
"""
This generates <code> to be used with exec(<code>)
in order to define global indeterminates
--- uses a list of names
"""
@staticmethod
def com_variables_l(pref, names):
sym_name = [names[i] + ", " for i in range(len(names) - 1)]
sym_name.append(names[len(names) - 1])
sym_name = "".join(sym_name)
com1 = "(" + sym_name + ")=sympy.symbols(\"" + sym_name + "\")"
com2 = "self." + pref + "=[" + sym_name + "]"
return [com1, com2]
"""
Define global indeterminates
"""
def load_variables(self, pref, sym_nr):
[com1, com2] = self.com_variables(pref, sym_nr)
exec(com1)
exec(com2)
"""
    Give the LaTeX form of an expression in these variables
"""
def latex(self, v):
xx = self.x
yy = self.y
zz = self.z
result = str(v)
for i in range(len(xx)):
result = result.replace(str(xx[i]), "x_{" + str(i + 1) + "}")
for i in range(len(yy)):
result = result.replace(str(yy[i]), "y_{" + str(i + 1) + "}")
for i in range(len(zz)):
result = result.replace(str(zz[i]), "z_{" + str(i + 1) + "}")
for i in range(3):
for s in ["a", "b", "c", "d"]:
result = result.replace(s + str(i), s + "_{" + str(i + 1) + "}")
result = result.replace("**", "^")
result = result.replace("*", "")
return result
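# Usage sketch (illustrative; the rendered string depends on sympy's ordering):
#   v = Variables(2, 1, 1)            # defines x1, x2, y1, z1 as sympy symbols
#   expr = v.x[0]**2 + v.y[0]*v.z[0]
#   v.latex(expr)                     # e.g. 'x_{1}^2 + y_{1}z_{1}'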
| {"hexsha": "6ec60bbbf338a392949c6f9cd958701c59ca1512", "size": 2196, "ext": "py", "lang": "Python", "max_stars_repo_path": "variables.py", "max_stars_repo_name": "iuliansimion/Chevalley", "max_stars_repo_head_hexsha": "9122b1002db99c352533c99f7e143f5be8f8c34b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "variables.py", "max_issues_repo_name": "iuliansimion/Chevalley", "max_issues_repo_head_hexsha": "9122b1002db99c352533c99f7e143f5be8f8c34b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "variables.py", "max_forks_repo_name": "iuliansimion/Chevalley", "max_forks_repo_head_hexsha": "9122b1002db99c352533c99f7e143f5be8f8c34b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1111111111, "max_line_length": 80, "alphanum_fraction": 0.5063752277, "include": true, "reason": "import sympy", "num_tokens": 614} |
import numpy as np
def noise_model():
#######################################################
# Noise model for qBucket
## 0) Additive noise,
## 1) Zero-mean,
    ##  2) Standard deviation is fixed at 10.0,
## 3) min: Truncated below at min,
## 4) size: number of samples,
## 5) Closure takes a single value argument.
def close_trunc_gauss(min: float=0.0):
def the_closure(x, seed: int=None):
if seed is not None:
np.random.seed(seed)
return np.maximum(x+np.random.normal(0.0, 10.0, 1), min)
return the_closure
return np.vectorize(close_trunc_gauss(min=0.0))
| {"hexsha": "8dbe8c17bb1f224179d4aaf7cb7c724e431060d9", "size": 653, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/wrf_hydro/python/perturb/noise_qBucket_additive.py", "max_stars_repo_name": "hkershaw-brown/feature-preprocess", "max_stars_repo_head_hexsha": "fe2bd77b38c63fa0566c83ebc4d2fac1623aef66", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2019-10-16T13:31:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T11:52:58.000Z", "max_issues_repo_path": "models/wrf_hydro/python/perturb/noise_qBucket_additive.py", "max_issues_repo_name": "hkershaw-brown/feature-preprocess", "max_issues_repo_head_hexsha": "fe2bd77b38c63fa0566c83ebc4d2fac1623aef66", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 283, "max_issues_repo_issues_event_min_datetime": "2019-09-23T15:48:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:44:41.000Z", "max_forks_repo_path": "models/wrf_hydro/python/perturb/noise_qBucket_additive.py", "max_forks_repo_name": "hkershaw-brown/feature-preprocess", "max_forks_repo_head_hexsha": "fe2bd77b38c63fa0566c83ebc4d2fac1623aef66", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 67, "max_forks_repo_forks_event_min_datetime": "2019-09-19T22:13:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:58:26.000Z", "avg_line_length": 31.0952380952, "max_line_length": 68, "alphanum_fraction": 0.5513016845, "include": true, "reason": "import numpy", "num_tokens": 171} |
MODULE linear_algebra
CONTAINS
SUBROUTINE solve_leqs(N,A,sol,triv)
! solve the linear equations Ax=0
    ! return a minimal solution, i.e., in x the number of nonzero elements is smallest but not 0
IMPLICIT NONE
INTEGER,INTENT(IN)::N
REAL(KIND(1d0)),DIMENSION(N,N),INTENT(IN)::A
REAL(KIND(1d0)),DIMENSION(N,N)::B
INTEGER,DIMENSION(N)::iexchange
INTEGER::i,j,inow,k,iswap,izero
REAL(KIND(1d0)),PARAMETER::EPS=1d-10
REAL(KIND(1d0)),DIMENSION(N),INTENT(OUT)::sol
REAL(KIND(1d0)),DIMENSION(N)::row,coloum
REAL(KIND(1d0))::maxv,temp
LOGICAL,INTENT(OUT)::triv
LOGICAL::allzero
    ! reduce matrix A to upper triangular form
B(1:N,1:N)=A(1:N,1:N)
maxv=MAXVAL(ABS(B(1:N,1:N)))
IF(maxv.LT.EPS)THEN
! a trivial solution
sol(1)=1d0
sol(2:N)=0d0
triv=.FALSE.
RETURN
ENDIF
DO i=1,N
iexchange(i)=i
ENDDO
inow=1
izero=0
DO i=1,N
allzero=.TRUE.
DO j=inow,N
IF(ABS(B(j,inow))/maxv.GT.EPS)THEN
allzero=.FALSE.
IF(j.NE.inow)THEN
row(1:N)=B(inow,1:N)
B(inow,1:N)=B(j,1:N)
B(j,1:N)=row(1:N)
ENDIF
EXIT
ENDIF
ENDDO
IF(.NOT.allzero)THEN
DO j=inow+1,N
IF(inow+1.LE.N)THEN
B(j,inow+1:N)=B(j,inow+1:N)-B(inow,inow+1:N)*B(j,inow)/B(inow,inow)
ENDIF
B(j,inow)=0d0
ENDDO
inow=inow+1
ELSE
         ! the column is zero, put it at the end
coloum(1:N)=B(1:N,inow)
izero=izero+1
iswap=iexchange(N)
iexchange(N)=iexchange(inow)
DO j=inow+1,N
B(1:N,j-1)=B(1:N,j)
IF(j.NE.N)THEN
iexchange(j-1)=iexchange(j)
ELSE
iexchange(j-1)=iswap
ENDIF
ENDDO
B(1:N,N)=coloum(1:N)
ENDIF
! PRINT *,i
! DO j=1,N
! WRITE(*,*)B(j,1:N)
! ENDDO
ENDDO
! DO i=1,N
! WRITE(*,*)B(i,1:N)
! ENDDO
! WRITE(*,*)"================"
IF(izero.GT.0)THEN
sol(iexchange(N))=1d0
DO inow=1,N-1
i=N-inow
IF(ABS(B(i,i))/maxv.LT.EPS)THEN
sol(iexchange(i))=0d0
ELSE
temp=0d0
DO j=i+1,N
temp=temp-B(i,j)*sol(iexchange(j))
ENDDO
sol(iexchange(i))=temp/B(i,i)
ENDIF
ENDDO
triv=.FALSE.
ELSE
sol(1:N)=0d0
triv=.TRUE.
ENDIF
RETURN
END SUBROUTINE SOLVE_LEQS
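  ! Usage sketch (illustrative, not part of the module):
  !   REAL(KIND(1d0)),DIMENSION(3,3)::A
  !   REAL(KIND(1d0)),DIMENSION(3)::x
  !   LOGICAL::trivial
  !   CALL solve_leqs(3,A,x,trivial)
  !   ! if trivial is .FALSE., x holds a nonzero solution of A*x = 0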
SUBROUTINE solve_cleqs(N,A,sol,triv)
! solve the complex linear equations Ax=0
    ! return a minimal solution, i.e., in x the number of nonzero elements is smallest but not 0
IMPLICIT NONE
INTEGER,INTENT(IN)::N
COMPLEX(KIND(1d0)),DIMENSION(N,N),INTENT(IN)::A
COMPLEX(KIND(1d0)),DIMENSION(N,N)::B
INTEGER,DIMENSION(N)::iexchange
INTEGER::i,j,inow,k,iswap,izero
REAL(KIND(1d0)),PARAMETER::EPS=1d-10
COMPLEX(KIND(1d0)),DIMENSION(N),INTENT(OUT)::sol
COMPLEX(KIND(1d0)),DIMENSION(N)::row,coloum
REAL(KIND(1d0))::maxv
COMPLEX(KIND(1d0))::temp
LOGICAL,INTENT(OUT)::triv
LOGICAL::allzero
    ! reduce matrix A to upper triangular form
B(1:N,1:N)=A(1:N,1:N)
maxv=MAXVAL(ABS(B(1:N,1:N)))
IF(maxv.LT.EPS)THEN
! a trivial solution
sol(1)=DCMPLX(1d0)
sol(2:N)=DCMPLX(0d0)
triv=.FALSE.
RETURN
ENDIF
DO i=1,N
iexchange(i)=i
ENDDO
inow=1
izero=0
DO i=1,N
allzero=.TRUE.
DO j=inow,N
IF(ABS(B(j,inow))/maxv.GT.EPS)THEN
allzero=.FALSE.
IF(j.NE.inow)THEN
row(1:N)=B(inow,1:N)
B(inow,1:N)=B(j,1:N)
B(j,1:N)=row(1:N)
ENDIF
EXIT
ENDIF
ENDDO
IF(.NOT.allzero)THEN
DO j=inow+1,N
IF(inow+1.LE.N)THEN
B(j,inow+1:N)=B(j,inow+1:N)-B(inow,inow+1:N)*B(j,inow)/B(inow,inow)
ENDIF
B(j,inow)=0d0
ENDDO
inow=inow+1
ELSE
         ! the column is zero, put it at the end
coloum(1:N)=B(1:N,inow)
izero=izero+1
iswap=iexchange(N)
iexchange(N)=iexchange(inow)
DO j=inow+1,N
B(1:N,j-1)=B(1:N,j)
IF(j.NE.N)THEN
iexchange(j-1)=iexchange(j)
ELSE
iexchange(j-1)=iswap
ENDIF
ENDDO
B(1:N,N)=coloum(1:N)
ENDIF
ENDDO
IF(izero.GT.0)THEN
sol(iexchange(N))=DCMPLX(1d0)
DO inow=1,N-1
i=N-inow
IF(ABS(B(i,i))/maxv.LT.EPS)THEN
sol(iexchange(i))=DCMPLX(0d0)
ELSE
temp=DCMPLX(0d0)
DO j=i+1,N
temp=temp-B(i,j)*sol(iexchange(j))
ENDDO
sol(iexchange(i))=temp/B(i,i)
ENDIF
ENDDO
triv=.FALSE.
ELSE
sol(1:N)=DCMPLX(0d0)
triv=.TRUE.
ENDIF
RETURN
END SUBROUTINE SOLVE_CLEQS
END MODULE linear_algebra
| {"hexsha": "6f7082914e14d9b00dc69f90ddb9ef538d1ee779", "size": 5180, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "vendor/IREGI/src/linear_algebra.f90", "max_stars_repo_name": "valassi/mg5amc_test", "max_stars_repo_head_hexsha": "2e04f23353051f64e1604b23105fe3faabd32869", "max_stars_repo_licenses": ["NCSA"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-10-23T14:37:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-22T20:59:02.000Z", "max_issues_repo_path": "vendor/IREGI/src/linear_algebra.f90", "max_issues_repo_name": "valassi/mg5amc_test", "max_issues_repo_head_hexsha": "2e04f23353051f64e1604b23105fe3faabd32869", "max_issues_repo_licenses": ["NCSA"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2018-10-08T15:49:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-15T13:33:36.000Z", "max_forks_repo_path": "vendor/IREGI/src/linear_algebra.f90", "max_forks_repo_name": "valassi/mg5amc_test", "max_forks_repo_head_hexsha": "2e04f23353051f64e1604b23105fe3faabd32869", "max_forks_repo_licenses": ["NCSA"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-02-18T11:42:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-11T20:46:08.000Z", "avg_line_length": 26.8393782383, "max_line_length": 93, "alphanum_fraction": 0.4986486486, "num_tokens": 1831} |
Module Base.
Inductive t :=
| A
| B.
Definition f0 x :=
match x with
| A => A
| B => B
end.
Print f0.
Definition f0bis x y :=
match x, y with
| A, A => A
| _, _ => B
end.
Print f0bis.
End Base.
Definition f1 n :=
match n with
| O => Base.A
| S _ => Base.B
end.
Print f1.
Definition f2 n :=
match n with
| O => Base.A
| S (S n) => Base.B
| _ => Base.A
end.
Print f2.
Definition pred n :=
match n with
| O => O
| S O => O
| S (S n) => S n
end.
Print pred.
Fixpoint add n m :=
match n, m with
| O, _ => m
| _, O => n
| S p, S q => S (S (add p q))
end.
Print add.
Theorem add_0_l n : add O n = n.
Proof.
reflexivity.
Qed.
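(* Illustrative check (not in the original file): the two-step recursion of
   add still computes on closed terms. *)
Example add_2_3 : add (S (S O)) (S (S (S O))) = S (S (S (S (S O)))).
Proof. reflexivity. Qed.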
Inductive vector (A : Type) : nat -> Type :=
| Nil : vector A O
| Cons n (_ : A) (_ : vector A n) : vector A (S n).
Definition rectS {A} (P:forall n, vector A (S n) -> Type)
(bas: forall a: A, P _ (Cons _ _ a (Nil _)))
(rect: forall a {n} (v: vector A (S n)), P _ v -> P _ (Cons _ _ a v)) :=
fix rectS_fix {n} (v: vector A (S n)) : P _ v :=
match v with
|Cons _ 0 a v =>
match v with
|Nil _ => bas a
|_ => fun devil => False_ind (@IDProp) devil (* subterm !!! *)
end
|Cons _ (S nn') a v => rect a v (rectS_fix v)
|_ => fun devil => False_ind (@IDProp) devil (* subterm !!! *)
end.
Definition head' (A : Type) (n : nat) (v : vector A (S n)) :=
match v in vector _ n0 return match n0 return Type with
| S m => A
| O => IDProp
end with
| Cons _ _ hd _tl => hd
| Nil _ => idProp
end.
Print head'.
Definition head (A : Type) (n : nat) (v : vector A (S n)) :=
match v with
| Cons _ _ hd _tl => hd
end.
Print head.
Definition tail' (A : Type) (n : nat) (v : vector A (S n)) : vector A n :=
match v in vector _ n0 return match n0 return Type with
| S m => vector A m
| O => IDProp
end with
| Cons _ _ _hd tl => tl
| Nil _ => idProp
end.
Print tail'.
Definition tail (A : Type) (n : nat) (v : vector A (S n)) : vector A n (*bug!*) :=
match v with
| Cons _ _ _hd tl => tl
end.
Print tail.
Definition tail2' (A : Type) (n : nat) (v : vector A (S (S n))) :=
match
v as v0 in (vector _ H)
return
(match H as H0 return Type with
| O => IDProp
| S x =>
match x as H1 return Type with
| O => IDProp
| S x0 => vector A (S x0)
end
end)
with
| Nil _ => idProp
| Cons _ x _hd tl =>
match x as H, tl return match H return Type with
| O => IDProp
| S n => vector A (S n)
end with
| O, _ => idProp
| S x0, tl => tl
end
end.
Print tail2'.
Definition tail2 (A : Type) (n : nat) (v : vector A (S (S n))) :=
match v with
| Cons _ _ _hd tl => tl
end.
Print tail2.
Definition snd (A : Type) (n : nat) (v : vector A (S (S n))) :=
match v with
| Cons _ _ _ (Cons _ _ hd _) => hd
end.
Print snd.
Definition third (A : Type) (n : nat) (v : vector A (S (S (S n)))) :=
match v with
| Cons _ _ _ (Cons _ _ _ (Cons _ _ hd _)) => hd
end.
Print third.
Definition forth (A : Type) (n : nat) (v : vector A (S (S (S (S n))))) :=
match v with
| Cons _ _ _ (Cons _ _ _ (Cons _ _ _ (Cons _ _ hd _))) => hd
end.
Print forth.
Definition fifth (A : Type) (n : nat) (v : vector A (S (S (S (S (S n)))))) :=
match v with
| Cons _ _ _ (Cons _ _ _ (Cons _ _ _ (Cons _ _ _ (Cons _ _ hd _)))) => hd
end.
Print fifth.
Fixpoint map A B (n : nat) (f : A -> B) (v : vector A n) : vector B n :=
match v with
| Nil _ => Nil B
| Cons _ _ hd tl => Cons _ _ (f hd) (map _ _ _ f tl)
end.
Print map.
(*
Fixpoint mapn A B (n : nat) (f : A -> B) (v : vector A n) : vector B n :=
match n, v with
| O, Nil _ => Nil B
| S m, Cons _ _ hd tl => Cons _ _ (f hd) (mapn _ _ m f tl)
end.
Print mapn.
*)
Fixpoint map2' A0 A1 B (n : nat) (f : A0 -> A1 -> B) (v0 : vector A0 n) (v1 : vector A1 n) :=
match v0 in vector _ n0 return
vector A1 n0 -> vector B n0
with
| Nil _ => fun v1 : vector A1 O =>
match v1 in vector _ n0 return
match n0 return Type with
| O => vector B O
| S _ => IDProp
end with
| Nil _ => Nil B
| Cons _ _ _ _ => idProp
end
| Cons _ n0 hd0 tl0 => fun v1 : vector A1 (S n0) =>
match v1 in vector _ n1 return
match n1 return Type with
| O => IDProp
| S n2 => vector A0 n2 -> vector B (S n2)
end with
| Nil _ => idProp
| Cons _ n1 hd1 tl1 => fun tl0 : vector A0 n1 =>
Cons _ n1 (*bug!*) (f hd0 hd1) (map2' A0 A1 B _ f tl0 tl1)
end tl0
end v1.
Print map2'.
Fixpoint map2 A0 A1 B (n : nat) (f : A0 -> A1 -> B) (v0 : vector A0 n) (v1 : vector A1 n) : vector B n :=
match v0, v1 with
| Nil _, Nil _ => Nil _
| Cons _ _ hd0 tl0, Cons _ _ hd1 tl1 =>
Cons _ _ (f hd0 hd1) (map2 _ _ _ _ f tl0 tl1)
end.
Print map2.
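(* Illustrative computation (not in the original file): map2 applied to two
   concrete vectors of length one, using the add defined above. *)
Example map2_example :
  map2 nat nat nat (S O) add
       (Cons _ _ (S O) (Nil _)) (Cons _ _ (S (S O)) (Nil _))
  = Cons _ _ (S (S (S O))) (Nil _).
Proof. reflexivity. Qed.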
Module M0.
Inductive t : nat -> Type :=
| A : t (S O).
Definition test := fun n (v : t n) =>
match n, v with
| S n, A => tt
end.
End M0.
Module M1.
Inductive t : nat -> Type := C : t 0 | D n : t n -> t n.
Definition test (v : t 0) : t 0 :=
match v with
| C => C
| D n x => D 0 x
end.
Print test.
End M1.
(*
Fixpoint map3 A0 A1 A2 B (n : nat) (f : A0 -> A1 -> A2 -> B) (v0 : vector A0 n) (v1 : vector A1 n) (v2 : vector A2 n) : vector B n :=
match v0, v1, v2 with
| Nil _, Nil _, Nil _ => Nil _
| Cons _ _ hd0 tl0, Cons _ _ hd1 tl1, Cons _ _ hd2 tl2 =>
Cons _ _ (f hd0 hd1 hd2) (map3 _ _ _ _ _ f tl0 tl1 tl2)
end.
Print map3.
*)
(* from Logic.v *)
Inductive ex (A:Type) (P:A -> Prop) : Prop :=
ex_intro : forall x:A, P x -> ex A P.
Section Projections.
Variables (A:Prop) (P:A->Prop).
Definition ex_proj1 (x:ex A P) : A :=
match x with ex_intro _ _ a _ => a end.
Print ex_proj1.
Definition ex_proj2 (x:ex A P) : P (ex_proj1 x) :=
match x with ex_intro _ _ _ b => b end.
End Projections.
Inductive bool : Set :=
| true : bool
| false : bool.
Definition andb (b1 b2:bool) : bool := if b1 then b2 else false.
Require Import Lists.List.
Import ListNotations.
Fixpoint last (A: Type) (l:list A) (d:A) : A :=
match l with
| [] => d
| [a] => a
| a :: l => last A l d
end.
Section Equivalences.
Variable U:Type.
Definition UIP_refl_on_ (x : U) :=
forall (p : x = x), p = eq_refl x.
End Equivalences.
Theorem UIP_shift_on (X : Type) (x : X) :
UIP_refl_on_ X x -> forall y : x = x, UIP_refl_on_ (x = x) y.
Proof.
intros UIP_refl y.
rewrite (UIP_refl y).
intros z.
assert (UIP:forall y' y'' : x = x, y' = y'').
{ intros. apply eq_trans_r with (eq_refl x); apply UIP_refl. }
transitivity (eq_trans (eq_trans (UIP (eq_refl x) (eq_refl x)) z)
(eq_sym (UIP (eq_refl x) (eq_refl x)))).
- destruct z. destruct (UIP _ _). reflexivity.
- change
(match eq_refl x as y' in _ = x' return y' = y' -> Prop with
| eq_refl => fun z => z = (eq_refl (eq_refl x))
end (eq_trans (eq_trans (UIP (eq_refl x) (eq_refl x)) z)
(eq_sym (UIP (eq_refl x) (eq_refl x))))).
destruct z. destruct (UIP _ _). reflexivity.
Qed.
Lemma UIP_refl_bool (b:bool) (x : b = b) : x = eq_refl.
Proof.
destruct b.
- change (match true as b return true=b -> Prop with
| true => fun x => x = eq_refl
| _ => fun _ => True
end x).
destruct x; reflexivity.
- change (match false as b return false=b -> Prop with
| false => fun x => x = eq_refl
| _ => fun _ => True
end x).
destruct x; reflexivity.
Defined.
Module Fin.
Inductive t : nat -> Set :=
|F1 : forall {n}, t (S n)
|FS : forall {n}, t n -> t (S n).
Definition case0 P (p: t O): P p :=
match p with | F1 | FS _ => fun devil => False_rect (@IDProp) devil (* subterm !!! *) end.
Definition caseS' {n : nat} (p : t (S n)) : forall (P : t (S n) -> Type)
(P1 : P F1) (PS : forall (p : t n), P (FS p)), P p :=
match p with
| @F1 k => fun P P1 PS => P1
| FS pp => fun P P1 PS => PS pp
end.
Fixpoint weak {m}{n} p (f : t m -> t n) :
t (p + m) -> t (p + n) :=
match p as p' return t (p' + m) -> t (p' + n) with
|0 => f
|S p' => fun x => match x with
|@F1 n' => fun eq : n' = p' + m => F1
|@FS n' y => fun eq : n' = p' + m => FS (weak p' f (eq_rect _ t y _ eq))
end (eq_refl _)
end.
End Fin.
| {"author": "thierry-martinez", "repo": "small_inversion", "sha": "00714ff638926422a9aa26d10c75fdd1b5625021", "save_path": "github-repos/coq/thierry-martinez-small_inversion", "path": "github-repos/coq/thierry-martinez-small_inversion/small_inversion-00714ff638926422a9aa26d10c75fdd1b5625021/test.v"} |
#using Pkg
#pkg"activate .."
#push!(LOAD_PATH,"../src/")
using MPT, Documenter
DocMeta.setdocmeta!(MPT, :DocTestSetup, :(using MPT); recursive=true)
makedocs(;
modules=[MPT],
authors="Kiar Fatah",
repo="https://github.com/Xiar-fatah/MPT.jl/blob/{commit}{path}#{line}",
sitename="MPT.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://Xiar-fatah.github.io/MPT.jl",
assets=String[],
),
pages=[
#"Home" => "index.md",
"index.md",
"portfolio.md",
"backtesting.md"
]
)
deploydocs(;
repo="github.com/Xiar-fatah/MPT.jl",
versions = ["stable" => "master", "dev" => "master"]
#devbranch="master",
)
| {"hexsha": "1ea6c5604a67048dd9d47c574fd84d498055b104", "size": 754, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "Xiar-fatah/MPT.jl", "max_stars_repo_head_hexsha": "aa3225e6e446ce227a3dcbabe54caffcddc01217", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "Xiar-fatah/MPT.jl", "max_issues_repo_head_hexsha": "aa3225e6e446ce227a3dcbabe54caffcddc01217", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "Xiar-fatah/MPT.jl", "max_forks_repo_head_hexsha": "aa3225e6e446ce227a3dcbabe54caffcddc01217", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5625, "max_line_length": 75, "alphanum_fraction": 0.5649867374, "num_tokens": 228} |
! Copyright 2019 Khang Hoang Nguyen
!
! Permission is hereby granted, free of charge, to any person obtaining
! a copy of this software and associated documentation files
! (the "Software"), to deal in the Software without restriction,
! including without limitation the rights to use, copy, modify, merge,
! publish, distribute, sublicense, and/or sell copies of the Software,
! and to permit persons to whom the Software is furnished to do so,
! subject to the following conditions
!
! The above copyright notice and this permission notice shall be
! included in all copies or substantial portions of the Software.
!
! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
! EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
! MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
! NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
! BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
! ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
! CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
! SOFTWARE.
!| author: Khang Hoang Nguyen
! license: <a href="https://github.com/kevinhng86/faiNumber-Fortran/blob/master/LICENSE">MIT</a>
! since: 1.0.0.f
!
! <p>This module, <code>fnDecimalUtil64</code> contains procedures for
! working with decimal strings and the int64 data type.
! </p>
!
! @note Unless stated otherwise, procedures of this module are pure
! procedures.
module fnDecimalUtil64
use fnConsts
use fnConsts64
implicit none
integer(k_int64), parameter :: dmax64 = 9_k_int64
integer(k_int32), parameter :: dmax32 = 9_k_int32
private :: dmax64, dmax32
contains
!| author: Khang Hoang Nguyen
! since: 1.0.0.f
!
! <p>Parse the `input` string as a signed decimal integer string
! to an int64 value.
!
! <p>This subroutine ignores leading and trailing whitespaces.
!
! <p>This subroutine considers an error to has occurred:<br>
! 1. If the `input` string only contains empty spaces.<br>
! 2. If the `input` string have a length of zero.<br>
! 3. If the value for either the `startpos` or `endpos` arguments
! is incorrect.<br>
! 4. If the `input` string contains a value that is smaller than
    !     the min value of the int64 data type or larger than the max
! value of the int64 data type.<br>
! 5. If the `input` string is not a valid signed decimal integer
! string.
! </p>
!
! @see <a href="|url|/page/startpos-endpos-explanation.html">
! startpos & endpos explanation</a>
pure subroutine decToInt64(input, output, error, startpos, endpos)
implicit none
character(len=*), intent(in) :: input !! A string to be parsed as a signed decimal integer string to an int64 value.
integer(k_int64), intent(out) :: output !! An int64 value of the <code>input</code> string if no error has occurred during parsing.
logical , intent(out) :: error !! A value of `.TRUE.` if an error has occurred during parsing, or `.FALSE.`, otherwise.
integer(k_int32), intent(in), optional :: startpos !! An int32 value of the position(inclusive) of where to start parsing.
integer(k_int32), intent(in), optional :: endpos !! An int32 value of the position(inclusive) of where to end parsing.
integer(k_int32) :: c, length, start, runlen, ch1
output = 0_k_int64 ; error = .TRUE.
length = len(input) ; start = 1
if ( present(endpos) ) then
if ( endpos < 1 ) return
if ( endpos < length ) length = endpos
end if
do while ( length > 0 )
if ( input(length:length) /= charspace ) exit
length = length - 1
end do
if ( present(startpos) ) then
if ( startpos > 1 ) start = startpos
end if
do while( start <= length )
if ( input(start:start) /= charspace ) exit
start = start + 1
end do
if ( start > length ) return
      ! Since ch1 is not used in any arithmetic, ch1 can be int32 and
      ! can be compared against int32 values. On the other hand, output
      ! should be compared and processed against int64 values. This
      ! avoids values being up-converted to int64 multiple times and
      ! constants being up-converted to int64.
ch1 = ICHAR(input(start:start))
if ( ch1 == cneg32 .OR. ch1 == cpos32 ) then
start = start + 1
if ( start > length ) return
end if
do while ( start <= length )
if ( input(start:start) /= charzero ) exit
start = start + 1
end do
runlen = (length + 1) - start
if ( runlen == 0 ) then
error = .FALSE.
return
end if
output = IEOR(ICHAR(input(start:start)), czero32)
if ( output > dmax64 ) return
start = start + 1
if ( runlen > 18 ) then
if ( runlen > 19 ) return
do while ( start < length )
c = IEOR(ICHAR(input(start:start)), czero32)
if ( c > dmax32 ) return
output = ISHFT(output, 1) + ISHFT(output, 3) + INT(c, k_int64)
start = start + 1
end do
c = IEOR(ICHAR(input(start:start)), czero32)
if ( c > dmax32 ) return
if ( output > 922337203685477579_k_int64 ) then
if ( output > 922337203685477580_k_int64 ) return
if ( c > 7 ) then
if ( c > 8 ) return
if ( ch1 /= cneg32 ) return
output = not(ISHFT(output, 1) + ISHFT(output, 3)) + 1_k_int64
output = output - INT(c, k_int64)
error = .FALSE.
return
end if
end if
output = ISHFT(output, 1) + ISHFT(output, 3) + INT(c, k_int64)
if ( ch1 == cneg32 ) output = not(output) + 1_k_int64
error = .FALSE.
return
end if
do while ( start <= length )
c = IEOR(ICHAR(input(start:start)), czero32)
if ( c > dmax32 ) return
output = ISHFT(output, 1) + ISHFT(output, 3) + INT(c, k_int64)
start = start + 1
end do
if ( ch1 == cneg32 ) output = not(output) + 1_k_int64
error = .FALSE.
end subroutine decToInt64
!| author: Khang Hoang Nguyen
! since: 1.0.0.f
!
! <p>Parse the `input` string as a signed decimal integer string
! to an int64 value.
!
! <p>This subroutine ignores leading and trailing whitespaces.
!
! <p>Error codes:<br>
! 0 - none <br>
! 1 - empty string<br>
! 2 - invalid format<br>
! 3 - underflow<br>
! 4 - overflow<br>
! 5 - invalid argument endpos/startpos
! </p>
!
! @note This subroutine may take longer on unsuccessful parse cases.
!
! @see <a href="|url|/page/startpos-endpos-explanation.html">
! startpos & endpos explanation</a>
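!
! <p>Example (illustrative only, not part of the original documentation;
! `value` and `err` are assumed to be local variables of kinds
! `integer(k_int64)` and `integer(k_int32)`):<br>
! <code>call decToInt64TrueError("9223372036854775808", value, err)</code>
! sets `err` to 4 (overflow), while parsing "-9223372036854775809"
! sets `err` to 3 (underflow).</p>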
pure subroutine decToInt64TrueError(input, output, error, startpos, endpos)
implicit none
character(len=*), intent(in) :: input !! A string to be parsed as a signed decimal integer string to an int64 value.
integer(k_int64), intent(out) :: output !! An int64 value of the <code>input</code> string if no error has occurred during parsing.
integer(k_int32), intent(out) :: error !! An int32 value of 0 on successful parse cases or a true error code on unsuccessful parse cases.
integer(k_int32), intent(in), optional :: startpos !! An int32 value of the position(inclusive) of where to start parsing.
integer(k_int32), intent(in), optional :: endpos !! An int32 value of the position(inclusive) of where to end parsing.
integer(k_int32) :: c, length, start, runlen, ch1
output = 0_k_int64 ; error = 0
length = len(input) ; start = 1
if ( present(endpos) ) then
if ( endpos < 1 ) goto 5
if ( present(startpos) ) then
if ( endpos < startpos ) goto 5
end if
if ( endpos < length ) length = endpos
end if
do while ( length > 0 )
if ( input(length:length) /= charspace ) exit
length = length - 1
end do
if ( present(startpos) ) then
if ( startpos > length ) goto 5
if ( startpos > 1 ) start = startpos
end if
do while( start <= length )
if ( input(start:start) /= charspace ) exit
start = start + 1
end do
if ( start > length ) goto 1
ch1 = ICHAR(input(start:start))
if ( ch1 == cneg32 .OR. ch1 == cpos32 ) then
start = start + 1
if ( start > length ) goto 2
end if
do while ( start <= length )
if ( input(start:start) /= charzero ) exit
start = start + 1
end do
runlen = (length + 1) - start
if ( runlen == 0 ) return
output = IEOR(ICHAR(input(start:start)), czero32)
if ( output > dmax64 ) goto 2
start = start + 1
if ( runlen > 18 ) then
if ( runlen > 19 ) goto 10
do while ( start < length )
c = IEOR(ICHAR(input(start:start)), czero32)
if ( c > dmax32 ) goto 2
output = ISHFT(output, 1) + ISHFT(output, 3) + INT(c, k_int64)
start = start + 1
end do
c = IEOR(ICHAR(input(start:start)), czero32)
if ( c > dmax32 ) goto 2
if ( output > 922337203685477579_k_int64 ) then
if ( output > 922337203685477580_k_int64 ) goto 11
if ( c > 7 ) then
if ( c > 8 ) goto 11
if ( ch1 /= cneg32 ) goto 4
output = not(ISHFT(output, 1) + ISHFT(output, 3)) + 1_k_int64
output = output - INT(c, k_int64)
return
end if
end if
output = ISHFT(output, 1) + ISHFT(output, 3) + INT(c, k_int64)
if ( ch1 == cneg32 ) output = not(output) + 1_k_int64
return
end if
do while ( start <= length )
c = IEOR(ICHAR(input(start:start)), czero32)
if ( c > dmax32 ) goto 2
output = ISHFT(output, 1) + ISHFT(output, 3) + INT(c, k_int64)
start = start + 1
end do
if ( ch1 == cneg32 ) output = not(output) + 1_k_int64
return
1 continue
error = 1
return
2 continue
error = 2
return
3 continue
error = 3
return
4 continue
error = 4
return
5 continue
error = 5
return
10 continue
do while ( start <= length )
if ( IEOR(ICHAR(input(start:start)), czero32) > dmax32 ) goto 2
start = start + 1
end do
11 continue
if ( ch1 == cneg32 ) goto 3
goto 4
end subroutine decToInt64TrueError
!| author: Khang Hoang Nguyen
! since: 1.0.0.f
!
! <p>Compare two strings as signed decimal integer strings by
! parsing them to int64 values first.
! </p>
!
! @see [[decToInt64]]
pure subroutine decCompareAsInt64(firstString, secondString, output, error)
implicit none
character(len=*), intent(in) :: firstString !! A string to be compared to the string <code>secondString</code>.
character(len=*), intent(in) :: secondString !! A string to be compared to the string <code>firstString</code>.
integer(k_int32), intent(out) :: output !! An int32 value of 1 if the `firstString` is larger than the `secondString`, 0 if they are both equal, or -1 if the `firstString` is smaller than the `secondString`. This value may not be a correct value if an error has occurred during parsing either one of the `input` strings.
logical , intent(out) :: error !! A value of `.TRUE.` if an error has occurred during parsing either one of the `input` strings. Otherwise, a value of `.FALSE.`.
integer(k_int64) :: n1, n2
logical :: e1, e2
output = 0 ; error = .FALSE.
call decToInt64(firstString, n1, e1)
call decToInt64(secondString, n2, e2)
if ( (e1 .eqv. .TRUE.) .OR. (e2 .eqv. .TRUE.) ) then
error = .TRUE.
return
end if
if ( n1 > n2 ) output = 1
if ( n1 < n2 ) output = -1
end subroutine decCompareAsInt64
!| author: Khang Hoang Nguyen
! since: 1.0.0.f
!
! <p>Compare two strings based on their content with reference
! to int64 values. If the strings are valid signed decimal
! integer strings that can be parsed to int64 values, then they will
! be compared based on their int64 values. Otherwise, the strings
! will be compared based on the priority ranking order below.
!
! <p>This function returns an int32 value of 1 if the `firstString`
! is larger than the `secondString`, 0 if they are both equal, or
! -1 if the `firstString` is smaller than the `secondString`.
!
! <p>Priority order ranking: (lo - hi)<br />
! 0 - invalid format<br />
! 1 - underflow<br />
! 2 - overflow<br />
! 3 - empty string (0 length or empty space)<br />
! 4 - valid int64
! </p>
!
! @see [[decToInt64TrueError]]
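!
! <p>Example (illustrative only, not part of the original documentation):<br>
! <code>decInt64OrSmaller("abc", "99")</code> returns -1 because an
! invalid-format string ranks below a valid int64 string, while
! <code>decInt64OrSmaller("", "99999999999999999999")</code> returns 1
! because an empty string ranks above an overflowing one.</p>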
pure integer(k_int32) function decInt64OrSmaller(firstString, secondString) result(int32Out)
implicit none
character(len=*), intent(in) :: firstString !! A string to be compared to the string <code>secondString</code>.
character(len=*), intent(in) :: secondString !! A string to be compared to the string <code>firstString</code>.
integer(k_int64) :: n1, n2
integer(k_int32) :: e1, e2
int32Out = 0
call decToInt64TrueError(firstString, n1, e1)
call decToInt64TrueError(secondString, n2, e2 )
if ( e1 == 1 ) then
if ( e2 == 1 ) return
if ( e2 /= 0 ) then
int32Out = 1
else
int32Out = -1
end if
return
else if ( e2 == 1 ) then
if ( e1 /= 0 ) then
int32Out = -1
else
int32Out = 1
end if
return
end if
if ( e1 == 0 ) then
if ( e2 == 0 ) then
if ( n1 > n2 ) int32Out = 1
if ( n1 < n2 ) int32Out = -1
return
end if
int32Out = 1
return
else if ( e2 == 0 ) then
int32Out = -1
return
end if
if ( e1 > e2 ) int32Out = 1
if ( e1 < e2 ) int32Out = -1
end function decInt64OrSmaller
end module fnDecimalUtil64
| {"hexsha": "5f2178aecda7ed7c2f2e733d6cb69a0de884ce40", "size": 15839, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/fnDecimalUtil64.f90", "max_stars_repo_name": "kevinhng86/Fortran-faiNumber", "max_stars_repo_head_hexsha": "3514b7463ef2c6f8673527a5b3b4184845d0955e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-02-22T19:00:29.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-02T12:37:05.000Z", "max_issues_repo_path": "src/fnDecimalUtil64.f90", "max_issues_repo_name": "kevinhng86/faiNumber-Fortran", "max_issues_repo_head_hexsha": "3514b7463ef2c6f8673527a5b3b4184845d0955e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/fnDecimalUtil64.f90", "max_forks_repo_name": "kevinhng86/faiNumber-Fortran", "max_forks_repo_head_hexsha": "3514b7463ef2c6f8673527a5b3b4184845d0955e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5975, "max_line_length": 352, "alphanum_fraction": 0.5394279942, "num_tokens": 4217} |
# -*- coding: utf-8 -*-
#pylint: disable-msg=E0611, E1101, C0103, R0901, R0902, R0903, R0904, W0232
#------------------------------------------------------------------------------
# Copyright (c) 2007-2020, Acoular Development Team.
#------------------------------------------------------------------------------
import os
from numpy import zeros, array, arange
from bokeh.models import ColumnDataSource,Spacer
from bokeh.models.widgets import Button, Select, Div, TableColumn, DataTable,TextInput
from bokeh.plotting import figure
from bokeh.models.ranges import Range1d
from bokeh.layouts import column,row
from sinus import SINUSDeviceManager, SINUSAnalogInputManager, \
SINUSSamplesGenerator, ini_import, get_dev_state, change_device_status, SINUS
from datetime import datetime
current_time = lambda: datetime.now().isoformat('_').replace(':','-').replace('.','_') # for timestamp filename
APPFOLDER =os.path.dirname(os.path.abspath( __file__ ))
CONFPATH = os.path.join(APPFOLDER,"config_files/")
BUFFBAR_ARGS = {'plot_width':280, 'plot_height':50}
DEV_SERIAL_NUMBERS = {'tornado': ['10142', '10112', '10125', '10126'],
'typhoon': [
'10092','10095','10030','10038',
'10115','10116','10118','10119',
'10120','10123',
],
'apollo11283': ['11283']}
BufferBarCDS = ColumnDataSource({'y':['buffer'],'filling':zeros(1)})
tedscolumns = [
TableColumn(field='channel', title='Channel',width=800),
TableColumn(field='serial', title='SensorSerNo',width=800),
TableColumn(field='sensitivity', title='SensorSensitivity',width=800),
#TableColumn(field='wiredata', title='1_Wire_Data',width=800),
TableColumn(field='calibdate', title='CalibDate',width=800),
#TableColumn(field='calibperiod', title='CalibPeriod',width=800),
#TableColumn(field='chipserial', title='ChipSerNo',width=800),
TableColumn(field='manufacturer', title='Manufacturer',width=1800),
TableColumn(field='sensorversion', title='SensorVersion',width=800),
#TableColumn(field='tedstemp', title='TEDS_Template',width=800),
]
tedsCDS = ColumnDataSource(data={
"channel":[],
"serial":[],
"sensitivity":[],
#"wiredata":[],
"calibdate":[],
"calibperiod":[],
"chipserial":[],
"manufacturer":[],
"sensorversion":[],
#"tedstemp":[],
})
tedsTable = DataTable(source=tedsCDS,columns=tedscolumns,width=1200)
tedsSavename = TextInput(value="", title="Filename:",disabled=False, width=500)
# Buttons
settings_button = Button(label="Load Setting",disabled=False)
# Open Close Status Section
open_device_button = Button(label="Open Device",disabled=False,button_type="primary",width=175,height=50)
close_device_button = Button(label="Close Device",disabled=False,button_type="danger",width=175,height=50)
reload_device_status_button = Button(label="↻",disabled=False,width=40,height=40)
load_teds_button = Button(label="get TEDS",width=200,height=60,button_type="primary")
save_teds_button = Button(label="save to .csv",width=200,height=60,button_type="warning")
status_text = Div(text="Device Status: ")
sinus_open_close = column(
row(open_device_button, close_device_button),
row(reload_device_status_button,status_text))
def get_device_mode_callback():
dev_mode = get_dev_state()
status_text.text = f"Device Status: {dev_mode}"
reload_device_status_button.on_click(get_device_mode_callback)
def open_device_callback():
change_device_status('Open')
dev_mode = get_dev_state()
status_text.text = f"Device Status: {dev_mode}"
open_device_button.on_click(open_device_callback)
def close_device_callback():
change_device_status('Config')
dev_mode = get_dev_state()
status_text.text = f"Device Status: {dev_mode}"
close_device_button.on_click(close_device_callback)
# Select Settings
select_setting = Select(title="Select Settings:", value="None")
reload_settings_options = Button(label="↻",disabled=False,width=40)
select_setting.options=["None"]+os.listdir(CONFPATH)
def update_select_settings_options_callback():
select_setting.options=["None"]+os.listdir(CONFPATH)
reload_settings_options.on_click(update_select_settings_options_callback)
select_settings_row = row(column(Spacer(height=15),reload_settings_options),select_setting)
# Buffer Bar
buffer_bar = figure(title="Buffer",y_range=['buffer'],x_range=(0,400),**BUFFBAR_ARGS)
buffer_bar.xgrid.visible = False
buffer_bar.ygrid.visible = False
buffer_bar.toolbar.logo = None
buffer_bar.toolbar_location = None
buffer_bar.axis.visible = False
buffer_bar.grid.visible = False
barbuff = buffer_bar.hbar(y='y', height=0.9, left=0, right='filling',
source=BufferBarCDS)
def get_callbacks(inputSignalGen,iniManager,devManager,devInputManager,
ChLevelsCDS,checkbox_micgeom,amp_fig,
MicGeomCDS,micGeo,logger):
def single_update_settings():
ticker = list(arange(1,inputSignalGen.numchannels+1))
ChLevelsCDS.data = {'channels':ticker,'level': zeros(inputSignalGen.numchannels)}
amp_fig.xaxis.ticker = ticker
amp_fig.xaxis.major_label_overrides = {str(ticker[i]): inputSignalGen.inchannels_[i] for i in range(inputSignalGen.numchannels)}
checkbox_micgeom.labels = inputSignalGen.inchannels_
checkbox_micgeom.active = [_ for _ in range(inputSignalGen.numchannels)]
buffer_bar.x_range=Range1d(0,int(devManager.BlockCount[0]))
if micGeo.num_mics > 0:
MicGeomCDS.data = {'x':micGeo.mpos[0,:],'y':micGeo.mpos[1,:],
'sizes':array([7]*micGeo.num_mics),
'channels':[inputSignalGen.inchannels_[i] for i in checkbox_micgeom.active]}
def update_buffer_bar_plot():
BufferBarCDS.data['filling'] = array([inputSignalGen._pdiff_in])
#
def settings_callback():
logger.info("load settings ...")
try:
iniManager.get_data(devManager,devInputManager,inputSignalGen)
[obj.set_settings() for obj in [devManager,devInputManager]]
except Exception as e_text:
logger.error("{}".format(e_text))
return
logger.info("set settings ok!")
single_update_settings()
status_text.text = f"Device Status: {get_dev_state()}"
settings_button.on_click(settings_callback)
def select_setting_callback(attr, old, new):
iniManager.from_file = os.path.join(CONFPATH,new)
select_setting.on_change('value',select_setting_callback)
return update_buffer_bar_plot
def get_interface(device,syncorder=[]):
if syncorder:
devManager = SINUSDeviceManager(orderdevices = syncorder)
elif not syncorder:
devManager = SINUSDeviceManager(orderdevices = DEV_SERIAL_NUMBERS[device])
devInputManager = SINUSAnalogInputManager()
inputSignalGen = SINUSSamplesGenerator(manager=devInputManager,
inchannels=devInputManager.namechannels)
iniManager = ini_import()
return iniManager, devManager,devInputManager,inputSignalGen
def get_teds_component(devInputManager, logger):
"""Returns the button and table widget that provides the TEDS data.
Necessary callbacks will be set up and implemented by this function.
Parameters
----------
inputSignalGen : instance
class instance from sinus python module
"""
# activate the detectTEDS functionality
def load_teds_callback():
logger.info("detect TEDS ...")
if not 'None' in devInputManager.DetectTEDS: # force reload of TEDS data if it was already loaded
devInputManager.DetectTEDS = ['None']
devInputManager.set_settings()
devInputManager.DetectTEDS = ['DetectTEDS']
devInputManager.set_settings()
tedsCDS.data = { # update DataTable ColumnDataSource
'channel' : [c for c in devInputManager.namechannels],
'serial' : [SINUS.Get(str(c),'TEDSData','SensorSerNo') for c in devInputManager.namechannels],
'sensitivity' : [SINUS.Get(str(c),'TEDSData','SensorSensitivity') for c in devInputManager.namechannels],
#'wiredata' : [SINUS.Get(str(c),'TEDSData','1_Wire_Data') for c in devInputManager.namechannels],
'calibdate' : [SINUS.Get(str(c),'TEDSData','CalibDate') for c in devInputManager.namechannels],
'calibperiod' : [SINUS.Get(str(c),'TEDSData','CalibPeriod') for c in devInputManager.namechannels],
'chipserial' : [SINUS.Get(str(c),'TEDSData','ChipSerNo') for c in devInputManager.namechannels],
'manufacturer' : [SINUS.Get(str(c),'TEDSData','Manufacturer') for c in devInputManager.namechannels],
'sensorversion' : [SINUS.Get(str(c),'TEDSData','SensorVersion') for c in devInputManager.namechannels],
#'tedstemp' : [SINUS.Get(str(c),'TEDSData','TEDS_Template') for c in devInputManager.namechannels],
}
print(tedsCDS.data)
logger.info("detect TEDS finished")
def save_csv_callback():
import csv
if not tedsSavename.value:
fname = os.path.join("Measurement_App","metadata",f"TEDSdata_{current_time()}.csv")
tedsSavename.value = fname
else:
fname = tedsSavename.value
with open(fname, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, dialect='excel')
csvwriter.writerow(tedsCDS.data.keys())
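# transpose the column-oriented ColumnDataSource dict into rows so that
# each CSV row holds the TEDS values of one channel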
rows = [[list(v)[i] for v in tedsCDS.data.values()] for i in range(len(list(tedsCDS.data.values())[0]))]
for values in rows:
csvwriter.writerow(values)
# set up callback
load_teds_button.on_click(load_teds_callback)
save_teds_button.on_click(save_csv_callback)
ur = row(load_teds_button,save_teds_button,tedsSavename)
return column(Spacer(height=15),ur,Spacer(height=15),tedsTable)
def append_left_column(left_column):
left_column.children.insert(1,sinus_open_close)
left_column.children.insert(2,select_settings_row)
left_column.children.insert(3, settings_button)
left_column.children.insert(4,Spacer(height=20))
left_column.children.insert(12, buffer_bar)
def append_disable_obj(disable_obj_disp):
disable_obj_disp.append(select_setting)
disable_obj_disp.append(settings_button)
return disable_obj_disp
def gather_metadata(devManager,devInputManager,inputSignalGen,iniManager,calibHelper):
meta = {
'config_file' : [iniManager.from_file],
'pci_synchronization' : devManager.orderdevices,
'generic_sensitivity' : inputSignalGen.sensval_,
'input_channel_names' : inputSignalGen.inchannels_,
}
for key,value in tedsCDS.data.items(): # add TEDS information
meta['TEDS_'+key] = value
for property in devInputManager.properties['settable']:
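# read the named AnalogInput property from devInputManager by name
# (eval builds the attribute access from the property string)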
meta['AnalogInput_'+property] = eval(f"devInputManager.{property}")
if calibHelper.calibdata.size > 0:
meta['calib_value'] = calibHelper.calibdata[0,:]
meta['calib_level'] = calibHelper.calibdata[1,:]
meta['calib_factor'] = calibHelper.calibfactor[:]
return meta
| {"hexsha": "232a49d6b38a6ed5b35b86c43ba553f5b410012d", "size": 11478, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/Measurement_App/sinus_dev.py", "max_stars_repo_name": "acoular/spectacoular", "max_stars_repo_head_hexsha": "eed39695df6c81cb179eae7429a020f71ed00dfc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-17T15:35:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T20:15:56.000Z", "max_issues_repo_path": "apps/Measurement_App/sinus_dev.py", "max_issues_repo_name": "acoular/spectacoular", "max_issues_repo_head_hexsha": "eed39695df6c81cb179eae7429a020f71ed00dfc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-17T08:13:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-16T11:54:21.000Z", "max_forks_repo_path": "apps/Measurement_App/sinus_dev.py", "max_forks_repo_name": "acoular/spectacoular", "max_forks_repo_head_hexsha": "eed39695df6c81cb179eae7429a020f71ed00dfc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.0409836066, "max_line_length": 136, "alphanum_fraction": 0.6729395365, "include": true, "reason": "from numpy", "num_tokens": 2715} |
[STATEMENT]
lemma exhaust_4:
fixes x :: 4
shows "x = 1 \<or> x = 2 \<or> x = 3 \<or> x = 4"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x = 1 \<or> x = 2 \<or> x = 3 \<or> x = 4
[PROOF STEP]
proof (induct x)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>z. \<lbrakk>0 \<le> z; z < int CARD(4)\<rbrakk> \<Longrightarrow> of_int z = 1 \<or> of_int z = 2 \<or> of_int z = 3 \<or> of_int z = 4
[PROOF STEP]
case (of_int z)
[PROOF STATE]
proof (state)
this:
0 \<le> z
z < int CARD(4)
goal (1 subgoal):
1. \<And>z. \<lbrakk>0 \<le> z; z < int CARD(4)\<rbrakk> \<Longrightarrow> of_int z = 1 \<or> of_int z = 2 \<or> of_int z = 3 \<or> of_int z = 4
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 \<le> z
z < int CARD(4)
[PROOF STEP]
have "0 \<le> z" and "z < 4"
[PROOF STATE]
proof (prove)
using this:
0 \<le> z
z < int CARD(4)
goal (1 subgoal):
1. 0 \<le> z &&& z < 4
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
0 \<le> z
z < 4
goal (1 subgoal):
1. \<And>z. \<lbrakk>0 \<le> z; z < int CARD(4)\<rbrakk> \<Longrightarrow> of_int z = 1 \<or> of_int z = 2 \<or> of_int z = 3 \<or> of_int z = 4
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 \<le> z
z < 4
[PROOF STEP]
have "z = 0 \<or> z = 1 \<or> z = 2 \<or> z = 3"
[PROOF STATE]
proof (prove)
using this:
0 \<le> z
z < 4
goal (1 subgoal):
1. z = 0 \<or> z = 1 \<or> z = 2 \<or> z = 3
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
z = 0 \<or> z = 1 \<or> z = 2 \<or> z = 3
goal (1 subgoal):
1. \<And>z. \<lbrakk>0 \<le> z; z < int CARD(4)\<rbrakk> \<Longrightarrow> of_int z = 1 \<or> of_int z = 2 \<or> of_int z = 3 \<or> of_int z = 4
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
z = 0 \<or> z = 1 \<or> z = 2 \<or> z = 3
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
z = 0 \<or> z = 1 \<or> z = 2 \<or> z = 3
goal (1 subgoal):
1. of_int z = 1 \<or> of_int z = 2 \<or> of_int z = 3 \<or> of_int z = 4
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
of_int z = 1 \<or> of_int z = 2 \<or> of_int z = 3 \<or> of_int z = 4
goal:
No subgoals!
[PROOF STEP]
qed | {"llama_tokens": 1088, "file": null, "length": 12} |
"""
global-land-mask is a python module for checking whether a lat/lon point is
on land or in the ocean. In order to do this, we use the globe dataset,
which samples the entire earth at 1 km resolution.
The global mask is of shape (21600, 43200), coming to about 980 MB when
saved without compression. This data can be compressed to 2.5 MB using numpy
savez_compressed, making for a very compact package.
"""
import numpy as np
import os
# Load the data from file.
full_path = os.path.realpath(__file__)
_path, _ = os.path.split(full_path)
_mask_filename = os.path.join(_path,'globe_combined_mask_compressed.npz')
_mask_fid = np.load(_mask_filename)
_mask = _mask_fid['mask']
_lat = _mask_fid['lat']
_lon = _mask_fid['lon']
def lat_to_index(lat):
"""
Convert latitude to index on the mask
Parameters
----------
lat : numeric
Latitude in degrees
Returns
-------
index : numeric
index of the latitude axis.
"""
lat = np.array(lat)
if np.any(lat>90):
raise ValueError('latitude must be <= 90')
if np.any(lat<-90):
raise ValueError('latitude must be >= -90')
lat[lat > _lat.max()] = _lat.max()
lat[lat < _lat.min()] = _lat.min()
return ((lat - _lat[0])/(_lat[1]-_lat[0])).astype('int')
def lon_to_index(lon):
"""
Convert longitude to index on the mask
Parameters
----------
lon : numeric
Longitude in degrees
Returns
-------
index : numeric
index of the longitude axis.
"""
lon = np.array(lon)
if np.any(lon > 180):
raise ValueError('longitude must be <= 180')
if np.any(lon < -180):
raise ValueError('longitude must be >= -180')
lon[lon > _lon.max()] = _lon.max()
lon[lon < _lon.min()] = _lon.min()
return ((lon - _lon[0]) / (_lon[1] - _lon[0])).astype('int')
def is_ocean(lat,lon):
"""
Return boolean array of whether the coordinates are in the ocean
Parameters
----------
lat : ndarray or float
latitude in degrees
lon : ndarray or float
longitude in degrees
Returns
-------
is_ocean_mask : ndarray or bool
boolean array denoting whether the corresponding point is in the ocean.
"""
lat_i = lat_to_index(lat)
lon_i = lon_to_index(lon)
return _mask[lat_i,lon_i]
def is_land(lat,lon):
"""
Return boolean array of whether the coordinates are on the land. Note
that most lakes are considered on land.
Parameters
----------
lat : ndarray or float
latitude in degrees
lon : ndarray or float
longitude in degrees
Returns
-------
is_land_mask : ndarray or bool
boolean array denoting whether the corresponding point is on land.
"""
lat_i = lat_to_index(lat)
lon_i = lon_to_index(lon)
return np.logical_not(_mask[lat_i,lon_i])
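# A minimal usage sketch (added for illustration; the coordinates below are
# arbitrary example points and not part of the original module):
if __name__ == '__main__':
    # Boulder, CO (~40.02 N, 105.27 W) should report as land.
    print(is_land(40.02, -105.27))
    # A point in the equatorial Pacific should report as ocean.
    print(is_ocean(0.0, -150.0))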
| {"hexsha": "470b0278ddcd2b1f0b601f6a1d9782d2ee77f7f3", "size": 2925, "ext": "py", "lang": "Python", "max_stars_repo_path": "global_land_mask/globe.py", "max_stars_repo_name": "toddkarin/global-land-mask", "max_stars_repo_head_hexsha": "55b4502f077deb4bfae1c99ff5a2f09ad7db3540", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2019-05-26T15:58:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T14:36:04.000Z", "max_issues_repo_path": "global_land_mask/globe.py", "max_issues_repo_name": "toddkarin/global-land-mask", "max_issues_repo_head_hexsha": "55b4502f077deb4bfae1c99ff5a2f09ad7db3540", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-09-09T02:45:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-03T02:18:20.000Z", "max_forks_repo_path": "global_land_mask/globe.py", "max_forks_repo_name": "toddkarin/global-land-mask", "max_forks_repo_head_hexsha": "55b4502f077deb4bfae1c99ff5a2f09ad7db3540", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2019-08-25T11:06:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T22:29:20.000Z", "avg_line_length": 19.2434210526, "max_line_length": 79, "alphanum_fraction": 0.6218803419, "include": true, "reason": "import numpy", "num_tokens": 727} |
using HypothesisTests
using StatsBase
using Base.Test
#Example 1 in R
#Agresti (2007) p. 39
d = [[762,484] [327,239] [468,477]]
m = PowerDivergenceTest(d)
@test_approx_eq m.theta0 [0.25523082406125785,0.19670969099133556,0.11593952361049113,0.08935608756107216,0.1935739395970214,0.1491899341788219]
@test_approx_eq m.thetahat [0.2763873775843308,0.1755531374682626,0.11860718171926006,0.08668842945230323,0.16974972796517954,0.17301414581066377]
c = ci(m)
c0 = [(0.23322451940515054, 0.31882480957222203),
(0.1323902792890823, 0.21799056945615383),
(0.0754443235400798, 0.16104461370715129),
(0.04352557127312297, 0.12912586144019447),
(0.12658686978599926, 0.21218715995307078),
(0.12985128763148351, 0.21545157779855498)]
for i = 1:length(c)
@test c[i][1] ≈ c0[i][1]
@test c[i][2] ≈ c0[i][2]
end
@test pvalue(m) ≈ 2.9535891832117357e-7
@test m.stat ≈ 30.070149095754687
@test m.df ≈ 2
@test m.n ≈ 2757
@test_approx_eq m.residuals [2.198855766015898,-2.504669492560728,0.4113701700566286,-0.46858294710127296,-2.8432397155451494,3.2386734435365825]
@test_approx_eq m.stdresiduals [4.502053521086705,-4.502053521086705,0.6994517329844298,-0.6994517329844298,-5.315945542704929,5.315945542704929]
m = PowerDivergenceTest(d,lambda=0.0)
testname(m)
pvalue(m)
show(IOBuffer(), m)
m = PowerDivergenceTest(d,lambda=-1.0)
testname(m)
pvalue(m)
show(IOBuffer(), m)
m = PowerDivergenceTest(d,lambda=-2.0)
testname(m)
pvalue(m)
show(IOBuffer(), m)
m = PowerDivergenceTest(d,lambda=-0.5)
testname(m)
pvalue(m)
show(IOBuffer(), m)
m = PowerDivergenceTest(d,lambda=2/3)
testname(m)
pvalue(m)
show(IOBuffer(), m)
m = ChisqTest(d)
m = MultinomialLRT(d)
ci(m, method = :bootstrap)
ci(m, method = :bootstrap, tail=:left)
ci(m, method = :bootstrap, tail=:right)
ci(m, method = :gold)
ci(m, method = :gold, tail=:left)
ci(m, method = :gold, tail=:right)
ci(m, method = :quesenberry_hurst)
ci(m, method = :quesenberry_hurst, tail=:left)
ci(m, method = :quesenberry_hurst, tail=:right)
ci(m, method = :sison_glaz)
ci(m, method = :sison_glaz, correct=false)
ci(m, method = :sison_glaz, tail=:left)
ci(m, method = :sison_glaz, tail=:right)
@test_throws ArgumentError ci(m, method=:FOO)
@test_throws ArgumentError ci(m, tail=:fox)
#Example 3 in R
d = [ 20, 15, 25 ]
m = PowerDivergenceTest(d)
@test_approx_eq m.theta0 [0.3333333333333333,0.3333333333333333,0.3333333333333333]
@test_approx_eq m.thetahat [0.3333333333333333,0.25,0.4166666666666667]
c = ci(m)
c0 = [(0.04999999999999999,0.5833301356192295),(0.0,0.49999680228589616),(0.13333333333333336,0.6666634689525628)]
[ @test_approx_eq c[i][1] c0[i][1] for i in 1:length(c)]
[ @test_approx_eq c[i][2] c0[i][2] for i in 1:length(c)]
@test_approx_eq pvalue(m) 0.2865047968601901
@test_approx_eq m.stat 2.5
@test_approx_eq m.df 2
@test_approx_eq m.n 60
@test_approx_eq m.residuals [0.0,-1.118033988749895,1.118033988749895]
@test_approx_eq m.stdresiduals [0.0,-1.3693063937629153,1.3693063937629153]
m = PowerDivergenceTest(d,lambda=0.0)
testname(m)
pvalue(m)
show(IOBuffer(), m)
m = PowerDivergenceTest(d,lambda=-1.0)
testname(m)
pvalue(m)
show(IOBuffer(), m)
m = PowerDivergenceTest(d,lambda=-2.0)
testname(m)
pvalue(m)
show(IOBuffer(), m)
m = PowerDivergenceTest(d,lambda=-0.5)
testname(m)
pvalue(m)
show(IOBuffer(), m)
m = PowerDivergenceTest(d,lambda=2/3)
testname(m)
pvalue(m)
show(IOBuffer(), m)
m = ChisqTest(d)
m = MultinomialLRT(d)
ci(m, method = :bootstrap)
ci(m, method = :bootstrap, tail=:left)
ci(m, method = :bootstrap, tail=:right)
ci(m, method = :gold)
ci(m, method = :gold, tail=:left)
ci(m, method = :gold, tail=:right)
ci(m, method = :quesenberry_hurst)
ci(m, method = :quesenberry_hurst, tail=:left)
ci(m, method = :quesenberry_hurst, tail=:right)
ci(m, method = :sison_glaz)
ci(m, method = :sison_glaz, correct=false)
ci(m, method = :sison_glaz, tail=:left)
ci(m, method = :sison_glaz, tail=:right)
@test_throws ArgumentError ci(m, method=:FOO)
@test_throws ArgumentError ci(m, tail=:fox)
#
x=[1,2,3,1,2,3]
y=[1,1,1,2,2,3]
d = counts(x,y,3)
ChisqTest(d)
MultinomialLRT(d)
PowerDivergenceTest(x,y,3)
PowerDivergenceTest(x,y,(1:3,1:3))
ChisqTest(x,y,3)
ChisqTest(x,y,(1:3,1:3))
MultinomialLRT(x,y,3)
MultinomialLRT(x,y,(1:3,1:3))
| {"hexsha": "08dd87c8719edc85da36fe48fe03be2b4c2889b6", "size": 4271, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/power_divergence.jl", "max_stars_repo_name": "bjarthur/HypothesisTests.jl", "max_stars_repo_head_hexsha": "a964ed55157664c8c8bc3b01cc48f71fef1c90ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/power_divergence.jl", "max_issues_repo_name": "bjarthur/HypothesisTests.jl", "max_issues_repo_head_hexsha": "a964ed55157664c8c8bc3b01cc48f71fef1c90ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/power_divergence.jl", "max_forks_repo_name": "bjarthur/HypothesisTests.jl", "max_forks_repo_head_hexsha": "a964ed55157664c8c8bc3b01cc48f71fef1c90ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6878612717, "max_line_length": 146, "alphanum_fraction": 0.7234839616, "num_tokens": 1659} |
"""
XEM6010 Phase-lock box GUI: displays diagnostics on the raw signal, phase noise measurements, and loop filter tuning
by JD Deschenes, October 2013
"""
from __future__ import print_function
import time
from PyQt5 import QtGui, Qt
#import PyQt5.Qwt5 as Qwt
import numpy as np
import math
from scipy.signal import lfilter
from scipy.signal import decimate
from scipy.signal import detrend
# For make_sure_path_exists() and os.rename()
import os
import errno
#from SuperLaserLand_JD2 import SuperLaserLand_JD2
from LoopFiltersUI import LoopFiltersUI
from DisplayVNAWindow import DisplayVNAWindow
from LoopFiltersUI_DAC1_and_DAC2 import LoopFiltersUI_DAC1_and_DAC2
from DisplayDitherSettingsWindow import DisplayDitherSettingsWindow
#from DisplayCrashMonitorWindow import DisplayCrashMonitorWindow
#from ILX_laser_control import ILX_laser_control
#from PyDAQmx_single_1 import NIDAQ_USB
#from NIUSB_DAQ import Instrument
from user_friendly_QLineEdit import user_friendly_QLineEdit
import SpectrumWidget
#import matplotlib.pyplot as plt
import traceback
# stuff for Python 3 port
import pyqtgraph as pg
import RP_PLL # for CommsError
from SocketErrorLogger import logCommsErrorsAndBreakoutOfFunction
import logging
def round_to_N_sig_figs(x, Nsigfigs):
leading_pos = np.floor(np.log10(np.abs(x)))
factor = 10**((Nsigfigs-1)-leading_pos)
return np.round(x * factor)/factor
def smooth(x,window_len=11,window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the beginning and end parts of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t = np.arange(-2, 2, 0.1)
x = np.sin(t) + np.random.randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
return y
class XEM_GUI_MainWindow(QtGui.QWidget):
display_phase = 0 # used to refresh the phase noise plot only once every N refresh cycles
VCO_detected_gain_in_Hz_per_Volts = [1, 1, 1]
bFirstTimeLockCheckBoxClicked = True
# def __init__(self):
# super(XEM_GUI_MainWindow, self).__init__()
#
# self.bDisplayTiming = False # Activate to turn a lot of timing print()s
# self.output_controls = (True, True, True)
# self.initUI()
def __init__(self, sl, strTitle, selected_ADC, output_controls, sp, custom_style_sheet, strFGPASerialNumber):
super(XEM_GUI_MainWindow, self).__init__()
self.strTitle = strTitle
self.sl = sl
self.sp = sp # Holds the system parameters (configuration values)
self.bDisplayTiming = False # Activate to turn a lot of timing print()s
self.selected_ADC = selected_ADC
self.output_controls = output_controls
self.setObjectName('MainWindow')
self.setStyleSheet(custom_style_sheet)
self.strFGPASerialNumber = strFGPASerialNumber
self.logger = logging.getLogger(__name__)
self.logger_name = ':XEM_GUI_MainWindow'
self.timerIDDither = None
self.timerID = 0
# For the crash monitor
self.crash_number = 0
self.crash_windows = []
self.crash_windows_opening_times = []
self.bAveragePhaseNoise = True
self.bAveragePhaseNoiseLast = False
self.N_spc_average = 10.
# For the residuals streaming:
# Only one window takes care of reading both the CEO and optical residuals
if self.selected_ADC == 0:
strFolder = 'c:\\SuperLaserLandLogs\\ResidualsStreaming'
self.make_sure_path_exists(strFolder)
self.word_counter = 0
self.foutput_residuals = open('%s\\residuals_ceo_%s.bin' % (strFolder, self.strFGPASerialNumber), 'wb')
self.foutput_residuals2 = open('%s\\residuals_optical_%s.bin' % (strFolder, self.strFGPASerialNumber), 'wb')
self.foutput_residuals_time = open('%s\\residuals_time_%s.bin' % (strFolder, self.strFGPASerialNumber), 'wb', 0) # the 0 means un-buffered writes
self.initUI()
def getValues(self):
self.bFirstTimeLockCheckBoxClicked = False
self.getVCOGain()
self.spectrum.getDACoffset()
self.getVCOFreq()
self.qloop_filters[self.selected_ADC].getValues() # We should get qloop_filters.kc before (done in getVCOGain)
self.setLock()
self.timerIDDither = Qt.QTimer(self)
self.timerIDDither.timeout.connect(self.timerDitherEvent)
self.startTimers()
self.displayDAC() # This populates the current DAC values with the actual value
self.qchk_refresh.setChecked(False)
self.refreshChk_event()
def pushActualValues(self):
print("Push actual values of MainWindow, TODO")
def pushDefaultValues(self):
# print("XEM_GUI_MainWindow::pushDefaultValues()")
#For now, equivalent to calling initSL()
self.loadParameters()
# Send values to FPGA
self.setVCOFreq_event()
self.setVCOGain_event()
self.chkLockClickedEvent()
self.timerIDDither = Qt.QTimer(self)
self.timerIDDither.timeout.connect(self.timerDitherEvent)
self.startTimers()
self.displayDAC() # This populates the current DAC values with the actual value
# print("XEM_GUI_MainWindow::pushDefaultValues(): after displayDAC")
if self.output_controls[0] == True:
self.slowStart100VSwitchingSupply()
@logCommsErrorsAndBreakoutOfFunction()
def slowStart100VSwitchingSupply(self):
# need to set the switching supply to its default values:
# do a slow start over ~ 100 ms.
f_switching = 200e3
Vtarget = 100.
Vsupply = 30.
T_slow_start = 100e-3
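# (Vtarget - Vsupply)/Vtarget = 1 - Vsupply/Vtarget, i.e. the ideal
# boost-converter duty cycle needed to step 30 V up to 100 V
# (assumption: the switching supply topology is a boost converter)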
target_duty_cycle = (Vtarget-Vsupply)/Vtarget
oscillator_modulus = int(round( self.sl.fs/f_switching ))
print("slowStart100VSwitchingSupply(): starting")
N_steps = 10
for k in range(int(N_steps)+1):
# print("slowStart100VSwitchingSupply(): here")
current_duty_cycle = float(k)/N_steps * target_duty_cycle
# print("slowStart100VSwitchingSupply(): here2")
oscillator_modulus_active = int(round( oscillator_modulus * current_duty_cycle ))
# print("slowStart100VSwitchingSupply(): here3")
self.sl.setTestOscillator(bEnable=1, bPolarity=1, oscillator_modulus=oscillator_modulus, oscillator_modulus_active=oscillator_modulus_active)
# try:
# self.sl.setTestOscillator(bEnable=1, bPolarity=1, oscillator_modulus=oscillator_modulus, oscillator_modulus_active=oscillator_modulus_active)
# except RP_PLL.CommsError:
# break
time.sleep(T_slow_start/N_steps)
print("slowStart100VSwitchingSupply(): finished")
def killTimers(self):
# print("XEM_GUI_MainWindow::killTimers(): %s" % self.strTitle)
#traceback.print_stack()
if self.timerIDDither is not None:
self.timerIDDither.stop()
if self.qchk_refresh.isChecked():
self.qchk_refresh.setChecked(False)
self.refreshChk_event()
def startTimers(self):
# print("XEM_GUI_MainWindow::startTimers(): %s" % self.strTitle)
# Need to init timerID
self.timerID = 0
# Start the timer which reads the dither:
if self.timerIDDither is not None:
self.timerIDDither.start(100) # 100 ms readout delay, increased to 1000 ms for debugging
def getVCOGainFromUI(self, output_number):
try:
VCO_gain_in_Hz_per_Volts = float(self.qedit_vco_gain[output_number].text())
except:
VCO_gain_in_Hz_per_Volts = 1e9
return VCO_gain_in_Hz_per_Volts
@logCommsErrorsAndBreakoutOfFunction()
def setVCOGain_event(self):
# Update the loop filters gain settings based on the new VCO gains:
# Also set the scale on the manual output sliders (and the steps)
# We want the user to be able to easily control the beat frequency with the mousewheel.
# (mousewheel scroll: 3 small steps or arrow keys: 1 small step)
# We want each mousewheel step to be about 0.5 MHz,
# large steps (clicking in the open area of the scrollbar) to be about 5 MHz
for k in range(3):
if self.output_controls[k]:
VCO_gain_in_Hz_per_Volts = self.getVCOGainFromUI(k)
# getFreqDiscriminatorGain is in DDC Counts/Hz
# getDACGainInVoltsPerCounts is in V/(DAC Counts)
VCO_gain_in_counts_per_counts = VCO_gain_in_Hz_per_Volts * self.sl.getFreqDiscriminatorGain() * self.sl.getDACGainInVoltsPerCounts(k) # self.sl.getFreqDiscriminatorGain() and self.sl.getDACGainInVoltsPerCounts(k) are constant (different for each k)
if k == 0 or k == 1:
self.qloop_filters[k].kc = VCO_gain_in_counts_per_counts
self.qloop_filters[k].checkFirmwareLimits()
self.qloop_filters[k].updateFilterSettings()
self.qloop_filters[k].updateGraph()
elif k == 2:
# DAC 2 loop settings are controlled by the same widget as DAC1
self.qloop_filters[1].kc_dac2 = VCO_gain_in_counts_per_counts
self.qloop_filters[1].checkFirmwareLimits()
self.qloop_filters[1].updateFilterSettings()
self.qloop_filters[1].updateGraph()
self.sl.save_openLoop_gain(k, VCO_gain_in_counts_per_counts) #Save the value of the open-loop gain in the FPGA to allow reconnection (usefull to read Loop-Filter gain value)
self.spectrum.setSliderStepSize(k, VCO_gain_in_Hz_per_Volts)
# This function needs the VCO gain to compute the control effort, so we have to update it if it has changed.
self.spectrum.setDACOffset_event()
@logCommsErrorsAndBreakoutOfFunction()
def getVCOGain(self):
if self.selected_ADC == 0:
dac_list = [0]
elif self.selected_ADC == 1:
dac_list = [1, 2]
for k in dac_list:
# if self.output_controls[k]:
VCO_gain_in_counts_per_counts = self.sl.get_openLoop_gain(k)
# print("k = %d, VCO_gain_in_counts_per_counts=%f" % (k, VCO_gain_in_counts_per_counts))
VCO_gain_in_Hz_per_Volts = VCO_gain_in_counts_per_counts / (self.sl.getFreqDiscriminatorGain() * self.sl.getDACGainInVoltsPerCounts(k))
# print("k = %d, VCO_gain_in_Hz_per_Volts=%f" % (k, VCO_gain_in_Hz_per_Volts))
# prevent divide-by-0 bug:
if VCO_gain_in_Hz_per_Volts == 0:
VCO_gain_in_Hz_per_Volts = 1.
self.qedit_vco_gain[k].blockSignals(True)
self.qedit_vco_gain[k].setText('{:.1e}'.format(VCO_gain_in_Hz_per_Volts))
self.qedit_vco_gain[k].blockSignals(False)
if k == 0 or k == 1:
self.qloop_filters[k].kc = VCO_gain_in_counts_per_counts
self.qloop_filters[k].checkFirmwareLimits()
self.qloop_filters[k].updateGraph()
elif k == 2:
# DAC 2 loop settings are controlled by the same widget as DAC1
self.qloop_filters[1].kc_dac2 = VCO_gain_in_counts_per_counts
self.qloop_filters[1].checkFirmwareLimits()
self.qloop_filters[1].updateGraph()
self.spectrum.setSliderStepSize(k, VCO_gain_in_Hz_per_Volts)
@logCommsErrorsAndBreakoutOfFunction()
def setVCOFreq_event(self, checked=False):
# print("setVCOFreq_event: self.selected_ADC = %d" % self.selected_ADC)
try:
frequency_in_hz = float(self.qedit_ref_freq.text())
except:
frequency_in_hz = 5e6
# If the VCO has positive sign, we need to put a negative reference frequency to make the
# total loop sign be negative so that it's stable when we close the loop
if self.qsign_positive.isChecked():
frequency_in_hz =-frequency_in_hz
#print('frequency_in_hz = %e' % frequency_in_hz)
if self.selected_ADC == 0:
self.sl.set_ddc0_ref_freq(frequency_in_hz)
elif self.selected_ADC == 1:
self.sl.set_ddc1_ref_freq(frequency_in_hz)
#print('frequency_in_hz = %e (after)' % frequency_in_hz)
@logCommsErrorsAndBreakoutOfFunction()
def getVCOFreq(self):
if self.selected_ADC == 0:
frequency_in_hz = self.sl.get_ddc0_ref_freq_from_RAM()
elif self.selected_ADC == 1:
frequency_in_hz = self.sl.get_ddc1_ref_freq_from_RAM()
# If the VCO has positive sign, we need to put a negative reference frequency to make the
# total loop sign be negative so that it's stable when we close the loop
if frequency_in_hz < 0:
self.qsign_positive.setChecked(True)
else:
self.qsign_negative.setChecked(True)
self.qedit_ref_freq.blockSignals(True)
self.qedit_ref_freq.setText('%.2e' % abs(frequency_in_hz))
self.qedit_ref_freq.blockSignals(False)
def refreshChk_event(self):
if self.qchk_refresh.isChecked():
# We are doing a not running->running transition
try:
# if True:
# print('self.qedit_timerdelay.text() = %s' % self.qedit_timerdelay.text())
timer_delay = float(self.qedit_timerdelay.text())
except:
# else:
timer_delay = 1000
# print('Timer delay = %d ms' % timer_delay)
if self.timerID != 0:
self.killTimer(self.timerID)
self.timerID = self.startTimer(int(round(timer_delay)))
self.timerEvent(0) # run the event handler once right away, makes the checkbox feel more responsive
# print('Starting timer')
else:
# We are doing a running->not running transition
if self.timerID != 0:
self.killTimer(self.timerID)
self.timerID = 0
# print('Stopping timer')
def exportData(self):
# First need to create a unique file name template (with good probability)
# We simply use the system date and time, and hope that this function doesn't get called twice in a second
strNameTemplate = time.strftime("data_export\\%m_%d_%Y_%H_%M_%S_")
# Data to write:
# self.inst_freq
# self.freq_noise_psd
# self.freq_noise_axis
# self.raw_adc_samples
# Create the subdirectory if it doesn't exist:
self.make_sure_path_exists('data_export')
# Open files for output, write raw data
# if True:
try:
strCurrentName = strNameTemplate + 'raw_adc_samples.bin'
f = open(strCurrentName, 'wb')
f.write(self.raw_adc_samples)
f.close()
except:
pass
try:
strCurrentName = strNameTemplate + 'inst_freq.bin'
f = open(strCurrentName, 'wb')
f.write(self.inst_freq)
f.close()
except:
pass
try:
strCurrentName = strNameTemplate + 'freq_noise_psd.bin'
f = open(strCurrentName, 'wb')
f.write(self.freq_noise_psd)
f.close()
except:
pass
try:
strCurrentName = strNameTemplate + 'freq_noise_axis.bin'
f = open(strCurrentName, 'wb')
f.write(self.freq_noise_axis)
f.close()
except:
pass
def showVNA(self):
self.vna = DisplayVNAWindow(self.sl)
def grabAndExportData(self, bSyncReadOnNextTimeQuantization=True):
start_time = time.perf_counter()
print('Grabbing and exporting data')
# Check if another function is currently using the DDR2 logger:
if self.sl.bDDR2InUse:
print('grabAndExportData(): DDR2 logger in use, cannot get data from adc')
return
# Ask which input to use:
currentSelector, ok = QtGui.QInputDialog.getItem(self, 'Raw data export',
'Select the data source:', ('ADC0', 'ADC1', 'DAC0', 'DAC1', 'DAC2'))
if not ok:
return
currentSelector = str(currentSelector)
# Ask how many points:
N_points_str, ok = QtGui.QInputDialog.getText(self, 'Raw data export',
'Enter the number of points desired [1, 32768]:', Qt.QLineEdit.Normal, '32768')
if not ok:
return
# Block access to the DDR2 Logger to any other function until we are done:
self.sl.bDDR2InUse = True
try:
N_points = int(float(N_points_str))
except:
N_points = 4e3
if N_points < 64:
N_points = 64
try:
# Read from selected source
print("currentSelector = %s" % currentSelector)
self.sl.setup_write(self.sl.LOGGER_MUX[currentSelector], N_points)
##################################################
# Synchronize trigger as best as possible to the next multiple of time_quantum seconds:
if bSyncReadOnNextTimeQuantization:
time_quantum = 0.01
time_now = time.time()
time_target = np.ceil(time_now/time_quantum) * time_quantum
print('time_now = %f, time_target = %f' % (time_now, time_target))
while time_target > time_now:
time.sleep(1e-3)
time_now = time.time()
self.sl.trigger_write()
if bSyncReadOnNextTimeQuantization:
print('time_now = %f, time_target = %f' % (time_now, time_target))
self.sl.wait_for_write()
(samples_out, ref_exp0) = self.sl.read_adc_samples_from_DDR2()
samples_out = samples_out.astype(dtype=np.float)/2**15
except:
# ADC read failed.
print('Unhandled exception in ADC read')
# del self.sl
# raise
# Signal to other functions that they can use the DDR2 logger
self.sl.bDDR2InUse = False
print('Elapsed time (Comm) = %f' % (time.perf_counter()-start_time))
start_time = time.perf_counter()
# Write the data to disk:
strNameTemplate = time.strftime("data_export\\%m_%d_%Y_%H_%M_%S_")
self.make_sure_path_exists('data_export')
# Open files for output, write raw data
try:
strCurrentName = strNameTemplate + self.strFGPASerialNumber + '_raw_adc_samples.bin'
f = open(strCurrentName, 'wb')
f.write(samples_out)
f.close()
except:
pass
print('Elapsed time (write to disk) = %f' % (time.perf_counter()-start_time))
start_time = time.perf_counter()
def setLock(self):
bLock = self.qloop_filters[self.selected_ADC].qchk_lock.isChecked()
self.qchk_lock.setChecked(bLock)
if bLock:
#We are reconnecting to a RP which has a locked loop filter
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: green')
else:
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: red')
@logCommsErrorsAndBreakoutOfFunction()
def chkLockClickedEvent(self, checked=False):
bLock = self.qchk_lock.isChecked()
if bLock:
# we are doing an unlocked->locked transition.
# We first check if the detected VCO gain seems right:
if self.sl.dither_enable[self.selected_ADC]:
# check if gain is OK
try:
VCO_gain_in_Hz_per_Volts = float(self.qedit_vco_gain[self.selected_ADC].text())
except:
VCO_gain_in_Hz_per_Volts = 1e9
# First check if sign is right:
if np.sign(self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC]) != np.sign(VCO_gain_in_Hz_per_Volts):
# Display warning message.
reply = QtGui.QMessageBox.question(self, 'Warning',
"The detected VCO gain is negative. This will most likely make the loop unstable. This is either caused by trying to lock to an incorrect sideband, or an incorrect setting of the VCO sign in the UI. Do you want to turn on the lock anyway?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
# Exit early
self.qchk_lock.setChecked(False)
return
print('Warning about the loop sign ignored.')
else:
print('Gain sign OK')
# Now we check if the magnitude of the entered VCO gain and the detected gain agree within some tolerance:
if self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC]/VCO_gain_in_Hz_per_Volts > 1.5 or self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC]/VCO_gain_in_Hz_per_Volts < 1/1.5:
# Display warning message.
reply = QtGui.QMessageBox.question(self, 'Warning',
"The detected VCO gain (%.2e Hz/V) has a significantly different magnitude than the entered value used for designing the controller (%.2e Hz/V). This may make the loop unstable. Do you want to turn on the lock anyway?" % (self.VCO_detected_gain_in_Hz_per_Volts[self.selected_ADC], VCO_gain_in_Hz_per_Volts),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
# Exit early
self.qchk_lock.setChecked(False)
return
print('Warning about the loop gain ignored.')
else:
print('Gain magnitude OK')
# If we get here that means either that all the parameters have passed the checks, or the dither was off.
# Turn the dither off if the dither mode is automatic:
if self.selected_ADC == 0:
if self.sl.dither_mode_auto[0] == 1:
# automatic mode
self.sl.setDitherLockInState(0, False)
else:
# Optical lock: we have two dithers to take care of:
if self.sl.dither_mode_auto[1] == 1:
# automatic mode
self.sl.setDitherLockInState(1, False)
# if self.sl.dither_mode_auto[2] == 1:
# # automatic mode
# self.sl.setDitherLockInState(2, False)
self.logger.info('Red_Pitaya_GUI{}: Lock'.format(self.logger_name))
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: green')
# Turn the lock on
if self.selected_ADC == 0:
self.qloop_filters[0].qchk_lock.setChecked(True)
self.qloop_filters[0].updateFilterSettings()
elif self.selected_ADC == 1:
# Lock procedure if there is no 3rd DAC on the Red Pitaya:
# self.qloop_filters[1].qchk_lock.setChecked(True)
# self.qloop_filters[1].updateFilterSettings()
# There is a different procedure for turning the lock on on the optical loop:
# first we grab the beat using the DAC2 frequency-locked loop. then we set this integrator to hold
# and switch to the DAC1 PLL + DAC2 second integrator.
self.qloop_filters[1].qradio_mode_off.setChecked(False)
self.qloop_filters[1].qradio_mode_slow.setChecked(True)
self.qloop_filters[1].qradio_mode_fast.setChecked(False)
self.qloop_filters[1].qradio_mode_both.setChecked(False)
self.qloop_filters[1].updateSettings()
# Wait for the integrator to grab on to the beat
time.sleep(0.2)
# Turn on the full-blown PLL
self.qloop_filters[1].qradio_mode_off.setChecked(False)
self.qloop_filters[1].qradio_mode_slow.setChecked(False)
self.qloop_filters[1].qradio_mode_fast.setChecked(False)
self.qloop_filters[1].qradio_mode_both.setChecked(True)
self.qloop_filters[1].updateSettings()
else: # bLock = False
if not self.sl.output_vco[self.selected_ADC]:
if not self.bFirstTimeLockCheckBoxClicked:
# We are doing a locked->unlocked transition
# 1. Smoothly ramp the manual dac offsets to where the lock has decided to sit:
# This is to prevent any violent step on the actuator when we turn off the lock:
# It also prevents mode changes (the laser should stay fairly close to where it was while locked).
if self.selected_ADC == 0:
# Go and measure the current DAC DC value:
N_points = 10e3
self.sl.setup_DAC0_write(N_points)
self.sl.trigger_write()
self.sl.wait_for_write()
(samples_out, ref_exp0) = self.sl.read_adc_samples_from_DDR2()
# print(np.mean(samples_out))
current_dac_offset_in_counts = np.mean(samples_out)
kDAC = 0
elif self.selected_ADC == 1:
N_points = 10e3
self.sl.setup_DAC1_write(N_points)
self.sl.trigger_write()
self.sl.wait_for_write()
(samples_out, ref_exp0) = self.sl.read_adc_samples_from_DDR2()
# print(np.mean(samples_out))
current_dac_offset_in_counts = np.mean(samples_out)
kDAC = 1
# Read the current manual offset value:
current_manual_offset_in_slider_units = float(self.spectrum.q_dac_offset[kDAC].value())
# Convert the DAC DC offset to the slider units:
current_dac_offset_in_slider_units = float(current_dac_offset_in_counts - self.sl.DACs_limit_low[kDAC])/float(self.sl.DACs_limit_high[kDAC] - self.sl.DACs_limit_low[kDAC])*1e6
# Set up a ramp with 20 steps:
desired_ramp = np.linspace(current_manual_offset_in_slider_units, current_dac_offset_in_slider_units, 20)
# print('ramping from %d to %d in slider units' % (current_manual_offset_in_slider_units, current_dac_offset_in_slider_units))
Total_ramp_time = 0.1
for k2 in range(len(desired_ramp)):
# print('set slider to %d' % desired_ramp[k2])
self.spectrum.q_dac_offset[kDAC].setValue(desired_ramp[k2])
self.spectrum.setDACOffset_event()
time.sleep(float(Total_ramp_time)/len(desired_ramp))
# 2. turn the lock off
if self.selected_ADC == 0:
self.qloop_filters[0].qchk_lock.setChecked(False)
self.qloop_filters[0].updateFilterSettings()
elif self.selected_ADC == 1:
# Unlock procedure for when there is no 3rd DAC on the Red Pitaya
# self.qloop_filters[1].qchk_lock.setChecked(False)
# self.qloop_filters[1].updateFilterSettings()
# There is a different procedure for turning the lock on on the optical loop:
# first we grab the beat using the DAC2 frequency-locked loop. then we set this integrator to hold
# and switch to the DAC1 PLL + DAC2 second integrator.
self.qloop_filters[1].qradio_mode_off.setChecked(True)
self.qloop_filters[1].qradio_mode_slow.setChecked(False)
self.qloop_filters[1].qradio_mode_fast.setChecked(False)
self.qloop_filters[1].qradio_mode_both.setChecked(False)
self.qloop_filters[1].updateSettings()
else:
# if the VCO is activated, we don't want to try to estimate the output offset, we just turn off the lock directly
# 2. turn the lock off
if self.selected_ADC == 0:
self.qloop_filters[0].qchk_lock.setChecked(False)
self.qloop_filters[0].updateFilterSettings()
elif self.selected_ADC == 1:
self.qloop_filters[1].qchk_lock.setChecked(False)
self.qloop_filters[1].updateFilterSettings()
# 3. Turn the dither on if the dither mode is automatic:
if self.selected_ADC == 0:
if self.sl.dither_mode_auto[0] == 1:
# automatic mode
self.sl.setDitherLockInState(0, True)
else:
# Optical lock: we have two dithers to take care of:
if self.sl.dither_mode_auto[1] == 1:
# automatic mode
self.sl.setDitherLockInState(1, True)
# if self.sl.dither_mode_auto[2] == 1:
# # automatic mode
# self.sl.setDitherLockInState(2, True)
self.logger.info('Red_Pitaya_GUI{}: Unlock'.format(self.logger_name))
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: red')
self.bFirstTimeLockCheckBoxClicked = False
def initUI(self):
# second_half_offset = 50
# Change the background color of the main form so that each controls group stand out better
PalNormal = Qt.QPalette()
# Assign the palette to the main form to read off the 'normal' background color:
self.setPalette(PalNormal)
normalBackgroundRGB = PalNormal.color(Qt.QPalette.Background).getRgb()
# print(normalBackground.getRgb())
# Darken the background of the dialog slightly
darker_factor = 0.5
PalDarkerBackground = Qt.QPalette()
PalDarkerBackground.setColor(Qt.QPalette.Background, Qt.QColor(normalBackgroundRGB[0]*darker_factor, normalBackgroundRGB[1]*darker_factor, normalBackgroundRGB[2]*darker_factor))
# PalDarkerBackground.setColor(Qt.QPalette.Background, Qt.QColor(255, 255, 255))
self.setPalette(PalDarkerBackground)
self.setAutoFillBackground(True)
# PalNormal's color has been changed when we assigned PalDarkerBackground to self - this statement seems very circular but somehow it works
PalNormal.setColor(Qt.QPalette.Background, PalNormal.color(Qt.QPalette.Background))
######################################################################
# Settings
######################################################################
self.qgroupbox_settings = Qt.QGroupBox('Settings', self)
# Button which exports the data to the disk
self.qbtn = QtGui.QPushButton('Export PSD data')
self.qbtn.clicked.connect(self.exportData)
# Button which grabs a single acquisition from the DDR memory and exports the data to the disk
self.qbtn_grab = QtGui.QPushButton('Export ADC data')
self.qbtn_grab.clicked.connect(self.grabAndExportData)
# Button which opens the VNA window:
self.qbtn_VNA = QtGui.QPushButton('Transfer function')
self.qbtn_VNA.clicked.connect(self.showVNA)
# VCO modulation gain:
self.qedit_vco_gain = {}
self.qlabel_detected_vco_gain = {}
if self.selected_ADC == 0:
# CEO Lock: only one output (DAC0)
self.qlabel_vco_gain = Qt.QLabel('VCO Gain (DAC0) [Hz/V]:')
self.qlabel_detected_vco_gain_label = Qt.QLabel('Detected VCO Gain [Hz/V]:')
self.qedit_vco_gain[0] = user_friendly_QLineEdit('1e6')
self.qedit_vco_gain[0].returnPressed.connect(self.setVCOGain_event)
self.qedit_vco_gain[0].setMaximumWidth(60)
self.qlabel_detected_vco_gain[0] = Qt.QLabel('0 Hz/V')
self.qlabel_detected_vco_gain[0].setAlignment(Qt.Qt.AlignHCenter)
else:
# Optical lock
self.qlabel_vco_gain = Qt.QLabel('VCO Gains (DAC1, DAC2HV) [Hz/V]:')
# self.qlabel_vco_gain = Qt.QLabel('VCO Gain (DAC1) [Hz/V]:')
self.qlabel_detected_vco_gain_label = Qt.QLabel('Detected VCO Gain [Hz/V]:')
self.qedit_vco_gain[1] = user_friendly_QLineEdit('1e6')
self.qedit_vco_gain[1].returnPressed.connect(self.setVCOGain_event)
self.qedit_vco_gain[1].setMaximumWidth(60)
self.qedit_vco_gain[2] = user_friendly_QLineEdit('1e6')
self.qedit_vco_gain[2].returnPressed.connect(self.setVCOGain_event)
self.qedit_vco_gain[2].setMaximumWidth(60)
self.qlabel_detected_vco_gain[1] = Qt.QLabel('0 Hz/V')
self.qlabel_detected_vco_gain[1].setAlignment(Qt.Qt.AlignHCenter)
self.qlabel_detected_vco_gain[2] = Qt.QLabel('0 Hz/V')
self.qlabel_detected_vco_gain[2].setAlignment(Qt.Qt.AlignHCenter)
# DDC reference frequency:
self.qlabel_ref_freq = Qt.QLabel('Reference freq [Hz]:')
self.qedit_ref_freq = user_friendly_QLineEdit('5e6')
self.qedit_ref_freq.returnPressed.connect(self.setVCOFreq_event)
self.qedit_ref_freq.setMaximumWidth(60)
# Main button for turning the locks on/off:
self.qchk_lock = Qt.QCheckBox('Lock')
self.qchk_lock.setStyleSheet('')
self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: red')
# self.qchk_lock.setStyleSheet('font-size: 18pt; color: white; background-color: green')
self.qchk_lock.clicked.connect(self.chkLockClickedEvent)
self.qchk_lock.setChecked(False)
# VCO sign:
self.qsign_positive = Qt.QRadioButton('VCO sign +')
self.qsign_negative = Qt.QRadioButton('VCO sign -')
self.qsign_group = Qt.QButtonGroup(self)
self.qsign_group.addButton(self.qsign_positive)
self.qsign_group.addButton(self.qsign_negative)
self.qsign_positive.setChecked(True)
self.qsign_negative.setChecked(False)
self.qsign_positive.clicked.connect(self.setVCOFreq_event)
self.qsign_negative.clicked.connect(self.setVCOFreq_event)
# Create widgets to indicate performance
self.last_refresh = time.perf_counter()
self.qlabel_refreshrate_display = Qt.QLabel('Actual delay:')
self.qlabel_refreshrate = Qt.QLabel('1000 ms')
# self.qlabel_refreshrate.resize(self.qlabel_refreshrate.sizeHint())
self.qlabel_timerdelay = Qt.QLabel('Refresh delay [ms]:')
self.qedit_timerdelay = user_friendly_QLineEdit('33')
self.qedit_timerdelay.returnPressed.connect(self.refreshChk_event)
self.qedit_timerdelay.setMaximumWidth(60)
self.qchk_refresh = Qt.QCheckBox('Auto-refresh')
self.qchk_refresh.clicked.connect(self.refreshChk_event)
# Status reporting:
if self.selected_ADC == 0:
self.qlbl_status1 = Qt.QLabel('Status: Idle')
elif self.selected_ADC == 1:
self.qlbl_status1 = Qt.QLabel('Status: Idle')
self.qlbl_status2 = Qt.QLabel('Status: Idle')
# Put all the widgets into a grid layout
grid = QtGui.QGridLayout()
grid.setHorizontalSpacing(10)
grid.setVerticalSpacing(1)
# 3 rows, XX columns
grid.addWidget(self.qbtn, 0, 0)
grid.addWidget(self.qbtn_VNA, 1, 0)
grid.addWidget(self.qbtn_grab, 2, 0)
grid.addWidget(self.qchk_refresh, 0, 1)
grid.addWidget(self.qlabel_timerdelay, 1, 1)
grid.addWidget(self.qedit_timerdelay, 1, 2)
grid.addWidget(self.qlabel_refreshrate_display, 2, 1)
grid.addWidget(self.qlabel_refreshrate, 2, 2)
# grid.addWidget(self.qlabel_bytes_skip, 0, 3)
# grid.addWidget(self.qedit_bytes_skip, 0, 4)
grid.addWidget(self.qchk_lock, 0, 3, 1, 2)
grid.addWidget(self.qlabel_ref_freq, 1, 3)
grid.addWidget(self.qedit_ref_freq, 1, 4)
# # both PLLs need to receive a threshold for the residuals.
# # See tooltip for info
# grid.addWidget(self.qlabel_crash_threshold, 2, 3)
# grid.addWidget(self.qedit_crash_threshold, 2, 4)
# only the first PLL has a crash monitor module in the current firmware:
if self.selected_ADC == 0:
pass
#FEATURE
#grid.addWidget(self.qlabel_crash_threshold_freq, 2, 5)
#grid.addWidget(self.qedit_crash_threshold_freq, 2, 6)
#grid.addWidget(self.qchk_crash_monitor, 2, 7)
# We put a sub-grid in the grid
# we put the VCO controls in the sub-grid, this way the outer grid stays the same size regardless of the number of elements
grid2 = Qt.QGridLayout()
grid2.setHorizontalSpacing(10)
grid2.setVerticalSpacing(10)
if self.selected_ADC == 0:
# CEO Lock: only one output (DAC0)
grid2.addWidget(self.qlabel_vco_gain, 0, 0)
grid2.addWidget(self.qlabel_detected_vco_gain_label, 1, 0)
grid2.addWidget(self.qedit_vco_gain[0], 0, 1)
grid2.addWidget(self.qlabel_detected_vco_gain[0], 1, 1)
else:
# Optical lock: two outputs (DAC1 and DAC2)
grid2.addWidget(self.qlabel_vco_gain, 0, 0)
grid2.addWidget(self.qlabel_detected_vco_gain_label, 1, 0)
grid2.addWidget(self.qedit_vco_gain[1], 0, 1)
grid2.addWidget(self.qlabel_detected_vco_gain[1], 1, 1)
grid2.addWidget(self.qedit_vco_gain[2], 0, 2)
# grid2.addWidget(self.qlabel_detected_vco_gain[2], 1, 2)
grid.addLayout(grid2, 0, 5, 2, 2)
grid.addWidget(self.qsign_positive, 0, 7)
grid.addWidget(self.qsign_negative, 1, 7)
grid.addWidget(Qt.QLabel(), 0, 9, 1, 1)
grid.setColumnStretch(9, 1)
self.qgroupbox_settings.setLayout(grid)
self.qgroupbox_settings.setPalette(PalNormal)
self.qgroupbox_settings.setAutoFillBackground(True)
######################################################################
# Spectrum analyzer/Diagnostics
######################################################################
self.spectrum = SpectrumWidget.SpectrumWidget(self, self.selected_ADC, self.output_controls, self.sl, PalNormal)
######################################################################
# Create the controls for the loop filters
######################################################################
self.qgroupbox_loop_filters = Qt.QGroupBox('Loop filters', self)
hbox = Qt.QHBoxLayout()
self.qloop_filters = {}
for k in range(3):
if self.output_controls[k] == True:
if k == 0:
# print('XEM_GUI_MainWindow(): About to call LoopFiltersUI()')
self.qloop_filters[k] = LoopFiltersUI(self.sl, k, bDisplayLockChkBox=False)
hbox.addWidget(self.qloop_filters[k])
#self.qloop_filters[k].show()
elif k == 1:
self.qloop_filters[k] = LoopFiltersUI_DAC1_and_DAC2(self.sl, k, self.sl.pll[k])
hbox.addWidget(self.qloop_filters[k])
self.qloop_filters[k].show()
self.qgroupbox_loop_filters.setLayout(hbox)
# self.qgroupbox_loop_filters.setLayout(grid)
self.qgroupbox_loop_filters.setPalette(PalNormal)
self.qgroupbox_loop_filters.setAutoFillBackground(True)
######################################################################
# Phase noise analysis
######################################################################
self.qgroupbox_phasenoise = Qt.QGroupBox('Phase noise (all computed from DDC output)', self)
# Selector for the plot type (phase or freq noise)
# self.qlabel_ddc_plot_select = Qt.QLabel('Plot type:')
self.qcombo_ddc_plot = Qt.QComboBox()
self.qcombo_ddc_plot.addItem('Freq')
self.qcombo_ddc_plot.addItem('Phase')
self.qcombo_ddc_plot.addItem('Freq: time domain')
self.qcombo_ddc_plot.addItem('Phase: time domain')
self.qcombo_ddc_plot.setCurrentIndex(1)
# Create widgets to set the number of points for the DDC graphs:
self.qlabel_ddc_rbw = Qt.QLabel('RBW: 100 kHz; Points:')
self.qedit_ddc_length = Qt.QLineEdit('32.768e3') # this used to be 3e5 in the Dave Leibrant box version, but was reduced due to RedPitaya memory limitations
self.qedit_ddc_length.setMaximumWidth(60)
# Create widgets to set the higher frequency of the integration:
self.qlabel_cumul_integral = Qt.QLabel('Integration\nlimit [Hz]:')
self.qedit_cumul_integral = Qt.QLineEdit('5e6')
self.qedit_cumul_integral.setMaximumWidth(60)
# Display mean frequency error:
self.qlbl_mean_freq_error = Qt.QLabel('Mean freq error = 0 MHz')
# Checkbox to enable faster updates of the phase noise plot:
self.qchk_phase_noise_fast_updates = Qt.QCheckBox('Faster updates')
self.qchk_phase_noise_fast_updates.setChecked(False)
# X and Y limits for the plot:
self.qlbl_xlims = Qt.QLabel('Xmin, Xmax')
self.qedit_xlims = Qt.QLineEdit('3e3, 5e6')
self.qedit_xlims.setMaximumWidth(60)
self.qlbl_ylims = Qt.QLabel('Ymin, Ymax')
self.qedit_ylims = Qt.QLineEdit('-100, -30')
self.qedit_ylims.setMaximumWidth(60)
# Averaging controls: # Averages (1=off)
self.qlbl_spc_averaging = Qt.QLabel('# Averages\n(1=off)')
self.qedit_spc_averaging = Qt.QLineEdit('1')
self.qedit_spc_averaging.setMaximumWidth(60)
# Create the frequency domain plot for the DDC0
self.qplt_DDC0_spc = pg.PlotWidget()
self.qplt_DDC0_spc.setTitle('Freq noise PSD')
#self.qplt_DDC0_spc.setCanvasBackground(Qt.Qt.white)
#self.qplt_DDC0_spc.setAxisScaleEngine(Qwt.QwtPlot.xBottom, Qwt.QwtLog10ScaleEngine())
self.qplt_DDC0_spc.getPlotItem().setLogMode(x=True)
self.qplt_DDC0_spc.setYRange(-60, 60)
#self.qplt_DDC0_spc.enableAxis(Qwt.QwtPlot.yRight)
self.qplt_DDC0_spc.setLabel('bottom', 'Frequency [Hz]')
self.qplt_DDC0_spc.setLabel('left', 'PSD [dB Hz^2/Hz]')
#self.qplt_DDC0_spc.setLabel('right', 'Phase [rad]')
# create the right-side axis:
p1 = self.qplt_DDC0_spc.getPlotItem()
self.qplt_DDC0_spc_right_viewbox = pg.ViewBox()
#self.qplt_DDC0_spc_right_viewbox.setLogMode(x=True)
p1.showAxis('right')
p1.scene().addItem(self.qplt_DDC0_spc_right_viewbox)
p1.getAxis('right').linkToView(self.qplt_DDC0_spc_right_viewbox)
self.qplt_DDC0_spc_right_viewbox.setXLink(p1)
p1.getAxis('right').setLabel('Phase [rad]')
self.updatePhaseNoiseViews()
p1.vb.sigResized.connect(self.updatePhaseNoiseViews)
self.qplt_DDC0_spc.showGrid(x=True, y=True)
#plot_grid = Qwt.QwtPlotGrid()
#plot_grid.setMajPen(Qt.QPen(Qt.Qt.black, 0, Qt.Qt.DotLine))
#plot_grid.attach(self.qplt_DDC0_spc)
# Create the curve in the plot
self.curve_DDC0_spc = self.qplt_DDC0_spc.getPlotItem().plot(title='Phase noise PSD', pen='b')
#self.curve_DDC0_spc.attach(self.qplt_DDC0_spc)
#self.curve_DDC0_spc.setPen(Qt.QPen(Qt.Qt.blue))
#self.curve_DDC0_cumul_phase = pg.PlotCurveItem(pen='g')
self.curve_DDC0_cumul_phase = pg.PlotDataItem(pen='k')
self.curve_DDC0_cumul_phase.setLogMode(True, False)
self.qplt_DDC0_spc_right_viewbox.addItem(self.curve_DDC0_cumul_phase)
#self.curve_DDC0_cumul_phase = self.qplt_DDC0_spc_right_viewbox.getPlotItem().plot(pen='k')
#self.curve_DDC0_cumul_phase.attach(self.qplt_DDC0_spc)
#self.curve_DDC0_cumul_phase.setPen(Qt.QPen(Qt.Qt.black))
# self.curve_DDC0_cumul_phase.setYAxis(Qwt.QwtPlot.yRight)
self.curve_DDC0_spc_avg = self.qplt_DDC0_spc.getPlotItem().plot(pen='g')
#self.curve_DDC0_spc_avg.attach(self.qplt_DDC0_spc)
#self.curve_DDC0_spc_avg.setPen(Qt.QPen(Qt.Qt.darkGreen))
# Put all the widgets into a grid layout
grid = Qt.QGridLayout()
# grid.addWidget(self.qlabel_ddc_plot_select, 0, 0)
grid.addWidget(self.qcombo_ddc_plot, 0, 0, 1, 2)
grid.addWidget(self.qlabel_ddc_rbw, 1, 0)
grid.addWidget(self.qedit_ddc_length, 1, 1)
grid.addWidget(self.qlabel_cumul_integral, 2, 0)
grid.addWidget(self.qedit_cumul_integral, 2, 1)
grid.addWidget(self.qlbl_xlims, 3, 0, 1, 1)
grid.addWidget(self.qedit_xlims, 3, 1, 1, 1)
grid.addWidget(self.qlbl_ylims, 4, 0, 1, 1)
grid.addWidget(self.qedit_ylims, 4, 1, 1, 1)
grid.addWidget(self.qlbl_spc_averaging, 5, 0, 1, 1)
grid.addWidget(self.qedit_spc_averaging, 5, 1, 1, 1)
grid.addWidget(self.qchk_phase_noise_fast_updates, 6, 0, 1, 2)
grid.addWidget(self.qlbl_mean_freq_error, 7, 0, 1, 2)
grid.addWidget(Qt.QLabel(''), 8, 0)
grid.addWidget(self.qplt_DDC0_spc, 0, 2, 9, 1)
grid.setRowStretch(7, 1)
grid.setColumnStretch(2, 1)
self.qgroupbox_phasenoise.setLayout(grid)
self.qgroupbox_phasenoise.setPalette(PalNormal)
self.qgroupbox_phasenoise.setAutoFillBackground(True)
######################################################################
# Layout for the whole form:
######################################################################
grid = Qt.QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.qgroupbox_settings, 0, 0, 1, 0)
grid.addWidget(self.spectrum, 1, 0, 1, 0)
grid.addWidget(self.qgroupbox_phasenoise, 2, 0, 1, 1)
grid.addWidget(self.qgroupbox_loop_filters, 3, 0, 1, 1)
# grid.setRowStretch(2, 1)
self.setLayout(grid)
# Adjust the size and position of the window
# self.resize(940, 1080-100+30)
# self.center()
#self.setGeometry(18, 40, 950, 1010)
#self.setGeometry(0, 0, 750, 1000)
self.setWindowTitle(self.strTitle)
#self.show()
# def resizeEvent(self, event):
# print('resizeEvent')
# print(self.geometry())
## Handle view resizing for the phase noise plot (since we need to manually link the left and right side axes)
def updatePhaseNoiseViews(self):
## view has resized; update auxiliary views to match
p1 = self.qplt_DDC0_spc.getPlotItem()
self.qplt_DDC0_spc_right_viewbox.setGeometry(p1.vb.sceneBoundingRect())
## need to re-update linked axes since this was called
## incorrectly while views had different shapes.
## (probably this should be handled in ViewBox.resizeEvent)
self.qplt_DDC0_spc_right_viewbox.linkedViewChanged(p1.vb, self.qplt_DDC0_spc_right_viewbox.XAxis)
def loadParameters(self):
# Update the values in the UI to reflect the internal values:
# Get values from xml file
for k in range(3):
if self.output_controls[k] == True:
# print('XEM_GUI_MainWindow(): About to call loadParameters()')
if k < 2: # For qloop_filter 0 and 1
# print('before calling self.qloop_filters[k].loadParameters(self.sp)')
self.qloop_filters[k].loadParameters(self.sp) # Get values from xml file for loop_filters
# print('after calling self.qloop_filters[k].loadParameters(self.sp)')
# self.qchk_lock.setChecked(self.qloop_filters[k].qchk_lock.isChecked()) # update the qchk_lock in this widget with the value loaded from sp
# print('after calling setChecked')
# Get dac gain from the system parameters object and set it in the UI:
# print('before calling self.sp.getValue(''VCO_gain'', strDAC)')
strDAC = 'DAC{:01d}'.format(k)
str_VCO_gain = (self.sp.getValue('VCO_gain', strDAC))
# print('before calling self.qedit_vco_gain[k].setText(str_VCO_gain)')
self.qedit_vco_gain[k].blockSignals(True)
self.qedit_vco_gain[k].setText(str_VCO_gain)
self.qedit_vco_gain[k].blockSignals(False)
# print('after calling self.qedit_vco_gain[k].setText(str_VCO_gain)')
# Output offsets values:
output_offset_in_volts = float(self.sp.getValue('Output_offset_in_volts', strDAC))
# Scale this to the correct units for the output offset slider:
min_output_in_volts = float(self.sp.getValue('Output_limits_low', strDAC))
max_output_in_volts = float(self.sp.getValue('Output_limits_high', strDAC))
slider_units = (output_offset_in_volts - min_output_in_volts)/(max_output_in_volts-min_output_in_volts) * 1e6
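# Note: the offset sliders use the same 0..1e6 "slider units" convention as in chkLockClickedEvent(),
# i.e. the fraction of the full output range scaled by 1e6.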
# print('calling dac offset slider setValue()')
# self.q_dac_offset[k].blockSignals(True)
# self.q_dac_offset[k].setValue(slider_units)
# self.q_dac_offset[k].blockSignals(False)
self.spectrum.setDacOffset(k, slider_units)
# print('done calling dac offset slider setValue()')
# Get ddc reference frequency from the system parameters object and set it in the UI:
strDDC = 'DDC{:01d}'.format(self.selected_ADC)
str_ref_freq = (self.sp.getValue('Reference_frequency', strDDC))
self.qedit_ref_freq.setText(str_ref_freq)
self.qedit_ref_freq.reset_my_color()
# print('done loadParameters()')
return
def center(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
# self.move(qr.topLeft())
self.move(QtGui.QDesktopWidget().availableGeometry().topLeft() + Qt.QPoint(10, 10))
def initSL(self):
# Old function to start the GUI communication
# self.sl = SuperLaserLand_JD2()
# self.sl.open()
print("initSL()")
self.loadParameters()
bUpdateFPGA = True
# Send values to FPGA
if bUpdateFPGA == True:
self.setVCOFreq_event()
self.setVCOGain_event()
self.chkLockClickedEvent()
if self.output_controls[0] == True:
self.setPWM0_event()
# self.setFLL0_event()
# self.setPLL0_event()
# self.timerID = self.startTimer(500 * 1/1)
self.timerID = 0
self.qchk_refresh.setChecked(False) # Set this to True to have the auto-refresh active when running the GUI, or to False to wait for the user to check the box
self.refreshChk_event()
# Start the timer which reads the dither:
# self.timerIDDither = Qt
self.timerIDDither = Qt.QTimer(self)
self.timerIDDither.timeout.connect(self.timerDitherEvent)
self.timerIDDither.start(100) # 100 ms readout delay, increased to 1000 ms for debugging
# print "Warning! Increased self.timerIDDither.start(100) to 3000 for debugging."
# self.timerDitherEvent()
# self.grabAndDisplayADC()
# self.displayDDC()
self.displayDAC() # This populates the current DAC values with the actual value
# self.timerEvent(0) # run the event handler once right away, to populate the rest of the window
print("initSL() done")
def closeEvent(self, event):
# from http://stackoverflow.com/questions/1414781/prompt-on-exit-in-pyqt-application
# quit_msg = "Are you sure you want to exit the program?"
# reply = QtGui.QMessageBox.question(self, 'Message',
# quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
#
# print(event)
#
# if reply == QtGui.QMessageBox.Yes:
# event.accept()
# app = Qt.QApplication.instance()
# app.closeAllWindows()
## app.
# else:
# event.ignore()
return
@logCommsErrorsAndBreakoutOfFunction()
def timerDitherEvent(self):
# print('timerDitherEvent')
# Check if the sl object exists: otherwise this timer will keep throwing exceptions, filling up the console messages
# and preventing us from seeing the real cause
if not hasattr(self, 'sl'):
print('SL object does not exist anymore. disabling timer in timerDitherEvent')
self.timerIDDither.stop()
return
start_time = time.perf_counter()
for k in range(2): # There is no dither for DAC2 (the third output)
if self.output_controls[k]:
if self.sl.dither_enable[k] == False:
if k != 2: # DAC2 currently does not have dither
self.qlabel_detected_vco_gain[k].setText('off')
self.qlabel_detected_vco_gain[k].setStyleSheet("color: white; background-color: black")
else:
samples = self.sl.ditherRead(2, k)
# There is an implicit (-) sign because the DDC has to shift the frequency to 0.
# This means that the detected gain will be positive when the VCO sign checkbox is correctly tuned
samples = -samples
samples = np.mean(samples)
# np.mean() returns a numpy.float, but the conversion functions expect an ndarray
samples = np.ndarray((1,), dtype=np.float, buffer=samples)
# TODO: fancier things with the real and imaginary part, to try to detect invalid readings? Is this necessary?
# TODO: Compare many different readings to try to sort out any incorrect ones?
VCO_detected_gain_in_Hz_per_Volts = self.sl.scaleDitherResultsToHzPerVolts(samples, k)
self.VCO_detected_gain_in_Hz_per_Volts[k] = VCO_detected_gain_in_Hz_per_Volts
self.qlabel_detected_vco_gain[k].setText('%.1e' % VCO_detected_gain_in_Hz_per_Volts)
elapsed_time = time.perf_counter() - start_time
# print('Elapsed time (timerDitherEvent) = %f ms' % (1000*elapsed_time))
# If the detected gain is negative, the loop will be unstable when closed, so we switch to a red background so that the user can flip the sign
if VCO_detected_gain_in_Hz_per_Volts > 0:
# self.qedit_fi.setStyleSheet("background-color: %s" % Qt.QColor(Qt.Qt.white).name())
self.qlabel_detected_vco_gain[k].setStyleSheet("color: white; background-color: green")
else:
# red background
# self.qlabel_detected_vco_gain[k].setStyleSheet("color: white; background-color: %s" % Qt.QColor(Qt.Qt.red).name())
self.qlabel_detected_vco_gain[k].setStyleSheet("color: white; background-color: red")
@logCommsErrorsAndBreakoutOfFunction()
def timerEvent(self, e):
# print 'timerEvent : %.3f sec' % (time.perf_counter())
# Check if the sl object exists: otherwise this timer will keep throwing exceptions, filling up the console messages
# and preventing us from seeing the real cause. We let only one exception go through and then disable
try:
# Read out residuals and dump to disk:
if self.selected_ADC == 0:
pass
# Handle the LEDs display
ret = self.sl.readLEDs()
if ret is not None:
(LED_G0, LED_R0, LED_G1, LED_R1, LED_G2, LED_R2) = ret
# print ('%d, %d, %d, %d, %d, %d' % (LED_G0, LED_R0, LED_G1, LED_R1, LED_G2, LED_R2))
# if self.selected_ADC == 0:
# # if self.qchk_crash_monitor.isChecked():
# # self.checkCrash()
# # LEDs for CEO lock:
# if LED_G0 and (not LED_R0):
# # status is locked
# self.qlbl_status1.setText('Status: Locked')
# self.qlbl_status1.setStyleSheet('color: white; background-color: green')
# elif LED_G0 and LED_R0:
# # Status is railed
# self.qlbl_status1.setText('Status: Railed')
# self.qlbl_status1.setStyleSheet('color: white; background-color: orange')
# elif (not LED_G0) and LED_R0:
# # Residuals above threshold
# self.qlbl_status1.setText('Status: Residuals > threshold')
# self.qlbl_status1.setStyleSheet('color: white; background-color: red')
# elif (not LED_G0) and (not LED_R0):
# # Idle
# self.qlbl_status1.setText('Status: Idle')
# self.qlbl_status1.setStyleSheet('')
# elif self.selected_ADC == 1:
# # LEDs for Optical lock
# if LED_G1 and (not LED_R1):
# # status is locked
# self.qlbl_status1.setText('Status: Locked')
# self.qlbl_status1.setStyleSheet('color: white; background-color: green')
# elif LED_G1 and LED_R1:
# # Status is railed
# self.qlbl_status1.setText('Status: Railed')
# self.qlbl_status1.setStyleSheet('color: white; background-color: orange')
# elif (not LED_G1) and LED_R1:
# # Residuals above threshold
# self.qlbl_status1.setText('Status: Residuals above threshold')
# self.qlbl_status1.setStyleSheet('color: white; background-color: red')
# elif (not LED_G1) and (not LED_R1):
# # Idle
# self.qlbl_status1.setText('Status: Idle')
# self.qlbl_status1.setStyleSheet('')
# # LEDs for slow PZT output:
# if LED_G2 and (not LED_R2):
# # status is locked
# self.qlbl_status2.setText('Status: Locked')
# self.qlbl_status2.setStyleSheet('color: white; background-color: green')
# elif LED_G2 and LED_R2:
# # Status is railed
# self.qlbl_status2.setText('Status: Railed')
# self.qlbl_status2.setStyleSheet('color: white; background-color: orange')
# elif (not LED_G2) and LED_R2:
# # Residuals above threshold
# self.qlbl_status2.setText('Status: Residuals above threshold')
# self.qlbl_status2.setStyleSheet('color: white; background-color: red')
# elif (not LED_G2) and (not LED_R2):
# # Idle
# self.qlbl_status2.setText('Status: Idle')
# self.qlbl_status2.setStyleSheet('')
if self.qchk_refresh.isChecked():
self.grabAndDisplayADC()
self.displayDAC()
if self.display_phase == 0 or self.qchk_phase_noise_fast_updates.isChecked():
self.displayDDC()
self.display_phase = self.display_phase + 1
if self.display_phase > 5:
self.display_phase = 0
except:
print('SL object does not exist anymore. disabling timer in timerEvent')
self.killTimer(self.timerID)
self.timerID = 0
self.qchk_refresh.setChecked(False)
raise
self.qlabel_refreshrate.setText('%.0f ms' % (1000*(time.perf_counter() - self.last_refresh)))
self.last_refresh = time.perf_counter()
# timerEvent()
def displayDAC(self):
# Check if another function is currently using the DDR2 logger:
if self.sl.bDDR2InUse:
print('displayDAC(): DDR2 logger in use, cannot get data from dac')
return
# For now: we grab the smallest chunk of points from the output (so as to not use too much time to refresh)
# and display the current average:
for k in range(3):
if self.output_controls[k]:
# Read from DAC #k
start_time = time.perf_counter()
(samples_out, ref_exp0) = self.getADCdata(input_select="DAC%d" % k, N_samples=256)
if samples_out is None:
return
elapsed_time = time.perf_counter() - start_time
if self.bDisplayTiming == True:
print('Elapsed time (read dac values) = %f ms' % (1000*elapsed_time))
samples_out = samples_out.astype(dtype=np.float)
VCO_gain_in_Hz_per_Volts = self.getVCOGainFromUI(k)
# Update the display:
# For the USB bug, compute the mean from the last points
current_output_in_volts = self.sl.convertDACCountsToVolts(k, np.mean(samples_out[128:256]))
current_output_in_hz = current_output_in_volts * VCO_gain_in_Hz_per_Volts
self.spectrum.qthermo_dac_current[k].setValue(current_output_in_volts)
self.spectrum.qlabel_dac_current_value[k].setText('{:.4f} V\n{:.0f} MHz'.format(current_output_in_volts, current_output_in_hz/1e6))
elapsed_time = time.perf_counter() - start_time
if self.bDisplayTiming == True:
print('Elapsed time (displayDAC total) = %f ms' % (1000*elapsed_time))
def displayDDC(self):
# self.bDisplayTiming = True
# Read from DDC0
try:
try:
N_points = int(float(self.qedit_ddc_length.text()))
except:
N_points = 100e3
if N_points < 64:
N_points = 64
start_time = time.perf_counter()
# if self.selected_ADC == 0:
# self.sl.setup_DDC0_write(N_points)
# elif self.selected_ADC == 1:
# self.sl.setup_DDC1_write(N_points)
# self.sl.trigger_write()
# self.sl.wait_for_write()
# if self.bDisplayTiming == True:
# print('Elapsed time (setup write) = %f' % (time.perf_counter()-start_time))
# start_time = time.perf_counter()
# inst_freq = self.sl.read_ddc_samples_from_DDR2()
inst_freq = self.getADCdata(input_select='DDC%d' % self.selected_ADC, N_samples=N_points, bReadAsDDC=True)
if inst_freq is None:
return
self.inst_freq = inst_freq
if self.bDisplayTiming == True:
print('Elapsed time (communication) = %f' % (time.perf_counter()-start_time))
# print('mean freq error = %f MHz, raw code = %f' % (np.mean(inst_freq)/1e6, np.mean(inst_freq)*2**10 / self.sl.fs*4))
self.qlbl_mean_freq_error.setText('Freq error: %.2f MHz' % (np.mean(inst_freq)/1e6))
# Compute the spectrum:
# We first perform decimation on the data since we don't have useful information above the cut-off frequency anyway:
start_time = time.perf_counter()
N_decimation = 10
fs_new = self.sl.fs/N_decimation
#inst_freq_decimated = decimate(inst_freq, N_decimation, zero_phase=False)
inst_freq_decimated = decimate(detrend(inst_freq), N_decimation, zero_phase=False)
# inst_freq_decimated = inst_freq
# fs_new = self.sl.fs
# For debugging: we want to check
# inst_freq_decimated = np.random.randn(100e3)
# print('Data std dev = %f Hz' % np.std(inst_freq_decimated))
# print('Data variance = %f Hz^2' % np.var(inst_freq_decimated))
if self.bDisplayTiming == True:
print('Elapsed time (decimation) = %f' % (time.perf_counter()-start_time))
start_time = time.perf_counter()
# Compute the spectrum of the decimated signal:
start_time = time.perf_counter()
N_fft = 2**(int(np.ceil(np.log2(len(inst_freq_decimated)))))
frequency_axis = np.linspace(0, (N_fft-1)/float(N_fft)*fs_new, N_fft)
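# N_fft is the record length rounded up to the next power of two (np.fft.fft() zero-pads up to
# N_fft), and frequency_axis spans 0 to fs_new over N_fft bins; only the first half of the
# spectrum (up to the Nyquist frequency) is displayed.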
last_index_shown = int(np.round(len(frequency_axis)/2))
window_function = np.blackman(len(inst_freq_decimated))
window_NEB = np.sum((window_function/np.sum(window_function))**2) * fs_new
# print('window_NEB = %f Hz' % window_NEB)
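# Scaling of the windowed periodogram: dividing |FFT|^2 by sum(window)^2 gives the power per bin
# in Hz^2; dividing by the window's noise-equivalent bandwidth (window_NEB, in Hz) converts this
# to a power spectral density in Hz^2/Hz, and the factor of 2 below folds the negative frequencies
# into a single-sided spectrum (the DC bin is left untouched).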
spc = np.fft.fft(inst_freq_decimated * window_function, N_fft)
spc = np.real(spc*np.conj(spc))/(sum(window_function)**2) # Spectrum is now scaled in power (Hz^2 per bin)
# Scale the spectrum to be a single-sided power spectral density in Hz^2/Hz:
spc[1:last_index_shown] = 2*spc[1:last_index_shown] / window_NEB
# # Compute the running average:
# Compute spectrum averaging with exponential smoothing (simple first-order IIR filter)
try:
n_spc_avg = int(round(float(self.qedit_spc_averaging.text())))
if n_spc_avg > 1.:
self.bAveragePhaseNoise = True
self.N_spc_average = n_spc_avg
else:
self.bAveragePhaseNoise = False
self.N_spc_average = 1.
except:
n_spc_avg = 1.
self.bAveragePhaseNoise = False
if self.bAveragePhaseNoise:
# print('self.N_spc_average = %d' % self.N_spc_average)
filter_alpha = np.exp(-1./self.N_spc_average)
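# First-order IIR (exponential) averaging: with alpha = exp(-1/N), each new spectrum is blended in
# with weight (1 - alpha), giving an effective averaging time constant of roughly N spectra.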
try:
# if this is not the first time we are called with averaging enabled, we run the filter:
if self.bAveragePhaseNoiseLast:
self.spc_running_sum = filter_alpha * self.spc_running_sum + (1-filter_alpha)*spc
else:
# this is the first time that we are called with averaging enabled, so we reset the current state
self.spc_running_sum = spc
except:
# This is the first time that we are called:
self.spc_running_sum = spc
self.bAveragePhaseNoiseLast = self.bAveragePhaseNoise
# print('Freq noise PSD: %e Hz^2/Hz' % (np.mean(spc[1:last_index_shown])))
self.freq_noise_psd = spc[1:last_index_shown]
self.freq_noise_axis = frequency_axis[1:last_index_shown]
# spc = np.abs(spc)
if self.bDisplayTiming == True:
print('Elapsed time (FFT) = %f' % (time.perf_counter()-start_time))
try:
f_limits = self.qedit_xlims.text()
f_limits = f_limits.split(',')
f_limits = (float(f_limits[0]), float(f_limits[1]))
except:
f_limits = (frequency_axis[1], frequency_axis[last_index_shown])
try:
y_limits = self.qedit_ylims.text()
y_limits = y_limits.split(',')
y_limits = (float(y_limits[0]), float(y_limits[1]))
except:
y_limits = (-140, 60)
# Update the graph
if self.qcombo_ddc_plot.currentIndex() == 0:
# Display the frequency noise
spc = 10*np.log10(spc + 1e-20)
self.curve_DDC0_spc.setData(frequency_axis[1:last_index_shown], spc[1:last_index_shown])
if self.bAveragePhaseNoise:
self.curve_DDC0_spc_avg.setData(frequency_axis[1:last_index_shown], 10*np.log10(self.spc_running_sum[1:last_index_shown] + 1e-20))
self.curve_DDC0_spc_avg.setVisible(True)
else:
self.curve_DDC0_spc_avg.setVisible(False)
self.qplt_DDC0_spc.setTitle('Freq noise PSD')
self.qplt_DDC0_spc.setLabel('left', 'PSD [dB Hz^2/Hz]')
self.qplt_DDC0_spc.setYRange(y_limits[0], y_limits[1])
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.xBottom, frequency_axis[1], frequency_axis[last_index_shown])
self.qplt_DDC0_spc.getPlotItem().setLogMode(x=True)
self.qplt_DDC0_spc.setXRange(np.log10(f_limits[0]), np.log10(f_limits[1]))
# self.qplt_DDC0_spc.setAxisScaleEngine(Qwt.QwtPlot.xBottom, Qwt.QwtLog10ScaleEngine())
self.qplt_DDC0_spc.setLabel('bottom', 'Frequency [Hz]')
self.curve_DDC0_cumul_phase.setVisible(False)
elif self.qcombo_ddc_plot.currentIndex() == 1:
# Compute the phase noise time-domain standard deviation:
phasenoise_stddev = np.std(np.cumsum(inst_freq*2*np.pi/self.sl.fs))
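# The instantaneous phase is reconstructed by integrating the instantaneous frequency:
# phi[n] = sum_k 2*pi*f[k]/fs, so the standard deviation above is the rms phase fluctuation
# over the acquired record, in radians.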
# Display the phase noise (equal to 1/f^2 times the frequency noise PSD)
self.curve_DDC0_spc.setData(frequency_axis[1:last_index_shown], 10*np.log10(spc[1:last_index_shown] + 1e-20) - 20*np.log10(frequency_axis[1:last_index_shown]))
if self.bAveragePhaseNoise:
self.curve_DDC0_spc_avg.setData(frequency_axis[1:last_index_shown], 10*np.log10(self.spc_running_sum[1:last_index_shown] + 1e-20) - 20*np.log10(frequency_axis[1:last_index_shown]))
self.curve_DDC0_spc_avg.setVisible(True)
else:
self.curve_DDC0_spc_avg.setVisible(False)
self.qplt_DDC0_spc.setXRange(f_limits[0], f_limits[1])
self.qplt_DDC0_spc.setTitle('Phase noise PSD, std dev = %.2f radrms' % phasenoise_stddev)
self.qplt_DDC0_spc.setLabel('left', 'PSD [dBc/Hz]')
self.qplt_DDC0_spc.setYRange(y_limits[0], y_limits[1])
#self.qplt_DDC0_spc.setAxisScaleEngine(Qwt.QwtPlot.xBottom, Qwt.QwtLog10ScaleEngine())
self.qplt_DDC0_spc.getPlotItem().setLogMode(x=True)
self.qplt_DDC0_spc.setXRange(np.log10(f_limits[0]), np.log10(f_limits[1]*5./6.0)) # the scaling is because the widget doesn't seem to use the exact values that we pass...
self.qplt_DDC0_spc.setLabel('bottom', 'Frequency [Hz]')
# Display the cumulative integral of the phase noise:
# Select desired frequency range:
try:
integration_higher_bound = float(self.qedit_cumul_integral.text())
except:
integration_higher_bound = 1e6
if integration_higher_bound > fs_new/2:
integration_higher_bound = fs_new/2
if integration_higher_bound <= 2/len(spc)*fs_new:
integration_higher_bound = 2/len(spc)*fs_new
integration_higher_index = int(round(integration_higher_bound/fs_new*len(spc)))
# print('integration up to %d out of %d' % (integration_higher_index, len(spc)))
frequency_axis_integral = frequency_axis[1:integration_higher_index]
# Integrate the phase noise PSD, from the highest frequency to the lowest
phase_psd = spc[1:integration_higher_index] / frequency_axis_integral**2
cumul_int = np.flipud(np.cumsum(np.flipud(phase_psd))) * np.mean(np.diff(frequency_axis_integral))
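# cumul_int[k] approximates the integral of the phase noise PSD from frequency_axis_integral[k]
# up to the integration limit (integrating from high to low frequency); its square root, plotted
# on the right-hand axis below, is the cumulative rms phase in radians.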
# print((cumul_int).shape)
# cumul_int = 0*cumul_int + 10
# print((cumul_int).shape)
# Show results
self.curve_DDC0_cumul_phase.setData(frequency_axis_integral, np.sqrt(cumul_int))
self.curve_DDC0_cumul_phase.setVisible(True)
#self.qplt_DDC0_spc_right_viewbox.setYRange(0, 2*2*np.pi)
#self.qplt_DDC0_spc_right_viewbox.setXRange(0, 2*2*np.pi)
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.xBottom, frequency_axis[1], frequency_axis[last_index_shown])
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.yRight, 0, 2*2*np.pi)
elif self.qcombo_ddc_plot.currentIndex() == 2:
# Display the raw, time-domain instantaneous frequency output by the DDC block, mostly for debugging:
time_axis = np.arange(0, len(inst_freq))/self.sl.fs
self.curve_DDC0_spc.setData(time_axis, inst_freq)
self.curve_DDC0_spc_avg.setVisible(False)
self.curve_DDC0_cumul_phase.setVisible(False)
self.qplt_DDC0_spc.setTitle('Instantaneous frequency error, std dev = %.1f kHz' % (np.std(inst_freq_decimated)/1e3))
self.qplt_DDC0_spc.setLabel('left', 'Freq [Hz]')
self.qplt_DDC0_spc.setLabel('bottom', 'Time [s]')
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.yLeft, -self.sl.fs/2, self.sl.fs/2)
self.qplt_DDC0_spc.setYRange(np.min(inst_freq), np.max(inst_freq))
#self.qplt_DDC0_spc.setAxisScaleEngine(Qwt.QwtPlot.xBottom, Qwt.QwtLinearScaleEngine())
self.qplt_DDC0_spc.getPlotItem().setLogMode(x=False)
self.qplt_DDC0_spc.setXRange(time_axis[0], time_axis[-1])
elif self.qcombo_ddc_plot.currentIndex() == 3:
# Display the time-domain instantaneous phase output by the DDC block (computed by integrating the frequency), mostly for debugging:
time_axis = np.arange(0, len(inst_freq))/self.sl.fs
inst_phase = np.cumsum(inst_freq*2*np.pi/self.sl.fs)
# Compute the phase noise time-domain standard deviation:
phasenoise_stddev = np.std(inst_phase)
self.curve_DDC0_spc.setData(time_axis, inst_phase)
self.curve_DDC0_spc_avg.setVisible(False)
self.curve_DDC0_cumul_phase.setVisible(False)
self.qplt_DDC0_spc.setTitle('Instantaneous phase error, std dev = %.2f radrms' % phasenoise_stddev)
self.qplt_DDC0_spc.setLabel('left', 'Phase [rad]')
self.qplt_DDC0_spc.setLabel('bottom', 'Time [s]')
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.yLeft, -self.sl.fs/2, self.sl.fs/2)
self.qplt_DDC0_spc.setYRange(np.min(inst_phase), np.max(inst_phase))
#self.qplt_DDC0_spc.setAxisScaleEngine(Qwt.QwtPlot.xBottom, Qwt.QwtLinearScaleEngine())
self.qplt_DDC0_spc.getPlotItem().setLogMode(x=False)
self.qplt_DDC0_spc.setXRange(time_axis[0], time_axis[-1])
# self.qplt_DDC0_spc.setAxisScale(Qwt.QwtPlot.yLeft, -3.14, 3.14)
# print "debug warning: phase noise plot scaled to +/- pi"
# # Display the un-decimated spectrum:
# frequency_axis = np.linspace(0, (len(inst_freq)-1)/float(len(inst_freq))*(self.sl.fs), len(inst_freq))
# last_index_shown = np.round(len(frequency_axis)/2)
# window_function = np.blackman(len(inst_freq))
# spc = np.abs(np.fft.fft((inst_freq-np.mean(inst_freq)) * window_function))/(sum(window_function)/2)
# spc = 20*np.log10(np.abs(spc) + 1e-7)
# self.curve_DDC0_spc.setData(frequency_axis[1:last_index_shown], spc[1:last_index_shown])
# Refresh the display:
self.qplt_DDC0_spc.replot()
if window_NEB > 1e6:
self.qlabel_ddc_rbw.setText('RBW: %.1f MHz; Points:' % (round(window_NEB*1e5)/1e5/1e6))
elif window_NEB > 1e3:
self.qlabel_ddc_rbw.setText('RBW: %.1f kHz; Points:' % (round(window_NEB*1e2)/1e2/1e3))
else:
self.qlabel_ddc_rbw.setText('RBW: %.0f Hz; Points:' % (round(window_NEB)))
except:
del self.sl
print('Unhandled exception')
raise
# pause(1/10.)
self.bDisplayTiming = False
def grabAndDisplayADC(self):
(input_select, plot_type, N_samples) = self.spectrum.getGUIsettingsForADCdata()
# print("input_select = %s" % input_select)
# Grab data from the FPGA:
start_time = time.perf_counter()
(samples_out, ref_exp0) = self.getADCdata(input_select, N_samples)
if (samples_out is None) or (ref_exp0 is None):
return
self.raw_adc_samples = samples_out.astype(dtype=np.float)
self.spectrum.plotADCdata(input_select, plot_type, samples_out, ref_exp0)
# Update the scale which indicates the ADC fill ratio in numbers of bits:
self.spectrum.updateScaleDisplays(samples_out)
def getADCdata(self, input_select, N_samples, bReadAsDDC=False):
if bReadAsDDC:
empty_return_value = None
else:
empty_return_value = (None, None)
start_time = time.perf_counter()
# Check if another function is currently using the DDR2 logger:
if self.sl.bDDR2InUse:
print('getADCdata(): DDR2 logger in use, cannot get data from adc')
return empty_return_value
# Block access to the DDR2 Logger to any other function until we are done:
self.sl.bDDR2InUse = True
time_start = time.perf_counter()
try:
# Read from selected source
self.sl.setup_write(self.sl.LOGGER_MUX[input_select], N_samples)
self.sl.trigger_write()
self.sl.wait_for_write()
if bReadAsDDC == False:
# read from ADC:
(samples_out, ref_exp0) = self.sl.read_adc_samples_from_DDR2()
else:
# read from DDC:
samples_out = self.sl.read_ddc_samples_from_DDR2()
return samples_out
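# (Returning from inside the try block still runs the finally clause below, so the
# DDR2-in-use flag is released; the ref_exp0 validation further down only applies to ADC reads.)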
max_abs = np.max(np.abs(samples_out))
samples_out = samples_out.astype(dtype=np.float)
self.raw_adc_samples = samples_out
except RP_PLL.CommsLoggeableError as e:
# log exception
logging.error("Exception occurred", exc_info=True)
return empty_return_value
except RP_PLL.CommsError as e:
# do not log exception (because it's simply an obvious follow-up to a previous one, and we don't want to fill up the log with repeated information)
return empty_return_value
finally:
# Tear-down, whether or not an exception occurred: Signal to other functions that they can use the DDR2 logger
self.sl.bDDR2InUse = False
if self.bDisplayTiming == True:
print('Elapsed time (Comm) = %f' % (time.perf_counter()-start_time))
# A little bit of data validation:
if input_select in ['ADC0', 'ADC1']:
if np.real(ref_exp0) == 0 and np.imag(ref_exp0) == 0:
print('getADCdata(): Invalid complex exponential. Probably because of a version mismatch between the RP firmware and Python GUI.')
return empty_return_value
else:
ref_exp0 = 1.0
return (samples_out, ref_exp0)
# From: http://stackoverflow.com/questions/273192/create-directory-if-it-doesnt-exist-for-file-write
def make_sure_path_exists(self, path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
| {"hexsha": "dce53fa82ff87ff85fec404b6ed0cdbe92040cc7", "size": 71177, "ext": "py", "lang": "Python", "max_stars_repo_path": "digital_servo_python_gui/XEM_GUI_MainWindow.py", "max_stars_repo_name": "jddes/Frequency-comb-DPLL", "max_stars_repo_head_hexsha": "4b742f852ab1545c54ee17674a5c9bef1f7e3350", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2017-06-01T21:52:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T19:58:22.000Z", "max_issues_repo_path": "digital_servo_python_gui/XEM_GUI_MainWindow.py", "max_issues_repo_name": "jddes/Frequency-comb-DPLL", "max_issues_repo_head_hexsha": "4b742f852ab1545c54ee17674a5c9bef1f7e3350", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2017-09-18T01:12:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-23T12:23:37.000Z", "max_forks_repo_path": "digital_servo_python_gui/XEM_GUI_MainWindow.py", "max_forks_repo_name": "jddes/Frequency-comb-DPLL", "max_forks_repo_head_hexsha": "4b742f852ab1545c54ee17674a5c9bef1f7e3350", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2017-06-13T18:15:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T02:48:06.000Z", "avg_line_length": 39.4769828064, "max_line_length": 317, "alphanum_fraction": 0.7031063405, "include": true, "reason": "import numpy,from scipy", "num_tokens": 20000} |
\hypertarget{section}{%
\section{1}\label{section}}
\bibverse{1} The beginning of the Good News of Jesus Christ, the Son of
God.
\bibverse{2} As it is written in the prophets, ``Behold,+ 1:2
``Behold'', from ``ἰδοὺ'', means look at, take notice, observe, see, or
gaze at. It is often used as an interjection. I send my messenger before
your face, who will prepare your way before you:+ 1:2 Malachi 3:1
\bibverse{3} the voice of one crying in the wilderness, `Make ready the
way of the Lord! Make his paths straight!'''+ 1:3 Isaiah 40:3
\bibverse{4} John came baptizing+ 1:4 or, immersing in the wilderness
and preaching the baptism of repentance for forgiveness of sins.
\bibverse{5} All the country of Judea and all those of Jerusalem went
out to him. They were baptized by him in the Jordan river, confessing
their sins. \bibverse{6} John was clothed with camel's hair and a
leather belt around his waist. He ate locusts and wild honey.
\bibverse{7} He preached, saying, ``After me comes he who is mightier
than I, the thong of whose sandals I am not worthy to stoop down and
loosen. \bibverse{8} I baptized you in+ 1:8 The Greek word (en)
translated here as ``in'' could also be translated as ``with'' in some
contexts. water, but he will baptize you in the Holy Spirit.''
\bibverse{9} In those days, Jesus came from Nazareth of Galilee, and was
baptized by John in the Jordan. \bibverse{10} Immediately coming up from
the water, he saw the heavens parting and the Spirit descending on him
like a dove. \bibverse{11} A voice came out of the sky, ``You are my
beloved Son, in whom I am well pleased.''
\bibverse{12} Immediately the Spirit drove him out into the wilderness.
\bibverse{13} He was there in the wilderness forty days, tempted by
Satan. He was with the wild animals; and the angels were serving him.
\bibverse{14} Now after John was taken into custody, Jesus came into
Galilee, preaching the Good News of God's Kingdom, \bibverse{15} and
saying, ``The time is fulfilled, and God's Kingdom is at hand! Repent,
and believe in the Good News.''
\bibverse{16} Passing along by the sea of Galilee, he saw Simon and
Andrew, the brother of Simon, casting a net into the sea, for they were
fishermen. \bibverse{17} Jesus said to them, ``Come after me, and I will
make you into fishers for men.''
\bibverse{18} Immediately they left their nets, and followed him.
\bibverse{19} Going on a little further from there, he saw James the son
of Zebedee, and John his brother, who were also in the boat mending the
nets. \bibverse{20} Immediately he called them, and they left their
father, Zebedee, in the boat with the hired servants, and went after
him.
\bibverse{21} They went into Capernaum, and immediately on the Sabbath
day he entered into the synagogue and taught. \bibverse{22} They were
astonished at his teaching, for he taught them as having authority, and
not as the scribes. \bibverse{23} Immediately there was in their
synagogue a man with an unclean spirit, and he cried out, \bibverse{24}
saying, ``Ha! What do we have to do with you, Jesus, you Nazarene? Have
you come to destroy us? I know you who you are: the Holy One of God!''
\bibverse{25} Jesus rebuked him, saying, ``Be quiet, and come out of
him!''
\bibverse{26} The unclean spirit, convulsing him and crying with a loud
voice, came out of him. \bibverse{27} They were all amazed, so that they
questioned among themselves, saying, ``What is this? A new teaching? For
with authority he commands even the unclean spirits, and they obey
him!'' \bibverse{28} The report of him went out immediately everywhere
into all the region of Galilee and its surrounding area.
\bibverse{29} Immediately, when they had come out of the synagogue, they
came into the house of Simon and Andrew, with James and John.
\bibverse{30} Now Simon's wife's mother lay sick with a fever, and
immediately they told him about her. \bibverse{31} He came and took her
by the hand and raised her up. The fever left her immediately,+ 1:31 NU
omits ``immediately''. and she served them.
\bibverse{32} At evening, when the sun had set, they brought to him all
who were sick and those who were possessed by demons. \bibverse{33} All
the city was gathered together at the door. \bibverse{34} He healed many
who were sick with various diseases and cast out many demons. He didn't
allow the demons to speak, because they knew him.
\bibverse{35} Early in the morning, while it was still dark, he rose up
and went out, and departed into a deserted place, and prayed there.
\bibverse{36} Simon and those who were with him searched for him.
\bibverse{37} They found him and told him, ``Everyone is looking for
you.''
\bibverse{38} He said to them, ``Let's go elsewhere into the next towns,
that I may preach there also, because I came out for this reason.''
\bibverse{39} He went into their synagogues throughout all Galilee,
preaching and casting out demons.
\bibverse{40} A leper came to him, begging him, kneeling down to him,
and saying to him, ``If you want to, you can make me clean.''
\bibverse{41} Being moved with compassion, he stretched out his hand,
and touched him, and said to him, ``I want to. Be made clean.''
\bibverse{42} When he had said this, immediately the leprosy departed
from him and he was made clean. \bibverse{43} He strictly warned him and
immediately sent him out, \bibverse{44} and said to him, ``See that you
say nothing to anybody, but go show yourself to the priest and offer for
your cleansing the things which Moses commanded, for a testimony to
them.''
\bibverse{45} But he went out, and began to proclaim it much, and to
spread about the matter, so that Jesus could no more openly enter into a
city, but was outside in desert places. People came to him from
everywhere.
\hypertarget{section-1}{%
\section{2}\label{section-1}}
\bibverse{1} When he entered again into Capernaum after some days, it
was heard that he was at home. \bibverse{2} Immediately many were
gathered together, so that there was no more room, not even around the
door; and he spoke the word to them. \bibverse{3} Four people came,
carrying a paralytic to him. \bibverse{4} When they could not come near
to him for the crowd, they removed the roof where he was. When they had
broken it up, they let down the mat that the paralytic was lying on.
\bibverse{5} Jesus, seeing their faith, said to the paralytic, ``Son,
your sins are forgiven you.''
\bibverse{6} But there were some of the scribes sitting there and
reasoning in their hearts, \bibverse{7} ``Why does this man speak
blasphemies like that? Who can forgive sins but God alone?''
\bibverse{8} Immediately Jesus, perceiving in his spirit that they so
reasoned within themselves, said to them, ``Why do you reason these
things in your hearts? \bibverse{9} Which is easier, to tell the
paralytic, `Your sins are forgiven;' or to say, `Arise, and take up your
bed, and walk?' \bibverse{10} But that you may know that the Son of Man
has authority on earth to forgive sins''---he said to the paralytic---
\bibverse{11} ``I tell you, arise, take up your mat, and go to your
house.''
\bibverse{12} He arose, and immediately took up the mat and went out in
front of them all, so that they were all amazed and glorified God,
saying, ``We never saw anything like this!''
\bibverse{13} He went out again by the seaside. All the multitude came
to him, and he taught them. \bibverse{14} As he passed by, he saw Levi
the son of Alphaeus sitting at the tax office. He said to him, ``Follow
me.'' And he arose and followed him.
\bibverse{15} He was reclining at the table in his house, and many tax
collectors and sinners sat down with Jesus and his disciples, for there
were many, and they followed him. \bibverse{16} The scribes and the
Pharisees, when they saw that he was eating with the sinners and tax
collectors, said to his disciples, ``Why is it that he eats and drinks
with tax collectors and sinners?''
\bibverse{17} When Jesus heard it, he said to them, ``Those who are
healthy have no need for a physician, but those who are sick. I came not
to call the righteous, but sinners to repentance.''
\bibverse{18} John's disciples and the Pharisees were fasting, and they
came and asked him, ``Why do John's disciples and the disciples of the
Pharisees fast, but your disciples don't fast?''
\bibverse{19} Jesus said to them, ``Can the groomsmen fast while the
bridegroom is with them? As long as they have the bridegroom with them,
they can't fast. \bibverse{20} But the days will come when the
bridegroom will be taken away from them, and then they will fast in that
day. \bibverse{21} No one sews a piece of unshrunk cloth on an old
garment, or else the patch shrinks and the new tears away from the old,
and a worse hole is made. \bibverse{22} No one puts new wine into old
wineskins; or else the new wine will burst the skins, and the wine pours
out, and the skins will be destroyed; but they put new wine into fresh
wineskins.''
\bibverse{23} He was going on the Sabbath day through the grain fields;
and his disciples began, as they went, to pluck the ears of grain.
\bibverse{24} The Pharisees said to him, ``Behold, why do they do that
which is not lawful on the Sabbath day?''
\bibverse{25} He said to them, ``Did you never read what David did when
he had need and was hungry---he, and those who were with him?
\bibverse{26} How he entered into God's house at the time of Abiathar
the high priest, and ate the show bread, which is not lawful to eat
except for the priests, and gave also to those who were with him?''
\bibverse{27} He said to them, ``The Sabbath was made for man, not man
for the Sabbath. \bibverse{28} Therefore the Son of Man is lord even of
the Sabbath.''
\hypertarget{section-2}{%
\section{3}\label{section-2}}
\bibverse{1} He entered again into the synagogue, and there was a man
there whose hand was withered. \bibverse{2} They watched him, whether he
would heal him on the Sabbath day, that they might accuse him.
\bibverse{3} He said to the man whose hand was withered, ``Stand up.''
\bibverse{4} He said to them, ``Is it lawful on the Sabbath day to do
good or to do harm? To save a life or to kill?'' But they were silent.
\bibverse{5} When he had looked around at them with anger, being grieved
at the hardening of their hearts, he said to the man, ``Stretch out your
hand.'' He stretched it out, and his hand was restored as healthy as the
other. \bibverse{6} The Pharisees went out, and immediately conspired
with the Herodians against him, how they might destroy him.
\bibverse{7} Jesus withdrew to the sea with his disciples; and a great
multitude followed him from Galilee, from Judea, \bibverse{8} from
Jerusalem, from Idumaea, beyond the Jordan, and those from around Tyre
and Sidon. A great multitude, hearing what great things he did, came to
him. \bibverse{9} He spoke to his disciples that a little boat should
stay near him because of the crowd, so that they wouldn't press on him.
\bibverse{10} For he had healed many, so that as many as had diseases
pressed on him that they might touch him. \bibverse{11} The unclean
spirits, whenever they saw him, fell down before him and cried, ``You
are the Son of God!'' \bibverse{12} He sternly warned them that they
should not make him known.
\bibverse{13} He went up into the mountain and called to himself those
whom he wanted, and they went to him. \bibverse{14} He appointed twelve,
that they might be with him, and that he might send them out to preach
\bibverse{15} and to have authority to heal sicknesses and to cast out
demons: \bibverse{16} Simon (to whom he gave the name Peter);
\bibverse{17} James the son of Zebedee; and John, the brother of James,
(whom he called Boanerges, which means, Sons of Thunder); \bibverse{18}
Andrew; Philip; Bartholomew; Matthew; Thomas; James, the son of
Alphaeus; Thaddaeus; Simon the Zealot; \bibverse{19} and Judas Iscariot,
who also betrayed him.
Then he came into a house. \bibverse{20} The multitude came together
again, so that they could not so much as eat bread. \bibverse{21} When
his friends heard it, they went out to seize him; for they said, ``He is
insane.'' \bibverse{22} The scribes who came down from Jerusalem said,
``He has Beelzebul,'' and, ``By the prince of the demons he casts out
the demons.''
\bibverse{23} He summoned them and said to them in parables, ``How can
Satan cast out Satan? \bibverse{24} If a kingdom is divided against
itself, that kingdom cannot stand. \bibverse{25} If a house is divided
against itself, that house cannot stand. \bibverse{26} If Satan has
risen up against himself, and is divided, he can't stand, but has an
end. \bibverse{27} But no one can enter into the house of the strong man
to plunder unless he first binds the strong man; then he will plunder
his house.
\bibverse{28} ``Most certainly I tell you, all sins of the descendants
of man will be forgiven, including their blasphemies with which they may
blaspheme; \bibverse{29} but whoever may blaspheme against the Holy
Spirit never has forgiveness, but is subject to eternal condemnation.''+
3:29 NU reads, guilty of an eternal sin. \bibverse{30} ---because they
said, ``He has an unclean spirit.''
\bibverse{31} His mother and his brothers came, and standing outside,
they sent to him, calling him. \bibverse{32} A multitude was sitting
around him, and they told him, ``Behold, your mother, your brothers, and
your sisters+ 3:32 TR omits ``your sisters'' are outside looking for
you.''
\bibverse{33} He answered them, ``Who are my mother and my brothers?''
\bibverse{34} Looking around at those who sat around him, he said,
``Behold, my mother and my brothers! \bibverse{35} For whoever does the
will of God is my brother, my sister, and mother.''
\hypertarget{section-3}{%
\section{4}\label{section-3}}
\bibverse{1} Again he began to teach by the seaside. A great multitude
was gathered to him, so that he entered into a boat in the sea and sat
down. All the multitude were on the land by the sea. \bibverse{2} He
taught them many things in parables, and told them in his teaching,
\bibverse{3} ``Listen! Behold, the farmer went out to sow. \bibverse{4}
As he sowed, some seed fell by the road, and the birds+ 4:4 TR adds ``of
the air'' came and devoured it. \bibverse{5} Others fell on the rocky
ground, where it had little soil, and immediately it sprang up, because
it had no depth of soil. \bibverse{6} When the sun had risen, it was
scorched; and because it had no root, it withered away. \bibverse{7}
Others fell among the thorns, and the thorns grew up and choked it, and
it yielded no fruit. \bibverse{8} Others fell into the good ground and
yielded fruit, growing up and increasing. Some produced thirty times,
some sixty times, and some one hundred times as much.'' \bibverse{9} He
said, ``Whoever has ears to hear, let him hear.''
\bibverse{10} When he was alone, those who were around him with the
twelve asked him about the parables. \bibverse{11} He said to them, ``To
you is given the mystery of God's Kingdom, but to those who are outside,
all things are done in parables, \bibverse{12} that `seeing they may see
and not perceive, and hearing they may hear and not understand, lest
perhaps they should turn again, and their sins should be forgiven
them.'\,''+ 4:12 Isaiah 6:9-10
\bibverse{13} He said to them, ``Don't you understand this parable? How
will you understand all of the parables? \bibverse{14} The farmer sows
the word. \bibverse{15} The ones by the road are the ones where the word
is sown; and when they have heard, immediately Satan comes and takes
away the word which has been sown in them. \bibverse{16} These in the
same way are those who are sown on the rocky places, who, when they have
heard the word, immediately receive it with joy. \bibverse{17} They have
no root in themselves, but are short-lived. When oppression or
persecution arises because of the word, immediately they stumble.
\bibverse{18} Others are those who are sown among the thorns. These are
those who have heard the word, \bibverse{19} and the cares of this age,
and the deceitfulness of riches, and the lusts of other things entering
in choke the word, and it becomes unfruitful. \bibverse{20} Those which
were sown on the good ground are those who hear the word, accept it, and
bear fruit, some thirty times, some sixty times, and some one hundred
times.''
\bibverse{21} He said to them, ``Is a lamp brought to be put under a
basket + 4:21 literally, a modion, a dry measuring basket containing
about a peck (about 9 liters) or under a bed? Isn't it put on a stand?
\bibverse{22} For there is nothing hidden except that it should be made
known, neither was anything made secret but that it should come to
light. \bibverse{23} If any man has ears to hear, let him hear.''
\bibverse{24} He said to them, ``Take heed what you hear. With whatever
measure you measure, it will be measured to you; and more will be given
to you who hear. \bibverse{25} For whoever has, to him more will be
given; and he who doesn't have, even that which he has will be taken
away from him.''
\bibverse{26} He said, ``God's Kingdom is as if a man should cast seed
on the earth, \bibverse{27} and should sleep and rise night and day, and
the seed should spring up and grow, though he doesn't know how.
\bibverse{28} For the earth bears fruit by itself: first the blade, then
the ear, then the full grain in the ear. \bibverse{29} But when the
fruit is ripe, immediately he puts in the sickle, because the harvest
has come.''
\bibverse{30} He said, ``How will we liken God's Kingdom? Or with what
parable will we illustrate it? \bibverse{31} It's like a grain of
mustard seed, which, when it is sown in the earth, though it is less
than all the seeds that are on the earth, \bibverse{32} yet when it is
sown, grows up and becomes greater than all the herbs, and puts out
great branches, so that the birds of the sky can lodge under its
shadow.''
\bibverse{33} With many such parables he spoke the word to them, as they
were able to hear it. \bibverse{34} Without a parable he didn't speak to
them; but privately to his own disciples he explained everything.
\bibverse{35} On that day, when evening had come, he said to them,
``Let's go over to the other side.'' \bibverse{36} Leaving the
multitude, they took him with them, even as he was, in the boat. Other
small boats were also with him. \bibverse{37} A big wind storm arose,
and the waves beat into the boat, so much that the boat was already
filled. \bibverse{38} He himself was in the stern, asleep on the
cushion; and they woke him up and asked him, ``Teacher, don't you care
that we are dying?''
\bibverse{39} He awoke and rebuked the wind, and said to the sea,
``Peace! Be still!'' The wind ceased and there was a great calm.
\bibverse{40} He said to them, ``Why are you so afraid? How is it that
you have no faith?''
\bibverse{41} They were greatly afraid and said to one another, ``Who
then is this, that even the wind and the sea obey him?''
\hypertarget{section-4}{%
\section{5}\label{section-4}}
\bibverse{1} They came to the other side of the sea, into the country of
the Gadarenes. \bibverse{2} When he had come out of the boat,
immediately a man with an unclean spirit met him out of the tombs.
\bibverse{3} He lived in the tombs. Nobody could bind him any more, not
even with chains, \bibverse{4} because he had been often bound with
fetters and chains, and the chains had been torn apart by him, and the
fetters broken in pieces. Nobody had the strength to tame him.
\bibverse{5} Always, night and day, in the tombs and in the mountains,
he was crying out, and cutting himself with stones. \bibverse{6} When he
saw Jesus from afar, he ran and bowed down to him, \bibverse{7} and
crying out with a loud voice, he said, ``What have I to do with you,
Jesus, you Son of the Most High God? I adjure you by God, don't torment
me.'' \bibverse{8} For he said to him, ``Come out of the man, you
unclean spirit!''
\bibverse{9} He asked him, ``What is your name?''
He said to him, ``My name is Legion, for we are many.'' \bibverse{10} He
begged him much that he would not send them away out of the country.
\bibverse{11} Now on the mountainside there was a great herd of pigs
feeding. \bibverse{12} All the demons begged him, saying, ``Send us into
the pigs, that we may enter into them.''
\bibverse{13} At once Jesus gave them permission. The unclean spirits
came out and entered into the pigs. The herd of about two thousand
rushed down the steep bank into the sea, and they were drowned in the
sea. \bibverse{14} Those who fed the pigs fled, and told it in the city
and in the country.
The people came to see what it was that had happened. \bibverse{15} They
came to Jesus, and saw him who had been possessed by demons sitting,
clothed, and in his right mind, even him who had the legion; and they
were afraid. \bibverse{16} Those who saw it declared to them what
happened to him who was possessed by demons, and about the pigs.
\bibverse{17} They began to beg him to depart from their region.
\bibverse{18} As he was entering into the boat, he who had been
possessed by demons begged him that he might be with him. \bibverse{19}
He didn't allow him, but said to him, ``Go to your house, to your
friends, and tell them what great things the Lord has done for you and
how he had mercy on you.''
\bibverse{20} He went his way, and began to proclaim in Decapolis how
Jesus had done great things for him, and everyone marveled.
\bibverse{21} When Jesus had crossed back over in the boat to the other
side, a great multitude was gathered to him; and he was by the sea.
\bibverse{22} Behold, one of the rulers of the synagogue, Jairus by
name, came; and seeing him, he fell at his feet \bibverse{23} and begged
him much, saying, ``My little daughter is at the point of death. Please
come and lay your hands on her, that she may be made healthy, and
live.''
\bibverse{24} He went with him, and a great multitude followed him, and
they pressed upon him on all sides. \bibverse{25} A certain woman who
had a discharge of blood for twelve years, \bibverse{26} and had
suffered many things by many physicians, and had spent all that she had,
and was no better, but rather grew worse, \bibverse{27} having heard the
things concerning Jesus, came up behind him in the crowd and touched his
clothes. \bibverse{28} For she said, ``If I just touch his clothes, I
will be made well.'' \bibverse{29} Immediately the flow of her blood was
dried up, and she felt in her body that she was healed of her
affliction.
\bibverse{30} Immediately Jesus, perceiving in himself that the power
had gone out from him, turned around in the crowd and asked, ``Who
touched my clothes?''
\bibverse{31} His disciples said to him, ``You see the multitude
pressing against you, and you say, `Who touched me?'\,''
\bibverse{32} He looked around to see her who had done this thing.
\bibverse{33} But the woman, fearing and trembling, knowing what had
been done to her, came and fell down before him, and told him all the
truth.
\bibverse{34} He said to her, ``Daughter, your faith has made you well.
Go in peace, and be cured of your disease.''
\bibverse{35} While he was still speaking, people came from the
synagogue ruler's house, saying, ``Your daughter is dead. Why bother the
Teacher any more?''
\bibverse{36} But Jesus, when he heard the message spoken, immediately
said to the ruler of the synagogue, ``Don't be afraid, only believe.''
\bibverse{37} He allowed no one to follow him except Peter, James, and
John the brother of James. \bibverse{38} He came to the synagogue
ruler's house, and he saw an uproar, weeping, and great wailing.
\bibverse{39} When he had entered in, he said to them, ``Why do you make
an uproar and weep? The child is not dead, but is asleep.''
\bibverse{40} They ridiculed him. But he, having put them all out, took
the father of the child, her mother, and those who were with him, and
went in where the child was lying. \bibverse{41} Taking the child by the
hand, he said to her, ``Talitha cumi!'' which means, being interpreted,
``Girl, I tell you, get up!'' \bibverse{42} Immediately the girl rose up
and walked, for she was twelve years old. They were amazed with great
amazement. \bibverse{43} He strictly ordered them that no one should
know this, and commanded that something should be given to her to eat.
\hypertarget{section-5}{%
\section{6}\label{section-5}}
\bibverse{1} He went out from there. He came into his own country, and
his disciples followed him. \bibverse{2} When the Sabbath had come, he
began to teach in the synagogue, and many hearing him were astonished,
saying, ``Where did this man get these things?'' and, ``What is the
wisdom that is given to this man, that such mighty works come about by
his hands? \bibverse{3} Isn't this the carpenter, the son of Mary and
brother of James, Joses, Judah, and Simon? Aren't his sisters here with
us?'' So they were offended at him.
\bibverse{4} Jesus said to them, ``A prophet is not without honor,
except in his own country, and among his own relatives, and in his own
house.'' \bibverse{5} He could do no mighty work there, except that he
laid his hands on a few sick people and healed them. \bibverse{6} He
marveled because of their unbelief.
He went around the villages teaching. \bibverse{7} He called to himself
the twelve, and began to send them out two by two; and he gave them
authority over the unclean spirits. \bibverse{8} He commanded them that
they should take nothing for their journey, except a staff only: no
bread, no wallet, no money in their purse, \bibverse{9} but to wear
sandals, and not put on two tunics. \bibverse{10} He said to them,
``Wherever you enter into a house, stay there until you depart from
there. \bibverse{11} Whoever will not receive you nor hear you, as you
depart from there, shake off the dust that is under your feet for a
testimony against them. Assuredly, I tell you, it will be more tolerable
for Sodom and Gomorrah in the day of judgment than for that city!''
\bibverse{12} They went out and preached that people should repent.
\bibverse{13} They cast out many demons, and anointed many with oil who
were sick and healed them. \bibverse{14} King Herod heard this, for his
name had become known, and he said, ``John the Baptizer has risen from
the dead, and therefore these powers are at work in him.'' \bibverse{15}
But others said, ``He is Elijah.'' Others said, ``He is a prophet, or
like one of the prophets.'' \bibverse{16} But Herod, when he heard this,
said, ``This is John, whom I beheaded. He has risen from the dead.''
\bibverse{17} For Herod himself had sent out and arrested John and bound
him in prison for the sake of Herodias, his brother Philip's wife, for
he had married her. \bibverse{18} For John had said to Herod, ``It is
not lawful for you to have your brother's wife.'' \bibverse{19} Herodias
set herself against him and desired to kill him, but she couldn't,
\bibverse{20} for Herod feared John, knowing that he was a righteous and
holy man, and kept him safe. When he heard him, he did many things, and
he heard him gladly.
\bibverse{21} Then a convenient day came when Herod on his birthday made
a supper for his nobles, the high officers, and the chief men of
Galilee. \bibverse{22} When the daughter of Herodias herself came in and
danced, she pleased Herod and those sitting with him. The king said to
the young lady, ``Ask me whatever you want, and I will give it to you.''
\bibverse{23} He swore to her, ``Whatever you ask of me, I will give
you, up to half of my kingdom.''
\bibverse{24} She went out and said to her mother, ``What shall I ask?''
She said, ``The head of John the Baptizer.''
\bibverse{25} She came in immediately with haste to the king and
requested, ``I want you to give me right now the head of John the
Baptizer on a platter.''
\bibverse{26} The king was exceedingly sorry, but for the sake of his
oaths and of his dinner guests, he didn't wish to refuse her.
\bibverse{27} Immediately the king sent out a soldier of his guard and
commanded to bring John's head; and he went and beheaded him in the
prison, \bibverse{28} and brought his head on a platter, and gave it to
the young lady; and the young lady gave it to her mother.
\bibverse{29} When his disciples heard this, they came and took up his
corpse and laid it in a tomb.
\bibverse{30} The apostles gathered themselves together to Jesus, and
they told him all things, whatever they had done, and whatever they had
taught. \bibverse{31} He said to them, ``Come away into a deserted
place, and rest awhile.'' For there were many coming and going, and they
had no leisure so much as to eat. \bibverse{32} They went away in the
boat to a deserted place by themselves. \bibverse{33} They+ 6:33 TR
reads ``The multitudes'' instead of ``They'' saw them going, and many
recognized him and ran there on foot from all the cities. They arrived
before them and came together to him. \bibverse{34} Jesus came out, saw
a great multitude, and he had compassion on them because they were like
sheep without a shepherd; and he began to teach them many things.
\bibverse{35} When it was late in the day, his disciples came to him and
said, ``This place is deserted, and it is late in the day. \bibverse{36}
Send them away, that they may go into the surrounding country and
villages and buy themselves bread, for they have nothing to eat.''
\bibverse{37} But he answered them, ``You give them something to eat.''
They asked him, ``Shall we go and buy two hundred denarii+ 6:37 200
denarii was about 7 or 8 months' wages for an agricultural laborer. worth
of bread and give them something to eat?''
\bibverse{38} He said to them, ``How many loaves do you have? Go see.''
When they knew, they said, ``Five, and two fish.''
\bibverse{39} He commanded them that everyone should sit down in groups
on the green grass. \bibverse{40} They sat down in ranks, by hundreds
and by fifties. \bibverse{41} He took the five loaves and the two fish;
and looking up to heaven, he blessed and broke the loaves, and he gave
to his disciples to set before them, and he divided the two fish among
them all. \bibverse{42} They all ate and were filled. \bibverse{43} They
took up twelve baskets full of broken pieces and also of the fish.
\bibverse{44} Those who ate the loaves were+ 6:44 TR adds ``about'' five
thousand men.
\bibverse{45} Immediately he made his disciples get into the boat and go
ahead to the other side, to Bethsaida, while he himself sent the
multitude away. \bibverse{46} After he had taken leave of them, he went
up the mountain to pray.
\bibverse{47} When evening had come, the boat was in the middle of the
sea, and he was alone on the land. \bibverse{48} Seeing them distressed
in rowing, for the wind was contrary to them, about the fourth watch of
the night he came to them, walking on the sea; + 6:48 See Job 9:8 and he
would have passed by them, \bibverse{49} but they, when they saw him
walking on the sea, supposed that it was a ghost, and cried out;
\bibverse{50} for they all saw him and were troubled. But he immediately
spoke with them and said to them, ``Cheer up! It is I!+ 6:50 or, ``I
AM!'' Don't be afraid.'' \bibverse{51} He got into the boat with them;
and the wind ceased, and they were very amazed among themselves, and
marveled; \bibverse{52} for they hadn't understood about the loaves, but
their hearts were hardened.
\bibverse{53} When they had crossed over, they came to land at
Gennesaret and moored to the shore. \bibverse{54} When they had come out
of the boat, immediately the people recognized him, \bibverse{55} and
ran around that whole region, and began to bring those who were sick on
their mats to where they heard he was. \bibverse{56} Wherever he
entered---into villages, or into cities, or into the country---they laid
the sick in the marketplaces and begged him that they might just touch
the fringe+ 6:56 or, tassel of his garment; and as many as touched him
were made well.
\hypertarget{section-6}{%
\section{7}\label{section-6}}
\bibverse{1} Then the Pharisees and some of the scribes gathered
together to him, having come from Jerusalem. \bibverse{2} Now when they
saw some of his disciples eating bread with defiled, that is unwashed,
hands, they found fault. \bibverse{3} (For the Pharisees and all the
Jews don't eat unless they wash their hands and forearms, holding to the
tradition of the elders. \bibverse{4} They don't eat when they come from
the marketplace unless they bathe themselves, and there are many other
things which they have received to hold to: washings of cups, pitchers,
bronze vessels, and couches.) \bibverse{5} The Pharisees and the scribes
asked him, ``Why don't your disciples walk according to the tradition of
the elders, but eat their bread with unwashed hands?''
\bibverse{6} He answered them, ``Well did Isaiah prophesy of you
hypocrites, as it is written, `This people honors me with their lips,
but their heart is far from me. \bibverse{7} They worship me in vain,
teaching as doctrines the commandments of men.'+ 7:7 Isaiah 29:13
\bibverse{8} ``For you set aside the commandment of God, and hold
tightly to the tradition of men---the washing of pitchers and cups, and
you do many other such things.'' \bibverse{9} He said to them, ``Full
well do you reject the commandment of God, that you may keep your
tradition. \bibverse{10} For Moses said, `Honor your father and your
mother;'+ 7:10 Exodus 20:12; Deuteronomy 5:16 and, `He who speaks evil
of father or mother, let him be put to death.'+ 7:10 Exodus 21:17;
Leviticus 20:9 \bibverse{11} But you say, `If a man tells his father or
his mother, ``Whatever profit you might have received from me is
Corban,''\,'\,''+ 7:11 Corban is a Hebrew word for an offering devoted
to God. that is to say, given to God, \bibverse{12} ``then you no longer
allow him to do anything for his father or his mother, \bibverse{13}
making void the word of God by your tradition which you have handed
down. You do many things like this.''
\bibverse{14} He called all the multitude to himself and said to them,
``Hear me, all of you, and understand. \bibverse{15} There is nothing
from outside of the man that going into him can defile him; but the
things which proceed out of the man are those that defile the man.
\bibverse{16} If anyone has ears to hear, let him hear!''+ 7:16 NU omits
verse 16.
\bibverse{17} When he had entered into a house away from the multitude,
his disciples asked him about the parable. \bibverse{18} He said to
them, ``Are you also without understanding? Don't you perceive that
whatever goes into the man from outside can't defile him, \bibverse{19}
because it doesn't go into his heart, but into his stomach, then into
the latrine, making all foods clean?''+ 7:19 NU ends Jesus' direct quote
and question after ``latrine'', ending the verse with ``Thus he declared
all foods clean.'' \bibverse{20} He said, ``That which proceeds out of the
man, that defiles the man. \bibverse{21} For from within, out of the
hearts of men, proceed evil thoughts, adulteries, sexual sins, murders,
thefts, \bibverse{22} covetings, wickedness, deceit, lustful desires, an
evil eye, blasphemy, pride, and foolishness. \bibverse{23} All these
evil things come from within and defile the man.''
\bibverse{24} From there he arose and went away into the borders of Tyre
and Sidon. He entered into a house and didn't want anyone to know it,
but he couldn't escape notice. \bibverse{25} For a woman whose little
daughter had an unclean spirit, having heard of him, came and fell down
at his feet. \bibverse{26} Now the woman was a Greek, a Syrophoenician
by race. She begged him that he would cast the demon out of her
daughter. \bibverse{27} But Jesus said to her, ``Let the children be
filled first, for it is not appropriate to take the children's bread and
throw it to the dogs.''
\bibverse{28} But she answered him, ``Yes, Lord. Yet even the dogs under
the table eat the children's crumbs.''
\bibverse{29} He said to her, ``For this saying, go your way. The demon
has gone out of your daughter.''
\bibverse{30} She went away to her house, and found the child having
been laid on the bed, with the demon gone out.
\bibverse{31} Again he departed from the borders of Tyre and Sidon, and
came to the sea of Galilee through the middle of the region of
Decapolis. \bibverse{32} They brought to him one who was deaf and had an
impediment in his speech. They begged him to lay his hand on him.
\bibverse{33} He took him aside from the multitude privately and put his
fingers into his ears; and he spat and touched his tongue. \bibverse{34}
Looking up to heaven, he sighed, and said to him, ``Ephphatha!'' that
is, ``Be opened!'' \bibverse{35} Immediately his ears were opened, and
the impediment of his tongue was released, and he spoke clearly.
\bibverse{36} He commanded them that they should tell no one, but the
more he commanded them, so much the more widely they proclaimed it.
\bibverse{37} They were astonished beyond measure, saying, ``He has done
all things well. He makes even the deaf hear and the mute speak!''
\hypertarget{section-7}{%
\section{8}\label{section-7}}
\bibverse{1} In those days, when there was a very great multitude, and
they had nothing to eat, Jesus called his disciples to himself and said
to them, \bibverse{2} ``I have compassion on the multitude, because they
have stayed with me now three days and have nothing to eat. \bibverse{3}
If I send them away fasting to their home, they will faint on the way,
for some of them have come a long way.''
\bibverse{4} His disciples answered him, ``From where could one satisfy
these people with bread here in a deserted place?''
\bibverse{5} He asked them, ``How many loaves do you have?''
They said, ``Seven.''
\bibverse{6} He commanded the multitude to sit down on the ground, and
he took the seven loaves. Having given thanks, he broke them and gave
them to his disciples to serve, and they served the multitude.
\bibverse{7} They also had a few small fish. Having blessed them, he
said to serve these also. \bibverse{8} They ate and were filled. They
took up seven baskets of broken pieces that were left over. \bibverse{9}
Those who had eaten were about four thousand. Then he sent them away.
\bibverse{10} Immediately he entered into the boat with his disciples
and came into the region of Dalmanutha. \bibverse{11} The Pharisees came
out and began to question him, seeking from him a sign from heaven and
testing him. \bibverse{12} He sighed deeply in his spirit and said,
``Why does this generation+ 8:12 The word translated ``generation'' here
(genea) could also be translated ``people'', ``race'', or ``family''.
seek a sign? Most certainly I tell you, no sign will be given to this
generation.''
\bibverse{13} He left them, and again entering into the boat, departed
to the other side. \bibverse{14} They forgot to take bread; and they
didn't have more than one loaf in the boat with them. \bibverse{15} He
warned them, saying, ``Take heed: beware of the yeast of the Pharisees
and the yeast of Herod.''
\bibverse{16} They reasoned with one another, saying, ``It's because we
have no bread.''
\bibverse{17} Jesus, perceiving it, said to them, ``Why do you reason
that it's because you have no bread? Don't you perceive yet or
understand? Is your heart still hardened? \bibverse{18} Having eyes,
don't you see? Having ears, don't you hear? Don't you remember?
\bibverse{19} When I broke the five loaves among the five thousand, how
many baskets full of broken pieces did you take up?''
They told him, ``Twelve.''
\bibverse{20} ``When the seven loaves fed the four thousand, how many
baskets full of broken pieces did you take up?''
They told him, ``Seven.''
\bibverse{21} He asked them, ``Don't you understand yet?''
\bibverse{22} He came to Bethsaida. They brought a blind man to him and
begged him to touch him. \bibverse{23} He took hold of the blind man by
the hand, and brought him out of the village. When he had spat on his
eyes, and laid his hands on him, he asked him if he saw anything.
\bibverse{24} He looked up, and said, ``I see men, but I see them like
walking trees.''
\bibverse{25} Then again he laid his hands on his eyes. He looked
intently, and was restored, and saw everyone clearly. \bibverse{26} He
sent him away to his house, saying, ``Don't enter into the village, nor
tell anyone in the village.''
\bibverse{27} Jesus went out, with his disciples, into the villages of
Caesarea Philippi. On the way he asked his disciples, ``Who do men say
that I am?''
\bibverse{28} They told him, ``John the Baptizer, and others say Elijah,
but others, one of the prophets.''
\bibverse{29} He said to them, ``But who do you say that I am?''
Peter answered, ``You are the Christ.''
\bibverse{30} He commanded them that they should tell no one about him.
\bibverse{31} He began to teach them that the Son of Man must suffer
many things, and be rejected by the elders, the chief priests, and the
scribes, and be killed, and after three days rise again. \bibverse{32}
He spoke to them openly. Peter took him and began to rebuke him.
\bibverse{33} But he, turning around and seeing his disciples, rebuked
Peter, and said, ``Get behind me, Satan! For you have in mind not the
things of God, but the things of men.''
\bibverse{34} He called the multitude to himself with his disciples and
said to them, ``Whoever wants to come after me, let him deny himself,
and take up his cross, and follow me. \bibverse{35} For whoever wants to
save his life will lose it; and whoever will lose his life for my sake
and the sake of the Good News will save it. \bibverse{36} For what does
it profit a man to gain the whole world and forfeit his life?
\bibverse{37} For what will a man give in exchange for his life?
\bibverse{38} For whoever will be ashamed of me and of my words in this
adulterous and sinful generation, the Son of Man also will be ashamed of
him when he comes in his Father's glory with the holy angels.''
\hypertarget{section-8}{%
\section{9}\label{section-8}}
\bibverse{1} He said to them, ``Most certainly I tell you, there are
some standing here who will in no way taste death until they see God's
Kingdom come with power.''
\bibverse{2} After six days Jesus took with him Peter, James, and John,
and brought them up onto a high mountain privately by themselves, and he
was changed into another form in front of them. \bibverse{3} His
clothing became glistening, exceedingly white, like snow, such as no
launderer on earth can whiten them. \bibverse{4} Elijah and Moses
appeared to them, and they were talking with Jesus.
\bibverse{5} Peter answered Jesus, ``Rabbi, it is good for us to be
here. Let's make three tents: one for you, one for Moses, and one for
Elijah.'' \bibverse{6} For he didn't know what to say, for they were
very afraid.
\bibverse{7} A cloud came, overshadowing them, and a voice came out of
the cloud, ``This is my beloved Son. Listen to him.''
\bibverse{8} Suddenly looking around, they saw no one with them any
more, except Jesus only.
\bibverse{9} As they were coming down from the mountain, he commanded
them that they should tell no one what things they had seen, until after
the Son of Man had risen from the dead. \bibverse{10} They kept this
saying to themselves, questioning what the ``rising from the dead''
meant.
\bibverse{11} They asked him, saying, ``Why do the scribes say that
Elijah must come first?''
\bibverse{12} He said to them, ``Elijah indeed comes first, and restores
all things. How is it written about the Son of Man, that he should
suffer many things and be despised? \bibverse{13} But I tell you that
Elijah has come, and they have also done to him whatever they wanted to,
even as it is written about him.''
\bibverse{14} Coming to the disciples, he saw a great multitude around
them, and scribes questioning them. \bibverse{15} Immediately all the
multitude, when they saw him, were greatly amazed, and running to him,
greeted him. \bibverse{16} He asked the scribes, ``What are you asking
them?''
\bibverse{17} One of the multitude answered, ``Teacher, I brought to you
my son, who has a mute spirit; \bibverse{18} and wherever it seizes him,
it throws him down; and he foams at the mouth, grinds his teeth, and
becomes rigid. I asked your disciples to cast it out, and they weren't
able.''
\bibverse{19} He answered him, ``Unbelieving generation, how long shall
I be with you? How long shall I bear with you? Bring him to me.''
\bibverse{20} They brought him to him, and when he saw him, immediately
the spirit convulsed him and he fell on the ground, wallowing and
foaming at the mouth.
\bibverse{21} He asked his father, ``How long has it been since this has
been happening to him?''
He said, ``From childhood. \bibverse{22} Often it has cast him both into
the fire and into the water to destroy him. But if you can do anything,
have compassion on us and help us.''
\bibverse{23} Jesus said to him, ``If you can believe, all things are
possible to him who believes.''
\bibverse{24} Immediately the father of the child cried out with tears,
``I believe. Help my unbelief!''
\bibverse{25} When Jesus saw that a multitude came running together, he
rebuked the unclean spirit, saying to him, ``You mute and deaf spirit, I
command you, come out of him, and never enter him again!''
\bibverse{26} After crying out and convulsing him greatly, it came out
of him. The boy became like one dead, so much that most of them said,
``He is dead.'' \bibverse{27} But Jesus took him by the hand and raised
him up; and he arose.
\bibverse{28} When he had come into the house, his disciples asked him
privately, ``Why couldn't we cast it out?''
\bibverse{29} He said to them, ``This kind can come out by nothing but
by prayer and fasting.''
\bibverse{30} They went out from there and passed through Galilee. He
didn't want anyone to know it, \bibverse{31} for he was teaching his
disciples, and said to them, ``The Son of Man is being handed over to
the hands of men, and they will kill him; and when he is killed, on the
third day he will rise again.''
\bibverse{32} But they didn't understand the saying, and were afraid to
ask him.
\bibverse{33} He came to Capernaum, and when he was in the house he
asked them, ``What were you arguing among yourselves on the way?''
\bibverse{34} But they were silent, for they had disputed with one
another on the way about who was the greatest.
\bibverse{35} He sat down and called the twelve; and he said to them,
``If any man wants to be first, he shall be last of all, and servant of
all.'' \bibverse{36} He took a little child and set him in the middle of
them. Taking him in his arms, he said to them, \bibverse{37} ``Whoever
receives one such little child in my name receives me; and whoever
receives me, doesn't receive me, but him who sent me.''
\bibverse{38} John said to him, ``Teacher, we saw someone who doesn't
follow us casting out demons in your name; and we forbade him, because
he doesn't follow us.''
\bibverse{39} But Jesus said, ``Don't forbid him, for there is no one
who will do a mighty work in my name and be able quickly to speak evil
of me. \bibverse{40} For whoever is not against us is on our side.
\bibverse{41} For whoever will give you a cup of water to drink in my
name because you are Christ's, most certainly I tell you, he will in no
way lose his reward.
\bibverse{42} ``Whoever will cause one of these little ones who believe
in me to stumble, it would be better for him if he were thrown into the
sea with a millstone hung around his neck. \bibverse{43} If your hand
causes you to stumble, cut it off. It is better for you to enter into
life maimed, rather than having your two hands to go into Gehenna, +
9:43 or, Hell into the unquenchable fire, \bibverse{44} `where their
worm doesn't die, and the fire is not quenched.' + 9:44 Isaiah 66:24 +
9:44 NU omits verse 44. \bibverse{45} If your foot causes you to
stumble, cut it off. It is better for you to enter into life lame,
rather than having your two feet to be cast into Gehenna, + 9:45 or,
Hell into the fire that will never be quenched--- \bibverse{46} `where
their worm doesn't die, and the fire is not quenched.' + 9:46 NU omits
verse 46. \bibverse{47} If your eye causes you to stumble, throw it out.
It is better for you to enter into God's Kingdom with one eye, rather
than having two eyes to be cast into the Gehenna+ 9:47 or, Hell of fire,
\bibverse{48} `where their worm doesn't die, and the fire is not
quenched.' + 9:48 Isaiah 66:24 \bibverse{49} For everyone will be salted
with fire, and every sacrifice will be seasoned with salt. \bibverse{50}
Salt is good, but if the salt has lost its saltiness, with what will you
season it? Have salt in yourselves, and be at peace with one another.''
\hypertarget{section-9}{%
\section{10}\label{section-9}}
\bibverse{1} He arose from there and came into the borders of Judea and
beyond the Jordan. Multitudes came together to him again. As he usually
did, he was again teaching them.
\bibverse{2} Pharisees came to him testing him, and asked him, ``Is it
lawful for a man to divorce his wife?''
\bibverse{3} He answered, ``What did Moses command you?''
\bibverse{4} They said, ``Moses allowed a certificate of divorce to be
written, and to divorce her.''
\bibverse{5} But Jesus said to them, ``For your hardness of heart, he
wrote you this commandment. \bibverse{6} But from the beginning of the
creation, God made them male and female.+ 10:6 Genesis 1:27 \bibverse{7}
For this cause a man will leave his father and mother, and will join to
his wife, \bibverse{8} and the two will become one flesh,+ 10:8 Genesis
2:24 so that they are no longer two, but one flesh. \bibverse{9} What
therefore God has joined together, let no man separate.''
\bibverse{10} In the house, his disciples asked him again about the same
matter. \bibverse{11} He said to them, ``Whoever divorces his wife and
marries another commits adultery against her. \bibverse{12} If a woman
herself divorces her husband and marries another, she commits
adultery.''
\bibverse{13} They were bringing to him little children, that he should
touch them, but the disciples rebuked those who were bringing them.
\bibverse{14} But when Jesus saw it, he was moved with indignation and
said to them, ``Allow the little children to come to me! Don't forbid
them, for God's Kingdom belongs to such as these. \bibverse{15} Most
certainly I tell you, whoever will not receive God's Kingdom like a
little child, he will in no way enter into it.'' \bibverse{16} He took
them in his arms and blessed them, laying his hands on them.
\bibverse{17} As he was going out into the way, one ran to him, knelt
before him, and asked him, ``Good Teacher, what shall I do that I may
inherit eternal life?''
\bibverse{18} Jesus said to him, ``Why do you call me good? No one is
good except one---God. \bibverse{19} You know the commandments: `Do not
murder,' `Do not commit adultery,' `Do not steal,' `Do not give false
testimony,' `Do not defraud,' `Honor your father and mother.'\,''+ 10:19
Exodus 20:12-16; Deuteronomy 5:16-20
\bibverse{20} He said to him, ``Teacher, I have observed all these
things from my youth.''
\bibverse{21} Jesus looking at him loved him, and said to him, ``One
thing you lack. Go, sell whatever you have and give to the poor, and you
will have treasure in heaven; and come, follow me, taking up the
cross.''
\bibverse{22} But his face fell at that saying, and he went away
sorrowful, for he was one who had great possessions.
\bibverse{23} Jesus looked around and said to his disciples, ``How
difficult it is for those who have riches to enter into God's Kingdom!''
\bibverse{24} The disciples were amazed at his words. But Jesus answered
again, ``Children, how hard it is for those who trust in riches to enter
into God's Kingdom! \bibverse{25} It is easier for a camel to go through
a needle's eye than for a rich man to enter into God's Kingdom.''
\bibverse{26} They were exceedingly astonished, saying to him, ``Then
who can be saved?''
\bibverse{27} Jesus, looking at them, said, ``With men it is impossible,
but not with God, for all things are possible with God.''
\bibverse{28} Peter began to tell him, ``Behold, we have left all and
have followed you.''
\bibverse{29} Jesus said, ``Most certainly I tell you, there is no one
who has left house, or brothers, or sisters, or father, or mother, or
wife, or children, or land, for my sake, and for the sake of the Good
News, \bibverse{30} but he will receive one hundred times more now in
this time: houses, brothers, sisters, mothers, children, and land, with
persecutions; and in the age to come eternal life. \bibverse{31} But
many who are first will be last, and the last first.''
\bibverse{32} They were on the way, going up to Jerusalem; and Jesus was
going in front of them, and they were amazed; and those who followed
were afraid. He again took the twelve, and began to tell them the things
that were going to happen to him. \bibverse{33} ``Behold, we are going
up to Jerusalem. The Son of Man will be delivered to the chief priests
and the scribes. They will condemn him to death, and will deliver him to
the Gentiles. \bibverse{34} They will mock him, spit on him, scourge
him, and kill him. On the third day he will rise again.''
\bibverse{35} James and John, the sons of Zebedee, came near to him,
saying, ``Teacher, we want you to do for us whatever we will ask.''
\bibverse{36} He said to them, ``What do you want me to do for you?''
\bibverse{37} They said to him, ``Grant to us that we may sit, one at
your right hand and one at your left hand, in your glory.''
\bibverse{38} But Jesus said to them, ``You don't know what you are
asking. Are you able to drink the cup that I drink, and to be baptized
with the baptism that I am baptized with?''
\bibverse{39} They said to him, ``We are able.''
Jesus said to them, ``You shall indeed drink the cup that I drink, and
you shall be baptized with the baptism that I am baptized with;
\bibverse{40} but to sit at my right hand and at my left hand is not
mine to give, but for whom it has been prepared.''
\bibverse{41} When the ten heard it, they began to be indignant toward
James and John.
\bibverse{42} Jesus summoned them and said to them, ``You know that they
who are recognized as rulers over the nations lord it over them, and
their great ones exercise authority over them. \bibverse{43} But it
shall not be so among you, but whoever wants to become great among you
shall be your servant. \bibverse{44} Whoever of you wants to become
first among you shall be bondservant of all. \bibverse{45} For the Son
of Man also came not to be served but to serve, and to give his life as
a ransom for many.''
\bibverse{46} They came to Jericho. As he went out from Jericho with his
disciples and a great multitude, the son of Timaeus, Bartimaeus, a blind
beggar, was sitting by the road. \bibverse{47} When he heard that it was
Jesus the Nazarene, he began to cry out and say, ``Jesus, you son of
David, have mercy on me!'' \bibverse{48} Many rebuked him, that he
should be quiet, but he cried out much more, ``You son of David, have
mercy on me!''
\bibverse{49} Jesus stood still and said, ``Call him.''
They called the blind man, saying to him, ``Cheer up! Get up. He is
calling you!''
\bibverse{50} He, casting away his cloak, sprang up, and came to Jesus.
\bibverse{51} Jesus asked him, ``What do you want me to do for you?''
The blind man said to him, ``Rabboni,+ 10:51 Rabboni is a
transliteration of the Hebrew word for ``great teacher.'' that I may see
again.''
\bibverse{52} Jesus said to him, ``Go your way. Your faith has made you
well.'' Immediately he received his sight and followed Jesus on the way.
\hypertarget{section-10}{%
\section{11}\label{section-10}}
\bibverse{1} When they came near to Jerusalem, to Bethsphage+ 11:1 TR \&
NU read ``Bethphage'' instead of ``Bethsphage'' and Bethany, at the
Mount of Olives, he sent two of his disciples \bibverse{2} and said to
them, ``Go your way into the village that is opposite you. Immediately
as you enter into it, you will find a young donkey tied, on which no one
has sat. Untie him and bring him. \bibverse{3} If anyone asks you, `Why
are you doing this?' say, `The Lord needs him;' and immediately he will
send him back here.''
\bibverse{4} They went away, and found a young donkey tied at the door
outside in the open street, and they untied him. \bibverse{5} Some of
those who stood there asked them, ``What are you doing, untying the
young donkey?'' \bibverse{6} They said to them just as Jesus had said,
and they let them go.
\bibverse{7} They brought the young donkey to Jesus and threw their
garments on it, and Jesus sat on it. \bibverse{8} Many spread their
garments on the way, and others were cutting down branches from the
trees and spreading them on the road. \bibverse{9} Those who went in
front and those who followed cried out, ``Hosanna!+ 11:9 ``Hosanna''
means ``save us'' or ``help us, we pray''. Blessed is he who comes in
the name of the Lord!+ 11:9 Psalm 118:25-26 \bibverse{10} Blessed is the
kingdom of our father David that is coming in the name of the Lord!
Hosanna in the highest!''
\bibverse{11} Jesus entered into the temple in Jerusalem. When he had
looked around at everything, it being now evening, he went out to
Bethany with the twelve.
\bibverse{12} The next day, when they had come out from Bethany, he was
hungry. \bibverse{13} Seeing a fig tree afar off having leaves, he came
to see if perhaps he might find anything on it. When he came to it, he
found nothing but leaves, for it was not the season for figs.
\bibverse{14} Jesus told it, ``May no one ever eat fruit from you
again!'' and his disciples heard it.
\bibverse{15} They came to Jerusalem, and Jesus entered into the temple
and began to throw out those who sold and those who bought in the
temple, and overthrew the money changers' tables and the seats of those
who sold the doves. \bibverse{16} He would not allow anyone to carry a
container through the temple. \bibverse{17} He taught, saying to them,
``Isn't it written, `My house will be called a house of prayer for all
the nations?'+ 11:17 Isaiah 56:7 But you have made it a den of
robbers!''+ 11:17 Jeremiah 7:11
\bibverse{18} The chief priests and the scribes heard it, and sought how
they might destroy him. For they feared him, because all the multitude
was astonished at his teaching.
\bibverse{19} When evening came, he went out of the city. \bibverse{20}
As they passed by in the morning, they saw the fig tree withered away
from the roots. \bibverse{21} Peter, remembering, said to him, ``Rabbi,
look! The fig tree which you cursed has withered away.''
\bibverse{22} Jesus answered them, ``Have faith in God. \bibverse{23}
For most certainly I tell you, whoever may tell this mountain, `Be taken
up and cast into the sea,' and doesn't doubt in his heart, but believes
that what he says is happening, he shall have whatever he says.
\bibverse{24} Therefore I tell you, all things whatever you pray and ask
for, believe that you have received them, and you shall have them.
\bibverse{25} Whenever you stand praying, forgive, if you have anything
against anyone; so that your Father, who is in heaven, may also forgive
you your transgressions. \bibverse{26} But if you do not forgive,
neither will your Father in heaven forgive your transgressions.''+ 11:26
NU omits verse 26.
\bibverse{27} They came again to Jerusalem, and as he was walking in the
temple, the chief priests, the scribes, and the elders came to him,
\bibverse{28} and they began saying to him, ``By what authority do you
do these things? Or who gave you this authority to do these things?''
\bibverse{29} Jesus said to them, ``I will ask you one question. Answer
me, and I will tell you by what authority I do these things.
\bibverse{30} The baptism of John---was it from heaven, or from men?
Answer me.''
\bibverse{31} They reasoned with themselves, saying, ``If we should say,
`From heaven;' he will say, `Why then did you not believe him?'
\bibverse{32} If we should say, `From men'\,''---they feared the people,
for all held John to really be a prophet. \bibverse{33} They answered
Jesus, ``We don't know.''
Jesus said to them, ``Neither will I tell you by what authority I do
these things.''
\hypertarget{section-11}{%
\section{12}\label{section-11}}
\bibverse{1} He began to speak to them in parables. ``A man planted a
vineyard, put a hedge around it, dug a pit for the wine press, built a
tower, rented it out to a farmer, and went into another country.
\bibverse{2} When it was time, he sent a servant to the farmer to get
from the farmer his share of the fruit of the vineyard. \bibverse{3}
They took him, beat him, and sent him away empty. \bibverse{4} Again, he
sent another servant to them; and they threw stones at him, wounded him
in the head, and sent him away shamefully treated. \bibverse{5} Again he
sent another, and they killed him, and many others, beating some, and
killing some. \bibverse{6} Therefore still having one, his beloved son,
he sent him last to them, saying, `They will respect my son.'
\bibverse{7} But those farmers said among themselves, `This is the heir.
Come, let's kill him, and the inheritance will be ours.' \bibverse{8}
They took him, killed him, and cast him out of the vineyard.
\bibverse{9} What therefore will the lord of the vineyard do? He will
come and destroy the farmers, and will give the vineyard to others.
\bibverse{10} Haven't you even read this Scripture: `The stone which the
builders rejected was made the head of the corner. \bibverse{11} This
was from the Lord. It is marvelous in our eyes'?''+ 12:11 Psalm
118:22-23
\bibverse{12} They tried to seize him, but they feared the multitude;
for they perceived that he spoke the parable against them. They left him
and went away. \bibverse{13} They sent some of the Pharisees and the
Herodians to him, that they might trap him with words. \bibverse{14}
When they had come, they asked him, ``Teacher, we know that you are
honest, and don't defer to anyone; for you aren't partial to anyone, but
truly teach the way of God. Is it lawful to pay taxes to Caesar, or not?
\bibverse{15} Shall we give, or shall we not give?''
But he, knowing their hypocrisy, said to them, ``Why do you test me?
Bring me a denarius, that I may see it.''
\bibverse{16} They brought it.
He said to them, ``Whose is this image and inscription?''
They said to him, ``Caesar's.''
\bibverse{17} Jesus answered them, ``Render to Caesar the things that
are Caesar's, and to God the things that are God's.''
They marveled greatly at him.
\bibverse{18} Some Sadducees, who say that there is no resurrection,
came to him. They asked him, saying, \bibverse{19} ``Teacher, Moses
wrote to us, `If a man's brother dies and leaves a wife behind him, and
leaves no children, that his brother should take his wife and raise up
offspring for his brother.' \bibverse{20} There were seven brothers. The
first took a wife, and dying left no offspring. \bibverse{21} The second
took her, and died, leaving no children behind him. The third likewise;
\bibverse{22} and the seven took her and left no children. Last of all
the woman also died. \bibverse{23} In the resurrection, when they rise,
whose wife will she be of them? For the seven had her as a wife.''
\bibverse{24} Jesus answered them, ``Isn't this because you are
mistaken, not knowing the Scriptures nor the power of God? \bibverse{25}
For when they will rise from the dead, they neither marry nor are given
in marriage, but are like angels in heaven. \bibverse{26} But about the
dead, that they are raised, haven't you read in the book of Moses about
the Bush, how God spoke to him, saying, `I am the God of Abraham, the
God of Isaac, and the God of Jacob'?+ 12:26 Exodus 3:6 \bibverse{27} He
is not the God of the dead, but of the living. You are therefore badly
mistaken.''
\bibverse{28} One of the scribes came and heard them questioning
together, and knowing that he had answered them well, asked him, ``Which
commandment is the greatest of all?''
\bibverse{29} Jesus answered, ``The greatest is: `Hear, Israel, the Lord
our God, the Lord is one. \bibverse{30} You shall love the Lord your God
with all your heart, with all your soul, with all your mind, and with
all your strength.'+ 12:30 Deuteronomy 6:4-5 This is the first
commandment. \bibverse{31} The second is like this: `You shall love your
neighbor as yourself.'+ 12:31 Leviticus 19:18 There is no other
commandment greater than these.''
\bibverse{32} The scribe said to him, ``Truly, teacher, you have said
well that he is one, and there is none other but he; \bibverse{33} and
to love him with all the heart, with all the understanding, all the
soul, and with all the strength, and to love his neighbor as himself, is
more important than all whole burnt offerings and sacrifices.''
\bibverse{34} When Jesus saw that he answered wisely, he said to him,
``You are not far from God's Kingdom.''
No one dared ask him any question after that. \bibverse{35} Jesus
responded, as he taught in the temple, ``How is it that the scribes say
that the Christ is the son of David? \bibverse{36} For David himself
said in the Holy Spirit, `The Lord said to my Lord, ``Sit at my right
hand, until I make your enemies the footstool of your feet.''\,'+ 12:36
Psalm 110:1
\bibverse{37} Therefore David himself calls him Lord, so how can he be
his son?''
The common people heard him gladly. \bibverse{38} In his teaching he
said to them, ``Beware of the scribes, who like to walk in long robes,
and to get greetings in the marketplaces, \bibverse{39} and to get the
best seats in the synagogues and the best places at feasts,
\bibverse{40} those who devour widows' houses, and for a pretense make
long prayers. These will receive greater condemnation.''
\bibverse{41} Jesus sat down opposite the treasury and saw how the
multitude cast money into the treasury. Many who were rich cast in much.
\bibverse{42} A poor widow came and she cast in two small brass coins,+
12:42 literally, lepta (or widow's mites). Lepta are very small brass
coins worth half a quadrans each, which is a quarter of the copper
assarion. Lepta are worth less than 1\% of an agricultural worker's
daily wages. which equal a quadrans coin.+ 12:42 A quadrans is a coin
worth about 1/64 of a denarius. A denarius is about one day's wages for
an agricultural laborer. \bibverse{43} He called his disciples to
himself and said to them, ``Most certainly I tell you, this poor widow
gave more than all those who are giving into the treasury, \bibverse{44}
for they all gave out of their abundance, but she, out of her poverty,
gave all that she had to live on.''
\hypertarget{section-12}{%
\section{13}\label{section-12}}
\bibverse{1} As he went out of the temple, one of his disciples said to
him, ``Teacher, see what kind of stones and what kind of buildings!''
\bibverse{2} Jesus said to him, ``Do you see these great buildings?
There will not be left here one stone on another, which will not be
thrown down.''
\bibverse{3} As he sat on the Mount of Olives opposite the temple,
Peter, James, John, and Andrew asked him privately, \bibverse{4} ``Tell
us, when will these things be? What is the sign that these things are
all about to be fulfilled?''
\bibverse{5} Jesus, answering, began to tell them, ``Be careful that no
one leads you astray. \bibverse{6} For many will come in my name,
saying, `I am he!'+ 13:6 or, ``I AM!'' and will lead many astray.
\bibverse{7} ``When you hear of wars and rumors of wars, don't be
troubled. For those must happen, but the end is not yet. \bibverse{8}
For nation will rise against nation, and kingdom against kingdom. There
will be earthquakes in various places. There will be famines and
troubles. These things are the beginning of birth pains.
\bibverse{9} ``But watch yourselves, for they will deliver you up to
councils. You will be beaten in synagogues. You will stand before rulers
and kings for my sake, for a testimony to them. \bibverse{10} The Good
News must first be preached to all the nations. \bibverse{11} When they
lead you away and deliver you up, don't be anxious beforehand or
premeditate what you will say, but say whatever will be given you in
that hour. For it is not you who speak, but the Holy Spirit.
\bibverse{12} ``Brother will deliver up brother to death, and the father
his child. Children will rise up against parents and cause them to be
put to death. \bibverse{13} You will be hated by all men for my name's
sake, but he who endures to the end will be saved.
\bibverse{14} ``But when you see the abomination of desolation,+ 13:14
Daniel 9:17; 11:31; 12:11 spoken of by Daniel the prophet, standing
where it ought not'' (let the reader understand), ``then let those who
are in Judea flee to the mountains, \bibverse{15} and let him who is on
the housetop not go down, nor enter in, to take anything out of his
house. \bibverse{16} Let him who is in the field not return back to take
his cloak. \bibverse{17} But woe to those who are with child and to
those who nurse babies in those days! \bibverse{18} Pray that your
flight won't be in the winter. \bibverse{19} For in those days there
will be oppression, such as there has not been the like from the
beginning of the creation which God created until now, and never will
be. \bibverse{20} Unless the Lord had shortened the days, no flesh would
have been saved; but for the sake of the chosen ones, whom he picked
out, he shortened the days. \bibverse{21} Then if anyone tells you,
`Look, here is the Christ!' or, `Look, there!' don't believe it.
\bibverse{22} For false christs and false prophets will arise and will
show signs and wonders, that they may lead astray, if possible, even the
chosen ones. \bibverse{23} But you watch.
``Behold, I have told you all things beforehand. \bibverse{24} But in
those days, after that oppression, the sun will be darkened, the moon
will not give its light, \bibverse{25} the stars will be falling from
the sky, and the powers that are in the heavens will be shaken.+ 13:25
Isaiah 13:10; 34:4 \bibverse{26} Then they will see the Son of Man
coming in clouds with great power and glory. \bibverse{27} Then he will
send out his angels, and will gather together his chosen ones from the
four winds, from the ends of the earth to the ends of the sky.
\bibverse{28} ``Now from the fig tree, learn this parable. When the
branch has now become tender and produces its leaves, you know that the
summer is near; \bibverse{29} even so you also, when you see these
things coming to pass, know that it is near, at the doors. \bibverse{30}
Most certainly I say to you, this generation+ 13:30 The word translated
``generation'' (genea) could also be translated ``race'', ``family'', or
``people''. will not pass away until all these things happen.
\bibverse{31} Heaven and earth will pass away, but my words will not
pass away.
\bibverse{32} ``But of that day or that hour no one knows---not even the
angels in heaven, nor the Son, but only the Father. \bibverse{33} Watch,
keep alert, and pray; for you don't know when the time is.
\bibverse{34} ``It is like a man traveling to another country, having
left his house and given authority to his servants, and to each one his
work, and also commanded the doorkeeper to keep watch. \bibverse{35}
Watch therefore, for you don't know when the lord of the house is
coming---whether at evening, or at midnight, or when the rooster crows,
or in the morning; \bibverse{36} lest, coming suddenly, he might find
you sleeping. \bibverse{37} What I tell you, I tell all: Watch!''
\hypertarget{section-13}{%
\section{14}\label{section-13}}
\bibverse{1} It was now two days before the Passover and the Feast of
Unleavened Bread, and the chief priests and the scribes sought how they
might seize him by deception and kill him. \bibverse{2} For they said,
``Not during the feast, because there might be a riot among the
people.''
\bibverse{3} While he was at Bethany, in the house of Simon the leper,
as he sat at the table, a woman came having an alabaster jar of ointment
of pure nard---very costly. She broke the jar and poured it over his
head. \bibverse{4} But there were some who were indignant among
themselves, saying, ``Why has this ointment been wasted? \bibverse{5}
For this might have been sold for more than three hundred denarii+ 14:5
300 denarii was about a year's wages for an agricultural laborer. and
given to the poor.'' So they grumbled against her.
\bibverse{6} But Jesus said, ``Leave her alone. Why do you trouble her?
She has done a good work for me. \bibverse{7} For you always have the
poor with you, and whenever you want to, you can do them good; but you
will not always have me. \bibverse{8} She has done what she could. She
has anointed my body beforehand for the burying. \bibverse{9} Most
certainly I tell you, wherever this Good News may be preached throughout
the whole world, that which this woman has done will also be spoken of
for a memorial of her.''
\bibverse{10} Judas Iscariot, who was one of the twelve, went away to
the chief priests, that he might deliver him to them. \bibverse{11}
They, when they heard it, were glad, and promised to give him money. He
sought how he might conveniently deliver him.
\bibverse{12} On the first day of unleavened bread, when they sacrificed
the Passover, his disciples asked him, ``Where do you want us to go and
prepare that you may eat the Passover?''
\bibverse{13} He sent two of his disciples and said to them, ``Go into
the city, and there a man carrying a pitcher of water will meet you.
Follow him, \bibverse{14} and wherever he enters in, tell the master of
the house, `The Teacher says, ``Where is the guest room, where I may eat
the Passover with my disciples?''\,' \bibverse{15} He will himself show
you a large upper room furnished and ready. Get ready for us there.''
\bibverse{16} His disciples went out, and came into the city, and found
things as he had said to them, and they prepared the Passover.
\bibverse{17} When it was evening he came with the twelve. \bibverse{18}
As they sat and were eating, Jesus said, ``Most certainly I tell you,
one of you will betray me---he who eats with me.''
\bibverse{19} They began to be sorrowful, and to ask him one by one,
``Surely not I?'' And another said, ``Surely not I?''
\bibverse{20} He answered them, ``It is one of the twelve, he who dips
with me in the dish. \bibverse{21} For the Son of Man goes as it is
written about him, but woe to that man by whom the Son of Man is
betrayed! It would be better for that man if he had not been born.''
\bibverse{22} As they were eating, Jesus took bread, and when he had
blessed it, he broke it and gave to them, and said, ``Take, eat. This is
my body.''
\bibverse{23} He took the cup, and when he had given thanks, he gave to
them. They all drank of it. \bibverse{24} He said to them, ``This is my
blood of the new covenant, which is poured out for many. \bibverse{25}
Most certainly I tell you, I will no more drink of the fruit of the vine
until that day when I drink it anew in God's Kingdom.'' \bibverse{26}
When they had sung a hymn, they went out to the Mount of Olives.
\bibverse{27} Jesus said to them, ``All of you will be made to stumble
because of me tonight, for it is written, `I will strike the shepherd,
and the sheep will be scattered.'+ 14:27 Zechariah 13:7 \bibverse{28}
However, after I am raised up, I will go before you into Galilee.''
\bibverse{29} But Peter said to him, ``Although all will be offended,
yet I will not.''
\bibverse{30} Jesus said to him, ``Most certainly I tell you that you
today, even this night, before the rooster crows twice, you will deny me
three times.''
\bibverse{31} But he spoke all the more, ``If I must die with you, I
will not deny you.'' They all said the same thing.
\bibverse{32} They came to a place which was named Gethsemane. He said
to his disciples, ``Sit here while I pray.'' \bibverse{33} He took with
him Peter, James, and John, and began to be greatly troubled and
distressed. \bibverse{34} He said to them, ``My soul is exceedingly
sorrowful, even to death. Stay here and watch.''
\bibverse{35} He went forward a little, and fell on the ground, and
prayed that if it were possible, the hour might pass away from him.
\bibverse{36} He said, ``Abba,+ 14:36 Abba is a Greek spelling for the
Aramaic word for ``Father'' or ``Daddy'' used in a familiar, respectful,
and loving way. Father, all things are possible to you. Please remove
this cup from me. However, not what I desire, but what you desire.''
\bibverse{37} He came and found them sleeping, and said to Peter,
``Simon, are you sleeping? Couldn't you watch one hour? \bibverse{38}
Watch and pray, that you may not enter into temptation. The spirit
indeed is willing, but the flesh is weak.''
\bibverse{39} Again he went away and prayed, saying the same words.
\bibverse{40} Again he returned and found them sleeping, for their eyes
were very heavy; and they didn't know what to answer him. \bibverse{41}
He came the third time and said to them, ``Sleep on now, and take your
rest. It is enough. The hour has come. Behold, the Son of Man is
betrayed into the hands of sinners. \bibverse{42} Arise! Let's get
going. Behold, he who betrays me is at hand.''
\bibverse{43} Immediately, while he was still speaking, Judas, one of
the twelve, came---and with him a multitude with swords and clubs, from
the chief priests, the scribes, and the elders. \bibverse{44} Now he who
betrayed him had given them a sign, saying, ``Whomever I will kiss, that
is he. Seize him, and lead him away safely.'' \bibverse{45} When he had
come, immediately he came to him and said, ``Rabbi! Rabbi!'' and kissed
him. \bibverse{46} They laid their hands on him and seized him.
\bibverse{47} But a certain one of those who stood by drew his sword and
struck the servant of the high priest, and cut off his ear.
\bibverse{48} Jesus answered them, ``Have you come out, as against a
robber, with swords and clubs to seize me? \bibverse{49} I was daily
with you in the temple teaching, and you didn't arrest me. But this is
so that the Scriptures might be fulfilled.''
\bibverse{50} They all left him, and fled. \bibverse{51} A certain young
man followed him, having a linen cloth thrown around himself over his
naked body. The young men grabbed him, \bibverse{52} but he left the
linen cloth and fled from them naked. \bibverse{53} They led Jesus away
to the high priest. All the chief priests, the elders, and the scribes
came together with him.
\bibverse{54} Peter had followed him from a distance, until he came into
the court of the high priest. He was sitting with the officers, and
warming himself in the light of the fire. \bibverse{55} Now the chief
priests and the whole council sought witnesses against Jesus to put him
to death, and found none. \bibverse{56} For many gave false testimony
against him, and their testimony didn't agree with each other.
\bibverse{57} Some stood up and gave false testimony against him,
saying, \bibverse{58} ``We heard him say, `I will destroy this temple
that is made with hands, and in three days I will build another made
without hands.'\,'' \bibverse{59} Even so, their testimony didn't agree.
\bibverse{60} The high priest stood up in the middle, and asked Jesus,
``Have you no answer? What is it which these testify against you?''
\bibverse{61} But he stayed quiet, and answered nothing. Again the high
priest asked him, ``Are you the Christ, the Son of the Blessed?''
\bibverse{62} Jesus said, ``I am. You will see the Son of Man sitting at
the right hand of Power, and coming with the clouds of the sky.''
\bibverse{63} The high priest tore his clothes and said, ``What further
need have we of witnesses? \bibverse{64} You have heard the blasphemy!
What do you think?'' They all condemned him to be worthy of death.
\bibverse{65} Some began to spit on him, and to cover his face, and to
beat him with fists, and to tell him, ``Prophesy!'' The officers struck
him with the palms of their hands.
\bibverse{66} As Peter was in the courtyard below, one of the maids of
the high priest came, \bibverse{67} and seeing Peter warming himself,
she looked at him and said, ``You were also with the Nazarene, Jesus!''
\bibverse{68} But he denied it, saying, ``I neither know nor understand
what you are saying.'' He went out on the porch, and the rooster crowed.
\bibverse{69} The maid saw him and began again to tell those who stood
by, ``This is one of them.'' \bibverse{70} But he again denied it. After
a little while again those who stood by said to Peter, ``You truly are
one of them, for you are a Galilean, and your speech shows it.''
\bibverse{71} But he began to curse and to swear, ``I don't know this
man of whom you speak!''
\bibverse{72} The rooster crowed the second time. Peter remembered the
words that Jesus said to him, ``Before the rooster crows twice, you will
deny me three times.'' When he thought about that, he wept.
\hypertarget{section-14}{%
\section{15}\label{section-14}}
\bibverse{1} Immediately in the morning the chief priests, with the
elders, scribes, and the whole council, held a consultation, bound
Jesus, carried him away, and delivered him up to Pilate. \bibverse{2}
Pilate asked him, ``Are you the King of the Jews?''
He answered, ``So you say.''
\bibverse{3} The chief priests accused him of many things. \bibverse{4}
Pilate again asked him, ``Have you no answer? See how many things they
testify against you!''
\bibverse{5} But Jesus made no further answer, so that Pilate marveled.
\bibverse{6} Now at the feast he used to release to them one prisoner,
whomever they asked of him. \bibverse{7} There was one called Barabbas,
bound with his fellow insurgents, men who in the insurrection had
committed murder. \bibverse{8} The multitude, crying aloud, began to ask
him to do as he always did for them. \bibverse{9} Pilate answered them,
saying, ``Do you want me to release to you the King of the Jews?''
\bibverse{10} For he perceived that for envy the chief priests had
delivered him up. \bibverse{11} But the chief priests stirred up the
multitude, that he should release Barabbas to them instead.
\bibverse{12} Pilate again asked them, ``What then should I do to him
whom you call the King of the Jews?''
\bibverse{13} They cried out again, ``Crucify him!''
\bibverse{14} Pilate said to them, ``Why, what evil has he done?''
But they cried out exceedingly, ``Crucify him!''
\bibverse{15} Pilate, wishing to please the multitude, released Barabbas
to them, and handed over Jesus, when he had flogged him, to be
crucified.
\bibverse{16} The soldiers led him away within the court, which is the
Praetorium; and they called together the whole cohort. \bibverse{17}
They clothed him with purple; and weaving a crown of thorns, they put it
on him. \bibverse{18} They began to salute him, ``Hail, King of the
Jews!'' \bibverse{19} They struck his head with a reed and spat on him,
and bowing their knees, did homage to him. \bibverse{20} When they had
mocked him, they took the purple cloak off him, and put his own garments
on him. They led him out to crucify him.
\bibverse{21} They compelled one passing by, coming from the country,
Simon of Cyrene, the father of Alexander and Rufus, to go with them that
he might bear his cross. \bibverse{22} They brought him to the place
called Golgotha, which is, being interpreted, ``The place of a skull.''
\bibverse{23} They offered him wine mixed with myrrh to drink, but he
didn't take it.
\bibverse{24} Crucifying him, they parted his garments among them,
casting lots on them, what each should take. \bibverse{25} It was the
third hour+ 15:25 9:00 a.m. when they crucified him. \bibverse{26} The
superscription of his accusation was written over him: ``THE KING OF THE
JEWS.'' \bibverse{27} With him they crucified two robbers, one on his
right hand, and one on his left. \bibverse{28} The Scripture was
fulfilled which says, ``He was counted with transgressors.''+ 15:28 NU
omits verse 28.
\bibverse{29} Those who passed by blasphemed him, wagging their heads
and saying, ``Ha! You who destroy the temple and build it in three days,
\bibverse{30} save yourself, and come down from the cross!''
\bibverse{31} Likewise, also the chief priests mocking among themselves
with the scribes said, ``He saved others. He can't save himself.
\bibverse{32} Let the Christ, the King of Israel, now come down from the
cross, that we may see and believe him.''+ 15:32 TR omits ``him'' Those
who were crucified with him also insulted him.
\bibverse{33} When the sixth hour+ 15:33 or, noon had come, there was
darkness over the whole land until the ninth hour.+ 15:33 3:00 p.m.
\bibverse{34} At the ninth hour Jesus cried with a loud voice, saying,
``Eloi, Eloi, lama sabachthani?'' which is, being interpreted, ``My God,
my God, why have you forsaken me?'' + 15:34 Psalm 22:1
\bibverse{35} Some of those who stood by, when they heard it, said,
``Behold, he is calling Elijah.''
\bibverse{36} One ran, and filling a sponge full of vinegar, put it on a
reed and gave it to him to drink, saying, ``Let him be. Let's see
whether Elijah comes to take him down.''
\bibverse{37} Jesus cried out with a loud voice, and gave up the spirit.
\bibverse{38} The veil of the temple was torn in two from the top to the
bottom. \bibverse{39} When the centurion, who stood by opposite him, saw
that he cried out like this and breathed his last, he said, ``Truly this
man was the Son of God!''
\bibverse{40} There were also women watching from afar, among whom were
both Mary Magdalene and Mary the mother of James the less and of Joses,
and Salome; \bibverse{41} who, when he was in Galilee, followed him and
served him; and many other women who came up with him to Jerusalem.
\bibverse{42} When evening had now come, because it was the Preparation
Day, that is, the day before the Sabbath, \bibverse{43} Joseph of
Arimathaea, a prominent council member who also himself was looking for
God's Kingdom, came. He boldly went in to Pilate, and asked for Jesus'
body. \bibverse{44} Pilate was surprised to hear that he was already
dead; and summoning the centurion, he asked him whether he had been dead
long. \bibverse{45} When he found out from the centurion, he granted the
body to Joseph. \bibverse{46} He bought a linen cloth, and taking him
down, wound him in the linen cloth and laid him in a tomb which had been
cut out of a rock. He rolled a stone against the door of the tomb.
\bibverse{47} Mary Magdalene and Mary the mother of Joses, saw where he
was laid.
\hypertarget{section-15}{%
\section{16}\label{section-15}}
\bibverse{1} When the Sabbath was past, Mary Magdalene, and Mary the
mother of James, and Salome bought spices, that they might come and
anoint him. \bibverse{2} Very early on the first day of the week, they
came to the tomb when the sun had risen. \bibverse{3} They were saying
among themselves, ``Who will roll away the stone from the door of the
tomb for us?'' \bibverse{4} for it was very big. Looking up, they saw
that the stone was rolled back.
\bibverse{5} Entering into the tomb, they saw a young man sitting on the
right side, dressed in a white robe; and they were amazed. \bibverse{6}
He said to them, ``Don't be amazed. You seek Jesus, the Nazarene, who
has been crucified. He has risen! He is not here. See the place where
they laid him! \bibverse{7} But go, tell his disciples and Peter, `He
goes before you into Galilee. There you will see him, as he said to
you.'\,''
\bibverse{8} They went out,+ 16:8 TR adds ``quickly'' and fled from the
tomb, for trembling and astonishment had come on them. They said nothing
to anyone; for they were afraid.+ 16:8 One isolated manuscript omits
verses 9-20 but adds this ``short ending of Mark'' to the end of verse
8: They told all that had been commanded them briefly to those around
Peter. After that, Jesus himself sent them out, from east to west, with
the sacred and imperishable proclamation of eternal salvation.
\bibverse{9} + 16:9 NU includes the text of verses 9-20, but mentions in
a footnote that a few manuscripts omitted it. The translators of the
World English Bible regard Mark 16:9-20 as reliable based on an
overwhelming majority of textual evidence, including not only the
authoritative Greek Majority Text New Testament, but also the TR and
many of the manuscripts cited in the NU text. Now when he had risen early
on the first day of the week, he appeared first to Mary Magdalene, from
whom he had cast out seven demons. \bibverse{10} She went and told those
who had been with him, as they mourned and wept. \bibverse{11} When they
heard that he was alive and had been seen by her, they disbelieved.
\bibverse{12} After these things he was revealed in another form to two
of them as they walked, on their way into the country. \bibverse{13}
They went away and told it to the rest. They didn't believe them,
either.
\bibverse{14} Afterward he was revealed to the eleven themselves as they
sat at the table; and he rebuked them for their unbelief and hardness of
heart, because they didn't believe those who had seen him after he had
risen. \bibverse{15} He said to them, ``Go into all the world and preach
the Good News to the whole creation. \bibverse{16} He who believes and
is baptized will be saved; but he who disbelieves will be condemned.
\bibverse{17} These signs will accompany those who believe: in my name
they will cast out demons; they will speak with new languages;
\bibverse{18} they will take up serpents; and if they drink any deadly
thing, it will in no way hurt them; they will lay hands on the sick, and
they will recover.''
\bibverse{19} So then the Lord,+ 16:19 NU adds ``Jesus'' after he had
spoken to them, was received up into heaven and sat down at the right
hand of God. \bibverse{20} They went out and preached everywhere, the
Lord working with them and confirming the word by the signs that
followed. Amen.
| {"hexsha": "d9640f50254ec255d103c7f6f627619adca05dfb", "size": 90102, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Bibles/English.WorldEnglishBibleUS/out/tex/58-Mark.tex", "max_stars_repo_name": "bibliadelpueblo/BibliaLibre", "max_stars_repo_head_hexsha": "039ab9b18364ecade1d56695cb77c40ee62b1317", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Bibles/English.WorldEnglishBibleUS/out/tex/58-Mark.tex", "max_issues_repo_name": "bibliadelpueblo/BibliaLibre", "max_issues_repo_head_hexsha": "039ab9b18364ecade1d56695cb77c40ee62b1317", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Bibles/English.WorldEnglishBibleUS/out/tex/58-Mark.tex", "max_forks_repo_name": "bibliadelpueblo/BibliaLibre", "max_forks_repo_head_hexsha": "039ab9b18364ecade1d56695cb77c40ee62b1317", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.4458672875, "max_line_length": 72, "alphanum_fraction": 0.7532685179, "num_tokens": 24623} |
import logging
from PlatformNlp.metrics import register_metrices
from PlatformNlp.metrics.platform_metrics import PlatformMetrice
from PlatformNlp.tokenization import load_vocab
import json
import numpy as np
logger = logging.getLogger(__name__)
@register_metrices('word_embedding_metrics')
class WordEmbeddingMetrics(PlatformMetrice):
"""
    Word embedding metric: for each input token, reports the top-k most similar
    vocabulary words according to the predicted similarity scores.
    Args:
        args: parsed arguments, expected to provide `vocab_file`, `vocab_size` and `topk`
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('--topk', type=int, default=10, help='topk to show the similarity between words')
parser.add_argument('--vocab_file', type=str, help='vocab file')
def __init__(self, args, input_ids, label_ids, predict_scores, label_mapping):
super().__init__(args, input_ids, label_ids, predict_scores, label_mapping)
        self.args = args
self.input_ids = input_ids
self.label_ids = label_ids
self.predict_scores = predict_scores
self.label_mapping = label_mapping
def compute_metrices(self):
json_dict = dict()
vocab = load_vocab(self.args.vocab_file)
inv_vocab = {v: k for k, v in vocab.items()}
for (index, input_id) in enumerate(self.input_ids):
similarity = self.predict_scores[index]
similarity = np.array(similarity).reshape(-1, self.args.vocab_size)
for (i, id_word) in enumerate(input_id):
id_word = int(id_word)
similarity_i = list(similarity[i])
similarity_i_ = [-x for x in similarity_i]
similarity_i_ = np.array(similarity_i_)
char_words = inv_vocab.get(id_word, "UNK")
nearst_id = (similarity_i_).argsort()[1:self.args.topk + 1]
nearst_words = [inv_vocab.get(id_n, "UNK") for id_n in nearst_id]
json_dict[char_words] = nearst_words
json_data = json.dumps(json_dict)
return json_data
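# Example usage (a hypothetical sketch, not part of this module): the class is
# normally created through the 'word_embedding_metrics' registry entry, but a
# direct call could look roughly like the following, where `args` provides
# `vocab_file`, `vocab_size` and `topk`, `input_ids` are token id sequences,
# and `scores` are the model's per-token similarity scores over the vocabulary:
#
#   metric = WordEmbeddingMetrics(args, input_ids, None, scores, None)
#   print(metric.compute_metrices())  # JSON mapping each word to its top-k neighbours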
| {"hexsha": "2b96f670e8bdc194d9447ebde0cd91a866ac9a15", "size": 2113, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/nlp/PlatformNlp/metrics/word_embedding_metrics.py", "max_stars_repo_name": "jd-aig/aves2_algorithm_components", "max_stars_repo_head_hexsha": "12e06717596b824f7b0db115b95856606b97cd82", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-20T09:31:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-20T09:31:33.000Z", "max_issues_repo_path": "src/nlp/PlatformNlp/metrics/word_embedding_metrics.py", "max_issues_repo_name": "jd-aig/aves2_algorithm_components", "max_issues_repo_head_hexsha": "12e06717596b824f7b0db115b95856606b97cd82", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/nlp/PlatformNlp/metrics/word_embedding_metrics.py", "max_forks_repo_name": "jd-aig/aves2_algorithm_components", "max_forks_repo_head_hexsha": "12e06717596b824f7b0db115b95856606b97cd82", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-08T05:12:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-08T05:12:40.000Z", "avg_line_length": 38.4181818182, "max_line_length": 109, "alphanum_fraction": 0.6668244203, "include": true, "reason": "import numpy", "num_tokens": 466} |
import os
from numpy.testing import assert_allclose
from glue_geospatial.data_factory import is_geospatial, geospatial_reader
DATA = os.path.join(os.path.dirname(__file__), 'data')
def test_geospatial(tmpdir):
assert not is_geospatial(os.path.join(DATA, 'plain.tif'))
assert is_geospatial(os.path.join(DATA, 'simplegeo.tif'))
data = geospatial_reader(os.path.join(DATA, 'simplegeo.tif'))
assert data.shape == (18, 24)
assert_allclose(data.coords.pixel_to_world_values(9, 12),
(-3.9716666666666676, 2.9816666666666665))
| {"hexsha": "7880fe22b25f8f6ea2c9d2608fa81eabe491f2d3", "size": 567, "ext": "py", "lang": "Python", "max_stars_repo_path": "glue_geospatial/tests/test_basic.py", "max_stars_repo_name": "glue-viz/glue-satellite", "max_stars_repo_head_hexsha": "ac84454f862cf07367a7e59f1865c71652fb5d2d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2016-05-21T22:31:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-22T15:43:38.000Z", "max_issues_repo_path": "glue_geospatial/tests/test_basic.py", "max_issues_repo_name": "glue-viz/glue-satellite", "max_issues_repo_head_hexsha": "ac84454f862cf07367a7e59f1865c71652fb5d2d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2016-05-19T09:11:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-02T14:15:03.000Z", "max_forks_repo_path": "glue_geospatial/tests/test_basic.py", "max_forks_repo_name": "glue-viz/glue-satellite", "max_forks_repo_head_hexsha": "ac84454f862cf07367a7e59f1865c71652fb5d2d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-05-23T13:28:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-28T13:12:51.000Z", "avg_line_length": 28.35, "max_line_length": 73, "alphanum_fraction": 0.7283950617, "include": true, "reason": "from numpy", "num_tokens": 148} |
"""
Generate samples for calculating EER.
The format of data_dirs
data_dir
|--- {speaker name}.pkl
The format of {speaker name}.pkl
{speaker name}.pkl
|--- "filename": file name
|--- "embedding": embedding
"""
import os
from os.path import join as join_path
import random
from argparse import ArgumentParser
import joblib
import numpy as np
from tqdm import tqdm
def parse_args():
"""Parse command-line arguments."""
parser = ArgumentParser()
parser.add_argument("data_dirs", type=str, nargs="+")
parser.add_argument("-n", "--n_sample", type=int, required=True)
parser.add_argument("-o", "--output_path", type=str, required=True)
return vars(parser.parse_args())
def generate_sample(metadata, speaker1, speakers, label, nums):
"""
Calculate cosine similarity.
Generate positive or negative samples with the label.
"""
speaker1_embs = random.choices(metadata[speaker1], k=nums)
speakers_embs = []
for _ in range(nums):
speaker = random.choice(speakers)
speakers_embs.append(random.choice(metadata[speaker]))
    samples = []
    for speaker1_emb, speakers_emb in zip(speaker1_embs, speakers_embs):
        cosine_similarity = (
            np.inner(speaker1_emb["embedding"], speakers_emb["embedding"])
            / np.linalg.norm(speaker1_emb["embedding"])
            / np.linalg.norm(speakers_emb["embedding"])
        )
        samples.append((cosine_similarity, label))
    return samples
def prepare_eer_samples(data_dirs, output_path, n_sample):
"""generate eer samples"""
metadata = {}
for data_dir in data_dirs:
speaker_list = os.listdir(data_dir)
for speaker in speaker_list:
metadata[speaker] = joblib.load(join_path(data_dir, speaker))
samples = []
speakers = list(metadata.keys())
for speaker in tqdm(speakers):
negative_speakers = speakers.copy()
negative_speakers.remove(speaker)
samples += generate_sample(metadata, speaker, [speaker], 1, n_sample)
samples += generate_sample(metadata, speaker, negative_speakers, 0, n_sample)
joblib.dump(samples, output_path)
if __name__ == "__main__":
prepare_eer_samples(**parse_args())
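# Downstream use (a hypothetical sketch, not part of this script): the dumped file
# holds (cosine_similarity, label) pairs, so the equal error rate can be estimated
# from it, for example with scikit-learn's ROC utilities; the file name below is a
# placeholder for whatever was passed as --output_path:
#
#   import joblib
#   import numpy as np
#   from sklearn.metrics import roc_curve
#
#   scores, labels = zip(*joblib.load("samples.pkl"))
#   fpr, tpr, _ = roc_curve(labels, scores)
#   fnr = 1 - tpr
#   eer = fpr[np.nanargmin(np.abs(fnr - fpr))]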
| {"hexsha": "0fc090baa5a93f275c1e5e8535fcd0926bd1126b", "size": 2277, "ext": "py", "lang": "Python", "max_stars_repo_path": "Voice-conversion-evaluation/metrics/speaker_verification/equal_error_rate/prepare_eer_samples.py", "max_stars_repo_name": "ga642381/robust-vc", "max_stars_repo_head_hexsha": "90c5c51007db4544885e903d755fb95fadd1fb71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2022-01-12T12:38:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T02:25:48.000Z", "max_issues_repo_path": "Voice-conversion-evaluation/metrics/speaker_verification/equal_error_rate/prepare_eer_samples.py", "max_issues_repo_name": "ga642381/robust-vc", "max_issues_repo_head_hexsha": "90c5c51007db4544885e903d755fb95fadd1fb71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Voice-conversion-evaluation/metrics/speaker_verification/equal_error_rate/prepare_eer_samples.py", "max_forks_repo_name": "ga642381/robust-vc", "max_forks_repo_head_hexsha": "90c5c51007db4544885e903d755fb95fadd1fb71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7702702703, "max_line_length": 85, "alphanum_fraction": 0.6640316206, "include": true, "reason": "import numpy", "num_tokens": 510} |
Welcome Community Development graduate students, alumni, prospective students, and friends of the department! This page was created to allow CRD students to communicate important information, from recommended courses to thesis help to upcoming social events. If you have questions, just send out an mailto:[email protected] email for help.
Community-Based Research
3-year study of Bryte and Broderick
During the Winter Quarter of 2006, 2007, and 2008, the Community Development Graduate Group is working with the West Sacramento neighborhoods of Bryte and Broderick through our capstone course, CRD 440. This collaboration gives graduate students real-world experience with community-based research, while connecting the communities of Bryte and Broderick with UC Davis resources. Reports, maps, and presentations have resulted from this collaboration, and are available at the Research on West Sacramento wiki site for public access.
For more information, contact Bernadette Austin: MailTo(braustin AT ucdavis DOT edu)
Community Development students share research
At the annual Community Development symposium, graduate students share their research projects and interests with the public. All are welcome!
Building Community: 1st Annual Community Development Symposium
May 11: 2pm5pm
Putah Creek Lodge, UC Davis Campus
For details, contact Liz Sterba: MailTo(esterba AT ucdavis DOT edu)
Upcoming Events
Template:
Event Title
Date: Time
Location
Contact
Description
Discover Davis: Farmers Market
Mar 10: 10 am
Central Park (meet at C and 3rd)
Luis or Bernie: MailTo(braustin AT ucdavis DOT edu)
Join your fellow CRDers in learning more about the Davis Farmers
Market. A guided tour will be led by our very own Luis (and possibly
some of his friends). Afterwards, we will make brunch together (with
food purchased with CRD GSA funds). Marcie previously offered her
home, which unfortunately fits 5 people comfortably. If anyone can
offer a home with a large kitchen and seating area, please let me
know! I'll email out the final location that week.
Masters comprehensive exams due to Dean of Grad Studies
Mar 22 (for Winter 2007)
File Thesis with Committee
Mar 29 (for Spring 2007)
APRIL 2007
Discover Lake Berryessa: Cold Canyon Hike
Apr. 1: 10 am
Bernie: MailTo(braustin AT ucdavis DOT edu)
Join your rugged CRD friends on a moderate-intensity, 4-mile hike to a
bird's-eye view of Lake Berryessa. The hard work is well worth it! Bring
your own picnic lunch and a sturdy pair of shoes. We will meet at
Central Park and carpool there.
Poster Making Party
Saturday, April 21. Make your poster for the Symposium. Contact Leticia for details: MailTo(ljauregui AT ucdavis DOT edu)
Last Day to Register for Graduate Commencement
Apr. 27
Need help? Contact Wave Armijo in Grad Studies at MailTo(wmarmijo AT ucdavis DOT edu) or (530) 7523762.
MAY 2007
Last Day to Order Graduation Regalia
early May
It takes 8-10 weeks for delivery, so plan accordingly. Go to http://bookstore.ucdavis.edu/graduation/custom_regalia.cfm.
Discover San Francisco: SOMA (South of Market)
May 5: 11 am
Bernie: MailTo(braustin AT ucdavis DOT edu)
Come with your hip CRD friends to discover what the City by the Bay
has to offer. Explore the stores, eateries, and museums in the SOMA
district of San Francisco. The SF Museum of Modern Art, the California
Academy of Sciences, and the Cable Car turnabout are all within walking
distance. We'll meet at Central Park and carpool there. Parking and
bridge toll provided by CRD GSA funds.
Building Community: 1st Annual Community Development Symposium
May 11: 2pm5pm
Putah Creek Lodge, UC Davis Campus
Liz Sterba: MailTo(esterba AT ucdavis DOT edu)
JUNE 2007
Masters Theses due to Dean of Grad Studies
June 1 (for Spring 2007)
Masters comprehensive exams due to Dean of Grad Studies
June 14 (for Spring 2007)
Graduate Studies Graduation
Thurs. June 14: 6:30 pm
The Office of Graduate Studies, together with the Graduate Council and the Graduate Student Association, will hold the 2007 Graduate Studies Commencement Ceremony on Thursday, June 14, 2007 at 6:30 p.m. in the ARC Pavilion (formerly Recreation Hall) to honor the graduate degree recipients. Following the ceremony, there will be a reception on the lawn outside the ARC Pavilion for graduate degree recipients, faculty, and guests.
CRD Grad Group Commencement Celebration
June 15, 2007; 59pm
Putah Creek Lodge
Course Guide
See the Community Development Graduate Course Guide for a ton of advice from past and current students. It's got: course listings for specializations, recommended courses, and not-so-recommended courses. Learn from other students' experiments with the course catalog! It's great!
Getting Employed by the University
Working for the university is a great way to get your fees paid for and to keep yourself from scheduling foibles and commuting to an off-campus job every day. But beware: it also means your head will get entirely wrapped up in academia-land pseudo-logic. Proceed at your own risk...
Teaching Assistant Jobs
These jobs pay more money than Research Assistantships AND are covered by the Union. They are a great way to get paid to learn about a new topic and to get some teaching experience. Applications are a pain: you have to apply to each department separately, and they all have different due dates and application forms.
Hiring: Get started early, as soon as January before the year you want to be hired. Some TAships are handed out by the department in a single review process during the summer (like Community Development). Other departments (like American Studies) hire as early as March. However, if you really want first choice, you should start emailing professors of your favorite classes picked out from the UCD catalog in January or February, with a cover letter and a resume. If you show initiative and seem qualified, many professors will hire you before the big departmental roundup of applications. Make sure you get a contract, so you don't get forgotten or overlooked when someone better comes along; these contracts are issued as much as a year in advance of the position you are hired for.
These jobs are union! That means when your professor wants to work you more hours than you're getting paid for (which is way more common than it should be), the union comes in, saves you, and gets you back pay. Administration will discourage you from contacting the union, especially if your contract is being violated. Don't listen to them! The union is your friend, and once the wheels get rolling they really go to bat for TAs! But sometimes the TA union is slow and/or lost about replying to messages, so be persistent if you need their help.
Research Assistant Jobs
These jobs pay less, but might be a good match if you are on a professional track, or if you can find an RAship that matches your research interest. They also typically save you from having to switch bosses/jobs each quarter, as you often do with TAships. Many of these jobs are misnamed, and you do no research whatsoever, but instead coordinate programs, do outreach to the community, etc. No union here, so you better get yourself a fair boss.
The Complete Idiot's Guide to Graduating
Candidacy
http://gradstudies.ucdavis.edu/forms/master.pdf Candidacy Filing Form for a Master's degree: Complete this document and turn it in to Grad Studies (Mrak 2nd Floor). You must get the Department Chair to sign off on it; you should also have the Graduate Advisor make sure you have met all the degree requirements. If you are writing a thesis, you must have all members named, and the Thesis Chair must sign off on this document.
Filing Fee
When you are done with courses but not quite done with your thesis/exam, you go on Filing Fee. Here's everything you need to know:
http://www.gradstudies.ucdavis.edu/forms/filingfee.pdf: The actual filing fee form. It needs to be signed by your Thesis Chair (Exam-takers are not required to get this signature) and the Department Chair. Then you need to pay the filing fee at the Cashier's Office in Dutton Hall.
http://healthcenter.ucdavis.edu/pdf/ship06enroll.pdf: Continue getting medical insurance and services at the Health Center for one quarter. For 06-07, it's $633 per quarter (ouch!).
http://email.ucdavis.edu/forms/files/taf.pdf: Continue to get email through your UC Davis email address for up to one year, with the option to renew. Turn in the form with a copy of your filing fee form to 182 Shields Library.
Thesis Requirements
http://gradstudies.ucdavis.edu/students/filing.html UC Davis thesis requirements: EVERY crazy requirement, from margin size to paper type.
Graduate Commencement
http://www.gradstudies.ucdavis.edu/commencement/index.html: Get the lowdown on what you need to walk in the Graduate Graduation. Redundant, I know. For more information, contact Wave Armijo at MailTo(wmarmijo AT ucdavis DOT edu) or (530) 7523762.
http://bookstore.ucdavis.edu/graduation/custom_regalia.cfm: Robes, caps, tassels, and UC Ph.D. hoods are available for use free of charge and may be ordered online through your department from the Central Storehouse. Non-UC academic hoods may be rented online through your department office from the UC Davis Bookstore. PLEASE NOTE: Delivery of purchase orders takes 8-10 weeks.
Random Postings, Rantings, etc
http://invite.filmloop.com/x?1QVRiksGYQEs79kidkJJXdWFwIpCr4 Pics from Pumpkins and Potluck Halloween Party: Check out fun pics from our party on Oct. 30, 2006.
http://gradstudies.ucdavis.edu/students/handbook/index.html UC Davis Graduate Student Handbook: Topics include fees, degree completion, campus life, etc.
http://communitydevelopment.ucdavis.edu/program/courses_study_options.html CRD's course and graduation requirements: A list of the required courses, as well as detailed information about the thesis and exam requirements.
http://invite.filmloop.com/x?xeAu/PLH/4/ZzeUKTsVI/H4gcU03/K8q: Photos from the Cold Canyon Hike on Apr. 1, 2007.
| {"hexsha": "0c25277ed3c114bccafe19358fec12a9d1d0aaa7", "size": 9978, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Community_Development_Graduate_Group.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Community_Development_Graduate_Group.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Community_Development_Graduate_Group.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 65.6447368421, "max_line_length": 788, "alphanum_fraction": 0.8031669673, "num_tokens": 2336} |
\chapter{Wireless Channel Emulator}
\glsresetall
\label{chapter:emulator}
\note{Go through the example wireless channel emulator that we developed.} | {"hexsha": "211b4679226798a7106e769c875a8482b912c534", "size": 149, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "emulator.tex", "max_stars_repo_name": "mithro/pp4fpgas", "max_stars_repo_head_hexsha": "ddede5bd337f4fa33915d7e4ca98f97a7b31413a", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 418, "max_stars_repo_stars_event_min_datetime": "2018-05-09T17:28:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T05:51:12.000Z", "max_issues_repo_path": "emulator.tex", "max_issues_repo_name": "jmuuu/pp4fpgas", "max_issues_repo_head_hexsha": "f604b68289b0a9998ace596c57ea7209f2d60fd5", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2018-05-13T16:26:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-06T06:06:57.000Z", "max_forks_repo_path": "emulator.tex", "max_forks_repo_name": "jmuuu/pp4fpgas", "max_forks_repo_head_hexsha": "f604b68289b0a9998ace596c57ea7209f2d60fd5", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 107, "max_forks_repo_forks_event_min_datetime": "2018-05-12T16:43:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T22:59:42.000Z", "avg_line_length": 29.8, "max_line_length": 74, "alphanum_fraction": 0.8187919463, "num_tokens": 38} |
# Copyright 2018-2021
# Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .volume import VolumeSrc, ImageProvider
from .util import create_gaussian_kernel, argmax_dim4
from .. import logger, QUIET
from ..commons import ParcellationIndex, MapType, compare_maps
from ..core.space import Point, PointSet, Space, BoundingBox
from ..core.region import Region
import numpy as np
from nibabel import Nifti1Image, funcs
from nilearn import image
from memoization import cached
from tqdm import tqdm
from abc import abstractmethod
from typing import Union
# Which types of available volumes should be preferred if multiple choices are available?
PREFERRED_VOLUMETYPES = ["nii", "neuroglancer/precomputed", "detailed maps"]
def create_map(parcellation, space: Space, maptype: MapType):
"""
Creates a new ParcellationMap object of the given type.
"""
classes = {
MapType.LABELLED: LabelledParcellationMap,
MapType.CONTINUOUS: ContinuousParcellationMap,
}
if maptype in classes:
obj = classes[maptype](parcellation, space)
elif maptype is None:
logger.warning(
"No maptype provided when requesting the parcellation map. Falling back to MapType.LABELLED"
)
obj = classes[MapType.LABELLED](parcellation, space)
else:
raise ValueError(f"Invalid maptype: '{maptype}'")
if len(obj) == 0:
raise ValueError(
f"No data found to construct a {maptype} map for {parcellation.name} in {space.name}."
)
return obj
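# Example usage (a hypothetical sketch, not part of this module): given a
# Parcellation and a Space obtained elsewhere in the package, `create_map`
# selects the concrete map class from the requested MapType, e.g.
#
#   labelled = create_map(parcellation, space, MapType.LABELLED)
#   continuous = create_map(parcellation, space, MapType.CONTINUOUS)
#   img = labelled.fetch(mapindex=0)  # Nifti1Image of the first labelled volume
#
# The names `parcellation` and `space` above are placeholders.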
class ParcellationMap(ImageProvider):
"""
Represents a brain map in a particular reference space, with
explicit knowledge about the region information per labelindex or channel.
There are two types:
1) Parcellation maps / labelled volumes (MapType.LABELLED)
A 3D or 4D volume with integer labels separating different,
non-overlapping regions. The number of regions corresponds to the
number of nonzero image labels in the volume.
2) 4D overlapping regional maps (often probability maps) (MapType.CONTINUOUS)
a 4D volume where each "time"-slice is a 3D volume representing
a map of a particular brain region. This format is used for
probability maps and similar continuous forms. The number of
    regions corresponds to the z dimension of the 4D object.
    ParcellationMaps can also be constructed from neuroglancer (BigBrain) volumes if
a feasible downsampled resolution is provided.
"""
_regions_cached = None
_maploaders_cached = None
def __init__(self, parcellation, space: Space, maptype=MapType):
"""
Construct a ParcellationMap for the given parcellation and space.
Parameters
----------
parcellation : Parcellation
The parcellation object used to build the map
space : Space
The desired template space to build the map
maptype : MapType
The desired type of the map
"""
if not parcellation.supports_space(space):
raise ValueError(
'Parcellation "{}" does not provide a map for space "{}"'.format(
parcellation.name, space.name
)
)
self.maptype = maptype
self.parcellation = parcellation
self.space = space
@property
def maploaders(self):
if self._maploaders_cached is None:
self._define_maps_and_regions()
return self._maploaders_cached
@property
def regions(self):
"""
        Dictionary of regions associated with the parcellation map, indexed by ParcellationIndex.
Lazy implementation - self._link_regions() will be called when the regions are accessed for the first time.
"""
if self._regions_cached is None:
self._define_maps_and_regions()
return self._regions_cached
@property
def names(self):
return self.parcellation.names
@abstractmethod
def _define_maps_and_regions(self):
"""
implemented by derived classes, to produce the lists _regions_cached and _maploaders_cached.
The first is a dictionary indexed by ParcellationIndex,
the latter a list of functions for loading the different maps.
"""
pass
def fetch_iter(self, resolution_mm=None, voi: BoundingBox = None):
"""
Returns an iterator to fetch all available maps sequentially.
Parameters
----------
resolution_mm : float or None (optional)
Physical resolution of the map, used for multi-resolution image volumes.
If None, the smallest possible resolution will be chosen.
If -1, the largest feasible resolution will be chosen.
"""
logger.debug(f"Iterator for fetching {len(self)} parcellation maps")
return (fnc(res=resolution_mm, voi=voi) for fnc in self.maploaders)
def fetch(
self, mapindex: int = 0, resolution_mm: float = None, voi: BoundingBox = None
):
"""
Fetches the actual image data
Parameters
----------
mapindex : int
The index of the available maps to be fetched.
resolution_mm : float or None (optional)
Physical resolution of the map, used for multi-resolution image volumes.
If None, the smallest possible resolution will be chosen.
If -1, the largest feasible resolution will be chosen.
"""
if mapindex < len(self):
if len(self) > 1:
logger.info(
f"Returning map {mapindex+1} of in total {len(self)} available maps."
)
return self.maploaders[mapindex](res=resolution_mm, voi=voi)
else:
raise ValueError(
f"'{len(self)}' maps available, but a mapindex of {mapindex} was requested."
)
def fetch_all(self):
"""Returns a 4D array containing all 3D maps.
All available maps are stacked along the 4th dimension.
Note that this can be quite memory-intensive for continuous maps.
If you just want to iterate over maps, prefer using
'for img in ParcellationMaps.fetch_iter():'
"""
N = len(self)
with QUIET:
im0 = self.fetch(mapindex=0)
out_shape = (N,) + im0.shape
logger.info(f"Create 4D array from {N} maps with size {im0.shape + (N,)}")
out_data = np.empty(out_shape, dtype=im0.dataobj.dtype)
for mapindex, img in tqdm(enumerate(self.fetch_iter()), total=N):
out_data[mapindex] = np.asanyarray(img.dataobj)
return funcs.squeeze_image(
Nifti1Image(
np.rollaxis(out_data, 0, out_data.ndim), im0.affine
)
)
def fetch_regionmap(
self,
regionspec: Union[str, int, Region],
resolution_mm=None,
voi: BoundingBox = None,
):
"""
Extract the mask for one particular region. For parcellation maps, this
is a binary mask volume. For overlapping maps, this is the
corresponding slice, which typically is a volume of float type.
Parameters
----------
regionspec : labelindex, partial region name, or Region
The desired region.
resolution_mm : float or None (optional)
Physical resolution of the map, used for multi-resolution image volumes.
If None, the smallest possible resolution will be chosen.
If -1, the largest feasible resolution will be chosen.
Return
------
Nifti1Image, if found, otherwise None
"""
indices = self.decode_region(regionspec)
mapimgs = []
for index in indices:
mapimg = self.fetch(
resolution_mm=resolution_mm, mapindex=index.map, voi=voi
)
if index.label is not None:
mapimg = Nifti1Image(
dataobj=(mapimg.get_fdata() == index.label).astype(np.uint8),
affine=mapimg.affine,
)
mapimgs.append(mapimg)
if len(mapimgs) == 1:
return mapimgs[0]
elif self.maptype == MapType.LABELLED:
m = mapimgs[0]
for m2 in mapimgs[1:]:
m.dataobj[m2.dataobj > 0] = 1
return m
else:
logger.info(
f"4D volume with {len(mapimgs)} continuous region maps extracted from region specification '{regionspec}'"
)
return image.concat_imgs(mapimgs)
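    # Example (hypothetical sketch): fetching the map of a single region,
    # assuming `pmap` is a ParcellationMap and "V1" matches a mapped region
    # of its parcellation (the region name is a placeholder):
    #
    #   mask = pmap.fetch_regionmap("V1", resolution_mm=1.0)
    #   # labelled maps yield a binary Nifti1Image; continuous maps yield float data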
def get_shape(self, resolution_mm=None):
return list(self.space.get_template().get_shape()) + [len(self)]
def is_float(self):
return self.maptype == MapType.CONTINUOUS
def _load_regional_map(
self, region: Region, resolution_mm, voi: BoundingBox = None, clip: bool = False
):
logger.debug(f"Loading regional map for {region.name} in {self.space.name}")
with QUIET:
rmap = region.get_regional_map(self.space, self.maptype).fetch(
resolution_mm=resolution_mm, voi=voi, clip=clip
)
return rmap
@abstractmethod
def assign_coordinates(self, xyz_phys, sigma_mm=1, sigma_truncation=3):
"""
Implemented by derived classes.
Assign regions to a physical coordinates with optional standard deviation.
Parameters
----------
xyz_phys : 3D point(s) in physical coordinates of the template space of the ParcellationMap
Can be one 3D coordinate tuple, list of 3D tuples, Nx3 or Nx4 array of coordinate tuples,
str of the form "3.1mm, -3.1mm, 80978mm", or list of such strings.
See arrays.create_homogeneous_array
sigma_mm : float (default: 1), applies only to continuous maps
standard deviation /expected localization accuracy of the point, in
mm units. For continuous maps, a 3D Gaussian distribution with that
bandwidth will be used for representing the location.
sigma_truncation : float (default: 3), applies only to continuous maps
            If sigma_mm is nonzero, this factor is used to determine where to
truncate the Gaussian kernel in standard error units.
"""
pass
def __len__(self):
"""
Returns the number of maps available in this parcellation.
"""
return len(self.maploaders)
def __contains__(self, spec):
"""
Test if a 3D map identified by the given specification is included in this parcellation map.
        For integer values, it is checked whether a corresponding slice along the fourth dimension could be extracted.
        Alternatively, a region object can be provided, and it will be checked whether the region is mapped.
You might find the decode_region() function of Parcellation and Region objects useful for the latter.
"""
if isinstance(spec, int):
return spec in range(len(self.maploaders))
elif isinstance(spec, Region):
for _, region in self.regions.items():
if region == spec:
return True
return False
def decode_label(self, mapindex=None, labelindex=None):
"""
Decode the region associated to a particular index.
Parameters
----------
mapindex : Sequential index of the 3D map used, if more than one are included
labelindex : Label index of the region, if the map is a labelled volume
"""
pindex = ParcellationIndex(map=mapindex, label=labelindex)
region = self.regions.get(pindex)
if region is None:
raise ValueError(f"Could not decode parcellation index {pindex}")
else:
return region
def decode_region(self, regionspec: Union[str, Region]):
"""
Find the ParcellationIndex for a given region.
Parameters
----------
regionspec : str or Region
Partial name of region, or Region object
Return
------
list of MapIndex objects
"""
region = (
self.parcellation.decode_region(regionspec)
if isinstance(regionspec, str)
else regionspec
)
subregions = []
for idx, r in self.regions.items():
if r == region:
return [idx]
elif r.has_parent(region):
subregions.append((idx, r))
if len(subregions) == 0:
raise IndexError(
f"Could not decode region specified by {regionspec} in {self.parcellation.name}"
)
# if we found maps of child regions, we want the mapped leaves to be identical to the leaves of the requested region.
children_found = {c for _, r in subregions for c in r.leaves}
children_requested = set(region.leaves)
if children_found != children_requested:
raise IndexError(
f"Cannot decode {regionspec} for the map in {self.space.name}, as it seems only partially mapped there."
)
return [idx for idx, _ in subregions]
class LabelledParcellationMap(ParcellationMap):
"""
Represents a brain map in a reference space, with
explicit knowledge about the region information per labelindex or channel.
Contains a Nifti1Image object as the "image" member.
This form defines parcellation maps / labelled volumes (MapType.LABELLED),
A 3D or 4D volume with integer labels separating different,
non-overlapping regions. The number of regions corresponds to the
number of nonzero image labels in the volume.
"""
def __init__(self, parcellation, space: Space):
"""
Construct a ParcellationMap for the given parcellation and space.
Parameters
----------
parcellation : Parcellation
The parcellation object used to build the map
space : Space
The desired template space to build the map
"""
super().__init__(parcellation, space, MapType.LABELLED)
def _define_maps_and_regions(self):
self._maploaders_cached = []
self._regions_cached = {}
# determine the map loader functions for each available map
for volumetype in PREFERRED_VOLUMETYPES:
sources = []
for vsrc in self.parcellation.get_volumes(self.space.id):
if vsrc.__class__.volume_type == volumetype:
sources.append(vsrc)
if len(sources) > 0:
break
else:
# reached only if for loop was not interrupted by 'break'
raise RuntimeError(
f"No suitable volume source for {self.parcellation.name} in {self.space.name}"
)
for source in sources:
# Choose map loader function and populate label-to-region maps
if source.volume_type == "detailed maps":
self._maploaders_cached.append(
lambda res=None, voi=None: self._collect_maps(
resolution_mm=res, voi=voi
)
)
# collect all available region maps to maps label indices to regions
current_index = 1
for region in self.parcellation.regiontree:
with QUIET:
regionmap = region.get_regional_map(
self.space, MapType.LABELLED
)
if regionmap is not None:
self._regions_cached[
ParcellationIndex(map=0, label=current_index)
] = region
current_index += 1
elif source.volume_type == self.space.type:
self._maploaders_cached.append(
lambda res=None, s=source, voi=None: self._load_map(
s, resolution_mm=res, voi=voi
)
)
# load map at lowest resolution to map label indices to regions
mapindex = len(self._maploaders_cached) - 1
with QUIET:
m = self._maploaders_cached[mapindex](res=None)
unmatched = []
for labelindex in np.unique(m.get_fdata()):
if labelindex != 0:
pindex = ParcellationIndex(map=mapindex, label=labelindex)
try:
region = self.parcellation.decode_region(pindex)
if labelindex > 0:
self._regions_cached[pindex] = region
except ValueError:
unmatched.append(pindex)
if unmatched:
logger.warning(
f"{len(unmatched)} parcellation indices in labelled volume couldn't be matched to region definitions in {self.parcellation.name}"
)
@cached
def _load_map(self, volume: VolumeSrc, resolution_mm: float, voi: BoundingBox):
m = volume.fetch(resolution_mm=resolution_mm, voi=voi)
if len(m.dataobj.shape) == 4 and m.dataobj.shape[3] > 1:
logger.info(
f"{m.dataobj.shape[3]} continuous maps given - using argmax to generate a labelled volume. "
)
m = argmax_dim4(m)
if m.dataobj.dtype.kind == "f":
logger.warning(
f"Floating point image type encountered when building a labelled volume for {self.parcellation.name}, converting to integer."
)
m = Nifti1Image(dataobj=np.asarray(m.dataobj, dtype=int), affine=m.affine)
return m
@cached
def _collect_maps(self, resolution_mm, voi):
"""
Build a 3D volume from the list of available regional maps.
Label indices will just be sequentially assigned.
Return
------
Nifti1Image, or None if no maps are found.
"""
m = None
# generate empty mask covering the template space
tpl = self.space.get_template().fetch(resolution_mm, voi=voi)
m = None
# collect all available region maps
regions = []
for r in self.parcellation.regiontree:
with QUIET:
regionmap = r.get_regional_map(self.space, MapType.LABELLED)
if regionmap is not None:
regions.append(r)
if len(regions) == 0:
raise RuntimeError(
f"No regional maps could be collected for {self.parcellation.name} in space {self.space.name}"
)
msg = f"Collecting {len(regions)} regional maps for '{self.space.name}'"
current_index = 1
for region in tqdm(regions, total=len(regions), desc=msg, unit="maps"):
# load region mask
mask_ = self._load_regional_map(
region, resolution_mm=resolution_mm, voi=voi
)
if not mask_:
continue
if np.prod(mask_.shape) == 0:
continue
# build up the aggregated mask with labelled indices
if mask_.shape != tpl.shape:
mask = image.resample_to_img(mask_, tpl, interpolation="nearest")
else:
mask = mask_
if m is None:
m = Nifti1Image(
np.zeros_like(tpl.dataobj, dtype=mask.dataobj.dtype), tpl.affine
)
m.dataobj[mask.dataobj > 0] = current_index
self._regions_cached[
ParcellationIndex(map=0, label=current_index)
] = region
current_index += 1
return m
@cached
def assign_coordinates(
self, point: Union[Point, PointSet], sigma_mm=None, sigma_truncation=None
):
"""
Assign regions to a physical coordinates with optional standard deviation.
Parameters
----------
point : Point or PointSet
sigma_mm : Not needed for labelled parcellation maps
sigma_truncation : Not needed for labelled parcellation maps
"""
if point.space != self.space:
logger.info(
f"Coordinates will be converted from {point.space.name} "
f"to {self.space.name} space for assignment."
)
# Convert input to Nx4 list of homogenous coordinates
if isinstance(point, Point):
coords = [point.warp(self.space).homogeneous]
elif isinstance(point, PointSet):
pointset = point
coords = [p.homogeneous for p in pointset.warp(self.space)]
else:
raise ValueError("assign_coordinates expects a Point or PointSet object.")
assignments = []
N = len(self)
msg = f"Assigning {len(coords)} points to {N} maps"
assignments = [[] for _ in coords]
for mapindex, loadfnc in tqdm(
enumerate(self.maploaders), total=len(self), desc=msg, unit=" maps"
):
lmap = loadfnc()
p2v = np.linalg.inv(lmap.affine)
A = lmap.get_fdata()
for i, coord in enumerate(coords):
x, y, z = (np.dot(p2v, coord) + 0.5).astype("int")[:3]
label = A[x, y, z]
if label > 0:
region = self.decode_label(mapindex=mapindex, labelindex=label)
assignments[i].append((region, lmap, None))
return assignments
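    # Example (hypothetical sketch): assigning a single physical coordinate,
    # assuming `lmap` is a LabelledParcellationMap, `space` is its Space, and
    # the Point constructor accepts a coordinate tuple plus a Space:
    #
    #   point = Point((27.0, -32.0, 63.0), space)
    #   assignments = lmap.assign_coordinates(point)
    #   # assignments[0] is a list of (region, labelled_volume, None) tuples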
def assign(self, img: Nifti1Image, msg=None, quiet=False):
"""
Assign the region of interest represented by a given volumetric image to brain regions in this map.
        TODO unify this with the corresponding method in ContinuousParcellationMap
Parameters:
-----------
img : Nifti1Image
The input region of interest, typically a binary mask or statistical map.
msg : str, default:None
Message to display with the progress bar
        quiet: Boolean, default: False
            If True, no progress indicator will be displayed
"""
if msg is None and not quiet:
msg = f"Assigning structure to {len(self.regions)} regions"
# How to visualize progress from the iterator?
def plain_progress(f):
return f
def visual_progress(f):
return tqdm(f, total=len(self.regions), desc=msg, unit="regions")
progress = plain_progress if quiet else visual_progress
# setup assignment loop
values = {}
pmaps = {}
for index, region in progress(self.regions.items()):
this = self.maploaders[index.map]()
if not this:
logger.warning(f"Could not load regional map for {region.name}")
continue
if (index.label is not None) and (index.label > 0):
with QUIET:
this = region.build_mask(self.space, maptype=self.maptype)
scores = compare_maps(img, this)
if scores["overlap"] > 0:
assert region not in pmaps
pmaps[region] = this
values[region] = scores
assignments = [
(region, pmaps[region], scores)
for region, scores in sorted(
values.items(),
key=lambda item: abs(item[1]["correlation"]),
reverse=True,
)
]
return assignments
class ContinuousParcellationMap(ParcellationMap):
"""
Represents a brain map in a particular reference space, with
explicit knowledge about the region information per labelindex or channel.
This form represents overlapping regional maps, often probability maps
(MapType.CONTINUOUS), where each "time"-slice is a 3D volume representing
a map of a particular brain region. This format is used for
probability maps and similar continuous forms. The number of
regions corresponds to the fourth dimension of the 4D object.
"""
def __init__(self, parcellation, space: Space):
"""
Construct a ParcellationMap for the given parcellation and space.
Parameters
----------
parcellation : Parcellation
The parcellation object used to build the map
space : Space
The desired template space to build the map
"""
super().__init__(parcellation, space, MapType.CONTINUOUS)
def _define_maps_and_regions(self):
self._maploaders_cached = []
self._regions_cached = {}
# Multiple volume sources could be given - find the preferred one
volume_sources = sorted(
self.parcellation.get_volumes(self.space.id),
key=lambda vsrc: PREFERRED_VOLUMETYPES.index(vsrc.volume_type),
)
for source in volume_sources:
if (
isinstance(source, ImageProvider)
and source.is_float()
and source.is_4D()
and source.get_shape()[3] > 1
):
# The source is 4D float, that's what we are looking for.
# We assume the fourth dimension contains the regional continuous maps.
nmaps = source.get_shape()[3]
logger.info(
f"{nmaps} continuous maps will be extracted from 4D volume for {self.parcellation}."
)
for i in range(nmaps):
self._maploaders_cached.append(
lambda res=None, voi=None, mi=i: source.fetch(
resolution_mm=res, voi=voi, mapindex=mi
)
)
region = self.parcellation.decode_region(i + 1)
pindex = ParcellationIndex(map=i, label=None)
self._regions_cached[pindex] = region
# we are finished, no need to look for regional map.
return
# otherwise we look for continuous maps associated to individual regions
i = 0
for region in self.parcellation.regiontree:
with QUIET:
regionmap = region.get_regional_map(self.space, MapType.CONTINUOUS)
if regionmap is None:
continue
if region in self.regions.values():
logger.debug(f"Region already seen in tree: {region.key}")
continue
self._maploaders_cached.append(
lambda r=region, res=None, voi=None: self._load_regional_map(
r, resolution_mm=res, voi=voi
)
)
pindex = ParcellationIndex(map=i, label=None)
self._regions_cached[pindex] = region
i += 1
logger.info(
f"{i} regional continuous maps found for {self.parcellation} in {self.space.name}."
)
@cached
def assign_coordinates(
self, point: Union[Point, PointSet], sigma_mm=1, sigma_truncation=3
):
"""
Assign regions to physical coordinates, with an optional standard deviation.
Parameters
----------
point : Point or PointSet
sigma_mm : float (default: 1)
standard deviation /expected localization accuracy of the point, in
mm units. A 3D Gaussian distribution with that
bandwidth will be used for representing the location.
sigma_truncation : float (default: 3)
If sigma_mm is nonzero, this factor is used to determine where to
truncate the Gaussian kernel in standard error units.
"""
assert sigma_mm >= 1
if point.space != self.space:
logger.info(
f"Coordinates will be converted from {point.space.name} "
f"to {self.space.name} space for assignment."
)
# Convert input to Nx4 list of homogeneous coordinates
if isinstance(point, Point):
coords = [point.warp(self.space).homogeneous]
elif isinstance(point, PointSet):
pointset = point
coords = [p.homogeneous for p in pointset.warp(self.space)]
else:
raise ValueError("assign_coordinates expects a Point or PointSet object.")
# convert sigma to voxel coordinates
tpl = self.space.get_template().fetch()
phys2vox = np.linalg.inv(tpl.affine)
scaling = np.array([np.linalg.norm(tpl.affine[:, i]) for i in range(3)]).mean()
sigma_vox = sigma_mm / scaling
if sigma_vox < 3:
N = len(self)
msg = f"Assigning {len(coords)} coordinates to {N} maps"
assignments = [[] for _ in coords]
for mapindex, loadfnc in tqdm(
enumerate(self.maploaders), total=len(self), desc=msg, unit=" maps"
):
pmap = loadfnc()
p2v = np.linalg.inv(tpl.affine)
A = pmap.get_fdata()
region = self.decode_label(mapindex=mapindex)
for i, coord in enumerate(coords):
x, y, z = (np.dot(p2v, coord) + 0.5).astype("int")[:3]
value = A[x, y, z]
if value > 0:
assignments[i].append((region, pmap, value))
else:
logger.info(
(
f"Assigning {len(coords)} uncertain coordinates (stderr={sigma_mm}) to {len(self)} maps."
)
)
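# Represent the uncertain point as a Gaussian-weighted image (kernel truncated at
# sigma_truncation standard deviations) and score it against each map like a volumetric ROI.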
kernel = create_gaussian_kernel(sigma_vox, sigma_truncation)
r = int(kernel.shape[0] / 2) # effective radius
assignments = []
for coord in coords:
xyz_vox = (np.dot(phys2vox, coord) + 0.5).astype("int")
shift = np.identity(4)
shift[:3, -1] = xyz_vox[:3] - r
W = Nifti1Image(dataobj=kernel, affine=np.dot(tpl.affine, shift))
assignments.append(
self.assign(W, msg=", ".join([f"{v:.1f}" for v in coord[:3]]))
)
if len(assignments) == 1:
return assignments[0]
else:
return assignments
def assign(self, img: Nifti1Image, msg=None, quiet=False):
"""
Assign the region of interest represented by a given volumetric image to continuous brain regions in this map.
Parameters:
-----------
img : Nifti1Image
The input region of interest, typically a binary mask or statistical map.
msg : str, default:None
Message to display with the progress bar
quiet: Boolean, default:False
If True, no progress indicator will be displayed
"""
if msg is None and not quiet:
msg = f"Assigning structure to {len(self)} maps"
# How to visualize progress from the iterator?
def plain_progress(f):
return f
def visual_progress(f):
return tqdm(f, total=len(self), desc=msg, unit="maps")
progress = plain_progress if quiet else visual_progress
# setup assignment loop
values = {}
pmaps = {}
for mapindex, loadfnc in progress(enumerate(self.maploaders)):
# load the regional map
this = loadfnc()
if not this:
logger.warning(
f"Could not load regional map for {self.regions[mapindex].name}"
)
continue
scores = compare_maps(img, this)
if scores["overlap"] > 0:
pmaps[mapindex] = this
values[mapindex] = scores
assignments = [
(self.decode_label(mapindex=i), pmaps[i], scores)
for i, scores in sorted(
values.items(),
key=lambda item: abs(item[1]["correlation"]),
reverse=True,
)
]
return assignments
| {"hexsha": "60e574f619f6a448239e5e3754bed6698feff4fd", "size": 32854, "ext": "py", "lang": "Python", "max_stars_repo_path": "siibra/volumes/parcellationmap.py", "max_stars_repo_name": "sdiazpier/siibra-python", "max_stars_repo_head_hexsha": "152330b78ecaddfae6da57a6cbfda8117c889ab1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "siibra/volumes/parcellationmap.py", "max_issues_repo_name": "sdiazpier/siibra-python", "max_issues_repo_head_hexsha": "152330b78ecaddfae6da57a6cbfda8117c889ab1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "siibra/volumes/parcellationmap.py", "max_forks_repo_name": "sdiazpier/siibra-python", "max_forks_repo_head_hexsha": "152330b78ecaddfae6da57a6cbfda8117c889ab1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4257309942, "max_line_length": 153, "alphanum_fraction": 0.5860777987, "include": true, "reason": "import numpy", "num_tokens": 7004} |
import os
import numpy as np
from skmultiflow.data.random_tree_generator import RandomTreeGenerator
def test_random_tree_generator(test_path):
stream = RandomTreeGenerator(tree_random_state=23, sample_random_state=12, n_classes=2, n_cat_features=2,
n_num_features=5, n_categories_per_cat_feature=5, max_tree_depth=6, min_leaf_depth=3,
fraction_leaves_per_level=0.15)
stream.prepare_for_use()
assert stream.n_remaining_samples() == -1
expected_names = ['att_num_0', 'att_num_1', 'att_num_2', 'att_num_3', 'att_num_4',
'att_nom_0_val0', 'att_nom_0_val1', 'att_nom_0_val2', 'att_nom_0_val3', 'att_nom_0_val4',
'att_nom_1_val0', 'att_nom_1_val1', 'att_nom_1_val2', 'att_nom_1_val3', 'att_nom_1_val4']
assert stream.feature_names == expected_names
expected_target_values = [0, 1]
assert stream.target_values == expected_target_values
assert stream.target_names == ['class']
assert stream.n_features == 15
assert stream.n_cat_features == 2
assert stream.n_num_features == 5
assert stream.n_targets == 1
assert stream.get_data_info() == 'Random Tree Generator - 1 target(s), 2 classes, 15 features'
assert stream.has_more_samples() is True
assert stream.is_restartable() is True
# Load test data corresponding to first 10 instances
test_file = os.path.join(test_path, 'random_tree_stream.npz')
data = np.load(test_file)
X_expected = data['X']
y_expected = data['y']
X, y = stream.next_sample()
assert np.alltrue(X[0] == X_expected[0])
assert np.alltrue(y[0] == y_expected[0])
X, y = stream.last_sample()
assert np.alltrue(X[0] == X_expected[0])
assert np.alltrue(y[0] == y_expected[0])
stream.restart()
X, y = stream.next_sample(10)
assert np.alltrue(X == X_expected)
assert np.alltrue(y == y_expected)
assert stream.n_targets == np.array(y).ndim
assert stream.n_features == X.shape[1]
assert 'stream' == stream._estimator_type
expected_info = "RandomTreeGenerator(fraction_leaves_per_level=0.15, max_tree_depth=6,\n" \
" min_leaf_depth=3, n_cat_features=2,\n" \
" n_categories_per_cat_feature=5, n_classes=2,\n" \
" n_num_features=5, sample_random_state=12,\n" \
" tree_random_state=23)"
assert stream.get_info() == expected_info
| {"hexsha": "6aad5ed850d879f24da2bbd98916d6d300c9a890", "size": 2553, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/data/test_random_tree_generator.py", "max_stars_repo_name": "tlac980/scikit-multiflow", "max_stars_repo_head_hexsha": "e05a512f3170555767cf229a4f7b5fed2423c86c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/data/test_random_tree_generator.py", "max_issues_repo_name": "tlac980/scikit-multiflow", "max_issues_repo_head_hexsha": "e05a512f3170555767cf229a4f7b5fed2423c86c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/data/test_random_tree_generator.py", "max_forks_repo_name": "tlac980/scikit-multiflow", "max_forks_repo_head_hexsha": "e05a512f3170555767cf229a4f7b5fed2423c86c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-26T02:49:25.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-26T02:49:25.000Z", "avg_line_length": 37.0, "max_line_length": 118, "alphanum_fraction": 0.6474735605, "include": true, "reason": "import numpy", "num_tokens": 657} |
"""
This module has the functions required to perform CUR decomposition.
CUR decomposition is a low-rank approximation comparable to SVD, but built from
actual rows and columns of the input matrix, so it keeps the factors sparse and
retains as much of the original data as possible, unlike the dense factors of SVD.
"""
import pandas as pd
import numpy as np
import random
from svd import get_svd, libsvd
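# Illustrative usage sketch (not part of the original module; the DataFrame below is hypothetical):
#   M = pd.DataFrame(np.random.rand(6, 4))
#   C, U, R = get_cur(M, r=3)
#   M_approx = C @ U @ R   # approximately reconstructs M from sampled columns/rows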
def get_rand_selection(M, r):
"""Return a random selection of rows and cols, chosen with probabilities derived from the Frobenius norm
TODO: edit this accordingly
Parameters
----------
M: pandas.DataFrame
input matrix
r: int
no of rows and cols to select(features)
Returns
-------
tuple
tuple containing row and col lists with selected rows and cols
"""
# frobenius norm of total matrix
f = (M**2).sum().sum()
# find probability of each row being selected using the frobenius norm
# row_prob: dict of probabilities for each row -> row_prob[i]: prob of ith row
row_prob = { i:(M.loc[i,:]**2).sum()/f for i in M.index }
# col_prob: dict of probabilities for each col -> col_prob[i]: prob of ith col
col_prob = { i:(M[i]**2).sum()/f for i in M.columns }
# return [np.random.randint(0, M.shape[0]) for i in range(r)] # just randomly return rows irrespective of prob_dist
return (random.choices([i for i in M.index], row_prob, k=r),
random.choices([i for i in M.columns], col_prob, k=r),
row_prob,
col_prob )
def moore_penrose_psuedoinverse(E):
"""Return the Moore-Penrose pseudoinverse of the given matrix
Parameters
----------
E: np.ndarray
The sigma matrix returned after performing SVD on matrix W
Returns
-------
np.ndarray
returns the Moore-Penrose pseudoinverse of the input matrix
"""
E_plus = E.copy()
for i in range(min(E_plus.shape[0], E_plus.shape[1])):
if E_plus[i][i] > 1e-6 : # actually should be != 0
E_plus[i][i] = (1/E_plus[i][i])
else: E_plus[i][i] = 0
return E_plus.transpose()
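# Worked example (illustrative only): for a diagonal sigma matrix the pseudoinverse
# inverts the nonzero diagonal entries and transposes, e.g.
#   moore_penrose_psuedoinverse(np.array([[2.0, 0.0, 0.0],
#                                          [0.0, 0.5, 0.0]]))
#   -> [[0.5, 0.0], [0.0, 2.0], [0.0, 0.0]]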
def get_cur(M, r=5, energy=1):
"""This function performs CUR decomposition on the input matrix
Parameters
----------
M: pandas.DataFrame
input matrix
energy: float, optional
The threshold to perform CUR decomposition
Returns
-------
(C, U, R): tuple
tuple containing CUR decompostion matrices
"""
if r>min(M.shape):
raise Exception(f"R value exceeds min of matrix shape: {M.shape}")
rows, cols, row_prob, col_prob = get_rand_selection(M, r)
# DEBUG: to set custom rows and cols if required(Debugging)
# rows = [5,5,3,3]
# cols = [0,3,3]
# check for repeated rows and cols
row_count = dict()
for i in rows:
if i not in row_count: row_count[i] = 1
else: row_count[i] += 1
col_count = dict()
for i in cols:
if i not in col_count: col_count[i] = 1
else: col_count[i] += 1
rows = list(row_count.keys()) # get unique rows
cols = list(col_count.keys()) # get unique cols
# get selected rows/cols
# divide with their prob of selection to normalize
# if multiple same k no of rows are selected, then multiply the row by sqrt(k)
C = M.filter(cols, axis=1).copy().astype(np.float).fillna(0)
for j in C.columns:
C.loc[:,j] = ( C.loc[:,j]/( (r*col_prob[j])**0.5 ) ) * (col_count[j]**0.5)
energy -= 0.1
R = M.filter(rows, axis=0).copy().astype(np.float).fillna(0)
for i in R.index:
R.loc[i] = ( R.loc[i]/((r*row_prob[i])**0.5) ) * (row_count[i]**0.5)
C = np.nan_to_num(C.values) # convert to numpy array
R = np.nan_to_num(R.values) # convert to numpy array
W = M.filter(rows, axis=0).filter(cols, axis=1)
# our custom svd module
# X, E, Y_t = get_svd(W, energy=1.0)
###### Using numpy for svd #####
# comment this whole section to remove numpy svd
X, E, Y_t = libsvd(W, energy)
# X, s, Y_t = np.linalg.svd(W)
# E = np.zeros((X.shape[1], Y_t.shape[0])).astype(np.float)
# E[:s.shape[0], :s.shape[0]] = np.diag(s)
###### end of numpy for svd ####
E_plus = moore_penrose_psuedoinverse(E)
Y = Y_t.transpose()
U = Y.dot(E_plus**2).dot(X.transpose())
# U = [email protected](E_plus)@X.transpose()
# print("Original: \n", M, end='\n\n')
# print("Cols:", cols, end='\n')
# print("Rows:", rows, end='\n\n')
# print("Row Prob:\n", row_prob)
# print("Col Prob:\n", col_prob)
# print("C:\n", C, end='\n\n')
# print("U:\n", U, end='\n\n')
# print("R:\n", R, end='\n\n')
# print("CUR:\n", C@U@R, end='\n\n')
# print("Error:\n", C@U@R - M.values, end='\n\n')
return C, U, R | {"hexsha": "639f6891b6ad80cc7d4054ab7e4a639219b79a4d", "size": 4859, "ext": "py", "lang": "Python", "max_stars_repo_path": "cur.py", "max_stars_repo_name": "kasuba-badri-vishal/Recommender_Systems", "max_stars_repo_head_hexsha": "19409e3c06eef84b7b35d8b71656b2033a5df6a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cur.py", "max_issues_repo_name": "kasuba-badri-vishal/Recommender_Systems", "max_issues_repo_head_hexsha": "19409e3c06eef84b7b35d8b71656b2033a5df6a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cur.py", "max_forks_repo_name": "kasuba-badri-vishal/Recommender_Systems", "max_forks_repo_head_hexsha": "19409e3c06eef84b7b35d8b71656b2033a5df6a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.610738255, "max_line_length": 120, "alphanum_fraction": 0.5797489195, "include": true, "reason": "import numpy", "num_tokens": 1369} |
# coding=utf-8
# Author: Rodolfo J. O. Soares <[email protected]>
from sklearn.metrics.pairwise import euclidean_distances
import numpy as np
def min_rule(perturbations, cluster2class):
"""Compute the minimum perturbation for each sample based on each class's clusters
Parameters
----------
perturbations : array of shape (n_samples, n_clusters)
The cluster estimated perturbation for each sample.
cluster2class : dict
The mapped cluster to class.
Returns
-------
perturbations: array of shape (n_samples, n_classes)
The minimum perturbation for each sample.
"""
classes = np.array(list(cluster2class.values()))
perturbations_min = np.full((perturbations.shape[0], len(set(classes))), np.inf)
for i, c in enumerate(set(classes)):
perturbations_min[:, i] = np.min(perturbations[:, (classes == c)], axis=1)
return perturbations_min
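# Illustrative sketch (hypothetical values, not part of the original module): with
# cluster2class = {0: 'a', 1: 'b', 2: 'a'} and perturbations = np.array([[0.3, 0.5, 0.1]]),
# each class gets one output column holding the minimum over its clusters, i.e. class 'a'
# yields min(0.3, 0.1) = 0.1 and class 'b' yields 0.5 (column order follows set(classes)).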
def avg_rule(perturbations, cluster2class):
"""Compute the average perturbation for each sample based on each class's clusters
Parameters
----------
perturbations : array of shape (n_samples, n_clusters)
The cluster estimated perturbation for each sample.
cluster2class : dict
The mapped cluster to class.
Returns
-------
perturbations: array of shape (n_samples, n_classes)
The average perturbation for each sample.
"""
classes = np.array(list(cluster2class.values()))
perturbations_mean = np.full((perturbations.shape[0], len(set(classes))), np.inf)
for i, c in enumerate(set(classes)):
perturbations_mean[:, i] = np.mean(perturbations[:, (classes == c)], axis=1)
return perturbations_mean
def median_rule(perturbations, cluster2class):
"""Compute the median perturbation for each sample based on each class's clusters
Parameters
----------
perturbations : array of shape (n_samples, n_clusters)
The cluster estimated perturbation for each sample.
cluster2class : dict
The mapped cluster to class.
Returns
-------
perturbations: array of shape (n_samples, n_classes)
The median perturbation for each sample.
"""
classes = np.array(list(cluster2class.values()))
perturbations_median = np.full((perturbations.shape[0], len(set(classes))), np.inf)
for i, c in enumerate(set(classes)):
perturbations_median[:, i] = np.median(perturbations[:, (classes == c)], axis=1)
return perturbations_median
def nearest_cluster_rule(X, perturbations, cluster2class, centroids):
"""Compute the perturbation for each sample based on nearest centroids
Parameters
----------
X : array of shape (n_samples, n_features)
The input data.
perturbations : array of shape (n_samples, n_clusters)
The cluster estimated perturbation for each sample.
cluster2class : dict
The mapped cluster to class.
centroids : array of shape (n_clusters, n_features)
The centroid for each cluster.
Returns
-------
perturbations: array of shape (n_samples, n_classes)
The perturbation for each sample, taken from its nearest cluster within each class.
"""
classes = np.array(list(cluster2class.values()))
perturbations_nearest_cluster = np.full((perturbations.shape[0], len(set(classes))), np.inf)
for i, c in enumerate(set(classes)):
class_centers_ = centroids[(classes == c), :]
dists = euclidean_distances(X, class_centers_)
nearest_clusters_idx = dists.argmin(axis=1)
perturbation_clusters = perturbations[:, (classes == c)]
for j, p in enumerate(nearest_clusters_idx):
perturbations_nearest_cluster[j, i] = perturbation_clusters[j, p]
return perturbations_nearest_cluster
| {"hexsha": "0d57a84c92543d1a0cd42383f478397622fc098d", "size": 3924, "ext": "py", "lang": "Python", "max_stars_repo_path": "perturbation_classifiers/subconcept/aggregation.py", "max_stars_repo_name": "rjos/perturbation-classifiers", "max_stars_repo_head_hexsha": "5637b49c5c297e20b4ee6bcee25173d9d11d642f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "perturbation_classifiers/subconcept/aggregation.py", "max_issues_repo_name": "rjos/perturbation-classifiers", "max_issues_repo_head_hexsha": "5637b49c5c297e20b4ee6bcee25173d9d11d642f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "perturbation_classifiers/subconcept/aggregation.py", "max_forks_repo_name": "rjos/perturbation-classifiers", "max_forks_repo_head_hexsha": "5637b49c5c297e20b4ee6bcee25173d9d11d642f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4297520661, "max_line_length": 96, "alphanum_fraction": 0.6511213048, "include": true, "reason": "import numpy", "num_tokens": 865} |
using SimpleProbabilitySets
using Base.Test, Distributions
@testset "SimpleProbabilitySets" begin
d = Categorical([0.1, 0.2, 0.7])
d2 = Categorical([0.4, 0.4, 0.2])
pb = PBox(d, d2)
@test cdfs(pb)[2] == [0.4, 0.8, 1.0]
@test pints(pb)[1] ≈ [0.1, 0.0, 0.2] atol = 1e-6
pl = [0.1, 0.2, 0.5]
pu = [0.4, 0.4, 0.7]
uncsmall = 0.05
unclarge = 0.6
pi = PInterval(pl, pu)
pi2 = PInterval(pl, uncsmall)
pi3 = PInterval(pl, unclarge)
@test pi.plower == [0.1, 0.2, 0.5]
@test pi2.plower ≈ [0.05, 0.15, 0.45] atol = 1e-6
@test pi3.pupper ≈ [0.7, 0.8, 1.0] atol = 1e-6
p = psample(pi)
@test sum(p) ≈ 1.0 atol = 1e-9
@test all(pi.plower .< p .< pi.pupper)
end
| {"hexsha": "1ecc99eb6505f3fc8a982ded39f2244383eb4a91", "size": 721, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "ajkeith/SimpleProbabilitySets.jl", "max_stars_repo_head_hexsha": "479d32c613ccdf1774a8c29212699f33b51ded4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "ajkeith/SimpleProbabilitySets.jl", "max_issues_repo_head_hexsha": "479d32c613ccdf1774a8c29212699f33b51ded4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "ajkeith/SimpleProbabilitySets.jl", "max_forks_repo_head_hexsha": "479d32c613ccdf1774a8c29212699f33b51ded4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7307692308, "max_line_length": 53, "alphanum_fraction": 0.5492371706, "num_tokens": 349} |
[STATEMENT]
lemma mk_poincare_line_cmat_scale:
"cor k *\<^sub>s\<^sub>m mk_poincare_line_cmat A B = mk_poincare_line_cmat (k * A) (k * B)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cor k *\<^sub>s\<^sub>m mk_poincare_line_cmat A B = mk_poincare_line_cmat (k * A) (cor k * B)
[PROOF STEP]
by simp | {"llama_tokens": 146, "file": "Poincare_Disc_Poincare_Lines", "length": 1} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from collections import defaultdict, deque
import json
import os
import random
import sys
import gym
import numpy as np
import tensorflow as tf
from gym_puyopuyo.agent import TsuTreeSearchAgent
from gym_puyopuyo.env import register
from gym_puyopuyo.util import print_up
from util import bias_variable, conv2d, summarize_scalar, variable_summaries, vh_log, weight_variable, parse_record, read_record, GAMMA
FLAGS = None
HYPERPARAMS = {
"batch_size": 32,
"augmentation": 3,
"kernel_size": 5,
"num_features": 2,
"fc_1_size": 6,
"fc_2_size": 3,
"teacher_depth": 2,
}
class Agent(object):
def __init__(self, session, envs):
self.session = session
self.envs = envs
self.observations = [env.reset() for env in self.envs] * (1 + HYPERPARAMS["augmentation"])
self.states = [env.unwrapped.get_root() for env in self.envs] * (1 + HYPERPARAMS["augmentation"])
self.env = envs[0]
self.make_graph()
self.make_summaries()
if FLAGS:
self.writer = tf.summary.FileWriter(FLAGS.log_dir)
self.writer.add_graph(tf.get_default_graph())
@property
def BATCH_SIZE(self):
return HYPERPARAMS["batch_size"] * (1 + HYPERPARAMS["augmentation"])
@property
def KERNEL_SIZE(self):
return HYPERPARAMS["kernel_size"]
@property
def NUM_FEATURES(self):
return HYPERPARAMS["num_features"]
@property
def FC_1_SIZE(self):
return HYPERPARAMS["fc_1_size"]
@property
def FC_2_SIZE(self):
return HYPERPARAMS["fc_2_size"]
def make_graph(self):
self.make_input_graph()
self.make_convolution_graph()
self.make_fc_1_graph()
self.make_fc_2_graph()
self.make_output_graph()
self.make_loss_graph()
self.make_train_graph()
def make_summaries(self):
for variable, name in zip(self.variables, self.variable_names):
variable_summaries(variable, name)
tf.summary.histogram("policy_head", self.policy_head)
tf.summary.histogram("Q_head", self.Q_head)
tf.summary.scalar('loss_mse', tf.reduce_mean(self.loss_mse))
tf.summary.scalar('loss_xent', tf.reduce_mean(self.loss_xent))
tf.summary.scalar('loss', tf.reduce_mean(self.loss))
def make_input_graph(self):
deal_space, box_space = self.env.observation_space.spaces
with tf.name_scope("input"):
self.deal_input = tf.placeholder(tf.float32, [self.BATCH_SIZE] + list(deal_space.shape), name="deal")
self.box_input = tf.placeholder(tf.float32, [self.BATCH_SIZE] + list(box_space.shape), name="box")
self.n_deal = np.prod(deal_space.shape)
self.n_box = np.prod(box_space.shape)
self.n_inputs = self.n_deal + self.n_box
self.box_shape = box_space.shape
def make_convolution_graph(self):
with tf.name_scope("convolution"):
self.W_conv = weight_variable([self.KERNEL_SIZE, self.KERNEL_SIZE, self.box_shape[-1], self.NUM_FEATURES], name="W")
self.b_conv = bias_variable([self.NUM_FEATURES], name="b")
z = conv2d(self.box_input, self.W_conv) + self.b_conv
self.box_activation = tf.sigmoid(z)
self.n_conv = self.box_shape[0] * self.box_shape[1] * self.NUM_FEATURES
def make_fc_1_graph(self):
n_flat = 0
with tf.name_scope("flatten"):
flat_input = tf.reshape(self.box_activation, [-1, self.n_conv])
n_flat += self.n_conv
flat_input = tf.concat([flat_input, tf.reshape(self.deal_input, [-1, self.n_deal])], 1)
n_flat += self.n_deal
with tf.name_scope("fully_connected_1"):
self.W_fc_1 = weight_variable([n_flat, self.FC_1_SIZE], name="W")
self.b_fc_1 = bias_variable([self.FC_1_SIZE], name="b")
z = tf.matmul(flat_input, self.W_fc_1) + self.b_fc_1
self.fc_1_activation = tf.sigmoid(z)
def make_fc_2_graph(self):
with tf.name_scope("fully_connected_2p"):
self.W_fc_2_policy = weight_variable([self.FC_1_SIZE, self.FC_2_SIZE], name="W")
self.b_fc_2_policy = bias_variable([self.FC_2_SIZE], name="b")
z = tf.matmul(self.fc_1_activation, self.W_fc_2_policy) + self.b_fc_2_policy
self.fc_2_activation_policy = tf.sigmoid(z)
with tf.name_scope("fully_connected_2Q"):
self.W_fc_2_Q = weight_variable([self.FC_1_SIZE, self.FC_2_SIZE], name="W")
self.b_fc_2_Q = bias_variable([self.FC_2_SIZE], name="b")
z = tf.matmul(self.fc_1_activation, self.W_fc_2_Q) + self.b_fc_2_Q
self.fc_2_activation_Q = tf.sigmoid(z)
def make_output_graph(self):
self.n_actions = self.env.action_space.n
with tf.name_scope("policy"):
self.W_policy = weight_variable([self.FC_2_SIZE, self.n_actions], name="W")
self.b_policy = bias_variable([self.n_actions], name="b")
self.policy_head = tf.matmul(self.fc_2_activation_policy, self.W_policy) + self.b_policy
self.policy_actions = tf.nn.softmax(logits=5*self.policy_head, dim=1, name="policy_actions")
with tf.name_scope("Q"):
self.W_Q = weight_variable([self.FC_2_SIZE, self.n_actions], name="W")
self.b_Q = bias_variable([self.n_actions], name="b")
self.Q_head = tf.matmul(self.fc_2_activation_Q, self.W_Q) + self.b_Q
def make_loss_graph(self):
with tf.name_scope("loss"):
self.policy_target = tf.placeholder(tf.float32, [self.BATCH_SIZE, self.n_actions], name="policy_target")
self.Q_target = tf.placeholder(tf.float32, [self.BATCH_SIZE, self.n_actions], name="Q_target")
with tf.name_scope("error"):
self.loss_xent = tf.nn.softmax_cross_entropy_with_logits(labels=self.policy_target, logits=self.policy_head)
self.loss_mse = tf.reduce_mean(tf.squared_difference(self.Q_head, self.Q_target))
with tf.name_scope("regularization"):
regularizer = tf.contrib.layers.l2_regularizer(scale=0.0001)
reg_variables = tf.trainable_variables()
self.reg_term = tf.contrib.layers.apply_regularization(regularizer, reg_variables)
self.loss = self.loss_xent + self.loss_mse + self.reg_term
def make_train_graph(self):
learning_rate = FLAGS.learning_rate if FLAGS else 0
with tf.name_scope("train"):
self.optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9, use_nesterov=True)
self.train_step = self.optimizer.minimize(self.loss)
def get_feed_dict(self, observations=None):
observations = observations or self.observations
feed_dict = {self.deal_input: [], self.box_input: []}
for observation in observations:
deal, box = observation
feed_dict[self.deal_input].append(deal)
feed_dict[self.box_input].append(box)
return feed_dict
def get_policy_targets(self, states=None):
states = states or self.states
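# The tree-search agent acts as a teacher: its action distribution is used as the
# cross-entropy target for the policy head (see loss_xent in make_loss_graph).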
agent = TsuTreeSearchAgent(returns_distribution=True)
agent.depth = HYPERPARAMS["teacher_depth"]
return [agent.get_action(state) for state in states]
def get_Q_targets(self, Q_base, actions, observations, rewards):
feed_dict = self.get_feed_dict(observations)
new_Qs = self.session.run(self.Q_head, feed_dict=feed_dict)
targets = []
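# One-step Q-learning target: the taken action's entry is overwritten with
# reward + GAMMA * max_a' Q(next_state, a'); the other entries keep the current estimates.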
for target, responsible_action, reward, Q in zip(Q_base, actions, rewards, new_Qs):
target[responsible_action] = reward + GAMMA * np.max(Q)
targets.append(target)
return targets
def step(self):
feed_dict = self.get_feed_dict()
action_dists, Q_base = self.session.run((self.policy_actions, self.Q_head), feed_dict=feed_dict)
# print(action_dists[0])
actions = []
observations = []
rewards = []
states = []
for env, dist in zip(self.envs, action_dists):
action = np.random.choice(self.n_actions, p=dist)
observation, reward, done, info = env.step(action)
reward = np.cbrt(reward)
if done:
observation = env.reset()
for i in range(1 + HYPERPARAMS["augmentation"]):
if i > 0:
observation = self.env.permute_observation(observation)
actions.append(action)
observations.append(observation)
states.append(info["state"])
rewards.append(reward)
feed_dict[self.policy_target] = self.get_policy_targets()
feed_dict[self.Q_target] = self.get_Q_targets(Q_base, actions, observations, rewards)
self.session.run(self.train_step, feed_dict=feed_dict)
self.observations = observations
self.states = states
return rewards, feed_dict
def get_policy_dist(self, states):
experiences = list((state, [0] * self.n_actions, 0) for state in states)
feed_dict = self.get_feed_dict(experiences)
return self.session.run(self.policy_actions, feed_dict=feed_dict)
def render_in_place(self):
self.env.render()
print_up(8)
def render_ansi(self):
sio = self.env.render("ansi")
print_up(8, outfile=sio)
return sio
@property
def variable_names(self):
return [
"W_conv", "b_conv",
"W_fc_1", "b_fc_1",
"W_fc_2_policy", "b_fc_2_policy",
"W_fc_2_Q", "b_fc_2_Q",
"W_policy", "b_policy",
"W_Q", "b_Q",
]
@property
def variables(self):
return [getattr(self, name) for name in self.variable_names]
def dump(self):
outputs_dir = os.getenv("VH_OUTPUTS_DIR", "/tmp/tensorflow/gym_puyopuyo/outputs")
if not os.path.isdir(outputs_dir):
os.makedirs(outputs_dir)
arrays = self.session.run(self.variables)
for arr, name in zip(arrays, self.variable_names):
arr = arr.flatten()
filename = os.path.join(outputs_dir, "{}.csv".format(name))
np.savetxt(filename, arr, delimiter=",")
print("Saved parameters to {}".format(outputs_dir))
def load(self, params_dir):
for variable, name in zip(self.variables, self.variable_names):
filename = os.path.join(params_dir, "{}.csv".format(name))
arr = np.loadtxt(filename, delimiter=",")
arr = arr.reshape(variable.shape)
self.session.run(variable.assign(arr))
print("Loaded parameters from {}".format(params_dir))
def main(*args, **kwargs):
with tf.Session() as session:
envs = [gym.make("PuyoPuyoEndlessTsu-v2") for _ in range(HYPERPARAMS["batch_size"])]
agent = Agent(session, envs)
merged = tf.summary.merge_all()
session.run(tf.global_variables_initializer())
running_reward = 0
if FLAGS.params_dir:
agent.load(FLAGS.params_dir)
for iteration in range(FLAGS.num_iterations):
rewards, feed_dict = agent.step()
if not FLAGS.quiet:
agent.envs[0].render()
print(rewards[0])
running_reward += sum(rewards)
if iteration % 10 == 0:
vh_log({"reward": running_reward}, iteration)
if not FLAGS.quiet:
summarize_scalar(agent.writer, "reward", running_reward, iteration)
running_reward = 0
if iteration % 100 == 0 and not FLAGS.quiet:
summary = session.run(merged, feed_dict=feed_dict)
agent.writer.add_summary(summary, iteration)
agent.dump()
agent.dump()
agent.writer.close()
if __name__ == "__main__":
register()
parser = argparse.ArgumentParser()
parser.add_argument('--num_iterations', type=int, default=10000,
help='Number of steps to run the trainer')
parser.add_argument('--learning_rate', type=float, default=1e-4,
help='Initial learning rate')
parser.add_argument("--params_dir", type=str, default=None,
help="Parameters directory for initial values")
parser.add_argument('--log_dir', type=str, default='/tmp/tensorflow/gym_puyopuyo/logs/rl_with_summaries',
help='Summaries log directory')
parser.add_argument('--hyperparams', type=str, default='{}',
help='Hyperparameters (JSON or filename)')
parser.add_argument('--quiet', action='store_true')
FLAGS, unparsed = parser.parse_known_args()
try:
hyperparams = json.loads(FLAGS.hyperparams)
except ValueError:
with open(FLAGS.hyperparams) as f:
hyperparams = json.load(f)
HYPERPARAMS.update(hyperparams)
print(HYPERPARAMS)
print("Iterations =", FLAGS.num_iterations)
print("Learning rate =", FLAGS.learning_rate)
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| {"hexsha": "2f3d234cc65be46d49b730f478d6b43ff53998f4", "size": 13256, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_deep_student.py", "max_stars_repo_name": "frostburn/rl_puyopuyo", "max_stars_repo_head_hexsha": "9f57d2919351d22ed51672f7dd6b8427e9ad0e91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-06-01T06:31:53.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-12T21:36:24.000Z", "max_issues_repo_path": "train_deep_student.py", "max_issues_repo_name": "frostburn/rl_puyopuyo", "max_issues_repo_head_hexsha": "9f57d2919351d22ed51672f7dd6b8427e9ad0e91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_deep_student.py", "max_forks_repo_name": "frostburn/rl_puyopuyo", "max_forks_repo_head_hexsha": "9f57d2919351d22ed51672f7dd6b8427e9ad0e91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6855345912, "max_line_length": 135, "alphanum_fraction": 0.6371454436, "include": true, "reason": "import numpy", "num_tokens": 3076} |
import matplotlib.pyplot as plt
import numpy as np
from scipy.io.wavfile import read
(fs, x) = read('../../../sounds/oboe-A4.wav')
M = 256
H = 128
start = int(.8*fs)
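# The rest of the script stacks the raw segment and five Hamming-windowed frames of
# length M, hopped by H samples and offset vertically, to illustrate STFT overlap-add framing.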
plt.figure(1)
x0 = x[start:start+3*M]/float(max(x))
plt.plot(x0)
plt.axis([0, 3*M, min(x0), max(x0)+5.5])
offset = 1.5
x1 = np.zeros(3*M)+offset
x1[0:M] += (x0[0:M] * np.hamming(M))
plt.plot(x1,'b')
offset = 2.5
x2 = np.zeros(3*M)+offset
x2[H:M+H] += (x0[H:M+H] * np.hamming(M))
plt.plot(x2,'b')
offset = 3.5
x2 = np.zeros(3*M)+offset
x2[H*2:M+H*2] += (x0[2*H:M+H*2] * np.hamming(M))
plt.plot(x2,'b')
offset = 4.5
x2 = np.zeros(3*M)+offset
x2[H*3:M+H*3] += (x0[3*H:M+H*3] * np.hamming(M))
plt.plot(x2,'b')
offset = 5.5
x2 = np.zeros(3*M)+offset
x2[H*4:M+H*4] += (x0[4*H:M+H*4] * np.hamming(M))
plt.plot(x2,'b')
plt.tight_layout()
plt.savefig('ola.png')
plt.show()
| {"hexsha": "529c78ad617244ce8abad4decab5434fa69bd1bc", "size": 838, "ext": "py", "lang": "Python", "max_stars_repo_path": "stanford/sms-tools/lectures/04-STFT/plots-code/ola.py", "max_stars_repo_name": "phunc20/dsp", "max_stars_repo_head_hexsha": "e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-12T18:32:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-12T18:32:06.000Z", "max_issues_repo_path": "stanford/sms-tools/lectures/04-STFT/plots-code/ola.py", "max_issues_repo_name": "phunc20/dsp", "max_issues_repo_head_hexsha": "e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stanford/sms-tools/lectures/04-STFT/plots-code/ola.py", "max_forks_repo_name": "phunc20/dsp", "max_forks_repo_head_hexsha": "e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.488372093, "max_line_length": 48, "alphanum_fraction": 0.5954653938, "include": true, "reason": "import numpy,from scipy", "num_tokens": 370} |
using PyPlot
using DelimitedFiles
CPU=Sys.cpu_info()[1].model
scalar=readdlm("jlvtriad-scalar.dat",comments=true)'
scalar_shared=readdlm("jlvtriad-scalar-shared.dat",comments=true)'
scalar_avx=readdlm("jlvtriad-scalar-avx.dat",comments=true)'
scalar_shared_avx=readdlm("jlvtriad-scalar-shared-avx.dat",comments=true)'
PyPlot.clf()
PyPlot.semilogx(scalar[1,:],scalar[2,:],"g--",label="Array")
PyPlot.semilogx(scalar_shared[1,:],scalar_shared[2,:],"r--",label="SharedArray")
PyPlot.semilogx(scalar_avx[1,:],scalar_avx[2,:],"g",label="Array + avx")
PyPlot.semilogx(scalar_shared_avx[1,:],scalar_shared_avx[2,:],"r",label="SharedArray+avx")
PyPlot.legend()
PyPlot.grid()
PyPlot.xlabel("Array Size")
PyPlot.ylabel("GFlops/s")
PyPlot.title("SharedArray vs Array (scalar): A[i]=B[i]+C[i]*D[i]\n $(CPU)")
PyPlot.savefig("shared-vs-normal-arrays.pdf")
PyPlot.savefig("shared-vs-normal-arrays.png")
| {"hexsha": "8b8b6eec2f7d01c0c9c2174635684a5ec08029a0", "size": 892, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "parallel/shared-vs-normal-arrays.jl", "max_stars_repo_name": "j-fu/julia-tests", "max_stars_repo_head_hexsha": "d9bad70c8a101151dbf3bd8d8a56ba196a0f8655", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "parallel/shared-vs-normal-arrays.jl", "max_issues_repo_name": "j-fu/julia-tests", "max_issues_repo_head_hexsha": "d9bad70c8a101151dbf3bd8d8a56ba196a0f8655", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "parallel/shared-vs-normal-arrays.jl", "max_forks_repo_name": "j-fu/julia-tests", "max_forks_repo_head_hexsha": "d9bad70c8a101151dbf3bd8d8a56ba196a0f8655", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7826086957, "max_line_length": 90, "alphanum_fraction": 0.7466367713, "num_tokens": 284} |
!! Copyright (C) Stichting Deltares, 2012-2016.
!!
!! This program is free software: you can redistribute it and/or modify
!! it under the terms of the GNU General Public License version 3,
!! as published by the Free Software Foundation.
!!
!! This program is distributed in the hope that it will be useful,
!! but WITHOUT ANY WARRANTY; without even the implied warranty of
!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
!! GNU General Public License for more details.
!!
!! You should have received a copy of the GNU General Public License
!! along with this program. If not, see <http://www.gnu.org/licenses/>.
!!
!! contact: [email protected]
!! Stichting Deltares
!! P.O. Box 177
!! 2600 MH Delft, The Netherlands
!!
!! All indications and logos of, and references to registered trademarks
!! of Stichting Deltares remain the property of Stichting Deltares. All
!! rights reserved.
!
! Deltares SECTOR WATERRESOURCES AND ENVIRONMENT
!
! CREATED: :
!
! V0.01 040894 Jos van Gils First version
!
! MODULE : CHPHAS
!
! FUNCTION : Find index of last water component
! in Charon-arrays with dimension NAIJ
!
! SUBROUTINES CALLED :
!
! FILES : -
!
! COMMON BLOCKS : -
!
! NAME KIND LENGTH FUNCT. DESCRIPTION
! ---- ----- ------ ------- -----------
!
SUBROUTINE CHPHAS ( NAIJ2 )
!
! Declarations
!
INTEGER NAIJ2 , IFPS , J
INCLUDE 'charon.inc'
! IFPS: number of first non water transportable phase
IFPS = 2
NAIJ2 = -1
DO 20 J = 1,NAIJ
IF ( JCOL(J) .EQ. KL(IFPS) ) THEN
NAIJ2 = J - 1
GOTO 25
ENDIF
20 CONTINUE
25 CONTINUE
IF ( NAIJ2 .LT. 0 ) NAIJ2 = NAIJ
RETURN
END
| {"hexsha": "fce399b01e78552581a4fd841816c73c4f2c18f6", "size": 1889, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/waq/packages/waq_kernel/src/charon/chphas.f", "max_stars_repo_name": "liujiamingustc/phd", "max_stars_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-06T03:01:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:02:55.000Z", "max_issues_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/waq/packages/waq_kernel/src/charon/chphas.f", "max_issues_repo_name": "liujiamingustc/phd", "max_issues_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docker/water/delft3d/tags/v6686/src/engines_gpl/waq/packages/waq_kernel/src/charon/chphas.f", "max_forks_repo_name": "liujiamingustc/phd", "max_forks_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7794117647, "max_line_length": 73, "alphanum_fraction": 0.5960825834, "num_tokens": 527} |
import numpy as np
import os
import codecs
import torch
from driver import DATA_PATH
def save_np_to_txt(np_input, txt_name, file_dir=""):
if file_dir.startswith("/"):
save_dir = file_dir
else:
save_dir = os.path.join(DATA_PATH, file_dir)
os.makedirs(save_dir, exist_ok=True)
txt_path = os.path.join(save_dir, txt_name)
assert isinstance(np_input, np.ndarray)
first_line = ",".join([str(sh) for sh in np_input.shape])
flat_input = np.reshape(np_input, (-1, np_input.shape[-1]))
with codecs.open(txt_path, "w", "utf-8") as fw:
fw.write(first_line)
fw.write("\n")
for last_dim_data in flat_input:
last_data_line = ",".join([str(float(c_data)) for c_data in last_dim_data])
fw.write(last_data_line)
fw.write("\n")
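# File format written above (illustrative, with a hypothetical file name): calling
# save_np_to_txt(np.zeros((2, 3)), "zeros.txt") produces
#   2,3
#   0.0,0.0,0.0
#   0.0,0.0,0.0
# i.e. the first line holds the shape, then one comma-separated line per row of the
# array reshaped to (-1, last_dim).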
def save_torch_to_txt(torch_input, txt_name, file_dir=""):
if file_dir.startswith("/"):
save_dir = file_dir
else:
save_dir = os.path.join(DATA_PATH, file_dir)
os.makedirs(save_dir, exist_ok=True)
txt_path = os.path.join(save_dir, txt_name)
assert isinstance(torch_input, torch.Tensor)
first_line = ",".join([str(sh) for sh in torch_input.shape])
flat_input = torch.reshape(torch_input, (-1, torch_input.shape[-1]))
with codecs.open(txt_path, "w", "utf-8") as fw:
fw.write(first_line)
fw.write("\n")
for last_dim_data in flat_input:
last_data_line = ",".join([str(float(c_data)) for c_data in last_dim_data])
fw.write(last_data_line)
fw.write("\n")
def load_txt_to_np(txt_name, file_dir="", dtype=np.float32):
if file_dir.startswith("/"):
save_dir = file_dir
else:
save_dir = os.path.join(DATA_PATH, file_dir)
txt_path = os.path.join(save_dir, txt_name)
assert os.path.isfile(txt_path)
with codecs.open(txt_path, "r", "utf-8") as fr:
file_lines = fr.read().splitlines()
first_line = file_lines[0]
in_shape = tuple([int(fl) for fl in first_line.split(",")])
datas = []
for data_line in file_lines[1:]:
c_data = [float(fl) for fl in data_line.split(",")]
datas.append(c_data)
res_np = np.array(datas).reshape(in_shape).astype(dtype)
return res_np
def load_txt_to_torch(txt_name, file_dir="", dtype=torch.float32):
if file_dir.startswith("/"):
save_dir = file_dir
else:
save_dir = os.path.join(DATA_PATH, file_dir)
txt_path = os.path.join(save_dir, txt_name)
assert os.path.isfile(txt_path)
with codecs.open(txt_path, "r", "utf-8") as fr:
file_lines = fr.read().splitlines()
first_line = file_lines[0]
in_shape = tuple([int(fl) for fl in first_line.split(",")])
datas = []
for data_line in file_lines[1:]:
c_data = [float(fl) for fl in data_line.split(",")]
datas.append(c_data)
res_tensor = torch.tensor(datas, dtype=dtype).reshape(in_shape)
return res_tensor
| {"hexsha": "1bb7730f4723e9d2d11045d1cba2b3b269364463", "size": 3019, "ext": "py", "lang": "Python", "max_stars_repo_path": "mtPytorchTest/utils/save.py", "max_stars_repo_name": "bingchengzhou/mtTest", "max_stars_repo_head_hexsha": "8bd72cfbf6acb11322960d199c8ae44919ae0f2f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mtPytorchTest/utils/save.py", "max_issues_repo_name": "bingchengzhou/mtTest", "max_issues_repo_head_hexsha": "8bd72cfbf6acb11322960d199c8ae44919ae0f2f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mtPytorchTest/utils/save.py", "max_forks_repo_name": "bingchengzhou/mtTest", "max_forks_repo_head_hexsha": "8bd72cfbf6acb11322960d199c8ae44919ae0f2f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9404761905, "max_line_length": 87, "alphanum_fraction": 0.6326598211, "include": true, "reason": "import numpy", "num_tokens": 756} |
\chapter{Conclusion}
\label{ch:conclution}
Your Conclution. | {"hexsha": "e49d66b546b6ae06563f401508973765a5626e30", "size": 60, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "content/conclution.tex", "max_stars_repo_name": "matthias85/latex_template_thesis", "max_stars_repo_head_hexsha": "9f3583b0a6ca6111f960d8f102d677132cd7a663", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "content/conclution.tex", "max_issues_repo_name": "matthias85/latex_template_thesis", "max_issues_repo_head_hexsha": "9f3583b0a6ca6111f960d8f102d677132cd7a663", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "content/conclution.tex", "max_forks_repo_name": "matthias85/latex_template_thesis", "max_forks_repo_head_hexsha": "9f3583b0a6ca6111f960d8f102d677132cd7a663", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.0, "max_line_length": 21, "alphanum_fraction": 0.8, "num_tokens": 21} |
from __future__ import print_function
import os
import sys
###########################################################
# Change to your own library path
###########################################################
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import timedelta
from dateutil.parser import parse
import pytz
# date_time format
date_time_format = '%Y-%m-%dT%H:%M:%S.%f'
date_format = '%Y-%m-%d'
pt = pytz.timezone('US/Pacific')
def make_dir(data_path):
if os.path.exists(data_path) is False:
os.mkdir(data_path)
if __name__ == '__main__':
# Read data root path
delevery_root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir, 'delivery_data'))
igtb_df = pd.read_csv(os.path.join(delevery_root_path, 'p2_baseline_processed_11.2020.csv'), index_col=0)
# id,started_ts,completed_ts,duration,weekcompleted,gender,traininglevel
# Phase1Training_IGTB.csv
save_igtb_df = pd.DataFrame()
id_list = list(igtb_df.index)
id_list.sort()
for id in id_list:
row_df = pd.DataFrame(index=[id])
row_df['id'] = id[:8]
row_df['gender'] = igtb_df.loc[id, 'gender']
row_df['traininglevel'] = igtb_df.loc[id, 'traininglevel']
save_igtb_df = pd.concat([save_igtb_df, row_df])
save_igtb_df.to_csv(os.path.join(delevery_root_path, 'tiles-phase2-gpr', 'Phase2', 'Phase2_IGTB.csv'), index=False)
| {"hexsha": "ead5832ad2d6568fd1a149c1ba44ee2297743ecd", "size": 1422, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/survey/extract_igtb.py", "max_stars_repo_name": "usc-sail/tiles-2019-dataset", "max_stars_repo_head_hexsha": "c91c2da3a282757f67981c5e10aa93faab7b414b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/survey/extract_igtb.py", "max_issues_repo_name": "usc-sail/tiles-2019-dataset", "max_issues_repo_head_hexsha": "c91c2da3a282757f67981c5e10aa93faab7b414b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/survey/extract_igtb.py", "max_forks_repo_name": "usc-sail/tiles-2019-dataset", "max_forks_repo_head_hexsha": "c91c2da3a282757f67981c5e10aa93faab7b414b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9473684211, "max_line_length": 143, "alphanum_fraction": 0.6800281294, "include": true, "reason": "import numpy", "num_tokens": 384} |
"""
Run Matthews example using specified config file.
"""
import os
import pickle
import shutil
import configparser
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tqdm
import torch
import lpde
from torch.utils.tensorboard import SummaryWriter
import utils
import tests
import int.matthews as mint
torch.set_default_dtype(torch.float32)
POINTS_W = 397.48499
plt.set_cmap('plasma')
def integrate_system(config, n, path, verbose=False):
"""Integrate Matthews system."""
pars = {}
pars["gamma"] = float(config["gamma"])
pars["omega"] = np.linspace(-pars["gamma"], pars["gamma"], int(config["N_int"])) + \
float(config["gamma_off"])
pars["K"] = float(config["K"])
data_dict = mint.integrate(pars=pars,
dt=float(config["dt"]), N=int(config["N_int"]), T=int(config["T"]),
tmin=float(config["tmin"]), tmax=float(config["tmax"]),
gamma_off=float(config["gamma_off"]),
append_init=True)
if verbose:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data_dict["xx"], data_dict["data"][-1].real, label='real')
ax.plot(data_dict["xx"], data_dict["data"][-1].imag, label='imag')
ax.set_xlabel(r'$\omega$')
plt.title('snapshot')
plt.legend()
plt.show()
for i in range(n):
perturbation = utils.perturb_limit_cycle(data_dict, i, config)
if verbose:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data_dict["xx"], perturbation.real, label='real')
ax.plot(data_dict["xx"], perturbation.imag, label='imag')
ax.set_xlabel(r'$\omega$')
plt.title('perturbation')
plt.legend()
plt.show()
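# Integrate the unperturbed trajectory (p=0) and the two opposite perturbations (p=+1, p=-1)
# starting from the same snapshot of the reference trajectory.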
for p in [0, -1, 1]:
data_perturbed = mint.integrate(pars=pars,
dt=data_dict["dt"], N=data_dict["N"], T=data_dict["T"],
tmin=0, tmax=data_dict["tmax"]-data_dict["tmin"],
ic='manual',
Ainit=data_dict["data"][int(i*int(config["T_off"]))] +
float(config["eps"])*p*perturbation,
gamma_off=float(config["gamma_off"]),
append_init=True)
data_perturbed["data"] = data_perturbed["data"][:, ::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["omega"] = data_perturbed["omega"][::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["xx"] = data_perturbed["xx"][::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["N"] = int(config["N"])
output = open(path + 'run'+str(i)+'_p_'+str(p)+'.pkl', 'wb')
pickle.dump(data_perturbed, output)
output.close()
def integrate_system_gamma(config, n, path, verbose=False):
"""Integrate Matthews system."""
gamma_list = np.linspace(1.7, 1.8, n-5)
gamma_list2 = np.append(1.7+0.1*np.random.random(4), 1.8)
# np.random.shuffle(gamma_list)
gamma_crit = 1.747
for i, gamma in enumerate(np.append(gamma_list, gamma_list2)):
print("gamma: "+str(gamma))
pars = {}
pars["gamma"] = gamma
pars["omega"] = np.linspace(-pars["gamma"], pars["gamma"], int(config["N_int"])) + \
float(config["gamma_off"])
pars["K"] = float(config["K"])
data_dict = mint.integrate(pars=pars,
dt=float(config["dt"]), N=int(config["N_int"]), T=int(config["T"]),
tmin=float(config["tmin"]), tmax=float(config["tmax"]),
gamma_off=float(config["gamma_off"]),
append_init=True)
if gamma < gamma_crit:
perturbation = utils.perturb_limit_cycle(data_dict, i, config)
else:
perturbation = utils.perturb_fixed_point(data_dict, i, config)
if verbose:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(pars["omega"], data_dict["data"][-1].real, label='real')
ax.plot(pars["omega"], data_dict["data"][-1].imag, label='imag')
ax.set_xlabel(r'$\omega$')
plt.title('snapshot')
plt.legend()
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(pars["omega"], perturbation.real, label='real')
ax.plot(pars["omega"], perturbation.imag, label='imag')
ax.set_xlabel(r'$\omega$')
plt.title('perturbation')
plt.legend()
plt.show()
for p in [0, -1, 1]:
data_perturbed = mint.integrate(pars=pars,
dt=data_dict["dt"], N=data_dict["N"], T=data_dict["T"],
tmin=0, tmax=data_dict["tmax"]-data_dict["tmin"],
ic='manual',
Ainit=data_dict["data"][int(i*int(config["T_off"]))] +
float(config["eps"])*p*perturbation,
gamma_off=float(config["gamma_off"]),
append_init=True)
data_perturbed["data"] = data_perturbed["data"][:, ::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["omega"] = data_perturbed["omega"][::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["xx"] = data_perturbed["xx"][::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["N"] = int(config["N"])
output = open(path + 'run'+str(i)+'_p_'+str(p)+'.pkl', 'wb')
pickle.dump(data_perturbed, output)
output.close()
def make_plot_paper(config):
"""Plot SLE learning results."""
dataset_train = utils.Dataset(0, int(config["TRAINING"]["n_train"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"])
dataset = utils.Dataset(int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"])-1,
int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"])
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=int(config["TRAINING"]['batch_size']), shuffle=True,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
network = lpde.network.Network(config["MODEL"], n_vars=2)
model = lpde.model.Model(dataloader, dataloader, network, config["TRAINING"],
path=config["GENERAL"]["save_dir"]+'/')
model.load_network('test.model')
limit_amps_true = []
limit_amps_learned = []
gamma_list = []
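# For each training run, roll out the learned model from the initial snapshot via
# integrate_svd and compare the mean final amplitude <|W|> with the ground truth,
# as a function of gamma.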
for i in range(int(config["TRAINING"]["n_train"])):
print(i)
dataset = utils.Dataset(i, i+1, config["MODEL"],
path=config["GENERAL"]["save_dir"])
prediction = model.integrate_svd(dataset, dataset_train.svd, 0, 20000-1)
if i == 0:
prediction0 = model.integrate_svd(dataset, dataset_train.svd, 0, 20000-1)
limit_amps_true.append(
np.mean(np.abs(dataset.x_data[20000-1, 0]+1.0j*dataset.x_data[20000-1, 1])))
limit_amps_learned.append(np.mean(np.abs(prediction[-1, 0]+1.0j*prediction[-1, 1])))
gamma_list.append(dataset.param[0]*0.02+1.75)
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
str(int(config["TRAINING"]["n_train"])+int(config["TRAINING"]["n_test"])-1) + '_p_'+str(-1)+'.pkl', 'rb')
data_unperturbed = pickle.load(pkl_file)
pkl_file.close()
v_scaled = np.load(config["GENERAL"]["save_dir"]+'/v_scaled.npy')
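    # Export the source data for Figure 4 and its two insets before plotting.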
df_data = {'gamma': gamma_list,
'limit_amps_true': limit_amps_true,
'limit_amps_predicted': limit_amps_learned}
df = pd.DataFrame(df_data)
df.to_excel(r'Source_Data_Figure_4.xlsx',
sheet_name='Figure 4', index=False)
np.save('Source_Data_Figure_4_Inset_1.npy', prediction[::10, 0])
np.save('Source_Data_Figure_4_Inset_2.npy', prediction0[::10, 0])
fig = plt.figure(figsize=(POINTS_W/72, 0.66*POINTS_W/72))
ax1 = fig.add_subplot(111)
scat1 = ax1.scatter(gamma_list, limit_amps_true, label='true')
scat2 = ax1.scatter(gamma_list, limit_amps_learned, label='learned',
marker='+')
ax1.set_xlabel(r'$\gamma$', labelpad=-2)
ax1.set_ylabel(r'$\langle | W_{\mbox{limit}}|\rangle$', labelpad=-1)
ax1.set_ylim((-0.005, 0.18))
ax1.set_xlim((min(gamma_list)-0.002, max(gamma_list)+0.002))
axins1 = ax1.inset_axes([0.67, 0.33, 0.32, 0.42])
axins1.pcolor(np.linspace(-1, 1, data_unperturbed["N"]), data_unperturbed["tt"][::10],
prediction[::10, 0], rasterized=True)
phi_arr = np.linspace(-1, 1, data_unperturbed["N"])
axins1.axvline(x=(phi_arr[3]+phi_arr[4])/2, ymin=0, ymax=1, color='white', lw=1)
axins1.axvline(x=(phi_arr[-4]+phi_arr[-5])/2, ymin=0, ymax=1, color='white', lw=1)
axins1.set_xlabel(r'$\phi_1$')
axins1.set_ylabel(r'$t$', labelpad=-2)
axins2 = ax1.inset_axes([0.24, 0.55, 0.32, 0.42])
axins2.pcolor(np.linspace(-1, 1, data_unperturbed["N"]), data_unperturbed["tt"][::10],
prediction0[::10, 0], rasterized=True)
axins2.axvline(x=(phi_arr[3]+phi_arr[4])/2, ymin=0, ymax=1, color='white', lw=1)
axins2.axvline(x=(phi_arr[-4]+phi_arr[-5])/2, ymin=0, ymax=1, color='white', lw=1)
axins2.set_xlabel(r'$\phi_1$')
axins2.set_ylabel(r'$t$', labelpad=-2)
plt.legend(fontsize=8)
ax1.annotate("", xy=(1.7995, 0.001), xytext=(1.79, 0.05),
arrowprops=dict(arrowstyle="->"))
ax1.annotate("", xy=(1.7005, 0.117), xytext=(1.719, 0.12),
arrowprops=dict(arrowstyle="->"))
plt.subplots_adjust(top=0.94, wspace=0.45, right=0.98, bottom=0.1, hspace=0.3, left=0.15)
plt.show()
def main(config):
"""Integrate system and train model."""
verbose = config["GENERAL"].getboolean("verbose")
# Create data folders
if not os.path.exists(config["GENERAL"]["save_dir"]):
os.makedirs(config["GENERAL"]["save_dir"])
if not os.path.exists(config["GENERAL"]["save_dir"]+'/tests'):
os.makedirs(config["GENERAL"]["save_dir"]+'/tests')
# Create training and test data
if not os.path.exists(config["GENERAL"]["save_dir"]+'/dat'):
os.makedirs(config["GENERAL"]["save_dir"]+'/dat')
if config["MODEL"].getboolean("use_param"):
integrate_system_gamma(config["SYSTEM"], int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["GENERAL"]["save_dir"]+'/dat/',
verbose=verbose)
else:
integrate_system(config["SYSTEM"], int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["GENERAL"]["save_dir"]+'/dat/',
verbose=verbose)
# Create Dataset
dataset_train = utils.Dataset(0, int(config["TRAINING"]["n_train"]), config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
dataset_test = utils.Dataset(int(config["TRAINING"]["n_train"]),
int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
if config["GENERAL"].getboolean("use_dmaps"):
utils.dmaps_transform(int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]), dataset_train,
path=config["GENERAL"]["save_dir"], verbose=verbose)
dataset_train = utils.Dataset(0, int(config["TRAINING"]["n_train"]), config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
dataset_test = utils.Dataset(int(config["TRAINING"]["n_train"]),
int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
dataloader_train = utils.FastDataLoader(
dataset_train, batch_size=int(config["TRAINING"]['batch_size']), shuffle=True,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
dataloader_test = utils.FastDataLoader(
dataset_test, batch_size=int(config["TRAINING"]['batch_size']), shuffle=False,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
network = lpde.network.Network(config["MODEL"], n_vars=2)
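    # Spatial grid spacing of the integration domain (2*gamma/N), rescaled by the model's rescale_dx factor.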
delta_x = 2*float(config["SYSTEM"]["gamma"])/int(config["SYSTEM"]["N"]) * \
float(config["MODEL"]["rescale_dx"])
if verbose:
tests.test_fd_coeffs(network, path=config["GENERAL"]["save_dir"])
tests.test_derivs(network, torch.tensor(dataset_train.x_data[:1],
dtype=torch.get_default_dtype()),
torch.tensor([delta_x], dtype=torch.get_default_dtype()),
path=config["GENERAL"]["save_dir"])
model = lpde.model.Model(dataloader_train, dataloader_test, network, config["TRAINING"],
path=config["GENERAL"]["save_dir"]+'/')
if not os.path.exists(config["GENERAL"]["save_dir"]+'/log'):
os.makedirs(config["GENERAL"]["save_dir"]+'/log')
else:
shutil.rmtree(config["GENERAL"]["save_dir"]+'/log')
os.makedirs(config["GENERAL"]["save_dir"]+'/log')
logger = SummaryWriter(config["GENERAL"]["save_dir"]+'/log/')
progress_bar = tqdm.tqdm(range(0, int(config["TRAINING"]['epochs'])),
total=int(config["TRAINING"]['epochs']),
leave=True, desc=lpde.utils.progress(0, 0))
if config["GENERAL"].getboolean('proceed_training'):
model.load_network('test.model')
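    # Training loop: log train/validation loss and the current learning rate to TensorBoard,
    # then save the trained network.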
for epoch in progress_bar:
train_loss = model.train()
val_loss = model.validate()
progress_bar.set_description(lpde.utils.progress(train_loss, val_loss))
logger.add_scalar('Loss/train', train_loss, epoch)
logger.add_scalar('Loss/val', val_loss, epoch)
logger.add_scalar('learning rate', model.optimizer.param_groups[-1]["lr"], epoch)
model.save_network('test.model')
if verbose:
model = lpde.model.Model(dataloader_train, dataloader_test, network, config["TRAINING"],
path=config["GENERAL"]["save_dir"]+'/')
model.load_network('test.model')
tests.test_learned_dt(model, dataset_test, mint.f,
path=config["GENERAL"]["save_dir"], idx=0)
tests.test_transient_dynamics(model, dataset_test, dataset_train.svd,
idx=int(config["TRAINING"]["n_train"]), t_off=0,
path=config["GENERAL"]["save_dir"])
if __name__ == "__main__":
config = configparser.ConfigParser()
config.read('config/config.cfg')
main(config)
make_plot_paper(config)
| {"hexsha": "41396e631718abef4792684c8ef16a1ecc9058c9", "size": 16035, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/sle_gamma/run.py", "max_stars_repo_name": "fkemeth/emergent_pdes", "max_stars_repo_head_hexsha": "d0501f21c9eb569543a19d4d95d6c91a9ccb11fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/sle_gamma/run.py", "max_issues_repo_name": "fkemeth/emergent_pdes", "max_issues_repo_head_hexsha": "d0501f21c9eb569543a19d4d95d6c91a9ccb11fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/sle_gamma/run.py", "max_forks_repo_name": "fkemeth/emergent_pdes", "max_forks_repo_head_hexsha": "d0501f21c9eb569543a19d4d95d6c91a9ccb11fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1690140845, "max_line_length": 125, "alphanum_fraction": 0.5451200499, "include": true, "reason": "import numpy", "num_tokens": 3775} |
from Models import sub_models
import matplotlib.pyplot as plt # plotting
import matplotlib.font_manager as font_manager # plot fonts
import numpy as np # x,y axes values
from sklearn.model_selection import KFold # cross validation
import copy # splice site results copy
import tensorflow as tf
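# Each builder below assembles and compiles a Keras model for acceptor ('acc_*') or
# donor ('don_*') splice-site classification. The kwargs flags control the behaviour:
# 'test' returns the compiled but unfitted model, 'validate' fits with a validation
# split, and 'train' fits on the training data alone.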
def acc_build_cnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4, 1)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(
filters=208,
kernel_size=(4, 4),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=152,
kernel_size=(2, 2),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(
pool_size=(2, 2),
padding='same'
)
)
model.add(tf.keras.layers.Conv2D(
filters=360,
kernel_size=(5,5),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(
pool_size=(4, 4),
padding='same'
)
)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.5))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.005,
decay_steps=600,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=43,
validation_data=(X_val, y_val),
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=7, mode='min', min_delta=0.001),
]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=43,
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='loss', patience=7, mode='min', min_delta=0.001),
]
)
return (history, model)
def acc_build_cnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=136,
kernel_size=5,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv1D(
filters=344,
kernel_size=6,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=9, padding="same"))
model.add(tf.keras.layers.Conv1D(
filters=232,
kernel_size=5,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=4, padding="same"))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.3))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.01,
decay_steps=300,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=8,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=8,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def acc_build_cnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=344,
kernel_size=8,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=9, padding="same"))
model.add(tf.keras.layers.Conv1D(
filters=160,
kernel_size=7,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2, padding="same"))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.1))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.005,
decay_steps=460,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def acc_build_cnn4(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=144,
kernel_size=10,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=5, padding="same"))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.1))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.005,
decay_steps=460,
decay_rate=2.0,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=9,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=9,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def acc_build_cnn5(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4, 1)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(
filters=56,
kernel_size=(3, 3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=48,
kernel_size=(1, 1),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=400,
kernel_size=(3, 3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(
pool_size=(5, 5),
padding='same'
)
)
model.add(tf.keras.layers.Conv2D(
filters=80,
kernel_size=(4,4),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.2))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.0025,
decay_steps=540,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=57,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=57,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def acc_build_dnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(
units=608,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0025),
)
)
model.add(tf.keras.layers.Dense(
units=576,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0025),
)
)
model.add(tf.keras.layers.Dropout(rate=0.6))
model.add(tf.keras.layers.Dense(
units=512,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.05),
)
)
model.add(tf.keras.layers.Dropout(rate=0.3))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.001,
decay_steps=460,
decay_rate=1.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
# model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
# filepath=f'tmp/dnn',
# monitor='val_accuracy',
# mode='max',
# save_best_only=True
# )
# tf.keras.callbacks.ModelCheckpoint(filepath='/tmp/checkpoint',monitor='val_loss', save_best_only=True, mode='min')
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=9,
validation_data=(X_val, y_val),
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True),
]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=9,
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True),
]
)
return (history, model)
def acc_build_dnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(
units=672,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.005),
)
)
model.add(tf.keras.layers.Dropout(rate=0.1))
model.add(tf.keras.layers.Dense(
units=576,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.005),
)
)
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.005,
decay_steps=280,
decay_rate=1.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=9,
validation_data=(X_val, y_val),
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True),
]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=9,
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True),
]
)
return (history, model)
def acc_build_dnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(
units=704,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.001),
)
)
model.add(tf.keras.layers.Dropout(rate=0.7))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.0025,
decay_steps=600,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=26,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=26,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def acc_build_rnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.LSTM(
units=288,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.Dropout(rate=0.2))
model.add(tf.keras.layers.LSTM(
units=160,
activation='tanh',
)
)
model.add(tf.keras.layers.Dropout(rate=0.6))
model.add(tf.keras.layers.Dense(
units=160,
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(0.001)
)
)
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.001,
decay_steps=520,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=33,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=33,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def acc_build_rnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.LSTM(
units=320,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.LSTM(
units=672,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.LSTM(
units=352,
activation='tanh',
)
)
model.add(tf.keras.layers.Dropout(rate=0.1))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.001,
decay_steps=500,
decay_rate=2.0,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=37,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=37,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def acc_build_rnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.LSTM(
units=576,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.Dense(
units=640,
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(0.005)
)
)
model.add(tf.keras.layers.LSTM(
units=224,
activation='tanh',
)
)
model.add(tf.keras.layers.Dense(
units=480,
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(0.005)
)
)
model.add(tf.keras.layers.Dropout(rate=0.5))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.0001,
decay_steps=220,
decay_rate=1.0,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=14,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=14,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
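# Donor-site ('don_*') builders below follow the same pattern as the acceptor-site
# builders above, each with its own tuned architecture and hyperparameters.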
def don_build_cnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4,1)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(
filters=80,
kernel_size=(5,5),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=360,
kernel_size=(4, 4),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(
pool_size=(6, 6),
padding='same'
)
)
model.add(tf.keras.layers.Conv2D(
filters=8,
kernel_size=(3, 3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(
pool_size=(2, 2),
padding='same'
)
)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.1))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.01,
decay_steps=100,
decay_rate=1.0,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def don_build_cnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=400,
kernel_size=5,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv1D(
filters=216,
kernel_size=10,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=6, padding="same"))
model.add(tf.keras.layers.Conv1D(
filters=344,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2, padding="same"))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.5))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.005,
decay_steps=80,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def don_build_cnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=368,
kernel_size=5,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2, padding="same"))
model.add(tf.keras.layers.Conv1D(
filters=384,
kernel_size=2,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=6, padding="same"))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.5))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.0025,
decay_steps=460,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
            epochs=10,  # was 1, changed to 10
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
            epochs=10,  # was 1, changed to 10
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def don_build_cnn4(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=360,
kernel_size=10,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2, padding="same"))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.4))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.0025,
decay_steps=220,
decay_rate=1.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def don_build_cnn5(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4,1)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(
filters=256,
kernel_size=(6, 6),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=288,
kernel_size=(4, 4),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=264,
kernel_size=(6, 6),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(
pool_size=(3, 3),
padding='same'
)
)
model.add(tf.keras.layers.Conv2D(
filters=392,
kernel_size=(2,2),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.5))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.001,
decay_steps=380,
decay_rate=2.0,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def don_build_dnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(
units=352,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0025),
)
)
model.add(tf.keras.layers.Dense(
units=640,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.05),
)
)
model.add(tf.keras.layers.Dropout(rate=0.6))
model.add(tf.keras.layers.Dense(
units=32,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.01),
)
)
model.add(tf.keras.layers.Dropout(rate=0.5))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.01,
decay_steps=300,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=33,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=33,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def don_build_dnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(
units=288,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.001),
)
)
model.add(tf.keras.layers.Dropout(rate=0.6))
model.add(tf.keras.layers.Dense(
units=384,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0025),
)
)
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.005,
decay_steps=540,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=14,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=14,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def don_build_dnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(
units=288,
activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0025),
)
)
model.add(tf.keras.layers.Dropout(rate=0.4))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.01,
decay_steps=400,
decay_rate=2.0,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=9,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=9,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def don_build_rnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.LSTM(
units=416,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.Dropout(rate=0.2))
model.add(tf.keras.layers.LSTM(
units=672,
activation='tanh',
)
)
model.add(tf.keras.layers.Dropout(rate=0.3))
model.add(tf.keras.layers.Dense(
units=192,
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(0.0025)
)
)
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.0025,
decay_steps=580,
decay_rate=1.0,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=16,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=16,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def don_build_rnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.LSTM(
units=128,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.LSTM(
units=224,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.LSTM(
units=352,
activation='tanh',
)
)
model.add(tf.keras.layers.Dropout(rate=0.1))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.005,
decay_steps=340,
decay_rate=2.0,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
validation_data=(X_val, y_val),
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=10,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)]
)
return (history, model)
def don_build_rnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
model = tf.keras.Sequential()
input_shape = (rows, 4)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.LSTM(
units=608,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.Dense(
units=480,
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(0.0025)
)
)
model.add(tf.keras.layers.LSTM(
units=224,
activation='tanh',
)
)
model.add(tf.keras.layers.Dense(
units=32,
activation="relu",
kernel_regularizer=tf.keras.regularizers.l2(0.001)
)
)
model.add(tf.keras.layers.Dropout(rate=0.3))
model.add(tf.keras.layers.Dense(2, activation="softmax"))
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.0025,
decay_steps=500,
decay_rate=0.5,
staircase=False
)
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(lr_schedule),
metrics=[
'accuracy',
tf.keras.metrics.BinaryAccuracy(name='bin_acc'),
tf.keras.metrics.BinaryCrossentropy(name='bin_cross'),
tf.keras.metrics.AUC(name='auc'),
tf.keras.metrics.Precision(name='pre'),
tf.keras.metrics.Recall(name='rec')
]
)
if kwargs['test']:
return model
if kwargs['validate']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=33,
validation_data=(X_val, y_val),
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=6, min_delta=0.001, restore_best_weights=True)
]
)
if kwargs['train']:
history = model.fit(
x=X_train,
y=y_train,
batch_size=32,
epochs=33,
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='loss', patience=6, min_delta=0.001, restore_best_weights=True)
]
)
return (history, model)
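# Dispatcher that selects the builder matching the splice-site type ('acceptor' or
# 'donor') and the requested model name.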
def build_single_model(
rows,
ss_type,
model,
X_train,
y_train,
X_val,
y_val,
kwargs,
):
# add something like *args or **kwargs here
if ss_type == 'acceptor':
if model == 'cnn1':
return acc_build_cnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'cnn2':
return acc_build_cnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'cnn3':
return acc_build_cnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'cnn4':
return acc_build_cnn4(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'cnn5':
return acc_build_cnn5(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'dnn1':
return acc_build_dnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'dnn2':
return acc_build_dnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'dnn3':
return acc_build_dnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'rnn1':
return acc_build_rnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'rnn2':
return acc_build_rnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'rnn3':
return acc_build_rnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if ss_type == 'donor':
if model == 'cnn1':
return don_build_cnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'cnn2':
return don_build_cnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'cnn3':
return don_build_cnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'cnn4':
return don_build_cnn4(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'cnn5':
return don_build_cnn5(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'dnn1':
return don_build_dnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'dnn2':
return don_build_dnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'dnn3':
return don_build_dnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'rnn1':
return don_build_rnn1(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'rnn2':
return don_build_rnn2(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
if model == 'rnn3':
return don_build_rnn3(
rows,
X_train,
y_train,
X_val,
y_val,
kwargs,
)
# if model == 'rnn':
# return build_rnn(
# dataset_row_num,
# dataset,
# model,
# splice_type,
# summary,
# X_train,
# y_train,
# batch_size,# wont need after tuning
# epochs, # wont need after tuning
# X_val,
# y_val,
# fold,
# num_folds,
# save,
# )
| {"hexsha": "121133f7d15c0af0310b51ebd102e88a9397fc11", "size": 53639, "ext": "py", "lang": "Python", "max_stars_repo_path": "build_models.py", "max_stars_repo_name": "tmartin2/EnsembleSplice-Inactive", "max_stars_repo_head_hexsha": "a161ff007b47ceadd3a21376f2eac2971bb81d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "build_models.py", "max_issues_repo_name": "tmartin2/EnsembleSplice-Inactive", "max_issues_repo_head_hexsha": "a161ff007b47ceadd3a21376f2eac2971bb81d90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build_models.py", "max_forks_repo_name": "tmartin2/EnsembleSplice-Inactive", "max_forks_repo_head_hexsha": "a161ff007b47ceadd3a21376f2eac2971bb81d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9003232759, "max_line_length": 132, "alphanum_fraction": 0.5538320998, "include": true, "reason": "import numpy", "num_tokens": 12116} |
#!/usr/bin/env python
# coding: utf-8
import cv2
from math import sqrt
import numpy as np
in_path = ''
out_directory = ''
cut_count = 100
cut_base = int(sqrt(cut_count))
origin_image = cv2.imread(in_path)
h, w = origin_image.shape[:2]
h_d = int(h / cut_base)
w_d = int(w / cut_base)
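# Slice the image into a cut_base x cut_base grid of equally sized tiles and save each tile.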
for i in range(1, cut_base + 1):
    for j in range(1, cut_base + 1):
cut_image = origin_image[(i - 1) * h_d:i * h_d, (j - 1) * w_d:j * w_d]
cv2.imwrite(f'{out_directory}/{i}-{j}.png', cut_image)
| {"hexsha": "e08f9b0cd57c420559c1ebde509910866406dbe6", "size": 485, "ext": "py", "lang": "Python", "max_stars_repo_path": "photomosaic/tests/test.py", "max_stars_repo_name": "zibuyu1995/ApplicationInImageProcessing", "max_stars_repo_head_hexsha": "75699ebafdfb6c9b2c800f059e377a13f2409c82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 53, "max_stars_repo_stars_event_min_datetime": "2018-03-13T12:32:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T03:13:23.000Z", "max_issues_repo_path": "photomosaic/tests/test.py", "max_issues_repo_name": "zibuyu1995/ApplicationInImageProcessing", "max_issues_repo_head_hexsha": "75699ebafdfb6c9b2c800f059e377a13f2409c82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2018-03-05T08:46:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T04:28:44.000Z", "max_forks_repo_path": "photomosaic/tests/test.py", "max_forks_repo_name": "zibuyu1995/ApplicationInImageProcessing", "max_forks_repo_head_hexsha": "75699ebafdfb6c9b2c800f059e377a13f2409c82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2018-03-17T03:47:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T01:51:30.000Z", "avg_line_length": 22.0454545455, "max_line_length": 78, "alphanum_fraction": 0.6432989691, "include": true, "reason": "import numpy", "num_tokens": 159} |
import librosa
from librosa.feature import mfcc
import os
import sys
import numpy as np
import scipy
import os.path
import pandas as pd
from sklearn.cluster import KMeans
import h5py
import cv2
from utils import data_cleaner
audio_dir = sys.argv[1]
audio_names = []
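# Compute a fixed-size (20 x 155) MFCC feature matrix for every .wav file in the input directory.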
def get_mfcc(audio_dir):
result_array = np.empty((0,20,155))
for filename in os.listdir(audio_dir):
if filename.endswith(".wav"):
try:
audio_names.append(filename)
y, sr = librosa.core.load(audio_dir+"/"+filename, sr=None)
mfcc = librosa.feature.mfcc(y=y, sr=sr)
mfcc = mfcc.flatten()
mfcc = np.pad(mfcc, (0, (3100-mfcc.shape[0])%3100), 'constant').reshape((20,155))
result_array = np.append(result_array, [mfcc], axis=0)
except Exception as e:
print(e)
pass
return result_array
result = get_mfcc(audio_dir)
results = np.reshape(result,(result.shape[0],-1))
path_to_feats = sys.argv[2]
number_of_clusters = int(sys.argv[3])
# Run KMeans
kmeans = KMeans(n_clusters=number_of_clusters).fit(results)
# Generate CSV file with labels
myfile = open(sys.argv[2] + '/mfcc_k'+str(number_of_clusters)+'.csv', 'w')
for i in range(len(audio_names)):
myfile.write(audio_names[i][:-4]+".jpg,"+str(kmeans.labels_[i])+"\n")
myfile.close()
#Shuffle CSV file
os.system('shuf '+path_to_feats+'/mfcc_k'+str(number_of_clusters)+'.csv > '+path_to_feats+'/mfcc_k'+str(number_of_clusters)+'_shuf.csv')
# Split file into train and validation CSV
number_of_valid_samples = 5208 # Change as per your need
os.system('awk \'NR < '+str(number_of_valid_samples)+' { print >> "'+path_to_feats+'/val_mfcc_k'+str(number_of_clusters)+'.csv"; next } {print >> "'+path_to_feats+'/train_mfcc_k'+str(number_of_clusters)+'.csv" }\' '+path_to_feats+'/mfcc_k'+str(number_of_clusters)+'_shuf.csv')
# 3. Create Trainable Dataset
train_df, val_df = data_cleaner.clean_dataset(path_to_feats+"/train_mfcc_k"+str(number_of_clusters)+".csv", path_to_feats+"/val_mfcc_k"+str(number_of_clusters)+".csv")
data_cleaner.create_h5(train_df, val_df, path_to_feats+"/train_mfcc_k"+str(number_of_clusters)+".hdf5", path_to_feats+"/val_mfcc_k"+str(number_of_clusters)+".hdf5")
| {"hexsha": "7aa8d76d76886f990150c904cdb7c1076b4d2633", "size": 2346, "ext": "py", "lang": "Python", "max_stars_repo_path": "pretext_training/get_mfcc.py", "max_stars_repo_name": "tensor-flo/ambient-sound-self-supervision", "max_stars_repo_head_hexsha": "72e6944107620fb8c3ef4b428a2dfb1765e59492", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-12-24T10:41:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-12T03:00:34.000Z", "max_issues_repo_path": "pretext_training/get_mfcc.py", "max_issues_repo_name": "tensor-flo/ambient-sound-self-supervision", "max_issues_repo_head_hexsha": "72e6944107620fb8c3ef4b428a2dfb1765e59492", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-12-19T11:38:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:09:54.000Z", "max_forks_repo_path": "pretext_training/get_mfcc.py", "max_forks_repo_name": "tensor-flo/ambient-sound-self-supervision", "max_forks_repo_head_hexsha": "72e6944107620fb8c3ef4b428a2dfb1765e59492", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-08T02:06:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-04T08:54:22.000Z", "avg_line_length": 34.5, "max_line_length": 277, "alphanum_fraction": 0.6658141517, "include": true, "reason": "import numpy,import scipy", "num_tokens": 609} |
theory GabrielaLimonta
imports "~~/src/HOL/IMP/Star" Complex_Main
begin
text {* We build on @{theory Complex_Main} instead of @{theory Main} to access
the real numbers. *}
subsection "Arithmetic Expressions"
type_synonym val = real
type_synonym vname = string
type_synonym state = "vname \<Rightarrow> val"
text_raw{*\snip{aexptDef}{0}{2}{% *}
datatype aexp = Rc real | V vname | Plus aexp aexp | Div aexp aexp
text_raw{*}%endsnip*}
inductive taval :: "aexp \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool" where
"taval (Rc r) s r" |
"taval (V x) s (s x)" |
"taval a1 s r1 \<Longrightarrow> taval a2 s r2
\<Longrightarrow> taval (Plus a1 a2) s (r1+r2)" |
"taval a1 s r1 \<Longrightarrow> taval a2 s r2
\<Longrightarrow> taval (Div a1 a2) s (r1 / r2)"
inductive_cases [elim!]:
"taval (Rc i) s v"
"taval (V x) s v"
"taval (Plus a1 a2) s v"
"taval (Div a1 a2) s v"
subsection "Boolean Expressions"
datatype bexp = Bc bool | Not bexp | And bexp bexp | Less aexp aexp
inductive tbval :: "bexp \<Rightarrow> state \<Rightarrow> bool \<Rightarrow> bool" where
"tbval (Bc v) s v" |
"tbval b s bv \<Longrightarrow> tbval (Not b) s (\<not> bv)" |
"tbval b1 s bv1 \<Longrightarrow> tbval b2 s bv2 \<Longrightarrow> tbval (And b1 b2) s (bv1 & bv2)" |
"taval a1 s r1 \<Longrightarrow> taval a2 s r2 \<Longrightarrow> tbval (Less a1 a2) s (r1 < r2)"
subsection "Syntax of Commands"
(* a copy of Com.thy - keep in sync! *)
datatype
com = SKIP
| Assign vname aexp ("_ ::= _" [1000, 61] 61)
| Seq com com ("_;; _" [60, 61] 60)
| If bexp com com ("IF _ THEN _ ELSE _" [0, 0, 61] 61)
| While bexp com ("WHILE _ DO _" [0, 61] 61)
subsection "Small-Step Semantics of Commands"
inductive
small_step :: "(com \<times> state) \<Rightarrow> (com \<times> state) \<Rightarrow> bool" (infix "\<rightarrow>" 55)
where
Assign: "taval a s v \<Longrightarrow> (x ::= a, s) \<rightarrow> (SKIP, s(x := v))" |
Seq1: "(SKIP;;c,s) \<rightarrow> (c,s)" |
Seq2: "(c1,s) \<rightarrow> (c1',s') \<Longrightarrow> (c1;;c2,s) \<rightarrow> (c1';;c2,s')" |
IfTrue: "tbval b s True \<Longrightarrow> (IF b THEN c1 ELSE c2,s) \<rightarrow> (c1,s)" |
IfFalse: "tbval b s False \<Longrightarrow> (IF b THEN c1 ELSE c2,s) \<rightarrow> (c2,s)" |
While: "(WHILE b DO c,s) \<rightarrow> (IF b THEN c;; WHILE b DO c ELSE SKIP,s)"
lemmas small_step_induct = small_step.induct[split_format(complete)]
subsection "The Type System"
datatype ty = Neg | Pos | Zero | Any
definition ty_of_c :: "real \<Rightarrow> ty" where
"ty_of_c r = (if r = 0 then Zero else (if r > 0 then Pos else Neg))"
fun ty_of_plus :: "ty \<Rightarrow> ty \<Rightarrow> ty" where
"ty_of_plus Neg Neg = Neg" |
"ty_of_plus Pos Pos = Pos" |
"ty_of_plus Neg Pos = Any" |
"ty_of_plus Pos Neg = Any" |
"ty_of_plus Any _ = Any" |
"ty_of_plus _ Any = Any" |
"ty_of_plus Zero a = a" |
"ty_of_plus a Zero = a"
fun ty_of_div :: "ty \<Rightarrow> ty \<Rightarrow> ty option" where
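  (* Sign analysis for division: a divisor typed Zero is always rejected (None), and a
     divisor typed Any is rejected unless the dividend is Zero, since it may be zero. *)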
"ty_of_div Neg Neg = Some Pos" |
"ty_of_div Pos Pos = Some Pos" |
"ty_of_div Neg Pos = Some Neg" |
"ty_of_div Pos Neg = Some Neg" |
"ty_of_div a Zero = None" |
"ty_of_div Zero a = Some Zero" |
"ty_of_div Any Pos = Some Any" |
"ty_of_div Any Neg = Some Any" |
"ty_of_div _ Any = None"
type_synonym tyenv = "vname \<Rightarrow> ty"
inductive atyping :: "tyenv \<Rightarrow> aexp \<Rightarrow> ty \<Rightarrow> bool"
("(1_/ \<turnstile>/ (_ :/ _))" [50,0,50] 50)
where
Rc_ty: "\<Gamma> \<turnstile> Rc r : ty_of_c r" |
V_ty: "\<Gamma> \<turnstile> V x : \<Gamma> x" |
Plus_ty: "\<Gamma> \<turnstile> a1 : \<tau>1 \<Longrightarrow> \<Gamma> \<turnstile> a2 : \<tau>2 \<Longrightarrow> \<Gamma> \<turnstile> Plus a1 a2 : ty_of_plus \<tau>1 \<tau>2" |
Div_ty: "\<Gamma> \<turnstile> a1 : \<tau>1 \<Longrightarrow> \<Gamma> \<turnstile> a2 : \<tau>2 \<Longrightarrow> ty_of_div \<tau>1 \<tau>2 = Some \<tau> \<Longrightarrow> \<Gamma> \<turnstile> Div a1 a2 : \<tau>"
fun values_of_type :: "ty \<Rightarrow> real set" where
"values_of_type Neg = {x. x<0}" |
"values_of_type Pos = {x. x>0}" |
"values_of_type Zero = {0}" |
"values_of_type Any = {x. x<0 \<or> x\<ge>0}"
declare atyping.intros [intro!]
inductive_cases [elim!]:
"\<Gamma> \<turnstile> V x : \<tau>" "\<Gamma> \<turnstile> Rc r : \<tau>" "\<Gamma> \<turnstile> Plus a1 a2 : \<tau>" "\<Gamma> \<turnstile> Div a1 a2 : \<tau>"
text{* Warning: the ``:'' notation leads to syntactic ambiguities,
i.e. multiple parse trees, because ``:'' also stands for set membership.
In most situations Isabelle's type system will reject all but one parse tree,
but will still inform you of the potential ambiguity. *}
inductive btyping :: "tyenv \<Rightarrow> bexp \<Rightarrow> bool" (infix "\<turnstile>" 50)
where
B_ty: "\<Gamma> \<turnstile> Bc v" |
Not_ty: "\<Gamma> \<turnstile> b \<Longrightarrow> \<Gamma> \<turnstile> Not b" |
And_ty: "\<Gamma> \<turnstile> b1 \<Longrightarrow> \<Gamma> \<turnstile> b2 \<Longrightarrow> \<Gamma> \<turnstile> And b1 b2" |
Less_ty: "\<Gamma> \<turnstile> a1 : \<tau> \<Longrightarrow> \<Gamma> \<turnstile> a2 : \<tau> \<Longrightarrow> \<Gamma> \<turnstile> Less a1 a2"
declare btyping.intros [intro!]
inductive_cases [elim!]: "\<Gamma> \<turnstile> Not b" "\<Gamma> \<turnstile> And b1 b2" "\<Gamma> \<turnstile> Less a1 a2"
inductive ctyping :: "tyenv \<Rightarrow> com \<Rightarrow> bool" (infix "\<turnstile>" 50) where
Skip_ty: "\<Gamma> \<turnstile> SKIP" |
Assign_ty: "\<Gamma> \<turnstile> a : \<Gamma>(x) \<Longrightarrow> \<Gamma> \<turnstile> x ::= a" |
Seq_ty: "\<Gamma> \<turnstile> c1 \<Longrightarrow> \<Gamma> \<turnstile> c2 \<Longrightarrow> \<Gamma> \<turnstile> c1;;c2" |
If_ty: "\<Gamma> \<turnstile> b \<Longrightarrow> \<Gamma> \<turnstile> c1 \<Longrightarrow> \<Gamma> \<turnstile> c2 \<Longrightarrow> \<Gamma> \<turnstile> IF b THEN c1 ELSE c2" |
While_ty: "\<Gamma> \<turnstile> b \<Longrightarrow> \<Gamma> \<turnstile> c \<Longrightarrow> \<Gamma> \<turnstile> WHILE b DO c"
declare ctyping.intros [intro!]
inductive_cases [elim!]:
"\<Gamma> \<turnstile> x ::= a" "\<Gamma> \<turnstile> c1;;c2"
"\<Gamma> \<turnstile> IF b THEN c1 ELSE c2"
"\<Gamma> \<turnstile> WHILE b DO c"
subsection "Well-typed Programs Do Not Get Stuck"
(*
fun type :: "val \<Rightarrow> ty" where
"type (Iv i) = Ity" |
"type (Rv r) = Rty"
lemma type_eq_Rty[simp]: "type v = Rty \<longleftrightarrow> (\<exists>r. v = Rv r)"
by (cases v) simp_all
*)
definition styping :: "tyenv \<Rightarrow> state \<Rightarrow> bool" (infix "\<turnstile>" 50)
where "\<Gamma> \<turnstile> s \<longleftrightarrow> (\<forall>x. s x \<in> values_of_type (\<Gamma> x))"
lemma apreservation:
"\<Gamma> \<turnstile> a : \<tau> \<Longrightarrow> taval a s v \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> v \<in> (values_of_type \<tau>)"
proof (induction arbitrary: v rule: atyping.induct)
print_cases
case (Rc_ty \<Gamma> r)
thus ?case using ty_of_c_def by fastforce
next
case (V_ty \<Gamma> x)
thus ?case
proof -
have "s x = v" using V_ty.prems(1) by force
thus "v \<in> values_of_type (\<Gamma> x)" using V_ty.prems(2) styping_def by blast
qed
next
case (Plus_ty \<Gamma> a1 \<tau>1 a2 \<tau>2)
thus ?case sorry
next
case (Div_ty \<Gamma> a1 \<tau>1 a2 \<tau>2 \<tau>)
thus ?case using taval.intros(4)[of a1 s r1 a2 r2] sorry
qed
(*
apply(induction arbitrary: v rule: atyping.induct)
apply (fastforce simp: styping_def)+
done*)
lemma aprogress: "\<Gamma> \<turnstile> a : \<tau> \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> \<exists>v. taval a s v"
proof(induction rule: atyping.induct)
print_cases
case (Plus_ty \<Gamma> a1 \<tau>1 a2 \<tau>2)
then obtain v1 v2 where v: "taval a1 s v1" "taval a2 s v2" by blast
show ?case using Plus_ty taval.intros by blast
qed (auto intro: taval.intros)
lemma bprogress: "\<Gamma> \<turnstile> b \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> \<exists>v. tbval b s v"
proof(induction rule: btyping.induct)
print_cases
case (Less_ty \<Gamma> a1 t a2)
then obtain v1 v2 where v: "taval a1 s v1" "taval a2 s v2"
by (metis aprogress)
show ?case using tbval.intros v(1) v(2) by blast
qed (auto intro: tbval.intros)
theorem progress:
"\<Gamma> \<turnstile> c \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> c \<noteq> SKIP \<Longrightarrow> \<exists>cs'. (c,s) \<rightarrow> cs'"
proof(induction rule: ctyping.induct)
case Skip_ty thus ?case by simp
next
case Assign_ty
thus ?case by (metis Assign aprogress)
next
case Seq_ty thus ?case by simp (metis Seq1 Seq2)
next
case (If_ty \<Gamma> b c1 c2)
then obtain bv where "tbval b s bv" by (metis bprogress)
show ?case
proof(cases bv)
assume "bv"
with `tbval b s bv` show ?case by simp (metis IfTrue)
next
assume "\<not>bv"
with `tbval b s bv` show ?case by simp (metis IfFalse)
qed
next
case While_ty show ?case by (metis While)
qed
theorem styping_preservation:
"(c,s) \<rightarrow> (c',s') \<Longrightarrow> \<Gamma> \<turnstile> c \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> \<Gamma> \<turnstile> s'"
proof(induction rule: small_step_induct)
case Assign thus ?case
by (auto simp: styping_def) (metis Assign(1,3) apreservation)
qed auto
theorem ctyping_preservation:
"(c,s) \<rightarrow> (c',s') \<Longrightarrow> \<Gamma> \<turnstile> c \<Longrightarrow> \<Gamma> \<turnstile> c'"
by (induct rule: small_step_induct) (auto simp: ctyping.intros)
abbreviation small_steps :: "com * state \<Rightarrow> com * state \<Rightarrow> bool" (infix "\<rightarrow>*" 55)
where "x \<rightarrow>* y == star small_step x y"
theorem type_sound:
"(c,s) \<rightarrow>* (c',s') \<Longrightarrow> \<Gamma> \<turnstile> c \<Longrightarrow> \<Gamma> \<turnstile> s \<Longrightarrow> c' \<noteq> SKIP
\<Longrightarrow> \<exists>cs''. (c',s') \<rightarrow> cs''"
apply(induction rule:star_induct)
apply (metis progress)
by (metis styping_preservation ctyping_preservation)
end
| {"author": "glimonta", "repo": "Semantics", "sha": "68d3cacdb2101c7e7c67fd3065266bb37db5f760", "save_path": "github-repos/isabelle/glimonta-Semantics", "path": "github-repos/isabelle/glimonta-Semantics/Semantics-68d3cacdb2101c7e7c67fd3065266bb37db5f760/Exercise7/GabrielaLimonta.thy"} |
##*
## MIT License
##
## Plotter - Copyright (c) 2020-2021 Aleksandr Kazakov, Varvara Prokacheva
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##*
# TODO:
# Experimental
# pandas
# extend to several line at once
# ...
from .bot_engine import BotEngine
from dataclasses import dataclass, field
try:
from pygnuplot import gnuplot
from pygnuplot.gnuplot import Gnuplot
except Exception as e:
Gnuplot = object
print(f"GNUPLOT is not available: {e}")
import pandas as pd
import numpy as np
class GnuplotEngineError(Exception):
pass
@dataclass
class GnuplotEngine(BotEngine):
internal_name : str = "[GnuplotEngine]"
g : Gnuplot = None
def __post_init__(self):
self.g = gnuplot.Gnuplot(log=False, terminal = 'dumb size 79, 24 aspect 2, 1 mono')
self.g.set(
title = f"'{self.title}'",
xlabel = f"'{self.xname}'",
ylabel = f"'{self.yname}'",
)
if self.xlog: self.g.set("logscale x")
if self.ylog: self.g.set("logscale y")
# X
if self.xmin.lower() != "auto" or self.xmax.lower() != "auto":
# drop value
xmin = "" if self.xmin == "Auto" else str(self.xmin)
xmax = "" if self.xmax == "Auto" else str(self.xmax)
self.g.set(f"xrange [{str(xmin)}:{str(xmax)}")
# y
if self.ymin.lower() != "auto" or self.ymax.lower() != "auto":
# drop value
ymin = "" if self.ymin == "Auto" else str(self.ymin)
ymax = "" if self.ymax == "Auto" else str(self.ymax)
self.g.set(f"yrange [{str(ymin)}:{str(ymax)}")
def plot(self, x, y, key_name_f='', key_name="", **kwargs):
_x, _y, _xerr, _yerr = None, None, None, None,
if len(np.shape(y)) == 2: _y = y[0]; _yerr = y[1]
else: _y = y
if len(np.shape(x)) == 2: _x = x[0]; _xerr = x[1]
else: _x = x
df = pd.DataFrame(data = {
'col1': _x,
'col2': _xerr,
'col3': _y,
'col4': _yerr,
}
)
using_str = ""
if _x is not None: using_str += "1"
if _y is not None: using_str += ":3"
if _xerr is not None and _yerr is not None:
raise GnuplotEngineError(NotImplemented)
if _x is not None and _y is not None:
using_str = "1:2"
if self.plotLine: using_str += " with line"
if _x is not None and _y is not None and _yerr is not None:
using_str = "1:3:4 with yerr"
if key_name_f: key = self.name_converter(key_name_f)
else: key = key_name
self.g.plot_data(df, f"using {using_str} t '{key}' ")
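
# --- Hedged usage sketch (not part of the original module) ----------------------
# Shows how GnuplotEngine might be driven. The constructor keywords below
# (title, xname, yname) and the attributes read in __post_init__ and plot()
# (xlog, ylog, xmin/xmax, ymin/ymax, plotLine, name_converter) are assumed to be
# supplied by the BotEngine base class; they are inferred from this file, not a
# documented API.
#
#   engine = GnuplotEngine(title="demo", xname="t", yname="f(t)")
#   x = [0, 1, 2, 3]
#   y = [[0.0, 1.0, 4.0, 9.0],      # values
#        [0.1, 0.1, 0.2, 0.3]]      # y error bars -> rendered "with yerr"
#   engine.plot(x, y, key_name="t squared")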
| {"hexsha": "9296a83aa3f6579d28b05054f38f4a04cb9ea8d8", "size": 3798, "ext": "py", "lang": "Python", "max_stars_repo_path": "vplotter/engines/gnuplot_engine.py", "max_stars_repo_name": "AlexanderDKazakov/Plotter", "max_stars_repo_head_hexsha": "38874946c0013c30b7749d60368f2e28b6d498fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vplotter/engines/gnuplot_engine.py", "max_issues_repo_name": "AlexanderDKazakov/Plotter", "max_issues_repo_head_hexsha": "38874946c0013c30b7749d60368f2e28b6d498fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-09T11:26:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-09T11:26:08.000Z", "max_forks_repo_path": "vplotter/engines/gnuplot_engine.py", "max_forks_repo_name": "AlexanderDKazakov/Plotter", "max_forks_repo_head_hexsha": "38874946c0013c30b7749d60368f2e28b6d498fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8301886792, "max_line_length": 91, "alphanum_fraction": 0.5984728805, "include": true, "reason": "import numpy", "num_tokens": 1020} |
[STATEMENT]
lemma sorted_wrt_gen2: "sorted_wrt (<\<^sub>r\<^sub>l\<^sub>e\<^sub>x\<^sub>2) (gen2 A B m n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sorted_wrt (<\<^sub>r\<^sub>l\<^sub>e\<^sub>x\<^sub>2) (gen2 A B m n)
[PROOF STEP]
by (intro sorted_wrt_concat_map_map [where Q = "(<\<^sub>r\<^sub>l\<^sub>e\<^sub>x)"] sorted_wrt_gen)
(auto simp: set_gen rlex_def intro: lex_append_leftI lex_append_rightI) | {"llama_tokens": 191, "file": "Diophantine_Eqns_Lin_Hom_Simple_Algorithm", "length": 1} |
using KernelMachines
using Test
using FiniteDiff: finite_difference_gradient
using Zygote: gradient
@testset "utils" begin
s = rand(10, 3)
slices = KernelMachines.split_matrix(s, (2, 3, 5))
@test length(slices) == 3
@test slices[1] == s[1:2, :]
@test slices[2] == s[3:5, :]
@test slices[3] == s[6:10, :]
end
@testset "kernelmachine" begin
dims = (2, 3, 2)
data = rand(5, 10)
input = rand(5, 50)
dm = KernelMachine(data; dims=dims)
g_auto = gradient(input) do input
r, n = dm(input)
return sum(r) + n
end
g_num = finite_difference_gradient(input) do input
r, n = dm(input)
return sum(r) + n
end
@test isapprox(first(g_auto), g_num)
end | {"hexsha": "af3b3d2a86d8bc0bfcac4b89a9e4dc5fde3fd359", "size": 728, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "Veos-Digital/KernelMachines.jl", "max_stars_repo_head_hexsha": "fcd74ecd11e585adf168d78c62a99e75f9f0dec1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "Veos-Digital/KernelMachines.jl", "max_issues_repo_head_hexsha": "fcd74ecd11e585adf168d78c62a99e75f9f0dec1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "Veos-Digital/KernelMachines.jl", "max_forks_repo_head_hexsha": "fcd74ecd11e585adf168d78c62a99e75f9f0dec1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1034482759, "max_line_length": 54, "alphanum_fraction": 0.6043956044, "num_tokens": 241} |
#include "storage/storage.hpp"
#include "storage/io.hpp"
#include "storage/shared_datatype.hpp"
#include "storage/shared_memory.hpp"
#include "storage/shared_memory_ownership.hpp"
#include "storage/shared_monitor.hpp"
#include "storage/view_factory.hpp"
#include "contractor/files.hpp"
#include "customizer/files.hpp"
#include "extractor/files.hpp"
#include "guidance/files.hpp"
#include "partitioner/files.hpp"
#include "util/exception.hpp"
#include "util/exception_utils.hpp"
#include "util/fingerprint.hpp"
#include "util/log.hpp"
#ifdef __linux__
#include <sys/mman.h>
#endif
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/interprocess/sync/file_lock.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <iterator>
#include <new>
#include <string>
namespace osrm
{
namespace storage
{
namespace
{
using Monitor = SharedMonitor<SharedRegionRegister>;
struct RegionHandle
{
std::unique_ptr<SharedMemory> memory;
char *data_ptr;
std::uint16_t shm_key;
};
RegionHandle setupRegion(SharedRegionRegister &shared_register,
const storage::BaseDataLayout &layout)
{
// This is safe because we have an exclusive lock for all osrm-datastore processes.
auto shm_key = shared_register.ReserveKey();
// ensure that the shared memory region we want to write to is really removed
    // this is only needed for failure recovery because we actually wait for all clients
// to detach at the end of the function
if (storage::SharedMemory::RegionExists(shm_key))
{
util::Log(logWARNING) << "Old shared memory region " << (int)shm_key << " still exists.";
util::UnbufferedLog() << "Retrying removal... ";
storage::SharedMemory::Remove(shm_key);
util::UnbufferedLog() << "ok.";
}
io::BufferWriter writer;
serialization::write(writer, layout);
auto encoded_static_layout = writer.GetBuffer();
// Allocate shared memory block
auto regions_size = encoded_static_layout.size() + layout.GetSizeOfLayout();
util::Log() << "Data layout has a size of " << encoded_static_layout.size() << " bytes";
util::Log() << "Allocating shared memory of " << regions_size << " bytes";
auto memory = makeSharedMemory(shm_key, regions_size);
// Copy memory static_layout to shared memory and populate data
char *shared_memory_ptr = static_cast<char *>(memory->Ptr());
auto data_ptr =
std::copy_n(encoded_static_layout.data(), encoded_static_layout.size(), shared_memory_ptr);
return RegionHandle{std::move(memory), data_ptr, shm_key};
}
bool swapData(Monitor &monitor,
SharedRegionRegister &shared_register,
const std::map<std::string, RegionHandle> &handles,
int max_wait)
{
std::vector<RegionHandle> old_handles;
{ // Lock for write access shared region mutex
boost::interprocess::scoped_lock<Monitor::mutex_type> lock(monitor.get_mutex(),
boost::interprocess::defer_lock);
if (max_wait >= 0)
{
if (!lock.timed_lock(boost::posix_time::microsec_clock::universal_time() +
boost::posix_time::seconds(max_wait)))
{
util::Log(logERROR) << "Could not aquire current region lock after " << max_wait
<< " seconds. Data update failed.";
for (auto &pair : handles)
{
SharedMemory::Remove(pair.second.shm_key);
}
return false;
}
}
else
{
lock.lock();
}
for (auto &pair : handles)
{
auto region_id = shared_register.Find(pair.first);
if (region_id == SharedRegionRegister::INVALID_REGION_ID)
{
region_id = shared_register.Register(pair.first, pair.second.shm_key);
}
else
{
auto &shared_region = shared_register.GetRegion(region_id);
old_handles.push_back(RegionHandle{
makeSharedMemory(shared_region.shm_key), nullptr, shared_region.shm_key});
shared_region.shm_key = pair.second.shm_key;
shared_region.timestamp++;
}
}
}
util::Log() << "All data loaded. Notify all client about new data in:";
for (const auto &pair : handles)
{
util::Log() << pair.first << "\t" << static_cast<int>(pair.second.shm_key);
}
monitor.notify_all();
for (auto &old_handle : old_handles)
{
util::UnbufferedLog() << "Marking old shared memory region "
<< static_cast<int>(old_handle.shm_key) << " for removal... ";
// SHMCTL(2): Mark the segment to be destroyed. The segment will actually be destroyed
// only after the last process detaches it.
storage::SharedMemory::Remove(old_handle.shm_key);
util::UnbufferedLog() << "ok.";
util::UnbufferedLog() << "Waiting for clients to detach... ";
old_handle.memory->WaitForDetach();
util::UnbufferedLog() << " ok.";
shared_register.ReleaseKey(old_handle.shm_key);
}
util::Log() << "All clients switched.";
return true;
}
} // namespace
void populateLayoutFromFile(const boost::filesystem::path &path, storage::BaseDataLayout &layout)
{
tar::FileReader reader(path, tar::FileReader::VerifyFingerprint);
std::vector<tar::FileReader::FileEntry> entries;
reader.List(std::back_inserter(entries));
for (const auto &entry : entries)
{
const auto name_end = entry.name.rfind(".meta");
if (name_end == std::string::npos)
{
auto number_of_elements = reader.ReadElementCount64(entry.name);
layout.SetBlock(entry.name, Block{number_of_elements, entry.size, entry.offset});
}
}
}
Storage::Storage(StorageConfig config_) : config(std::move(config_)) {}
int Storage::Run(int max_wait, const std::string &dataset_name, bool only_metric)
{
BOOST_ASSERT_MSG(config.IsValid(), "Invalid storage config");
util::LogPolicy::GetInstance().Unmute();
boost::filesystem::path lock_path =
boost::filesystem::temp_directory_path() / "osrm-datastore.lock";
if (!boost::filesystem::exists(lock_path))
{
boost::filesystem::ofstream ofs(lock_path);
}
boost::interprocess::file_lock file_lock(lock_path.string().c_str());
boost::interprocess::scoped_lock<boost::interprocess::file_lock> datastore_lock(
file_lock, boost::interprocess::defer_lock);
if (!datastore_lock.try_lock())
{
util::UnbufferedLog(logWARNING) << "Data update in progress, waiting until it finishes... ";
datastore_lock.lock();
util::UnbufferedLog(logWARNING) << "ok.";
}
#ifdef __linux__
// try to disable swapping on Linux
const bool lock_flags = MCL_CURRENT | MCL_FUTURE;
if (-1 == mlockall(lock_flags))
{
util::Log(logWARNING) << "Could not request RAM lock";
}
#endif
    // Get the next region ID and timestamp without locking shared barriers.
    // Because of datastore_lock, the only write operation will occur sequentially later.
Monitor monitor(SharedRegionRegister{});
auto &shared_register = monitor.data();
// Populate a memory layout into stack memory
std::vector<SharedDataIndex::AllocatedRegion> regions;
std::map<std::string, RegionHandle> handles;
// We keep this handles to read-only regions
// that we don't update to be able to cross-validate
// data when loading it
std::vector<RegionHandle> readonly_handles;
if (only_metric)
{
auto region_id = shared_register.Find(dataset_name + "/static");
if (region_id == storage::SharedRegionRegister::INVALID_REGION_ID)
{
throw util::exception("Cannot update the metric to a dataset that does not exist yet.");
}
auto static_region = shared_register.GetRegion(region_id);
auto static_memory = makeSharedMemory(static_region.shm_key);
std::unique_ptr<storage::BaseDataLayout> static_layout =
std::make_unique<storage::ContiguousDataLayout>();
io::BufferReader reader(reinterpret_cast<char *>(static_memory->Ptr()),
static_memory->Size());
serialization::read(reader, *static_layout);
auto layout_size = reader.GetPosition();
auto *data_ptr = reinterpret_cast<char *>(static_memory->Ptr()) + layout_size;
regions.push_back({data_ptr, std::move(static_layout)});
readonly_handles.push_back({std::move(static_memory), data_ptr, static_region.shm_key});
}
else
{
std::unique_ptr<storage::BaseDataLayout> static_layout =
std::make_unique<storage::ContiguousDataLayout>();
Storage::PopulateLayoutWithRTree(*static_layout);
std::vector<std::pair<bool, boost::filesystem::path>> files = Storage::GetStaticFiles();
Storage::PopulateLayout(*static_layout, files);
auto static_handle = setupRegion(shared_register, *static_layout);
regions.push_back({static_handle.data_ptr, std::move(static_layout)});
handles[dataset_name + "/static"] = std::move(static_handle);
}
std::unique_ptr<storage::BaseDataLayout> updatable_layout =
std::make_unique<storage::ContiguousDataLayout>();
std::vector<std::pair<bool, boost::filesystem::path>> files = Storage::GetUpdatableFiles();
Storage::PopulateLayout(*updatable_layout, files);
auto updatable_handle = setupRegion(shared_register, *updatable_layout);
regions.push_back({updatable_handle.data_ptr, std::move(updatable_layout)});
handles[dataset_name + "/updatable"] = std::move(updatable_handle);
SharedDataIndex index{std::move(regions)};
if (!only_metric)
{
PopulateStaticData(index);
}
PopulateUpdatableData(index);
swapData(monitor, shared_register, handles, max_wait);
return EXIT_SUCCESS;
}
std::vector<std::pair<bool, boost::filesystem::path>> Storage::GetStaticFiles()
{
constexpr bool REQUIRED = true;
constexpr bool OPTIONAL = false;
std::vector<std::pair<bool, boost::filesystem::path>> files = {
{OPTIONAL, config.GetPath(".osrm.cells")},
{OPTIONAL, config.GetPath(".osrm.partition")},
{REQUIRED, config.GetPath(".osrm.icd")},
{REQUIRED, config.GetPath(".osrm.properties")},
{REQUIRED, config.GetPath(".osrm.nbg_nodes")},
{REQUIRED, config.GetPath(".osrm.ebg_nodes")},
{REQUIRED, config.GetPath(".osrm.tls")},
{REQUIRED, config.GetPath(".osrm.tld")},
{REQUIRED, config.GetPath(".osrm.timestamp")},
{REQUIRED, config.GetPath(".osrm.maneuver_overrides")},
{REQUIRED, config.GetPath(".osrm.edges")},
{REQUIRED, config.GetPath(".osrm.names")},
{REQUIRED, config.GetPath(".osrm.ramIndex")}};
for (const auto &file : files)
{
if (file.first == REQUIRED && !boost::filesystem::exists(file.second))
{
throw util::exception("Could not find required filed: " + std::get<1>(file).string());
}
}
return files;
}
std::vector<std::pair<bool, boost::filesystem::path>> Storage::GetUpdatableFiles()
{
constexpr bool REQUIRED = true;
constexpr bool OPTIONAL = false;
std::vector<std::pair<bool, boost::filesystem::path>> files = {
{OPTIONAL, config.GetPath(".osrm.mldgr")},
{OPTIONAL, config.GetPath(".osrm.cell_metrics")},
{OPTIONAL, config.GetPath(".osrm.hsgr")},
{REQUIRED, config.GetPath(".osrm.datasource_names")},
{REQUIRED, config.GetPath(".osrm.geometry")},
{REQUIRED, config.GetPath(".osrm.turn_weight_penalties")},
{REQUIRED, config.GetPath(".osrm.turn_duration_penalties")}};
for (const auto &file : files)
{
if (file.first == REQUIRED && !boost::filesystem::exists(file.second))
{
throw util::exception("Could not find required filed: " + std::get<1>(file).string());
}
}
return files;
}
std::string Storage::PopulateLayoutWithRTree(storage::BaseDataLayout &layout)
{
// Figure out the path to the rtree file (it's not a tar file)
auto absolute_file_index_path = boost::filesystem::absolute(config.GetPath(".osrm.fileIndex"));
// Convert the boost::filesystem::path object into a plain string
// that can then be stored as a member of an allocator object
auto rtree_filename = absolute_file_index_path.string();
// Here, we hardcode the special file_index_path block name.
// The important bit here is that the "offset" is set to zero
layout.SetBlock("/common/rtree/file_index_path", make_block<char>(rtree_filename.length() + 1));
return rtree_filename;
}
/**
* This function examines all our data files and figures out how much
* memory needs to be allocated, and the position of each data structure
* in that big block. It updates the fields in the layout parameter.
*/
void Storage::PopulateLayout(storage::BaseDataLayout &layout,
const std::vector<std::pair<bool, boost::filesystem::path>> &files)
{
for (const auto &file : files)
{
if (boost::filesystem::exists(file.second))
{
populateLayoutFromFile(file.second, layout);
}
}
}
void Storage::PopulateStaticData(const SharedDataIndex &index)
{
// read actual data into shared memory object //
// store the filename of the on-disk portion of the RTree
{
const auto file_index_path_ptr = index.GetBlockPtr<char>("/common/rtree/file_index_path");
// make sure we have 0 ending
std::fill(file_index_path_ptr,
file_index_path_ptr + index.GetBlockSize("/common/rtree/file_index_path"),
0);
const auto absolute_file_index_path =
boost::filesystem::absolute(config.GetPath(".osrm.fileIndex")).string();
BOOST_ASSERT(static_cast<std::size_t>(index.GetBlockSize(
"/common/rtree/file_index_path")) >= absolute_file_index_path.size());
std::copy(
absolute_file_index_path.begin(), absolute_file_index_path.end(), file_index_path_ptr);
}
// Name data
{
auto name_table = make_name_table_view(index, "/common/names");
extractor::files::readNames(config.GetPath(".osrm.names"), name_table);
}
// Timestamp mark
{
auto timestamp_ref = make_timestamp_view(index, "/common/timestamp");
std::string ts;
extractor::files::readTimestamp(config.GetPath(".osrm.timestamp"), ts);
if (!ts.empty())
{
memcpy(const_cast<char *>(timestamp_ref.data()), ts.data(), ts.size());
}
}
// Turn lane data
{
auto turn_lane_data = make_lane_data_view(index, "/common/turn_lanes");
extractor::files::readTurnLaneData(config.GetPath(".osrm.tld"), turn_lane_data);
}
// Turn lane descriptions
{
auto views = make_turn_lane_description_views(index, "/common/turn_lanes");
extractor::files::readTurnLaneDescriptions(
config.GetPath(".osrm.tls"), std::get<0>(views), std::get<1>(views));
}
// Load edge-based nodes data
{
auto node_data = make_ebn_data_view(index, "/common/ebg_node_data");
extractor::files::readNodeData(config.GetPath(".osrm.ebg_nodes"), node_data);
}
// Load original edge data
{
auto turn_data = make_turn_data_view(index, "/common/turn_data");
auto connectivity_checksum_ptr =
index.GetBlockPtr<std::uint32_t>("/common/connectivity_checksum");
guidance::files::readTurnData(
config.GetPath(".osrm.edges"), turn_data, *connectivity_checksum_ptr);
}
// Loading list of coordinates
{
auto views = make_nbn_data_view(index, "/common/nbn_data");
extractor::files::readNodes(
config.GetPath(".osrm.nbg_nodes"), std::get<0>(views), std::get<1>(views));
}
// store search tree portion of rtree
{
auto rtree = make_search_tree_view(index, "/common/rtree");
extractor::files::readRamIndex(config.GetPath(".osrm.ramIndex"), rtree);
}
// FIXME we only need to get the weight name
std::string metric_name;
// load profile properties
{
const auto profile_properties_ptr =
index.GetBlockPtr<extractor::ProfileProperties>("/common/properties");
extractor::files::readProfileProperties(config.GetPath(".osrm.properties"),
*profile_properties_ptr);
metric_name = profile_properties_ptr->GetWeightName();
}
// Load intersection data
{
auto intersection_bearings_view =
make_intersection_bearings_view(index, "/common/intersection_bearings");
auto entry_classes = make_entry_classes_view(index, "/common/entry_classes");
extractor::files::readIntersections(
config.GetPath(".osrm.icd"), intersection_bearings_view, entry_classes);
}
if (boost::filesystem::exists(config.GetPath(".osrm.partition")))
{
auto mlp = make_partition_view(index, "/mld/multilevelpartition");
partitioner::files::readPartition(config.GetPath(".osrm.partition"), mlp);
}
if (boost::filesystem::exists(config.GetPath(".osrm.cells")))
{
auto storage = make_cell_storage_view(index, "/mld/cellstorage");
partitioner::files::readCells(config.GetPath(".osrm.cells"), storage);
}
if (boost::filesystem::exists(config.GetPath(".osrm.cell_metrics")))
{
auto exclude_metrics = make_cell_metric_view(index, "/mld/metrics/" + metric_name);
std::unordered_map<std::string, std::vector<customizer::CellMetricView>> metrics = {
{metric_name, std::move(exclude_metrics)},
};
customizer::files::readCellMetrics(config.GetPath(".osrm.cell_metrics"), metrics);
}
// load maneuver overrides
{
auto views = make_maneuver_overrides_views(index, "/common/maneuver_overrides");
extractor::files::readManeuverOverrides(
config.GetPath(".osrm.maneuver_overrides"), std::get<0>(views), std::get<1>(views));
}
}
void Storage::PopulateUpdatableData(const SharedDataIndex &index)
{
// load compressed geometry
{
auto segment_data = make_segment_data_view(index, "/common/segment_data");
extractor::files::readSegmentData(config.GetPath(".osrm.geometry"), segment_data);
}
{
const auto datasources_names_ptr =
index.GetBlockPtr<extractor::Datasources>("/common/data_sources_names");
extractor::files::readDatasources(config.GetPath(".osrm.datasource_names"),
*datasources_names_ptr);
}
// load turn weight penalties
{
auto turn_duration_penalties = make_turn_weight_view(index, "/common/turn_penalty");
extractor::files::readTurnWeightPenalty(config.GetPath(".osrm.turn_weight_penalties"),
turn_duration_penalties);
}
// load turn duration penalties
{
auto turn_duration_penalties = make_turn_duration_view(index, "/common/turn_penalty");
extractor::files::readTurnDurationPenalty(config.GetPath(".osrm.turn_duration_penalties"),
turn_duration_penalties);
}
// FIXME we only need to get the weight name
std::string metric_name;
// load profile properties
{
extractor::ProfileProperties properties;
extractor::files::readProfileProperties(config.GetPath(".osrm.properties"), properties);
metric_name = properties.GetWeightName();
}
if (boost::filesystem::exists(config.GetPath(".osrm.hsgr")))
{
const std::string metric_prefix = "/ch/metrics/" + metric_name;
auto contracted_metric = make_contracted_metric_view(index, metric_prefix);
std::unordered_map<std::string, contractor::ContractedMetricView> metrics = {
{metric_name, std::move(contracted_metric)}};
std::uint32_t graph_connectivity_checksum = 0;
contractor::files::readGraph(
config.GetPath(".osrm.hsgr"), metrics, graph_connectivity_checksum);
auto turns_connectivity_checksum =
*index.GetBlockPtr<std::uint32_t>("/common/connectivity_checksum");
if (turns_connectivity_checksum != graph_connectivity_checksum)
{
throw util::exception(
"Connectivity checksum " + std::to_string(graph_connectivity_checksum) + " in " +
config.GetPath(".osrm.hsgr").string() + " does not equal to checksum " +
std::to_string(turns_connectivity_checksum) + " in " +
config.GetPath(".osrm.edges").string());
}
}
if (boost::filesystem::exists(config.GetPath(".osrm.cell_metrics")))
{
auto exclude_metrics = make_cell_metric_view(index, "/mld/metrics/" + metric_name);
std::unordered_map<std::string, std::vector<customizer::CellMetricView>> metrics = {
{metric_name, std::move(exclude_metrics)},
};
customizer::files::readCellMetrics(config.GetPath(".osrm.cell_metrics"), metrics);
}
if (boost::filesystem::exists(config.GetPath(".osrm.mldgr")))
{
auto graph_view = make_multi_level_graph_view(index, "/mld/multilevelgraph");
std::uint32_t graph_connectivity_checksum = 0;
customizer::files::readGraph(
config.GetPath(".osrm.mldgr"), graph_view, graph_connectivity_checksum);
auto turns_connectivity_checksum =
*index.GetBlockPtr<std::uint32_t>("/common/connectivity_checksum");
if (turns_connectivity_checksum != graph_connectivity_checksum)
{
throw util::exception(
"Connectivity checksum " + std::to_string(graph_connectivity_checksum) + " in " +
config.GetPath(".osrm.hsgr").string() + " does not equal to checksum " +
std::to_string(turns_connectivity_checksum) + " in " +
config.GetPath(".osrm.edges").string());
}
}
}
} // namespace storage
} // namespace osrm
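
// --- Hedged usage sketch (not part of the original file) ------------------------
// Roughly how the osrm-datastore tool is expected to drive Storage. The
// StorageConfig constructor taking a ".osrm" base path and the dataset name used
// below are assumptions for illustration, not taken from this file.
//
//   osrm::storage::StorageConfig config("berlin-latest.osrm");
//   osrm::storage::Storage storage(config);
//   // Wait indefinitely for readers when swapping regions (max_wait = -1),
//   // loading both static and updatable data for the given dataset name.
//   storage.Run(/*max_wait=*/-1, /*dataset_name=*/"my_dataset", /*only_metric=*/false);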
| {"hexsha": "9185d99c0bca88f56b6f78eb9c3d7739e015ee45", "size": 22617, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/storage/storage.cpp", "max_stars_repo_name": "OgreTransporter/osrm-backend", "max_stars_repo_head_hexsha": "35ff807e1d782b71200a1b1b4d9f781fd8352072", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-06-29T10:06:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-29T10:06:35.000Z", "max_issues_repo_path": "src/storage/storage.cpp", "max_issues_repo_name": "OgreTransporter/osrm-backend", "max_issues_repo_head_hexsha": "35ff807e1d782b71200a1b1b4d9f781fd8352072", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/storage/storage.cpp", "max_forks_repo_name": "OgreTransporter/osrm-backend", "max_forks_repo_head_hexsha": "35ff807e1d782b71200a1b1b4d9f781fd8352072", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6322795341, "max_line_length": 100, "alphanum_fraction": 0.6470354158, "num_tokens": 5002} |
function varargout = grDilate(varargin)
%GRDILATE Morphological dilation on graph.
%
% LBL2 = grDilate(EDGES, LBL1)
%   Each node of the graph is assigned the highest label found among its own
%   label and those of its neighbours.
%
% Example
% grDilate
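%     % A minimal illustration (assuming grAdjacentNodes returns the indices of
%     % the nodes adjacent to a given node):
%     edges = [1 2; 2 3; 3 4];
%     lbl   = [0; 5; 0; 2];
%     lbl2  = grDilate(edges, lbl)
%     % returns lbl2 = [5; 5; 5; 2]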
%
% See also
% grErode, grOpen, grClose
%
% ------
% Author: David Legland
% E-mail: [email protected]
% Created: 2006-01-20
% Copyright 2006-2022 INRA - CEPIA Nantes - MIAJ (Jouy-en-Josas)
if length(varargin) == 2
edges = varargin{1};
lbl = varargin{2};
elseif length(varargin) == 3
edges = varargin{2};
lbl = varargin{3};
else
error('Wrong number of arguments in "grDilate"');
end
lbl2 = zeros(size(lbl));
uni = unique(edges(:));
for n = 1:length(uni)
neigh = grAdjacentNodes(edges, uni(n));
lbl2(uni(n)) = max(lbl([uni(n); neigh]));
end
varargout{1} = lbl2;
| {"author": "mattools", "repo": "matGeom", "sha": "1fd2c937064be1ee1f4fd09fbfdf96145ebe5271", "save_path": "github-repos/MATLAB/mattools-matGeom", "path": "github-repos/MATLAB/mattools-matGeom/matGeom-1fd2c937064be1ee1f4fd09fbfdf96145ebe5271/matGeom/graphs/grDilate.m"} |
# Taken from https://github.com/psclklnk/spdl and wrapped to our architecture
# Modified by Clément Romac, copy of the license at TeachMyAgent/teachers/LICENSES/SPDL
import torch
import numpy as np
from copy import deepcopy
from functools import partial
from TeachMyAgent.teachers.algos.AbstractTeacher import AbstractTeacher
from TeachMyAgent.teachers.utils.conjugate_gradient import cg_step
from TeachMyAgent.teachers.utils.torch import to_float_tensor
from TeachMyAgent.teachers.utils.gaussian_torch_distribution import GaussianTorchDistribution
class Buffer:
def __init__(self, n_elements, max_buffer_size, reset_on_query):
self.reset_on_query = reset_on_query
self.max_buffer_size = max_buffer_size
self.buffers = [list() for i in range(0, n_elements)]
def update_buffer(self, datas):
if isinstance(datas[0], list):
for buffer, data in zip(self.buffers, datas):
buffer.extend(data)
else:
for buffer, data in zip(self.buffers, datas):
buffer.append(data)
while len(self.buffers[0]) > self.max_buffer_size:
for buffer in self.buffers:
del buffer[0]
def read_buffer(self, reset=None):
if reset is None:
reset = self.reset_on_query
res = tuple([buffer for buffer in self.buffers])
if reset:
for i in range(0, len(self.buffers)):
self.buffers[i] = []
return res
def __len__(self):
return len(self.buffers[0])
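
# A minimal sketch of how Buffer is used below: n_elements parallel lists (here,
# initial states and tasks) that grow together, are truncated to max_buffer_size,
# and are optionally cleared when read. `initial_state` and `task` are placeholder
# variables for illustration only.
#
#   buf = Buffer(n_elements=2, max_buffer_size=1000, reset_on_query=True)
#   buf.update_buffer((initial_state, task))       # appends one entry to each list
#   initial_states, tasks = buf.read_buffer()      # returns and resets both lists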
class AbstractSelfPacedTeacher():
'''
Base SPDL Teacher
'''
def __init__(self, init_mean, flat_init_chol, target_mean, flat_target_chol, alpha_function, max_kl, cg_parameters):
self.context_dist = GaussianTorchDistribution(init_mean, flat_init_chol, use_cuda=False)
self.target_dist = GaussianTorchDistribution(target_mean, flat_target_chol, use_cuda=False)
self.alpha_function = alpha_function
self.max_kl = max_kl
self.cg_parameters = {"n_epochs_line_search": 10, "n_epochs_cg": 10, "cg_damping": 1e-2,
"cg_residual_tol": 1e-10}
if cg_parameters is not None:
self.cg_parameters.update(cg_parameters)
self.task = None
self.iteration = 0
def target_context_kl(self, numpy=True):
kl_div = torch.distributions.kl.kl_divergence(self.context_dist.distribution_t,
self.target_dist.distribution_t).detach()
if numpy:
kl_div = kl_div.numpy()
return kl_div
def save(self, path):
weights = self.context_dist.get_weights()
np.save(path, weights)
def load(self, path):
self.context_dist.set_weights(np.load(path))
def _compute_context_kl(self, old_context_dist):
return torch.distributions.kl.kl_divergence(old_context_dist.distribution_t, self.context_dist.distribution_t)
def _compute_context_loss(self, cons_t, old_c_log_prob_t, c_val_t, alpha_cur_t):
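        # Importance-sampled objective: expected value of the sampled contexts under
        # the new distribution, reweighted by pi_new(c) / pi_old(c), minus an
        # alpha-weighted KL penalty pulling the sampling distribution towards the
        # target distribution:
        #   J = E_{c ~ old}[ pi_new(c) / pi_old(c) * V(c) ] - alpha * KL(pi_new || pi_target)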
con_ratio_t = torch.exp(self.context_dist.log_pdf_t(cons_t) - old_c_log_prob_t)
kl_div = torch.distributions.kl.kl_divergence(self.context_dist.distribution_t, self.target_dist.distribution_t)
return torch.mean(con_ratio_t * c_val_t) - alpha_cur_t * kl_div
class SelfPacedTeacher(AbstractTeacher, AbstractSelfPacedTeacher):
def __init__(self, mins, maxs, seed, env_reward_lb, env_reward_ub, update_frequency, update_offset, alpha_function, initial_dist=None,
target_dist=None, max_kl=0.1, std_lower_bound=None, kl_threshold=None, cg_parameters=None,
use_avg_performance=False, max_context_buffer_size=1000, reset_contexts=True, discount_factor=0.99):
'''
Self-paced Deep Reinforcement Learning (https://papers.nips.cc/paper/2020/hash/68a9750337a418a86fe06c1991a1d64c-Abstract.html).
Taken from https://github.com/psclklnk/spdl and wrapped to our architecture.
Works in a non-episodic setup, updates are thus made in the `step_update` method.
Args:
update_frequency: Update frequency of the sampling distribution (in steps)
update_offset: How many steps must be done before the starting to update the distribution
alpha_function: Function calculating the alpha parameter
initial_dist: Initial distribution to start from
target_dist: Target distribution to reach
max_kl: Maximum KL-divergence authorized between the old and new distributions when updating
std_lower_bound: Minimum std authorized on the sampling distribution if the KL-divergence between
the latter and the target distribution is greater than `kl_threshold`. Set this to
`None` if no constraint on the std must be applied
kl_threshold: Threshold enforcing the std constraint
cg_parameters: Additional parameters for the Conjugate Gradient method
use_avg_performance: Whether the alpha function must used the averaged performance
max_context_buffer_size: Maximum size of the buffer storing sampled tasks
reset_contexts: Whether the buffer should be reset when queried
discount_factor: Discount factor used in the Universal Value Function
'''
AbstractTeacher.__init__(self, mins, maxs, env_reward_lb, env_reward_ub, seed)
torch.manual_seed(self.seed)
        initial_mean, initial_variance = self.get_or_create_dist(initial_dist, mins, maxs, subspace=True) # Random subspace of the task space if no initial dist
        target_mean, target_variance = self.get_or_create_dist(target_dist, mins, maxs, subspace=False) # Full task space if no target dist
context_bounds = (np.array(mins), np.array(maxs))
self.update_frequency = update_frequency
self.update_offset = update_offset
self.step_counter = 0
self.discounted_sum_reward = 0
self.discount_factor = discount_factor
self.discounted_sum_rewards = []
self.current_disc = 1
self.pending_initial_state = None
self.algorithm_iterations = 0
# The bounds that we show to the outside are limited to the interval [-1, 1], as this is typically better for
# neural nets to deal with
self.context_buffer = Buffer(2, max_context_buffer_size, reset_contexts)
self.context_dim = target_mean.shape[0]
self.context_bounds = context_bounds
self.use_avg_performance = use_avg_performance
if std_lower_bound is not None and kl_threshold is None:
raise RuntimeError("Error! Both Lower Bound on standard deviation and kl threshold need to be set")
else:
if std_lower_bound is not None:
if isinstance(std_lower_bound, np.ndarray):
if std_lower_bound.shape[0] != self.context_dim:
raise RuntimeError("Error! Wrong dimension of the standard deviation lower bound")
elif std_lower_bound is not None:
std_lower_bound = np.ones(self.context_dim) * std_lower_bound
self.std_lower_bound = std_lower_bound
self.kl_threshold = kl_threshold
# Create the initial context distribution
if isinstance(initial_variance, np.ndarray):
flat_init_chol = GaussianTorchDistribution.flatten_matrix(initial_variance, tril=False)
else:
flat_init_chol = GaussianTorchDistribution.flatten_matrix(initial_variance * np.eye(self.context_dim),
tril=False)
# Create the target distribution
if isinstance(target_variance, np.ndarray):
flat_target_chol = GaussianTorchDistribution.flatten_matrix(target_variance, tril=False)
else:
flat_target_chol = GaussianTorchDistribution.flatten_matrix(target_variance * np.eye(self.context_dim),
tril=False)
AbstractSelfPacedTeacher.__init__(self, initial_mean, flat_init_chol, target_mean, flat_target_chol,
alpha_function, max_kl, cg_parameters)
self.bk = {'mean': [],
'covariance': [],
'steps': [],
'algo_iterations': [],
'kl': []}
def record_initial_state(self, task, initial_state):
self.pending_initial_state = initial_state
def episodic_update(self, task, reward, is_success):
assert self.pending_initial_state is not None
self.discounted_sum_rewards.append(self.discounted_sum_reward)
self.context_buffer.update_buffer((self.pending_initial_state, task))
self.discounted_sum_reward = 0
self.current_disc = 1
self.pending_initial_state = None
def step_update(self, state, action, reward, next_state, done):
self.step_counter += 1
self.discounted_sum_reward += self.current_disc * reward
self.current_disc *= self.discount_factor
if self.step_counter >= self.update_offset and self.step_counter % self.update_frequency == 0:
if len(self.discounted_sum_rewards) > 0 and len(self.context_buffer) > 0:
self.algorithm_iterations += 1
avg_performance = np.mean(self.discounted_sum_rewards)
self.discounted_sum_rewards = []
ins, cons = self.context_buffer.read_buffer()
initial_states, contexts = np.array(ins), np.array(cons)
values = self.value_estimator(initial_states)
if values is None:
raise Exception("Please define a valid value estimator, this one returns None...")
old_context_dist = deepcopy(self.context_dist)
contexts_t = to_float_tensor(contexts, use_cuda=False)
old_c_log_prob_t = old_context_dist.log_pdf_t(contexts_t).detach()
# Estimate the value of the state after the policy update
c_val_t = to_float_tensor(values, use_cuda=False)
# Add the penalty term
cur_kl_t = self.target_context_kl(numpy=False)
if self.use_avg_performance:
alpha_cur_t = self.alpha_function(self.algorithm_iterations, avg_performance, cur_kl_t)
else:
alpha_cur_t = self.alpha_function(self.algorithm_iterations, torch.mean(c_val_t).detach(), cur_kl_t)
cg_step(partial(self._compute_context_loss, contexts_t, old_c_log_prob_t, c_val_t, alpha_cur_t),
partial(self._compute_context_kl, old_context_dist), self.max_kl,
self.context_dist.parameters, self.context_dist.set_weights,
self.context_dist.get_weights, **self.cg_parameters, use_cuda=False)
cov = self.context_dist._chol_flat.detach().numpy()
if self.std_lower_bound is not None and self.target_context_kl() > self.kl_threshold:
cov[0:self.context_dim] = np.log(np.maximum(np.exp(cov[0:self.context_dim]), self.std_lower_bound))
self.context_dist.set_weights(np.concatenate((self.context_dist.mean(), cov)))
self.bk["mean"].append(self.context_dist.mean())
self.bk["covariance"].append(self.context_dist.covariance_matrix())
self.bk["steps"].append(self.step_counter)
self.bk["algo_iterations"].append(self.algorithm_iterations)
self.bk["kl"].append(self.target_context_kl())
else:
print("Skipping iteration at step {} because buffers are empty.".format(self.step_counter))
def sample_task(self):
sample = self.context_dist.sample().detach().numpy()
return np.clip(sample, self.context_bounds[0], self.context_bounds[1], dtype=np.float32)
def non_exploratory_task_sampling(self):
return {"task": self.sample_task(),
"infos": {
"bk_index": len(self.bk[list(self.bk.keys())[0]]) - 1,
"task_infos": None}
}
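
# --- Hedged usage sketch (not part of the original module) ----------------------
# How a training loop might drive SelfPacedTeacher. The constant alpha schedule
# and the stubbed value estimator below are illustrative placeholders, not the
# settings used in the TeachMyAgent experiments.
#
#   mins, maxs = [-1.0, -1.0], [1.0, 1.0]
#   alpha_fn = lambda iteration, performance, kl: 0.05        # toy, constant alpha
#   teacher = SelfPacedTeacher(mins, maxs, seed=0, env_reward_lb=-1.0, env_reward_ub=1.0,
#                              update_frequency=1000, update_offset=5000,
#                              alpha_function=alpha_fn)
#   teacher.value_estimator = lambda states: np.zeros(len(states))  # stub critic
#   task = teacher.sample_task()
#   teacher.record_initial_state(task, initial_state=np.zeros(2))
#   # ... then one environment step at a time:
#   # teacher.step_update(state, action, reward, next_state, done)
#   teacher.episodic_update(task, reward=0.0, is_success=False)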
| {"hexsha": "b8b1742211018e8e348141a3a38b90b6ba5d13e4", "size": 12434, "ext": "py", "lang": "Python", "max_stars_repo_path": "TeachMyAgent/teachers/algos/self_paced_teacher.py", "max_stars_repo_name": "flowersteam/TeachMyAgent", "max_stars_repo_head_hexsha": "a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 45, "max_stars_repo_stars_event_min_datetime": "2021-03-19T00:16:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T14:02:18.000Z", "max_issues_repo_path": "TeachMyAgent/teachers/algos/self_paced_teacher.py", "max_issues_repo_name": "flowersteam/TeachMyAgent", "max_issues_repo_head_hexsha": "a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-04-26T06:21:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-24T02:57:02.000Z", "max_forks_repo_path": "TeachMyAgent/teachers/algos/self_paced_teacher.py", "max_forks_repo_name": "flowersteam/TeachMyAgent", "max_forks_repo_head_hexsha": "a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-03-23T20:21:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T14:55:11.000Z", "avg_line_length": 50.3400809717, "max_line_length": 159, "alphanum_fraction": 0.6547370114, "include": true, "reason": "import numpy", "num_tokens": 2541} |
# importing modules
import cv2 as cv
import numpy as np
import AiPhile
import time
# point seletctor function, which let's select the point, through mouse
def selectPoint(event, x, y, flags, params):
global point, condition, old_points
if event == cv.EVENT_LBUTTONDOWN:
point = (int(x), int(y))
# print(point)
condition = True
old_points = np.array([[x, y]], dtype=np.float32)
cv.namedWindow('frame')
cv.setMouseCallback("frame", selectPoint)
cap = cv.VideoCapture(1)
condition = False
point = ()
old_points = np.array([[]])
frame_counter = 0
starting_time = time.time()
while True:
frame_counter +=1
ret, frame = cap.read()
gray_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
#print(old_points.astype(int))
if condition is True:
cv.circle(frame, point, 5, (155, 0, 255), -1)
# calculating frame of Video
fps = frame_counter/(time.time()-starting_time)
AiPhile.textBGoutline(frame, f'FPS: {round(fps,1)}', (30, 40))
cv.imshow('frame', frame)
key = cv.waitKey(1)
if key == ord('q'):
break
cv.destroyAllWindows()
cap.release() | {"hexsha": "601f48cee85b6bc8767b1d0a6b06749f24f63155", "size": 1146, "ext": "py", "lang": "Python", "max_stars_repo_path": "Optical Flow Basics/main.py", "max_stars_repo_name": "Asadullah-Dal17/Qr-code-detection-distance-estimation", "max_stars_repo_head_hexsha": "962e48c9524e5fa9be8948fdeb16eedafb7adf30", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Optical Flow Basics/main.py", "max_issues_repo_name": "Asadullah-Dal17/Qr-code-detection-distance-estimation", "max_issues_repo_head_hexsha": "962e48c9524e5fa9be8948fdeb16eedafb7adf30", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Optical Flow Basics/main.py", "max_forks_repo_name": "Asadullah-Dal17/Qr-code-detection-distance-estimation", "max_forks_repo_head_hexsha": "962e48c9524e5fa9be8948fdeb16eedafb7adf30", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.875, "max_line_length": 72, "alphanum_fraction": 0.65008726, "include": true, "reason": "import numpy", "num_tokens": 306} |
"""LieConv Baseline experiments.
requires:
https://github.com/mfinzi/LieConv
Usage:
$ python3 run_LieConv_cifar100.py --epochs 100 --nlay 2 --ker 256 --lr 3e-3 --bn 0 --rot 1 --scr 1
--rot: rotate images
--scr: scramble images (fixed shuffling of pixels in all images)
--nlay: number of layers
--bn: batchnorm (0: no batchnorm; used in baseline experiments)
"""
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from oil.utils.utils import LoaderTo, cosLr, islice
from oil.tuning.study import train_trial
from oil.datasetup.datasets import split_dataset
from oil.utils.parallel import try_multigpu_parallelize
from oil.model_trainers.classifier import Classifier
from functools import partial
from torch.optim import Adam
from oil.tuning.args import argupdated_config
import copy
import lie_conv.lieGroups as lieGroups
import lie_conv.lieConv as lieConv
from lie_conv.lieConv import ImgLieResnet
# from lie_conv.datasets import MnistRotDataset, RotMNIST
from oil.datasetup.datasets import EasyIMGDataset
from lie_conv.utils import Named, export, Expression, FixedNumpySeed, RandomZrotation, GaussianNoise
from lie_conv.utils import Named
import numpy as np
from PIL import Image
from torchvision.datasets.utils import download_url, download_and_extract_archive, extract_archive, \
verify_str_arg
from torchvision.datasets.vision import VisionDataset
import torchvision
class RotCIFAR10(EasyIMGDataset,torchvision.datasets.CIFAR10):
# """ Unofficial RotMNIST dataset created on the fly by rotating MNIST"""
means = (0.5,)
stds = (0.25,)
num_targets = 10
def __init__(self,*args,dataseed=0,transform=None,**kwargs):
super().__init__(*args,download=True,**kwargs)
N = len(self)
with FixedNumpySeed(dataseed):
angles = torch.rand(N)*2*np.pi
with torch.no_grad():
# R = torch.zeros(N,2,2)
# R[:,0,0] = R[:,1,1] = angles.cos()
# R[:,0,1] = R[:,1,0] = angles.sin()
# R[:,1,0] *=-1
            # Build affine matrices applying a random rotation to each image
affineMatrices = torch.zeros(N,2,3)
affineMatrices[:,0,0] = angles.cos()
affineMatrices[:,1,1] = angles.cos()
affineMatrices[:,0,1] = angles.sin()
affineMatrices[:,1,0] = -angles.sin()
# affineMatrices[:,0,2] = -2*np.random.randint(-self.max_trans, self.max_trans+1, bs)/w
# affineMatrices[:,1,2] = 2*np.random.randint(-self.max_trans, self.max_trans+1, bs)/h
# self.data = self.data.unsqueeze(1).float()
self.data = torch.as_tensor(self.data.transpose((0,3,1,2))).float()
flowgrid = F.affine_grid(affineMatrices, size = self.data.size())
self.data = F.grid_sample(self.data, flowgrid)
normalize = transforms.Normalize((127.5,) ,(255,))
self.data = normalize(self.data)
def __getitem__(self,idx):
return self.data[idx], int(self.targets[idx])
def default_aug_layers(self):
return RandomRotateTranslate(0)# no translation
class CIFAR100(EasyIMGDataset,torchvision.datasets.CIFAR100):
# """ Unofficial RotMNIST dataset created on the fly by rotating MNIST"""
means = (0.5,)
stds = (0.25,)
num_targets = 100
def __init__(self,*args,dataseed=0,transform=None,**kwargs):
super().__init__(*args,download=True,**kwargs)
N = len(self)
with torch.no_grad():
self.data = torch.as_tensor(self.data.transpose((0,3,1,2))).float()
normalize = transforms.Normalize((127.5,) ,(255,))
self.data = normalize(self.data)
def __getitem__(self,idx):
return self.data[idx], int(self.targets[idx])
def default_aug_layers(self):
return RandomRotateTranslate(0)# no translation
class RotCIFAR100(EasyIMGDataset,torchvision.datasets.CIFAR100):
# """ Unofficial RotMNIST dataset created on the fly by rotating MNIST"""
means = (0.5,)
stds = (0.25,)
num_targets = 100
def __init__(self,*args,dataseed=0,transform=None,**kwargs):
super().__init__(*args,download=True,**kwargs)
N = len(self)
with FixedNumpySeed(dataseed):
angles = torch.rand(N)*2*np.pi
with torch.no_grad():
# R = torch.zeros(N,2,2)
# R[:,0,0] = R[:,1,1] = angles.cos()
# R[:,0,1] = R[:,1,0] = angles.sin()
# R[:,1,0] *=-1
            # Build affine matrices applying a random rotation to each image
affineMatrices = torch.zeros(N,2,3)
affineMatrices[:,0,0] = angles.cos()
affineMatrices[:,1,1] = angles.cos()
affineMatrices[:,0,1] = angles.sin()
affineMatrices[:,1,0] = -angles.sin()
# affineMatrices[:,0,2] = -2*np.random.randint(-self.max_trans, self.max_trans+1, bs)/w
# affineMatrices[:,1,2] = 2*np.random.randint(-self.max_trans, self.max_trans+1, bs)/h
# self.data = self.data.unsqueeze(1).float()
self.data = torch.as_tensor(self.data.transpose((0,3,1,2))).float()
flowgrid = F.affine_grid(affineMatrices, size = self.data.size())
self.data = F.grid_sample(self.data, flowgrid)
normalize = transforms.Normalize((127.5,) ,(255,))
self.data = normalize(self.data)
def __getitem__(self,idx):
return self.data[idx], int(self.targets[idx])
def default_aug_layers(self):
return RandomRotateTranslate(0)# no translation
class RotScramCIFAR100(RotCIFAR100):
""" Scrambled"""
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
with torch.no_grad():
idx = torch.randperm(self.data[0,0].nelement())
self.data = self.data.view(*self.data.shape[:2], -1)[:,:,idx].view(self.data.size())
def makeTrainer(*, dataset=RotCIFAR100, network=ImgLieResnet, num_epochs=100,
bs=50, lr=3e-3, aug=False,#True,
optim=Adam, device='cuda', trainer=Classifier,
split={'train':40000}, small_test=False, net_config={}, opt_config={},
trainer_config={'log_dir':None}):
# Prep the datasets splits, model, and dataloaders
datasets = split_dataset(dataset(f'~/datasets/{dataset}/'),splits=split)
datasets['test'] = dataset(f'~/datasets/{dataset}/', train=False)
device = torch.device(device)
model = network(num_targets=datasets['train'].num_targets,**net_config).to(device)
if aug: model = torch.nn.Sequential(datasets['train'].default_aug_layers(),model)
model,bs = try_multigpu_parallelize(model,bs)
dataloaders = {k:LoaderTo(DataLoader(v,batch_size=bs,shuffle=(k=='train'),
num_workers=0,pin_memory=False),device) for k,v in datasets.items()}
dataloaders['Train'] = islice(dataloaders['train'],1+len(dataloaders['train'])//10)
if small_test: dataloaders['test'] = islice(dataloaders['test'],1+len(dataloaders['train'])//10)
# Add some extra defaults if SGD is chosen
opt_constr = partial(optim, lr=lr, **opt_config)
lr_sched = cosLr(num_epochs)
return trainer(model,dataloaders,opt_constr,lr_sched,**trainer_config)
import argparse
parser = argparse.ArgumentParser(description='LieConv Tests')
parser.add_argument('--rot', type=int, default=1, metavar='N',
                    help='rotated CIFAR100 (default: True)')
parser.add_argument('--scr', type=int, default=0, metavar='N',
help='scramble (default: False)')
parser.add_argument('--ker', type=int, default=128, metavar='N',
help='k in LieConv layer (default: 128)')
parser.add_argument('--nlay', type=int, default=2, metavar='N',
help='number of layers (default: 2)')
parser.add_argument('--epochs', type=int, default=40, metavar='N',
help='number of epochs to train (default: 40)')
parser.add_argument('--lr', type=float, default=3e-3, metavar='N',
help='learning rate (default: 3e-3)')
parser.add_argument('--bn', type=int, default=1, metavar='N',
help='batch normalization (default: True)')
args = parser.parse_args()
SCRAMBLE = args.scr
ROTATE = args.rot
ker = args.ker
nlay = args.nlay
batchnorm = bool(args.bn)
EPOCHS = args.epochs
Trial = train_trial(makeTrainer)
defaults = copy.deepcopy(makeTrainer.__kwdefaults__)
if ROTATE:
defaults['dataset'] = RotCIFAR100 #MnistRotScrambleDataset
elif SCRAMBLE:
defaults['dataset'] = RotScramCIFAR100
else:
print("=============\n\n Using default CIFAR100\n\n=============")
defaults['dataset'] = CIFAR100
defaults['net_config'] = dict(chin=3,
num_layers=nlay,
k=ker,
bn= batchnorm
)
defaults['num_epochs'] = EPOCHS
defaults['lr'] = args.lr
print(defaults)
fnam = f'./results/lie_conv-cifar100{"-rot" if ROTATE else ""}{"-scr" if SCRAMBLE else ""}-lay{nlay}-k{ker}.pkl'
print('\n', fnam,'\n')
results = Trial(defaults)
net = ImgLieResnet(**defaults['net_config'])
param_size_list = [(name,tuple(param.shape)) for name, param in net.net.named_parameters() if param.requires_grad]
out = dict( net_configs = results[0],
results = results[1].to_dict(),
params = param_size_list,
total_params = sum([np.prod(i[1]) for i in param_size_list])
)
print('# params: ', out['total_params'])
import pickle
pickle.dump(out, open(fnam, 'wb'))
# if __name__=="__main__":
# Trial = train_trial(makeTrainer)
# defaults = copy.deepcopy(makeTrainer.__kwdefaults__)
# defaults['save'] = False
# Trial(argupdated_config(defaults,namespace=(lieConv,lieGroups)))
| {"hexsha": "f8012e3004633b9367565140f7df550e4aec0ea7", "size": 9858, "ext": "py", "lang": "Python", "max_stars_repo_path": "paper-code/D-image-experiments/run_LieConv_cifar100.py", "max_stars_repo_name": "nimadehmamy/L-conv-code", "max_stars_repo_head_hexsha": "5a8abfbff3f6564771234df3e177d1d4aafe371d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-12-03T16:02:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T14:39:16.000Z", "max_issues_repo_path": "paper-code/D-image-experiments/run_LieConv_cifar100.py", "max_issues_repo_name": "LaLaLailalai/L-conv-code", "max_issues_repo_head_hexsha": "5a8abfbff3f6564771234df3e177d1d4aafe371d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper-code/D-image-experiments/run_LieConv_cifar100.py", "max_forks_repo_name": "LaLaLailalai/L-conv-code", "max_forks_repo_head_hexsha": "5a8abfbff3f6564771234df3e177d1d4aafe371d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-14T17:27:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T13:45:31.000Z", "avg_line_length": 40.2367346939, "max_line_length": 114, "alphanum_fraction": 0.6452627308, "include": true, "reason": "import numpy", "num_tokens": 2545} |
# Author: [email protected]
import matplotlib.pyplot as plt
import pyLib.imtools as imtools
import numpy as np
# # ========================= CREATING THE DICTIONARY
# cdict1={'red': ((0.0, 0.0, 0.0),
# (0.5, 0.0, 0.1),
# (1.0, 1.0, 1.0)),
# 'green':((0.0, 0.0, 0.0),
# (1.0, 0.0, 0.0)),
# 'blue': ((0.0, 0.0, 0.0),
# (0.5, 0.0, 0.1),
# (1.0, 1.0, 1.0))
# }
import matplotlib.colors as mcolors
# #blue_red1 = mcolors.LinearSegmentedColormap('BlueRed1', cdict1)
def make_colormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
c = mcolors.ColorConverter().to_rgb
# #phimap = make_colormap(
# #[c('blue'), c('white'), 0.33, c('white'), 0.66, c('white'),c('blue')])
# #phimap = make_colormap(
# #[c('grey'), c('white'),0.5, c('white'), c('grey')])
phimap = make_colormap([c('white'), c('tomato'), 0.33, c('tomato'), c('deepskyblue'), 0.66, c('deepskyblue'),c('white')])
# phimap = make_colormap([c('white'), c('tomato'), 0.33, c('tomato'), c('steelblue'), 0.66, c('steelblue'),c('white')])
# phimap = make_colormap([c('red'), 0.33, c('red'), c('blue'), 0.66, c('blue')])
# phimap = make_colormap([c('tomato'), c('gold'), 0.25, c('gold'), c('deepskyblue'), 0.50, c('deepskyblue'),c('hotpink'), 0.75, c('hotpink'),c('tomato')])
# phimap = make_colormap([c('tomato'), 0.33, c('gold'), 0.66, c('deepskyblue')])
import numpy as np
from matplotlib.colors import LinearSegmentedColormap as lsc
def cmap_map(function, cmap, name='colormap_mod', N=None, gamma=None):
"""
Modify a colormap using `function` which must operate on 3-element
arrays of [r, g, b] values.
You may specify the number of colors, `N`, and the opacity, `gamma`,
value of the returned colormap. These values default to the ones in
the input `cmap`.
You may also specify a `name` for the colormap, so that it can be
loaded using plt.get_cmap(name).
"""
if N is None:
N = cmap.N
if gamma is None:
gamma = cmap._gamma
cdict = cmap._segmentdata
# Cast the steps into lists:
    step_dict = {key: [x[0] for x in cdict[key]] for key in cdict}
# Now get the unique steps (first column of the arrays):
step_list = np.unique(sum(step_dict.values(), []))
# 'y0', 'y1' are as defined in LinearSegmentedColormap docstring:
y0 = cmap(step_list)[:, :3]
y1 = y0.copy()[:, :3]
# Go back to catch the discontinuities, and place them into y0, y1
for iclr, key in enumerate(['red', 'green', 'blue']):
for istp, step in enumerate(step_list):
try:
ind = step_dict[key].index(step)
except ValueError:
# This step is not in this color
continue
y0[istp, iclr] = cdict[key][ind][1]
y1[istp, iclr] = cdict[key][ind][2]
# Map the colors to their new values:
    y0 = np.array(list(map(function, y0)))
    y1 = np.array(list(map(function, y1)))
# Build the new colormap (overwriting step_dict):
for iclr, clr in enumerate(['red', 'green', 'blue']):
step_dict[clr] = np.vstack((step_list, y0[:, iclr], y1[:, iclr])).T
return lsc(name, step_dict, N=N, gamma=gamma)
def cmap_map(function,cmap):
    """ Applies function (which should operate on vectors of shape 3:
    [r, g, b]) on colormap cmap. This routine will break any discontinuous points in a colormap.
    Note: this definition overrides the `cmap_map` defined above.
    """
    cdict = cmap._segmentdata
    step_dict = {}
    # First get the list of points where the segments start or end
    for key in ('red','green','blue'): step_dict[key] = [x[0] for x in cdict[key]]
    step_list = sum(step_dict.values(), [])
    step_list = np.array(sorted(set(step_list)))
    # Then compute the LUT, and apply the function to the LUT
    reduced_cmap = lambda step: np.array(cmap(step)[0:3])
    old_LUT = np.array([reduced_cmap(step) for step in step_list])
    new_LUT = np.array([function(lut) for lut in old_LUT])
    # Now try to make a minimal segment definition of the new LUT
    cdict = {}
    for i,key in enumerate(('red','green','blue')):
        this_cdict = {}
        for j,step in enumerate(step_list):
            if step in step_dict[key]:
                this_cdict[step] = new_LUT[j,i]
            elif new_LUT[j,i]!=old_LUT[j,i]:
                this_cdict[step] = new_LUT[j,i]
        colorvector = sorted(x + (x[1],) for x in this_cdict.items())
        cdict[key] = colorvector
    return mcolors.LinearSegmentedColormap('colormap',cdict,1024)
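# A minimal usage sketch (not part of the original script); the colormap name 'jet' is
# just an illustrative choice:
# light_jet = cmap_map(lambda rgb: rgb / 2 + 0.5, plt.get_cmap('jet'))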
def dimMap(resultadoSir):
    height = resultadoSir.shape[0]*(resultadoSir[0][-1][0][0]+1)
    width = (resultadoSir[0][-1][0][1]+1)
    return [height,width]
def readmapa(resultadoSir, mapa, magnitud):
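    # Fill `mapa` with the requested magnitude from the SIR inversion result.
    # Note: relies on the module-level globals `height`, `width` and `index`,
    # which are defined below before this function is called.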
cont = 0
for fila in range(0, height):
for columna in range(0, width):
punto = cont % resultadoSir.shape[1]
veces = int(cont/resultadoSir.shape[1])
if magnitud == 8 or magnitud == 9 or magnitud == 10 or magnitud == 11:
mapa[columna,fila] = resultadoSir[veces][punto][1][0][magnitud]
else:
mapa[columna,fila] = resultadoSir[veces][punto][1][0][magnitud][index]
cont += 1
return mapa
def corrphi(mapa):
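    # Fold the azimuth map into the 0-180 degree range.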
mapa[mapa<0] = (mapa[mapa<0]+360) % 360; mapa[mapa>180] = (mapa[mapa>180]-180)
# ==============================================================================================
global index
global magnitud
import matplotlib
#hsv
# from numpy import array
# phimap = cmap_map(lambda x: x/2+0.5, matplotlib.cm.jet)
# ========================= INPUT
invSir1 = 'finalSir.npy'
logTau = 0.0
magnitud = 7
cmapArray = ['','gray','gray','bone','bone','seismic','Spectral_r',phimap,'bone','gray','gray','cubehelix']
magTitle = ['TAU','$T$ $[kK]$','p','$v$ $[km/s]$','$B$ $[kG]$','$v$ $[km/s]$',r'$\gamma$ $[d]$',r'$\phi$ $[d]$','vmacro','fillingf','difusa',r'$\chi^2$']
magFile = ['TAU','TEMP','PRESION','VMICRO','CAMPO','VLOS','GAMMA','PHI','VMACRO','FILLING','DIFUSA','CHI2']
# ========================= MAP
resultadoSir1 = np.load(invSir1)
height, width = dimMap(resultadoSir1)
print('height:',height,'width:',width)
mapa = np.zeros((height, width))
index = np.where(resultadoSir1[0][0][1][0][0] == logTau)[0][0]
print('logTau: '+str(logTau)+' -> index: '+str(index))
readmapa(resultadoSir1, mapa.T ,magnitud)
# Limits for the color scale
if magnitud == 7: corrphi(mapa)
print('3sigma_map: {0:2.2f}'.format(3*np.std(mapa)))
print('Mean_map: {0:2.2f}'.format(np.mean(mapa)))
print('Min_map: {0:2.2f}'.format(np.min(mapa)))
print('Max_map: {0:2.2f}'.format(np.max(mapa)))
vmini = np.mean(mapa)-3*np.std(mapa)
if np.min(mapa) >= 0.0 and magnitud != 1: vmini = 0.
vmaxi = np.mean(mapa)+3*np.std(mapa)
if magnitud == 1: vmini = np.min(mapa); vmaxi = np.max(mapa)
if magnitud == 6: vmaxi = 180.
if magnitud == 7: vmaxi = 180.;vmini = 0.
if magnitud == 5: vmaxi = np.mean(mapa)+3*np.std(mapa); vmini = -vmaxi
from matplotlib.colors import LogNorm
plt.imshow(mapa,cmap=cmapArray[magnitud],origin='lower',interpolation='None',vmin=vmini,vmax=vmaxi)#norm=LogNorm()
plt.title('Map 17jun14.006 (1)')
plt.xlabel('Slit Axis [pix]')
plt.ylabel('Time Axis [pix]')
cb = plt.colorbar(shrink=.46)#, ticks=[0.6, 0.8, 1., 1.2])
#cb = plt.colorbar(shrink=.46, ticks=[0.3, 0.6, 0.9, 1.2, 1.5])
# cb.set_label(r'Intensity HeI ({0:4.1f}) /$I_{{qs}}$({1:4.1f})'.format(xLambda[341],xLambda[posicontinuo]), labelpad=5., y=0.5, fontsize=12.)
cb.set_label(r""+magTitle[magnitud]+", $log(\\tau)$={0}".format(logTau), labelpad=8., y=0.5, fontsize=12.)
# plt.show()
plt.savefig(magFile[magnitud]+'_log{0:02d}.pdf'.format(int(logTau)), bbox_inches='tight')
print(magFile[magnitud]+'_log{0:02d}.pdf SAVE'.format(int(logTau)))
| {"hexsha": "b67f07abb2983ff418bbca8544194aaef58e4ab9", "size": 8343, "ext": "py", "lang": "Python", "max_stars_repo_path": "1map_OLD.py", "max_stars_repo_name": "cdiazbas/MPySIR", "max_stars_repo_head_hexsha": "72b1ad5086263e1360cb3c2aa9fd34733fc4cf7b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-12-18T21:06:04.000Z", "max_stars_repo_stars_event_max_datetime": "2018-01-04T09:13:56.000Z", "max_issues_repo_path": "1map_OLD.py", "max_issues_repo_name": "cdiazbas/MPySIR", "max_issues_repo_head_hexsha": "72b1ad5086263e1360cb3c2aa9fd34733fc4cf7b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "1map_OLD.py", "max_forks_repo_name": "cdiazbas/MPySIR", "max_forks_repo_head_hexsha": "72b1ad5086263e1360cb3c2aa9fd34733fc4cf7b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8046511628, "max_line_length": 155, "alphanum_fraction": 0.5936713412, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2740} |
# MIT License
#
# Copyright (C) IBM Corporation 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
import os
import pickle
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from sklearn.linear_model import LogisticRegression
from art.classifiers import SklearnClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
import numpy as np
from tests.utils import load_dataset, master_seed
def main_mnist_binary():
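    # Train a small CNN on binary MNIST (digit < 5 vs. >= 5) and dump its conv
    # and dense layer weights as .npy files under resources/models/scikit/.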
master_seed(1234)
model = Sequential()
model.add(Conv2D(1, kernel_size=(7, 7), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(lr=0.01), metrics=['accuracy'])
(x_train, y_train), (_, _), _, _ = load_dataset('mnist')
y_train = np.argmax(y_train, axis=1)
y_train[y_train < 5] = 0
y_train[y_train >= 5] = 1
model.fit(x_train, y_train, batch_size=128, epochs=10)
w_0, b_0 = model.layers[0].get_weights()
w_3, b_3 = model.layers[3].get_weights()
np.save(
os.path.join(os.path.dirname(os.path.dirname(__file__)), "resources/models/scikit/", 'W_CONV2D_MNIST_BINARY'),
w_0)
np.save(os.path.join(os.path.dirname(os.path.dirname(__file__)), "resources/models/scikit/"'B_CONV2D_MNIST_BINARY'),
b_0)
np.save(os.path.join(os.path.dirname(os.path.dirname(__file__)), "resources/models/scikit/"'W_DENSE_MNIST_BINARY'),
w_3)
np.save(os.path.join(os.path.dirname(os.path.dirname(__file__)), "resources/models/scikit/"'B_DENSE_MNIST_BINARY'),
b_3)
def create_scikit_model_weights():
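    # Fit a collection of scikit-learn classifiers on Iris, wrapped as ART
    # SklearnClassifier objects (with and without clip values), and pickle them
    # under resources/models/scikit/.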
master_seed(1234)
model_list = {"decisionTreeClassifier": DecisionTreeClassifier(),
"extraTreeClassifier": ExtraTreeClassifier(),
"adaBoostClassifier": AdaBoostClassifier(),
"baggingClassifier": BaggingClassifier(),
"extraTreesClassifier": ExtraTreesClassifier(n_estimators=10),
"gradientBoostingClassifier": GradientBoostingClassifier(n_estimators=10),
"randomForestClassifier": RandomForestClassifier(n_estimators=10),
"logisticRegression": LogisticRegression(solver='lbfgs', multi_class='auto'),
"svc": SVC(gamma='auto'),
"linearSVC": LinearSVC()}
clipped_models = {model_name: SklearnClassifier(model=model, clip_values=(0, 1)) for model_name, model in
model_list.items()}
unclipped_models = {model_name: SklearnClassifier(model=model) for model_name, model in model_list.items()}
(x_train_iris, y_train_iris), (_, _), _, _ = load_dataset('iris')
for model_name, model in clipped_models.items():
model.fit(x=x_train_iris, y=y_train_iris)
pickle.dump(model, open(os.path.join(os.path.dirname(os.path.dirname(__file__)), "resources/models/scikit/",
model_name + "iris_clipped.sav"), 'wb'))
for model_name, model in unclipped_models.items():
model.fit(x=x_train_iris, y=y_train_iris)
pickle.dump(model, open(os.path.join(os.path.dirname(os.path.dirname(__file__)), "resources/models/scikit/",
model_name + "iris_unclipped.sav"), 'wb'))
if __name__ == '__main__':
main_mnist_binary()
create_scikit_model_weights()
| {"hexsha": "83653bfdb945be307f2238a5de30ec0225a672d2", "size": 4782, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/create_model_weights.py", "max_stars_repo_name": "virkt25/adversarial-robustness-toolbox", "max_stars_repo_head_hexsha": "3cfa6de196cb32a3efafab2ff6bbf44247c9ddbd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-22T19:59:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-22T19:59:02.000Z", "max_issues_repo_path": "models/create_model_weights.py", "max_issues_repo_name": "virkt25/adversarial-robustness-toolbox", "max_issues_repo_head_hexsha": "3cfa6de196cb32a3efafab2ff6bbf44247c9ddbd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/create_model_weights.py", "max_forks_repo_name": "virkt25/adversarial-robustness-toolbox", "max_forks_repo_head_hexsha": "3cfa6de196cb32a3efafab2ff6bbf44247c9ddbd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.3465346535, "max_line_length": 120, "alphanum_fraction": 0.708281054, "include": true, "reason": "import numpy", "num_tokens": 1092} |
[STATEMENT]
lemma master_integral:
fixes a p p' :: real
assumes p: "p \<noteq> p'" and a: "a > 0"
obtains c d where "c \<noteq> 0" "p > p' \<longrightarrow> d \<noteq> 0"
"(\<lambda>x::nat. x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p+1)))) \<in>
\<Theta>(\<lambda>x::nat. d * x powr p + c * x powr p')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>c d. \<lbrakk>c \<noteq> 0; p' < p \<longrightarrow> d \<noteq> 0; (\<lambda>x. real x powr p * (1 + integral {a..real x} (\<lambda>u. u powr p' / u powr (p + 1)))) \<in> \<Theta>(\<lambda>x. d * real x powr p + c * real x powr p')\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>c d. \<lbrakk>c \<noteq> 0; p' < p \<longrightarrow> d \<noteq> 0; (\<lambda>x. real x powr p * (1 + integral {a..real x} (\<lambda>u. u powr p' / u powr (p + 1)))) \<in> \<Theta>(\<lambda>x. d * real x powr p + c * real x powr p')\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
define e where "e = a powr (p' - p)"
[PROOF STATE]
proof (state)
this:
e = a powr (p' - p)
goal (1 subgoal):
1. (\<And>c d. \<lbrakk>c \<noteq> 0; p' < p \<longrightarrow> d \<noteq> 0; (\<lambda>x. real x powr p * (1 + integral {a..real x} (\<lambda>u. u powr p' / u powr (p + 1)))) \<in> \<Theta>(\<lambda>x. d * real x powr p + c * real x powr p')\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
p \<noteq> p'
0 < a
[PROOF STEP]
have e: "e \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
p \<noteq> p'
0 < a
goal (1 subgoal):
1. 0 \<le> e
[PROOF STEP]
by (simp add: e_def)
[PROOF STATE]
proof (state)
this:
0 \<le> e
goal (1 subgoal):
1. (\<And>c d. \<lbrakk>c \<noteq> 0; p' < p \<longrightarrow> d \<noteq> 0; (\<lambda>x. real x powr p * (1 + integral {a..real x} (\<lambda>u. u powr p' / u powr (p + 1)))) \<in> \<Theta>(\<lambda>x. d * real x powr p + c * real x powr p')\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
define c where "c = inverse (p' - p)"
[PROOF STATE]
proof (state)
this:
c = inverse (p' - p)
goal (1 subgoal):
1. (\<And>c d. \<lbrakk>c \<noteq> 0; p' < p \<longrightarrow> d \<noteq> 0; (\<lambda>x. real x powr p * (1 + integral {a..real x} (\<lambda>u. u powr p' / u powr (p + 1)))) \<in> \<Theta>(\<lambda>x. d * real x powr p + c * real x powr p')\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
define d where "d = 1 - inverse (p' - p) * e"
[PROOF STATE]
proof (state)
this:
d = 1 - inverse (p' - p) * e
goal (1 subgoal):
1. (\<And>c d. \<lbrakk>c \<noteq> 0; p' < p \<longrightarrow> d \<noteq> 0; (\<lambda>x. real x powr p * (1 + integral {a..real x} (\<lambda>u. u powr p' / u powr (p + 1)))) \<in> \<Theta>(\<lambda>x. d * real x powr p + c * real x powr p')\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
have "c \<noteq> 0" and "p > p' \<longrightarrow> d \<noteq> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c \<noteq> 0 &&& p' < p \<longrightarrow> d \<noteq> 0
[PROOF STEP]
using e p a
[PROOF STATE]
proof (prove)
using this:
0 \<le> e
p \<noteq> p'
0 < a
goal (1 subgoal):
1. c \<noteq> 0 &&& p' < p \<longrightarrow> d \<noteq> 0
[PROOF STEP]
unfolding c_def d_def
[PROOF STATE]
proof (prove)
using this:
0 \<le> e
p \<noteq> p'
0 < a
goal (1 subgoal):
1. inverse (p' - p) \<noteq> 0 &&& p' < p \<longrightarrow> 1 - inverse (p' - p) * e \<noteq> 0
[PROOF STEP]
by (auto simp: field_simps)
[PROOF STATE]
proof (state)
this:
c \<noteq> 0
p' < p \<longrightarrow> d \<noteq> 0
goal (1 subgoal):
1. (\<And>c d. \<lbrakk>c \<noteq> 0; p' < p \<longrightarrow> d \<noteq> 0; (\<lambda>x. real x powr p * (1 + integral {a..real x} (\<lambda>u. u powr p' / u powr (p + 1)))) \<in> \<Theta>(\<lambda>x. d * real x powr p + c * real x powr p')\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
c \<noteq> 0
p' < p \<longrightarrow> d \<noteq> 0
goal (1 subgoal):
1. thesis
[PROOF STEP]
apply (rule that)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. real x powr p * (1 + integral {a..real x} (\<lambda>u. u powr p' / u powr (p + 1)))) \<in> \<Theta>(\<lambda>x. d * real x powr p + c * real x powr p')
[PROOF STEP]
apply (rule bigtheta_real_nat_transfer, rule bigthetaI_cong)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_top. x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
using eventually_ge_at_top[of a]
[PROOF STATE]
proof (prove)
using this:
eventually ((\<le>) a) at_top
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_top. x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
proof eventually_elim
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. a \<le> x \<Longrightarrow> x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. a \<le> x \<Longrightarrow> x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
assume x: "x \<ge> a"
[PROOF STATE]
proof (state)
this:
a \<le> x
goal (1 subgoal):
1. \<And>x. a \<le> x \<Longrightarrow> x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
hence "integral {a..x} (\<lambda>u. u powr p' / u powr (p+1)) =
integral {a..x} (\<lambda>u. u powr (p' - (p + 1)))"
[PROOF STATE]
proof (prove)
using this:
a \<le> x
goal (1 subgoal):
1. integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1)) = integral {a..x} (\<lambda>u. u powr (p' - (p + 1)))
[PROOF STEP]
by (intro Henstock_Kurzweil_Integration.integral_cong) (simp_all add: powr_diff [symmetric] )
[PROOF STATE]
proof (state)
this:
integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1)) = integral {a..x} (\<lambda>u. u powr (p' - (p + 1)))
goal (1 subgoal):
1. \<And>x. a \<le> x \<Longrightarrow> x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1)) = integral {a..x} (\<lambda>u. u powr (p' - (p + 1)))
goal (1 subgoal):
1. \<And>x. a \<le> x \<Longrightarrow> x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
have "... = inverse (p' - p) * (x powr (p' - p) - a powr (p' - p))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. integral {a..x} (\<lambda>u. u powr (p' - (p + 1))) = inverse (p' - p) * (x powr (p' - p) - a powr (p' - p))
[PROOF STEP]
using p x0_less_x1 a x
[PROOF STATE]
proof (prove)
using this:
p \<noteq> p'
x\<^sub>0 < x\<^sub>1
0 < a
a \<le> x
goal (1 subgoal):
1. integral {a..x} (\<lambda>u. u powr (p' - (p + 1))) = inverse (p' - p) * (x powr (p' - p) - a powr (p' - p))
[PROOF STEP]
by (simp add: integral_powr)
[PROOF STATE]
proof (state)
this:
integral {a..x} (\<lambda>u. u powr (p' - (p + 1))) = inverse (p' - p) * (x powr (p' - p) - a powr (p' - p))
goal (1 subgoal):
1. \<And>x. a \<le> x \<Longrightarrow> x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
integral {a..x} (\<lambda>u. u powr (p' - (p + 1))) = inverse (p' - p) * (x powr (p' - p) - a powr (p' - p))
goal (1 subgoal):
1. \<And>x. a \<le> x \<Longrightarrow> x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
have "x powr p * (1 + ...) = d * x powr p + c * x powr p'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x powr p * (1 + inverse (p' - p) * (x powr (p' - p) - a powr (p' - p))) = d * x powr p + c * x powr p'
[PROOF STEP]
using p
[PROOF STATE]
proof (prove)
using this:
p \<noteq> p'
goal (1 subgoal):
1. x powr p * (1 + inverse (p' - p) * (x powr (p' - p) - a powr (p' - p))) = d * x powr p + c * x powr p'
[PROOF STEP]
unfolding c_def d_def
[PROOF STATE]
proof (prove)
using this:
p \<noteq> p'
goal (1 subgoal):
1. x powr p * (1 + inverse (p' - p) * (x powr (p' - p) - a powr (p' - p))) = (1 - inverse (p' - p) * e) * x powr p + inverse (p' - p) * x powr p'
[PROOF STEP]
by (simp add: algebra_simps powr_diff e_def)
[PROOF STATE]
proof (state)
this:
x powr p * (1 + inverse (p' - p) * (x powr (p' - p) - a powr (p' - p))) = d * x powr p + c * x powr p'
goal (1 subgoal):
1. \<And>x. a \<le> x \<Longrightarrow> x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
show "x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p+1))) =
d * x powr p + c * x powr p'"
[PROOF STATE]
proof (prove)
using this:
x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
goal (1 subgoal):
1. x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
x powr p * (1 + integral {a..x} (\<lambda>u. u powr p' / u powr (p + 1))) = d * x powr p + c * x powr p'
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
| {"llama_tokens": 4411, "file": "Akra_Bazzi_Master_Theorem", "length": 34}
/-
Copyright (c) 2021 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import algebra.algebra.subalgebra.basic
import topology.algebra.module.basic
import topology.algebra.field
/-!
# Topological (sub)algebras
A topological algebra over a topological semiring `R` is a topological semiring with a compatible
continuous scalar multiplication by elements of `R`. We reuse typeclass `has_continuous_smul` for
topological algebras.
## Results
This is just a minimal stub for now!
The topological closure of a subalgebra is still a subalgebra,
which as an algebra is a topological algebra.
-/
open classical set topological_space algebra
open_locale classical
universes u v w
section topological_algebra
variables (R : Type*) [topological_space R] [comm_semiring R]
variables (A : Type u) [topological_space A]
variables [semiring A]
lemma continuous_algebra_map_iff_smul [algebra R A] [topological_semiring A] :
continuous (algebra_map R A) ↔ continuous (λ p : R × A, p.1 • p.2) :=
begin
refine ⟨λ h, _, λ h, _⟩,
{ simp only [algebra.smul_def], exact (h.comp continuous_fst).mul continuous_snd },
{ rw algebra_map_eq_smul_one', exact h.comp (continuous_id.prod_mk continuous_const) }
end
@[continuity]
lemma continuous_algebra_map [algebra R A] [topological_semiring A] [has_continuous_smul R A] :
continuous (algebra_map R A) :=
(continuous_algebra_map_iff_smul R A).2 continuous_smul
lemma has_continuous_smul_of_algebra_map [algebra R A] [topological_semiring A]
(h : continuous (algebra_map R A)) :
has_continuous_smul R A :=
⟨(continuous_algebra_map_iff_smul R A).1 h⟩
end topological_algebra
section topological_algebra
variables {R : Type*} [comm_semiring R]
variables {A : Type u} [topological_space A]
variables [semiring A]
variables [algebra R A] [topological_semiring A]
/-- The closure of a subalgebra in a topological algebra as a subalgebra. -/
def subalgebra.topological_closure (s : subalgebra R A) : subalgebra R A :=
{ carrier := closure (s : set A),
algebra_map_mem' := λ r, s.to_subsemiring.subring_topological_closure (s.algebra_map_mem r),
.. s.to_subsemiring.topological_closure }
@[simp] lemma subalgebra.topological_closure_coe (s : subalgebra R A) :
(s.topological_closure : set A) = closure (s : set A) :=
rfl
instance subalgebra.topological_closure_topological_semiring (s : subalgebra R A) :
topological_semiring (s.topological_closure) :=
s.to_subsemiring.topological_closure_topological_semiring
instance subalgebra.topological_closure_topological_algebra
[topological_space R] [has_continuous_smul R A] (s : subalgebra R A) :
has_continuous_smul R (s.topological_closure) :=
s.to_submodule.topological_closure_has_continuous_smul
lemma subalgebra.subalgebra_topological_closure (s : subalgebra R A) :
s ≤ s.topological_closure :=
subset_closure
lemma subalgebra.is_closed_topological_closure (s : subalgebra R A) :
is_closed (s.topological_closure : set A) :=
by convert is_closed_closure
lemma subalgebra.topological_closure_minimal
(s : subalgebra R A) {t : subalgebra R A} (h : s ≤ t) (ht : is_closed (t : set A)) :
s.topological_closure ≤ t :=
closure_minimal h ht
/-- If a subalgebra of a topological algebra is commutative, then so is its topological closure. -/
def subalgebra.comm_semiring_topological_closure [t2_space A] (s : subalgebra R A)
(hs : ∀ (x y : s), x * y = y * x) : comm_semiring s.topological_closure :=
{ ..s.topological_closure.to_semiring,
..s.to_submonoid.comm_monoid_topological_closure hs }
/--
This is really a statement about topological algebra isomorphisms,
but we don't have those, so we use the clunky approach of talking about
an algebra homomorphism, and a separate homeomorphism,
along with a witness that as functions they are the same.
-/
lemma subalgebra.topological_closure_comap'_homeomorph
(s : subalgebra R A)
{B : Type*} [topological_space B] [ring B] [topological_ring B] [algebra R B]
(f : B →ₐ[R] A) (f' : B ≃ₜ A) (w : (f : B → A) = f') :
s.topological_closure.comap' f = (s.comap' f).topological_closure :=
begin
apply set_like.ext',
simp only [subalgebra.topological_closure_coe],
simp only [subalgebra.coe_comap, subsemiring.coe_comap, alg_hom.coe_to_ring_hom],
rw [w],
exact f'.preimage_closure _,
end
end topological_algebra
section ring
variables {R : Type*} [comm_ring R]
variables {A : Type u} [topological_space A]
variables [ring A]
variables [algebra R A] [topological_ring A]
/-- If a subalgebra of a topological algebra is commutative, then so is its topological closure.
See note [reducible non-instances]. -/
@[reducible] def subalgebra.comm_ring_topological_closure [t2_space A] (s : subalgebra R A)
(hs : ∀ (x y : s), x * y = y * x) : comm_ring s.topological_closure :=
{ ..s.topological_closure.to_ring,
..s.to_submonoid.comm_monoid_topological_closure hs }
variables (R)
/-- The topological closure of the subalgebra generated by a single element. -/
def algebra.elemental_algebra (x : A) : subalgebra R A :=
(algebra.adjoin R ({x} : set A)).topological_closure
lemma algebra.self_mem_elemental_algebra (x : A) : x ∈ algebra.elemental_algebra R x :=
set_like.le_def.mp (subalgebra.subalgebra_topological_closure (algebra.adjoin R ({x} : set A))) $
algebra.self_mem_adjoin_singleton R x
variables {R}
instance [t2_space A] {x : A} : comm_ring (algebra.elemental_algebra R x) :=
subalgebra.comm_ring_topological_closure _
begin
letI : comm_ring (algebra.adjoin R ({x} : set A)) := algebra.adjoin_comm_ring_of_comm R
(λ y hy z hz, by {rw [mem_singleton_iff] at hy hz, rw [hy, hz]}),
exact λ _ _, mul_comm _ _,
end
end ring
section division_ring
/-- The action induced by `algebra_rat` is continuous. -/
instance division_ring.has_continuous_const_smul_rat
{A} [division_ring A] [topological_space A] [has_continuous_mul A] [char_zero A] :
has_continuous_const_smul ℚ A :=
⟨λ r, continuous_const.mul continuous_id⟩
end division_ring
| {"author": "nick-kuhn", "repo": "leantools", "sha": "567a98c031fffe3f270b7b8dea48389bc70d7abb", "save_path": "github-repos/lean/nick-kuhn-leantools", "path": "github-repos/lean/nick-kuhn-leantools/leantools-567a98c031fffe3f270b7b8dea48389bc70d7abb/src/topology/algebra/algebra.lean"} |
"""
Tests for the Voice Detector model(s).
"""
import numpy as np
import os
import shutil
import sys
import unittest
import warnings
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
sys.path.insert(0, path)
import senses.dataproviders.featureprovider as fp # pylint: disable=locally-disabled, import-error
import senses.voice_detector.voice_detector as vd # pylint: disable=locally-disabled, import-error
class TestVoiceDetector(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
logpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")
if os.path.isdir(logpath):
try:
shutil.rmtree(logpath)
except PermissionError:
print("Could not remove logs. Probably because Windows is stupid.")
except OSError:
print("Could not remove logs. Probably because Windows is stupid.")
self.root = os.path.abspath("test_data_directory")
self.sample_rate = 24_000
self.nchannels = 1
self.bytewidth = 2
self.provider = fp.FeatureProvider(self.root, sample_rate=self.sample_rate, nchannels=self.nchannels, bytewidth=self.bytewidth)
def _label_fn(self, fpath):
"""
Returns 0 if fpath contains 'babies', otherwise 1.
"""
if "babies" in fpath:
return 0
else:
return 1
def test_instantiate_vd_fft_instance(self):
"""
Test creating the voice detector as an FFT model.
"""
# Create the detector
ms = 30
detector = vd.VoiceDetector(sample_rate_hz=self.sample_rate, sample_width_bytes=self.bytewidth, ms=ms, model_type="fft")
# Check that it knows how many inputs it should have
nsamples = self.sample_rate * (ms / 1000)
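        # A real-valued FFT of `nsamples` samples yields nsamples/2 + 1 frequency
        # bins, spaced sample_rate / nsamples Hz apart.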
bins = np.arange(0, int(round(nsamples/2)) + 1, 1.0) * (self.sample_rate / nsamples)
input_shape = (None, len(bins))
self.assertEqual(input_shape, detector.input_shape)
def test_instantiate_vd_spectrogram_instance(self):
"""
Test creating the voice detector as a spectrogram model.
"""
# Create the detector
ms = 300
nwindows = 10
window_length_ms = ms / nwindows
overlap = 0.5
nfreqbins = 409 # TODO: Why is this 409?
ntimebins = nwindows * (1/overlap) - 1
input_shape = (int(nfreqbins), int(ntimebins), int(1))
detector = vd.VoiceDetector(sample_rate_hz=self.sample_rate, sample_width_bytes=self.bytewidth, ms=ms, model_type="spec", window_length_ms=window_length_ms, overlap=overlap, spectrogram_shape=input_shape)
# Check that it knows its input shape
self.assertEqual((None, *input_shape), detector.input_shape)
@unittest.skipIf("TRAVIS_CI" in os.environ, "Travis CI's memory allowances are too small for this test.")
def test_fit_ffts(self):
"""
Test training on FFT data.
"""
n = None
ms = 30
batchsize = 32
datagen = self.provider.generate_n_fft_batches(n, batchsize, ms, self._label_fn, normalize=True, forever=True)
detector = vd.VoiceDetector(sample_rate_hz=self.sample_rate, sample_width_bytes=self.bytewidth, ms=ms, model_type="fft")
detector.fit(datagen, batchsize, save_models=False, steps_per_epoch=100, epochs=2)
@unittest.skipIf("TRAVIS_CI" in os.environ, "Travis CI's memory allowances are too small for this test.")
def test_fit_spectrograms(self):
"""
Test training on spectrogram data.
"""
n = None
ms = 300
batchsize = 32
shape = [s for s in self.provider.generate_n_spectrograms(n=1, ms=ms, label_fn=self._label_fn, expand_dims=True)][0][0].shape
datagen = self.provider.generate_n_spectrogram_batches(n, batchsize, ms, self._label_fn, normalize=True, forever=True, expand_dims=True)
detector = vd.VoiceDetector(sample_rate_hz=self.sample_rate, sample_width_bytes=self.bytewidth, ms=ms, model_type="spec", window_length_ms=0.5, spectrogram_shape=shape)
detector.fit(datagen, batchsize, save_models=False, steps_per_epoch=100, epochs=2)
if __name__ == "__main__":
unittest.main()
| {"hexsha": "195aaa10f8542df3fb115c76fabb8676f394555c", "size": 4281, "ext": "py", "lang": "Python", "max_stars_repo_path": "Artie/tests/test_voice_detector.py", "max_stars_repo_name": "MaxStrange/ArtieInfant", "max_stars_repo_head_hexsha": "1edbb171a5405d2971227f2d2d83acb523c70034", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-28T16:55:05.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-28T16:55:05.000Z", "max_issues_repo_path": "Artie/tests/test_voice_detector.py", "max_issues_repo_name": "MaxStrange/ArtieInfant", "max_issues_repo_head_hexsha": "1edbb171a5405d2971227f2d2d83acb523c70034", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Artie/tests/test_voice_detector.py", "max_forks_repo_name": "MaxStrange/ArtieInfant", "max_forks_repo_head_hexsha": "1edbb171a5405d2971227f2d2d83acb523c70034", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.3861386139, "max_line_length": 212, "alphanum_fraction": 0.6676010278, "include": true, "reason": "import numpy", "num_tokens": 1027} |
# This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import numpy as np
class DrivingCorridorCartesian(object):
def __init__(self):
self.left = None
self.center = None
self.right = None
def __call__(self, left, center, right):
assert isinstance(left, np.ndarray) and isinstance(center, np.ndarray) and isinstance(right, np.ndarray)
self.left = left
self.center = center
self.right = right
class DecisionBase(object):
"""DecisionBase class augments the information provided by the
understanding, prediction modules and resolves conflicts if any.
Attributes
----------
corridor : DrivingCorridorCartesian
        Cartesian coordinates of the driving corridor are stored here, whereas
        their Lanelet counterparts are stored in SceneUnderstanding.
motion_plans : List
List of alternative Motions for the future
"""
def __init__(self):
self.corridor = DrivingCorridorCartesian()
self.motion_plans = []
def __call__(self, *args, **kwargs):
pass
def set_driving_corridor(self, laneletsequence, *args):
self.corridor(
laneletsequence.bound_left(), laneletsequence.centerline(smooth=True), laneletsequence.bound_right()
)
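# A minimal usage sketch (not part of the original module): any object exposing
# bound_left(), centerline(smooth=True) and bound_right() as numpy arrays can be
# passed in; `lanelet_sequence` below is a hypothetical placeholder.
# decision = DecisionBase()
# decision.set_driving_corridor(lanelet_sequence)
# decision.corridor.center  # Cartesian centerline of the driving corridor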
| {"hexsha": "46162bec0dc4187773029acb6a74686bd4406b3a", "size": 1500, "ext": "py", "lang": "Python", "max_stars_repo_path": "p3iv_types/src/p3iv_types/decision_base.py", "max_stars_repo_name": "fzi-forschungszentrum-informatik/P3IV", "max_stars_repo_head_hexsha": "51784e6dc03dcaa0ad58a5078475fa4daec774bd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-07-27T06:56:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T11:21:30.000Z", "max_issues_repo_path": "p3iv_types/src/p3iv_types/decision_base.py", "max_issues_repo_name": "fzi-forschungszentrum-informatik/P3IV", "max_issues_repo_head_hexsha": "51784e6dc03dcaa0ad58a5078475fa4daec774bd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "p3iv_types/src/p3iv_types/decision_base.py", "max_forks_repo_name": "fzi-forschungszentrum-informatik/P3IV", "max_forks_repo_head_hexsha": "51784e6dc03dcaa0ad58a5078475fa4daec774bd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-10T01:56:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-10T01:56:44.000Z", "avg_line_length": 34.0909090909, "max_line_length": 118, "alphanum_fraction": 0.6973333333, "include": true, "reason": "import numpy", "num_tokens": 336} |
import numpy as np
from CartPole.state_utilities import ANGLE_IDX
TARGET_TIME_UP = 3.0 # s
TARGET_TIME_DOWN = 4.0
TARGET_ANGLE_UP = np.pi/5.0
TARGET_ANGLE_DOWN = 4.0*np.pi/5.0
class CheckStabilized:
def __init__(self, dt, pole_position_init='down'):
        self.samples_stabilized_min = TARGET_TIME_UP/dt  # Number of consecutive check() calls the pole must stay upright before it is reported as stabilized
        self.samples_down_min = TARGET_TIME_DOWN/dt  # Noisy angle readings can briefly place the pole in the lower half-plane; require this many consecutive calls before accepting 'down'
self.pole_position = pole_position_init
self.pole_position_now = pole_position_init
self.counter = 0
def check(self, s):
if abs(s[ANGLE_IDX]) < TARGET_ANGLE_UP:
self.pole_position_now = 'up'
elif abs(s[ANGLE_IDX]) > TARGET_ANGLE_DOWN:
self.pole_position_now = 'down'
if self.pole_position == self.pole_position_now:
self.counter = 0
else:
if self.pole_position == 'up':
if self.counter == self.samples_down_min:
self.pole_position = self.pole_position_now
else:
if self.counter == self.samples_stabilized_min:
self.pole_position = self.pole_position_now
self.counter += 1
if self.pole_position == 'up':
return True
else:
return False
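# A minimal usage sketch (not part of the original module); dt and the state
# vector `s` are hypothetical placeholders:
# checker = CheckStabilized(dt=0.02)
# is_up = checker.check(s)  # True once the pole has stayed upright for roughly TARGET_TIME_UP seconds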
| {"hexsha": "f42572a318ad770de0c7f5adb612cfa905c0e77f", "size": 1492, "ext": "py", "lang": "Python", "max_stars_repo_path": "Controllers/CheckStabilized.py", "max_stars_repo_name": "SensorsINI/CartPoleSimulation", "max_stars_repo_head_hexsha": "8f59c21796ed68d224e5f731957e68583f54a10a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-10-25T06:55:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-12T08:42:48.000Z", "max_issues_repo_path": "Controllers/CheckStabilized.py", "max_issues_repo_name": "SensorsINI/CartPoleSimulation", "max_issues_repo_head_hexsha": "8f59c21796ed68d224e5f731957e68583f54a10a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-03-12T15:05:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-02T16:27:41.000Z", "max_forks_repo_path": "Controllers/CheckStabilized.py", "max_forks_repo_name": "SensorsINI/CartPoleSimulation", "max_forks_repo_head_hexsha": "8f59c21796ed68d224e5f731957e68583f54a10a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-10-16T19:15:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T15:09:51.000Z", "avg_line_length": 33.9090909091, "max_line_length": 170, "alphanum_fraction": 0.6273458445, "include": true, "reason": "import numpy", "num_tokens": 349} |
module replace_target_test
use iso_varying_string, only: replace, var_str
use veggies, only: result_t, test_item_t, assert_equals, describe, it
implicit none
private
public :: &
test_replace_character_with_character_in_character, &
test_replace_character_with_character_in_string, &
test_replace_character_with_string_in_character, &
test_replace_character_with_string_in_string, &
test_replace_string_with_character_in_character, &
test_replace_string_with_character_in_string, &
test_replace_string_with_string_in_character, &
test_replace_string_with_string_in_string
contains
function test_replace_character_with_character_in_character() result(tests)
type(test_item_t) :: tests
tests = describe( &
"Sec. 3.7.4: REPLACE target character with character in character", &
            [ it( &
                "The copy of string is searched for occurrences of target. If" &
// " target is found, it is replaced by substring.", &
check_replace_character_with_character_in_character) &
, it( &
"The search is done in the backward direction if the argument" &
// " back is present with the value true.", &
check_replace_character_with_character_in_character_backward) &
, it( &
"If every is present with the value true, the search and replace" &
// " is continued from the character following target in the" &
// " search direction specified until all occurrences of" &
// " target in the copy string are replaced.", &
check_replace_character_with_character_in_character_every) &
            , it( &
                "Gets the expected result for various examples and edge cases", &
check_replace_examples) &
])
end function
function test_replace_character_with_character_in_string() result(tests)
type(test_item_t) :: tests
tests = describe( &
"Sec. 3.7.4: REPLACE target character with character in string", &
            [ it( &
                "The copy of string is searched for occurrences of target. If" &
// " target is found, it is replaced by substring.", &
check_replace_character_with_character_in_string) &
, it( &
"The search is done in the backward direction if the argument" &
// " back is present with the value true.", &
check_replace_character_with_character_in_string_backward) &
, it( &
"If every is present with the value true, the search and replace" &
// " is continued from the character following target in the" &
// " search direction specified until all occurrences of" &
// " target in the copy string are replaced.", &
check_replace_character_with_character_in_string_every) &
])
end function
function test_replace_character_with_string_in_character() result(tests)
type(test_item_t) :: tests
tests = describe( &
"Sec. 3.7.4: REPLACE target character with string in character", &
            [ it( &
                "The copy of string is searched for occurrences of target. If" &
// " target is found, it is replaced by substring.", &
check_replace_character_with_string_in_character) &
, it( &
"The search is done in the backward direction if the argument" &
// " back is present with the value true.", &
check_replace_character_with_string_in_character_backward) &
, it( &
"If every is present with the value true, the search and replace" &
// " is continued from the character following target in the" &
// " search direction specified until all occurrences of" &
// " target in the copy string are replaced.", &
check_replace_character_with_string_in_character_every) &
])
end function
function test_replace_character_with_string_in_string() result(tests)
type(test_item_t) :: tests
tests = describe( &
"Sec. 3.7.4: REPLACE target character with string in string", &
            [ it( &
                "The copy of string is searched for occurrences of target. If" &
// " target is found, it is replaced by substring.", &
check_replace_character_with_string_in_string) &
, it( &
"The search is done in the backward direction if the argument" &
// " back is present with the value true.", &
check_replace_character_with_string_in_string_backward) &
, it( &
"If every is present with the value true, the search and replace" &
// " is continued from the character following target in the" &
// " search direction specified until all occurrences of" &
// " target in the copy string are replaced.", &
check_replace_character_with_string_in_string_every) &
])
end function
function test_replace_string_with_character_in_character() result(tests)
type(test_item_t) :: tests
tests = describe( &
"Sec. 3.7.4: REPLACE target string with character in character", &
            [ it( &
                "The copy of string is searched for occurrences of target. If" &
// " target is found, it is replaced by substring.", &
check_replace_string_with_character_in_character) &
, it( &
"The search is done in the backward direction if the argument" &
// " back is present with the value true.", &
check_replace_string_with_character_in_character_backward) &
, it( &
"If every is present with the value true, the search and replace" &
// " is continued from the character following target in the" &
// " search direction specified until all occurrences of" &
// " target in the copy string are replaced.", &
check_replace_string_with_character_in_character_every) &
])
end function
function test_replace_string_with_character_in_string() result(tests)
type(test_item_t) :: tests
tests = describe( &
"Sec. 3.7.4: REPLACE target string with character in string", &
            [ it( &
                "The copy of string is searched for occurrences of target. If" &
// " target is found, it is replaced by substring.", &
check_replace_string_with_character_in_string) &
, it( &
"The search is done in the backward direction if the argument" &
// " back is present with the value true.", &
check_replace_string_with_character_in_string_backward) &
, it( &
"If every is present with the value true, the search and replace" &
// " is continued from the character following target in the" &
// " search direction specified until all occurrences of" &
// " target in the copy string are replaced.", &
check_replace_string_with_character_in_string_every) &
])
end function
function test_replace_string_with_string_in_character() result(tests)
type(test_item_t) :: tests
tests = describe( &
"Sec. 3.7.4: REPLACE target string with string in character", &
            [ it( &
                "The copy of string is searched for occurrences of target. If" &
// " target is found, it is replaced by substring.", &
check_replace_string_with_string_in_character) &
, it( &
"The search is done in the backward direction if the argument" &
// " back is present with the value true.", &
check_replace_string_with_string_in_character_backward) &
, it( &
"If every is present with the value true, the search and replace" &
// " is continued from the character following target in the" &
// " search direction specified until all occurrences of" &
// " target in the copy string are replaced.", &
check_replace_string_with_string_in_character_every) &
])
end function
function test_replace_string_with_string_in_string() result(tests)
type(test_item_t) :: tests
tests = describe( &
"Sec. 3.7.4: REPLACE target string with string in string", &
            [ it( &
                "The copy of string is searched for occurrences of target. If" &
// " target is found, it is replaced by substring.", &
check_replace_string_with_string_in_string) &
, it( &
"The search is done in the backward direction if the argument" &
// " back is present with the value true.", &
check_replace_string_with_string_in_string_backward) &
, it( &
"If every is present with the value true, the search and replace" &
// " is continued from the character following target in the" &
// " search direction specified until all occurrences of" &
// " target in the copy string are replaced.", &
check_replace_string_with_string_in_string_every) &
])
end function
pure function check_replace_character_with_character_in_character() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"with this in this string", &
replace( &
"this in this string", &
"this", &
"with this"))
end function
pure function check_replace_character_with_character_in_character_backward() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"this in with this string", &
replace(&
"this in this string", &
"this", &
"with this", &
back = .TRUE.))
end function
pure function check_replace_character_with_character_in_character_every() result(result_)
type(result_t) :: result_
result_ = &
assert_equals( &
"with this in with this string", &
replace(&
"this in this string", &
"this", &
"with this", &
every = .TRUE.)) &
.and.assert_equals( &
"with this in with this string", &
replace( &
"this in this string", &
"this", &
"with this", &
every = .TRUE., &
back = .TRUE.))
end function
pure function check_replace_examples() result(result_)
type(result_t) :: result_
result_ = &
assert_equals("A.", replace(".A.", ".A", "A", every=.true.))
end function
pure function check_replace_character_with_character_in_string() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"with this in this string", &
replace( &
var_str("this in this string"), &
"this", &
"with this"))
end function
pure function check_replace_character_with_character_in_string_backward() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"this in with this string", &
replace(&
var_str("this in this string"), &
"this", &
"with this", &
back = .TRUE.))
end function
pure function check_replace_character_with_character_in_string_every() result(result_)
type(result_t) :: result_
result_ = &
assert_equals( &
"with this in with this string", &
replace(&
var_str("this in this string"), &
"this", &
"with this", &
every = .TRUE.)) &
.and.assert_equals( &
"with this in with this string", &
replace( &
var_str("this in this string"), &
"this", &
"with this", &
every = .TRUE., &
back = .TRUE.))
end function
pure function check_replace_character_with_string_in_character() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"with this in this string", &
replace( &
"this in this string", &
"this", &
var_str("with this")))
end function
pure function check_replace_character_with_string_in_character_backward() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"this in with this string", &
replace(&
"this in this string", &
"this", &
var_str("with this"), &
back = .TRUE.))
end function
pure function check_replace_character_with_string_in_character_every() result(result_)
type(result_t) :: result_
result_ = &
assert_equals( &
"with this in with this string", &
replace(&
"this in this string", &
"this", &
var_str("with this"), &
every = .TRUE.)) &
.and.assert_equals( &
"with this in with this string", &
replace( &
"this in this string", &
"this", &
var_str("with this"), &
every = .TRUE., &
back = .TRUE.))
end function
pure function check_replace_character_with_string_in_string() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"with this in this string", &
replace( &
var_str("this in this string"), &
"this", &
var_str("with this")))
end function
pure function check_replace_character_with_string_in_string_backward() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"this in with this string", &
replace(&
var_str("this in this string"), &
"this", &
var_str("with this"), &
back = .TRUE.))
end function
pure function check_replace_character_with_string_in_string_every() result(result_)
type(result_t) :: result_
result_ = &
assert_equals( &
"with this in with this string", &
replace(&
var_str("this in this string"), &
"this", &
var_str("with this"), &
every = .TRUE.)) &
.and.assert_equals( &
"with this in with this string", &
replace( &
var_str("this in this string"), &
"this", &
var_str("with this"), &
every = .TRUE., &
back = .TRUE.))
end function
pure function check_replace_string_with_character_in_character() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"with this in this string", &
replace( &
"this in this string", &
var_str("this"), &
"with this"))
end function
pure function check_replace_string_with_character_in_character_backward() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"this in with this string", &
replace(&
"this in this string", &
var_str("this"), &
"with this", &
back = .TRUE.))
end function
pure function check_replace_string_with_character_in_character_every() result(result_)
type(result_t) :: result_
result_ = &
assert_equals( &
"with this in with this string", &
replace(&
"this in this string", &
var_str("this"), &
"with this", &
every = .TRUE.)) &
.and.assert_equals( &
"with this in with this string", &
replace( &
"this in this string", &
var_str("this"), &
"with this", &
every = .TRUE., &
back = .TRUE.))
end function
pure function check_replace_string_with_character_in_string() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"with this in this string", &
replace( &
var_str("this in this string"), &
var_str("this"), &
"with this"))
end function
pure function check_replace_string_with_character_in_string_backward() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"this in with this string", &
replace(&
var_str("this in this string"), &
var_str("this"), &
"with this", &
back = .TRUE.))
end function
pure function check_replace_string_with_character_in_string_every() result(result_)
type(result_t) :: result_
result_ = &
assert_equals( &
"with this in with this string", &
replace(&
var_str("this in this string"), &
var_str("this"), &
"with this", &
every = .TRUE.)) &
.and.assert_equals( &
"with this in with this string", &
replace( &
var_str("this in this string"), &
var_str("this"), &
"with this", &
every = .TRUE., &
back = .TRUE.))
end function
pure function check_replace_string_with_string_in_character() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"with this in this string", &
replace( &
"this in this string", &
var_str("this"), &
var_str("with this")))
end function
pure function check_replace_string_with_string_in_character_backward() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"this in with this string", &
replace(&
"this in this string", &
var_str("this"), &
var_str("with this"), &
back = .TRUE.))
end function
pure function check_replace_string_with_string_in_character_every() result(result_)
type(result_t) :: result_
result_ = &
assert_equals( &
"with this in with this string", &
replace(&
"this in this string", &
var_str("this"), &
var_str("with this"), &
every = .TRUE.)) &
.and.assert_equals( &
"with this in with this string", &
replace( &
"this in this string", &
var_str("this"), &
var_str("with this"), &
every = .TRUE., &
back = .TRUE.))
end function
pure function check_replace_string_with_string_in_string() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"with this in this string", &
replace( &
var_str("this in this string"), &
var_str("this"), &
var_str("with this")))
end function
pure function check_replace_string_with_string_in_string_backward() result(result_)
type(result_t) :: result_
result_ = assert_equals( &
"this in with this string", &
replace(&
var_str("this in this string"), &
var_str("this"), &
var_str("with this"), &
back = .TRUE.))
end function
pure function check_replace_string_with_string_in_string_every() result(result_)
type(result_t) :: result_
result_ = &
assert_equals( &
"with this in with this string", &
replace(&
var_str("this in this string"), &
var_str("this"), &
var_str("with this"), &
every = .TRUE.)) &
.and.assert_equals( &
"with this in with this string", &
replace( &
var_str("this in this string"), &
var_str("this"), &
var_str("with this"), &
every = .TRUE., &
back = .TRUE.))
end function
end module
| {"hexsha": "8f7b05e186caf5e6bd7261190df1e5674f7360d5", "size": 24493, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/unit_test/replace_target_test.f90", "max_stars_repo_name": "everythingfunctional/iso_varying_string", "max_stars_repo_head_hexsha": "f330d7a246d81aacfbf92f8085a4eca9d2506820", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/unit_test/replace_target_test.f90", "max_issues_repo_name": "everythingfunctional/iso_varying_string", "max_issues_repo_head_hexsha": "f330d7a246d81aacfbf92f8085a4eca9d2506820", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/unit_test/replace_target_test.f90", "max_forks_repo_name": "everythingfunctional/iso_varying_string", "max_forks_repo_head_hexsha": "f330d7a246d81aacfbf92f8085a4eca9d2506820", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.1315315315, "max_line_length": 96, "alphanum_fraction": 0.4715633038, "num_tokens": 4201} |
'''
A linear regression learning algorithm example using TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
# Training Data
train_X = numpy.asarray([5,10,1,4,6,4,9,4,10,6,3,4,9,8,1,7,4,7,10,4,6,3,7,5,8,7,6,10,6,9,1,9,4,6,8,7,8,5,1,5,8,7,10,2,8,4,10,6,10,2,10,10,1,10,3,6,6,2,5,9,6,5,1,8,10,6,0,10,2,8,1,1,5,3,9,3,2,9,6,9,2,10,4,7,8,3,2,3,7,3,0,5,1,5,7,4,1,5,7,1])
train_Y = numpy.asarray([0,8,2,7,4,5,9,8,2,9,0,1,5,0,1,10,3,7,4,7,10,4,6,1,8,3,8,7,3,6,1,4,5,2,10,9,0,4,2,4,5,3,6,1,10,6,2,7,10,1,3,7,1,2,1,9,2,8,0,0,9,1,8,2,9,10,0,1,4,10,9,10,1,4,2,9,5,1,10,9,5,7,4,4,6,2,1,8,4,10,7,1,0,6,9,6,10,9,2,0])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
# Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
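# Equivalently (TF 1.x API, shown only as an illustrative sketch), the variables to
# update could be listed explicitly:
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost, var_list=[W, b])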
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
# Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
"W=", sess.run(W), "b=", sess.run(b))
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Save the variables to disk.
save_path = saver.save(sess, "/tmp/model.ckpt")
print("Model saved in file: %s" % save_path)
# Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show()
# Testing example, as requested (Issue #2)
test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])
print("Testing... (Mean square loss Comparison)")
testing_cost = sess.run(
tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
feed_dict={X: test_X, Y: test_Y}) # same function as cost above
print("Testing cost=", testing_cost)
print("Absolute mean square loss difference:", abs(
training_cost - testing_cost))
plt.plot(test_X, test_Y, 'bo', label='Testing data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show() | {"hexsha": "38c600b2989ef491be9fb12934bd7bb08006c6c2", "size": 3498, "ext": "py", "lang": "Python", "max_stars_repo_path": "425 Machine Learning.py", "max_stars_repo_name": "4saad/425-code", "max_stars_repo_head_hexsha": "3d8a2120dae34807672ab1d7bdf8a840297372f4", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "425 Machine Learning.py", "max_issues_repo_name": "4saad/425-code", "max_issues_repo_head_hexsha": "3d8a2120dae34807672ab1d7bdf8a840297372f4", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "425 Machine Learning.py", "max_forks_repo_name": "4saad/425-code", "max_forks_repo_head_hexsha": "3d8a2120dae34807672ab1d7bdf8a840297372f4", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0618556701, "max_line_length": 240, "alphanum_fraction": 0.6217838765, "include": true, "reason": "import numpy", "num_tokens": 1222} |
"""Upsample images."""
import os
import subprocess
import numpy as np
import nibabel as nb
# =============================================================================
NII_NAMES = [
'/home/faruk/data/DATA_MRI_NIFTI/derived/sub-01/T1_wholebrain/01_crop/sub-01_ses-T2s_MP2RAGE_inv1_crop.nii.gz',
'/home/faruk/data/DATA_MRI_NIFTI/derived/sub-01/T1_wholebrain/01_crop/sub-01_ses-T2s_MP2RAGE_inv2_crop.nii.gz',
'/home/faruk/data/DATA_MRI_NIFTI/derived/sub-01/T1_wholebrain/01_crop/sub-01_ses-T2s_MP2RAGE_uni_crop.nii.gz',
'/home/faruk/data/DATA_MRI_NIFTI/derived/sub-01/T1_wholebrain/01_crop/sub-01_ses-T2s_MP2RAGE_T1_crop.nii.gz',
]
OUTDIR = "/home/faruk/data/DATA_MRI_NIFTI/derived/sub-01/T1_wholebrain/02_upsample"
# =============================================================================
print("Step_04: Upsample.")
# Output directory
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
print(" Output directory: {}\n".format(OUTDIR))
for i, f in enumerate(NII_NAMES):
print(" Processing file {}...".format(i+1))
# Prepare output
basename, ext = f.split(os.extsep, 1)
basename = os.path.basename(basename)
out_file = os.path.join(OUTDIR, "{}_ups350um.nii.gz".format(basename))
# Prepare command
command1 = "c3d {} ".format(f)
command1 += "-interpolation Cubic "
command1 += "-resample-mm 0.35x0.35x0.35mm "
command1 += "-o {}".format(out_file)
# Execute command
subprocess.run(command1, shell=True)
print('\n\nFinished.')
| {"hexsha": "ab4d4872148211fe357240e3bd33fb106677a7ee", "size": 1511, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/unused/unused-whole_brain_segmentation/unused_04_upsample.py", "max_stars_repo_name": "ofgulban/meso-MRI", "max_stars_repo_head_hexsha": "15ef8e19aae6218833a06bf01418d3d83eafd8c7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-21T13:48:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T13:48:01.000Z", "max_issues_repo_path": "scripts/unused/unused-whole_brain_segmentation/unused_04_upsample.py", "max_issues_repo_name": "ofgulban/meso-MRI", "max_issues_repo_head_hexsha": "15ef8e19aae6218833a06bf01418d3d83eafd8c7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/unused/unused-whole_brain_segmentation/unused_04_upsample.py", "max_forks_repo_name": "ofgulban/meso-MRI", "max_forks_repo_head_hexsha": "15ef8e19aae6218833a06bf01418d3d83eafd8c7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-21T13:48:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T13:48:08.000Z", "avg_line_length": 35.9761904762, "max_line_length": 115, "alphanum_fraction": 0.6432825943, "include": true, "reason": "import numpy", "num_tokens": 443} |
\chapter{\abstractname}
%TODO: Abstract
Union-Find is a classical data structure whose complexity analysis is famously non-trivial. In this thesis we prove the $\alpha$-bound on the amortized time complexity of an efficient imperative implementation of this data structure. We first review the history of this emblematic result by Tarjan \cite{Tarjan1975b} and arrive at the modern proof by Alstrup et al. \cite{Alstrup14}.
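% For concreteness, the bound in question (our phrasing of the standard result; see Tarjan
% and Alstrup et al. for the precise statements) is that any sequence of $m$ union/find
% operations on $n$ elements runs in $O(m \cdot \alpha(n))$ amortized time, where $\alpha$
% denotes an inverse of the Ackermann function.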
To reproduce this proof in a formal context within Isabelle/HOL, we first gather the required mathematical and technical tools: most prominently, a more comprehensive theory of the Ackermann function than the one already available in the Isabelle/HOL distribution, properties of its inverses, and the framework implementing Separation Logic with Time Credits for Imperative/HOL, which already contained a non-optimal implementation of this data structure. We then follow closely the work of Charguéraud and Pottier \cite{chargueraud17}, who formalized this proof in a similar framework in Coq.
In the end, we prove the asymptotically optimal bound for the operations of an efficient implementation of the Union-Find data structure. The whole proof in Isabelle is available under \cite{Loewenberg2019}. As with any other program in Imperative/HOL, the implementation can be exported to several languages.
| {"hexsha": "75ec01ba55c714ba52c70e65056934adc5452117", "size": 1338, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tum-thesis-latex-master/pages/abstract.tex", "max_stars_repo_name": "adrilow/Proof-of-the-amortized-time-complexity-of-the-Union-Find-data-structure-in-Isabelle-HOL", "max_stars_repo_head_hexsha": "293b12752261dac7f741483b62b27891bf4be1cc", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tum-thesis-latex-master/pages/abstract.tex", "max_issues_repo_name": "adrilow/Proof-of-the-amortized-time-complexity-of-the-Union-Find-data-structure-in-Isabelle-HOL", "max_issues_repo_head_hexsha": "293b12752261dac7f741483b62b27891bf4be1cc", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tum-thesis-latex-master/pages/abstract.tex", "max_forks_repo_name": "adrilow/Proof-of-the-amortized-time-complexity-of-the-Union-Find-data-structure-in-Isabelle-HOL", "max_forks_repo_head_hexsha": "293b12752261dac7f741483b62b27891bf4be1cc", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-05T10:54:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-05T10:54:16.000Z", "avg_line_length": 133.8, "max_line_length": 607, "alphanum_fraction": 0.8213751868, "num_tokens": 281} |
"""
Tests for ISMAGS isomorphism algorithm.
"""
import pytest
import networkx as nx
from networkx.algorithms import isomorphism as iso
def _matches_to_sets(matches):
"""
Helper function to facilitate comparing collections of dictionaries in
which order does not matter.
"""
return set(map(lambda m: frozenset(m.items()), matches))
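# For example (illustrative only): _matches_to_sets([{1: 2, 3: 4}, {3: 4, 1: 2}]) collapses
# both orderings to the single element {frozenset({(1, 2), (3, 4)})}, so match collections
# can be compared without caring about ordering.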
class TestSelfIsomorphism:
data = [
(
[(0, dict(name='a')),
(1, dict(name='a')),
(2, dict(name='b')),
(3, dict(name='b')),
(4, dict(name='a')),
(5, dict(name='a'))],
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]
),
(
range(1, 5),
[(1, 2), (2, 4), (4, 3), (3, 1)]
),
(
[],
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 0), (0, 6), (6, 7),
(2, 8), (8, 9), (4, 10), (10, 11)]
),
(
[],
[(0, 1), (1, 2), (1, 4), (2, 3), (3, 5), (3, 6)]
),
]
def test_self_isomorphism(self):
"""
For some small, symmetric graphs, make sure that 1) they are isomorphic
to themselves, and 2) that only the identity mapping is found.
"""
for node_data, edge_data in self.data:
graph = nx.Graph()
graph.add_nodes_from(node_data)
graph.add_edges_from(edge_data)
ismags = iso.ISMAGS(graph, graph, node_match=iso.categorical_node_match('name', None))
assert ismags.is_isomorphic()
assert ismags.subgraph_is_isomorphic()
assert (list(ismags.subgraph_isomorphisms_iter(symmetry=True)) ==
[{n: n for n in graph.nodes}])
def test_edgecase_self_isomorphism(self):
"""
This edgecase is one of the cases in which it is hard to find all
symmetry elements.
"""
graph = nx.Graph()
nx.add_path(graph, range(5))
graph.add_edges_from([(2, 5), (5, 6)])
ismags = iso.ISMAGS(graph, graph)
ismags_answer = list(ismags.find_isomorphisms(True))
assert ismags_answer == [{n: n for n in graph.nodes}]
graph = nx.relabel_nodes(graph, {0: 0, 1: 1, 2: 2, 3: 3, 4: 6, 5: 4, 6: 5})
ismags = iso.ISMAGS(graph, graph)
ismags_answer = list(ismags.find_isomorphisms(True))
assert ismags_answer == [{n: n for n in graph.nodes}]
@pytest.mark.skip()
def test_directed_self_isomorphism(self):
"""
For some small, directed, symmetric graphs, make sure that 1) they are
isomorphic to themselves, and 2) that only the identity mapping is
found.
"""
for node_data, edge_data in self.data:
graph = nx.Graph()
graph.add_nodes_from(node_data)
graph.add_edges_from(edge_data)
ismags = iso.ISMAGS(graph, graph, node_match=iso.categorical_node_match('name', None))
assert ismags.is_isomorphic()
assert ismags.subgraph_is_isomorphic()
assert (list(ismags.subgraph_isomorphisms_iter(symmetry=True)) ==
[{n: n for n in graph.nodes}])
class TestSubgraphIsomorphism:
def test_isomorphism(self):
g1 = nx.Graph()
nx.add_cycle(g1, range(4))
g2 = nx.Graph()
nx.add_cycle(g2, range(4))
g2.add_edges_from([(n, m) for n, m in zip(g2, range(4, 8))])
ismags = iso.ISMAGS(g2, g1)
assert (list(ismags.subgraph_isomorphisms_iter(symmetry=True)) ==
[{n: n for n in g1.nodes}])
def test_isomorphism2(self):
g1 = nx.Graph()
nx.add_path(g1, range(3))
g2 = g1.copy()
g2.add_edge(1, 3)
ismags = iso.ISMAGS(g2, g1)
matches = ismags.subgraph_isomorphisms_iter(symmetry=True)
expected_symmetric = [{0: 0, 1: 1, 2: 2},
{0: 0, 1: 1, 3: 2},
{2: 0, 1: 1, 3: 2}]
assert (_matches_to_sets(matches) ==
_matches_to_sets(expected_symmetric))
matches = ismags.subgraph_isomorphisms_iter(symmetry=False)
expected_asymmetric = [{0: 2, 1: 1, 2: 0},
{0: 2, 1: 1, 3: 0},
{2: 2, 1: 1, 3: 0}]
assert (_matches_to_sets(matches) ==
_matches_to_sets(expected_symmetric + expected_asymmetric))
def test_labeled_nodes(self):
g1 = nx.Graph()
nx.add_cycle(g1, range(3))
g1.nodes[1]['attr'] = True
g2 = g1.copy()
g2.add_edge(1, 3)
ismags = iso.ISMAGS(g2, g1, node_match=lambda x, y: x == y)
matches = ismags.subgraph_isomorphisms_iter(symmetry=True)
expected_symmetric = [{0: 0, 1: 1, 2: 2}]
assert (_matches_to_sets(matches) ==
_matches_to_sets(expected_symmetric))
matches = ismags.subgraph_isomorphisms_iter(symmetry=False)
expected_asymmetric = [{0: 2, 1: 1, 2: 0}]
assert (_matches_to_sets(matches) ==
_matches_to_sets(expected_symmetric + expected_asymmetric))
def test_labeled_edges(self):
g1 = nx.Graph()
nx.add_cycle(g1, range(3))
g1.edges[1, 2]['attr'] = True
g2 = g1.copy()
g2.add_edge(1, 3)
ismags = iso.ISMAGS(g2, g1, edge_match=lambda x, y: x == y)
matches = ismags.subgraph_isomorphisms_iter(symmetry=True)
expected_symmetric = [{0: 0, 1: 1, 2: 2}]
assert (_matches_to_sets(matches) ==
_matches_to_sets(expected_symmetric))
matches = ismags.subgraph_isomorphisms_iter(symmetry=False)
expected_asymmetric = [{1: 2, 0: 0, 2: 1}]
assert (_matches_to_sets(matches) ==
_matches_to_sets(expected_symmetric + expected_asymmetric))
class TestWikipediaExample:
# Nodes 'a', 'b', 'c' and 'd' form a column.
# Nodes 'g', 'h', 'i' and 'j' form a column.
g1edges = [['a', 'g'], ['a', 'h'], ['a', 'i'],
['b', 'g'], ['b', 'h'], ['b', 'j'],
['c', 'g'], ['c', 'i'], ['c', 'j'],
['d', 'h'], ['d', 'i'], ['d', 'j']]
# Nodes 1,2,3,4 form the clockwise corners of a large square.
# Nodes 5,6,7,8 form the clockwise corners of a small square
g2edges = [[1, 2], [2, 3], [3, 4], [4, 1],
[5, 6], [6, 7], [7, 8], [8, 5],
[1, 5], [2, 6], [3, 7], [4, 8]]
def test_graph(self):
g1 = nx.Graph()
g2 = nx.Graph()
g1.add_edges_from(self.g1edges)
g2.add_edges_from(self.g2edges)
gm = iso.ISMAGS(g1, g2)
assert gm.is_isomorphic()
class TestLargestCommonSubgraph:
def test_mcis(self):
# Example graphs from DOI: 10.1002/spe.588
graph1 = nx.Graph()
graph1.add_edges_from([(1, 2), (2, 3), (2, 4), (3, 4), (4, 5)])
graph1.nodes[1]['color'] = 0
graph2 = nx.Graph()
graph2.add_edges_from([(1, 2), (2, 3), (2, 4), (3, 4), (3, 5),
(5, 6), (5, 7), (6, 7)])
graph2.nodes[1]['color'] = 1
graph2.nodes[6]['color'] = 2
graph2.nodes[7]['color'] = 2
ismags = iso.ISMAGS(graph1, graph2, node_match=iso.categorical_node_match('color', None))
assert list(ismags.subgraph_isomorphisms_iter(True)) == []
assert list(ismags.subgraph_isomorphisms_iter(False)) == []
found_mcis = _matches_to_sets(ismags.largest_common_subgraph())
expected = _matches_to_sets([{2: 2, 3: 4, 4: 3, 5: 5},
{2: 4, 3: 2, 4: 3, 5: 5}])
assert expected == found_mcis
ismags = iso.ISMAGS(graph2, graph1, node_match=iso.categorical_node_match('color', None))
assert list(ismags.subgraph_isomorphisms_iter(True)) == []
assert list(ismags.subgraph_isomorphisms_iter(False)) == []
found_mcis = _matches_to_sets(ismags.largest_common_subgraph())
# Same answer, but reversed.
expected = _matches_to_sets([{2: 2, 3: 4, 4: 3, 5: 5},
{4: 2, 2: 3, 3: 4, 5: 5}])
assert expected == found_mcis
def test_symmetry_mcis(self):
graph1 = nx.Graph()
nx.add_path(graph1, range(4))
graph2 = nx.Graph()
nx.add_path(graph2, range(3))
graph2.add_edge(1, 3)
# Only the symmetry of graph2 is taken into account here.
ismags1 = iso.ISMAGS(graph1, graph2, node_match=iso.categorical_node_match('color', None))
assert list(ismags1.subgraph_isomorphisms_iter(True)) == []
found_mcis = _matches_to_sets(ismags1.largest_common_subgraph())
expected = _matches_to_sets([{0: 0, 1: 1, 2: 2},
{1: 0, 3: 2, 2: 1}])
assert expected == found_mcis
# Only the symmetry of graph1 is taken into account here.
ismags2 = iso.ISMAGS(graph2, graph1, node_match=iso.categorical_node_match('color', None))
assert list(ismags2.subgraph_isomorphisms_iter(True)) == []
found_mcis = _matches_to_sets(ismags2.largest_common_subgraph())
expected = _matches_to_sets([{3: 2, 0: 0, 1: 1},
{2: 0, 0: 2, 1: 1},
{3: 0, 0: 2, 1: 1},
{3: 0, 1: 1, 2: 2},
{0: 0, 1: 1, 2: 2},
{2: 0, 3: 2, 1: 1}])
assert expected == found_mcis
found_mcis1 = _matches_to_sets(ismags1.largest_common_subgraph(False))
found_mcis2 = ismags2.largest_common_subgraph(False)
found_mcis2 = [{v: k for k, v in d.items()} for d in found_mcis2]
found_mcis2 = _matches_to_sets(found_mcis2)
expected = _matches_to_sets([{3: 2, 1: 3, 2: 1},
{2: 0, 0: 2, 1: 1},
{1: 2, 3: 3, 2: 1},
{3: 0, 1: 3, 2: 1},
{0: 2, 2: 3, 1: 1},
{3: 0, 1: 2, 2: 1},
{2: 0, 0: 3, 1: 1},
{0: 0, 2: 3, 1: 1},
{1: 0, 3: 3, 2: 1},
{1: 0, 3: 2, 2: 1},
{0: 3, 1: 1, 2: 2},
{0: 0, 1: 1, 2: 2}])
assert expected == found_mcis1
assert expected == found_mcis2
| {"hexsha": "ed3d81ea38804437b785953d511ff0827863de8c", "size": 10542, "ext": "py", "lang": "Python", "max_stars_repo_path": "networkx/algorithms/isomorphism/tests/test_ismags.py", "max_stars_repo_name": "Mackyboy12/networkx", "max_stars_repo_head_hexsha": "2afe713bce0a57dbdd8e51d3b83ae28f73c9b677", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-04-29T10:38:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T03:30:28.000Z", "max_issues_repo_path": "networkx/algorithms/isomorphism/tests/test_ismags.py", "max_issues_repo_name": "Mackyboy12/networkx", "max_issues_repo_head_hexsha": "2afe713bce0a57dbdd8e51d3b83ae28f73c9b677", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-19T11:01:00.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-19T11:01:00.000Z", "max_forks_repo_path": "networkx/algorithms/isomorphism/tests/test_ismags.py", "max_forks_repo_name": "Mackyboy12/networkx", "max_forks_repo_head_hexsha": "2afe713bce0a57dbdd8e51d3b83ae28f73c9b677", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-08T07:50:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-08T11:59:03.000Z", "avg_line_length": 39.0444444444, "max_line_length": 98, "alphanum_fraction": 0.5142287991, "include": true, "reason": "import networkx,from networkx", "num_tokens": 3218} |
import os
from pathlib import Path
import pandas as pd
from src.models.ecomplexity_model import EconomicDataModel
import pickle as pkl
import numpy as np
project_dir = Path(__file__).resolve().parents[1]
raw_dir = os.path.join(project_dir, 'data', 'raw')
interim_dir = os.path.join(project_dir, 'data', 'interim')
external_data_dir = os.path.join(project_dir, 'data', 'external')
processed_data_dir = os.path.join(project_dir, 'data', 'processed')
if __name__ == '__main__':
world_region_data = pd.read_csv(os.path.join(raw_dir, 'fake_tbl.csv'))
processing_container = EconomicDataModel.getContrainer(world_region_data)
processing_container.rca()
processing_container.mcp()
processing_container.ubiquity()
processing_container.diversity()
processing_container.proximity()
processing_container.eci_pci()
# print(processing_container.mcp_data[2019])
#np.savetxt('mcc_eigen_vecs.csv', np.linalg.eig(processing_container.MCC[2019])[1], fmt='%.5f',delimiter=',')
np.savetxt('mcc.csv', processing_container.MCC[2019],fmt='%.5f', delimiter=',')
np.savetxt('mpp.csv', processing_container.MPP[2019],fmt='%.5f', delimiter=',')
#np.savetxt('eigen.csv', processing_container.eigen_kp[2019],fmt='%.5f', delimiter=',')
mpp = processing_container.MPP[2019]
mcc = processing_container.MCC[2019]
eigenvals, eigenvec = np.linalg.eig(mcc)
np.savetxt('mcc_eigenvec.csv', eigenvec, fmt='%.5f', delimiter=',')
np.savetxt('mcc_eigenval.csv', eigenvals, fmt='%.5f', delimiter=',')
np.savetxt('mpp_eigenvec.csv', np.linalg.eig(mpp)[1][1], fmt='%.5f', delimiter=',')
np.savetxt('mpp_eigenval.csv', np.linalg.eig(mpp)[0], fmt='%.5f', delimiter=',')
res_tbl = {}
c_prox = processing_container.country_proximity()
with open(project_dir/ 'data' / 'cprox.pkl', 'wb') as f:
pkl.dump(processing_container.country_proximity_data, f)
| {"hexsha": "6cec5d124e6d948e17d11d687321dc839665a5fc", "size": 1916, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/main.py", "max_stars_repo_name": "hydrophis-spiralis/regional_economics_complexity", "max_stars_repo_head_hexsha": "c507c7307c068dd3e1f6b846b5c25641a5dd507b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-11T21:28:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-11T21:28:55.000Z", "max_issues_repo_path": "src/main.py", "max_issues_repo_name": "hydrophis-spiralis/regional_economics_complexity", "max_issues_repo_head_hexsha": "c507c7307c068dd3e1f6b846b5c25641a5dd507b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main.py", "max_forks_repo_name": "hydrophis-spiralis/regional_economics_complexity", "max_forks_repo_head_hexsha": "c507c7307c068dd3e1f6b846b5c25641a5dd507b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1020408163, "max_line_length": 113, "alphanum_fraction": 0.7124217119, "include": true, "reason": "import numpy", "num_tokens": 517} |
from __future__ import annotations
from typing import Callable, Optional, Union
import numpyro
import numpyro.distributions as dist
import jax
import jax.numpy as jnp
from numpy.typing import ArrayLike
from .kernels import Kernel, WhiteNoise
__all__ = [
'GP',
]
class GP:
r"""Gaussian process class.
    The function f(x) is described by a Gaussian process: a collection of
    random variables, any finite number of which follow a multivariate
    normal distribution.
.. math::
f(x) \sim \mathcal{GP}(m(x), k(x, x')),
where :math:`m(x)` and :math:`k(x, x')` are the mean and covariance
of :math:`f(x)` and :math:`f(x')`. I.e. :math:`k(x, x') = \mathrm{Cov}(f(x), f(x'))`.
The kernel implies a distribution over all possible functional forms of :math:`f`.
For example, :math:`f` given :math:`x` is drawn from a multivariate
normal distribution,
.. math::
f | x \sim \mathcal{N}(m(x), k(x, x)).
The marginal likelihood of some observation :math:`y` of :math:`f(x)`,
.. math::
p(y | x) = \int p(y | f, x)\,p(f | \theta)\,\mathrm{d}f
can be shown to be,
.. math::
y | x \sim \mathcal{N}(m(x), k(x, x) + n(x))
where :math:`n(x)` is some uncorrelated Gaussian noise term such that
:math:`y | f \sim \mathcal{N}(f, n(x))`.
Making predictions from the GP given :math:`x` and :math:`y` for some
new points :math:`x_\star`,
.. math::
:nowrap:
\begin{equation}
\begin{bmatrix}
f_\star\\y
\end{bmatrix}
\sim \mathcal{N} \left(
\begin{bmatrix}
m(x_\star)\\m(x)
\end{bmatrix}
,\,
\begin{bmatrix}
k(x_\star, x_\star) & k(x_\star, x) \\
k(x, x_\star) & k(x, x) + n(x)
\end{bmatrix}
\right)
\end{equation}
    or making predictions with noise :math:`n(x_\star)`,
.. math::
:nowrap:
\begin{equation}
\begin{bmatrix}
y_\star\\y
\end{bmatrix}
\sim \mathcal{N} \left(
\begin{bmatrix}
m(x_\star)\\m(x)
\end{bmatrix}
,\,
\begin{bmatrix}
k(x_\star, x_\star) + n(x_\star) & k(x_\star, x) \\
k(x, x_\star) & k(x, x) + n(x)
\end{bmatrix}
\right)
\end{equation}
    Making point predictions at :math:`x_\star` given the observations, the
    predicted truth is
    .. math::
        f(x_\star) | y \sim \mathcal{N}\left(
            m(x_\star) + k(x_\star, x)[k(x, x) + n(x)]^{-1}(y - m(x)),\,
            k(x_\star, x_\star) - k(x_\star, x)[k(x, x) + n(x)]^{-1}k(x, x_\star)
        \right),
    and the predicted observations just add noise,
    .. math::
        y_\star | y \sim \mathcal{N}\left(
            m(x_\star) + k(x_\star, x)[k(x, x) + n(x)]^{-1}(y - m(x)),\,
            k(x_\star, x_\star) + n(x_\star) - k(x_\star, x)[k(x, x) + n(x)]^{-1}k(x, x_\star)
        \right).
Args:
        kernel (Kernel, or function): Kernel function.
mean (function): Mean model function. If float, mean
function is constant at this value.
jitter (float, or function): Small amount to add to the covariance.
If float, this is multiplied by the identity matrix.
Example:
.. code-block:: python
import numpyro
import numpyro.distributions as dist
from asterion.gp import GP, SquaredExponential
def model(x, x_pred=None, y=None):
var = numpyro.sample('var', dist.HalfNormal(1.0))
length = numpyro.sample('length', dist.Normal(100.0, 1.0))
noise = numpyro.sample('noise', dist.HalfNormal(0.1))
kernel = SquaredExponential(var, length)
gp = GP(kernel)
gp.sample('y', x, noise=noise, obs=y)
if x_pred is not None:
gp.predict('f_pred', x_pred, noise=None)
gp.predict('y_pred', x_pred, noise=noise)
"""
def __init__(self, kernel: Union[Kernel, Callable], mean: Union[float, Callable]=0.0,
jitter: Union[float, Callable]=1e-6):
if not callable(kernel):
raise TypeError("Argument 'kernel' is not callable")
self.kernel: Callable = kernel #: Kernel function.
if not callable(mean):
_mean = lambda x: jnp.full(x.shape, mean)
else:
_mean = mean
self.mean: Callable = _mean #: Mean function.
if not callable(jitter):
_jitter = lambda x: jitter * jnp.eye(x.shape[-1])
else:
_jitter = jitter
self.jitter: Callable = _jitter #: Jitter function.
self.noise: Optional[Callable] = None
""": Independent noise function passed to
:meth:`distribution` or :meth:`sample`."""
self.x: Optional[ArrayLike] = None
""": Input array passed to :meth:`distribution` or :meth:`sample`."""
self.y: Optional[ArrayLike] = None
""": Output of :meth:`sample`."""
self.loc: Optional[ArrayLike] = None
""": Output of :attr:`mean` :code:`(x)`."""
self.cov: Optional[ArrayLike] = None
""": Output of :attr:`kernel` :code:`(x, x)`."""
def __add__(self, obj):
if not isinstance(obj, self.__class__):
raise TypeError(
f"Object added must be instance of {self.__class__}"
)
kernel = self.kernel + obj.kernel
        # Both mean and jitter are callables after __init__, so compose them pointwise
        # rather than adding/comparing the function objects themselves.
        mean = lambda x: self.mean(x) + obj.mean(x)
        jitter = lambda x: jnp.maximum(self.jitter(x), obj.jitter(x))  # take the max jitter
gp = GP(kernel, mean=mean, jitter=jitter)
return gp
def _validate_noise(self,
noise: Optional[Union[Callable, float]]) -> Callable:
if noise is None or noise is False:
noise = WhiteNoise(0.0)
elif not callable(noise):
noise = WhiteNoise(noise)
return noise
def distribution(self, x: ArrayLike,
noise: Optional[Union[Callable, float]]=None,
**kwargs) -> dist.MultivariateNormal:
"""Distribution for the GP. Calling this method updates :attr:`x`,
:attr:`noise`, :attr:`loc` and :attr:`cov`.
Args:
x: The x values for which to construct the distribution
noise: The independent noise function.
**kwargs: Keyword arguments to pass to dist.MultivariateNormal.
"""
self.x = x
self.noise = self._validate_noise(noise)
self.loc = self.mean(x)
self.cov = self.kernel(x, x) + self.noise(x) + self.jitter(x)
return dist.MultivariateNormal(self.loc, self.cov, **kwargs)
def sample(self, name: str, x: ArrayLike,
noise: Optional[Union[Callable, float]]=None,
obs: Optional[ArrayLike]=None,
rng_key: Optional[jnp.ndarray]=None,
sample_shape: tuple=(), infer: Optional[dict]=None,
obs_mask: Optional[ArrayLike]=None,
**kwargs) -> jnp.ndarray:
"""Sample from the GP likelihood. Calling this method updates
:attr:`x`, :attr:`noise`, :attr:`loc` and :attr:`cov` and assigns the
result to :attr:`y`.
Args:
            name: Name of the numpyro sample site.
            x: The x values for which to construct the likelihood.
            noise: The independent noise function (or scale). Defaults to None.
            obs: Observed values of the GP, if any. Defaults to None.
            rng_key: Random key passed to :func:`numpyro.sample`. Defaults to None.
            sample_shape: Sample shape passed to :func:`numpyro.sample`. Defaults to ().
            infer: Inference configuration passed to :func:`numpyro.sample`. Defaults to None.
            obs_mask: Observation mask passed to :func:`numpyro.sample`. Defaults to None.
Returns:
A sample from the GP likelihood.
"""
fn = self.distribution(x, noise=noise, **kwargs)
self.y = numpyro.sample(name, fn, obs=obs, rng_key=rng_key,
sample_shape=sample_shape, infer=infer,
obs_mask=obs_mask)
return self.y
def _build_conditional(self, x: ArrayLike,
noise: Optional[Union[float, Callable]]=None,
gp=None, diag: bool=False):
"""Make a prediction for the loc and cov of f(x) given y,
loc = mp + kxp·(kxx + nx)^{-1}·(y - mx),
cov = kpp + np - kxp·(kxx + nx)^{-1}·kxp^T,
var = sqrt(diag(cov)),
where mx = mean(x), mp = mean(x_pred), kxx = kernel(x, x),
kxp = kernel(x, x_pred), kpp = kernel(x_pred, x_pred),
nx = noise(x), and np = noise(x_pred).
Args:
x: The x values for which to make predictions.
noise: If True, add self.noise to the
prediction. If callable, must be a function of (x_pred, x_pred).
Otherwise, pass the scale parameter for WhiteNoise.
Default is None (no noise).
gp (GP, optional): The GP from which to make predictions.
For example, used in GP addition. Default is self.
diag: If True, returns the variance. Default is False.
Returns:
loc: The mean of the prediction.
cov or var: The covariance or variance of the prediction.
"""
if gp is None:
# Predict given a different GP (e.g. additive)
gp = self
if gp.x is None:
raise ValueError("GP must be sampled to make predictions," +
" consider the `gp` keyword argument")
kxx = gp.cov
L = jax.scipy.linalg.cho_factor(kxx, lower=True)
A = jax.scipy.linalg.cho_solve(L, gp.y - gp.loc)
# Cross terms and prediction terms are always self.
kxp = self.kernel(gp.x, x)
v = jax.scipy.linalg.cho_solve(L, kxp.T)
kpp = self.kernel(x, x) + self.jitter(x)
        # Handle `noise is True` before validation; otherwise the flag would already
        # have been wrapped in a WhiteNoise kernel and this branch could never fire.
        if noise is True:
            noise = gp.noise
        else:
            noise = self._validate_noise(noise)
kpp += noise(x)
loc = self.mean(x) + jnp.dot(kxp, A)
cov = kpp - jnp.dot(kxp, v)
if diag:
var = jnp.diag(cov)
return loc, var
return loc, cov
def conditional(self, x: ArrayLike,
noise: Optional[Union[float, Callable]]=None,
diag: bool=False, gp=None, **kwargs) -> dist.Distribution:
"""Make a conditional distribution for y' = f(x') + noise',
y' | y ~ N(
mp + kxp·(kxx + nx)^{-1}·(y - mx),
kpp + np - kxp·(kxx + nx)^{-1}·kxp^T
),
where mx = mean(x), mp = mean(x_pred), kxx = kernel(x, x),
kxp = kernel(x, x_pred), kpp = kernel(x_pred, x_pred),
nx = noise(x), and np = noise(x_pred).
Args:
x: The x values for which to make predictions.
noise: If True, add self.noise to
the prediction. If callable, must be a function of
(x_pred, x_pred). Otherwise, pass the scale parameter for
WhiteNoise. Default is None (no noise).
gp (GP, optional): The GP from which to make predictions. Default is
self. E.g. the total GP in which self is a term.
diag: If True, diagonalises the variance. Default is False.
**kwargs: Keyword arguments to pass to :class:`dist.Normal` or
:class:`dist.MultivariateNormal`.
"""
args = self._build_conditional(x, noise=noise, gp=gp, diag=diag)
if diag:
return dist.Normal(*args, **kwargs)
return dist.MultivariateNormal(*args, **kwargs)
def predict(self, name: str, x: ArrayLike,
noise: Optional[Union[float, Callable]]=None, gp=None,
diag: bool=False, rng_key: Optional[jnp.ndarray]=None,
sample_shape: tuple=(), infer: Optional[dict]=None,
**kwargs) -> jnp.ndarray:
"""Sample from the GP conditional distribution.
Args:
            name: Name of the numpyro sample site.
            x: The x values for which to make predictions.
            noise: If True, add the noise of the sampled GP to the prediction;
                may also be a noise function or scale. Defaults to None.
            gp: The GP from which to make predictions (e.g. the total GP in
                which self is a term). Defaults to None (self).
            diag: If True, diagonalises the variance. Defaults to False.
            rng_key: Random key passed to :func:`numpyro.sample`. Defaults to None.
            sample_shape: Sample shape passed to :func:`numpyro.sample`. Defaults to ().
            infer: Inference configuration passed to :func:`numpyro.sample`. Defaults to None.
**kwargs: Keyword arguments to pass to :meth:`conditional`.
Returns:
            A sample from the GP conditional distribution.
"""
fn = self.conditional(x, noise=noise, gp=gp, diag=diag, **kwargs)
return numpyro.sample(name, fn, rng_key=rng_key, sample_shape=sample_shape,
infer=infer)
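# A minimal additive-GP sketch (illustrative only; the kernel names below are assumptions,
# not part of this module). When a likelihood is built from a sum of GPs, each term can be
# predicted separately by passing the total GP via the `gp` keyword:
#
#     gp1 = GP(kernel1)
#     gp2 = GP(kernel2)
#     gp = gp1 + gp2
#     gp.sample("y", x, noise=noise, obs=y)
#     gp1.predict("f1_pred", x_pred, gp=gp)  # contribution of the first term
#     gp2.predict("f2_pred", x_pred, gp=gp)  # contribution of the second term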
| {"hexsha": "223eb7c032d951f0973624cf0cc4558671b9e1c2", "size": 12772, "ext": "py", "lang": "Python", "max_stars_repo_path": "asterion/gp/gp.py", "max_stars_repo_name": "alexlyttle/helium-glitch-fitter", "max_stars_repo_head_hexsha": "22575f0126e3c7c4e124a0acc740b8e71ce5294e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "asterion/gp/gp.py", "max_issues_repo_name": "alexlyttle/helium-glitch-fitter", "max_issues_repo_head_hexsha": "22575f0126e3c7c4e124a0acc740b8e71ce5294e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "asterion/gp/gp.py", "max_forks_repo_name": "alexlyttle/helium-glitch-fitter", "max_forks_repo_head_hexsha": "22575f0126e3c7c4e124a0acc740b8e71ce5294e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.379501385, "max_line_length": 89, "alphanum_fraction": 0.5353116192, "include": true, "reason": "import numpy,from numpy,import jax", "num_tokens": 3220} |
{-# OPTIONS --universe-polymorphism #-}
module LLev where
--****************
-- Universe polymorphism
--****************
data Level : Set where
ze : Level
su : Level -> Level
{-# BUILTIN LEVEL Level #-}
{-# BUILTIN LEVELZERO ze #-}
{-# BUILTIN LEVELSUC su #-}
max : Level -> Level -> Level
max ze m = m
max (su n) ze = su n
max (su n) (su m) = su (max n m)
{-# BUILTIN LEVELMAX max #-}
record Up {l : Level} (A : Set l) : Set (su l) where
constructor up
field down : A
open Up
record Sg {l : Level}(S : Set l)(T : S -> Set l) : Set l where
constructor _,_
field
fst : S
snd : T fst
open Sg
_*_ : {l : Level} -> Set l -> Set l -> Set l
S * T = Sg S \ _ -> T
infixr 4 _*_ _,_
data Zero : Set where
record One {l : Level} : Set l where
constructor <>
data Desc {l : Level}(I : Set l) : Set (su l) where
var : (i : I) -> Desc I
con : (A : Set l) -> Desc I
sg pi : (S : Set l)(T : S -> Desc I) -> Desc I
_**_ : (S T : Desc I) -> Desc I
infixr 4 _**_
[!_!] : {l : Level}{I : Set l} -> Desc I -> (I -> Set l) -> Set l
[! var i !] X = X i
[! con A !] X = A
[! sg S T !] X = Sg S \ s -> [! T s !] X
[! pi S T !] X = (s : S) -> [! T s !] X
[! S ** T !] X = [! S !] X * [! T !] X
{-
data Mu {l : Level}(I : Set l)(F : I -> Desc I)(i : I) : Set l where
<_> : [! F i !] (Mu I F) -> Mu I F i
-}
All : {l : Level}{I : Set l}
(D : Desc I)(X : I -> Set l) -> [! D !] X -> Desc (Sg I X)
All (var i) X x = var (i , x)
All (con A) X d = con One
All (sg S T) X (s , t) = All (T s) X t
All (pi S T) X f = pi S \ s -> All (T s) X (f s)
All (S ** T) X (s , t) = All S X s ** All T X t
all : {l : Level}{I : Set l}
(D : Desc I)(X : I -> Set l)(P : Sg I X -> Set l) ->
((ix : Sg I X) -> P ix) ->
(d : [! D !] X) -> [! All D X d !] P
all (var i) X P p x = p (i , x)
all (con A) X P p d = _
all (sg S T) X P p (s , t) = all (T s) X P p t
all (pi S T) X P p f = \ s -> all (T s) X P p (f s)
all (S ** T) X P p (s , t) = all S X P p s , all T X P p t
{-
induction : {l : Level}{I : Set l}{F : I -> Desc I}
(P : Sg I (Mu I F) -> Set l) ->
((i : I)(d : [! F i !] (Mu I F)) ->
[! All (F i) (Mu I F) d !] P -> P (i , < d >)) ->
(ix : Sg I (Mu I F)) -> P ix
induction {F = F} P p (i , < d >) = p i d (all (F i) (Mu _ F) P (induction P p) d)
-}
{-
mutual
induction : {l : Level}{I : Set l}{F : I -> Desc I}
(P : Sg I (Mu I F) -> Set l) ->
((i : I)(d : [! F i !] (Mu I F)) ->
[! All (F i) (Mu I F) d !] P -> P (i , < d >)) ->
{i : I}(x : Mu I F i) -> P (i , x)
induction {F = F} P p {i} < d > = p i d (allInduction F P p (F i) d)
allInduction : {l : Level}{I : Set l}(F : I -> Desc I)
(P : Sg I (Mu I F) -> Set l) ->
((i : I)(d : [! F i !] (Mu I F)) ->
[! All (F i) (Mu I F) d !] P -> P (i , < d >)) ->
(D : Desc I) ->
(d : [! D !] (Mu I F)) -> [! All D (Mu I F) d !] P
allInduction F P p (var i) d = induction P p d
allInduction F P p (con A) d = _
allInduction F P p (sg S T) (s , t) = allInduction F P p (T s) t
allInduction F P p (pi S T) f = \ s -> allInduction F P p (T s) (f s)
allInduction F P p (S ** T) (s , t) = allInduction F P p S s , allInduction F P p T t
-}
data List {l : Level}(X : Set l) : Set l where
[] : List X
_::_ : X -> List X -> List X
infixr 3 _::_
map : {k l : Level}{X : Set k}{Y : Set l} -> (X -> Y) -> List X -> List Y
map f [] = []
map f (x :: xs) = f x :: map f xs
data UId {l : Level} : Set l where
ze : UId
su : UId {l} -> UId
data # {l : Level} : List {l} UId -> Set l where
ze : forall {x xs} -> # (x :: xs)
su : forall {x xs} -> # xs -> # (x :: xs)
Constrs : {l : Level} -> Set l -> Set (su l)
Constrs I = List (Up UId * Desc I)
Constr : {l : Level}{I : Set l} -> Constrs I -> Set l
Constr uDs = # (map (\ uD -> down (fst uD)) uDs)
ConD : {l : Level}{I : Set l}(uDs : Constrs I) -> Constr uDs -> Desc I
ConD [] ()
ConD (uD :: _) ze = snd uD
ConD (_ :: uDs) (su c) = ConD uDs c
data Data {l : Level}{I : Set l}(F : I -> Constrs I)(i : I) : Set l where
_/_ : (u : Constr (F i))(d : [! ConD (F i) u !] (Data F)) -> Data F i
{-
DataD : {l : Level}{I : Set l} -> Constrs I -> Desc I
DataD uDs = sg (Constr uDs) (ConD uDs)
Data : {l : Level}{I : Set l}(F : I -> Constrs I)(i : I) -> Set l
Data F = Mu _ \ i -> DataD (F i)
-}
MethodD : {l : Level}{I : Set l}(X : I -> Set l)
(uDs : Constrs I)
(P : Sg I X -> Set l)
(i : I) -> ((u : Constr uDs)(d : [! ConD uDs u !] X) -> X i) -> Set l
MethodD X [] P i c = One
MethodD X ((u , D) :: uDs) P i c
= ((d : [! D !] X) -> [! All D X d !] P -> P (i , c ze d))
* MethodD X uDs P i (\ u d -> c (su u) d)
mutual
induction : {l : Level}{I : Set l}{F : I -> Constrs I}{i : I}(x : Data F i)
(P : Sg I (Data F) -> Set l)
(ps : (i : I) -> MethodD (Data F) (F i) P i _/_)
-> P (i , x)
induction {F = F}{i = i}(u / d) P ps = indMethod P ps (F i) _/_ (ps i) u d
indMethod : {l : Level}{I : Set l}{F : I -> Constrs I}{i : I}
(P : Sg I (Data F) -> Set l)
(ps : (i : I) -> MethodD (Data F) (F i) P i _/_)
(uDs : Constrs I)
(c : (u : Constr uDs) -> [! ConD uDs u !] (Data F) -> Data F i)
(ms : MethodD (Data F) uDs P i c)
(u : Constr uDs)
(d : [! ConD uDs u !] (Data F))
-> P (i , c u d)
indMethod P ps [] c ms () d
indMethod P ps ((u , D) :: _) c (p , ms) ze d = p d (indHyps P ps D d)
indMethod P ps ((_ , _) :: uDs) c (m , ms) (su u) d
= indMethod P ps uDs _ ms u d
indHyps : {l : Level}{I : Set l}{F : I -> Constrs I}
(P : Sg I (Data F) -> Set l)
(ps : (i : I) -> MethodD (Data F) (F i) P i _/_)
(D : Desc I)
(d : [! D !] (Data F))
-> [! All D (Data F) d !] P
indHyps P ps (var i) x = induction x P ps
indHyps P ps (con A) a = _
indHyps P ps (sg S T) (s , t) = indHyps P ps (T s) t
indHyps P ps (pi S T) f = \ s -> indHyps P ps (T s) (f s)
indHyps P ps (S ** T) (s , t) = indHyps P ps S s , indHyps P ps T t
zi : {l : Level} -> UId {l}
zi = ze
si : {l : Level} -> UId {l}
si = su ze
data UQ {l : Level}(x : UId {l}) : UId {l} -> Set l where
yes : UQ x x
no : {y : UId {l}} -> UQ x y
uq : {l : Level}(x y : UId {l}) -> UQ x y
uq ze ze = yes
uq ze (su y) = no
uq (su x) ze = no
uq (su x) (su y) with uq x y
uq (su x) (su .x) | yes = yes
uq (su x) (su y) | no = no
UIn : {l : Level}(x : UId {l}) -> List (UId {l}) -> Set
UIn x [] = Zero
UIn x (y :: ys) with uq x y
UIn x (.x :: _) | yes = One
UIn x (_ :: ys) | no = UIn x ys
uin : {l : Level}(x : UId {l})(ys : List (UId {l})) -> UIn x ys -> # ys
uin x [] ()
uin x (y :: ys) p with uq x y
uin x (.x :: _) _ | yes = ze
uin x (_ :: ys) p | no = su (uin x ys p)
_!_ : {l : Level}{I : Set l}{F : I -> Constrs I}{i : I} ->
let us = map (\ uD -> down (fst uD)) (F i) in
(u : UId {l}){p : UIn u us} ->
[! ConD (F i) (uin u us p) !] (Data F) ->
Data F i
_!_ {l}{I}{F}{i} u {p} d = uin u (map (\ uD -> down (fst uD)) (F i)) p / d
infixr 3 _!_
NAT : {l : Level} -> One {l} -> Constrs {l} One
NAT _ = up zi , con One
:: up si , var _ ** con One
:: []
ZE : {l : Level} -> Data {l} NAT <>
ZE = zi ! <>
SU : {l : Level} -> Data {l} NAT <> -> Data {l} NAT <>
SU n = si ! n , <>
vari : {l : Level} -> UId {l}
vari = ze
coni : {l : Level} -> UId {l}
coni = su ze
sgi : {l : Level} -> UId {l}
sgi = su (su ze)
pii : {l : Level} -> UId {l}
pii = su (su (su ze))
asti : {l : Level} -> UId {l}
asti = su (su (su (su ze)))
DESC : {l : Level}(I : Set l) -> One {su l} -> Constrs {su l} One
DESC I _ = up vari , con (Up I) ** con One
:: up coni , con (Set _) ** con One
:: up sgi , (sg (Set _) \ S -> (pi (Up S) \ _ -> var _) ** con One)
:: up pii , (sg (Set _) \ S -> (pi (Up S) \ _ -> var _) ** con One)
:: up asti , var _ ** var _ ** con One
:: []
| {"hexsha": "2a737abe9b8a45dc6913fa78fdf87ec9a513cdf0", "size": 8129, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "models/LLev.agda", "max_stars_repo_name": "dataronio/epigram2", "max_stars_repo_head_hexsha": "17b7858f51a35b3becb8433028c3f1ba25fbba9a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2015-02-23T15:00:26.000Z", "max_stars_repo_stars_event_max_datetime": "2015-11-05T01:06:39.000Z", "max_issues_repo_path": "models/LLev.agda", "max_issues_repo_name": "dataronio/epigram2", "max_issues_repo_head_hexsha": "17b7858f51a35b3becb8433028c3f1ba25fbba9a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/LLev.agda", "max_forks_repo_name": "dataronio/epigram2", "max_forks_repo_head_hexsha": "17b7858f51a35b3becb8433028c3f1ba25fbba9a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-12T11:51:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T11:51:42.000Z", "avg_line_length": 31.3861003861, "max_line_length": 87, "alphanum_fraction": 0.4347398204, "num_tokens": 3379} |
import json
import numpy as np
with open('geo-jsons.json', 'r') as f:
opened = json.load(f)
opened = opened['features']
opened = [{'mentions': x['properties']['mentions'], 'city': x['properties']['city']} for x in opened]
opened = [json.dumps(x) for x in opened]
opened = set(opened)
opened = [json.loads(x) for x in opened]
opened = sorted(opened, key=lambda x: x['mentions'], reverse=True)
for x in opened:
    print(x)
numbers = [x['mentions'] for x in opened]
print([np.percentile(numbers, x) for x in [0,20,40,60,80,100]])
[STATEMENT]
lemma "((\<lambda>x::real. (ln(ln x + ln (ln x)) - ln (ln x)) /
(ln (ln x + ln (ln (ln x)))) * ln x) \<longlongrightarrow> 1) at_top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. (ln (ln x + ln (ln x)) - ln (ln x)) / ln (ln x + ln (ln (ln x))) * ln x) \<longlongrightarrow> 1) at_top
[PROOF STEP]
by real_asymp | {"llama_tokens": 164, "file": null, "length": 1} |
import numpy as np
from numpy import ndarray
def normalize_to_max(intensity: ndarray):
    """Scale the intensity array so that its maximum value is 1."""
    return intensity / np.max(intensity)
def normalize_to_first(intensity: ndarray):
    """Scale the intensity array relative to its first element."""
    return intensity / intensity[0]
| {"hexsha": "4995a348fa06dc9c2cf2d62fef99d47af3de03bb", "size": 212, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlreflect/xrrloader/footprint/normalization.py", "max_stars_repo_name": "schreiber-lab/mlreflect", "max_stars_repo_head_hexsha": "88a80ccac48461cc8934a46041726b70e469c6b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mlreflect/xrrloader/footprint/normalization.py", "max_issues_repo_name": "schreiber-lab/mlreflect", "max_issues_repo_head_hexsha": "88a80ccac48461cc8934a46041726b70e469c6b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlreflect/xrrloader/footprint/normalization.py", "max_forks_repo_name": "schreiber-lab/mlreflect", "max_forks_repo_head_hexsha": "88a80ccac48461cc8934a46041726b70e469c6b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.2727272727, "max_line_length": 43, "alphanum_fraction": 0.7688679245, "include": true, "reason": "import numpy,from numpy", "num_tokens": 45} |
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os.path as osp
import gc
def get_model(x, y):
    """Grid-search an SVR pipeline over kernel, C, epsilon and gamma and return the best estimator."""
regr = SVR()
pipe = Pipeline(steps=[('reg', regr)])
param_grid = {
'reg__kernel':('linear', 'rbf'),
'reg__C': [0.01, 0.1, 1, 10],
'reg__epsilon': [0.1, 0.2, 0.4, 0.5, 0.8, 1., 1.5, 2, 3],
'reg__gamma': ['auto', 'scale'],
}
search = GridSearchCV(pipe, param_grid, iid=False, cv=5,
return_train_score=False, n_jobs = 4)
search.fit(x, y)
return search.best_estimator_
def read(file_name):
    """Read a tab-separated file whose header holds the years (x) and whose row holds the values (y)."""
data = pd.read_csv(file_name, sep = '\t')
x = np.array([[float(year)] for year in list(data)])
y = np.array([[year] for year in np.array(data).reshape(-1)]).reshape(-1, )
return x, y
if __name__ == '__main__':
data_root = '../data/machine_learning'
file_names = ['black_african_american.tsv', 'female.tsv', 'hispanic_latino.tsv', 'male.tsv', 'under_18_years.tsv', 'white.tsv']
names = ['black african american', 'female ', 'hispanic latino', 'male', 'under 18 years', 'white']
query = np.array([[2018], [2019], [2020]]).reshape(-1, )
for fn, n in zip(file_names, names):
x, y = read(osp.join(data_root, fn))
#predict(x, y, np.array([[2018], [2020]]))
model = get_model(x, y)
y_model = model.predict(x)
#y_query = model.predict(query)
fig =plt.figure()
plt.title(n)
plt.scatter(x, y, color='green')
#plt.scatter(query, y_query, color='black')
plt.plot(x, y_model, color='blue', linewidth=2)
plt.savefig(fn.split('.')[0] + '.jpg')
# Clean RAM
fig.clf()
plt.close()
gc.collect() | {"hexsha": "dfeece6c6b9d007256a036c5a8a3512e7806b021", "size": 1771, "ext": "py", "lang": "Python", "max_stars_repo_path": "machine_learning/regresion-allergy-percentage.py", "max_stars_repo_name": "RQuispeC/fun-with-allergies", "max_stars_repo_head_hexsha": "90d5aa9f5fb80e6139974ae6eb18006ed661bc1c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "machine_learning/regresion-allergy-percentage.py", "max_issues_repo_name": "RQuispeC/fun-with-allergies", "max_issues_repo_head_hexsha": "90d5aa9f5fb80e6139974ae6eb18006ed661bc1c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "machine_learning/regresion-allergy-percentage.py", "max_forks_repo_name": "RQuispeC/fun-with-allergies", "max_forks_repo_head_hexsha": "90d5aa9f5fb80e6139974ae6eb18006ed661bc1c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0169491525, "max_line_length": 128, "alphanum_fraction": 0.6702428007, "include": true, "reason": "import numpy", "num_tokens": 553} |
import pandas as pd
from sklearn.manifold import TSNE
from numpy import array, dot, diag, nan_to_num
from numpy.random import randn
import sys
features = 'CADD1,CADD2,RecA,EssA,CADD3,CADD4,RecB,EssB,Path'.split(',')
df_data = pd.read_csv("dida_posey_to_predict.csv")
combination = list(map(int, sys.argv[1]))
n_comb = sum(combination)
X = array(df_data[features])
X = dot(X, diag(combination))
for i in (0,1,4,5):
if not combination[i]:
continue
X[:,i] += 1.701666
X[:,i] /= 15.746334
if n_comb > 2:
X = TSNE(n_components=2, init="pca").fit_transform(X)
X = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X = nan_to_num(X)
else:
X = X[:, [i for i, j in enumerate(combination) if j] ]
df_data_vs = df_data.copy(False)
df_data_vs['x'] = X[:,0]
df_data_vs['y'] = X[:,1] if n_comb > 1 else 0
df_data_vs = df_data_vs.drop('Pair', 1)
with open("exports/p_file_" + ''.join(map(str, combination)) + ".csv", "w") as out:
out.write('id,x,y\n')
for line in array(df_data_vs):
out.write(','.join(map(str, line[[0,-2,-1]])) + '\n')
| {"hexsha": "15c1ab7bb83b1732ca5652747ee98f566616a50f", "size": 1126, "ext": "py", "lang": "Python", "max_stars_repo_path": "Visualizer_preparation/preparation.py", "max_stars_repo_name": "oligogenic/DIDA_SSL", "max_stars_repo_head_hexsha": "cbf61892bfde999eadf31db918833f6c75a5c9f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-19T10:34:46.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-19T10:34:46.000Z", "max_issues_repo_path": "Visualizer_preparation/preparation.py", "max_issues_repo_name": "oligogenic/DIDA_SSL", "max_issues_repo_head_hexsha": "cbf61892bfde999eadf31db918833f6c75a5c9f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Visualizer_preparation/preparation.py", "max_forks_repo_name": "oligogenic/DIDA_SSL", "max_forks_repo_head_hexsha": "cbf61892bfde999eadf31db918833f6c75a5c9f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.15, "max_line_length": 84, "alphanum_fraction": 0.6136767318, "include": true, "reason": "from numpy", "num_tokens": 368} |
[STATEMENT]
lemma cp_OclAsType\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n_OclAny_Person: "cp P \<Longrightarrow> cp(\<lambda>X. (P (X::OclAny)::Person) .oclAsType(Person))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cp P \<Longrightarrow> cp (\<lambda>X. P X .oclAsType(Person))
[PROOF STEP]
by(rule cpI1, simp_all add: OclAsType\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n_Person) | {"llama_tokens": 174, "file": "Featherweight_OCL_examples_Employee_Model_Analysis_Analysis_UML", "length": 1} |
#include <gtest/gtest.h>
#include "converter/fixml2fix_converter.hxx"
#include "converter/xml_element_helper.hxx"
#include "converter/fix_helper.hxx"
#include "util/fix_env.hxx"
#include "tools/test_util.hxx"
#include <boost/log/trivial.hpp>
#include <quickfix/fix50sp2/ListExecute.h>
#include <list>
#include <set>
#include <string>
#include <utility>
using namespace std;
using namespace fix2xml;
TEST ( ListExecute, set_fields)
{
fixml2fix_converter converter {"../spec/fix/FIX50SP2.xml", "../spec/xsd/fixml-main-5-0-SP2.xsd"};
auto& fixml_dict = converter.fixml_dico();
ASSERT_TRUE(converter.init());
ASSERT_TRUE(converter.parse_fixt_dico("../spec/fix/FIXT11.xml"));
FIX50SP2::ListExecute msg;
list<multiset<string>> all_values;
multiset<string> all_compo_names;
multiset<string> ListExecute_0;
set_field(msg, FIX::BidID{"STRING_2009599783"}, ListExecute_0);
set_field(msg, FIX::ClientBidID{"STRING_1575754340"}, ListExecute_0);
set_field(msg, FIX::EncodedText{"DATA_1283644968"}, ListExecute_0);
set_field(msg, FIX::EncodedTextLen{898881521}, ListExecute_0);
set_field(msg, FIX::ListID{"STRING_1494408507"}, ListExecute_0);
set_field(msg, FIX::Text{"STRING_1897368907"}, ListExecute_0);
set_field(msg, FIX::TransactTime{FIX::UTCTIMESTAMP(2, 44, 20, 22, 1, 2016)}, ListExecute_0);
all_values.push_back(ListExecute_0);
all_compo_names.insert("ListExecute");
// header
multiset<string> header_35;
set_header_field(msg.getHeader(), FIX::ApplVerID{"STRING_8"}, header_35);
set_header_field(msg.getHeader(), FIX::BeginString{"STRING_310358417"}, header_35);
set_header_field(msg.getHeader(), FIX::BodyLength{1486910557}, header_35);
set_header_field(msg.getHeader(), FIX::CstmApplVerID{"STRING_853320835"}, header_35);
set_header_field(msg.getHeader(), FIX::DeliverToCompID{"STRING_1816103482"}, header_35);
set_header_field(msg.getHeader(), FIX::DeliverToLocationID{"STRING_260399809"}, header_35);
set_header_field(msg.getHeader(), FIX::DeliverToSubID{"STRING_909079063"}, header_35);
set_header_field(msg.getHeader(), FIX::LastMsgSeqNumProcessed{1826721203}, header_35);
set_header_field(msg.getHeader(), FIX::MessageEncoding{"STRING_EUC-JP"}, header_35);
set_header_field(msg.getHeader(), FIX::MsgSeqNum{1042361561}, header_35);
set_header_field(msg.getHeader(), FIX::OnBehalfOfCompID{"STRING_1623483752"}, header_35);
set_header_field(msg.getHeader(), FIX::OnBehalfOfLocationID{"STRING_938443529"}, header_35);
set_header_field(msg.getHeader(), FIX::OnBehalfOfSubID{"STRING_1845887650"}, header_35);
set_header_field(msg.getHeader(), FIX::OrigSendingTime{FIX::UTCTIMESTAMP(4, 11, 46, 22, 11, 2005)}, header_35);
set_header_field(msg.getHeader(), FIX::PossDupFlag{true}, header_35);
set_header_field(msg.getHeader(), FIX::PossResend{false}, header_35);
set_header_field(msg.getHeader(), FIX::SecureData{"DATA_2138938526"}, header_35);
set_header_field(msg.getHeader(), FIX::SecureDataLen{1216291485}, header_35);
set_header_field(msg.getHeader(), FIX::SenderCompID{"STRING_2113191723"}, header_35);
set_header_field(msg.getHeader(), FIX::SenderLocationID{"STRING_1701988328"}, header_35);
set_header_field(msg.getHeader(), FIX::SenderSubID{"STRING_76483761"}, header_35);
set_header_field(msg.getHeader(), FIX::SendingTime{FIX::UTCTIMESTAMP(6, 57, 2, 15, 7, 2001)}, header_35);
set_header_field(msg.getHeader(), FIX::TargetCompID{"STRING_697975882"}, header_35);
set_header_field(msg.getHeader(), FIX::TargetLocationID{"STRING_2055517049"}, header_35);
set_header_field(msg.getHeader(), FIX::TargetSubID{"STRING_396143120"}, header_35);
set_header_field(msg.getHeader(), FIX::XmlData{"DATA_1607054945"}, header_35);
set_header_field(msg.getHeader(), FIX::XmlDataLen{1734754604}, header_35);
all_values.push_back(header_35);
all_compo_names.insert(".header");
xml_element elt;
converter.fix2fixml(msg, elt);
BOOST_LOG_TRIVIAL(debug) << "The resulting XML is";
cout << "////////////////////////////////////////////" << endl;
cout << elt.to_string() << endl;
cout << "////////////////////////////////////////////" << endl << endl;
BOOST_LOG_TRIVIAL(debug) << "Quickfix XML representation is";
cout << "////////////////////////////////////////////" << endl;
cout << msg.toXML() << endl;
cout << "////////////////////////////////////////////" << endl << endl;
list<multiset<string>> elt_lists;
elt.to_list(elt_lists);
EXPECT_EQ(elt_lists.size(), all_values.size());
if (elt_lists.size() != all_values.size()) {
multiset<string> elt_compo_name;
elt.all_components(elt_compo_name);
BOOST_LOG_TRIVIAL(debug) << "XML Elements are:";
cout << " [";
copy(elt_compo_name.begin(), elt_compo_name.end(), ostream_iterator<string>(cout, " ")); cout << "]" << endl;
BOOST_LOG_TRIVIAL(debug) << "FIX Components are:";
cout << " [";
copy(all_compo_names.begin(), all_compo_names.end(), ostream_iterator<string>(cout, " ")); cout << "]" << endl;
}
BOOST_LOG_TRIVIAL(debug) << "All FIX components";
for (const auto& l : all_values) {
cout << " [";
copy(l.begin(), l.end(), ostream_iterator<string>(cout, " "));
cout << "]" << endl;
}
BOOST_LOG_TRIVIAL(debug) << "All XML components";
for (const auto& l : elt_lists) {
cout << " [";
copy(l.begin(), l.end(), ostream_iterator<string>(cout, " "));
cout << "]" << endl;
}
for (const auto& xml_l : elt_lists) {
bool found = false;
for (const auto& l : all_values) {
if (includes(l.begin(), l.end(), xml_l.begin(), xml_l.end())) {
found = true;
break;
} // end if includes
} // end for all_values
EXPECT_TRUE(found);
if ( ! found) {
BOOST_LOG_TRIVIAL(debug) << "[TO CHECK] This XML component was not found in FIX message";
cout << " ---> [";
copy(xml_l.begin(), xml_l.end(), ostream_iterator<string>(cout, " ")); cout << "]" << endl << endl;
} // end if ! found
} // end for elt_lists
}
| {"hexsha": "ff8028acd1fb9f863005b6fac44d83b9b32e8ade", "size": 6012, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tools/generated/fix2xml/test_fix2xml_ListExecute.cpp", "max_stars_repo_name": "abdelkaderamar/fix2xml", "max_stars_repo_head_hexsha": "fa781b747a8e40ed4c2d3dee8294fb51654f7428", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-09-26T12:08:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-26T12:08:19.000Z", "max_issues_repo_path": "tools/generated/fix2xml/test_fix2xml_ListExecute.cpp", "max_issues_repo_name": "abdelkaderamar/fix2xml", "max_issues_repo_head_hexsha": "fa781b747a8e40ed4c2d3dee8294fb51654f7428", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/generated/fix2xml/test_fix2xml_ListExecute.cpp", "max_forks_repo_name": "abdelkaderamar/fix2xml", "max_forks_repo_head_hexsha": "fa781b747a8e40ed4c2d3dee8294fb51654f7428", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-12-11T04:11:44.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-11T04:11:44.000Z", "avg_line_length": 44.8656716418, "max_line_length": 118, "alphanum_fraction": 0.6862940785, "num_tokens": 1668} |
import gym
class SwitchingWrapper(gym.Wrapper):
    """Gym wrapper that tags every transition with the index of the wrapped environment."""
def __init__(self, env: gym.Env, env_index: int):
super().__init__(env)
self.env_index = env_index
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return (
observation,
reward,
done,
{**info, **{"env_index": self.env_index}},
)
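# Illustrative usage sketch: wrap a toy environment and check that "env_index"
# is injected into the step info dict. "CartPole-v1" is only an assumed example
# id; any registered gym environment works the same way.
if __name__ == "__main__":
    env = SwitchingWrapper(gym.make("CartPole-v1"), env_index=3)
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    assert info["env_index"] == 3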
| {"hexsha": "f34a2bed13d750ddcd9154f6f00f40a58290fdd5", "size": 547, "ext": "py", "lang": "Python", "max_stars_repo_path": "rltime/env_wrappers/switching_env_wrapper.py", "max_stars_repo_name": "frederikschubert/rltime", "max_stars_repo_head_hexsha": "d1722ffd4cf7b4599655b8d9c64abc243919afc9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rltime/env_wrappers/switching_env_wrapper.py", "max_issues_repo_name": "frederikschubert/rltime", "max_issues_repo_head_hexsha": "d1722ffd4cf7b4599655b8d9c64abc243919afc9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rltime/env_wrappers/switching_env_wrapper.py", "max_forks_repo_name": "frederikschubert/rltime", "max_forks_repo_head_hexsha": "d1722ffd4cf7b4599655b8d9c64abc243919afc9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.88, "max_line_length": 63, "alphanum_fraction": 0.5886654479, "include": true, "reason": "import numpy", "num_tokens": 124} |
SUBROUTINE BCKMLT(A,U,N,NA,NU)
C
C PURPOSE:
C Compute the orthogonal matrix that reduces the output matrix A
C from subroutine HSHLDR, to upper Hessenberg form.
C
C REFERENCES:
C Bartels, R.H.; and Stewart, G.W.: Algorithm 432 - Solution of
C the Matrix Equation AX + XB = C. Commun. ACM, vol. 15, no. 9,
C Sept. 1972, pp. 820-826.
C
C Subroutines employed by BCKMLT: None
C Subroutines employing BCKMLT: ATXPXA, AXPXB
C
IMPLICIT REAL*8 (A-H,O-Z)
REAL*8
1A(NA,1),U(NU,1),SUM,P
INTEGER
1N,NA,N1,NM1,NM2,LL,L,L1,I,J
N1 = N+1
NM1 = N-1
NM2 = N-2
U(N,N) = 1.
IF(NM1 .EQ. 0) RETURN
U(NM1,N) = 0.
U(N,NM1) = 0.
U(NM1,NM1) = 1.
IF(NM2 .EQ. 0) RETURN
DO 40 LL=1,NM2
L = NM2-LL+1
L1 = L+1
IF(A(N1,L) .EQ. 0.) GO TO 25
DO 20 J=L1,N
SUM = 0.
DO 10 I=L1,N
SUM = SUM + A(I,L)*U(I,J)
10 CONTINUE
P = SUM/A(N1,L)
DO 20 I=L1,N
U(I,J) = U(I,J) - A(I,L)*P
20 CONTINUE
25 DO 30 I=L1,N
U(I,L) = 0.
U(L,I) = 0.
30 CONTINUE
U(L,L) = 1.
40 CONTINUE
RETURN
END
| {"hexsha": "d6a14ae92cfafd8918618e63b3dcd68a5f38db28", "size": 1234, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "gsc-13067/iac/diriac2/oracls/bckmlt.for", "max_stars_repo_name": "SteveDoyle2/nasa-cosmic", "max_stars_repo_head_hexsha": "c8015a9851a04f0483b978d92c2cbaee31c81fe3", "max_stars_repo_licenses": ["BSD-Source-Code"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2015-03-14T07:26:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-16T12:23:17.000Z", "max_issues_repo_path": "gsc-13067/iac/diriac2/oracls/bckmlt.for", "max_issues_repo_name": "SteveDoyle2/nasa-cosmic", "max_issues_repo_head_hexsha": "c8015a9851a04f0483b978d92c2cbaee31c81fe3", "max_issues_repo_licenses": ["BSD-Source-Code"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gsc-13067/iac/diriac2/oracls/bckmlt.for", "max_forks_repo_name": "SteveDoyle2/nasa-cosmic", "max_forks_repo_head_hexsha": "c8015a9851a04f0483b978d92c2cbaee31c81fe3", "max_forks_repo_licenses": ["BSD-Source-Code"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2016-02-12T22:18:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-08T17:46:54.000Z", "avg_line_length": 24.68, "max_line_length": 71, "alphanum_fraction": 0.4902755267, "num_tokens": 501} |
import jax.numpy as np
# Helpers for complex arithmetic on arrays stored as a stacked [real, imag]
# pair along axis 0 (a two-row representation that works with JAX transforms).
def deinsum(subscript, aa, bb):
real = np.einsum(subscript, aa[0], bb[0]) - np.einsum(subscript, aa[1], bb[1])
imag = np.einsum(subscript, aa[0], bb[1]) + np.einsum(subscript, aa[1], bb[0])
return np.stack([real, imag], axis=0)
def deinsum_ord(subscript, aa, bb):
real = np.einsum(subscript, aa, bb[0])
imag = np.einsum(subscript, aa, bb[1])
return np.stack([real, imag], axis=0)
def dabs(aa):
    return aa[0]**2 + aa[1]**2  # squared modulus; arrays are stacked vertically, so aa[0] is the first (real) row
def dconj(aa):
    # complex conjugate: keep the real row and negate the imaginary row
    return np.stack([aa[0], -aa[1]], axis=0)
def dtomine(aa):
return np.stack([np.real(aa), np.imag(aa)], axis=0)
def dconstruct(aa, bb):
    return np.stack([aa, bb], axis=0)  # stack the two arrays vertically (real part on top, imaginary part below)
def ddivide(a, bb):
real = a * bb[0] / dabs(bb)
imag = -a * bb[1] / dabs(bb)
return np.stack([real, imag], axis=0) | {"hexsha": "bc4cbbda9a587e154bdcd6b9d5f851ad3d51d31d", "size": 841, "ext": "py", "lang": "Python", "max_stars_repo_path": "dplex.py", "max_stars_repo_name": "Dream7-Kim/graduation_code", "max_stars_repo_head_hexsha": "be1808f90589c08d7283a8e12f52e22a5749c27d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dplex.py", "max_issues_repo_name": "Dream7-Kim/graduation_code", "max_issues_repo_head_hexsha": "be1808f90589c08d7283a8e12f52e22a5749c27d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dplex.py", "max_forks_repo_name": "Dream7-Kim/graduation_code", "max_forks_repo_head_hexsha": "be1808f90589c08d7283a8e12f52e22a5749c27d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0, "max_line_length": 82, "alphanum_fraction": 0.6123662307, "include": true, "reason": "import jax", "num_tokens": 319} |
# coding: utf-8
# Copyright (c) 2021 AkaiKKRteam.
# Distributed under the terms of the Apache License, Version 2.0.
import matplotlib.pyplot as plt
import os
import numpy as np
from .AkaiKkr import AkaikkrJob
from .BasePlotter import BaseEXPlotter
class IterPlotter:
"""plotter for history
"""
def __init__(self, rms):
"""initialization routine
Args:
rms ([float]): history values
"""
rms = np.array(rms)
if rms.ndim == 1:
rms = rms.reshape(1, -1)
self.rms = rms
def make(self, output_directory: str, ylabels: list, filename: str, figsize=(5, 3)):
"""make iteration plot
Args:
output_directory (str): output directory
            ylabels (list): y-axis labels, one per history
filename (str): output filename
figsize (tuple, optional): figure size. Defaults to (5, 3).
"""
outputpath = output_directory
os.makedirs(outputpath, exist_ok=True)
filepath = os.path.join(outputpath, filename)
if not isinstance(ylabels, list):
ylabels = [ylabels]
fig, axes = plt.subplots(self.rms.shape[0], 1, figsize=figsize)
if not isinstance(axes, np.ndarray):
axes = [axes]
for y, ax, ylabel in zip(self.rms, axes, ylabels):
x = list(range(len(y)))
x = np.array(x)
x += 1
ax.plot(x, y)
ax.set_ylabel(ylabel)
ax.tick_params(axis="x", labelbottom=False)
# show only the last ticks and labels
ax.set_xlabel("iteration")
ax.tick_params(axis="x", labelbottom=True)
fig.tight_layout()
fig.savefig(filepath)
print("saved to", filepath)
fig.clf()
plt.close(fig)
class IterEXPlotter(BaseEXPlotter):
def __init__(self, directory, outfile="out_go.log", output_directory=None,):
"""
Args:
            directory (str): directory containing the AkaiKKR output file
            outfile (str, optional): output filename. Defaults to "out_go.log".
            output_directory (str, optional): directory to save the figures. Defaults to None.
"""
super().__init__(directory, outfile, output_directory)
def make(self, hist_type=["te", "moment", "err"], filename: str = "iter_all.png", figsize=(5, 3)):
"""make history plot from outputfile
Args:
hist_type ([str]]): history type te|moment|err. Defauls to ["te", "moment", "err"].
filename (str): image filename
"""
job = AkaikkrJob(self.directory)
rms = []
for h in hist_type:
if h == "te":
value = job.get_te_history(self.outfile)
elif h == "moment":
value = job.get_moment_history(self.outfile)
elif h == "err":
value = job.get_err_history(self.outfile)
else:
raise ValueError("unknown hist_type={}".format(hist_type))
rms.append(value)
iterplotter = IterPlotter(rms)
iterplotter.make(self.output_directory, ylabels=hist_type,
filename=filename, figsize=figsize)
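# Illustrative usage sketch: plot two fabricated convergence histories with
# IterPlotter. The values and the "output" directory are made-up examples, and
# the file must be run as part of its package so the relative imports above
# resolve (e.g. via `python -m <package>.IterPlotter`).
if __name__ == "__main__":
    te_history = [0.50, 0.10, 0.02, 0.004]
    err_history = [1e-1, 1e-2, 1e-3, 1e-4]
    plotter = IterPlotter([te_history, err_history])
    plotter.make("output", ylabels=["te", "err"], filename="iter_demo.png")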
| {"hexsha": "6a373b76656fba4d3ba985bcdb55eb2278543d3e", "size": 3278, "ext": "py", "lang": "Python", "max_stars_repo_path": "library/PyAkaiKKR/pyakaikkr/IterPlotter.py", "max_stars_repo_name": "AkaiKKRteam/AkaiKKRPythonUtil", "max_stars_repo_head_hexsha": "be716747de83c9f1787b6ac1c9a61ef725a643dd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "library/PyAkaiKKR/pyakaikkr/IterPlotter.py", "max_issues_repo_name": "AkaiKKRteam/AkaiKKRPythonUtil", "max_issues_repo_head_hexsha": "be716747de83c9f1787b6ac1c9a61ef725a643dd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-11-26T06:28:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T05:45:11.000Z", "max_forks_repo_path": "library/PyAkaiKKR/pyakaikkr/IterPlotter.py", "max_forks_repo_name": "AkaiKKRteam/AkaiKKRPythonUtil", "max_forks_repo_head_hexsha": "be716747de83c9f1787b6ac1c9a61ef725a643dd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-11-26T03:06:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-26T04:45:31.000Z", "avg_line_length": 32.4554455446, "max_line_length": 102, "alphanum_fraction": 0.5759609518, "include": true, "reason": "import numpy", "num_tokens": 767} |
[STATEMENT]
lemma ndec_seq_mem:"\<lbrakk>a \<in> (A::nat set); \<not> (\<exists>m. m\<in>A \<and> (\<forall>x\<in>A. m \<le> x))\<rbrakk> \<Longrightarrow>
(ndec_seq A a n) \<in> A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>a \<in> A; \<nexists>m. m \<in> A \<and> (\<forall>x\<in>A. m \<le> x)\<rbrakk> \<Longrightarrow> ndec_seq A a n \<in> A
[PROOF STEP]
apply (induct_tac n)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>a \<in> A; \<nexists>m. m \<in> A \<and> (\<forall>x\<in>A. m \<le> x)\<rbrakk> \<Longrightarrow> ndec_seq A a 0 \<in> A
2. \<And>n. \<lbrakk>a \<in> A; \<nexists>m. m \<in> A \<and> (\<forall>x\<in>A. m \<le> x); ndec_seq A a n \<in> A\<rbrakk> \<Longrightarrow> ndec_seq A a (Suc n) \<in> A
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. \<lbrakk>a \<in> A; \<nexists>m. m \<in> A \<and> (\<forall>x\<in>A. m \<le> x); ndec_seq A a n \<in> A\<rbrakk> \<Longrightarrow> ndec_seq A a (Suc n) \<in> A
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. \<lbrakk>a \<in> A; \<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. \<not> m \<le> x); ndec_seq A a n \<in> A\<rbrakk> \<Longrightarrow> (SOME b. b \<in> A \<and> b < ndec_seq A a n) \<in> A
[PROOF STEP]
apply (simp add: not_less [symmetric])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. \<lbrakk>a \<in> A; \<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m); ndec_seq A a n \<in> A\<rbrakk> \<Longrightarrow> (SOME b. b \<in> A \<and> b < ndec_seq A a n) \<in> A
[PROOF STEP]
apply (subgoal_tac "\<exists>x\<in>A. x < (ndec_seq A a n)")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>n. \<lbrakk>a \<in> A; \<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m); ndec_seq A a n \<in> A; \<exists>x\<in>A. x < ndec_seq A a n\<rbrakk> \<Longrightarrow> (SOME b. b \<in> A \<and> b < ndec_seq A a n) \<in> A
2. \<And>n. \<lbrakk>a \<in> A; \<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m); ndec_seq A a n \<in> A\<rbrakk> \<Longrightarrow> \<exists>x\<in>A. x < ndec_seq A a n
[PROOF STEP]
prefer 2
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>n. \<lbrakk>a \<in> A; \<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m); ndec_seq A a n \<in> A\<rbrakk> \<Longrightarrow> \<exists>x\<in>A. x < ndec_seq A a n
2. \<And>n. \<lbrakk>a \<in> A; \<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m); ndec_seq A a n \<in> A; \<exists>x\<in>A. x < ndec_seq A a n\<rbrakk> \<Longrightarrow> (SOME b. b \<in> A \<and> b < ndec_seq A a n) \<in> A
[PROOF STEP]
apply blast
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. \<lbrakk>a \<in> A; \<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m); ndec_seq A a n \<in> A; \<exists>x\<in>A. x < ndec_seq A a n\<rbrakk> \<Longrightarrow> (SOME b. b \<in> A \<and> b < ndec_seq A a n) \<in> A
[PROOF STEP]
apply (thin_tac "\<forall>m. m \<in> A \<longrightarrow> (\<exists>x\<in>A. x < m)")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. \<lbrakk>a \<in> A; ndec_seq A a n \<in> A; \<exists>x\<in>A. x < ndec_seq A a n\<rbrakk> \<Longrightarrow> (SOME b. b \<in> A \<and> b < ndec_seq A a n) \<in> A
[PROOF STEP]
apply (rule someI2_ex)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>n. \<lbrakk>a \<in> A; ndec_seq A a n \<in> A; \<exists>x\<in>A. x < ndec_seq A a n\<rbrakk> \<Longrightarrow> \<exists>aa. aa \<in> A \<and> aa < ndec_seq A a n
2. \<And>n x. \<lbrakk>a \<in> A; ndec_seq A a n \<in> A; \<exists>x\<in>A. x < ndec_seq A a n; x \<in> A \<and> x < ndec_seq A a n\<rbrakk> \<Longrightarrow> x \<in> A
[PROOF STEP]
apply blast
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n x. \<lbrakk>a \<in> A; ndec_seq A a n \<in> A; \<exists>x\<in>A. x < ndec_seq A a n; x \<in> A \<and> x < ndec_seq A a n\<rbrakk> \<Longrightarrow> x \<in> A
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done | {"llama_tokens": 1888, "file": "Group-Ring-Module_Algebra1", "length": 12} |