using BenchmarkTools, Test, CUDA
a = CUDA.zeros(1024)
function kernel(a)
i = threadIdx().x
a[i] += 1
return
end
@cuda threads=length(a) kernel(a)
##
N = 2^20
x_d = CUDA.fill(1.0f0, N) # a vector stored on the GPU filled with 1.0 (Float32)
y_d = CUDA.fill(2.0f0, N) # a vector stored on the GPU filled with 2.0
y_d .+= x_d
function add_broadcast!(y, x)
CUDA.@sync y .+= x
return
end
##
@btime add_broadcast!($y_d, $x_d)
##
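# Note: gpu_add1! below runs its whole loop on a single GPU thread (the bare
# `@cuda` launch defaults to one thread in one block), so it serves as a slow
# baseline rather than a parallel kernel.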
function gpu_add1!(y, x)
for i = 1:length(y)
@inbounds y[i] += x[i]
end
return nothing
end
fill!(y_d, 2)
@cuda gpu_add1!(y_d, x_d)
@test all(Array(y_d) .== 3.0f0)
##
function bench_gpu1!(y, x)
CUDA.@sync begin
@cuda gpu_add1!(y, x)
end
end
@btime bench_gpu1!($y_d, $x_d)
##
const nx = 1024 # do 1024 x 1024 2D FFT
xc = CuArray{ComplexF64}(CUDA.randn(Float64, nx, nx))
p = plan_fft!(xc)
##
@btime CUDA.@sync(p * x) setup=(
x=CuArray{ComplexF64}(CUDA.randn(Float64, nx, nx)));
##
for device in CUDA.devices()
@show capability(device)
end
##
using AbstractFFTs
using CUDA.CUFFT
##
b = CUDA.rand(ComplexF32,64,64,64)
# pa = plan_fft( a )
@btime fft(b);
#!/usr/bin/env python3
import numpy as np
import tensorflow as tf
import cart_pole_evaluator
class Network:
def __init__(self, threads, seed=42):
# Create an empty graph and a session
graph = tf.Graph()
graph.seed = seed
self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
intra_op_parallelism_threads=threads))
def construct(self, args, state_shape, num_actions):
with self.session.graph.as_default():
# Input states
self.states = tf.placeholder(tf.float32, [None] + state_shape)
# Chosen actions (used for training)
self.actions = tf.placeholder(tf.int32, [None])
# Observed returns (used for training)
self.returns = tf.placeholder(tf.float32, [None])
# Compute the action logits
# TODO: Add a fully connected layer processing self.states, with args.hidden_layer neurons
            # and some non-linear activation.
# TODO: Compute `logits` using another dense layer with
# `num_actions` outputs (utilizing no activation function).
# TODO: Compute the `self.probabilities` from the `logits`.
# Training
# TODO: Compute `loss`, as a softmax cross entropy loss of self.actions and `logits`.
# Because this is a REINFORCE algorithm, it is crucial to weight the loss of batch
# elements using `self.returns` -- this can be accomplished using the `weights` parameter.
global_step = tf.train.create_global_step()
self.training = tf.train.AdamOptimizer(args.learning_rate).minimize(loss, global_step=global_step, name="training")
# Initialize variables
self.session.run(tf.global_variables_initializer())
def predict(self, states):
return self.session.run(self.probabilities, {self.states: states})
def train(self, states, actions, returns):
self.session.run(self.training, {self.states: states, self.actions: actions, self.returns: returns})
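
# A possible way (a sketch, not the only solution) to fill in the TODOs in
# `construct` above, using standard TF 1.x layer/loss APIs; left as comments
# so the assignment skeleton stays intact:
#
#     hidden = tf.layers.dense(self.states, args.hidden_layer, activation=tf.nn.relu)
#     logits = tf.layers.dense(hidden, num_actions)
#     self.probabilities = tf.nn.softmax(logits)
#     loss = tf.losses.sparse_softmax_cross_entropy(
#         self.actions, logits, weights=self.returns)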
if __name__ == "__main__":
# Fix random seed
np.random.seed(42)
# Parse arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=5, type=int, help="Number of episodes to train on.")
parser.add_argument("--episodes", default=500, type=int, help="Training episodes.")
parser.add_argument("--gamma", default=1.0, type=float, help="Discounting factor.")
parser.add_argument("--hidden_layer", default=20, type=int, help="Size of hidden layer.")
parser.add_argument("--learning_rate", default=0.01, type=float, help="Learning rate.")
parser.add_argument("--render_each", default=0, type=int, help="Render some episodes.")
parser.add_argument("--threads", default=1, type=int, help="Maximum number of threads to use.")
args = parser.parse_args()
# Create the environment
env = cart_pole_evaluator.environment(discrete=False)
# Construct the network
network = Network(threads=args.threads)
network.construct(args, env.state_shape, env.actions)
evaluating = False
while True:
# TODO: Decide if evaluation should start (one possibility is to train for args.episodes,
# so env.episode >= args.episodes could be used).
        evaluating = ...
# Train for a batch of episodes
batch_states, batch_actions, batch_returns = [], [], []
for _ in range(args.batch_size):
# Perform episode
state = env.reset(evaluating)
states, actions, rewards, done = [], [], [], False
while not done:
if args.render_each and env.episode > 0 and env.episode % args.render_each == 0:
env.render()
# TODO: Compute action distribution using `network.predict`
# TODO: Set `action` randomly according to the generated distribution
# (you can use np.random.choice or any other method).
action = ...
next_state, reward, done, _ = env.step(action)
# TODO: Accumulate states, actions and rewards.
state = next_state
# TODO: Compute returns from rewards (by summing them up and
# applying discount by `args.gamma`).
# TODO: Extend the batch_{states,actions,returns} using the episodic
# {states,actions,returns}.
# TODO: Perform network training using batch_{states,actions,returns}.
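        # Sketches (one possibility each, assuming the names defined above) for
        # the remaining TODOs:
        #
        #   action sampling inside the episode loop:
        #       probabilities = network.predict([state])[0]
        #       action = np.random.choice(len(probabilities), p=probabilities)
        #
        #   discounted returns from the collected `rewards`:
        #       returns, g = [], 0
        #       for reward in reversed(rewards):
        #           g = reward + args.gamma * g
        #           returns.insert(0, g)
        #
        #   training on the accumulated batch:
        #       network.train(batch_states, batch_actions, batch_returns)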
| {"hexsha": "61a0ea735248d83dd4994c5d1afb197d463c1a0d", "size": 4646, "ext": "py", "lang": "Python", "max_stars_repo_path": "charles-university/deep-learning/labs/12/reinforce.py", "max_stars_repo_name": "Hyperparticle/lct-master", "max_stars_repo_head_hexsha": "8acb0ca8fe14bb86305f235e3fec0a595acae2de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-11-08T14:23:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-08T17:54:59.000Z", "max_issues_repo_path": "charles-university/deep-learning/labs/12/reinforce.py", "max_issues_repo_name": "Hyperparticle/lct-master", "max_issues_repo_head_hexsha": "8acb0ca8fe14bb86305f235e3fec0a595acae2de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "charles-university/deep-learning/labs/12/reinforce.py", "max_forks_repo_name": "Hyperparticle/lct-master", "max_forks_repo_head_hexsha": "8acb0ca8fe14bb86305f235e3fec0a595acae2de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2363636364, "max_line_length": 127, "alphanum_fraction": 0.6349547998, "include": true, "reason": "import numpy", "num_tokens": 970} |
import chess
import numpy as np
import time
from numpy.random import default_rng
rng = default_rng()
class MCTS_graph:
def __init__(self,agent):
self.root=agent.root
self.temperature = agent.temperature
def make_graph(self,depth=1000):
self.cont=0
self.nodes = {}
self.edges = []
self.bfs(self.root,0,depth)
print('Total nodes: {}'.format(self.cont))
def bfs(self,node,father,depth):
if depth==0: return
if len(node.children)>0:
for n in node.children:
self.cont+=1
win_percentage = n.winning_frac()
self.nodes[self.cont]=win_percentage
self.edges.append([father,self.cont,n.move])
self.bfs(n,self.cont,depth-1)
def save_graph(self,path,depth=1000):
with open(path,'w') as file:
self.make_graph(depth)
cad="digraph{\n 0 [label=\"root\"];\n"
for n,m in self.nodes.items():
cad+=" {} [label=\"{:.2f}\"];\n".format(n,m)
for (x,y,z) in self.edges:
cad+=" {} -- {} [label=\"{}\"];\n".format(x,y,z)
cad+="}"
file.write(cad)
print("Grafo guardado en: {}".format(path))
class MCTSNode:
def __init__(self, game_state, parent = None, move = None, bot = None, is_root = False):
self.game_state = game_state
self.parent = parent
self.move = move
self.win_counts = np.zeros([2,])
self.value=np.zeros([2,])
self.num_rollouts = 0
self.children = []
self.unvisited_moves = []
self.is_root=is_root
if self.is_terminal():
tmp = game_state.result()
if int(tmp[0]) == 0:
self.value = np.array([0,1])
elif int(tmp[2]) == 0:
self.value = np.array([1,0])
else:
self.value = np.array([1/2,1/2])
else:
self.unvisited_moves = list(game_state.legal_moves)
value = bot.get_move_values_single(game_state)
self.value+=value
def add_random_child(self,bot):
index = np.random.randint(len(self.unvisited_moves))
        new_move = self.unvisited_moves.pop(index) # pick a random available move and remove it from the unvisited moves
        new_game_state = self.game_state.copy(stack=False) # make a copy of the game state
        new_game_state.push(new_move) # play the selected move
        new_node = MCTSNode(game_state=new_game_state, parent=self, move=new_move, bot=bot) # create a new node
        self.children.append(new_node) # append the node to its list of children
        return new_node # return the new node
def record_win(self, result):
self.win_counts += result
self.num_rollouts += 1
def result_simulation(self):
return self.value
    def can_add_child(self): # check whether any unvisited moves remain
return len(self.unvisited_moves) > 0
    def is_terminal(self): # check whether this is a terminal node, i.e., the end of a game
        return self.game_state.is_game_over()
    def winning_frac(self): # get the Q/N value for this node
        if self.parent.game_state.turn: # white to move
            return float(self.win_counts[0]) / float(self.num_rollouts)
        else: # black to move
            return float(self.win_counts[1]) / float(self.num_rollouts)
class agent_MCTS:
def __init__(self, temperature=2,bot=None,game_state=None,max_iter=100,verbose=0):
self.temperature = temperature
self.bot = bot
self.max_iter = max_iter
self.root = None
self.verbose = verbose
if game_state is not None:
self.root = MCTSNode(game_state.copy(),bot=self.bot,is_root=True)
def select_move(self,board,max_iter=None,push=True, thinking_time = 0):
moves,values=self.get_move_values(board,max_iter=max_iter, thinking_time = thinking_time)
if moves is None:
return None
index=np.argmax(values)
if push:
self.push_move(move=moves[index])
return moves[index]
def push_move(self,move=None):
for child in self.root.children:
if child.move==move:
child.is_root=True
self.root=child
self.root.num_rollouts-=1
self.root.parent=None
return True
return False
def push_board(self,board=None):
str_board=str(board)
for child in self.root.children:
if str(child.game_state) == str_board:
child.is_root=True
self.root=child
self.root.num_rollouts-=1
self.root.parent=None
return True
return False
def set_max_iter(self,max_iter=100):
self.max_iter=max_iter
def select_child(self, node):
best_score = -1
best_child = None
log_rollouts = np.log(node.num_rollouts)
for child in node.children:
win_percentage = child.winning_frac()
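            # UCT: exploitation term (Q/N) plus an exploration bonus
            # sqrt(ln(parent rollouts) / child rollouts), scaled by the temperature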
exploration_factor = np.sqrt(log_rollouts / child.num_rollouts)
uct_score = win_percentage + self.temperature * exploration_factor
if uct_score > best_score:
best_score = uct_score
best_child = child
return best_child
def get_move_values(self,game_state,max_iter=None, thinking_time = 0):
if max_iter is None:
max_iter=self.max_iter
if (self.root is None) or (str(self.root.game_state)!=str(game_state) and not self.push_board(board=game_state)):
if self.verbose>0:
                print('Game state does not match the tree root; the root was recreated')
self.root = MCTSNode(game_state.copy(stack=False),bot=self.bot,is_root=True)
if self.root.is_terminal():
return None,None
i=0
tic = time.time()
while thinking_time>0 or i<max_iter:
toc = time.time()-tic
if toc> thinking_time:
thinking_time=0
i+=1
node = self.root
            # selection phase: descend to a node that is either expandable or terminal
            while (not node.can_add_child()) and (not node.is_terminal()):
                node = self.select_child(node)
            # expansion phase: add a new child node
            if node.can_add_child():
                node = node.add_random_child(self.bot)
            # simulation phase: the neural network provides the value of the node, predicting the winner
            result = node.result_simulation()
            # backpropagation phase: update the Q values of the parent nodes up to the root
while node is not None:
node.record_win(result)
node = node.parent
if self.verbose>1:
toc = time.time()-tic
print('MCTS - nodes:{} Elapsed time: {:.2f}s = {:.2f}m nps={:.0f}'.format(self.root.num_rollouts,toc,toc/60,self.root.num_rollouts/toc))
score = []
moves = []
for child in self.root.children:
win_percentage=child.winning_frac()
score.append(win_percentage)
moves.append(child.move)
score = np.array(score)
return moves,score
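
# Minimal usage sketch (hypothetical): `bot` stands for any object exposing
# get_move_values_single(board) -> array-like of two values (white, black),
# which is how MCTSNode evaluates leaf positions above.
#
#     import chess
#     board = chess.Board()
#     agent = agent_MCTS(temperature=2, bot=bot, game_state=board, max_iter=200)
#     move = agent.select_move(board)  # also advances the internal tree root
#     board.push(move)
#     MCTS_graph(agent).save_graph('tree.dot', depth=2)  # dump the search tree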
| {"hexsha": "d4f27f5a3133c797071081b88b7c6d69a9460a27", "size": 7779, "ext": "py", "lang": "Python", "max_stars_repo_path": "chesslab/agent_mcts.py", "max_stars_repo_name": "yniad/chesslab", "max_stars_repo_head_hexsha": "4720bfd093e9657798953702a1fa918f85991f65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chesslab/agent_mcts.py", "max_issues_repo_name": "yniad/chesslab", "max_issues_repo_head_hexsha": "4720bfd093e9657798953702a1fa918f85991f65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chesslab/agent_mcts.py", "max_forks_repo_name": "yniad/chesslab", "max_forks_repo_head_hexsha": "4720bfd093e9657798953702a1fa918f85991f65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-11T04:34:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T04:34:22.000Z", "avg_line_length": 38.1323529412, "max_line_length": 149, "alphanum_fraction": 0.5756523975, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1746} |
#include <string>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <boost/filesystem.hpp>
#include "res2h.h"
#include "res2hutils.hpp"
struct FileData {
boost::filesystem::path inPath;
boost::filesystem::path outPath;
std::string internalName;
std::string dataVariableName;
std::string sizeVariableName;
size_t size;
};
bool beVerbose = false;
bool useRecursion = false;
bool useC = false;
bool createBinary = false;
bool appendFile = false;
bool combineResults = false;
boost::filesystem::path commonHeaderFilePath;
boost::filesystem::path utilitiesFilePath;
boost::filesystem::path inFilePath;
boost::filesystem::path outFilePath;
std::ofstream badOfStream; //we need this later as a default parameter...
//-----------------------------------------------------------------------------
//This is based on the example code found here: https://svn.boost.org/trac/boost/ticket/1976
//but changed to not return a trailing ".." when paths only differ in their file name.
//The function still seems to be missing in boost as of 1.54.0.
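//Example: naiveUncomplete("/a/b/c.h", "/a/d/e.cpp") yields "../b/c.h".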
boost::filesystem::path naiveUncomplete(boost::filesystem::path const path, boost::filesystem::path const base)
{
if (path.has_root_path()) {
if (path.root_path() != base.root_path()) {
return path;
} else {
return naiveUncomplete(path.relative_path(), base.relative_path());
}
} else {
if (base.has_root_path()) {
return path;
} else {
auto path_it = path.begin();
auto base_it = base.begin();
while ( path_it != path.end() && base_it != base.end() ) {
if (*path_it != *base_it) break;
++path_it; ++base_it;
}
boost::filesystem::path result;
//check if we're at the filename of the base path already
if (*base_it != base.filename()) {
//add trailing ".." from path to base, but only if we're not already at the filename of the base path
for (; base_it != base.end() && *base_it != base.filename(); ++base_it) {
result /= "..";
}
}
for (; path_it != path.end(); ++path_it) {
result /= *path_it;
}
return result;
}
}
return path;
}
bool makeCanonical(boost::filesystem::path & result, const boost::filesystem::path & path)
{
    //if we use canonical the file must exist, else we get an exception.
try {
result = boost::filesystem::canonical(path);
}
catch(...) {
        //an error occurred. this may be because the file is not there yet. try without the file name
try {
result = boost::filesystem::canonical(boost::filesystem::path(path).remove_filename());
//ok. this worked. add file name again
result /= path.filename();
}
catch (...) {
//hmm. didn't work. tell the user. at least the path should be there...
std::cout << "The path \"" << boost::filesystem::path(path).remove_filename().string() << "\" couldn't be found. Please create it." << std::endl;
return false;
}
}
return true;
}
//-----------------------------------------------------------------------------
void printVersion()
{
std::cout << "res2h " << RES2H_VERSION_STRING << " - Load plain binary data and dump to a raw C/C++ array." << std::endl << std::endl;
}
void printUsage()
{
std::cout << std::endl;
std::cout << "Usage: res2h <infile/indir> <outfile/outdir> [options]" << std::endl;
std::cout << "Valid options:" << std::endl;
std::cout << "-s Recurse into subdirectories below indir." << std::endl;
std::cout << "-c Use .c files and arrays for storing the data definitions, else" << std::endl << " uses .cpp files and std::vector/std::map." << std::endl;
std::cout << "-h <headerfile> Puts all declarations in a common \"headerfile\" using \"extern\"" << std::endl << " and includes that header file in the source files." << std::endl;
std::cout << "-u <sourcefile> Create utility functions and arrays in a .c/.cpp file." << std::endl << " Only makes sense in combination with -h" << std::endl;
std::cout << "-1 Combine all converted files into one big .c/.cpp file (use with -u)." << std::endl;
std::cout << "-b Compile binary archive outfile containing all infile(s). For reading in your" << std::endl << " software include res2hinterface.h/.c/.cpp (depending on -c) and consult the docs." << std::endl;
std::cout << "-a Append infile to outfile. Can be used to append an archive to an executable." << std::endl;
std::cout << "-v Be verbose." << std::endl;
std::cout << "Examples:" << std::endl;
std::cout << "res2h ./lenna.png ./resources/lenna_png.cpp (convert single file)" << std::endl;
std::cout << "res2h ./data ./resources -s -h resources.h -u resources.cpp (convert directory)" << std::endl;
std::cout << "res2h ./data ./resources/data.bin -b (convert directory to binary file)" << std::endl;
std::cout << "res2h ./resources/data.bin ./program.exe -a (append archive to executable)" << std::endl;
}
bool readArguments(int argc, const char * argv[])
{
bool pastFiles = false;
for(int i = 1; i < argc; ++i) {
//read argument from list
std::string argument = argv[i];
//check what it is
if (argument == "-a") {
if (!commonHeaderFilePath.empty() || !utilitiesFilePath.empty()) {
std::cout << "Error: Option -a can not be combined with -h or -u!" << std::endl;
return false;
}
else if (createBinary) {
std::cout << "Error: Option -a can not be combined with -b!" << std::endl;
return false;
}
else if (combineResults) {
std::cout << "Error: Option -a can not be combined with -1!" << std::endl;
return false;
}
appendFile = true;
pastFiles = true;
}
else if (argument == "-1") {
//-u must be used for this to work. check if specified
for(int j = 1; j < argc; ++j) {
//read argument from list
std::string argument = argv[j];
if (argument == "-u") {
combineResults = true;
pastFiles = true;
break;
}
}
if (!combineResults) {
//-u not specified. complain to user.
std::cout << "Error: Option -1 has to be combined with -u!" << std::endl;
return false;
}
}
else if (argument == "-b") {
if (!commonHeaderFilePath.empty() || !utilitiesFilePath.empty()) {
std::cout << "Error: Option -b can not be combined with -h or -u!" << std::endl;
return false;
}
else if (appendFile) {
std::cout << "Error: Option -b can not be combined with -a!" << std::endl;
return false;
}
        else if (combineResults) {
            std::cout << "Warning: Creating binary archive. Option -1 ignored!" << std::endl;
            combineResults = false;
        }
createBinary = true;
pastFiles = true;
}
else if (argument == "-c") {
useC = true;
pastFiles = true;
}
else if (argument == "-s") {
useRecursion = true;
pastFiles = true;
}
else if (argument == "-v") {
beVerbose = true;
pastFiles = true;
}
else if (argument == "-h") {
if (createBinary) {
std::cout << "Error: Option -h can not be combined with -b!" << std::endl;
return false;
}
else if (appendFile) {
std::cout << "Error: Option -h can not be combined with -a!" << std::endl;
return false;
}
//try getting next argument as header file name
i++;
if (i < argc && argv[i] != nullptr) {
if (!makeCanonical(commonHeaderFilePath, boost::filesystem::path(argv[i]))) {
return false;
}
}
else {
std::cout << "Error: Option -h specified, but no file name found!" << std::endl;
return false;
}
pastFiles = true;
}
else if (argument == "-u") {
if (createBinary) {
std::cout << "Error: Option -u can not be combined with -b!" << std::endl;
return false;
}
else if (appendFile) {
std::cout << "Error: Option -u can not be combined with -a!" << std::endl;
return false;
}
//try getting next argument as utility file name
i++;
if (i < argc && argv[i] != nullptr) {
if (!makeCanonical(utilitiesFilePath, boost::filesystem::path(argv[i]))) {
return false;
}
}
else {
std::cout << "Error: Option -u specified, but no file name found!" << std::endl;
return false;
}
if (!utilitiesFilePath.empty() && commonHeaderFilePath.empty()) {
std::cout << "Warning: -u does not make much sense without -h..." << std::endl;
}
pastFiles = true;
}
//none of the options was matched until here...
else if (!pastFiles) {
//if no files/directories have been found yet this is probably a file/directory
if (inFilePath.empty()) {
if (!makeCanonical(inFilePath, boost::filesystem::path(argument))) {
return false;
}
}
else if (outFilePath.empty()) {
if (!makeCanonical(outFilePath, boost::filesystem::path(argument))) {
return false;
}
pastFiles = true;
}
}
else {
std::cout << "Error: Unknown argument \"" << argument << "\"!" << std::endl;
return false;
}
}
return true;
}
//-----------------------------------------------------------------------------
std::vector<FileData> getFileDataFrom(const boost::filesystem::path & inPath, const boost::filesystem::path & outPath, const boost::filesystem::path & parentDir, const bool recurse)
{
//get all files from directory
std::vector<FileData> files;
//check for infinite symlinks
if(boost::filesystem::is_symlink(inPath)) {
//check if the symlink points somewhere in the path. this would recurse
if(inPath.string().find(boost::filesystem::canonical(inPath).string()) == 0) {
std::cout << "Warning: Path " << inPath << " contains recursive symlink! Skipping." << std::endl;
return files;
}
}
//iterate through source directory searching for files
const boost::filesystem::directory_iterator dirEnd;
for (boost::filesystem::directory_iterator fileIt(inPath); fileIt != dirEnd; ++fileIt) {
boost::filesystem::path filePath = (*fileIt).path();
if (!boost::filesystem::is_directory(filePath)) {
if (beVerbose) {
std::cout << "Found input file " << filePath << std::endl;
}
//add file to list
FileData temp;
temp.inPath = filePath;
//replace dots in file name with '_' and add a .c/.cpp extension
std::string newFileName = filePath.filename().generic_string();
std::replace(newFileName.begin(), newFileName.end(), '.', '_');
if (useC) {
newFileName.append(".c");
}
else {
newFileName.append(".cpp");
}
//remove parent directory of file from path for internal name. This could surely be done in a safer way
boost::filesystem::path subPath(filePath.generic_string().substr(parentDir.generic_string().size() + 1));
//add a ":/" before the name to mark internal resources (Yes. Hello Qt!)
temp.internalName = ":/" + subPath.generic_string();
//add subdir below parent path to name to enable multiple files with the same name
std::string subDirString(subPath.remove_filename().generic_string());
if (!subDirString.empty()) {
//replace dir separators by underscores
std::replace(subDirString.begin(), subDirString.end(), '/', '_');
//add in front of file name
newFileName = subDirString + "_" + newFileName;
}
//build new output file name
temp.outPath = outPath / newFileName;
if (beVerbose) {
std::cout << "Internal name will be \"" << temp.internalName << "\"" << std::endl;
std::cout << "Output path is " << temp.outPath << std::endl;
}
//get file size
try {
temp.size = (size_t)boost::filesystem::file_size(filePath);
if (beVerbose) {
std::cout << "Size is " << temp.size << " bytes." << std::endl;
}
}
catch(...) {
std::cout << "Error: Failed to get size of " << filePath << "!" << std::endl;
temp.size = 0;
}
//add file to list
files.push_back(temp);
}
}
//does the user want subdirectories?
if (recurse) {
//iterate through source directory again searching for directories
for (boost::filesystem::directory_iterator dirIt(inPath); dirIt != dirEnd; ++dirIt) {
boost::filesystem::path dirPath = (*dirIt).path();
if (boost::filesystem::is_directory(dirPath)) {
if (beVerbose) {
std::cout << "Found subdirectory " << dirPath << std::endl;
}
//subdirectory found. recurse.
std::vector<FileData> subFiles = getFileDataFrom(dirPath, outPath, parentDir, recurse);
//add returned result to file list
files.insert(files.end(), subFiles.cbegin(), subFiles.cend());
}
}
}
//return result
return files;
}
bool convertFile(FileData & fileData, const boost::filesystem::path & commonHeaderPath, std::ofstream & outStream = badOfStream, bool addHeader = true)
{
if (boost::filesystem::exists(fileData.inPath)) {
//try to open the input file
std::ifstream inStream;
inStream.open(fileData.inPath.string(), std::ifstream::in | std::ifstream::binary);
if (inStream.is_open() && inStream.good()) {
if (beVerbose) {
std::cout << "Converting input file " << fileData.inPath;
}
//try getting size of data
inStream.seekg(0, std::ios::end);
fileData.size = (size_t)inStream.tellg();
inStream.seekg(0);
            //check if the caller passed an output stream and use that
bool closeOutStream = false;
if (!outStream.is_open() || !outStream.good()) {
if (!fileData.outPath.empty()) {
//try opening the output stream. truncate it when it exists
outStream.open(fileData.outPath.string(), std::ofstream::out | std::ofstream::trunc);
}
else {
std::cout << "Error: No output stream passed, but output path for \"" << fileData.inPath.filename().string() << "\" is empty! Skipping." << std::endl;
return false;
}
closeOutStream = true;
}
//now write to stream
if (outStream.is_open() && outStream.good()) {
                //check if the caller wants to add a header
if (addHeader) {
//add message
outStream << "//this file was auto-generated from \"" << fileData.inPath.filename().string() << "\" by res2h" << std::endl << std::endl;
//add header include
if (!commonHeaderPath.empty()) {
//common header path must be relative to destination directory
boost::filesystem::path relativeHeaderPath = naiveUncomplete(commonHeaderPath, fileData.outPath);
outStream << "#include \"" << relativeHeaderPath.generic_string() << "\"" << std::endl << std::endl;
}
}
//create names for variables
fileData.dataVariableName = fileData.outPath.filename().stem().string() + "_data";
fileData.sizeVariableName = fileData.outPath.filename().stem().string() + "_size";
//add size and data variable
outStream << "const size_t " << fileData.sizeVariableName << " = " << std::dec << fileData.size << ";" << std::endl;
outStream << "const unsigned char " << fileData.dataVariableName << "[" << std::dec << fileData.size << "] = {" << std::endl;
outStream << " "; //first indent
//now add content
size_t breakCounter = 0;
while (!inStream.eof()) {
//read byte from source
unsigned char dataByte;
inStream.read((char *)&dataByte, 1);
//check if we have actually read something
if (inStream.gcount() != 1 || inStream.eof()) {
//we failed to read. break the read loop and close the file.
break;
}
//write to destination in hex with a width of 2 and '0' as padding
//we do not use showbase as it doesn't work with zero values
outStream << "0x" << std::setw(2) << std::setfill('0') << std::hex << (unsigned int)dataByte;
//was this the last character?
if (!inStream.eof() && fileData.size > (size_t)inStream.tellg()) {
//no. add comma.
outStream << ",";
//add break after 10 bytes and add indent again
if (++breakCounter % 10 == 0) {
outStream << std::endl << " ";
}
}
}
//close curly braces
outStream << std::endl << "};" << std::endl << std::endl;
//close files
if (closeOutStream) {
outStream.close();
}
inStream.close();
if (beVerbose) {
std::cout << " - succeeded." << std::endl;
}
return true;
}
else {
std::cout << "Error: Failed to open file \"" << fileData.outPath.string() << "\" for writing!" << std::endl;
return false;
}
}
else {
std::cout << "Error: Failed to open file \"" << fileData.inPath.string() << "\" for reading!" << std::endl;
return false;
}
}
else {
std::cout << "Error: File \"" << fileData.inPath.string() << "\" does not exist!" << std::endl;
}
return false;
}
bool createCommonHeader(const std::vector<FileData> & fileList, const boost::filesystem::path & commonHeaderPath, bool addUtilityFunctions = false, bool useCConstructs = false)
{
//try opening the output file. truncate it when it exists
std::ofstream outStream;
outStream.open(commonHeaderPath.generic_string(), std::ofstream::out | std::ofstream::trunc);
if (outStream.is_open() && outStream.good()) {
if (beVerbose) {
std::cout << std::endl << "Creating common header " << commonHeaderPath;
}
//add message
outStream << "//this file was auto-generated by res2h" << std::endl << std::endl;
//add #pragma to only include once
outStream << "#pragma once" << std::endl << std::endl;
//add includes for C++
if (!useCConstructs) {
outStream << "#include <string>" << std::endl;
if (addUtilityFunctions) {
outStream << "#include <map>" << std::endl;
}
outStream << std::endl;
}
//add all files
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend(); ++fdIt) {
//add size and data variable
outStream << "extern const size_t " << fdIt->sizeVariableName << ";" << std::endl;
outStream << "extern const unsigned char " << fdIt->dataVariableName << "[];" << std::endl << std::endl;
}
//if we want utilities, add array
if (addUtilityFunctions) {
//add resource struct
outStream << "struct Res2hEntry {" << std::endl;
if (useCConstructs) {
outStream << " const char * relativeFileName;" << std::endl;
}
else {
outStream << " const std::string relativeFileName;" << std::endl;
}
outStream << " const size_t size;" << std::endl;
outStream << " const unsigned char * data;" << std::endl;
outStream << "};" << std::endl << std::endl;
//add list holding files
outStream << "extern const size_t res2hNrOfFiles;" << std::endl;
outStream << "extern const Res2hEntry res2hFiles[];" << std::endl << std::endl;
if (!useCConstructs) {
//add additional std::map if C++
outStream << "typedef const std::map<const std::string, const Res2hEntry> res2hMapType;" << std::endl;
outStream << "extern res2hMapType res2hMap;" << std::endl;
}
}
//close file
outStream.close();
if (beVerbose) {
std::cout << " - succeeded." << std::endl;
}
return true;
}
else {
std::cout << "Error: Failed to open file \"" << commonHeaderPath << "\" for writing!" << std::endl;
}
    return false;
}
bool createUtilities(std::vector<FileData> & fileList, const boost::filesystem::path & utilitiesPath, const boost::filesystem::path & commonHeaderPath, bool useCConstructs = false, bool addFileData = false)
{
//try opening the output file. truncate it when it exists
std::ofstream outStream;
outStream.open(utilitiesPath.generic_string(), std::ofstream::out | std::ofstream::trunc);
if (outStream.is_open() && outStream.good()) {
if (beVerbose) {
std::cout << std::endl << "Creating utilities file " << utilitiesPath;
}
//add message
outStream << "//this file was auto-generated by res2h" << std::endl << std::endl;
//create path to include file RELATIVE to this file
boost::filesystem::path relativePath = naiveUncomplete(commonHeaderPath, utilitiesPath);
//include header file
outStream << "#include \"" << relativePath.string() << "\"" << std::endl << std::endl;
//if the data should go to this file too, add it
if (addFileData) {
for (auto fdIt = fileList.begin(); fdIt != fileList.cend(); ++fdIt) {
if (!convertFile(*fdIt, commonHeaderFilePath, outStream, false)) {
std::cout << "Error: Failed to convert all files. Aborting!" << std::endl;
outStream.close();
return false;
}
}
}
        //begin data arrays. switch depending on whether C or C++
outStream << "const size_t res2hNrOfFiles = " << fileList.size() << ";" << std::endl;
//add files
outStream << "const Res2hEntry res2hFiles[res2hNrOfFiles] = {" << std::endl;
outStream << " "; //first indent
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend();) {
outStream << "{\"" << fdIt->internalName << "\", " << fdIt->sizeVariableName << ", " << fdIt->dataVariableName << "}";
//was this the last entry?
++fdIt;
if (fdIt != fileList.cend()) {
//no. add comma.
outStream << ",";
//add break after every entry and add indent again
outStream << std::endl << " ";
}
}
outStream << std::endl << "};" << std::endl;
if (!useCConstructs) {
//add files to map
outStream << std::endl << "res2hMapType::value_type mapTemp[] = {" << std::endl;
outStream << " ";
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend();) {
outStream << "std::make_pair(\"" << fdIt->internalName << "\", res2hFiles[" << (fdIt - fileList.cbegin()) << "])";
//was this the last entry?
++fdIt;
if (fdIt != fileList.cend()) {
//no. add comma.
outStream << ",";
//add break after every entry and add indent again
outStream << std::endl << " ";
}
}
outStream << std::endl << "};" << std::endl << std::endl;
//create map
outStream << "res2hMapType res2hMap(mapTemp, mapTemp + sizeof mapTemp / sizeof mapTemp[0]);" << std::endl;
}
//close file
outStream.close();
if (beVerbose) {
std::cout << " - succeeded." << std::endl;
}
return true;
}
else {
std::cout << "Error: Failed to open file \"" << utilitiesPath << "\" for writing!" << std::endl;
}
    return false;
}
//Blob file format:
//Offset | Type | Description
//---------------+----------+-------------------------------------------
//START | char[8] | magic number string "res2hbin"
//08 | uint32_t | file format version number (currently 1)
//12 | uint32_t | format flags or other crap for file (currently 0)
//16 | uint32_t | size of whole archive including checksum in bytes
//20 | uint32_t | number of directory and file entries following
//Then follows the directory:
//24 + 00 | uint32_t | file entry #0, size of internal name INCLUDING null-terminating character
//24 + 04 | char[] | file entry #0, internal name (null-terminated)
//24 + 04 + name | uint32_t | file entry #0, format flags for entry (currently 0)
//24 + 08 + name | uint32_t | file entry #0, size of data
//24 + 12 + name | uint32_t | file entry #0, absolute offset of data in file
//24 + 16 + name | uint32_t | file entry #0, Adler-32 (RFC1950) checksum of data
//Then follow the other directory entries.
//Directly after the directory the data blocks begin.
//END - 04 | uint32_t | Adler-32 (RFC1950) checksum of whole file up to this point
//Obviously this limits you to ~4GB for the whole binary file and ~4GB per data entry. Go cry about it...
//There is some redundant information here, but that's for reading stuff faster.
//Also the version and dummy fields might be needed in later versions...
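//Example: a file with internal name ":/x.png" (7 characters + terminating null)
//occupies 4 + 8 + 4 + 4 + 4 + 4 = 28 bytes in the directory, matching the
//"20 + name length + 1" offset calculation used below.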
bool createBlob(const std::vector<FileData> & fileList, const boost::filesystem::path & filePath)
{
//try opening the output file. truncate it when it exists
std::fstream outStream;
outStream.open(filePath.string(), std::ofstream::out | std::ofstream::binary | std::ofstream::trunc);
if (outStream.is_open() && outStream.good()) {
if (beVerbose) {
std::cout << std::endl << "Creating binary archive " << filePath << std::endl;
}
//add magic number
const unsigned char magicBytes[9] = RES2H_MAGIC_BYTES;
outStream.write(reinterpret_cast<const char *>(&magicBytes), sizeof(magicBytes) - 1);
//add version and format flag
const uint32_t fileVersion = RES2H_ARCHIVE_VERSION;
const uint32_t fileFlags = 0;
outStream.write(reinterpret_cast<const char *>(&fileVersion), sizeof(uint32_t));
outStream.write(reinterpret_cast<const char *>(&fileFlags), sizeof(uint32_t));
//add dummy archive size
uint32_t archiveSize = 0;
outStream.write(reinterpret_cast<const char *>(&archiveSize), sizeof(uint32_t));
//add number of directory entries
const uint32_t nrOfEntries = fileList.size();
outStream.write(reinterpret_cast<const char *>(&nrOfEntries), sizeof(uint32_t));
//skip through files calculating data start offset behind directory
size_t dataStart = RES2H_OFFSET_DIR_START;
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend(); ++fdIt) {
            //calculate the size of this entry and add it to the data start address
dataStart += 20 + fdIt->internalName.size() + 1;
}
//add directory for all files
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend(); ++fdIt) {
//add size of name
const uint32_t nameSize = fdIt->internalName.size() + 1;
outStream.write(reinterpret_cast<const char *>(&nameSize), sizeof(uint32_t));
//add name and null-termination
outStream << fdIt->internalName << '\0';
//add flags
const uint32_t entryFlags = 0;
outStream.write(reinterpret_cast<const char *>(&entryFlags), sizeof(uint32_t));
            //add data size (the directory stores a uint32_t, so cast explicitly)
            const uint32_t entrySize = (uint32_t)fdIt->size;
            outStream.write(reinterpret_cast<const char *>(&entrySize), sizeof(uint32_t));
            //add offset from file start to start of data
            const uint32_t entryOffset = (uint32_t)dataStart;
            outStream.write(reinterpret_cast<const char *>(&entryOffset), sizeof(uint32_t));
//add checksum of data
const uint32_t checksum = calculateAdler32(fdIt->inPath.string());
outStream.write(reinterpret_cast<const char *>(&checksum), sizeof(uint32_t));
if (beVerbose) {
std::cout << "Creating directory entry for \"" << fdIt->internalName << "\"" << std::endl;
std::cout << "Size is " << fdIt->size << " bytes." << std::endl;
std::cout << "Data starts at " << std::hex << std::showbase << dataStart << std::endl;
std::cout << "Adler-32 checksum is " << std::hex << std::showbase << checksum << std::endl;
}
            //now add the size of this entry's data to the start offset for the next data block
dataStart += fdIt->size;
}
//add data for all files
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend(); ++fdIt) {
//try to open file
std::ifstream inStream;
inStream.open(fdIt->inPath.string(), std::ifstream::in | std::ifstream::binary);
if (inStream.is_open() && inStream.good()) {
if (beVerbose) {
std::cout << "Adding data for \"" << fdIt->internalName << "\"" << std::endl;
}
std::streamsize overallDataSize = 0;
//copy data from input to output file
while (!inStream.eof() && inStream.good()) {
unsigned char buffer[1024];
std::streamsize readSize = sizeof(buffer);
try {
//try reading data from input file
inStream.read(reinterpret_cast<char *>(&buffer), sizeof(buffer));
}
                    catch (const std::ios_base::failure &) { /*ignore read failure. salvage what we can.*/ }
//store how many bytes were actually read
readSize = inStream.gcount();
//write to output file
outStream.write(reinterpret_cast<const char *>(&buffer), readSize);
                    //increase size of overall data read
overallDataSize += readSize;
}
//close input file
inStream.close();
//check if the file was completely read
if (overallDataSize != fdIt->size) {
std::cout << "Error: Failed to completely copy file \"" << fdIt->inPath.string() << "\" to binary data!" << std::endl;
outStream.close();
return false;
}
}
else {
std::cout << "Error: Failed to open file \"" << fdIt->inPath.string() << "\" for reading!" << std::endl;
outStream.close();
return false;
}
}
//final archive size is current size + checksum. write size to the header now
        archiveSize = (uint32_t)outStream.tellp() + sizeof(uint32_t);
        outStream.seekp(RES2H_OFFSET_ARCHIVE_SIZE);
        outStream.write(reinterpret_cast<const char *>(&archiveSize), sizeof(uint32_t));
//close file
outStream.close();
if (beVerbose) {
std::cout << "Binary archive creation succeeded." << std::endl;
}
//calculate checksum of whole file
const uint32_t adler32 = calculateAdler32(filePath.string());
//open file again, move to end of file and append checksum
outStream.open(filePath.string(), std::ofstream::out | std::ofstream::binary | std::ofstream::app);
if (outStream.is_open() && outStream.good()) {
            outStream.seekp(0, std::ios::end);
outStream.write(reinterpret_cast<const char *>(&adler32), sizeof(uint32_t));
//close file
outStream.close();
}
else {
std::cout << "Error: Failed to open file \"" << filePath.string() << "\" for writing!" << std::endl;
return false;
}
if (beVerbose) {
std::cout << "Archive checksum is " << std::hex << std::showbase << adler32 << "." << std::endl;
}
return true;
}
else {
std::cout << "Error: Failed to open file \"" << filePath.string() << "\" for writing!" << std::endl;
return false;
}
return false;
}
bool appendAtoB(const boost::filesystem::path & destinationPath, const boost::filesystem::path & sourcePath)
{
//try opening the output file.
std::fstream outStream;
outStream.open(destinationPath.string(), std::ofstream::out | std::ofstream::binary | std::ofstream::app);
if (outStream.is_open() && outStream.good()) {
if (beVerbose) {
std::cout << std::endl << "Opened output file " << destinationPath << std::endl;
}
//seek to the end
        outStream.seekp(0, std::ios::end);
//open input file
std::ifstream inStream;
inStream.open(sourcePath.string(), std::ifstream::in | std::ifstream::binary);
if (inStream.is_open() && inStream.good()) {
if (beVerbose) {
std::cout << "Opened input file \"" << sourcePath << "\". Appending data to output." << std::endl;
}
//copy data from input to output file
while (!inStream.eof() && inStream.good()) {
unsigned char buffer[1024];
std::streamsize readSize = sizeof(buffer);
try {
//try reading data from input file
inStream.read(reinterpret_cast<char *>(&buffer), sizeof(buffer));
}
                catch (const std::ios_base::failure &) { /*ignore read failure. salvage what we can.*/ }
//store how many bytes were actually read
readSize = inStream.gcount();
//write to output file
outStream.write(reinterpret_cast<const char *>(&buffer), readSize);
}
//close input file
inStream.close();
}
else {
std::cout << "Error: Failed to open input file \"" << sourcePath.string() << "\" for reading!" << std::endl;
outStream.close();
return false;
}
//close output file
outStream.close();
return true;
}
else {
std::cout << "Error: Failed to open output file \"" << destinationPath.string() << "\" for writing!" << std::endl;
}
return false;
}
//-----------------------------------------------------------------------------
int main(int argc, const char * argv[])
{
printVersion();
//check number of arguments and if all arguments can be read
if(argc < 3 || !readArguments(argc, argv)) {
printUsage();
return -1;
}
//check if the input path exist
if (!boost::filesystem::exists(inFilePath)) {
std::cout << "Error: Invalid input file/directory \"" << inFilePath.string() << "\"!" << std::endl;
return -2;
}
if (createBinary) {
//check if argument 2 is a file
if (boost::filesystem::is_directory(outFilePath)) {
std::cout << "Error: Output must be a file if -b is used!" << std::endl;
return -2;
}
}
else if (appendFile) {
//check if argument 2 is a file
if (boost::filesystem::is_directory(outFilePath)) {
std::cout << "Error: Output must be a file if -a is used!" << std::endl;
return -2;
}
}
    else if (boost::filesystem::is_directory(inFilePath) != boost::filesystem::is_directory(outFilePath)) {
        //check if the output directory exists
        if (boost::filesystem::is_directory(inFilePath) && !boost::filesystem::exists(outFilePath)) {
std::cout << "Error: Invalid output directory \"" << outFilePath.string() << "\"!" << std::endl;
return -2;
}
//check if arguments 1 and 2 are both files or both directories
std::cout << "Error: Input and output file must be both either a file or a directory!" << std::endl;
return -2;
}
if (appendFile) {
//append file a to b
if (!appendAtoB(outFilePath, inFilePath)) {
std::cout << "Error: Failed to append data to executable!" << std::endl;
return -3;
}
}
else {
//build list of files to process
std::vector<FileData> fileList;
        if (boost::filesystem::is_directory(inFilePath) && boost::filesystem::is_directory(outFilePath)) {
            //both paths are directories, build file list
fileList = getFileDataFrom(inFilePath, outFilePath, inFilePath, useRecursion);
if (fileList.empty()) {
std::cout << "Error: No files to convert!" << std::endl;
return -3;
}
}
else {
//just add single input/output file
FileData temp;
temp.inPath = inFilePath;
temp.outPath = outFilePath;
temp.internalName = inFilePath.filename().string(); //remove all, but the file name and extension
if (beVerbose) {
std::cout << "Found input file " << inFilePath << std::endl;
std::cout << "Internal name will be \"" << temp.internalName << "\"" << std::endl;
std::cout << "Output path is " << temp.outPath << std::endl;
}
//get file size
try {
temp.size = (size_t)boost::filesystem::file_size(inFilePath);
if (beVerbose) {
std::cout << "Size is " << temp.size << " bytes." << std::endl;
}
}
catch(...) {
std::cout << "Error: Failed to get size of " << inFilePath << "!" << std::endl;
temp.size = 0;
}
fileList.push_back(temp);
}
        //does the user want a binary file?
if (createBinary) {
//yes. build it.
if (!createBlob(fileList, outFilePath)) {
std::cout << "Error: Failed to convert to binary file!" << std::endl;
return -4;
}
}
else {
//no. convert files to .c/.cpp. loop through list, converting files
for (auto fdIt = fileList.begin(); fdIt != fileList.cend(); ++fdIt) {
if (!convertFile(*fdIt, commonHeaderFilePath)) {
std::cout << "Error: Failed to convert all files. Aborting!" << std::endl;
return -4;
}
}
//do we need to write a header file?
if (!commonHeaderFilePath.empty()) {
if (!createCommonHeader(fileList, commonHeaderFilePath, !utilitiesFilePath.empty(), useC)) {
return -5;
}
//do we need to create utilities?
if (!utilitiesFilePath.empty()) {
if (!createUtilities(fileList, utilitiesFilePath, commonHeaderFilePath, useC, combineResults)) {
return -6;
}
}
}
}
    } //end of conversion branch (!appendFile)
//profit!!!
std::cout << "res2h succeeded." << std::endl;
return 0;
}
| {"hexsha": "69fae17903c065ca3ede622d5ff2a554d803d064", "size": 36469, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "data/res2h-master/src/res2h.cpp", "max_stars_repo_name": "tlanks/esbusyness", "max_stars_repo_head_hexsha": "41ed9e6b552585476c81f2f89b9e3d539c54d4ab", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2016-08-08T17:50:18.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-08T02:54:13.000Z", "max_issues_repo_path": "data/res2h-master/src/res2h.cpp", "max_issues_repo_name": "tlanks/esbusyness", "max_issues_repo_head_hexsha": "41ed9e6b552585476c81f2f89b9e3d539c54d4ab", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": 7.0, "max_issues_repo_issues_event_min_datetime": "2016-08-10T03:07:30.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-10T15:24:40.000Z", "max_forks_repo_path": "data/res2h-master/src/res2h.cpp", "max_forks_repo_name": "tlanks/esbusyness", "max_forks_repo_head_hexsha": "41ed9e6b552585476c81f2f89b9e3d539c54d4ab", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 16.0, "max_forks_repo_forks_event_min_datetime": "2016-08-09T02:11:02.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-24T10:17:56.000Z", "avg_line_length": 40.2527593819, "max_line_length": 216, "alphanum_fraction": 0.598151855, "num_tokens": 9377} |
\section{Discussion}\label{section:discussion}
We have introduced relative suffix trees (\RCST), a new kind of compressed suffix tree for repetitive sequence collections. Our \RCST{} compresses the suffix tree of an individual sequence relative to the suffix tree of a reference sequence. It combines an already known relative suffix array with a novel relative-compressed longest common prefix representation (\RLCP). When the sequences are similar enough (e.g., two human genomes), the \RCST{} requires about 3 bits per symbol on each target sequence. This is close to the space used by the most space-efficient compressed suffix trees designed to store repetitive collections in a single tree, but the \RCST{} provides a different functionality as it indexes each sequence individually. The \RCST{} supports query and navigation operations within a few microseconds, which is competitive with the largest and fastest compressed suffix trees.
The size of \RCST{} is proportional to the amount of sequence that is present either in the reference or in the target, but not both. This is unusual for relative compression, where any additional material in the reference is generally harmless. Sorting the suffixes in lexicographic order tends to distribute the additional suffixes all over the suffix array, creating many mismatches between the suffix-based structures of the reference and the target. For example, the 60~million suffixes from chromosome~Y created 34~million new phrases in the RLZ parse of the \DLCP{} array of a female genome, doubling the size of the \RLCP{} array. Having multiple references (e.g.~male and female) can hence be worthwhile when building relative data structures for many target sequences.
While our \RCST{} implementation provides competitive time/space trade-offs, there is still much room for improvement. Most importantly, some of the construction algorithms require significant amounts of time and memory. In many places, we have chosen simple and fast implementation options, even though there could be alternatives that require significantly less space without being too much slower.
Our \RCST{} is a relative version of the \CSTnpr. Another alternative for future work is a relative \CSTsada, using \RLZ{} compressed bitvectors for suffix tree topology and \PLCP. %Based on our preliminary experiments, the main obstacle is the compression of phrase pointers. Relative pointers work well when most differences between the reference and the target are single-character substitutions. As suffix sorting multiplies the differences and transforms substitutions into insertions and deletions, we need new compression schemes for the pointers.
| {"hexsha": "6ca296c09b4e58f46fa017390b2bc475b10b2d42", "size": 2681, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "rcst/concl.tex", "max_stars_repo_name": "jltsiren/relative-fm", "max_stars_repo_head_hexsha": "68c11f172fd2a546792aad3ad81ee1e185b5ee7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2015-04-29T11:18:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-21T20:32:08.000Z", "max_issues_repo_path": "rcst/concl.tex", "max_issues_repo_name": "jltsiren/relative-fm", "max_issues_repo_head_hexsha": "68c11f172fd2a546792aad3ad81ee1e185b5ee7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rcst/concl.tex", "max_forks_repo_name": "jltsiren/relative-fm", "max_forks_repo_head_hexsha": "68c11f172fd2a546792aad3ad81ee1e185b5ee7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2015-12-06T20:49:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-14T10:33:01.000Z", "avg_line_length": 206.2307692308, "max_line_length": 897, "alphanum_fraction": 0.8146214099, "num_tokens": 519} |
import os
import numpy.linalg as la
import numpy as np
from skimage.draw import line_nd
from os.path import join, expanduser
from dipy.io import read_bvals_bvecs
from dipy.io.image import load_nifti, save_nifti
rel_path = '~/.dnn/datasets/synth'
name = 'synth'
def process_movement():
bvals, bvecs = load_bvals_bvecs()
img, affine = load_image_from_nifti()
mov = get_movement_estimates(img, bvecs)
save_mov_image(mov, affine, name)
def load_image_from_numpy():
path = os.path.expanduser(rel_path)
url = os.path.join(path, name + '.npz')
img_dict = np.load(url, allow_pickle=True)
return img_dict['img']
def load_image_from_nifti():
base_path = expanduser(rel_path)
digit_hardi_url = join(base_path, name + '.nii.gz')
img, affine = load_nifti(digit_hardi_url)
return img, affine
def load_bvals_bvecs():
path = os.path.expanduser(rel_path)
bvals_url = join(path, 'bvals')
bvecs_url = join(path, 'bvecs')
bvals, bvecs = read_bvals_bvecs(bvals_url, bvecs_url)
return bvals, bvecs
def save_mov_image(mov, affine, name):
path = os.path.expanduser(rel_path)
if not os.path.isdir(path):
os.makedirs(path)
# np.savez(os.path.join(path, name + '_mov'), mov=mov)
save_nifti(os.path.join(path, name + '_mov.nii.gz'), mov, affine)
def mov_img(img, direction):
mov = np.zeros_like(img)
dims = img.shape
for i in range(dims[0]):
for j in range(dims[1]):
for k in range(dims[2]):
mov_ijk = movement(img, (i, j, k), direction, radius=10, eps=0.01)
mov[i, j, k] = mov_ijk
return mov
def movement(img, center, direction, radius=10, eps=0.01, min_val=1e-9):
center_value = img[center[0], center[1], center[2]]
mov = 0
if abs(center_value) > min_val:
coords = get_points_bidirectional(center, direction, radius, img.shape)
z = img[coords[0], coords[1], coords[2]]
if len(z) > 1:
deltas = np.abs(z[0] - z[1:]) + eps
variation = (1 / (len(z) - 1)) * np.sum(deltas)
mov = center_value / variation
return mov
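
def get_points_bidirectional(center, direction, radius, shape):
    # NOTE: this helper is missing from the original file; below is a minimal
    # sketch inferred from its call site in `movement` above. It collects the
    # voxel coordinates of the discrete line through `center` along +/- `direction`,
    # extending `radius` voxels each way and clipped to the image bounds, using
    # skimage's line_nd and numpy.linalg (both imported at the top).
    direction = np.asarray(direction, dtype=float)
    norm = la.norm(direction)
    if norm < 1e-12:
        # degenerate direction: return just the center voxel
        center = np.asarray(center)
        return center[0:1], center[1:2], center[2:3]
    step = radius * direction / norm
    bounds = np.array(shape) - 1
    start = np.clip(np.array(center) - step, 0, bounds)
    stop = np.clip(np.array(center) + step, 0, bounds)
    return line_nd(start, stop, endpoint=True)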
def get_movement_estimates(img, bvecs, max_bvecs=None):
bvec_list = bvecs.tolist()[:max_bvecs]
movs = []
for k, direction in enumerate(bvec_list):
        print(f'direction {k + 1} of {len(bvec_list)}')
mov_for_direction = mov_img(img, direction)
movs.append(mov_for_direction)
mov = np.transpose(np.array(movs), (1, 2, 3, 0))
return mov
if __name__ == '__main__':
process_movement()
| {"hexsha": "cf71e616c93f24230b80d9ff351ad76474645a18", "size": 2586, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataset/mnist/dwi/movement.py", "max_stars_repo_name": "cassianobecker/dnn", "max_stars_repo_head_hexsha": "bb2ea04f77733de9df10f795bb049ac3b9d30478", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-02-21T21:35:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-29T15:20:00.000Z", "max_issues_repo_path": "dataset/mnist/dwi/movement.py", "max_issues_repo_name": "cassianobecker/dnn", "max_issues_repo_head_hexsha": "bb2ea04f77733de9df10f795bb049ac3b9d30478", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2020-02-20T21:00:23.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-22T15:23:25.000Z", "max_forks_repo_path": "dataset/mnist/dwi/movement.py", "max_forks_repo_name": "cassianobecker/dnn", "max_forks_repo_head_hexsha": "bb2ea04f77733de9df10f795bb049ac3b9d30478", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5090909091, "max_line_length": 82, "alphanum_fraction": 0.6415313225, "include": true, "reason": "import numpy", "num_tokens": 743} |
[STATEMENT]
lemma vars_of_instances:
shows "vars_of (subst t \<sigma>)
= \<Union> { V. \<exists>x. (x \<in> (vars_of t)) \<and> (V = vars_of (subst (Var x) \<sigma>)) }"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vars_of (t \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
proof (induction t)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>x. vars_of (Const x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Const x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
3. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
case (Const a)
[PROOF STATE]
proof (state)
this:
goal (3 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>x. vars_of (Const x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Const x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
3. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have "vars_of (Const a) = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vars_of (Const a) = {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (Const a) = {}
goal (3 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>x. vars_of (Const x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Const x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
3. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
vars_of (Const a) = {}
[PROOF STEP]
have rhs_empty: "\<Union> { V. \<exists>x. (x \<in> (vars_of (Const a))) \<and> (V = vars_of (subst (Var x) \<sigma>)) } = {}"
[PROOF STATE]
proof (prove)
using this:
vars_of (Const a) = {}
goal (1 subgoal):
1. \<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = {}
goal (3 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>x. vars_of (Const x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Const x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
3. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have lhs_empty: "(subst (Const a) \<sigma>) = (Const a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Const a \<lhd> \<sigma> = Const a
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Const a \<lhd> \<sigma> = Const a
goal (3 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>x. vars_of (Const x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Const x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
3. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
from rhs_empty and lhs_empty
[PROOF STATE]
proof (chain)
picking this:
\<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = {}
Const a \<lhd> \<sigma> = Const a
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = {}
Const a \<lhd> \<sigma> = Const a
goal (1 subgoal):
1. vars_of (Const a \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (Const a \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
case (Var a)
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have "vars_of (Var a) = { a }"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vars_of (Var a) = {a}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (Var a) = {a}
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
vars_of (Var a) = {a}
[PROOF STEP]
have rhs: "\<Union> { V. \<exists>x. (x \<in> (vars_of (Var a))) \<and> (V = vars_of (subst (Var x) \<sigma>)) } =
vars_of (subst (Var a) \<sigma>)"
[PROOF STATE]
proof (prove)
using this:
vars_of (Var a) = {a}
goal (1 subgoal):
1. \<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (Var a \<lhd> \<sigma>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (Var a \<lhd> \<sigma>)
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have lhs: "(subst (Var a) \<sigma>) = (subst (Var a) \<sigma>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Var a \<lhd> \<sigma> = Var a \<lhd> \<sigma>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Var a \<lhd> \<sigma> = Var a \<lhd> \<sigma>
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
from rhs and lhs
[PROOF STATE]
proof (chain)
picking this:
\<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (Var a \<lhd> \<sigma>)
Var a \<lhd> \<sigma> = Var a \<lhd> \<sigma>
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (Var a \<lhd> \<sigma>)
Var a \<lhd> \<sigma> = Var a \<lhd> \<sigma>
goal (1 subgoal):
1. vars_of (Var a \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (Var a \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
case (Comb t1 t2)
[PROOF STATE]
proof (state)
this:
vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have "vars_of (Comb t1 t2) = (vars_of t1) \<union> (vars_of t2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vars_of (t1 \<cdot> t2) = vars_of t1 \<union> vars_of t2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (t1 \<cdot> t2) = vars_of t1 \<union> vars_of t2
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
vars_of (t1 \<cdot> t2) = vars_of t1 \<union> vars_of t2
[PROOF STEP]
have "\<Union> { V. \<exists>x. (x \<in> (vars_of (Comb t1 t2))) \<and> (V = vars_of (subst (Var x) \<sigma>)) }
= \<Union> { V. \<exists>x. (x \<in> (vars_of t1)) \<and> (V = vars_of (subst(Var x) \<sigma>)) }
\<union> \<Union> { V. \<exists>x. (x \<in> (vars_of t2)) \<and> (V = vars_of (subst (Var x) \<sigma>)) }"
[PROOF STATE]
proof (prove)
using this:
vars_of (t1 \<cdot> t2) = vars_of t1 \<union> vars_of t2
goal (1 subgoal):
1. \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)} \<union> \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)} \<union> \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)} \<union> \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have rhs: "\<Union> { V. \<exists>x. (x \<in> (vars_of (Comb t1 t2))) \<and> (V = vars_of (subst (Var x) \<sigma>)) }
= (vars_of (subst t1 \<sigma>)) \<union> (vars_of (subst t2 \<sigma>))"
[PROOF STATE]
proof (prove)
using this:
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)} \<union> \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (1 subgoal):
1. \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
[PROOF STEP]
using \<open>vars_of (subst t1 \<sigma>)
= \<Union> { V. \<exists>x. (x \<in> (vars_of t1)) \<and> (V = vars_of (subst (Var x) \<sigma>)) }\<close>
and
\<open>vars_of (subst t2 \<sigma>)
= \<Union> { V. \<exists>x. (x \<in> (vars_of t2)) \<and> (V = vars_of (subst (Var x) \<sigma>)) }\<close>
[PROOF STATE]
proof (prove)
using this:
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)} \<union> \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (1 subgoal):
1. \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have "(subst (Comb t1 t2) \<sigma>) = (Comb (subst t1 \<sigma>) (subst t2 \<sigma>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t1 \<cdot> t2 \<lhd> \<sigma> = (t1 \<lhd> \<sigma>) \<cdot> (t2 \<lhd> \<sigma>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t1 \<cdot> t2 \<lhd> \<sigma> = (t1 \<lhd> \<sigma>) \<cdot> (t2 \<lhd> \<sigma>)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t1 \<cdot> t2 \<lhd> \<sigma> = (t1 \<lhd> \<sigma>) \<cdot> (t2 \<lhd> \<sigma>)
[PROOF STEP]
have lhs: "(vars_of (subst (Comb t1 t2) \<sigma>)) =
(vars_of (subst t1 \<sigma>)) \<union> (vars_of (subst t2 \<sigma>))"
[PROOF STATE]
proof (prove)
using this:
t1 \<cdot> t2 \<lhd> \<sigma> = (t1 \<lhd> \<sigma>) \<cdot> (t2 \<lhd> \<sigma>)
goal (1 subgoal):
1. vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
from lhs and rhs
[PROOF STATE]
proof (chain)
picking this:
vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
goal (1 subgoal):
1. vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal:
No subgoals!
[PROOF STEP]
qed
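
(Readability note, not part of the Isabelle source.) In conventional notation, the
lemma just proved states
$$\mathrm{vars\_of}(t\,\sigma) \;=\; \bigcup_{x \,\in\, \mathrm{vars\_of}(t)} \mathrm{vars\_of}(x\,\sigma),$$
i.e. the variables of the instance $t\sigma$ are exactly the variables contributed by
the images under $\sigma$ of the variables of $t$. The proof is by structural induction
on $t$: the Const case gives empty sets on both sides, the Var case is immediate, and
the Comb case splits the union over the variables of $t_1$ and $t_2$.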
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import LeaveOneGroupOut
from plot_with_PE_imputation import plot_with_PE_imputation
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.signal import medfilt
#Load Data
data = pd.read_csv('./facies_vectors.csv')
# Parameters
feature_names = ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'PE', 'NM_M', 'RELPOS']
facies_names = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS']
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
# Store features and labels
# X = data[feature_names].values
# y = data['Facies'].values
# Store well labels and depths
wells = data['Well Name'].values
depth = data['Depth'].values
# Imputation
DataImp_dropNA = data.dropna(axis = 0, inplace = False)
F9idx = DataImp_dropNA[DataImp_dropNA['Well Name'] == 'Recruit F9'].index
DataImp_dropF9 = DataImp_dropNA.drop(F9idx)
wells_noPE = DataImp_dropF9['Well Name'].values
DataImp = DataImp_dropF9.drop(['Formation', 'Well Name', 'Depth'], axis=1).copy()
Ximp=DataImp.loc[:, DataImp.columns != 'PE'].values
Yimp=DataImp.loc[:, 'PE'].values
from sklearn.preprocessing import RobustScaler
scaler = RobustScaler()
scaler.fit(Ximp)
Ximp_scaled = scaler.transform(Ximp)
logo = LeaveOneGroupOut()
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
## Repeat several runs with the same parameters
loop = 10
loop_mse_list = []
loop_R2_list = []
df_loop = pd.DataFrame(columns=["R2","MSE"])
for i in range(loop):
mselist = []
R2list = []
for train, test in logo.split(Ximp_scaled, Yimp, groups=wells_noPE):
well_name = wells_noPE[test[0]]
# Imputation using MLP
reg = MLPRegressor(hidden_layer_sizes=50, max_iter=1000)
reg.fit(Ximp_scaled[train], Yimp[train])
Yimp_predicted = reg.predict(Ximp_scaled[test])
## medfilt
Yimp_predicted = medfilt(Yimp_predicted, kernel_size=5)
R2 = r2_score(Yimp[test], Yimp_predicted)
mse = mean_squared_error(Yimp[test], Yimp_predicted)
print("Well name_test : ", well_name)
print("R2 : %.4f" % R2)
print("mse : %.4f" % mse)
R2list.append(R2)
mselist.append(mse)
# predict_data = data[data['Well Name'] == well_name].copy()
# predict_data["PE_pred"] = Yimp_predicted
#
# plot_with_PE_imputation(predict_data, facies_colors,R2)
average_R2 = np.mean(np.array(R2list))
average_mse = np.mean(np.array(mselist))
print("%i of %i" % (i+1,loop), end=" ")
print("average R2 : %.4f " % average_R2, end=" ")
print("average MSE : %.4f " % average_mse)
loop_mse_list.append(average_mse)
loop_R2_list.append(average_R2)
df_loop.loc["try %i"%(i+1)] = [average_R2, average_mse]
average_R2_loop = np.mean(np.array(loop_R2_list))
average_mse_loop = np.mean(np.array(loop_mse_list))
df_loop.loc["average"] = [average_R2_loop, average_mse_loop]
print(df_loop)
# df_loop.to_excel("MLP_try10.xlsx")
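
# Hedged sketch (added; not part of the original script): how LeaveOneGroupOut
# splits rows by group — each iteration holds out every row of exactly one group,
# which is how the loop above holds out one well at a time. The toy arrays are
# illustrative stand-ins for Ximp_scaled / Yimp / wells_noPE.
def _logo_split_sketch():
    X_toy = np.arange(8).reshape(4, 2)
    y_toy = np.arange(4)
    groups = np.array(['WELL_A', 'WELL_A', 'WELL_B', 'WELL_B'])
    for train_idx, test_idx in LeaveOneGroupOut().split(X_toy, y_toy, groups=groups):
        # prints: WELL_A train=[2 3] test=[0 1], then WELL_B train=[0 1] test=[2 3]
        print(groups[test_idx][0], 'train=%s test=%s' % (train_idx, test_idx))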
[STATEMENT]
lemma ns_mul_ext_bottom: "(A,{#}) \<in> ns_mul_ext ns s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (A, {#}) \<in> ns_mul_ext ns s
[PROOF STEP]
by (auto intro!: ns_mul_extI)
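
(Readability note, not part of the Isabelle source.) The lemma states that for every
multiset $A$, the pair $(A, \varnothing)$ lies in the non-strict multiset extension
$\mathrm{ns\_mul\_ext}\ ns\ s$: any multiset is at least as large as the empty multiset.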
from policy import LSTMPolicy, MlpPolicyValue
import gym
import gym_compete
import pickle
import sys
import argparse
import tensorflow as tf
import numpy as np
def load_from_file(param_pkl_path):
with open(param_pkl_path, 'rb') as f:
params = pickle.load(f)
return params
def setFromFlat(var_list, flat_params):
shapes = list(map(lambda x: x.get_shape().as_list(), var_list))
total_size = np.sum([int(np.prod(shape)) for shape in shapes])
theta = tf.placeholder(tf.float32, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = int(np.prod(shape))
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
op = tf.group(*assigns)
tf.get_default_session().run(op, {theta: flat_params})
def run(config):
if config.env == "kick-and-defend":
env = gym.make("kick-and-defend-v0")
policy_type = "lstm"
elif config.env == "run-to-goal-humans":
env = gym.make("run-to-goal-humans-v0")
policy_type = "mlp"
elif config.env == "run-to-goal-ants":
env = gym.make("run-to-goal-ants-v0")
policy_type = "mlp"
elif config.env == "you-shall-not-pass":
env = gym.make("you-shall-not-pass-humans-v0")
policy_type = "mlp"
elif config.env == "sumo-humans":
env = gym.make("sumo-humans-v0")
policy_type = "lstm"
elif config.env == "sumo-ants":
env = gym.make("sumo-ants-v0")
policy_type = "lstm"
else:
print("unsupported environment")
print("choose from: run-to-goal-humans, run-to-goal-ants, you-shall-not-pass, sumo-humans, sumo-ants, kick-and-defend")
sys.exit()
param_paths = config.param_paths
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__()
policy = []
for i in range(2):
scope = "policy" + str(i)
if policy_type == "lstm":
policy.append(LSTMPolicy(scope=scope, reuse=False,
ob_space=env.observation_space.spaces[i],
ac_space=env.action_space.spaces[i],
hiddens=[128, 128], normalize=True))
else:
policy.append(MlpPolicyValue(scope=scope, reuse=False,
ob_space=env.observation_space.spaces[i],
ac_space=env.action_space.spaces[i],
hiddens=[64, 64], normalize=True))
# initialize uninitialized variables
sess.run(tf.variables_initializer(tf.global_variables()))
params = [load_from_file(param_pkl_path=path) for path in param_paths]
for i in range(len(policy)):
setFromFlat(policy[i].get_variables(), params[i])
max_episodes = config.max_episodes
num_episodes = 0
nstep = 0
total_reward = [0.0 for _ in range(len(policy))]
total_scores = [0 for _ in range(len(policy))]
# total_scores = np.asarray(total_scores)
observation = env.reset()
print("-"*5 + " Episode %d " % (num_episodes+1) + "-"*5)
while num_episodes < max_episodes:
env.render()
action = tuple([policy[i].act(stochastic=True, observation=observation[i])[0]
for i in range(len(policy))])
observation, reward, done, infos = env.step(action)
nstep += 1
for i in range(len(policy)):
total_reward[i] += reward[i]
        if done[0]:
            num_episodes += 1
            draw = True
            for i in range(len(policy)):
                if 'winner' in infos[i]:
                    draw = False
                    total_scores[i] += 1
                    print("Winner: Agent {}, Scores: {}, Total Episodes: {}".format(i, total_scores, num_episodes))
            # print the tie message once, after all agents have been checked
            # (originally this check sat inside the loop, so a tie message could
            # be printed for agent 0 even when agent 1 turned out to be the winner)
            if draw:
                print("Game Tied: Scores: {}, Total Episodes: {}".format(total_scores, num_episodes))
observation = env.reset()
nstep = 0
total_reward = [0.0 for _ in range(len(policy))]
for i in range(len(policy)):
policy[i].reset()
if num_episodes < max_episodes:
print("-"*5 + "Episode %d" % (num_episodes+1) + "-"*5)
if __name__ == "__main__":
p = argparse.ArgumentParser(description="Environments for Multi-agent competition")
p.add_argument("--env", default="sumo-humans", type=str, help="competitive environment: run-to-goal-humans, run-to-goal-ants, you-shall-not-pass, sumo-humans, sumo-ants, kick-and-defend")
p.add_argument("--param-paths", nargs='+', required=True, type=str)
p.add_argument("--max-episodes", default=10, help="max number of matches", type=int)
config = p.parse_args()
run(config)
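
# Example invocation (hedged; the parameter-file paths below are hypothetical
# placeholders for pickled policy parameters):
#   python main.py --env sumo-ants \
#       --param-paths params/agent0.pkl params/agent1.pkl \
#       --max-episodes 5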
# -*- coding:utf-8 -*-
###############################################################################
# Rutap Bot 2019 Hangul Clock Module #
# This module is software derived from Hangul Clock and is subject to the   #
# GPLv3 license. Please obtain the original author's permission before use. #
# Copyright for this module is held by Hwahyang (화향).                      #
###############################################################################
import random, datetime, os
import numpy as np
from PIL import Image
from activity_log import log_actvity
def alpha_composite(src, dst):
    """Porter-Duff 'over' compositing of two RGBA images (src over dst)."""
    src = np.asarray(src)
    dst = np.asarray(dst)
    out = np.empty(src.shape, dtype='float')
    alpha = np.index_exp[:, :, 3:]
    rgb = np.index_exp[:, :, :3]
    src_a = src[alpha] / 255.0
    dst_a = dst[alpha] / 255.0
    # out_alpha = src_alpha + dst_alpha * (1 - src_alpha)
    out[alpha] = src_a + dst_a * (1 - src_a)
    old_setting = np.seterr(invalid='ignore')  # tolerate 0/0 where out_alpha == 0
    out[rgb] = (src[rgb] * src_a + dst[rgb] * dst_a * (1 - src_a)) / out[alpha]
    np.seterr(**old_setting)
    out[alpha] *= 255
    out = np.clip(out, 0, 255)  # np.clip returns a new array; the original call discarded the result
    out = out.astype('uint8')
    out = Image.fromarray(out, 'RGBA')
    return out
def hangul_clock():
open('clock_rendering.rtl', 'w').close()
now = datetime.datetime.now()
filename = "%s_%s_%s_%s_%s_%s.png" % (now.year, now.month, now.day, now.hour, now.minute, now.second)
BG = Image.open("hangul_clock_base/BG_1000_1500.png")
ment = Image.open("hangul_clock_base/ment/ment%s_1000_1500.png" % (random.randint(1, 3)))
one = alpha_composite(ment, BG)
hour_base = Image.open("hangul_clock_base/hour/hour_base_1000_1500.png")
two = alpha_composite(hour_base, one)
min_base = Image.open("hangul_clock_base/minute/minute_base_1000_1500.png")
three = alpha_composite(min_base, two)
hour = now.hour
if hour > 12:
hour = now.hour - 12
now_hour = Image.open("hangul_clock_base/hour/hour_%s_1000_1500.png" % (hour))
four = alpha_composite(now_hour, three)
now_minute = Image.open("hangul_clock_base/minute/minute_%s_1000_1500.png" % (now.minute))
five = alpha_composite(now_minute, four)
result = five
result.save(filename)
log_actvity("I completed rendering Clock Render")
os.remove('clock_rendering.rtl')
    return filename
from __future__ import division, absolute_import, print_function
import glob
import argparse
import os
import shutil
import pdb
import numpy as np
from tqdm import tqdm
CONTINUAL_LEARNING_LABELS = ['CC', 'SC', 'EC', 'SQC']
CL_LABEL_KEY = "continual_learning_label"
def main():
parser = argparse.ArgumentParser(description='Dataset Manipulator: useful to merge two datasets by concatenating '
+ 'episodes. PS: Deleting sources after merging into the destination '
+ 'folder.')
parser.add_argument('--continual-learning-labels', type=str, nargs=2, metavar=('label_1', 'label_2'),
default=argparse.SUPPRESS, help='Labels for the continual learning RL distillation task.')
parser.add_argument('-f', '--force', action='store_true', default=False,
help='Force the merge, even if it overrides something else,'
' including the destination if it exist')
    parser.add_argument('--timesteps', type=int, nargs=2, default=[-1, -1],
                        help="Keep only the given number of timesteps from each dataset (-1 keeps all)")
group = parser.add_mutually_exclusive_group()
group.add_argument('--merge', type=str, nargs=3, metavar=('source_1', 'source_2', 'destination'),
default=argparse.SUPPRESS,
help='Merge two datasets by appending the episodes, deleting sources right after.')
args = parser.parse_args()
if 'merge' in args:
# let make sure everything is in order
assert os.path.exists(args.merge[0]), "Error: dataset '{}' could not be found".format(args.merge[0])
        # If the merge file exists already, delete it for the convenience of updating student's policy
if os.path.exists(args.merge[2]) or os.path.exists(args.merge[2] + '/'):
assert args.force, "Error: destination directory '{}' already exists".format(args.merge[2])
shutil.rmtree(args.merge[2])
if 'continual_learning_labels' in args:
assert args.continual_learning_labels[0] in CONTINUAL_LEARNING_LABELS \
and args.continual_learning_labels[1] in CONTINUAL_LEARNING_LABELS, \
"Please specify a valid Continual learning label to each dataset to be used for RL distillation !"
# create the output
os.mkdir(args.merge[2])
#os.rename(args.merge[0] + "/dataset_config.json", args.merge[2] + "/dataset_config.json")
#os.rename(args.merge[0] + "/env_globals.json", args.merge[2] + "/env_globals.json")
shutil.copy2(args.merge[0] + "/dataset_config.json",args.merge[2] + "/dataset_config.json")
shutil.copy2(args.merge[0] + "/env_globals.json", args.merge[2] + "/env_globals.json")
# copy files from first source
num_timesteps_1, num_timesteps_2 = args.timesteps
local_path = os.getcwd()
all_records = sorted(glob.glob(args.merge[0] + "/record_[0-9]*/*"))
previous_records = all_records[0]
for ts_counter_1, record in enumerate(all_records):
#if the timesteps is larger than needed, we wait until this episode is over
if(num_timesteps_1>0 and ts_counter_1 >num_timesteps_1):
if(os.path.dirname(previous_records).split('_')[-1] != os.path.dirname(record).split('_')[-1]):
break
s = args.merge[2] + "/" + record.split("/")[-2] + '/' + record.split("/")[-1]
s = os.path.join(local_path,s)
record = os.path.join(local_path, record)
try:
shutil.copy2(record, s)
except FileNotFoundError:
os.mkdir(os.path.dirname(s))
shutil.copy2(record, s)
previous_records = record
num_episode_dataset_1 = int(previous_records.split("/")[-2][7:])
if (num_timesteps_1 == -1):
num_episode_dataset_1 += 1
ts_counter_1 += 1
# copy files from second source
all_records = sorted(glob.glob(args.merge[1] + "/record_[0-9]*/*"))
previous_records = all_records[0]
for ts_counter_2, record in enumerate(all_records):
if (num_timesteps_2 > 0 and ts_counter_2 > num_timesteps_2):
if (os.path.dirname(previous_records).split('_')[-1] != os.path.dirname(record).split('_')[-1]):
break
episode = str(num_episode_dataset_1 + int(record.split("/")[-2][7:]))
new_episode = record.split("/")[-2][:-len(episode)] + episode
s = args.merge[2] + "/" + new_episode + '/' + record.split("/")[-1]
s = os.path.join(local_path, s)
record = os.path.join(local_path, record)
try:
shutil.copy2(record, s)
except FileNotFoundError:
os.mkdir(os.path.dirname(s))
shutil.copy2(record, s)
previous_records = record
num_episode_dataset_2 = int(previous_records.split("/")[-2][7:])
if(num_timesteps_2==-1):
num_episode_dataset_2 +=1
ts_counter_2 +=1
ts_counter = [ts_counter_1, ts_counter_2]
# load and correct ground_truth
ground_truth = {}
ground_truth_load = np.load(args.merge[0] + "/ground_truth.npz")
ground_truth_load_2 = np.load(args.merge[1] + "/ground_truth.npz")
ground_truth["images_path"] = []
num_episode_dataset = num_episode_dataset_1
index_slash = args.merge[2].find("/")
index_margin_str = len("/record_")
directory_str = args.merge[2][index_slash+1:]
len_info_1 = [len(ground_truth_load[k]) for k in ground_truth_load.keys()]
num_eps_total_1, num_ts_total_1 = min(len_info_1), max(len_info_1)
len_info_2 = [len(ground_truth_load_2[k]) for k in ground_truth_load_2.keys()]
num_eps_total_2, num_ts_total_2 = min(len_info_2), max(len_info_2)
for idx_, gt_load in enumerate([ground_truth_load, ground_truth_load_2], 1):
for arr in gt_load.files:
if arr == "images_path":
# here, we want to rename just the folder containing the records, hence the black magic
for i in tqdm(range(ts_counter[idx_-1]),#range(len(gt_load["images_path"])),
desc="Update of paths (Folder " + str(1+idx_) + ")"):
# find the "record_" position
path = gt_load["images_path"][i]
end_pos = path.find("/record_")
inter_pos = path.find("/frame") # pos in the complete path.
if idx_ > 1:
episode = str(num_episode_dataset_1 + int(path[end_pos + index_margin_str: inter_pos]))
episode = episode.zfill(3)
new_record_path = "/record_" + episode + path[inter_pos:]
else:
new_record_path = path[end_pos:]
ground_truth["images_path"].append(directory_str + new_record_path)
else:
                    # anything that isn't images_path, we don't need to change
gt_arr = gt_load[arr]
if idx_ > 1:
num_episode_dataset = num_episode_dataset_2
                    # check before overwriting that the target is random
if gt_load[arr].shape[0] < num_episode_dataset:
gt_arr = np.repeat(gt_load[arr], num_episode_dataset, axis=0)
if idx_ > 1:
# This is the first dataset
if (len(gt_arr) == num_eps_total_2):
# This is a episode non-change variable
ground_truth[arr] = np.concatenate((ground_truth[arr],
gt_arr[:num_episode_dataset_2]), axis=0)
elif (len(gt_arr) == num_ts_total_2): # a timesteps changing variable
ground_truth[arr] = np.concatenate((ground_truth[arr],
gt_arr[:ts_counter_2]), axis=0)
else:
assert 0 == 1, "No compatible variable in the stored ground truth for the second dataset {}" \
.format(args.merge[1])
else:
# This is the first dataset
if(len(gt_arr) == num_eps_total_1):
#This is a episode non-change variable
ground_truth[arr] = gt_arr[:num_episode_dataset_1]
elif(len(gt_arr) == num_ts_total_1): # a timesteps changing variable
ground_truth[arr] = gt_arr[:ts_counter_1]
else:
assert 0 ==1 , "No compatible variable in the stored ground truth for the first dataset {}"\
.format(args.merge[0])
# save the corrected ground_truth
np.savez(args.merge[2] + "/ground_truth.npz", **ground_truth)
# load and correct the preprocessed data (actions, rewards etc)
preprocessed = {}
preprocessed_load = np.load(args.merge[0] + "/preprocessed_data.npz")
preprocessed_load_2 = np.load(args.merge[1] + "/preprocessed_data.npz")
dataset_1_size = preprocessed_load["actions"].shape[0]
dataset_2_size = preprocessed_load_2["actions"].shape[0]
for idx, prepro_load in enumerate([preprocessed_load, preprocessed_load_2]):
for arr in prepro_load.files:
pr_arr = prepro_load[arr]
to_class = None
if arr == "episode_starts":
to_class = bool
elif arr == "actions_proba" or arr =="rewards":
to_class = float
else:
to_class = int
# all data is of timesteps changing (instead of episode changing)
if preprocessed.get(arr, None) is None: #for the first dataset
preprocessed[arr] = pr_arr.astype(to_class)[:ts_counter_1]
else:# for the second dataset
preprocessed[arr] = np.concatenate((preprocessed[arr].astype(to_class),
pr_arr[:ts_counter_2].astype(to_class)), axis=0)
if 'continual_learning_labels' in args:
if preprocessed.get(CL_LABEL_KEY, None) is None:
preprocessed[CL_LABEL_KEY] = \
np.array([args.continual_learning_labels[idx] for _ in range(ts_counter_1)])
else:
preprocessed[CL_LABEL_KEY] = \
np.concatenate((preprocessed[CL_LABEL_KEY], np.array([args.continual_learning_labels[idx]
for _ in range(ts_counter_2)])), axis=0)
print("The total timesteps: ", ts_counter_1+ts_counter_2)
print("The total episodes: ", num_episode_dataset_1+num_episode_dataset_2)
for k in preprocessed:
print(k)
print(preprocessed[k].shape)
for k in ground_truth:
print(k)
print(ground_truth[k].shape)
np.savez(args.merge[2] + "/preprocessed_data.npz", ** preprocessed)
# remove the old folders
# shutil.rmtree(args.merge[0])
# shutil.rmtree(args.merge[1])
if __name__ == '__main__':
    main()
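
# Example invocation (hedged; the dataset paths are hypothetical placeholders):
#   python dataset_merger.py --merge data/on_policy_1 data/on_policy_2 data/merged \
#       --continual-learning-labels CC SC --timesteps 5000 5000 -f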
from pathlib import Path
import numpy as np
from tensorflow import keras
from tensorflow.keras.preprocessing.image import load_img
class MaskSequence(keras.utils.Sequence):
def __init__(self, base_path, split, batch_size, img_size):
self.batch_size = batch_size
self.img_size = img_size
self.input_img_paths, self.target_img_paths = self._load_paths(Path(base_path), split + ".txt")
def __len__(self):
return len(self.target_img_paths) // self.batch_size
def __getitem__(self, idx):
"""Returns tuple (input, target) correspond to batch #idx."""
i = idx * self.batch_size
batch_input_img_paths = self.input_img_paths[i: i + self.batch_size]
batch_target_img_paths = self.target_img_paths[i: i + self.batch_size]
x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
for j, path in enumerate(batch_input_img_paths):
img = load_img(path, target_size=self.img_size)
x[j] = img
y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="uint8")
for j, path in enumerate(batch_target_img_paths):
img = load_img(path, target_size=self.img_size, color_mode="grayscale")
y[j] = np.expand_dims(img, 2)
# Ground truth labels are 1, 2, 3. Subtract one to make them 0, 1, 2:
# y[j] -= 1
return x, y
@staticmethod
def _load_paths(directory, file):
        with open(directory / file) as f:
rows = f.readlines()
rows = map(lambda x: x.strip(), rows)
rows = map(lambda x: x.split(" "), rows)
rows = list(rows)
inputs, outputs = zip(*rows)
absolute_path = lambda x: str((directory / x).resolve())
inputs, outputs = map(absolute_path, inputs), map(absolute_path, outputs)
return list(inputs), list(outputs)
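
# Hedged usage sketch (added; not part of the original module). It assumes a
# dataset directory containing a 'train.txt' whose lines are
# "input_image_path target_mask_path" pairs; the base path is a hypothetical
# placeholder.
if __name__ == "__main__":
    seq = MaskSequence("datasets/segmentation", split="train",
                       batch_size=4, img_size=(256, 256))
    x_batch, y_batch = seq[0]
    print(x_batch.shape, y_batch.shape)  # (4, 256, 256, 3) (4, 256, 256, 1)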
%% Copyright (C) 2014, 2016-2017, 2019, 2022 Colin B. Macdonald
%% Copyright (C) 2020 Mike Miller
%% Copyright (C) 2020 Fernando Alvarruiz
%%
%% This file is part of OctSymPy.
%%
%% OctSymPy is free software; you can redistribute it and/or modify
%% it under the terms of the GNU General Public License as published
%% by the Free Software Foundation; either version 3 of the License,
%% or (at your option) any later version.
%%
%% This software is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty
%% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
%% the GNU General Public License for more details.
%%
%% You should have received a copy of the GNU General Public
%% License along with this software; see the file COPYING.
%% If not, see <http://www.gnu.org/licenses/>.
%% -*- texinfo -*-
%% @defun mat_rclist_asgn (@var{A}, @var{r}, @var{c}, @var{B})
%% Private helper routine for sym array assigment using lists.
%%
%% @code{(R(i),C(i))} specify entries of the matrix @var{A}.
%% We execute @code{A(R(i),C(i)) = B(i)}.
%%
%% Notes:
%% @itemize
%% @item @var{B} is accessed with linear indexing.
%% @item @var{B} might be a scalar, used many times.
%% @item @var{A} might need to get bigger, if so it will be padded
%% with zeros.
%% @end itemize
%%
%% @end defun
function z = mat_rclist_asgn(A, r, c, B)
if (isempty (r) && isempty (c) && (isempty (B) || isscalar (B)))
z = A;
return
end
if ~( isvector(r) && isvector(c) && (length(r) == length(c)) )
error('this routine is for a list of rows and cols');
end
if ((numel(B) == 1) && (numel(r) > 1))
B = repmat(B, size(r));
end
if (length(r) ~= numel(B))
error('not enough/too much in B')
end
% Easy trick to copy A into larger matrix AA:
% AA = sp.Matrix.zeros(n, m)
% AA[0, 0] = A
  % Also useful: .copyin_matrix
cmd = { '(A, r, c, B) = _ins'
'# B linear access fix, transpose for sympy row-based'
'if B is None or not B.is_Matrix:'
' B = sp.Matrix([[B]])'
'BT = B.T'
'# make a resized copy of A, and copy existing stuff in'
'if isinstance(A, list):'
' assert len(A) == 0, "unexpectedly non-empty list: report bug!"'
' n = max(max(r) + 1, 1)'
' m = max(max(c) + 1, 1)'
' AA = [[0]*m for i in range(n)]'
'elif A is None or not isinstance(A, MatrixBase):'
' # we have non-matrix, put in top-left'
' n = max(max(r) + 1, 1)'
' m = max(max(c) + 1, 1)'
' AA = [[0]*m for i in range(n)]'
' AA[0][0] = A'
'else:'
' # build bigger matrix'
' n = max(max(r) + 1, A.rows)'
' m = max(max(c) + 1, A.cols)'
' AA = [[0]*m for i in range(n)]'
' # copy current matrix in'
' for i in range(A.rows):'
' for j in range(A.cols):'
' AA[i][j] = A[i, j]'
'# now insert the new bits from B'
'for i, (r, c) in enumerate(zip(r, c)):'
' AA[r][c] = BT[i]'
'return sp.Matrix(AA),' };
rr = num2cell(int32(r-1));
cc = num2cell(int32(c-1));
z = pycall_sympy__ (cmd, A, rr, cc, B);
% a simpler earlier version, but only for scalar r,c
%cmd = { '(A, r, c, b) = _ins'
% 'if not A.is_Matrix:'
% ' A = sp.Matrix([[A]])'
% 'AA = sp.Matrix.zeros(max(r+1, A.rows), max(c+1, A.cols))'
% 'AA[0, 0] = A'
% 'AA[r, c] = b'
% 'return AA,' };
end
import numpy as np
import os
import textwrap
import tkinter as tk
import tkinter.ttk as tk_ttk
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
TREEVIEW_SELECT_EVENT = '<<treeview_select>>'
class FullDisplay(tk.Frame):
def __init__(self, master):
super().__init__(master)
self.grid(row=0, column=0, sticky='nsew')
self.tree = DirectoryViewer(self)
self.canvas = GraphPlotter(self)
self.bind(TREEVIEW_SELECT_EVENT, self.treeview_new_selection)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=3)
self.columnconfigure(1, weight=10)
def treeview_new_selection(self, event):
self.canvas.draw_plot(self.tree.get_selected_file())
class DirectoryViewer(tk.Frame):
def __init__(self, master=None, path='.'):
super().__init__(master)
self.master = master
self.grid(row=0, column=0, sticky='nswe')
self.setup_tree(path)
def tell_master_select(self, event):
self.master.event_generate(TREEVIEW_SELECT_EVENT)
def get_selected_file(self):
return self.build_path(self.tree.focus())
def build_path(self, curr_id):
curr_item = self.tree.item(curr_id)
parent_id = self.tree.parent(curr_id)
curr_item_path = curr_item['text']
while parent_id != '':
parent = self.tree.item(parent_id)
curr_item_path = os.path.join(parent['text'], curr_item_path)
curr_id = parent_id
curr_item = self.tree.item(curr_id)
parent_id = self.tree.parent(curr_id)
return curr_item_path
def setup_tree(self, path):
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.tree = tk_ttk.Treeview(self)
self.tree.bind('<<TreeviewSelect>>', self.tell_master_select)
self.tree.grid(row=0, column=0, sticky='nswe')
ysb = tk_ttk.Scrollbar(self,
orient='vertical',
command=self.tree.yview)
ysb.grid(row=0, column=1, sticky='ns')
xsb = tk_ttk.Scrollbar(self,
orient='horizontal',
command=self.tree.xview)
xsb.grid(row=1, column=0, sticky='ew')
self.tree.configure(yscroll=ysb.set, xscroll=xsb.set)
path = os.path.abspath(path)
self.path = path
self.tree.heading('#0', text=path, anchor='w')
root_node = self.tree.insert('', 'end', text=path, open=True)
self.opened = set([root_node])
for p in os.listdir(path):
self.insert_node(root_node, p, os.path.join(path, p))
self.tree.bind('<<TreeviewOpen>>', self.open_node)
# insert_node() and open_node() are for lazy loading
def insert_node(self, parent, text, path):
node = self.tree.insert(parent, 'end', text=text, open=False)
if os.path.isdir(path):
self.tree.insert(node, 'end') # dummy to show the dir icon
def open_node(self, event):
curr_node = self.tree.focus()
abspath = self.build_path(curr_node)
if os.path.isdir(abspath) and curr_node not in self.opened:
self.tree.delete(self.tree.get_children(curr_node))
for p in os.listdir(abspath):
self.insert_node(curr_node, p, os.path.join(abspath, p))
self.opened.add(curr_node)
# process_directory() does eager loading
def process_directory(self, parent, path):
for p in os.listdir(path):
abspath = os.path.join(path, p)
isdir = os.path.isdir(abspath)
oid = self.tree.insert(parent, 'end', text=p, open=False)
if isdir:
self.process_directory(oid, abspath)
class GraphPlotter(tk.Frame):
def __init__(self, master):
super().__init__(master)
self.grid(row=0, column=1, sticky='nsew')
self.load_plotters()
self.setup_canvas()
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
def setup_canvas(self):
self.figure = matplotlib.figure.Figure(figsize=(5, 5), dpi=100)
self.canvas = FigureCanvasTkAgg(self.figure, self)
self.draw_plot(None)
self.canvas.get_tk_widget().grid(column=0, row=0, sticky='nsew')
def load_plotters(self):
import data_browser.plotting_modules
self.plotters = {module.FILE_EXTENSION: module.DEFAULT_PLOTTER
for module
in data_browser.plotting_modules.__all__}
def draw_plot(self, file):
self.figure.clf()
if file is None or os.path.isdir(file):
plot_dir(file, self.figure)
elif os.path.splitext(file)[1] in self.plotters:
try:
self.plotters[os.path.splitext(file)[1]](file, self.figure)
except Exception as e:
plot_error(e, self.figure)
else:
plot_error(ValueError('cannot plot {}'.format(file)), self.figure)
self.canvas.draw_idle()
def plot_error(error, fig):
msg = 'An error occurred:\n'
msg += type(error).__name__ + '\n'
msg += '\n'.join(textwrap.wrap(str(error), 60))
ax = fig.add_subplot(111)
ax.text(0, 0, msg)
ax.set_axis_off()
def plot_dir(file, fig):
ax = fig.add_subplot(111)
ax.set_axis_off()
def _main():
root = tk.Tk()
root.geometry('800x500')
root.title('Data Browser')
app = FullDisplay(master=root)
root.rowconfigure(0, weight=1)
root.columnconfigure(0, weight=1)
app.mainloop()
if __name__ == '__main__':
_main()
[STATEMENT]
lemma rt_graph_not_dip [dest]:
"\<And>ip ip' \<sigma> dip. (ip, ip') \<in> rt_graph \<sigma> dip \<Longrightarrow> ip \<noteq> dip"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>ip ip' \<sigma> dip. (ip, ip') \<in> rt_graph \<sigma> dip \<Longrightarrow> ip \<noteq> dip
[PROOF STEP]
unfolding rt_graph_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>ip ip' \<sigma> dip. (ip, ip') \<in> {uu_. \<exists>ip ip' dsn dsk hops. uu_ = (ip, ip') \<and> ip \<noteq> dip \<and> rt (\<sigma> ip) dip = Some (dsn, dsk, val, hops, ip')} \<Longrightarrow> ip \<noteq> dip
[PROOF STEP]
by auto
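
(Readability note, not part of the Isabelle source.) The lemma says that no edge
$(ip, ip')$ of the route graph for destination $dip$ originates at $dip$ itself;
unfolding the definition makes this immediate, since membership already requires
$ip \neq dip$.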
module LibRealSense
# Load in `deps.jl`, complaining if it does not exist
const depsjl_path = joinpath(@__DIR__, "..", "deps", "deps.jl")
if !isfile(depsjl_path)
    error("LibRealSense was not built properly. Please run Pkg.build(\"LibRealSense\").")
end
include(depsjl_path)
# Module initialization function
function __init__()
check_deps()
end
include("CEnum.jl")
using .CEnum
include("ctypes.jl")
export Ctm, Ctime_t, Cclock_t
include(joinpath(@__DIR__, "..", "gen", "rs2_common.jl"))
include(joinpath(@__DIR__, "..", "gen", "rs2_api.jl"))
foreach(names(@__MODULE__, all=true)) do s
if startswith(string(s), "rs2_") || startswith(string(s), "RS2_")
@eval export $s
end
end
const RS2_API_VERSION = RS2_API_MAJOR_VERSION * 10000 + RS2_API_MINOR_VERSION * 100 + RS2_API_PATCH_VERSION
const RS2_API_VERSION_STR = "$(RS2_API_MAJOR_VERSION).$(RS2_API_MINOR_VERSION).$(RS2_API_PATCH_VERSION)"
export RS2_API_VERSION, RS2_API_VERSION_STR
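# Illustrative check (added; the version numbers are hypothetical): with
# RS2_API_MAJOR_VERSION = 2, RS2_API_MINOR_VERSION = 54 and RS2_API_PATCH_VERSION = 1,
# RS2_API_VERSION == 2*10000 + 54*100 + 1 == 25401 and RS2_API_VERSION_STR == "2.54.1".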
end # module
| {"hexsha": "575c309884d625c1fd6887594b6d814be89b08da", "size": 977, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/LibRealSense.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/RealSense.jl-1d20419d-a1bd-598e-846b-24709a6a9336", "max_stars_repo_head_hexsha": "3cdc32505064468416fc891dfd877daf265d483a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LibRealSense.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/RealSense.jl-1d20419d-a1bd-598e-846b-24709a6a9336", "max_issues_repo_head_hexsha": "3cdc32505064468416fc891dfd877daf265d483a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LibRealSense.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/RealSense.jl-1d20419d-a1bd-598e-846b-24709a6a9336", "max_forks_repo_head_hexsha": "3cdc32505064468416fc891dfd877daf265d483a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4054054054, "max_line_length": 107, "alphanum_fraction": 0.7308085977, "num_tokens": 287} |
import time
from random import *
import numpy as np
import matplotlib.pyplot as plt
def question_1():
    # Initialize the random generator
    seed()
    # Return a random integer from the given range (stop value excluded)
    print(randrange(-10, 8))
    # Return a random integer from the given range (both ends included)
    print(randint(0, 20))
    # Return a random element from the given sequence
    print(choice([1, 2, 5, 3, 5, 7]))
    # Return a sample of the given size drawn from the sequence
    print(sample([1, 2, 3, 5, -4, 'ss'], 3))
    # Return a float in [0, 1)
    print(random())
    # Return a random float between the two given bounds
    print(uniform(1, 2))
    # Return a random float between the two given bounds, with the given mode
    print(triangular(0.2, 0.9, mode=0.4))
    x = [1, 2, 3, 4]
    # Shuffle the sequence in place
    shuffle(x)
print(x)
def question_2():
for s in "PYTHON":
if s == "T":
continue
print(s, end="")
print()
for s in "PYTHON":
if s == "T":
break
print(s, end="")
print()
for s in "BIT":
for i in range(10):
print(s, end="")
if s == "I":
break
def question_3():
    # Return the current local time as a struct_time
    print(time.localtime())
    # Return the current local time as a readable string
    print(time.asctime())
    time.sleep(2 + 3)
    # Return the current local time as a readable string (like asctime)
    print(time.ctime())
    # Floating-point seconds elapsed since the 1970 epoch
    print(time.time())
print(time.process_time() / time.process_time_ns())
def question_4():
    # Single branch
    s = eval(input("Please enter an integer: "))
    if s % 2 == 0:
        print("This is an even number")
    print("The number entered is:", s)
    # Two branches
    if True:
        print("statement 1")
    else:
        print("statement 2")
    # Compact form: expresses a simple two-branch structure
    guess = eval(input())
    print("You guessed {}".format("right" if guess == 99 else "wrong"))
    # Multiple branches
    if True:
        print("1")
    elif True:
        print("2")
    else:
        print("3")
def question_6():
try:
raise IOError
except IOError:
print("IOError")
try:
raise SystemExit
except SystemExit:
print("SystemExit")
try:
raise OverflowError
except OverflowError:
print("OverflowError")
try:
raise EOFError
except EOFError:
print("EOFError")
def f(x0) -> int:
return x0 ** 2
def question_7():
"""
蒙特卡罗方法求函数 y=x^2 在[0,1]内的定积分(值)
"""
# 投点次数
n = 10000
# 矩形区域边界
x_min, x_max = 0.0, 1.0
y_min, y_max = 0.0, 1.0
# 在矩形区域内随机投点
x = np.random.uniform(x_min, x_max, n) # 均匀分布
y = np.random.uniform(y_min, y_max, n)
# 统计 落在函数 y=x^2图像下方的点的数目
res = sum(np.where(y < f(x), 1, 0))
# 计算 定积分的近似值(Monte Carlo方法的精髓:用统计值去近似真实值)
integral = res / n
print('integral: ', integral)
# 画个图看看
fig = plt.figure()
axes = fig.add_subplot(111)
axes.plot(x, y, 'ro', markersize=1)
plt.axis('equal') # 防止图像变形
axes.plot(np.linspace(x_min, x_max, 10), f(np.linspace(x_min, x_max, 10)), 'b-') # 函数图像
plt.show()
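    # Sanity check (added, illustrative): the analytic value of the integral of
    # x^2 over [0, 1] is 1/3 ≈ 0.3333. The Monte Carlo error shrinks roughly
    # like 1/sqrt(n), so n = 10000 points typically lands within a percent or
    # two of that value.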
| {"hexsha": "d964f09281ac1a16b182274671829ab480293f5a", "size": 2703, "ext": "py", "lang": "Python", "max_stars_repo_path": "basic_exercises/experiment_3.py", "max_stars_repo_name": "vuhe/LearnPython", "max_stars_repo_head_hexsha": "0a081a85456557ae542925cce950b23313c3c9b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "basic_exercises/experiment_3.py", "max_issues_repo_name": "vuhe/LearnPython", "max_issues_repo_head_hexsha": "0a081a85456557ae542925cce950b23313c3c9b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "basic_exercises/experiment_3.py", "max_forks_repo_name": "vuhe/LearnPython", "max_forks_repo_head_hexsha": "0a081a85456557ae542925cce950b23313c3c9b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.2635135135, "max_line_length": 92, "alphanum_fraction": 0.5290418054, "include": true, "reason": "import numpy", "num_tokens": 1094} |
/*
* VisualServoing is a tutorial program for introducing students to
* robotics.
*
* Copyright 2009, 2010 Kevin Quigley <[email protected]> and
* Marsette Vona <[email protected]>
*
 * VisualServoing is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* VisualServoing is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* as the file COPYING along with VisualServoing. If not, see
* <http://www.gnu.org/licenses/>.
*/
// system headers
#include <cstdio>
#include <fstream>
#include <iostream>
#include <signal.h>
#include <unistd.h>
// CTY arm project
#include "ArmControl.hpp"
#include "ArmGui.hpp"
#include "ArmGuiGTK.hpp"
#include "IK.hpp"
#include "ImageProcessing.hpp"
#include "Params.hpp"
// OpenCV
#include <cv.h>
#include <highgui.h>
// Boost
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/io.hpp>
using namespace boost::numeric;
#if RUN_THREADED
#include <errno.h>
#include <pthread.h>
#endif
// constants
#define FOCAL_LENGTH 481.0 // calc p65 //TBD - add calculation info
#define DIAMETER .038 //!< Diameter of ball in meters (measured)
#define MIN_TRACKING_RADIUS_PIXELS 2.0 //!< Minimum tracking radius required
void mark_images(const ublas::vector<double>& target, const CvSeq* circles,
const Params& params, Images& images);
void calibrate_offsets(std::string& file, ublas::vector<double>& offsets);
void update_gui_position (ArmControl& ctl, Params& params);
void handler(int sig);
ArmControl* sig_ctl = 0; //!< Pointer to ArmControl for stopping arm
//! movement upon received signal.
/*!
 * \brief Starting function containing main control loop.
* Start up and configure all objects, spin off the GUI, and continue
* in main loop until told to stop.
*/
int main(int argc, char** argv) {
// set signal handling
struct sigaction action;
action.sa_handler = &handler;
if (sigaction(SIGHUP, &action, NULL) < 0)
printf("Error setting action for SIGHUP\n");
if (sigaction(SIGINT, &action, NULL) < 0)
printf("Error setting action for SIGINT\n");
if (sigaction(SIGQUIT, &action, NULL) < 0)
printf("Error setting action for SIGQUIT\n");
if (sigaction(SIGILL, &action, NULL) < 0)
printf("Error setting action for SIGILL\n");
if (sigaction(SIGABRT, &action, NULL) < 0)
printf("Error setting action for SIGABRT\n");
if (sigaction(SIGFPE, &action, NULL) < 0)
printf("Error setting action for SIGFPE\n");
if (sigaction(SIGSEGV, &action, NULL) < 0)
printf("Error setting action for SIGSEGV\n");
if (sigaction(SIGTERM, &action, NULL) < 0)
printf("Error setting action for SIGTERM\n");
Images images;
images.set = false;
images.bgr = 0;
images.filtered_bgr = 0;
images.filtered_hls = 0;
Params params;
init_params(params);
CvSeq* circles = 0;
unsigned int cameraID(0);
std::string port("/dev/ttyS0");
std::string config_file;
std::string flags = "hp:f:";
int opt;
bool help = false;
while ((opt = getopt(argc, argv, flags.c_str())) > 0) {
switch (opt) {
case 'h': help = true; break;
case 'p': port = optarg; break;
case 'f': config_file = optarg; break;
default: break;
}
}
if (help) {
printf("Visual Servo Arm Options:\n"
" -h Print this help menu\n"
" -f <file> Use a calibration file to set joint offsets\n"
" -p <port> Use an alternate serial port (default: /dev/ttyS0\n");
exit(0);
}
CvCapture* capture(0);
IplImage* frame(0);
ImageProcessing ip;
ublas::vector<double> features(3);
ublas::vector<double> delta_angles(3);
ublas::vector<double> target_pos(3);
ublas::vector<double> grab_target(3);
target_pos(0) = 0.0; //x
target_pos(1) = 0.0; //y
target_pos(2) = 0.2; //Z
grab_target(0) = 0.0; //x
grab_target(1) = 0.0; //y
grab_target(2) = 0.05; //Z
// div by focal_length to normalize target x,y
ublas::vector<double> target_pos_norm(target_pos);
target_pos_norm(0) /= FOCAL_LENGTH;
target_pos_norm(1) /= FOCAL_LENGTH;
IK ik;
ik.setTarget(target_pos_norm);
ik.setLengths(0.0, .152, 0.122, 0.075);
ik.setV(.015, -.150, .25); //m, m, rad
ArmGuiGTK* gui = ArmGuiGTK::instance();
gui->update(images, params);
#if RUN_THREADED
pthread_t guiTID;
switch (pthread_create(&guiTID, 0, ArmGui::threadRun, gui)) {
case EAGAIN: printf("Max threads reached\n"); return -1;
case EINVAL: printf("Invalid thread attributes\n"); return -1;
case EPERM: printf("Invalid permissions\n"); return -1;
default: break;
}
#endif
SSC32Controller ssc(port);
ArmControl ctl(ssc);
sig_ctl = &ctl;
ctl.setRateLimit(500);
ublas::vector<double> off(ublas::zero_vector<double>(NUM_JOINTS));
calibrate_offsets(config_file, off);
ctl.setOffset(off);
ublas::vector<double> angle_limits(ublas::vector<double>(NUM_JOINTS));
// max limits
angle_limits(0) = 3.0/8.0 * M_PI;
angle_limits(1) = M_PI_2;
angle_limits(2) = M_PI - .70; // off arm brace
angle_limits(3) = M_PI_2;
std::cout << "max limits: " << angle_limits << std::endl;
ctl.setMaxAngle(angle_limits);
ArmControl::radiansToDegrees(angle_limits);
for (int i = 0; i < NUM_JOINTS; i++)
params.ctl.max_limits[i] = angle_limits(i);
params.limits_changed = true;
// min limits
angle_limits(0) = -3.0/8.0 * M_PI;
angle_limits(1) = -M_PI_2 + 0.35; // off spring pedestal
// angle_limits(2) = 0;
angle_limits(2) = -50.0*2.0*M_PI/360.0;
angle_limits(3) = -M_PI_2;
ctl.setMinAngle(angle_limits);
std::cout << "min limits: " << angle_limits << std::endl;
ArmControl::radiansToDegrees(angle_limits);
for (int i = 0; i < NUM_JOINTS; i++)
params.ctl.min_limits[i] = angle_limits(i);
params.limits_changed = true;
ctl.park();
update_gui_position(ctl, params);
params.current_mode = PARK;
while (params.run) { //mainloop
gui->update(images, params);
#if !RUN_THREADED
gui->run();
#endif
if (!params.run) continue; //to next mainloop iteration
if (params.gui.estop) {
params.gui.estop = false;
printf("ESTOP received\n");
ctl.stop();
if (params.current_mode != ESTOP) {
params.current_mode = ESTOP;
}
}
// all activities respond to these new modes
switch (params.new_mode) {
case HOME:
params.new_mode = NONE;
printf("*** -> HOME\n");
ctl.home();
update_gui_position(ctl, params);
params.current_mode = READY;
break;
case PARK:
printf("park request\n");
params.new_mode = NONE;
printf("*** -> PARK\n");
ctl.park();
update_gui_position(ctl, params);
params.current_mode = PARK;
break;
default:
break;
}
// all activities respond to these current modes
switch (params.current_mode) {
case HOME:
printf("HOME->READY\n");
params.current_mode = READY;
break;
case PARK:
// getting out of PARK handled above
usleep(10000); // 10ms
case BUSY:
printf("BUSY -> READY\n");
if (!ctl.busy())
params.current_mode = READY;
break;
default:
break;
}
if (params.activity == KINEMATICS) {
usleep(10000); // 10ms
ctl.slaveWrist(false);
ublas::vector<double> new_position(NUM_JOINTS);
if (params.current_mode == READY) {
switch (params.new_mode) {
case MOVE:
params.new_mode = NONE;
printf("Moving\n");
for (int i = 0; i < NUM_JOINTS; i++ )
new_position(i) = params.gui.new_theta[i];
ArmControl::degreesToRadians(new_position);
ctl.moveToPosition(new_position);
update_gui_position(ctl, params);
break;
case DRAW:
params.new_mode = NONE;
printf("Drawing\n");
if (params.ctl.holding_marker) {
//ctl.drawX();
} else {
params.new_mode = ERROR;
params.error = "Must hold marker to draw.";
}
break;
// end movement modes
case GRAB:
params.new_mode = NONE;
printf("Grab marker\n");
if (!params.ctl.holding_marker) {
ctl.grabMarker();
//sleep(1);
params.ctl.holding_marker = true;
} else {
printf("error set\n");
params.error_set = true;
params.error = "Marker already held\n";
}
break;
case RELEASE:
params.new_mode = NONE;
printf("Release marker\n");
if (params.ctl.holding_marker) {
ctl.openGrip();
params.ctl.holding_marker = false;
} else {
params.error_set = true;
params.error = "Marker not being held\n";
}
break;
default:
break;
}
}
// update param struct
continue; //to next mainloop iteration
} //end of kinematics
//
// Setup code for Image Processing and Visual Servoing
//
if (capture == 0) {
capture = cvCreateCameraCapture(cameraID);
if (capture == 0) {
printf("failed to init capture device\n");
sleep(1); continue; //to next mainloop iteration
}
printf("initialized capture device\n");
printf("allocating images\n");
images.bgr = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_8U, 3);
#if FLOAT_HLS
images.bgr32 = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_32F, 3);
images.hls = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_32F, 3);
#else
images.hls = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_8U, 3);
#endif
images.filtered_bgr = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_8U, 1);
images.filtered_hls = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_8U, 1);
if (images.bgr == 0 || images.hls == 0
#if FLOAT_HLS
|| images.bgr32 == 0
#endif
|| images.filtered_bgr == 0 || images.filtered_hls == 0) {
params.current_mode = ERROR;
params.error = "Cannot create image holders";
std::cout << params.error << std::endl;
params.run = false;
continue; //to next mainloop iteration
}
//some images might be displayed before being initialized
cvSet(images.bgr, cvScalar(0,0,0));
#if FLOAT_HLS
cvSet(images.bgr32, cvScalar(0,0,0));
#endif
cvSet(images.hls, cvScalar(0,0,0));
cvSet(images.filtered_bgr, cvScalar(0));
cvSet(images.filtered_hls, cvScalar(0));
images.set = true;
} //capture was 0
//
// Image Processing
//
frame = cvQueryFrame(capture);
if (frame == 0) {
params.current_mode = ERROR;
params.error = "Null frame";
std::cout << params.error << std::endl;
params.run = false;
continue; //to next mainloop iteration
}
cvResize(frame, images.bgr);
ctl.slaveWrist(true);
ip.filterImages(images, params);
if (!params.gui.target_set) continue;
if (params.activity == VS ||
params.activity == IP) {
//find ball
circles = ip.findBall(images, params);
mark_images(target_pos, circles, params, images);
} //find ball
if (params.activity != VS) {
usleep(1000);
continue; //to next mainloop iteration
}
//
// Visual Servoing code
//
switch (params.new_mode) {
case GRAB: params.new_mode = NONE; ctl.grabBall(); break;
case RELEASE: params.new_mode = NONE; ctl.openGrip(); break;
default: break;
}
printf("current_mode = %d\n", params.current_mode);
switch (params.current_mode) {
case READY:
printf("old: READY\t");
switch (params.new_mode) {
case MOVE:
printf("new: MOVE\n");
params.new_mode = NONE;
params.current_mode = MOVE;
break;
case PAUSE:
printf("new: PAUSE\n");
params.new_mode = NONE;
params.current_mode = PAUSE;
continue; //to next mainloop iteration
default:
break;
}
break;
case PAUSE:
printf("old: PAUSE\t");
if (params.new_mode == MOVE) {
printf("new: MOVE\n");
params.new_mode = NONE;
params.current_mode = MOVE;
break;
}
break;
case MOVE:
printf("old: MOVE\t");
if (params.new_mode == PAUSE) {
printf("new: PAUSE\n");
params.new_mode = NONE;
//ctl.stop();
params.current_mode = PAUSE;
continue; //to next mainloop iteration
}
break;
default:
break;
}
if (circles != 0 && circles->total > 0 &&
params.gui.target_set &&
(params.current_mode == MOVE || params.current_mode == GRAB)) {
ublas::vector<double> features(3);
float* p = (float*) cvGetSeqElem(circles, 0);
printf("first circle at (%d,%d) radius %d\n",
cvRound(p[0]), cvRound(p[1]), cvRound(p[2]));
features(0) = p[0]; features(1) = p[1]; features(2) = p[2];
if (features(2) >= MIN_TRACKING_RADIUS_PIXELS) {
// rotate/translate to center origin, x left, y up
features(0) = (images.hls->width / 2.0) - features(0); // x
if (images.hls->origin == 0) // top left origin
features(1) = (images.hls->height / 2.0) - features(1); // y
// normalize x & y
features(0) /= FOCAL_LENGTH; features(1) /= FOCAL_LENGTH;
// circular approximation of Z
// Z = D*f / radius*2
features(2) = DIAMETER * FOCAL_LENGTH / (features(2) * 2.0);
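        // Worked example (added; the 20 px radius is illustrative): with the
        // 38 mm ball and f = 481 px, a detected radius of 20 px gives
        // Z = 0.038 * 481.0 / (20 * 2.0) ≈ 0.457 m.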
printf("Norm features x,y = (%3f, %3f), Z = %3f\n",
features(0), features(1), features(2));
printf("Norm target x,y = (%3f, %3f), Z = %3f\n",
target_pos_norm(0), target_pos_norm(1), target_pos_norm(2));
std::cout << "current angles: " << ctl.getCurrentAngles() << std::endl;
bool dls = ik.damped_least_squares(features, ctl.getCurrentAngles(),
params, delta_angles);
if (dls && params.current_mode != PARK) {
std::cout << "commanded angle deltas: " << delta_angles << std::endl;
ctl.moveDelta(delta_angles);
}
} else {
std::cout <<
"radius below tracking enable threshold " <<
MIN_TRACKING_RADIUS_PIXELS;
}
} //tracking ball
} //mainloop
#if RUN_THREADED
switch (pthread_join(guiTID, 0)) {
case 0: break; // all ok
case EINVAL:
printf("pthread_join: Invalid thread id %d\n", (int) guiTID); break;
case ESRCH:
printf("pthread_join: Thread ID %d not found\n", (int) guiTID); break;
case EDEADLK:
printf("pthread_join: Deadlock detected\n"); break;
default:
break;
}
#endif
if (images.set) {
printf("releasing images\n");
cvReleaseImage(&(images.bgr));
cvReleaseImage(&(images.hls));
cvReleaseImage(&(images.filtered_hls));
cvReleaseImage(&(images.filtered_bgr));
#ifdef FLOAT_HLS
cvReleaseImage(&(images.bgr32));
#endif
}
if (gui != 0) {
printf("destroying gui\n");
gui->destroy();
gui = 0;
}
if (capture != 0) {
printf("releasing capture device\n");
cvReleaseCapture(&capture);
}
} //main()
/*!
* \brief Markup images with circles and lines.
* Used for giving feedback to the user on the location of the visual
* servo target and where the ball is detected in the image.
*
* \param[in] target Cartesian coordinates of the target in [pixel,
* pixel, meter] units.
* \param[in] circles Sequence of detected circles (u,v,r) in pixels
* \param[in] params Params struct
* \param[in,out] images Images struct
*/
void mark_images(const ublas::vector<double>& target, const CvSeq* circles,
const Params& params, Images& images) {
// draw target cross
if (params.gui.target_set && params.activity == VS) {
// fl * D / Z = apparent diameter, so div by 2 to get apparent radius
double radius = (FOCAL_LENGTH * DIAMETER / target(2)) / 2.0;
// rescale since target(x,y) was normalized using FOCAL_LENGTH
double ih = images.bgr->height/2.0;
double iw = images.bgr->width/2.0;
CvPoint v1 = cvPoint(cvRound(target(0) + iw ),
cvRound(target(1) + ih - radius)); // up
CvPoint v2 = cvPoint(cvRound(target(0) + iw ),
cvRound(target(1) + ih + radius)); // down
CvPoint h1 = cvPoint(cvRound(target(0) + iw - radius),
cvRound(target(1) + ih )); // left
CvPoint h2 = cvPoint(cvRound(target(0) + iw + radius),
cvRound(target(1) + ih )); // right
// Draw target cross for sighting.
cvLine(images.bgr, h1, h2, CV_RGB(0x00, 0x00, 0xff));
cvLine(images.bgr, v1, v2, CV_RGB(0x00, 0x00, 0xff));
}
int num_circles = /*params.activity == VS ? 1 :*/ circles->total;
// draw the ball
for (int i = 0; i < num_circles; i++ ) {
float* p = (float*) cvGetSeqElem(circles, i);
CvPoint pt = cvPoint(cvRound(p[0]),cvRound(p[1]));
cvCircle(images.bgr, pt, cvRound(p[2]), CV_RGB(0xff, 0x00, 0x00));
cvCircle(images.filtered_hls, pt, cvRound(p[2]), cvScalar(192)); //greyscale
//TBD mark filtered_bgr if using that to find the ball
}
}
/*!
* \brief Uses calibration file to set offsets.
* Reads servo numbers and calibration positions from the provided
* file. Offsets are calculated from calibration position differences
* to ideal positions.
*/
void
calibrate_offsets(std::string& file, ublas::vector<double>& offsets){
if (file.empty()) {
offsets(Arm::ELBOW) = 400;
} else {
std::fstream input(file.c_str());
int servo, val;
ublas::vector<double> calibration_position(NUM_JOINTS);
calibration_position(Arm::GRIP) = 1350;
calibration_position(Arm::WRIST) = 1500;
calibration_position(Arm::ELBOW) = 1500;
calibration_position(Arm::SHOULDER) = 1500;
calibration_position(Arm::BASE) = 1500;
std::cout << "cal: " << calibration_position << std::endl;
std::cout << "grip: " << Arm::GRIP << std::endl;
while (!input.eof()) {
input >> std::skipws >> servo >> val;
printf("servo: %d, val: %d, cal: %g\t",
servo, val, calibration_position(servo));
offsets[servo] = val - calibration_position(servo);
printf("offset: %g\n", offsets(servo));
}
std::cout << "off: " << offsets << std::endl;
}
}
/*!
* \brief Update params with current angles.
* Sets current angles of ctl in struct params to be picked up by GUI.
*/
void
update_gui_position (ArmControl& ctl, Params& params) {
ublas::vector<double> current_theta(ctl.getCurrentAngles());
ArmControl::radiansToDegrees(current_theta);
for (int i = 0; i < NUM_JOINTS; i++) {
params.ctl.current_theta[i] = current_theta(i);
}
params.position_changed = true;
}
/*!
* \brief Signal handler function for stopping threads.
*
* \param[in] sig The received signal.
*/
void handler(int sig) {
if (sig_ctl == 0) {
printf("No control object for emergency shutdown!\n");
} else {
sig_ctl->stop();
}
exit(sig);
}
| {"hexsha": "0dbc5c42afbc92ce92ea827adfc0d1d19f68e9d9", "size": 19550, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Source/rvctools 1/robot/interfaces/crustcrawler/VisualServoing.cpp", "max_stars_repo_name": "Maria-Paulacf/PlumaBot", "max_stars_repo_head_hexsha": "d4bf2e667b88e955f40e33d55db2a8f22c35b47b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 227.0, "max_stars_repo_stars_event_min_datetime": "2021-01-20T05:34:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T12:43:05.000Z", "max_issues_repo_path": "Source/rvctools 1/robot/interfaces/crustcrawler/VisualServoing.cpp", "max_issues_repo_name": "Maria-Paulacf/PlumaBot", "max_issues_repo_head_hexsha": "d4bf2e667b88e955f40e33d55db2a8f22c35b47b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-04-22T05:56:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-26T06:00:17.000Z", "max_forks_repo_path": "Source/rvctools 1/robot/interfaces/crustcrawler/VisualServoing.cpp", "max_forks_repo_name": "Maria-Paulacf/PlumaBot", "max_forks_repo_head_hexsha": "d4bf2e667b88e955f40e33d55db2a8f22c35b47b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 239.0, "max_forks_repo_forks_event_min_datetime": "2021-01-28T02:59:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T08:02:17.000Z", "avg_line_length": 28.6656891496, "max_line_length": 80, "alphanum_fraction": 0.6124296675, "num_tokens": 5366} |
import re
import argparse
import emoji
import MeCab
import numpy as np
import matplotlib.pyplot as plt
mecab = MeCab.Tagger('-Ochasen')
letters_pattern = re.compile(r'[a-zA-Z]+')
bracket_pairs = [['[', ']'], ['(', ')'], ['「', '」'], ['『', '』'], ['(', ')'],
['(', ')'], ['(', ')']]
# HTML entity remnants (fullwidth) to strip, e.g. the non-breaking space
symbols = [' ', '<', '>', '&', '"', ''',
'¢', '£', '¥', '&euro']
def has_target_postag(node):
tokens = []
has_noun = False
has_adj = False
while node:
tokens.append(node.surface)
features = node.feature.split(',')
tag = features[0]
tag_type = features[1]
if tag == '名詞' and tag_type == '一般':
has_noun = True
#if tag == '形容詞':
#has_adj = True
node = node.next
return tokens[1:-1], has_noun # and has_adj
def has_en_word(tokens):
has_letter = False
for token in tokens:
if letters_pattern.findall(token):
has_letter = True
break
return has_letter
def remove_bracket_content(text, bracket_pairs):
low = 0
high = 0
for left_b, right_b in bracket_pairs:
low = text.find(left_b)
high = text.find(right_b, low)
while low != -1 and high != -1:
content = text[low:high + 1]
text = text.replace(content, '')
low = text.find(left_b)
high = text.find(right_b, low)
return text
def remove_special_symbol(text):
for symbol in symbols:
text = text.replace(symbol, '')
text = text.replace(symbol[:-1], '')
return text
def remove_emoji(text):
return emoji.get_emoji_regexp().sub(r'', text)
def main(args):
f = open(args.output_file, 'w')
freq_dict = dict()
token_sum = 0
sample_num = 0
for line in open(args.input_file):
items = line.strip().split('\t')
if len(items) != 2:
continue
image = items[0]
caption = items[1].replace(' ', '')
# Remove content inside the bracket pairs
caption = remove_bracket_content(caption, bracket_pairs)
# Remove special symbol
caption = remove_special_symbol(caption)
# Remove emoji
caption = remove_emoji(caption)
# Tokenize caption
node = mecab.parseToNode(caption)
tokens, postag_flag = has_target_postag(node)
# Filter the caption with specific topics or tags
if caption.find('【') != -1 and caption.find('】') != -1:
# print(f'{line.strip()}')
continue
if len(tokens) < 5 or len(tokens) > 20:
continue
if has_en_word(tokens):
# print(f'{line.strip()}')
continue
if postag_flag:
token_sum += len(tokens)
sample_num += 1
if len(tokens) not in freq_dict:
freq_dict[len(tokens)] = 1
else:
freq_dict[len(tokens)] += 1
new_line = image + '\t' + ' '.join(tokens)
f.write(new_line + '\n')
# print(f'{new_line}')
f.close()
average_len = token_sum * 1.0 / sample_num
print(f'Average token length -> {average_len}')
# Plot the frequency curve
ordered = sorted(freq_dict.items(), key=lambda tup: tup[0])
x = np.array([t[0] for t in ordered])
y = np.array([t[1] for t in ordered])
plt.switch_backend('agg')
plt.figure()
plt.plot(x, y)
plt.grid(True, linestyle=':')
plt.savefig('./freq-figure.jpg')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Clean Train Data')
parser.add_argument('-i', '--input-file', type=str)
parser.add_argument('-o', '--output-file', type=str, default='./output.txt')
args = parser.parse_args()
main(args)
| {"hexsha": "dbe54f8d627f9c590bb3316f3fbe3c593d5c92db", "size": 3836, "ext": "py", "lang": "Python", "max_stars_repo_path": "image-comment-generation/data/clean.py", "max_stars_repo_name": "stonyhu/Image-Commenting", "max_stars_repo_head_hexsha": "eb925a3f99075d8b74c6cabd125f7b9a1f9786d2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "image-comment-generation/data/clean.py", "max_issues_repo_name": "stonyhu/Image-Commenting", "max_issues_repo_head_hexsha": "eb925a3f99075d8b74c6cabd125f7b9a1f9786d2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "image-comment-generation/data/clean.py", "max_forks_repo_name": "stonyhu/Image-Commenting", "max_forks_repo_head_hexsha": "eb925a3f99075d8b74c6cabd125f7b9a1f9786d2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0606060606, "max_line_length": 80, "alphanum_fraction": 0.5573514077, "include": true, "reason": "import numpy", "num_tokens": 967} |
from scipy import spatial
# Find the distance between each embedding
def get_pairwise_dist(embeddings):
return spatial.distance.squareform(spatial.distance.pdist(embeddings, metric="cosine"))
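
# Example usage (added; shapes are illustrative): for `embeddings` of shape
# (n, d), get_pairwise_dist returns an (n, n) symmetric matrix whose (i, j)
# entry is the cosine distance between embeddings i and j, with zeros on the
# diagonal.
#
#   import numpy as np
#   emb = np.random.rand(4, 128)
#   dist = get_pairwise_dist(emb)   # dist.shape == (4, 4)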
| {"hexsha": "cf15412fe9b44f24408a1a6ad77545e5ccb9c23f", "size": 197, "ext": "py", "lang": "Python", "max_stars_repo_path": "similarity.py", "max_stars_repo_name": "Peter-Devine/text_finder", "max_stars_repo_head_hexsha": "b09ae796511dc1d000b07c12996d25576566e012", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "similarity.py", "max_issues_repo_name": "Peter-Devine/text_finder", "max_issues_repo_head_hexsha": "b09ae796511dc1d000b07c12996d25576566e012", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "similarity.py", "max_forks_repo_name": "Peter-Devine/text_finder", "max_forks_repo_head_hexsha": "b09ae796511dc1d000b07c12996d25576566e012", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8333333333, "max_line_length": 91, "alphanum_fraction": 0.8121827411, "include": true, "reason": "from scipy", "num_tokens": 40} |
import numpy as np
from core.buffer.replay_buffer import ReplayBuffer
def test_replay_buffer(mock_transition):
buffer_size = 10
memory = ReplayBuffer(buffer_size=buffer_size)
# test after init
assert memory.buffer_size == buffer_size
assert memory.buffer_index == 0
assert memory.size == 0
# test store
store_iteration = 15
for _ in range(store_iteration):
memory.store(mock_transition)
# test after store
assert memory.buffer_index == (store_iteration % buffer_size)
assert memory.size == min(buffer_size, store_iteration)
# test sample
batch_size = 8
sample_transitions = memory.sample(batch_size=batch_size)
assert isinstance(sample_transitions, dict)
for key, val in sample_transitions.items():
assert key in mock_transition[0].keys()
if isinstance(val, list):
for i, v in enumerate(val):
assert isinstance(v, np.ndarray)
assert v.shape == (batch_size, *mock_transition[0][key][i].shape[1:])
else:
assert isinstance(val, np.ndarray)
assert val.shape == (batch_size, *mock_transition[0][key].shape[1:])
| {"hexsha": "026d6ef7181b7626bb6c14f052acf2ba3a45ee56", "size": 1181, "ext": "py", "lang": "Python", "max_stars_repo_path": "jorldy/test/core/buffer/test_replay_buffer.py", "max_stars_repo_name": "zenoengine/JORLDY", "max_stars_repo_head_hexsha": "1eb867e52a03e0282a55fa612cbc5b5de701ffe7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 300, "max_stars_repo_stars_event_min_datetime": "2021-11-03T07:06:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T02:23:56.000Z", "max_issues_repo_path": "jorldy/test/core/buffer/test_replay_buffer.py", "max_issues_repo_name": "zenoengine/JORLDY", "max_issues_repo_head_hexsha": "1eb867e52a03e0282a55fa612cbc5b5de701ffe7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2021-11-04T04:31:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T01:40:49.000Z", "max_forks_repo_path": "jorldy/test/core/buffer/test_replay_buffer.py", "max_forks_repo_name": "zenoengine/JORLDY", "max_forks_repo_head_hexsha": "1eb867e52a03e0282a55fa612cbc5b5de701ffe7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2021-11-03T08:05:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T08:35:05.000Z", "avg_line_length": 31.9189189189, "max_line_length": 85, "alphanum_fraction": 0.6706181202, "include": true, "reason": "import numpy", "num_tokens": 264} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 01:30:26 2021
@author: alan
"""
import tensorflow as tf
import glob
import random
import tensorflow.keras.layers as layers
import numpy as np
from skimage.io import imread
import os
import matplotlib.pyplot as plt
import cv2
from datetime import datetime
from packaging import version
import datetime
from tensorboard.plugins.hparams import api as hp
import time
device_name = tf.test.gpu_device_name()
if not device_name:
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
tf.debugging.set_log_device_placement(True)
#Detecting GPU
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
def unet():
inputs = tf.keras.Input((112, 112, 3))
# Entry block
x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
previous_block_activation = x # Set aside residual
# Blocks 1, 2, 3 are identical apart from the feature depth.
for filters in [64, 128, 256]:
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
# Project residual
residual = layers.Conv2D(filters, 1, strides=2, padding="same")(
previous_block_activation
)
x = layers.add([x, residual]) # Add back residual
previous_block_activation = x # Set aside next residual
### [Second half of the network: upsampling inputs] ###
for filters in [256, 128, 64, 32]:
x = layers.Activation("relu")(x)
x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.UpSampling2D(2)(x)
# Project residual
residual = layers.UpSampling2D(2)(previous_block_activation)
residual = layers.Conv2D(filters, 1, padding="same")(residual)
x = layers.add([x, residual]) # Add back residual
previous_block_activation = x # Set aside next residual
# Add a per-pixel classification layer
outputs = layers.Conv2D(1, (1, 1), activation='sigmoid') (x)
# Define the model
model = tf.keras.Model(inputs, outputs)
return model
class data(tf.keras.utils.Sequence):
"""Helper to iterate over the data (as Numpy arrays)."""
def __init__(self, batch_size, img_size, input_img_paths, target_img_paths):
self.batch_size = batch_size
self.img_size = img_size
self.input_img_paths = input_img_paths
self.target_img_paths = target_img_paths
def __len__(self):
return len(self.target_img_paths) // self.batch_size
def __getitem__(self, idx):
"""Returns tuple (input, target) correspond to batch #idx."""
i = idx * self.batch_size
batch_input_img_paths = self.input_img_paths[i : i + self.batch_size]
batch_target_img_paths = self.target_img_paths[i : i + self.batch_size]
x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
for j, path in enumerate(batch_input_img_paths):
x[j] = plt.imread(path)
y = np.zeros((self.batch_size,) + self.img_size , dtype="uint8")
for j, path in enumerate(batch_target_img_paths):
img = plt.imread(path)
y[j] = img
return x, y
# Importing data
img_files = glob.glob('DATA/frames/*.png')
mask_files = [glob.glob('DATA/masks/' + os.path.basename(im))[0] for im in img_files]
N = len (img_files)
# Splitting data
ixRand = list(range(N))
random.shuffle(ixRand)
train_data = [img_files[e] for e in ixRand[:round(N*.8)]]
train_labels = [mask_files[e] for e in ixRand[:round(N*.8)]]
test_data = [img_files[e] for e in ixRand[round(N*.8):]]
test_labels = [mask_files[e] for e in ixRand[round(N*.8):]]
# Keras needs the data to come from an instance with __getitem__ and __len__ methods (a map-style dataset via tf.keras.utils.Sequence)
training_dataset = data(32,(112,112), train_data, train_labels)
val_dataset = data(32,(112,112), test_data, test_labels)
model = unet()
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
'''
tf.keras.utils.plot_model(
model, to_file='model.png', show_shapes=False, show_dtype=False,
show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96
)
'''
# Train the model, doing validation at the end of each epoch.
epochs = 20
start = time.time()
history = model.fit(training_dataset, epochs=epochs, validation_data=val_dataset)
end = time.time()
elapsed = end-start
#%%
# "Accuracy"
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('$Model_{Accuracy}$')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='lower right')
plt.show()
# "Loss"
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('$Model_{Loss}$')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
#%% Display some results
pred = model.predict(val_dataset)
savePred = [cv2.imwrite('DATA/pred/' + os.path.basename(test_data[i]), np.squeeze(np.array(pred[i]>.5, dtype='uint8'),-1) *255) for i in range (len(pred))]
plt.figure()
plt.subplot(121)
plt.imshow(np.squeeze(pred[6,:,:,:],-1) + cv2.cvtColor(plt.imread(test_data[6]), cv2.COLOR_BGR2GRAY))
plt.title('Prediction')
plt.subplot(122)
plt.imshow( cv2.cvtColor(plt.imread(test_data[6]), cv2.COLOR_BGR2GRAY) + plt.imread(test_labels[6]))
plt.title('Ground truth')
#%% Get metrics for evaluation of segmentation
'''
import seg_metrics.seg_metrics as sg
import csv
csv_file = 'metrics.csv'
pred_path = glob.glob('DATA/pred/*.png')
gdth_path = [glob.glob('DATA/masks/' + os.path.basename(im))[0] for im in pred_path]
metrics = [sg.write_metrics(labels = [255], gdth_path=gdth_path[i], pred_path=pred_path[i], csv_file=csv_file) for i in range(len(pred))]
keys = list(metrics[0].keys())
keys.remove('filename')
means = [ sum(d[k][0] for d in metrics) / len(metrics) for k in keys]
metrics_mean = dict (zip(keys,means))
with open('metrics_mean.csv', 'w') as f: # You will need 'wb' mode in Python 2.x
w = csv.DictWriter(f, metrics_mean.keys())
w.writeheader()
w.writerow(metrics_mean)
''' | {"hexsha": "0cec8392a4c8f81e84914c431af17f474b4de088", "size": 6639, "ext": "py", "lang": "Python", "max_stars_repo_path": "segmentacionCNN.py", "max_stars_repo_name": "alandgabriel/LV-Segmentation-with-U-Net", "max_stars_repo_head_hexsha": "7cfad5791e91321a1d4afb73559dbeeeeaee9347", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "segmentacionCNN.py", "max_issues_repo_name": "alandgabriel/LV-Segmentation-with-U-Net", "max_issues_repo_head_hexsha": "7cfad5791e91321a1d4afb73559dbeeeeaee9347", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "segmentacionCNN.py", "max_forks_repo_name": "alandgabriel/LV-Segmentation-with-U-Net", "max_forks_repo_head_hexsha": "7cfad5791e91321a1d4afb73559dbeeeeaee9347", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3150684932, "max_line_length": 157, "alphanum_fraction": 0.6826329266, "include": true, "reason": "import numpy", "num_tokens": 1743} |
# Copyright (c) 2017- Salas Lin (leVirve)
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import numpy as np
from scipy.optimize import linear_sum_assignment
np.seterr(divide='ignore', invalid='ignore')
def confusion_table(preds, labels, num_class: int):
''' Calculate the confusion matrix
    *credit: adapted from [chainer/chainercv] eval_semantic_segmentation.py
Args:
preds: tensor, ndarray
labels: tensor, ndarray
'''
confusion = np.zeros(num_class * num_class, dtype=np.int64)
def flatten(x):
if isinstance(x, np.ndarray):
return x.flatten()
return x.view(-1)
def numpy(x):
if isinstance(x, np.ndarray):
return x
return x.cpu().numpy()
for pred, label in zip(preds, labels):
pred, label = flatten(pred), flatten(label)
mask = label < 255
hist = num_class * label[mask] + pred[mask]
confusion += np.bincount(numpy(hist), minlength=num_class ** 2)
return confusion.reshape((num_class, num_class))
def intersection_over_union(confusion: np.ndarray):
iou_denominator = (confusion.sum(axis=1) + confusion.sum(axis=0) - np.diag(confusion))
return np.diag(confusion) / (iou_denominator)
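
# Worked example (added, illustrative): with the row-is-label,
# column-is-prediction convention used by confusion_table,
# confusion = [[3, 1], [2, 4]] gives class-0 IoU = 3 / (4 + 5 - 3) = 0.5
# and class-1 IoU = 4 / (6 + 5 - 4) = 4/7.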
def max_bipartite_matching_score(predictions: np.ndarray, targets: np.ndarray):
def to_numpy(x):
import torch
if torch.is_tensor(x):
return x.cpu().numpy()
return x
def _one_sample(prediction, target):
''' calculate the maximum bipartite matching between two labels
prediction: 2-D numpy array
target: 2-D numpy array
'''
pred_labels = np.unique(prediction)
gt_labels = np.unique(target)
cost = np.zeros((len(pred_labels), len(gt_labels)))
for i, p in enumerate(pred_labels):
p_mask = prediction == p
cost[i] = [-np.sum(p_mask & (target == g)) for g in gt_labels]
row_ind, col_ind = linear_sum_assignment(cost)
score = -cost[row_ind, col_ind].sum()
return score / target.size
predictions = np.squeeze(to_numpy(predictions))
targets = np.squeeze(to_numpy(targets))
if len(predictions.shape) == len(targets.shape) and len(predictions.shape) == 3:
scores = [_one_sample(p, t) for p, t in zip(predictions, targets)]
return np.mean(scores)
return _one_sample(predictions, targets)
class Metric():
def __init__(self, num_class, only_scalar=False, prefix='acc/'):
self.num_class = num_class
self.only_scalar = only_scalar
self.prefix = prefix
def __call__(self, output, target):
'''
output: Variable
target: Variable
'''
confusion = confusion_table(output, target, num_class=self.num_class)
iou = intersection_over_union(confusion)
pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)
if self.only_scalar:
return {f'{self.prefix}miou': np.nanmean(iou),
f'{self.prefix}pixel': pixel_accuracy,
f'{self.prefix}mean_class': np.nanmean(class_accuracy)}
else:
return {'iou': iou, 'miou': np.nanmean(iou),
'pixel_accuracy': pixel_accuracy,
'class_accuracy': class_accuracy,
'mean_class_accuracy': np.nanmean(class_accuracy)}
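
# Example usage (added; num_class is illustrative):
#   metric = Metric(num_class=21, only_scalar=True)
#   scores = metric(predictions, targets)
#   # -> {'acc/miou': ..., 'acc/pixel': ..., 'acc/mean_class': ...}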
| {"hexsha": "b6434705a9c84e6382a3b4cbf62adb6db847cd45", "size": 3520, "ext": "py", "lang": "Python", "max_stars_repo_path": "onegan/metrics/semantic_segmentation.py", "max_stars_repo_name": "leVirve/OneGAN", "max_stars_repo_head_hexsha": "e0d5f387c957fbf599919078d8c6277740015336", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-01-26T08:58:10.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-03T20:44:06.000Z", "max_issues_repo_path": "onegan/metrics/semantic_segmentation.py", "max_issues_repo_name": "leVirve/OneGAN", "max_issues_repo_head_hexsha": "e0d5f387c957fbf599919078d8c6277740015336", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-08-13T03:02:13.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-20T04:15:13.000Z", "max_forks_repo_path": "onegan/metrics/semantic_segmentation.py", "max_forks_repo_name": "leVirve/OneGAN", "max_forks_repo_head_hexsha": "e0d5f387c957fbf599919078d8c6277740015336", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-11-21T07:44:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-08T14:15:06.000Z", "avg_line_length": 32.2935779817, "max_line_length": 90, "alphanum_fraction": 0.6261363636, "include": true, "reason": "import numpy,from scipy", "num_tokens": 795} |
# Pre-image for Gaussian kernel
# From Kwok and Tsang, "The Pre-Image problem in kernel methods", ICML 2003
# (based on matlab code provided by authors)
# Also:
# Mika, et al. "Kernel PCA and Denoising in Feature Spaces", NIPS 1998
# and
# Teixeira et al. "KPCA Denoising and the pre-image problem revisited", DSP 2008
#
# Danny Perry ([email protected])
# May 2015
using Debug
include("Kernels.jl")
# Fixed point method to preimage
# y - projected data (trying to recover into input space)
# U - eigenvectors of K
# X - training data
# z_init - initial guess at z
# sigma - Gaussian kernel parameter
# tolerance - convergence criteria
# maxiters - max number of iters
function GaussianKernelPreImage(y,U, X, z_init, sigma, tolerance, maxiters)
gamma = U * y # results in n x 1 vector
z = copy(z_init)
last_z = copy(z)
iter = 0
for iter=1:maxiters
last_z = copy(z)
diff = X - ones(size(X,1),1)*z
diff .^= 2
kz = gamma .* exp( -sum(diff,2) ./ sigma^2)
if sum(kz) == 0
println("Preimage fixed point iteration failed: initial guess too far away - reverting to initial guess.")
return z_init # can't get any closer, returning initial guess.
end
z = sum(X .* (kz*ones(1,size(X,2))),1)/sum(kz)
if norm(last_z-z)/sqrt(norm(z)*norm(last_z)) < tolerance
break
end
end
if false
println("iters: ", iter)
println("err: ", norm(last_z-z)/sqrt(norm(z)*norm(last_z)) , " < ", tolerance)
end
if iter == maxiters
println("warning, did not converge.")
end
return z, iter
end
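# Note (added): each fixed-point step above computes the kernel-weighted mean
#   z_{t+1} = sum_i k_i * x_i / sum_i k_i,  k_i = gamma_i * exp(-||x_i - z_t||^2 / sigma^2),
# which is the stationarity condition of the Gaussian-kernel pre-image
# objective (cf. Mika et al., 1998, cited in the header).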
# Linear algebra approach to preimage
function GaussianKernelPreImage(distsq, X, neibsize)
sidx = sortperm(distsq)
sdistsq = distsq[sidx]
XH = X[sidx[1:neibsize],:]
Xmean = mean(XH,1)
XH = XH - ones(neibsize,1) * Xmean # centered around neighborhood mean
UM,SM,VM = svd(XH')
rankM = rank(diagm(SM),1e-5)
UM = UM[:,1:rankM]
SM = SM[1:rankM]
VM = VM[:,1:rankM]
transf = UM*diagm(1./SM)*(VM'./2)
sd0 = zeros(neibsize,1)
ZM = diagm(SM)*VM'
for i=1:neibsize
sd0[i] = (ZM[:,i]'*ZM[:,i])[1]
end
result = transf * (vec(sd0) - vec(sdistsq[1:neibsize])) + vec(Xmean)
end
# X - training data (n x d)
# K - uncentered training data Gram (n x n)
# Ktest - uncentered test data Gram with training (n x nt)
function GaussianKernelPreImage(X,K,Ktest, neibsize, sigma)
n = size(K,1)
nt = size(Ktest,2)
d = size(X,2)
spectrum_pct = 0
target_dim = n
centering = "additive"
Kc,P,V,S,Y = KernelPCA(K, spectrum_pct, target_dim, centering)
H = eye(n)-ones(n,n)/n # centering matrix
HMH = H*P*P'*H
cK = mean(K,2)
meanK = mean(cK)
result = zeros(nt,d)
neibs = zeros(Int64, nt, neibsize)
for j=1:nt
# calculate the distance between the testing point and training points
k_x = Ktest[:,j]
gammaC = HMH*(k_x-cK);
PphiNormC = ((k_x+cK)'*gammaC + meanK)[1]
d2 = zeros(n);
for i = 1:n
PphiProjC = (K[i,:]*gammaC)[1]+cK[i]
d2[i] = -log(abs((1-PphiNormC+2*PphiProjC)/2))*(sigma*2)
end
result[j,:] = GaussianKernelPreImage(d2,X,neibsize);
closestind = sortperm(d2)
neibs[j,:] = closestind[1:neibsize]
end
return result,neibs
end
function GaussianKernelDenoise(X,Xtest, iters, neibsize, sigma)
n = size(X,1)
nt = size(Xtest,1)
d = size(X,2)
K = GaussianKernel(X,X,sigma)
spectrum_pct = 0
target_dim = n
centering = "additive"
Kc,P,V,S,Y = KernelPCA(K, spectrum_pct, target_dim, centering)
H = eye(n)-ones(n,n)/n # centering matrix
HMH = H*P*P'*H
cK = mean(K,2)
meanK = mean(cK)
result = copy(Xtest)
neibs = zeros(Int64, nt, neibsize)
for iter=1:iters
for j=1:nt
# calculate the distance between the testing point and training points
k_x = GaussianKernel(X,Xtest[j,:],sigma)
gammaC = HMH*(k_x-cK);
PphiNormC = ((k_x+cK)'*gammaC + meanK)[1]
d2 = zeros(n);
for i = 1:n
PphiProjC = (vec(K[i,:])'*vec(gammaC)+cK[i])[1]
d2[i] = -log((1-PphiNormC+2*PphiProjC)/2)*sigma
end
result[j,:] = GaussianKernelPreImage(d2,X,neibsize);
closestind = sortperm(d2)
neibs[j,:] = closestind[1:neibsize]
end
end
return result,neibs
end
| {"hexsha": "91a4b0374c0e56afcd4ed27c084140bb0e33d2cb", "size": 4055, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/PreImage.jl", "max_stars_repo_name": "daniel-perry/Kernel.jl", "max_stars_repo_head_hexsha": "da7255ffe7b9e1341d4f2decc82128451dc3d383", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/PreImage.jl", "max_issues_repo_name": "daniel-perry/Kernel.jl", "max_issues_repo_head_hexsha": "da7255ffe7b9e1341d4f2decc82128451dc3d383", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-07-12T17:40:45.000Z", "max_issues_repo_issues_event_max_datetime": "2016-07-12T17:40:45.000Z", "max_forks_repo_path": "src/PreImage.jl", "max_forks_repo_name": "daniel-perry/Kernel.jl", "max_forks_repo_head_hexsha": "da7255ffe7b9e1341d4f2decc82128451dc3d383", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8773006135, "max_line_length": 109, "alphanum_fraction": 0.6577065351, "num_tokens": 1433} |
#!/usr/bin/env python
import numpy as np
def get_input(prompt, default):
return input(prompt) or str(default)
N = int(get_input('Number of NUWS dimensions [1]: ', 1))
cos_power = int(get_input('Power of window function, n (cos^n) [2]: ',2))
Nmax = int(get_input('Maximum number of repeats [16]: ', 16))
print('Please enter time domain sizes as REAL points')
if N==1:
td1 = int(get_input('td (nominal) [64]: ', 64)) // 2
td2 = 1
else:
td1 = int(get_input('td (outer loop, nominal) [64]: ', 64)) // 2
td2 = int(get_input('td (inner loop, nominal) [16]: ', 16)) // 2
# calculate effective td sizes
td1eff=td1
td2eff=td2
for i in range(td1):
if int(round(Nmax * np.cos(i/td1*np.pi/2)**cos_power))==0:
td1eff = i
break
if N>1:
for i in range(td2):
if int(round(Nmax * np.cos(i/td2*np.pi/2)**cos_power))==0:
td2eff = i
break
print()
total_cycles = 0
# now calculate vc lists
if N==1: # 2D
for i in range(td1eff):
w1 = np.cos(i/td1*np.pi/2)**cos_power
c = int(round(Nmax*w1))
print(c)
print(c) # second copy for complex points
total_cycles += 2*c
else: # 3D
for i in range(td1eff): # outer loop
w1 = np.cos(i/td1*np.pi/2)**cos_power
for i2 in range(2): # complex pts
for j in range(td2eff): # inner loop
w2 = np.cos(j/td2*np.pi/2)**cos_power
c = int(round(Nmax * w1 * w2))
print(c)
print(c) # second copy for complex points
total_cycles += 2*c
print()
print('Effective td1 = ' + str(2*td1eff))
if N>1:
print('Effective td2 = ' + str(2*td2eff))
print('Total cycles = ' + str(total_cycles))
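
# Worked example (added, illustrative): with the defaults (Nmax=16, n=2,
# td=64, so td1=32 complex increments), increment i is repeated
# round(16 * cos(i/32 * pi/2)**2) times, i.e. 16 scans at i=0, falling
# smoothly toward 0 near the end of the schedule.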
| {"hexsha": "2669a329c656d228dc07c5c54eb98fa9b7e60c61", "size": 1752, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/make-nuws.py", "max_stars_repo_name": "chriswaudby/pp", "max_stars_repo_head_hexsha": "a1da83b5cba5ebb5e42b846478dc4bce8bace875", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "util/make-nuws.py", "max_issues_repo_name": "chriswaudby/pp", "max_issues_repo_head_hexsha": "a1da83b5cba5ebb5e42b846478dc4bce8bace875", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util/make-nuws.py", "max_forks_repo_name": "chriswaudby/pp", "max_forks_repo_head_hexsha": "a1da83b5cba5ebb5e42b846478dc4bce8bace875", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-09-28T13:50:14.000Z", "max_forks_repo_forks_event_max_datetime": "2017-09-28T13:50:14.000Z", "avg_line_length": 28.2580645161, "max_line_length": 73, "alphanum_fraction": 0.5667808219, "include": true, "reason": "import numpy", "num_tokens": 548} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
from models.losses import FocalLoss
from models.losses import RegL1Loss, RegLoss, NormRegL1Loss, RegWeightedL1Loss
from models.decode import ctdet_decode
from models.utils import _sigmoid
from utils.debugger import Debugger
from utils.post_process import ctdet_post_process
from utils.oracle_utils import gen_oracle_map
from .base_trainer import BaseTrainer
class CtdetLoss(torch.nn.Module):
def __init__(self, opt):
super(CtdetLoss, self).__init__()
self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
RegLoss() if opt.reg_loss == 'sl1' else None
self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
NormRegL1Loss() if opt.norm_wh else \
RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
self.opt = opt
def forward(self, outputs, batch):
opt = self.opt
    # heatmap loss (hm_loss), size loss (wh_loss), offset loss (off_loss)
hm_loss, wh_loss, off_loss = 0, 0, 0
for s in range(opt.num_stacks):
output = outputs[s]
if not opt.mse_loss:
output['hm'] = _sigmoid(output['hm'])
if opt.eval_oracle_hm:
output['hm'] = batch['hm']
if opt.eval_oracle_wh:
output['wh'] = torch.from_numpy(gen_oracle_map(
batch['wh'].detach().cpu().numpy(),
batch['ind'].detach().cpu().numpy(),
output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)
if opt.eval_oracle_offset:
output['reg'] = torch.from_numpy(gen_oracle_map(
batch['reg'].detach().cpu().numpy(),
batch['ind'].detach().cpu().numpy(),
output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)
hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
if opt.wh_weight > 0:
if opt.dense_wh:
mask_weight = batch['dense_wh_mask'].sum() + 1e-4
wh_loss += (
self.crit_wh(output['wh'] * batch['dense_wh_mask'],
batch['dense_wh'] * batch['dense_wh_mask']) /
mask_weight) / opt.num_stacks
elif opt.cat_spec_wh:
wh_loss += self.crit_wh(
output['wh'], batch['cat_spec_mask'],
batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
else:
wh_loss += self.crit_reg(
output['wh'], batch['reg_mask'],
batch['ind'], batch['wh']) / opt.num_stacks
if opt.reg_offset and opt.off_weight > 0:
off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
batch['ind'], batch['reg']) / opt.num_stacks
loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
opt.off_weight * off_loss
loss_stats = {'loss': loss, 'hm_loss': hm_loss,
'wh_loss': wh_loss, 'off_loss': off_loss}
return loss, loss_stats
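
# Note (added): the total objective assembled above is
#   loss = hm_weight * hm_loss + wh_weight * wh_loss + off_weight * off_loss,
# i.e. focal (or MSE) loss on the center heatmap plus L1-style regression
# losses on the box size and the sub-pixel center offset.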
class CtdetTrainer(BaseTrainer):
def __init__(self, opt, model, optimizer=None):
super(CtdetTrainer, self).__init__(opt, model, optimizer=optimizer)
def _get_losses(self, opt):
loss_states = ['loss', 'hm_loss', 'wh_loss', 'off_loss']
loss = CtdetLoss(opt)
return loss_states, loss
def debug(self, batch, output, iter_id):
opt = self.opt
reg = output['reg'] if opt.reg_offset else None
dets = ctdet_decode(
output['hm'], output['wh'], reg=reg,
cat_spec_wh=opt.cat_spec_wh, K=opt.K)
dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
dets[:, :, :4] *= opt.down_ratio
dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
dets_gt[:, :, :4] *= opt.down_ratio
for i in range(1):
debugger = Debugger(
dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme)
img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
img = np.clip(((
img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm')
debugger.add_blend_img(img, gt, 'gt_hm')
debugger.add_img(img, img_id='out_pred')
for k in range(len(dets[i])):
if dets[i, k, 4] > opt.center_thresh:
debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
dets[i, k, 4], img_id='out_pred')
debugger.add_img(img, img_id='out_gt')
for k in range(len(dets_gt[i])):
if dets_gt[i, k, 4] > opt.center_thresh:
debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
dets_gt[i, k, 4], img_id='out_gt')
if opt.debug == 4:
# print(opt.debug_dir)
debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
else:
debugger.show_all_imgs(pause=True)
def save_result(self, output, batch, results):
reg = output['reg'] if self.opt.reg_offset else None
dets = ctdet_decode(
output['hm'], output['wh'], reg=reg,
cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
dets_out = ctdet_post_process(
dets.copy(), batch['meta']['c'].cpu().numpy(),
batch['meta']['s'].cpu().numpy(),
output['hm'].shape[2], output['hm'].shape[3], output['hm'].shape[1])
results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0] | {"hexsha": "9c10d03971a33006c42d75000aaf91d4f16f01dc", "size": 5626, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/lib/trains/ctdet.py", "max_stars_repo_name": "hrlblab/CircleNet", "max_stars_repo_head_hexsha": "219aa47fa4dc4f362b28448c0dcd41b29c4f1166", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2020-10-28T10:29:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T17:32:01.000Z", "max_issues_repo_path": "detection/lib/trains/ctdet.py", "max_issues_repo_name": "hrlblab/Glo-In-One", "max_issues_repo_head_hexsha": "7daef49c557bccd6f5c956b88603357346dc78a2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-09-14T01:26:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T00:07:15.000Z", "max_forks_repo_path": "src/lib/trains/ctdet.py", "max_forks_repo_name": "hrlblab/CircleNet", "max_forks_repo_head_hexsha": "219aa47fa4dc4f362b28448c0dcd41b29c4f1166", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-11-13T01:55:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T11:56:00.000Z", "avg_line_length": 41.9850746269, "max_line_length": 78, "alphanum_fraction": 0.6112691077, "include": true, "reason": "import numpy", "num_tokens": 1575} |
# -*- coding: utf-8 -*-
"""
@File    : generator.py
@Time    : 2019/12/22 8:22 PM
@Author  : yizuotian
@Description : Chinese text data generator
"""
import random
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from torch.utils.data.dataset import Dataset
from fontutils import FONT_CHARS_DICT
def random_color(lower_val, upper_val):
return [random.randint(lower_val, upper_val),
random.randint(lower_val, upper_val),
random.randint(lower_val, upper_val)]
def put_text(image, x, y, text, font, color=None):
"""
写中文字
:param image:
:param x:
:param y:
:param text:
:param font:
:param color:
:return:
"""
im = Image.fromarray(image)
draw = ImageDraw.Draw(im)
color = (255, 0, 0) if color is None else color
draw.text((x, y), text, color, font=font)
return np.array(im)
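# Usage sketch for put_text (font file as used in test_font_size below):
#   img = put_text(img, 10, 10, '你好', ImageFont.truetype('fonts/simsun.ttc', 24))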
class Generator(Dataset):
def __init__(self, alpha, direction='horizontal'):
"""
:param alpha: 所有字符
:param direction: 文字方向:horizontal|vertical
"""
super(Generator, self).__init__()
self.alpha = alpha
self.direction = direction
self.alpha_list = list(alpha)
self.min_len = 5
self.max_len_list = [16, 19, 24, 26]
self.max_len = max(self.max_len_list)
self.font_size_list = [30, 25, 20, 18]
self.font_path_list = list(FONT_CHARS_DICT.keys())
        self.font_list = []  # 2-D list indexed as [size_idx][font_idx]
for size in self.font_size_list:
self.font_list.append([ImageFont.truetype(font_path, size=size)
for font_path in self.font_path_list])
if self.direction == 'horizontal':
self.im_h = 32
self.im_w = 512
else:
self.im_h = 512
self.im_w = 32
def gen_background(self):
"""
生成背景;随机背景|纯色背景|合成背景
:return:
"""
a = random.random()
pure_bg = np.ones((self.im_h, self.im_w, 3)) * np.array(random_color(0, 100))
random_bg = np.random.rand(self.im_h, self.im_w, 3) * 100
if a < 0.1:
return random_bg
elif a < 0.8:
return pure_bg
else:
b = random.random()
mix_bg = b * pure_bg + (1 - b) * random_bg
return mix_bg
def horizontal_draw(self, draw, text, font, color, char_w, char_h):
"""
水平方向文字合成
:param draw:
:param text:
:param font:
:param color:
:param char_w:
:param char_h:
:return:
"""
text_w = len(text) * char_w
h_margin = max(self.im_h - char_h, 1)
w_margin = max(self.im_w - text_w, 1)
x_shift = np.random.randint(0, w_margin)
y_shift = np.random.randint(0, h_margin)
i = 0
while i < len(text):
draw.text((x_shift, y_shift), text[i], color, font=font)
i += 1
x_shift += char_w
y_shift = np.random.randint(0, h_margin)
            # stop if the next character would run past the image border
if x_shift + char_w > self.im_w:
break
return text[:i]
def vertical_draw(self, draw, text, font, color, char_w, char_h):
"""
锤子方向文字生成
:param draw:
:param text:
:param font:
:param color:
:param char_w:
:param char_h:
:return:
"""
text_h = len(text) * char_h
h_margin = max(self.im_h - text_h, 1)
w_margin = max(self.im_w - char_w, 1)
x_shift = np.random.randint(0, w_margin)
y_shift = np.random.randint(0, h_margin)
i = 0
while i < len(text):
draw.text((x_shift, y_shift), text[i], color, font=font)
i += 1
x_shift = np.random.randint(0, w_margin)
y_shift += char_h
            # stop if the next character would run past the image border
if y_shift + char_h > self.im_h:
break
return text[:i]
def draw_text(self, draw, text, font, color, char_w, char_h):
if self.direction == 'horizontal':
return self.horizontal_draw(draw, text, font, color, char_w, char_h)
return self.vertical_draw(draw, text, font, color, char_w, char_h)
def gen_image(self):
idx = np.random.randint(len(self.max_len_list))
image = self.gen_background()
image = image.astype(np.uint8)
target_len = int(np.random.uniform(self.min_len, self.max_len_list[idx], size=1))
        # randomly pick a font size and a font face
size_idx = np.random.randint(len(self.font_size_list))
font_idx = np.random.randint(len(self.font_path_list))
font = self.font_list[size_idx][font_idx]
font_path = self.font_path_list[font_idx]
        # sample target_len characters from the chosen font's printable set
text = np.random.choice(FONT_CHARS_DICT[font_path], target_len)
text = ''.join(text)
        # estimate character width and height for this text
w, char_h = font.getsize(text)
char_w = int(w / len(text))
        # render the text onto the background
im = Image.fromarray(image)
draw = ImageDraw.Draw(im)
color = tuple(random_color(105, 255))
text = self.draw_text(draw, text, font, color, char_w, char_h)
        target_len = len(text)  # target_len may have shrunk if the text was clipped
        # class indices for each character
indices = np.array([self.alpha.index(c) for c in text])
        # convert to grayscale
image = np.array(im)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # randomly invert brightness
if random.random() > 0.5:
image = 255 - image
return image, indices, target_len
def __getitem__(self, item):
image, indices, target_len = self.gen_image()
if self.direction == 'horizontal':
image = np.transpose(image[:, :, np.newaxis], axes=(2, 1, 0)) # [H,W,C]=>[C,W,H]
else:
image = np.transpose(image[:, :, np.newaxis], axes=(2, 0, 1)) # [H,W,C]=>[C,H,W]
        # normalize to [-1, 1]
image = image.astype(np.float32) / 255.
image -= 0.5
image /= 0.5
        target = np.zeros(shape=(self.max_len,), dtype=np.int64)  # np.long was removed from recent numpy
target[:target_len] = indices
if self.direction == 'horizontal':
input_len = self.im_w // 4 - 3
else:
input_len = self.im_w // 16 - 1
return image, target, input_len, target_len
def __len__(self):
return len(self.alpha) * 100
def test_image_gen(direction='vertical'):
from config import cfg
gen = Generator(cfg.word.get_all_words()[:10], direction=direction)
for i in range(10):
im, indices, target_len = gen.gen_image()
# cv2.imwrite('output/{}-{:03d}.jpg'.format(direction, i + 1), im)
print(''.join([gen.alpha[j] for j in indices]))
def test_gen():
from data.words import Word
gen = Generator(Word().get_all_words())
for x in gen:
print(x[1])
def test_font_size():
font = ImageFont.truetype('fonts/simsun.ttc')
print(font.size)
font.size = 20
print(font.size)
if __name__ == '__main__':
test_image_gen('horizontal')
# test_image_gen('vertical')
# test_gen()
# test_font_size()
| {"hexsha": "632d1a859abe80752ea5bb45da5c96aecc64a69f", "size": 7090, "ext": "py", "lang": "Python", "max_stars_repo_path": "generator.py", "max_stars_repo_name": "yizt/crnn.pytorch", "max_stars_repo_head_hexsha": "2f626841f35c8f69a23518ee2496554cac080cff", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 197, "max_stars_repo_stars_event_min_datetime": "2020-01-11T06:45:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T01:51:46.000Z", "max_issues_repo_path": "generator.py", "max_issues_repo_name": "17865135532/crnn.pytorch", "max_issues_repo_head_hexsha": "2f626841f35c8f69a23518ee2496554cac080cff", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2020-03-16T04:53:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T11:00:17.000Z", "max_forks_repo_path": "generator.py", "max_forks_repo_name": "17865135532/crnn.pytorch", "max_forks_repo_head_hexsha": "2f626841f35c8f69a23518ee2496554cac080cff", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 55, "max_forks_repo_forks_event_min_datetime": "2020-03-02T12:03:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-24T08:04:11.000Z", "avg_line_length": 30.6926406926, "max_line_length": 93, "alphanum_fraction": 0.5662905501, "include": true, "reason": "import numpy", "num_tokens": 1956} |
from keras.models import load_model
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.utils import np_utils, generic_utils, to_categorical
import keras
import keras.backend as K
import numpy as np
import sys
nb_classes = 10
X_test = np.load(sys.argv[1])
Y_test = np.load(sys.argv[2])
Y_test = to_categorical(Y_test, nb_classes)
img_channels = 3
img_rows = 112
img_cols = 112
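# reshape flat samples to (N, H, W, C) as expected by Conv2D layers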
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
print('X_test shape:', X_test.shape)
print('Y_test shape:', Y_test.shape)
model = load_model(sys.argv[3])
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', round(score[0], 4))
print('Test accuracy:', round(score[1], 4) * 100)
| {"hexsha": "41b29c5438c65a899dde510ce8ec5290216632c7", "size": 1016, "ext": "py", "lang": "Python", "max_stars_repo_path": "Assignment06/test.py", "max_stars_repo_name": "Ericbrod10/Deep-Learning", "max_stars_repo_head_hexsha": "5b0a01597ce19f2da5bf45b76023b898c494f46a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Assignment06/test.py", "max_issues_repo_name": "Ericbrod10/Deep-Learning", "max_issues_repo_head_hexsha": "5b0a01597ce19f2da5bf45b76023b898c494f46a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Assignment06/test.py", "max_forks_repo_name": "Ericbrod10/Deep-Learning", "max_forks_repo_head_hexsha": "5b0a01597ce19f2da5bf45b76023b898c494f46a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8823529412, "max_line_length": 118, "alphanum_fraction": 0.7785433071, "include": true, "reason": "import numpy", "num_tokens": 267} |
classdef ControlMode < Simulink.IntEnumType
enumeration
None(0)
Manual(1)
Acro(2)
Stabilize(3)
ALTCTL(4)
POSCTL(5)
Offboard(6)
end
methods (Static)
function defaultValue = getDefaultValue()
% GETDEFAULTVALUE Returns the default enumerated value.
% If this method is not defined, the first enumeration is used.
defaultValue = ControlMode.None;
end
function dScope = getDataScope()
% GETDATASCOPE Specifies whether the data type definition should be imported from,
% or exported to, a header file during code generation.
dScope = 'Auto';
end
function desc = getDescription()
% GETDESCRIPTION Returns a description of the enumeration.
desc = 'enumeration of control mode';
end
function fileName = getHeaderFile()
% GETHEADERFILE Returns path to header file if non-empty.
fileName = '';
end
function flag = addClassNameToEnumNames()
% ADDCLASSNAMETOENUMNAMES Indicate whether code generator applies the class name as a prefix
% to the enumeration.
flag = true;
end
end
end | {"author": "Firmament-Autopilot", "repo": "FMT-Model", "sha": "adb85b9379cb4268f60bd8414f35aacfbdf8dec1", "save_path": "github-repos/MATLAB/Firmament-Autopilot-FMT-Model", "path": "github-repos/MATLAB/Firmament-Autopilot-FMT-Model/FMT-Model-adb85b9379cb4268f60bd8414f35aacfbdf8dec1/bus/enum/ControlMode.m"} |
from pymc import *
from numpy import ones, array
# Samples for each dose level
n = 5 * ones(4, dtype=int)
# Log-dose
dose = array([-.86, -.3, -.05, .73])
# Logit-linear model parameters
alpha = Normal('alpha', 0, 0.01)
beta = Normal('beta', 0, 0.01)
# Calculate probabilities of death
theta = Lambda('theta', lambda a=alpha, b=beta, d=dose: invlogit(a + b * d))
# Data likelihood
deaths = Binomial(
    'deaths',
    n=n,
    p=theta,
    value=array([0, 1, 3, 5], dtype=float),
    observed=True)
# Calculate LD50
LD50 = Lambda('LD50', lambda a=alpha, b=beta: -a / b)
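# A minimal sampling sketch (assuming the PyMC 2.x API): fit the model with
# MCMC and inspect the posterior summary of LD50.
if __name__ == '__main__':
    M = MCMC([alpha, beta, theta, deaths, LD50])
    M.sample(iter=10000, burn=5000)
    print(M.stats()['LD50'])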
| {"hexsha": "422d7a35ebc83cfaa94575b141bc8e5f9d476904", "size": 641, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymc/examples/gelman_bioassay.py", "max_stars_repo_name": "kyleabeauchamp/pymc", "max_stars_repo_head_hexsha": "6ce0094584f1fa00eed0b2ecee533c2fb7f190d6", "max_stars_repo_licenses": ["AFL-3.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-06T08:17:20.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-06T08:17:20.000Z", "max_issues_repo_path": "pymc/examples/gelman_bioassay.py", "max_issues_repo_name": "kyleabeauchamp/pymc", "max_issues_repo_head_hexsha": "6ce0094584f1fa00eed0b2ecee533c2fb7f190d6", "max_issues_repo_licenses": ["AFL-3.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-14T08:57:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-14T14:55:57.000Z", "max_forks_repo_path": "pymc/examples/gelman_bioassay.py", "max_forks_repo_name": "kyleabeauchamp/pymc", "max_forks_repo_head_hexsha": "6ce0094584f1fa00eed0b2ecee533c2fb7f190d6", "max_forks_repo_licenses": ["AFL-3.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-07-05T04:56:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-02T03:17:05.000Z", "avg_line_length": 21.3666666667, "max_line_length": 76, "alphanum_fraction": 0.5694227769, "include": true, "reason": "from numpy", "num_tokens": 199} |
###################################################################
# Imports and inits                                               #
###################################################################
import streamlit as st
import yfinance as yf
import pandas as pd
import numpy as np
import plotly.express as px
###################################################################
# Functions                                                       #
###################################################################
@st.cache(allow_output_mutation=True)
def yf_get_data(tickers, period, interval):
    '''Fetch symbol data from Yahoo Finance.'''
return yf.download(tickers=tickers, period=period, interval=interval)
@st.cache
def yf_is_symbol(symbol):
    '''Return True if Yahoo Finance has data for the given symbol.'''
s = yf.Ticker(symbol)
return not (s.info['regularMarketPrice'] == None)
def yf_dataframe(symbol='usdbrl=x', period='2y', interval='1d'):
    '''Prepare the request and return a DataFrame with Yahoo Finance data for the given symbol.'''
symbol = symbol + '.sa' if not yf_is_symbol(symbol) else symbol
d = pd.DataFrame() if not yf_is_symbol(
symbol) else yf_get_data(symbol, period, interval)
return d
def cooking(df):
    '''Clean the data: build the daily Range column and drop the unneeded OHLCV columns.'''
if len(df) > 0:
        st.write('Data collected. Processing...')
if 'High' in df:
df['Range'] = abs(df.High - df.Low)
df.drop(['Open', 'High', 'Low', 'Close',
'Adj Close', 'Volume'], axis=1, inplace=True)
# st.write(df.head())
else:
            st.write('Oops! Could not find data for this ticker. The symbol may have changed or been delisted. Check it and try again.')
return False
return True
def format_link(text='', name='', url=''):
    '''Return an f-string with text and a markdown link to url.'''
return f'{text}[{name}]({url})'
def app_header():
    '''Render the page header; in this case it shows the input field for the ticker to look up.'''
    st.title('Daily frequency of price variation')
    i = st.text_input(
        'Enter the ticker code: ', 'PETR4')
st.markdown(
        format_link('Use the format from ', 'Yahoo Finance',
'https://br.financas.yahoo.com'),
unsafe_allow_html=True)
return i
###################################################################
# Main Code                                                       #
###################################################################
# Render the page header
s = app_header()
# Fetch the data and build the DataFrame
df = yf_dataframe(symbol=s)
# If the data is valid, show the corresponding histogram
if cooking(df):
fig = px.histogram(df,
x="Range",
nbins=250,
                       labels={'Range': 'Daily range',
                               'Count': 'Frequency'}
)
st.plotly_chart(fig)
| {"hexsha": "828b56074b60e1d17662a677c47a1baf95d292a2", "size": 3114, "ext": "py", "lang": "Python", "max_stars_repo_path": "app-st.py", "max_stars_repo_name": "appznoix/streamlit-yfinance-range-hist", "max_stars_repo_head_hexsha": "4f8a4f513a2eec47299f0530f0c688c2707b38cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app-st.py", "max_issues_repo_name": "appznoix/streamlit-yfinance-range-hist", "max_issues_repo_head_hexsha": "4f8a4f513a2eec47299f0530f0c688c2707b38cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app-st.py", "max_forks_repo_name": "appznoix/streamlit-yfinance-range-hist", "max_forks_repo_head_hexsha": "4f8a4f513a2eec47299f0530f0c688c2707b38cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6352941176, "max_line_length": 138, "alphanum_fraction": 0.5208734746, "include": true, "reason": "import numpy", "num_tokens": 670} |
import math
import numpy as np
import tensorflow as tf
def identity_initializer(scale=1.0):
"""Identity initializer by Quoc V. Le et al.
This is also recommended by at least one paper to initialize
the weights matrix in a RNN.
References:
Paper: Quoc V. Le et al., http://arxiv.org/abs/1504.00941
Parameters
----------
scale: float, optional
The scale of the indentity values.
Returns
----------
_initializer: function
Returns the init function.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None):
if len(shape) == 1:
return tf.constant(0., dtype=dtype, shape=shape)
elif len(shape) == 2 and shape[0] == shape[1]:
            return tf.constant(scale * np.identity(shape[0]), dtype=dtype)
elif len(shape) == 4 and shape[2] == shape[3]:
array = np.zeros(shape, dtype=float)
            cx, cy = shape[0] // 2, shape[1] // 2  # integer center indices
for i in range(shape[2]):
array[cx, cy, i, i] = 1
return tf.constant(scale*array, dtype=dtype)
else:
raise ValueError("Invalid shape.")
return _initializer
def _orthogonal(shape):
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
# pick the one with the correct shape
q = u if u.shape == flat_shape else v
    return q.reshape(shape).astype(np.float32)
def orthogonal_initializer(scale=1.0):
"""Orthogonal initializer by Saxe et al.
This initialization is recommended for initializing the
hidden weights in a RNN.
References:
From Lasagne and Keras.
Paper: Saxe et al., http://arxiv.org/abs/1312.6120
Parameters
----------
scale: float, optional
The scale of the orthogonal values.
Returns
----------
_initializer: function
Returns the init function.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None):
q = _orthogonal(shape)
return tf.constant(scale * q[:shape[0], :shape[1]], dtype=dtype)
return _initializer
def bn_lstm_identity_initializer(scale=1.0):
"""Special indentity initializer used for batch normalization in LSTMs.
References:
From: http://olavnymoen.com/2016/07/07/rnn-batch-normalization
Parameters
----------
scale: float, optional
The scale of the identity values.
Returns
----------
_initializer: function
Returns the init function.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None):
'''Ugly cause LSTM params calculated in one matrix multiply'''
size = shape[0]
# gate (j) is identity
t = np.zeros(shape)
t[:, size:size * 2] = np.identity(size) * scale # j
t[:, :size] = _orthogonal([size, size]) # i
t[:, size * 2:size * 3] = _orthogonal([size, size]) # f
t[:, size * 3:] = _orthogonal([size, size]) # o
return tf.constant(t, dtype)
return _initializer
def bilinear_initializer():
"""Bilinear initializer, which is recommended for deconvolution when
used for upscaling. This op is called conv2d_transposed() in TensorFlow.
References:
J. Long et al.
From: http://arxiv.org/abs/1411.4038
Returns
----------
_initializer: function
Returns the init function.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None):
width = shape[0]
        height = shape[1]
f = math.ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([shape[0], shape[1]])
for x in range(width):
            for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(shape)
for i in range(shape[2]):
weights[:, :, i, i] = bilinear
return tf.constant(weights, dtype)
return _initializer | {"hexsha": "04c69ca7b9953161f587bd3a91c8271ec075a3e3", "size": 4059, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorlight/init.py", "max_stars_repo_name": "bsautermeister/tensorlight", "max_stars_repo_head_hexsha": "3139cf508a4d4d76e30c1591e26933d117883b49", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2016-11-08T10:53:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T18:20:08.000Z", "max_issues_repo_path": "tensorlight/init.py", "max_issues_repo_name": "bsautermeister/tensorlight", "max_issues_repo_head_hexsha": "3139cf508a4d4d76e30c1591e26933d117883b49", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-10-16T07:29:26.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-02T10:24:28.000Z", "max_forks_repo_path": "tensorlight/init.py", "max_forks_repo_name": "bsautermeister/tensorlight", "max_forks_repo_head_hexsha": "3139cf508a4d4d76e30c1591e26933d117883b49", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-12-07T08:05:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T18:19:35.000Z", "avg_line_length": 33.825, "max_line_length": 79, "alphanum_fraction": 0.5944813994, "include": true, "reason": "import numpy", "num_tokens": 1042} |
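if __name__ == '__main__':
    # Minimal usage sketch (assuming the TensorFlow 1.x variable API): build a
    # recurrent weight matrix with the orthogonal initializer defined above.
    w = tf.get_variable('w_hh', shape=[128, 128],
                        initializer=orthogonal_initializer(scale=1.0))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(w).shape)  # (128, 128)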
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import os, sys
from numba import jit
from etaprogress.progress import ProgressBar
"""Previous version of Kullback Leivier Divergence(KLD).
This module calculate real values of KLD of optical flow with motion platform vector. See
https://en.wikipedia.org/wiki/Kullback–Leibler_divergence.
Examples:
Nope.
Attributes:
optflow_to_hist(bin_magnitude, bin_degree, optical_flows)
Todo:
*using cuda to calculate optical flow, farneback3d see https://pypi.org/project/farneback3d/.
*Motion platform probability integrity check
"""
########################################## GOLBAL VARIABLES ########################################
DIM_OF_IMG = (768, 1024) # Or, we can get dimension of image from file
EXT_JSON = '.json'
EXT_EXCEL = '.xlsx'
SAVE_DIR = '../VideoDataProcessed/'
BIN_MAGNITUDE = [1,6,20,50,100000]
BIN_DEGREE = np.linspace(15, 345, 12)
####################################################################################################
############################################# FUNCTIONS ############################################
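# bin_selection maps one (magnitude, degree) pair to one of 60 bins:
# 12 direction sectors (30 degrees each, boundaries in BIN_DEGREE) times
# 5 magnitude ranges (BIN_MAGNITUDE), written as a hand-unrolled decision
# tree so numba can compile it to straight-line branches.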
@jit()
def bin_selection(polar):
tmp = 0
if polar[1] < 195:
if polar[1] < 105:
if polar[1] < 45:
if polar[1] > 15:
tmp = 1
else:
if polar[1] < 75:
tmp = 2
else:
tmp = 3
else:
if polar[1] < 165:
if polar[1] < 135:
tmp = 4
else:
tmp = 5
else:
tmp = 6
else:
if polar[1] < 315:
if polar[1] < 255:
if polar[1] < 195:
tmp = 7
else:
tmp = 8
else:
if polar[1] < 285:
tmp = 9
else:
tmp = 10
else:
if polar[1] < 345:
tmp = 11
if polar[0] < 6:
if polar[0] > 1:
tmp += 12
else:
if polar[0] < 50:
if polar[0] < 20:
tmp += 24
else:
tmp += 36
else:
tmp += 48
return tmp
def caption_to_optflow(cap):
optical_flows = []
prevgray = cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)
frame_max = cap.get(cv2.CAP_PROP_FRAME_COUNT)
frame_num = 1
bar = ProgressBar(frame_max, max_width = 100)
tick = 0
    while frame_num < frame_max:
        ret, frame = cap.read()
        if not ret:  # guard against a short or corrupt capture
            break
        if frame_num % 10 == 1:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
optical_flows.append(
cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0))
if frame_num % 10 == 0:
prevgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
bar.numerator = tick
print(bar, end = '\r')
sys.stdout.flush()
frame_num += 1
tick += 1
return optical_flows
def optflow_to_hist(optical_flows,
bin_magnitude = BIN_MAGNITUDE,
bin_degree = BIN_DEGREE):
"""Make histogam of optical flows. Currently, it calculate all of frame at end of frame. However
THIS will be change when problem of motion platform vector distribution is solved.
Args:
bin_magnitude:
bin_degree:
optical_flows:
Returns:
probability of optical flow(12 * 5): sum-upped probability
TODO:
*parallelization
*do not sum up, take each probability.
"""
#assert polars[0].shape == DIM_OF_IMG and polars[1].shape == DIM_OF_IMG,\
#'Error: dimention of image is not' + str(DIM_OF_IMG)
polars_total = []
for flow in optical_flows:
polars = np.asarray(cv2.cartToPolar(flow[...,0], flow[...,1],None,None,True))
polars_total.append(polars.reshape(2, polars.shape[1] * polars.shape[2]).T)
bar2 = ProgressBar(len(polars_total), max_width = 100)
tick = 0
counts_set = []
counts = np.zeros(60)
for polars in polars_total:
for polar in polars:
counts[bin_selection(polar)] += 1
counts_set.append(counts / counts.sum())
bar2.numerator = tick
tick += 1
print(bar2, end = '\r')
sys.stdout.flush()
return counts_set
def main(args):
video_name = os.path.splitext(os.path.basename(args.video_file))[0]
save_dir = args.save_path
save_json = save_dir + video_name + EXT_JSON
save_excel = save_dir + video_name + EXT_EXCEL
cap = cv2.VideoCapture(args.video_file)
optical_flows = caption_to_optflow(cap)
histogram = optflow_to_hist(optical_flows)
df = pd.DataFrame(histogram)
df.columns = [str((n % 12) * 30) + 'deg//~' + str(BIN_MAGNITUDE[n // 12]) for n in range(60)]
df.to_json(save_json)
df.to_excel(save_excel)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Calculate optical flows of video per 10frame and save it as json and excel')
parser.add_argument('video_file', nargs='?', type=str, help='mp4 file')
parser.add_argument('save_path', nargs='?', type=str, default=SAVE_DIR)
main(parser.parse_args())
| {"hexsha": "600c0d45d4eeabb0031c85f54294b071a93dc8bf", "size": 5436, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test4.py", "max_stars_repo_name": "nearj/mpvr-motionfiltering", "max_stars_repo_head_hexsha": "478304391e031a11bd15a604a272017ce8e48abf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test4.py", "max_issues_repo_name": "nearj/mpvr-motionfiltering", "max_issues_repo_head_hexsha": "478304391e031a11bd15a604a272017ce8e48abf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test4.py", "max_forks_repo_name": "nearj/mpvr-motionfiltering", "max_forks_repo_head_hexsha": "478304391e031a11bd15a604a272017ce8e48abf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-14T01:32:04.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-14T01:32:04.000Z", "avg_line_length": 31.9764705882, "max_line_length": 100, "alphanum_fraction": 0.5358719647, "include": true, "reason": "import numpy,from numba", "num_tokens": 1376} |
@doc raw"""
Entropy <: AbstractImageBinarizationAlgorithm
Entropy()
binarize([T,] img, f::Entropy)
binarize!([out,] img, f::Entropy)
An algorithm for finding the binarization threshold value using
the entropy of the image histogram.
# Output
Return the binarized image as an `Array{Gray{T}}` of size `size(img)`. If
`T` is not specified, it is inferred from `out` and `img`.
# Details
This algorithm uses the entropy of a one-dimensional histogram to produce a threshold
value.
Let ``f_1, f_2, \ldots, f_I`` be the frequencies in the various bins of the
histogram and ``I`` the number of bins. With ``N = \sum_{i=1}^{I}f_i``, let
``p_i = \frac{f_i}{N}`` (``i = 1, \ldots, I``) denote the probability
distribution of gray levels. From this distribution one derives two additional
distributions. The first defined for discrete values ``1`` to ``s`` and the
other, from ``s+1`` to ``I``. These distributions are
```math
A: \frac{p_1}{P_s}, \frac{p_2}{P_s}, \ldots, \frac{p_s}{P_s}
\quad \text{and} \quad
B: \frac{p_{s+1}}{1-P_s}, \ldots, \frac{p_n}{1-P_s}
\quad \text{where} \quad
P_s = \sum_{i=1}^{s}p_i.
```
The entropies associated with each distribution are as follows:
```math
H(A) = \ln(P_s) + \frac{H_s}{P_s}
```
```math
H(B) = \ln(1-P_s) + \frac{H_n-H_s}{1-P_s}
```
```math
\quad \text{where} \quad
H_s = -\sum_{i=1}^{s}p_i\ln{p_i}
\quad \text{and} \quad
H_n = -\sum_{i=1}^{I}p_i\ln{p_i}.
```
Combining these two entropy functions we have
```math
\psi(s) = \ln(P_s(1-P_s)) + \frac{H_s}{P_s} + \frac{H_n-H_s}{1-P_s}.
```
Finding the discrete value ``s`` which maximises the function ``\psi(s)`` produces
the sought-after threshold value (i.e. the bin which determines the threshold).
See Section 4 of [1] for more details on the derivation of the entropy.
# Arguments
The function argument is described in more detail below.
## `img::AbstractArray`
The image that needs to be binarized. The image is automatically converted
to `Gray` in order to construct the requisite graylevel histogram.
# Example
Binarize the "cameraman" image in the `TestImages` package.
```julia
using TestImages, ImageBinarization
img = testimage("cameraman")
img_binary = binarize(img, Entropy())
```
# References
1. J. N. Kapur, P. K. Sahoo, and A. K. C. Wong, “A new method for gray-level picture thresholding using the entropy of the histogram,” *Computer Vision, Graphics, and Image Processing*, vol. 29, no. 1, p. 140, Jan. 1985.[doi:10.1016/s0734-189x(85)90156-2](https://doi.org/10.1016/s0734-189x%2885%2990156-2)
"""
struct Entropy <: AbstractImageBinarizationAlgorithm end
function (f::Entropy)(out::GenericGrayImage, img::GenericGrayImage)
edges, counts = build_histogram(img, 256)
t = find_threshold(HistogramThresholding.Entropy(), counts[1:end], edges)
@simd for i in CartesianIndices(img)
out[i] = img[i] < t ? 0 : 1
end
out
end
(f::Entropy)(out::GenericGrayImage, img::AbstractArray{<:Color3}) =
f(out, of_eltype(Gray, img))
| {"hexsha": "825602a45a5e2a236b3c4f42604bd5adb77810d1", "size": 2992, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/algorithms/entropy.jl", "max_stars_repo_name": "UnofficialJuliaMirror/ImageBinarization.jl-cbc4b850-ae4b-5111-9e64-df94c024a13d", "max_stars_repo_head_hexsha": "e6b3b13279196544e815c821e45e2bef37c18376", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/algorithms/entropy.jl", "max_issues_repo_name": "UnofficialJuliaMirror/ImageBinarization.jl-cbc4b850-ae4b-5111-9e64-df94c024a13d", "max_issues_repo_head_hexsha": "e6b3b13279196544e815c821e45e2bef37c18376", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/algorithms/entropy.jl", "max_forks_repo_name": "UnofficialJuliaMirror/ImageBinarization.jl-cbc4b850-ae4b-5111-9e64-df94c024a13d", "max_forks_repo_head_hexsha": "e6b3b13279196544e815c821e45e2bef37c18376", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-13T20:29:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-13T20:29:41.000Z", "avg_line_length": 30.8453608247, "max_line_length": 306, "alphanum_fraction": 0.692513369, "num_tokens": 973} |
import numpy as np
from multiagent.core import World, Landmark
from multiagent.scenario import BaseScenario
from particle_environments.mager.world import MortalAgent, HazardousWorld
from particle_environments.mager.observation import format_observation
from particle_environments.common import is_collision, distance, delta_pos
from particle_environments.common import DefaultParameters as DP
class Obstacle(Landmark):
def __init__(self):
super().__init__()
self.known = False
class ObstacleWorld(World):
def __init__(self):
super().__init__()
self.obstacles = []
@property
def entities(self):
return self.agents + self.landmarks + self.obstacles
class Scenario(BaseScenario):
num_agents = 10
num_landmarks = 3
num_obstacles = 1
def make_world(self):
world = HazardousWorld()
# observation-based communication
world.dim_c = 0
world.max_communication_distance = DP.max_communication_distance
# add landmarks
world.landmarks = [Landmark() for i in range(self.num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = True
landmark.movable = False
landmark.size = DP.landmark_size
# add obstacles
world.obstacles = [Obstacle() for i in range(self.num_obstacles)]
for i, obstacle in enumerate(world.obstacles):
obstacle.name = 'obstacle %d' % i
obstacle.collide = True
obstacle.size = 0.05
# make initial conditions
self.reset_world(world)
return world
def reset_world(self, world):
# add agents with random properties
world.agents = [MortalAgent() for i in range(self.num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.terminated = False
agent.collide = True
agent.silent = True
agent.size = DP.agent_size
agent.color = np.array([0.35, 0.35, 0.85])
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
landmark.color = np.array([0.25, 0.25, 0.25])
goal = np.random.choice(world.landmarks)
goal.color = np.array([0.15, 0.65, 0.15])
for i, obstacle in enumerate(world.obstacles):
obstacle.color = np.array([0.90, 0.40, 0.40])
# set random initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for landmark in world.landmarks:
landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
for obstacle in world.obstacles:
obstacle.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
obstacle.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
for l in world.landmarks:
dists = [np.linalg.norm(a.state.p_pos - l.state.p_pos) for a in world.agents]
min_dists += min(dists)
rew -= min(dists)
if min(dists) < 0.1:
occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if is_collision(a, agent):
rew -= 1
collisions += 1
return (rew, collisions, min_dists, occupied_landmarks)
def reward(self, agent, world):
# Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions
rew = 0
for l in world.landmarks:
dists = [np.linalg.norm(a.state.p_pos - l.state.p_pos) for a in world.agents]
rew -= min(dists)
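        # known obstacles are repulsive: reward grows with the minimum
        # agent-obstacle distance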
for o in world.obstacles:
if o.known:
dists = [np.linalg.norm(a.state.p_pos - o.state.p_pos) for a in world.agents]
rew += min(dists)
if agent.collide:
for a in world.agents:
if is_collision(a, agent):
rew -= 1
return rew
def observation(self, agent, world):
def communications_observed(other_agent):
''' fill in information communicated between agents
'''
comms = delta_pos(other_agent, agent).tolist()
comms += [other_agent.state.c]
# will only work with zero-padding formatting
# TODO: I think non-communication should send None instead of zero, because zero has real meaning
# however this causes a problem with action function
if distance(agent, other_agent) > world.max_communication_distance:
comms = [0] * len(comms)
return comms
landmark_positions = format_observation(observe = lambda landmark: delta_pos(landmark, agent).tolist(),
objects = world.landmarks,
num_observations = len(world.landmarks),
observation_size = world.dim_p)
communications = format_observation(observe = communications_observed,
objects = [a for a in world.agents if (a is not agent and not a.terminated)],
num_observations = self.num_agents,
observation_size = world.dim_p + 1,
sort_key = lambda o: distance(agent, o))
return np.asarray(agent.state.p_pos.tolist() + landmark_positions + communications)
| {"hexsha": "dfebad03b12b1249c6d8085c1387992b76a74cd6", "size": 5919, "ext": "py", "lang": "Python", "max_stars_repo_path": "particle_environments/mager/scenarios/old_ergo_spread.py", "max_stars_repo_name": "rallen10/ergo_particle_gym", "max_stars_repo_head_hexsha": "5bb8073d880ab1da60ee333d892ea8a4720f3396", "max_stars_repo_licenses": ["FSFULLR", "FSFUL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "particle_environments/mager/scenarios/old_ergo_spread.py", "max_issues_repo_name": "rallen10/ergo_particle_gym", "max_issues_repo_head_hexsha": "5bb8073d880ab1da60ee333d892ea8a4720f3396", "max_issues_repo_licenses": ["FSFULLR", "FSFUL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "particle_environments/mager/scenarios/old_ergo_spread.py", "max_forks_repo_name": "rallen10/ergo_particle_gym", "max_forks_repo_head_hexsha": "5bb8073d880ab1da60ee333d892ea8a4720f3396", "max_forks_repo_licenses": ["FSFULLR", "FSFUL"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-08T08:36:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-07T17:35:53.000Z", "avg_line_length": 41.6830985915, "max_line_length": 121, "alphanum_fraction": 0.5860787295, "include": true, "reason": "import numpy", "num_tokens": 1285} |
import numpy as np
import pandas as pd
import geopandas as gpd
from geopandas import GeoDataFrame, GeoSeries
import pysal
# Load initial csv file
routing = pd.read_csv('/var/otp/scripting/output/otp-scripting-newark-parcels.csv')
routing["min_time"] = routing["min_time"].astype(float)
# Split out by mode of transportation
transit_routing = routing[routing["mode"] == "TRANSIT,WALK"]
walk_routing = routing[routing["mode"] == "WALK"]
car_routing = routing[routing["mode"] == "CAR"]
# Get distance and time pivots for each mode
transit_time_pivot = transit_routing.pivot(index="ID", columns="school", values="min_time")
transit_distance_pivot = transit_routing.pivot(index="ID", columns="school", values="route_distance")
walk_time_pivot = walk_routing.pivot(index="ID", columns="school", values="min_time")
walk_distance_pivot = walk_routing.pivot(index="ID", columns="school", values="route_distance")
car_time_pivot = car_routing.pivot(index="ID", columns="school", values="min_time")
car_distance_pivot = car_routing.pivot(index="ID", columns="school", values="route_distance")
# Reset indices so that dataframes work on merge
pivots = [transit_time_pivot, transit_distance_pivot, walk_time_pivot, walk_distance_pivot,
car_time_pivot, car_distance_pivot]
for p in pivots:
p.reset_index(inplace=True)
# Write each pivot to csv
transit_time_pivot.to_csv("output/pivot-csv/transit-time-pivot.csv")
transit_distance_pivot.to_csv("output/pivot-csv/transit-distance-pivot.csv")
walk_time_pivot.to_csv("output/pivot-csv/walk-time-pivot.csv")
walk_distance_pivot.to_csv("output/pivot-csv/walk-distance-pivot.csv")
car_time_pivot.to_csv("output/pivot-csv/car-time-pivot.csv")
car_distance_pivot.to_csv("output/pivot-csv/car-distance-pivot.csv")
# Get shapefile into GeoDataFrame
newark_blocks = GeoDataFrame.from_file("data/newark-residential-parcels/newark-residential-parcels-4326.shp")
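# cast the join key to int so it matches the ID column coming from the pivots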
newark_blocks["ID"] = newark_blocks["ID"].astype(int)
# Merge into new dataframes
transit_time_merge = pd.merge(newark_blocks, transit_time_pivot, on="ID")
transit_distance_merge = pd.merge(newark_blocks, transit_distance_pivot, on="ID")
walk_time_merge = pd.merge(newark_blocks, walk_time_pivot, on="ID")
walk_distance_merge = pd.merge(newark_blocks, walk_distance_pivot, on="ID")
car_time_merge = pd.merge(newark_blocks, car_time_pivot, on="ID")
car_distance_merge = pd.merge(newark_blocks, car_distance_pivot, on="ID")
merges = [transit_time_merge, transit_distance_merge, walk_time_merge, walk_distance_merge,
car_time_merge, car_distance_merge]
# Set CRS to use for all
crs = {'init': 'epsg:4326', 'no_defs': True}
for m in merges:
    # Set each geometry column to GeoSeries
    # (documented bug: https://github.com/geopandas/geopandas/issues/247)
m.geometry = m.geometry.astype(gpd.geoseries.GeoSeries)
m.geometry.crs = crs
m["ID"] = m["ID"].astype(str)
# Convert column names to strings (so that fiona doesn't throw error)
m.rename(columns = lambda x: str(x), inplace=True)
# Convert DataFrames back to GeoDataFrames (bug in geopandas)
transit_time_merge = GeoDataFrame(transit_time_merge, crs=crs, geometry=transit_time_merge.geometry)
transit_distance_merge = GeoDataFrame(transit_distance_merge, crs=crs, geometry=transit_distance_merge.geometry)
walk_time_merge = GeoDataFrame(walk_time_merge, crs=crs, geometry=walk_time_merge.geometry)
walk_distance_merge = GeoDataFrame(walk_distance_merge, crs=crs, geometry=walk_distance_merge.geometry)
car_time_merge = GeoDataFrame(car_time_merge, crs=crs, geometry=car_time_merge.geometry)
car_distance_merge = GeoDataFrame(car_distance_merge, crs=crs, geometry=car_distance_merge.geometry)
# Write to shapefiles
transit_time_merge.to_file("output/merged-shp/transit-time.shp")
transit_distance_merge.to_file("output/merged-shp/transit-distance.shp")
walk_time_merge.to_file("output/merged-shp/walk-time.shp")
walk_distance_merge.to_file("output/merged-shp/walk-distance.shp")
car_time_merge.to_file("output/merged-shp/car-time.shp")
car_distance_merge.to_file("output/merged-shp/car-distance.shp")
| {"hexsha": "20d8fe8c4a8f45bdf04bb081b27c64b317383aba", "size": 4095, "ext": "py", "lang": "Python", "max_stars_repo_path": "make-shp.py", "max_stars_repo_name": "pjsier/newark-parcel-routing", "max_stars_repo_head_hexsha": "ade320dfd3525eee3e1668b5c2ffa6b987be7176", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "make-shp.py", "max_issues_repo_name": "pjsier/newark-parcel-routing", "max_issues_repo_head_hexsha": "ade320dfd3525eee3e1668b5c2ffa6b987be7176", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "make-shp.py", "max_forks_repo_name": "pjsier/newark-parcel-routing", "max_forks_repo_head_hexsha": "ade320dfd3525eee3e1668b5c2ffa6b987be7176", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.3373493976, "max_line_length": 112, "alphanum_fraction": 0.7921855922, "include": true, "reason": "import numpy", "num_tokens": 1057} |
from __future__ import print_function
import torch.nn as nn
from torch.autograd import Variable
import torch as t
import torch.cuda as torch  # note: "torch" is aliased to torch.cuda here; the plain package is "t"
import torch.nn.functional as F
import time
from collections import defaultdict
import random
import math
import sys
import argparse
import numpy as np
# much of the beginning is the same as the text retrieval
# format of files: each line is "word1 word2 ..." aligned line-by-line
criterion = nn.CrossEntropyLoss()
use_cuda = torch.is_available()
atype = torch.LongTensor
class encoder(nn.Module):
def __init__(self, nwords_trg, emb_size, hidden_dim, num_layer, use_cuda):
super(encoder, self).__init__()
self.num_layer = num_layer
self.hidden_dim = hidden_dim
self.use_cuda = use_cuda
self.embed = nn.Embedding(nwords_trg, emb_size)
nn.init.uniform_(self.embed.weight, -0.25, 0.25)
self.lstm = nn.LSTM(input_size=emb_size, hidden_size=hidden_dim, num_layers=num_layer,
batch_first=True)
def forward(self, source, src_length=None, hidden=None):
src_emb = self.embed(source)
src_emb = src_emb.unsqueeze(0)
if hidden is None:
h0 = Variable(t.zeros(self.num_layer, 1, self.hidden_dim).cuda())
c0 = Variable(t.zeros(self.num_layer, 1, self.hidden_dim).cuda())
hidden = (h0, c0)
# if src_length is not None:
# src_emb = torch.nn.utils.rnn.pack_padded_sequence(src_emb, src_length, batch_first=True)
output, enc_h_t = self.lstm(src_emb, hidden)
# if src_length is not None:
# output, _ = torch.nn.utils.rnn.pad_packed_sequence(enc_h, batch_first=True)
return output[:, -1, :]
class decoder(nn.Module):
def __init__(self, vocab_size, embed_dim, hidden_dim):
super(decoder, self).__init__()
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.embed = nn.Embedding(vocab_size, embed_dim)
self.lstm = nn.LSTM(input_size=embed_dim, hidden_size=hidden_dim, batch_first=True)
self.dec2word = nn.Linear(hidden_dim, vocab_size)
def forward(self, trg, hidden):
        '''
        trg : scalar LongTensor holding the current target token id
        hidden : (h, c) LSTM state tuple, each of shape 1 x 1 x hidden_dim
        '''
src_emb = self.embed(trg)
src_emb = src_emb.unsqueeze(0).unsqueeze(1)
output, hidden = self.lstm(src_emb, hidden)
output = self.dec2word(output).squeeze(1)
return output, hidden
class Seq2Seq(nn.Module):
def __init__(self, src_nword, trg_nword, num_layer, embed_dim, hidden_dim, max_len, trg_soi, trg_eos, use_cuda):
super(Seq2Seq, self).__init__()
self.hidden_dim = hidden_dim
self.trg_nword = trg_nword
self.max = max_len
self.trg_soi = trg_soi
self.trg_eos = trg_eos
self.encoder = encoder(src_nword, embed_dim, hidden_dim, num_layer, use_cuda)
self.decoder = decoder(trg_nword, embed_dim, hidden_dim)
def forward(self, source, target=None):
total_loss = []
trg_sent = []
if target is not None:
enc_h = self.encoder(source)
enc_h = enc_h.unsqueeze(1)
            hidden = (enc_h, t.tanh(enc_h))
for i in range(target.shape[0]):
output, hidden = self.decoder(target[i], hidden)
loss = criterion(output, target[i].unsqueeze(0))
total_loss.append(loss)
else:
enc_h = self.encoder(source)
enc_h = enc_h.unsqueeze(1)
            hidden = (enc_h, t.tanh(enc_h))
trg_word = t.tensor(self.trg_soi).type(atype)
            for i in range(self.max):
output, hidden = self.decoder(trg_word, hidden)
                trg_word = t.argmax(output, dim=1)[0]
trg_sent.append(trg_word.item())
if (trg_word == self.trg_eos):
break
return total_loss, trg_sent
train_src_file = "train.ja"
train_trg_file = "train.en"
dev_src_file = "dev.ja"
dev_trg_file = "dev.en"
test_src_file = "test.ja"
test_trg_file = "test.en"
w2i_src = defaultdict(lambda: len(w2i_src))
w2i_trg = defaultdict(lambda: len(w2i_trg))
def read(fname_src, fname_trg):
"""
Read parallel files where each line lines up
"""
with open(fname_src, "r", encoding="utf-8") as f_src, open(fname_trg, "r", encoding="utf-8") as f_trg:
for line_src, line_trg in zip(f_src, f_trg):
# need to append EOS tags to at least the target sentence
sent_src = [w2i_src[x] for x in line_src.strip().split() + ['</s>']]
sent_trg = [w2i_trg[x] for x in ['<s>'] + line_trg.strip().split() + ['</s>']]
yield (sent_src, sent_trg)
# Read the data
train = list(read(train_src_file, train_trg_file))
unk_src = w2i_src["<unk>"]
eos_src = w2i_src['</s>']
w2i_src = defaultdict(lambda: unk_src, w2i_src)
unk_trg = w2i_trg["<unk>"]
eos_trg = w2i_trg['</s>']
sos_trg = w2i_trg['<s>']
w2i_trg = defaultdict(lambda: unk_trg, w2i_trg)
i2w_trg = {v: k for k, v in w2i_trg.items()}
nwords_src = len(w2i_src)
nwords_trg = len(w2i_trg)
dev = list(read(dev_src_file, dev_trg_file))
test = list(read(test_src_file, test_trg_file))
# Model parameters
EMBED_SIZE = 64
HIDDEN_SIZE = 128
BATCH_SIZE = 16
MAX_SENT_SIZE = 50
num_layer = 1
model = Seq2Seq(nwords_src, nwords_trg, 1, EMBED_SIZE, HIDDEN_SIZE, MAX_SENT_SIZE, sos_trg, eos_trg, use_cuda)
model.cuda()
optimizer = t.optim.Adam(model.parameters())
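# calc_loss decodes with teacher forcing: the gold target token is fed to the
# decoder at every step and the per-step cross-entropy losses are summed.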
def calc_loss(sent):
# Transduce all batch elements with an LSTM
src = sent[0]
trg = sent[1]
src_tensor = t.tensor(src).type(atype)
trg_tensor = t.tensor(trg).type(atype)
loss, _ = model(src_tensor, trg_tensor)
loss = t.stack(loss).sum()
return loss
def generate(sent):
src = sent
src_tensor = t.tensor(src).type(atype)
_, trg_sent = model(src_tensor)
for i in range(len(trg_sent)):
trg_sent[i] = i2w_trg[trg_sent[i]]
return trg_sent
for ITER in range(10):
# Perform training
random.shuffle(train)
train_words, train_loss = 0, 0.0
start = time.time()
for sent_id, sent in enumerate(train):
my_loss = calc_loss(sent)
train_loss += my_loss.item()
        train_words += len(sent[1])  # count target-side words
optimizer.zero_grad()
my_loss.backward()
optimizer.step()
if (sent_id + 1) % 1000 == 0:
print("--finished %r sentences" % (sent_id + 1))
print("iter %r: train loss/word=%.4f, ppl=%.4f, time=%.2fs" % (
ITER, train_loss / train_words, math.exp(train_loss / train_words), time.time() - start))
# Evaluate on dev set
dev_words, dev_loss = 0, 0.0
start = time.time()
for sent_id, sent in enumerate(dev):
my_loss = calc_loss(sent)
dev_loss += my_loss.item()
        dev_words += len(sent[1])  # count target-side words
print("iter %r: dev loss/word=%.4f, ppl=%.4f, time=%.2fs" % (
ITER, dev_loss / dev_words, math.exp(dev_loss / dev_words), time.time() - start))
# this is how you generate, can replace with desired sentenced to generate
sentences = []
for sent_id, sent in enumerate(test):
translated_sent = generate(sent[0])
sentences.append(translated_sent)
for sent in sentences:
print(sent)
| {"hexsha": "8805d353f5b2fa4480b4a39690a6c759930389fb", "size": 7262, "ext": "py", "lang": "Python", "max_stars_repo_path": "08-condlm-pytorch/cuda_version.py", "max_stars_repo_name": "tinySean/nn4nlp-tensorflow", "max_stars_repo_head_hexsha": "17d64427ad3cf276f2d43eac706d14a6145cc3e6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-04T10:53:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-25T02:31:44.000Z", "max_issues_repo_path": "08-condlm-pytorch/cuda_version.py", "max_issues_repo_name": "tinySean/nn4nlp-tensorflow", "max_issues_repo_head_hexsha": "17d64427ad3cf276f2d43eac706d14a6145cc3e6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "08-condlm-pytorch/cuda_version.py", "max_forks_repo_name": "tinySean/nn4nlp-tensorflow", "max_forks_repo_head_hexsha": "17d64427ad3cf276f2d43eac706d14a6145cc3e6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-22T10:33:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-22T10:33:02.000Z", "avg_line_length": 32.7117117117, "max_line_length": 116, "alphanum_fraction": 0.6385293308, "include": true, "reason": "import numpy", "num_tokens": 1967} |
/*=========================================================================
Program: Visualization Toolkit
Module: TestBoostAlgorithms.cxx
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#include "vtkActor.h"
#include "vtkBoostBetweennessClustering.h"
#include "vtkDataSetAttributes.h"
#include "vtkIntArray.h"
#include "vtkMutableUndirectedGraph.h"
#include "vtkPoints.h"
#include "vtkSmartPointer.h"
#include "vtkVertexListIterator.h"
#include <boost/version.hpp>
#include <map>
int TestBoostBetweennessClustering(int vtkNotUsed(argc),
char* vtkNotUsed(argv)[])
{
// Create the test graph
vtkSmartPointer<vtkMutableUndirectedGraph> g
(vtkSmartPointer<vtkMutableUndirectedGraph>::New());
vtkSmartPointer<vtkIntArray> weights (
vtkSmartPointer<vtkIntArray>::New());
weights->SetName("weights");
g->GetEdgeData()->AddArray(weights);
vtkSmartPointer<vtkPoints> pts (vtkSmartPointer<vtkPoints>::New());
g->AddVertex();
pts->InsertNextPoint(1, 1, 0);
g->AddVertex();
pts->InsertNextPoint(1, 0, 0);
g->AddVertex();
pts->InsertNextPoint(1, -1, 0);
g->AddVertex();
pts->InsertNextPoint(2, 0, 0);
g->AddVertex();
pts->InsertNextPoint(3, 0, 0);
g->AddVertex();
pts->InsertNextPoint(2.5, 1, 0);
g->AddVertex();
pts->InsertNextPoint(4, 1, 0);
g->AddVertex();
pts->InsertNextPoint(4, 0, 0);
g->AddVertex();
pts->InsertNextPoint(4, -1, 0);
g->SetPoints(pts);
vtkEdgeType e = g->AddEdge(0, 3);
weights->InsertTuple1(e.Id, 10);
e = g->AddEdge(1, 3);
weights->InsertTuple1(e.Id, 10);
e = g->AddEdge(2, 3);
weights->InsertTuple1(e.Id, 10);
e = g->AddEdge(3, 4);
weights->InsertTuple1(e.Id, 1);
e = g->AddEdge(3, 5);
weights->InsertTuple1(e.Id, 10);
e = g->AddEdge(5, 4);
weights->InsertTuple1(e.Id, 10);
e = g->AddEdge(6, 4);
weights->InsertTuple1(e.Id, 10);
e = g->AddEdge(7, 4);
weights->InsertTuple1(e.Id, 10);
e = g->AddEdge(8, 4);
weights->InsertTuple1(e.Id, 10);
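  // Edge 3-4 is the lone low-weight link between the two hubs, so the
  // clustering is expected to split the graph across it (see expResults
  // below for the anticipated component labels).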
// Test centrality
vtkSmartPointer<vtkBoostBetweennessClustering> bbc (
vtkSmartPointer<vtkBoostBetweennessClustering>::New());
bbc->SetInputData(g);
bbc->SetThreshold(4);
bbc->SetEdgeWeightArrayName("weights");
bbc->SetEdgeCentralityArrayName("bbc_centrality");
bbc->UseEdgeWeightArrayOn();
bbc->Update();
vtkGraph* og = bbc->GetOutput();
if(!og)
{
return 1;
}
vtkIntArray* compArray = vtkIntArray::SafeDownCast(og->GetVertexData()->
GetArray("component"));
if(!compArray)
{
return 1;
}
  // Now let's create the expected mapping so that we can compare the results
  // against it.
std::map<int, int> expResults;
expResults[0] = 0;
expResults[1] = 0;
expResults[2] = 0;
expResults[3] = 0;
expResults[4] = 1;
expResults[5] = 1;
expResults[6] = 1;
expResults[7] = 1;
expResults[8] = 2;
vtkSmartPointer<vtkVertexListIterator> vlItr (
vtkSmartPointer<vtkVertexListIterator>::New());
vlItr->SetGraph(og);
while(vlItr->HasNext())
{
vtkIdType id = vlItr->Next();
if(expResults[id] != compArray->GetVariantValue(id).ToInt())
{
return 1;
}
}
return 0;
}
| {"hexsha": "1066b5b80be81a45e45a832b7d1399ae42acfee0", "size": 3637, "ext": "cxx", "lang": "C++", "max_stars_repo_path": "Infovis/BoostGraphAlgorithms/Testing/Cxx/TestBoostBetweennessClustering.cxx", "max_stars_repo_name": "jasper-yeh/VtkDotNet", "max_stars_repo_head_hexsha": "84b56f781cb511694e4380cebfb245bbefe2560b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2020-06-20T23:31:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-11T02:17:16.000Z", "max_issues_repo_path": "Infovis/BoostGraphAlgorithms/Testing/Cxx/TestBoostBetweennessClustering.cxx", "max_issues_repo_name": "jasper-yeh/VtkDotNet", "max_issues_repo_head_hexsha": "84b56f781cb511694e4380cebfb245bbefe2560b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-12-01T23:21:02.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-02T23:44:43.000Z", "max_forks_repo_path": "Infovis/BoostGraphAlgorithms/Testing/Cxx/TestBoostBetweennessClustering.cxx", "max_forks_repo_name": "jasper-yeh/VtkDotNet", "max_forks_repo_head_hexsha": "84b56f781cb511694e4380cebfb245bbefe2560b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2015-10-09T04:12:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-15T16:57:11.000Z", "avg_line_length": 23.9276315789, "max_line_length": 76, "alphanum_fraction": 0.627440198, "num_tokens": 1073} |
#include "net/rpc/server.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <optional>
#include <system_error>
#include <utility>
#include <boost/icl/interval_set.hpp>
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "base/logging.h"
#include "net/rpc/flatbuffers-handler.h"
#include "net/rpc/flatbuffers/list_generated.h"
#include "net/rpc/wire-structs.h"
#include "security/keys/zero.key.h"
namespace net {
namespace rpc {
class Server::PingHandler : public Handler {
public:
void handle(
std::vector<uint8_t> request,
const security::Key &key,
std::function<void(std::vector<uint8_t>)> callback) final;
};
class Server::ListHandler : public FlatbuffersHandler<
ListHandler, fbs::ListRequest, fbs::ListResponse> {
public:
ListHandler(const Server &server) : server_(server) {}
void handle(
const fbs::ListRequestT &,
const security::Key &,
std::function<void(fbs::ListResponseT)> callback);
private:
const Server &server_;
};
class Server::Operation : public boost::intrusive_ref_counter<
Operation, boost::thread_unsafe_counter> {
public:
Operation(
Server &server,
const udp::endpoint &client_endpoint,
uint64_t key_fingerprint,
uint64_t request_id);
Operation(const Operation &) = delete;
Operation &operator=(const Operation &) = delete;
~Operation();
void unpack();
void set_client_endpoint(const udp::endpoint &client_endpoint) {
client_endpoint_ = client_endpoint;
}
private:
void handle();
void pack();
void respond();
Server &server_;
steady_timer keep_alive_timer_;
udp::endpoint client_endpoint_;
uint64_t key_fingerprint_;
uint64_t request_id_;
std::optional<security::Key> key_;
std::vector<uint8_t> request_;
boost::icl::interval_set<size_t> request_intervals_;
size_t request_size_ = std::numeric_limits<size_t>::max();
std::vector<uint8_t> response_;
std::vector<std::vector<uint8_t>> response_buffers_;
};
Server::Server(
const any_io_executor &executor,
const udp::endpoint &endpoint,
const Options &options)
: executor_(executor),
options_(options),
socket_(executor_, endpoint),
receive_buffer_(
std::make_unique<uint8_t[]>(options_.receive_buffer_size)) {
handle("ping", std::make_unique<PingHandler>());
handle("list", std::make_unique<ListHandler>(*this));
add_key(security::keys::zero);
}
void Server::handle(std::string_view name, std::unique_ptr<Handler> handler) {
handlers_.emplace(name, std::move(handler));
}
void Server::add_key(const security::Key &key) {
keystore_.add(key);
}
void Server::receive() {
socket_.async_receive_from(
buffer(receive_buffer_.get(), options_.receive_buffer_size),
receive_endpoint_,
[this](std::error_code ec, size_t size) {
if (ec) {
LOG(error) << "async_receive_from failed: " << ec;
if (ec == std::errc::operation_canceled) {
return;
}
receive();
return;
}
receive_size_ = size;
dispatch();
});
}
void Server::dispatch() {
if (receive_size_ < sizeof(wire::request::Header)) {
receive();
return;
}
const auto *request_header = reinterpret_cast<wire::request::Header *>(
&receive_buffer_[0]);
auto iter = operations_.find(
{request_header->key_fingerprint, request_header->request_id});
if (iter != operations_.end()) {
iter->second->set_client_endpoint(receive_endpoint_);
iter->second->unpack();
} else {
boost::intrusive_ptr<Operation>(new Operation(
*this,
receive_endpoint_,
request_header->key_fingerprint,
request_header->request_id))->unpack();
}
receive();
}
void Server::PingHandler::handle(
std::vector<uint8_t> request,
const security::Key &,
std::function<void(std::vector<uint8_t>)> callback) {
callback(std::move(request));
}
void Server::ListHandler::handle(
const fbs::ListRequestT &,
const security::Key &,
std::function<void(fbs::ListResponseT)> callback) {
fbs::ListResponseT response;
for (const auto &pair : server_.handlers_) {
response.methods.push_back(pair.first);
}
std::sort(response.methods.begin(), response.methods.end());
callback(std::move(response));
}
Server::Operation::Operation(
Server &server,
const udp::endpoint &client_endpoint,
uint64_t key_fingerprint,
uint64_t request_id)
: server_(server),
keep_alive_timer_(server_.executor_),
client_endpoint_(client_endpoint),
key_fingerprint_(key_fingerprint),
request_id_(request_id) {
server_.operations_.emplace(
OperationKey{key_fingerprint_, request_id_}, this);
}
Server::Operation::~Operation() {
server_.operations_.erase({key_fingerprint_, request_id_});
}
void Server::Operation::unpack() {
const auto *request_header = reinterpret_cast<wire::request::Header *>(
&server_.receive_buffer_[0]);
size_t offset = request_header->offset;
size_t request_body_size =
server_.receive_size_ - sizeof(wire::request::Header);
if (request_.size() < offset + request_body_size) {
request_.resize(offset + request_body_size);
}
    // The request nonce encodes the request id in bytes 0-7 and the fragment
    // offset in bytes 8-9; the remaining bytes stay zero.
    security::NonceArray nonce{};
    *reinterpret_cast<uint64_t *>(&nonce[0]) = request_header->request_id;
    *reinterpret_cast<uint16_t *>(&nonce[8]) = request_header->offset;
size_t decrypt_result;
if (key_) {
decrypt_result = key_->decrypt(
{
&server_.receive_buffer_[sizeof(wire::request::Header)],
request_body_size,
},
&nonce[0],
&request_[offset]);
if (decrypt_result == std::numeric_limits<size_t>::max()) {
return;
}
} else {
std::vector<security::Key> keys;
server_.keystore_.find(request_header->key_fingerprint, keys);
decrypt_result = std::numeric_limits<size_t>::max();
for (const security::Key &key : keys) {
decrypt_result = key.decrypt(
{
&server_.receive_buffer_[sizeof(wire::request::Header)],
request_body_size,
},
&nonce[0],
&request_[offset]);
if (decrypt_result != std::numeric_limits<size_t>::max()) {
key_.emplace(key);
break;
}
}
if (decrypt_result == std::numeric_limits<size_t>::max()) {
return;
}
keep_alive_timer_.expires_after(std::chrono::minutes(1));
keep_alive_timer_.async_wait(
[_ = boost::intrusive_ptr<Operation>(this)](std::error_code) {});
}
if (contains(request_intervals_,
boost::icl::interval<size_t>::right_open(0, request_size_))) {
if (!(request_header->flags & wire::request::flags::partial)) {
respond();
}
return;
}
request_intervals_ += boost::icl::interval<size_t>::right_open(
offset, offset + decrypt_result);
if (!(request_header->flags & wire::request::flags::partial)) {
request_size_ = offset + decrypt_result;
}
if (contains(request_intervals_,
boost::icl::interval<size_t>::right_open(0, request_size_))) {
handle();
}
}
void Server::Operation::handle() {
    // The method name is the trailing run of non-NUL bytes at the end of the
    // request body: scan backwards until the separator byte is found.
    const uint8_t *request_body_end = &request_[request_size_];
    const uint8_t *request_end = request_body_end;
    while (request_end != &request_[0] && request_end[-1]) {
        --request_end;
    }
    std::string_view method(
        reinterpret_cast<const char *>(request_end),
        request_body_end - request_end);
    // Skip the NUL separator so that request_ keeps only the payload.
    if (request_end != &request_[0]) {
        --request_end;
    }
auto handler_iter = server_.handlers_.find(method);
if (handler_iter == server_.handlers_.end()) {
// TODO(iceboy): Invalid request?
return;
}
request_.resize(request_end - &request_[0]);
handler_iter->second->handle(
std::move(request_),
*key_,
[this, _ = boost::intrusive_ptr<Operation>(this)](
std::vector<uint8_t> response) {
response_ = std::move(response);
pack();
});
}
void Server::Operation::pack() {
    // The response nonce reuses the request id but sets byte 11, so request
    // and response fragments never encrypt under the same nonce.
    security::NonceArray nonce{};
    *reinterpret_cast<uint64_t *>(&nonce[0]) = request_id_;
    nonce[11] = 1;
size_t offset = 0;
do {
size_t size = std::min(response_.size() - offset,
server_.options_.send_fragment_size);
response_buffers_.emplace_back(sizeof(wire::response::Header) + size +
security::Key::encrypt_overhead);
auto *response_header = reinterpret_cast<wire::response::Header *>(
&response_buffers_.back()[0]);
response_header->key_fingerprint = key_fingerprint_;
response_header->request_id = request_id_;
response_header->offset = static_cast<uint16_t>(offset);
response_header->flags = 0;
if (offset + size < response_.size()) {
response_header->flags |= wire::response::flags::partial;
}
*reinterpret_cast<uint16_t *>(&nonce[8]) =
static_cast<uint16_t>(offset);
size_t encrypt_result = key_->encrypt(
{&response_[offset], size},
&nonce[0],
&response_buffers_.back()[sizeof(wire::response::Header)]);
response_buffers_.back().resize(
sizeof(wire::response::Header) + encrypt_result);
offset += size;
} while (offset < response_.size());
respond();
}
void Server::Operation::respond() {
for (const std::vector<uint8_t> &response_buffer : response_buffers_) {
server_.socket_.async_send_to(
buffer(response_buffer),
client_endpoint_,
[_ = boost::intrusive_ptr<Operation>(this)](
std::error_code, size_t) {});
}
}
} // namespace rpc
} // namespace net
| {"hexsha": "c9b6cf430815cdc4a150b1a72e825e4443f3de65", "size": 10300, "ext": "cc", "lang": "C++", "max_stars_repo_path": "net/rpc/server.cc", "max_stars_repo_name": "iceboy233/trunk", "max_stars_repo_head_hexsha": "83024a83f07a587e00a3f2e1906361de521d8f12", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2021-12-23T06:36:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T10:49:01.000Z", "max_issues_repo_path": "net/rpc/server.cc", "max_issues_repo_name": "iceboy233/trunk", "max_issues_repo_head_hexsha": "83024a83f07a587e00a3f2e1906361de521d8f12", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "net/rpc/server.cc", "max_forks_repo_name": "iceboy233/trunk", "max_forks_repo_head_hexsha": "83024a83f07a587e00a3f2e1906361de521d8f12", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4921135647, "max_line_length": 79, "alphanum_fraction": 0.6213592233, "num_tokens": 2335} |
'''
GloVe embedding functions
Created June, 2017
Author: [email protected]
'''
import numpy as np
import tqdm
from .tokenizer import normalize_text
from .utils import count_lines
def load_emb_vocab(path, dim=300, fast_vec_format=False):
vocab = set()
num_lines = count_lines(path)
with open(path, encoding='utf-8') as f:
        line_count = 0
        for line in tqdm.tqdm(f, total=num_lines):
            line_count += 1
            # skip the "<vocab_size> <dim>" header of the fastText .vec format
            if fast_vec_format and line_count == 1:
                continue
elems = line.split()
token = normalize_text(' '.join(elems[0:-dim]))
vocab.add(token)
return vocab
def build_embedding(path, vocab, dim=300, fast_vec_format=False):
"""Support fasttext format"""
vocab_size = len(vocab)
emb = np.zeros((vocab_size, dim))
emb[0] = 0
num_lines = count_lines(path)
with open(path, encoding='utf-8') as f:
        line_count = 0
        for line in tqdm.tqdm(f, total=num_lines):
            line_count += 1
            if fast_vec_format and line_count == 1:
                # validate the "<vocab_size> <dim>" header of the fastText .vec format
                items = [int(i) for i in line.split()]
                assert len(items) == 2
                assert items[1] == dim
                continue
elems = line.split()
token = normalize_text(' '.join(elems[0:-dim]))
if token in vocab:
emb[vocab[token]] = [float(v) for v in elems[-dim:]]
return emb
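# Minimal usage sketch (not part of the original module). It assumes a
# GloVe-style text file at a hypothetical path and builds the embedding
# matrix for the full vocabulary; the token-to-index mapping is an example.
if __name__ == '__main__':
    emb_path = 'glove.840B.300d.txt'  # hypothetical path
    tokens = load_emb_vocab(emb_path, dim=300)
    vocab = {tok: idx for idx, tok in enumerate(sorted(tokens))}
    emb = build_embedding(emb_path, vocab, dim=300)
    print(emb.shape)  # (len(vocab), 300)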
| {"hexsha": "58c6275672ae3d9b6fc16adae28d001458877237", "size": 1439, "ext": "py", "lang": "Python", "max_stars_repo_path": "my_utils/word2vec_utils.py", "max_stars_repo_name": "ashishbaghudana/san_mrc", "max_stars_repo_head_hexsha": "03ed7d94c735f1fe2854bb9c208385b5fde44905", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-23T13:33:37.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-23T13:33:37.000Z", "max_issues_repo_path": "my_utils/word2vec_utils.py", "max_issues_repo_name": "ashishbaghudana/san_mrc", "max_issues_repo_head_hexsha": "03ed7d94c735f1fe2854bb9c208385b5fde44905", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "my_utils/word2vec_utils.py", "max_forks_repo_name": "ashishbaghudana/san_mrc", "max_forks_repo_head_hexsha": "03ed7d94c735f1fe2854bb9c208385b5fde44905", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3673469388, "max_line_length": 68, "alphanum_fraction": 0.5802640723, "include": true, "reason": "import numpy", "num_tokens": 359} |
# =============================================================================
# Authors: PAR Government
# Organization: DARPA
#
# Copyright (c) 2016 PAR Government
# All rights reserved.
# ==============================================================================
from maskgen.mask_rules import Probe, VideoSegment
import os
import shutil
from maskgen.tool_set import openImage
def serialize_segment(segment, copyFileDirectory=None):
"""
:param segment:
:return:
@type segment: VideoSegment
"""
if copyFileDirectory is not None:
shutil.copy(segment.filename,
os.path.join(copyFileDirectory, os.path.basename(segment.filename)))
return {"startframe": segment.startframe,
"starttime": segment.starttime,
"endframe": segment.endframe,
"endtime": segment.endtime,
"frames": segment.frames,
"rate": segment.rate,
"type": segment.media_type,
"error":segment.error}
def serialize_probe(probe, copyFileDirectory=None):
"""
:param probe:
:return:
@type probe: Probe
"""
item = {}
item['targetBaseNodeId'] = probe.targetBaseNodeId
item['edgeId'] = probe.edgeId
item['finalNodeId'] = probe.finalNodeId
item['donorBaseNodeId'] = probe.donorBaseNodeId
if probe.targetMaskFileName is not None:
item['targetMaskFileName'] = os.path.basename(probe.targetMaskFileName)
if copyFileDirectory is not None:
shutil.copy(probe.targetMaskFileName,
os.path.join(copyFileDirectory, os.path.basename(probe.targetMaskFileName)))
if probe.donorMaskFileName is not None:
if copyFileDirectory is not None:
shutil.copy(probe.donorMaskFileName,
os.path.join(copyFileDirectory, os.path.basename(probe.donorMaskFileName)))
item['donorMaskFileName'] = os.path.basename(probe.donorMaskFileName)
targetsegment = []
donorsegment = []
if probe.targetVideoSegments is not None:
for segment in probe.targetVideoSegments:
targetsegment.append(serialize_segment(segment))
if probe.donorVideoSegments is not None:
for segment in probe.donorVideoSegments:
donorsegment.append(serialize_segment(segment))
item['targetsegments'] = targetsegment
item['donorsegments'] = donorsegment
return item
def deserialize_segment(segmentItem, fileDirectory='.'):
return VideoSegment(segmentItem["rate"],
segmentItem["starttime"],
segmentItem["startframe"],
segmentItem["endtime"],
segmentItem["endframe"],
segmentItem["frames"],
os.path.join(fileDirectory, segmentItem["filename"]) if "filename" in segmentItem else None,
segmentItem["type"],
0)
def deserialize_probe(probeItem, fileDirectory='.'):
"""
:param probeItem: dict[str,str]
:return:
@rtype Probe
"""
from maskgen.tool_set import getValue
def deserializeSegments(segments, fileDirectory='.'):
return [deserialize_segment(item, fileDirectory=fileDirectory) for item in segments]
def resolveFile(item, key, fileDirectory):
if key in item:
return os.path.join(fileDirectory, item[key]) if fileDirectory is not None else item[key]
return None
return Probe(probeItem['edgeId'],
probeItem['finalNodeId'],
probeItem['targetBaseNodeId'],
probeItem['donorBaseNodeId'],
donorMaskFileName=resolveFile(probeItem, 'donorMaskFileName', fileDirectory),
targetMaskFileName=resolveFile(probeItem, 'targetMaskFileName', fileDirectory),
targetVideoSegments=deserializeSegments(getValue(probeItem,'targetsegments',[]), fileDirectory=fileDirectory),
donorVideoSegments=deserializeSegments(getValue(probeItem,'donorsegments',[]), fileDirectory=fileDirectory)
)
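# Minimal round-trip sketch (not part of the original module), assuming a
# Probe whose mask files live in `directory`:
#
#     item = serialize_probe(probe, copyFileDirectory=directory)
#     restored = deserialize_probe(item, fileDirectory=directory)
#     assert restored.edgeId == probe.edgeId
#     assert restored.finalNodeId == probe.finalNodeId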
def compare_mask_images(got, expected):
"""
:param got:
:param expected:
:return:
@type got: ImageWrapper
@type expected: ImageWrapper
"""
    import numpy as np
    if got is not None and expected is not None:
        diff = abs(got.image_array.astype('float') - expected.image_array.astype('float'))
        diffsize = np.sum(diff > 0)
        masksize = np.sum(expected.image_array > 0)
        # tolerate up to 5% of the mask pixels differing; guard against an empty mask
        if masksize > 0 and float(diffsize) / masksize <= 0.05:
            return True
    return False
def compare_images(file1, file2):
if file1 is None and file2 is None:
return True
if file1 is not None and file2 is not None:
return compare_mask_images(openImage(file1), openImage(file2))
return False
def compare_python_objects(obj1, obj2, keys_func={}):
    bad_keys = []
    for key in keys_func:
        v1 = getattr(obj1, key)
        v2 = getattr(obj2, key)
        if not keys_func[key](v1, v2):
            bad_keys.append(key)
    return bad_keys
def match_video_segments(expected, actual):
"""
:param expected:
:param actual:
:return:
@type expected: list of VideoSegment
@type actual:list of VideoSegment
"""
matched = {}
errors = []
for expected_pos in range(len(expected)):
expected_segment = expected[expected_pos]
for act_pos in range(len(actual)):
if act_pos in matched:
continue
actual_segment = actual[act_pos]
if actual_segment.startframe == expected_segment.startframe and \
actual_segment.media_type == expected_segment.media_type:
matched[act_pos] = expected_pos
ok = actual_segment.endframe == expected_segment.endframe and \
abs(actual_segment.starttime - expected_segment.starttime) < expected_segment.rate and \
abs(actual_segment.endtime - expected_segment.endtime) < expected_segment.rate and \
abs(actual_segment.rate - expected_segment.rate) < 0.01
if not ok:
errors.append(
'Got mismatched value in actual video segment {} vs. expected segment {}'.format(act_pos,
expected_segment))
    for pos in range(len(actual)):
        if pos not in matched:
            errors.append('Unexpected value in video segment {}'.format(pos))
    for pos in range(len(expected)):
        if len([match_pos for match_pos in matched.values() if pos == match_pos]) == 0:
            errors.append('Unmatched value in video segment {}'.format(pos))
return errors
def compare_video_segments(segments1, segments2):
return len(match_video_segments(segments1, segments2)) == 0
def compare_probes(probe1, probe2):
return compare_python_objects(probe1, probe2, keys_func={
'donorBaseNodeId': lambda x, y: x == y,
'edgeId': lambda x, y: x == y,
'targetBaseNodeId': lambda x, y: x == y,
'finalNodeId': lambda x, y: x == y,
'targetChangeSizeInPixels': lambda x, y: x == y,
'finalImageFileName': lambda x, y: x == y,
'donorMaskFileName': lambda x, y: compare_images(x, y),
'targetMaskFileName': lambda x, y: compare_images(x, y),
'donorVideoSegments': lambda x, y: compare_video_segments(x, y),
        'targetVideoSegments': lambda x, y: compare_video_segments(x, y)
})
| {"hexsha": "3f728bf7aa2ba6be0c8627b7baed851ceae854eb", "size": 7598, "ext": "py", "lang": "Python", "max_stars_repo_path": "maskgen/serialization/probes.py", "max_stars_repo_name": "j-h-m/Media-Journaling-Tool", "max_stars_repo_head_hexsha": "4ab6961e2768dc002c9bbad182f83188631f01bd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "maskgen/serialization/probes.py", "max_issues_repo_name": "j-h-m/Media-Journaling-Tool", "max_issues_repo_head_hexsha": "4ab6961e2768dc002c9bbad182f83188631f01bd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "maskgen/serialization/probes.py", "max_forks_repo_name": "j-h-m/Media-Journaling-Tool", "max_forks_repo_head_hexsha": "4ab6961e2768dc002c9bbad182f83188631f01bd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7653061224, "max_line_length": 127, "alphanum_fraction": 0.6137141353, "include": true, "reason": "import numpy", "num_tokens": 1601} |
"""
Script to create grid(s), given input args.
"""
# Authors: Gianni Barlacchi <[email protected]>
import argparse
import sys
import logging
import pandas as pd
import gensim
import pkg_resources
from geol.geol_logger.geol_logger import logger
from geol.utils import utils
import re
import os
import numpy as np
def main(argv):
parser = argparse.ArgumentParser('Build your own grid.')
parser.add_argument('-o', '--outputfolder',
help='Output folder where to save the matrix.',
action='store',
dest='outputfolder',
required=True,
type=str)
parser.add_argument('-i', '--input',
help='Input file with point-of-interests. NOTE: in the case of strategy=nearest|alphabetically, the input file must contains the column cellID.',
action='store',
dest='inputfile',
required=True,
type=str)
parser.add_argument('-a', '--area',
action='store',
dest='area',
help='Area name',
default=None,
type=str)
parser.add_argument('-s', '--size',
action='store',
dest='size',
help='Word2Vec words size. Used when employing Google News model.',
default=None,
type=str)
parser.add_argument('-v', '--verbose',
help='Level of output verbosity.',
action='store',
dest='verbosity',
default=0,
type=int,
nargs="?")
args = parser.parse_args()
    if args.verbosity == 1:
        logging.basicConfig(
            format='%(levelname)s: %(message)s', level=logging.INFO)
    elif args.verbosity == 2:
        logging.basicConfig(
            format='%(levelname)s: %(message)s', level=logging.DEBUG)
logger.info("Loading w2v model.")
model = None
ext = tuple([".biz", ".bin"])
if(args.inputfile.endswith(ext)):
model = gensim.models.KeyedVectors.load_word2vec_format(args.inputfile, binary=True)
else:
model = gensim.models.Word2Vec.load(args.inputfile)
tree = pd.read_csv(pkg_resources.resource_filename(
'geol', '/resources/category_tree.csv'), encoding='iso-8859-1')
words = tree['level1_name'].dropna().drop_duplicates().tolist() + \
tree['level2_name'].dropna().drop_duplicates().tolist() + \
tree['level3_name'].dropna().drop_duplicates().tolist() + \
tree['level4_name'].dropna().drop_duplicates().tolist()
    m = re.search('_s([0-9]+)_', args.inputfile)
    if args.size:
        size = args.size
    elif m:
        size = m.group(1)
    else:
        parser.error('Embedding size not given and not found in the model '
                     'file name: pass it explicitly with -s/--size.')
    m = re.search('.+/(.+).model', args.inputfile)
    if m:
        model_details = m.group(1)
    else:
        model_details = 'gnews'
outputfile = os.path.abspath(os.path.join(
args.outputfolder, "matrix_" + args.area + "_" + model_details + ".txt"))
f = open(outputfile, 'w', encoding='utf-8')
for word in words:
word = utils.normalize_word(word)
w = word.split(' ')
v = [0] * int(size)
if len(w) > 1:
tmp_w2v = []
for e in w:
if e in model:
tmp_w2v.append(model[e])
if len(tmp_w2v) > 0:
v = np.mean(tmp_w2v, axis=0)
elif word in model:
v = model[word]
        s = ','.join(map(str, v))
f.write(word.replace(" ", "_") + "::n" + "\t1.0\t0\t" + s + "\n")
f.close()
if __name__ == "__main__":
main(sys.argv[1:])
| {"hexsha": "25965e76b296c05f9e2807f144e0ac0f709f2a6e", "size": 3914, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/SPTK_matrix.py", "max_stars_repo_name": "PyGeoL/GeoL", "max_stars_repo_head_hexsha": "67a5bd2f63091e19041094c14d419055fa5ce6f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-03-09T16:44:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-07T11:33:30.000Z", "max_issues_repo_path": "scripts/SPTK_matrix.py", "max_issues_repo_name": "PyGeoL/GeoL", "max_issues_repo_head_hexsha": "67a5bd2f63091e19041094c14d419055fa5ce6f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-03-24T15:34:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-01T21:54:33.000Z", "max_forks_repo_path": "scripts/SPTK_matrix.py", "max_forks_repo_name": "PyGeoL/GeoL", "max_forks_repo_head_hexsha": "67a5bd2f63091e19041094c14d419055fa5ce6f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-13T14:30:55.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-13T14:30:55.000Z", "avg_line_length": 28.9925925926, "max_line_length": 169, "alphanum_fraction": 0.5143076137, "include": true, "reason": "import numpy", "num_tokens": 856} |
[STATEMENT]
lemma map_ide_simp [simp]:
assumes "A.ide a"
shows "map a = B.inv (\<tau> a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.map a = B.inv (\<tau> a)
[PROOF STEP]
using assms map_def
[PROOF STATE]
proof (prove)
using this:
A.ide a
local.map = \<tau>'.map
goal (1 subgoal):
1. local.map a = B.inv (\<tau> a)
[PROOF STEP]
by fastforce | {"llama_tokens": 159, "file": "Category3_NaturalTransformation", "length": 2} |
import ctypes
import numpy
import six
import cupy
from cupy import cuda
def prod(args, init=1):
for arg in args:
init *= arg
return init
def get_reduced_dims(shape, strides, itemsize):
if not shape:
return (), ()
elif 0 in shape:
return (0,), (itemsize,)
reduced_shape = [shape[0]]
reduced_strides = [strides[0]]
for i in six.moves.range(1, len(shape)):
if strides[i - 1] == shape[i] * strides[i] or \
reduced_shape[-1] == 1:
reduced_shape[-1] *= shape[i]
reduced_strides[-1] = strides[i]
else:
reduced_shape.append(shape[i])
reduced_strides.append(strides[i])
return tuple(reduced_shape), tuple(reduced_strides)
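# Example (not from the original module): a C-contiguous (2, 3) float64 array
# has strides (24, 8) with itemsize 8; the two axes collapse into one
# contiguous run of 6 elements:
#
#     get_reduced_dims((2, 3), (24, 8), 8)  ->  ((6,), (8,))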
def get_reduced_dims_from_array(a):
return get_reduced_dims(a.shape, a.strides, a.itemsize)
def get_strides_for_nocopy_reshape(array, new_shape):
shape, strides = map(list, get_reduced_dims_from_array(array))
new_strides = []
dim = 0
ndim = len(shape)
if len(array.shape) == 0:
last_stride = array.itemsize
else:
last_stride = array.strides[0] * array.shape[0]
for size in new_shape:
if size <= 1:
new_strides.append(last_stride)
continue
if dim >= ndim or shape[dim] % size != 0:
return None
shape[dim] //= size
last_stride = shape[dim] * strides[dim]
new_strides.append(last_stride)
if shape[dim] == 1:
dim = dim + 1
return tuple(new_strides)
def get_contiguous_strides(shape, itemsize):
strides = [itemsize for _ in shape]
for i in six.moves.range(len(strides) - 1, 0, -1):
strides[i - 1] = strides[i] * max(1, shape[i])
return tuple(strides)
def get_ndarray_ptr(a_cpu):
if a_cpu.dtype.type == numpy.bool_:
# Boolean array cannot be directly converted to ctypes
a_cpu = a_cpu.view(dtype=numpy.uint8)
elif a_cpu.dtype.type == numpy.float16:
# Float16 array cannot be directly converted to ctypes
a_cpu = a_cpu.view(dtype=numpy.uint16)
if a_cpu.shape:
return ctypes.cast(numpy.ctypeslib.as_ctypes(a_cpu), ctypes.c_void_p)
else:
return ctypes.cast(ctypes.pointer(numpy.ctypeslib.as_ctypes(a_cpu)),
ctypes.c_void_p)
def complete_slice(slc, dim):
step = 1 if slc.step is None else slc.step
if step == 0:
raise ValueError('Slice step must be nonzero.')
elif step > 0:
start = 0 if slc.start is None else max(0, min(dim, slc.start))
stop = dim if slc.stop is None else max(start, min(dim, slc.stop))
else:
start = dim - 1 if slc.start is None else max(0, min(dim, slc.start))
stop = -1 if slc.stop is None else max(0, min(start, slc.stop))
return slice(start, stop, step)
def get_c_contiguity(shape, strides, itemsize):
if 0 in shape:
return True
_, strides = get_reduced_dims(shape, strides, itemsize)
return len(strides) == 0 or (len(strides) == 1 and strides[0] == itemsize)
def infer_unknown_dimension(shape, size):
    if sum(dim < 0 for dim in shape) > 1:
        raise ValueError('can only specify one unknown dimension')
shape = tuple(dim if dim >= 0 else -1 for dim in shape)
p = prod(shape)
if p < 0:
return tuple(dim if dim >= 0 else size // -p for dim in shape)
else:
return shape
def check_args_device(args):
dev = cuda.Device()
for arg in args:
if isinstance(arg, cupy.ndarray):
arg_dev = arg.data.device
if arg_dev != dev:
raise ValueError('Array device must be same as the current '
'device: array device = %d while current = %d'
% (arg_dev.id, dev.id))
| {"hexsha": "2decec416a21e4aa4a601450d14dceaf3ce9c29a", "size": 3834, "ext": "py", "lang": "Python", "max_stars_repo_path": "cupy/internal.py", "max_stars_repo_name": "umitanuki/chainer", "max_stars_repo_head_hexsha": "225c56b233e684ff4855451d2af4c2fb66915f21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cupy/internal.py", "max_issues_repo_name": "umitanuki/chainer", "max_issues_repo_head_hexsha": "225c56b233e684ff4855451d2af4c2fb66915f21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cupy/internal.py", "max_forks_repo_name": "umitanuki/chainer", "max_forks_repo_head_hexsha": "225c56b233e684ff4855451d2af4c2fb66915f21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-18T00:36:51.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-18T00:36:51.000Z", "avg_line_length": 30.4285714286, "max_line_length": 79, "alphanum_fraction": 0.6074595722, "include": true, "reason": "import numpy,import cupy,from cupy", "num_tokens": 990} |
# Class contains auxiliary methods
from numpy import array
from numpy.linalg import det
from .Intersection import Intersection
def isValidPos(oPos, sl):
if oPos < 0 or oPos >= len(sl):
return False
return True
# credit to Dr. Sheehy for providing orientation class code
def orientation(*points):
    # orientation of 2D points via the homogeneous-coordinate determinant
    d = det(array([(1,) + tuple(p) for p in points]))
    if d > 0:
        return 1  # ccw
    elif d < 0:
        return -1  # cw
    else:
        return 0  # colinear
def checkIntersect(oPos, seg, sl):
# check for intersection
oSeg = sl.get(oPos)
coords = intersects(seg, oSeg)
    if coords:
        return coords # return the Intersection holding the crossing point
return ()
def intersects(seg1, seg2):
l1, r1 = seg1.endpoints() # extract endpoints from each segment
l2, r2 = seg2.endpoints()
ret1 = orientation(l1, r1, l2)
ret2 = orientation(l1, r1, r2)
ret3 = orientation(l2, r2, l1)
ret4 = orientation(l2, r2, r1)
    if ret1 * ret2 < 0 and ret3 * ret4 < 0: # endpoints straddle each other, so the segments properly intersect
# calculate coords
xNum = seg2.getYIntercept() - seg1.getYIntercept()
xDenom = seg1.getSlope() - seg2.getSlope()
x = xNum / xDenom
y = seg1.getSlope() * x + seg1.getYIntercept()
# determine which segment is "above" and which "below"
l3 = seg1.getLeftEndpoint().coords()
l4 = seg2.getLeftEndpoint().coords()
intersection = (x, y)
if orientation(l3, intersection, l4) > 0: # ccw
return Intersection(x, y, seg2, seg1)
elif orientation(l3, intersection, l4) < 0: # cw
return Intersection(x, y, seg1, seg2)
return ()
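# Example (not from the original module): three 2D points given
# counter-clockwise yield a positive homogeneous determinant, hence 1:
#
#     orientation((0, 0), (1, 0), (0, 1))  ->  1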
| {"hexsha": "f7dd69ad24d07da6c84cec4802a6ccd15682c9d3", "size": 1636, "ext": "py", "lang": "Python", "max_stars_repo_path": "linesegmentintersections/helper.py", "max_stars_repo_name": "LiahNikol/line-segment-intersections", "max_stars_repo_head_hexsha": "0a2eb14b54619568d2b79839319c079b27a53d77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "linesegmentintersections/helper.py", "max_issues_repo_name": "LiahNikol/line-segment-intersections", "max_issues_repo_head_hexsha": "0a2eb14b54619568d2b79839319c079b27a53d77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "linesegmentintersections/helper.py", "max_forks_repo_name": "LiahNikol/line-segment-intersections", "max_forks_repo_head_hexsha": "0a2eb14b54619568d2b79839319c079b27a53d77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.701754386, "max_line_length": 108, "alphanum_fraction": 0.6479217604, "include": true, "reason": "from numpy", "num_tokens": 489} |
function mdl_outer_ode!(device_states,
output_ode,
f0,
device::DynInverter{C,VirtualInertiaQdroop{VirtualInertia,ReactivePowerDroop},VC,DC,P,F}) where {C <: Converter,
VC<: VSControl,
DC<: DCSource,
P <: FrequencyEstimator,
F <: Filter}
#Obtain external states inputs for component
external_ix = device.input_port_mapping[device.outercontrol]
vpll_d = device_states[external_ix[1]]
vpll_q = device_states[external_ix[2]]
ϵ_pll = device_states[external_ix[3]]
vod = device_states[external_ix[4]]
voq = device_states[external_ix[5]]
iod = device_states[external_ix[6]]
ioq = device_states[external_ix[7]]
#Obtain inner variables for component
ω_pll = device.inner_vars[ω_freq_estimator_var]
#Get Active Power Controller parameters
Ta = device.outercontrol.active_power.Ta #VSM Inertia constant
kd = device.outercontrol.active_power.kd #VSM damping constant
kω = device.outercontrol.active_power.kω #Frequency droop gain
ωb = device.outercontrol.active_power.ωb #Rated angular frequency
#Get Reactive Power Controller parameters
kq = device.outercontrol.reactive_power.kq #Reactive power droop gain
ωf = device.outercontrol.reactive_power.ωf #Reactive power filter cutoff frequency
#Obtain external parameters
kp_pll = device.freq_estimator.kp_pll
ki_pll = device.freq_estimator.ki_pll
p_ref = device.P_ref
ω_ref = device.ω_ref
V_ref = device.V_ref
q_ref = device.Q_ref
ωg = 1.0
#Obtain indices for component w/r to device
local_ix = device.local_state_ix[device.outercontrol]
#Define internal states for frequency estimator
internal_states = @view device_states[local_ix]
δω_vsm = internal_states[1]
δθ_vsm = internal_states[2]
qm = internal_states[3]
    #Compute the 3 state ODEs
output_ode[local_ix[1]] = (- iod*vod/Ta
- ioq*voq/Ta
+ kd*kp_pll*atan(vpll_q/vpll_d)/Ta
+ kd*ki_pll*ϵ_pll/Ta
- (kd+kω)*δω_vsm/Ta
+ p_ref/Ta
+ kω*ω_ref/Ta
- kω*ωg/Ta)
output_ode[local_ix[2]] = ωb*δω_vsm
output_ode[local_ix[3]] = (- ωf*ioq*vod
+ ωf*iod*voq
- ωf*qm)
#Update inner vars
device.inner_vars[δdqRI_var] = δθ_vsm
device.inner_vars[ω_control_var] = δω_vsm + 1.0
device.inner_vars[v_control_var] = V_ref + kq*(q_ref - qm)
end
| {"hexsha": "2845c571984bf871329aea94684821ccb441283f", "size": 3006, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/models/inverter_models/outer_control_models.jl", "max_stars_repo_name": "UnofficialJuliaMirror/LITS.jl-86b0dc02-7903-11e9-325f-f195ca7e6c1a", "max_stars_repo_head_hexsha": "e27e29e21487737d5faac4180beb269dcc385c79", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-04T07:28:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-04T07:28:06.000Z", "max_issues_repo_path": "src/models/inverter_models/outer_control_models.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/LITS.jl-86b0dc02-7903-11e9-325f-f195ca7e6c1a", "max_issues_repo_head_hexsha": "e81ce7988bbcdf1e001761e5f5564df7eae124d2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/inverter_models/outer_control_models.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/LITS.jl-86b0dc02-7903-11e9-325f-f195ca7e6c1a", "max_forks_repo_head_hexsha": "e81ce7988bbcdf1e001761e5f5564df7eae124d2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.338028169, "max_line_length": 136, "alphanum_fraction": 0.5535595476, "num_tokens": 726} |
# Hep Recommender
> A recommender system for scientific articles in the field of High Energy Physics.
- toc: true
- badges: true
- comments: true
- categories: [jupyter]
- image: images/hep_recommender.png
# Introduction
In this note I want to discuss [hep-recommender](https://hep-recommender.herokuapp.com/), a recommender system for scientific papers in the field of High Energy Physics (HEP) aimed at helping researchers explore the relevant literature. This is a personal project I have recently been working on in collaboration with [José Eliel Camargo Molina](https://github.com/JoseEliel). A [submission](https://devpost.com/software/hep-recommender) of this project to the [Facebook Artificial Intelligence Hackathon (2020)](https://fbai1.devpost.com) won third place.
There are different approaches to determine the degree of similarity between articles in order to identify related work. Some works have used text mining and natural language processing methods. Another popular approach is citation analysis, where the similarity between two articles is estimated from bibliographic information [1,2,4,5]. Finally, other works have used a combination of the previous approaches, see for instance [3].
In this project, we develop a recommender system based on citation analysis. More specifically, we will develop a recommender system that elaborates on the ideas of co-citation analysis [1,4] and co-citation proximity analysis (CPA) [2]. Co-citation analysis is based on the premise that articles which are frequently cited together (by the same papers) should be related to each other. CPA extends this idea by incorporating the notion that the closer the citations are to each other within the article text, the more likely it is that they are related. While these methods are relatively simple, they provide high-quality recommendations of related articles. Our recommender system relies on a distributed representation of articles obtained by training a Skip-Gram model on reference lists. This model also captures the notion that articles cited close to each other in the text are similar.
In this note I want to go to some detail about the model powering our recommender system, how it compares to other approaches, and how we made it available via a web application.
# Related work
I would like to start by discussing some of the standard methods in the literature to quantify the similarity between two articles. I will define the following concepts:
* bibliographic coupling
* co-citation count
* co-citation proximity analysis (CPA)
Bibliographic coupling reflects the idea that two papers sharing a large portion of their references should be similar [5]. Co-citation count assigns a high degree of similarity to articles which are frequently cited together (regardless of where the citations occur within the text) [1]. CPA extends co-citation count by assigning more weight to cases where the articles are cited close to each other within the text [2]. In general CPA is expected to give better results than co-citation count, though it requires some work to parse the references of each article while keeping information about the location of the references within the text.
Let's formalize these concepts. We consider a set of papers $\{ \omega_1, \omega_2, \ldots, \omega_n \}$. We define the quantities $c_{i,j}$, where $c_{i,j}=1$ if $\omega_i$ is cited by $\omega_j$ and $c_{i,j}=0$ otherwise. Then we are ready to introduce our three measures of similarity:
* bibliographic coupling is defined as $\sum_{k=1}^{n} c_{k,i} c_{k,j}$
* co-citation count is defined as $\sum_{k=1}^{n} c_{i,k} c_{j,k}$
* CPA can be used to define a measure $\sum_{k=1}^{n} \eta_{i,j}^{k} c_{i,k} c_{j,k}$, where $\eta_{i,j}^{k}$ are coefficients that penalize cases where the citations occur far from each other within the text.
In the original CPA article [2], $\eta$ was fixed to $1$ if the citations occur in the same sentence, $1/2$ if they only occur in the same paragraph, $1/4$ if they only occur in the same chapter, and $1/8$ if they only appear on the same article. Parametric representations of $\eta$ have also been proposed [7].
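To make these definitions concrete, here is a small sketch (not from the original project) that computes all three measures from a binary citation matrix $C$ with $C_{ij} = c_{i,j}$; the proximity weights `eta` are a hypothetical input, one $n \times n$ matrix per citing article $k$.
```python
import numpy as np
def similarity_measures(C, eta=None):
    """C[i, j] = 1 if article i is cited by article j, else 0."""
    coupling = C.T @ C    # bibliographic coupling: shared references
    cocitation = C @ C.T  # co-citation count: shared citing articles
    cpa = None
    if eta is not None:
        # CPA: co-citation events weighted by their proximity in article k
        cpa = sum(np.outer(C[:, k], C[:, k]) * eta[k] for k in range(C.shape[1]))
    return coupling, cocitation, cpa
```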
# Distributed representation on co-citations
We propose a model that relies on the same idea behind CPA, namely, that the proximity of references within an article provide valuable information regarding the similarity of two articles. However, we wanted to simplify the process of extracting the necessary data as much as possible.
We assume that it is possible to extract the list of references of each article in the order of appearance within the text. For instance, the following article
> Higgs boson pair production in gluon fusion is the most promising process to find out whether the Higgs boson self-coupling is Standard-Model-like. Early studies of Higgs boson pair production within an EFT framework can be found in Refs. [1–3]. Many phenomenological investigations about the potential of this process to reveal New Physics have been performed since, see e.g. Refs. [4–8]...
would give rise to a reference list [1,2,3,4,5,6,7,8,...]. In this way, articles which are frequently close to each other on these reference lists should be very similar, as the community is citing them close to each other within the text of the articles.
We will use an approach that has proven to be very fruitful in Natural Language Processing (NLP). We will take the lists of references and train a Skip-Gram model [6], such that articles which tend to be cited close to each other will have similar embeddings.
Our dataset consists of ordered lists of references. Suppose we have a total of $n$ unique articles in our dataset and for each of these articles we have its reference list. The Skip-Gram model maximizes the following objective function:
\begin{align}
\frac{1}{n} \sum_{q=1}^{n} \left[ \frac{1}{n_q} \sum_{i=1}^{n_q} \sum_{ -c \leq j \leq c ,\, j\neq 0 } \log p(\omega_{i+j}^{q} | \omega_i^{q}) \right]
\end{align}
Here $\omega_{i}^{q}$ represents the embedding of the $i$-th article appearing on the reference list of article $q$; $n_q$ represents the size of the reference list for article $q$, and $c$ represents the context-window size of the Skip-Gram model. The probability is modelled using a softmax function
\begin{align}
p( \omega_{i} | \omega_j ) = \frac{ \exp( \omega_{i} \cdot \omega_{j} )}{ \sum_{l=1}^{n} \exp( \omega_{l} \cdot \omega_j ) }
\end{align}
Just like in the Skip-Gram model used in NLP, in order to make the model training more efficient we need to implement negative sampling or hierarchical softmax. A good implementation of the Skip-Gram model is provided by the [Gensim](https://radimrehurek.com/gensim/auto_examples/index.html) library for instance.
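As an illustration, here is a minimal Gensim sketch (using the Gensim 4 API; not the project's actual training code) that trains such a model on reference lists, with article ids playing the role of words; the window size and dimension below are hypothetical choices.
```python
from gensim.models import Word2Vec
# each "sentence" is the ordered reference list of one article
reference_lists = [
    ['40440', '12289', '12290', '14006'],
    ['12289', '12290', '9159'],
]
model = Word2Vec(
    sentences=reference_lists,
    vector_size=100,  # embedding dimension
    window=5,         # context-window size c
    min_count=1,
    sg=1,             # use the Skip-Gram architecture
)
vector = model.wv['12289']
```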
In order to explore the structure of the trained embeddings, I took a sample of articles and made a t-SNE visualization of their embeddings in two dimensions. For this, I picked a set of articles that were published on the [arXiv](https://arxiv.org). Each arXiv category was displayed with a different color. On this plot, some of the arXiv categories that appear are: astro-ph (red), hep-ex (blue), hep-th (black) and hep-ph (green), among others. As expected, the embeddings form clusters around their categories.
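The projection itself can be reproduced with scikit-learn; a sketch assuming the `model` from the previous snippet:
```python
import numpy as np
from sklearn.manifold import TSNE
ids = list(model.wv.index_to_key)
X = np.stack([model.wv[i] for i in ids])
X_2d = TSNE(n_components=2).fit_transform(X)  # one 2D point per article
```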
# Data
Regarding open access digital libraries, the research community in the field of High Energy Physics mainly uses [INSPIRE-HEP](https://inspirehep.net) and the [arXiv](https://arxiv.org). INSPIRE-HEP provides an API from which data for articles in the field of HEP can be retrieved; we use this API to extract the data we need. Below I show an exploratory example. Let's first define some classes that talk to the API and handle the response.
```python
import requests
from typing import List, Dict
class InspireAPI:
"""
Simple wrapper class around the INSPIRE API
https://inspirehep.net
methods:
literature: gives access to the literature endpoint
"""
LITERATURE = "https://inspirehep.net/api/literature/"
def __init__(self):
pass
def literature(self, record_id: str):
"""
Returns api response for a given record_id
"""
url = self.LITERATURE + record_id
return LiteratureRecord(requests.get(url).json())
class LiteratureRecord:
"""
Datamodel class for handling literature record data,
implementing basic methods to access the properties
"""
def __init__(self, data: Dict):
self.data = data
@property
def record_id(self) -> str:
"""
Returns the INSPIRE id of the article
"""
return self.data['id']
@property
def metadata(self) -> Dict:
"""
Returns article metadata
"""
return self.data['metadata']
    @property
    def references(self) -> List[str]:
        """
        Returns the reference list of the article as a list of INSPIRE article ids
        """
        if self.metadata.get("references"):
            return [
                element["record"]["$ref"].split("/")[-1]
                for element in self.metadata["references"]
                if element.get("record")
            ]
        return []
```
We can now look at an example. All the bibliographic data is contained in the *metadata* property of our LiteratureRecord class. I implemented a *references* property in order to retrieve the list of references (only the article identifiers). Let's extract data for the article with the identifier ['11883'](https://labs.inspirehep.net/literature/11883),
```python
inspireapi = InspireAPI()
record = inspireapi.literature('11883')
record.references
```
['40440',
'12289',
'12290',
'14006',
'12291',
'12288',
'9159',
'43800',
'43801']
In this way we are able to extract the lists of references we need for our model. Note that the INSPIRE API returns references in the order of appearance in the reference section of the article, and, by tradition, the HEP community orders references by their order of appearance in the text.
# Recommendations
We can use the Skip-Gram model embeddings and some metric on the vector space to provide recommendations for articles with at least some citations. Very recent or unpopular articles with no citations will not have embeddings, and we must find another method to provide recommendations for them. For this reason, we consider two scenarios when we want to provide recommendations for a given article:
*i)* The article has embeddings produced by our Skip-Gram model.
*ii)* The article has no embeddings from the Skip-Gram model.
In case *i)* we retrieve the top similar articles using cosine similarity as the metric. Cosine similarity is defined as $\cos \theta_{12} = \hat \omega_{1} \cdot \hat \omega_{2}$, where $\hat \omega = \omega/|\omega|$ is a unit-norm vector. In this case we retrieve as recommendations the articles whose embeddings have the smallest angle with the embedding of the original article.
In case *ii)*, we don't have an embedding, but we can build one out of the references of the article. We retrieve the references of the article by calling the INSPIRE-HEP API and then take the average vector $\omega_{\rm{avg}}^q = 1/n_q \sum_{i=1}^{n_q} \omega_i^{q}$ of the references. We can then proceed to retrieve the top similar articles using cosine similarity as before.
These two simple methods are able to provide recommendations for most articles.
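A minimal sketch of both retrieval paths (assuming the Gensim `model` and the `InspireAPI` wrapper defined above; this is not the deployed implementation):
```python
import numpy as np
def recommend(record_id, model, api, topn=10):
    if record_id in model.wv:
        # case i): the article already has a Skip-Gram embedding
        return model.wv.most_similar(record_id, topn=topn)
    # case ii): average the embeddings of the article's references
    refs = api.literature(record_id).references
    vectors = [model.wv[r] for r in refs if r in model.wv]
    avg = np.mean(vectors, axis=0)  # assumes at least one known reference
    return model.wv.most_similar(positive=[avg], topn=topn)
```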
# Web application
The recommender system has been made available at [hep-recommender](https://hep-recommender.herokuapp.com/). It is a Flask web application currently hosted on Heroku behind a gunicorn server, with the model artifacts stored in AWS S3.
# Discussion
To see one example of the application, we can look at the recommendations of similar articles for:
["Broken Symmetries and the Masses of Gauge Bosons"](https://labs.inspirehep.net/literature/11883) by Higgs, Peter W. (1964).
This article is one of the works behind the 2013 Nobel Prize in Physics for the theoretical development of the so-called Higgs mechanism. The recommendations from our system are very good, including the other articles which were also responsible for these theoretical developments, as acknowledged by the community.
Another interesting example is this one
["Unitary Symmetry and Leptonic Decays"](http://www.hep-recommender.com/?article=4510) by Cabibbo, Nicola (1963)
This article introduced the idea of quark mixing when only two quark generations were known. The recommendations include the article
["CP Violation in the Renormalizable Theory of Weak Interaction"](https://labs.inspirehep.net/literature/81350) by Kobayashi, Makoto; Maskawa, Toshihide (1973)
which extends the idea of Cabibbo to three quark generations and gave rise to what is now known as the CKM matrix.
Exploring a large set of articles, I was surprised by the high quality of the recommendations. Several experts in the field of HEP have also been using the application and find the recommendations satisfactory.
# References
[[1]](https://www.semanticscholar.org/paper/Co-citation-in-the-scientific-literature%3A-A-new-of-Small/da30b84925764b550b55c7d00596f8f1b9608fe2) H. Small, “Co-citation in the scientific literature: A new measure of the relationship between two documents” Journal of the American Society for Information Science, vol. 24, no. 4, pp. 265–269, 1973.
[[2]](https://www.semanticscholar.org/paper/Citation-Proximity-Analysis-%28CPA%28-%3A-A-New-Approach-Gipp-Beel/22b519fcf4b623bef5cd702f2e1c995640d4c833) B. Gipp and J. Beel, “Citation proximity analysis (CPA): A new approach for identifying related work based on co-citation analysis” in Proceedings of the 12th International Conference on Scientometrics and Informetrics, vol. 1 (B. Larsen, ed.), (Sao Paulo), pp. 571–575, BIREME/PANO/WHO, 2009.
[[3]](https://www.semanticscholar.org/paper/A-Scalable-Hybrid-Research-Paper-Recommender-System-Kanakia-Shen/bb246e08bc6641672c2bb2b93d4214eccf3f84b6) A. Kanakia, Z. Shen, D. Eide, and K. Wang, “A scalable hybrid research paper recommender system for microsoft academic” CoRR, vol. abs/1905.08880, 2019.
[[4]](https://www.semanticscholar.org/paper/System-of-Document-Connections-Based-on-References-Marshakova-shaikevich/2d871489eb7288dd1bec4be99bc363efd4933d48) I. V. Marshakova-Shaikevich, “System of document connections based on references” 2009.
[[5]](https://www.semanticscholar.org/paper/Bibliographic-coupling-between-scientific-papers-Kessler/68300052245f0f3aed1a4d65943e436cf1227242) M. M. Kessler, “Bibliographic coupling between scientific papers” American Documentation, vol. 14, no. 1, pp. 10–25, 1963.
[[6]](https://www.semanticscholar.org/paper/Efficient-Estimation-of-Word-Representations-in-Mikolov-Chen/330da625c15427c6e42ccfa3b747fb29e5835bf0) T. Mikolov, K. Chen, G. S. Corrado, and J. Dean, “Efficient estimation of word representations in vector space” CoRR, vol. abs/1301.3781, 2013.
[[7]](https://www.semanticscholar.org/paper/Evaluating-link-based-recommendations-for-Wikipedia-Schwarzer-Schubotz/7c72a2008f94078ce50ac251c6437038bbcf0185) M. Schwarzer, M. Schubotz, N. Meuschke, C. Breitinger, V. Markl, and B. Gipp, “Evaluating link-based recommendations for wikipedia” 2016 IEEE/ACM Joint Conference on Digital Libraries (JCDL), pp. 191–200, 2016.
| {"hexsha": "46c1c34efc98f4665aef8c0d409aaac66f6206e6", "size": 19499, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "_notebooks/2020-02-20-hep-recommender.ipynb", "max_stars_repo_name": "celis/personal", "max_stars_repo_head_hexsha": "c10d3159f830c463711aec6db6e6189c09c50449", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "_notebooks/2020-02-20-hep-recommender.ipynb", "max_issues_repo_name": "celis/personal", "max_issues_repo_head_hexsha": "c10d3159f830c463711aec6db6e6189c09c50449", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-05-20T21:08:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-28T05:30:20.000Z", "max_forks_repo_path": "_notebooks/2020-02-20-hep-recommender.ipynb", "max_forks_repo_name": "celis/personal", "max_forks_repo_head_hexsha": "c10d3159f830c463711aec6db6e6189c09c50449", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.619047619, "max_line_length": 907, "alphanum_fraction": 0.6548540951, "converted": true, "num_tokens": 3893} |
#include <HElib/FHE.h>
#include <HElib/FHEContext.h>
#include <HElib/EncryptedArray.h>
#include <HElib/NumbTh.h>
#include "SMP/Matrix.hpp"
#include "SMP/Timer.hpp"
#include "SMP/literal.hpp"
#include "SMP/network/net_io.hpp"
#include <boost/asio.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <iostream>
#include <list>
#include <numeric>
#include <string>
#include <vector>
using boost::asio::ip::tcp;
constexpr long REPEATS = 10;
struct EncVec {
long length;
std::vector<Ctxt> ctxts;
};
void pack_vector(std::vector<NTL::ZZX> &out, Matrix const& mat, long COL, EncryptedArray const* ea)
{
const long ROWS = mat.NumRows();
long num_polys = (ROWS + ea->size() - 1) / ea->size();
long itr = 0;
out.resize(num_polys);
for (long i = 0; i < num_polys; i++) {
std::vector<long> slots(ea->size(), 0);
for (long j = 0; j < ea->size() and itr < ROWS; j++) {
slots[j] = mat[itr++][COL];
}
ea->encode(out[i], slots);
}
}
void rotate_vector(Matrix &vec, long offset)
{
assert(vec.NumCols() == 1);
long length = vec.NumRows();
while (offset < 0)
offset += length;
if (offset == 0)
return;
Matrix tmp;
tmp.SetDims(vec.NumRows(), 1);
for (long i = 0; i < length; i++)
tmp[i][0] = vec[(i + offset) % length][0];
vec = tmp;
}
void encrypt_vector(EncVec &out,
Matrix const& vec,
FHESecKey const& key,
double *pack_time = nullptr)
{
assert(vec.NumCols() == 1);
FHEcontext const& context= key.getContext();
EncryptedArray const* ea = context.ea;
out.length = vec.NumRows();
std::vector<NTL::ZZX> polys;
{
AutoTimer timer(pack_time);
pack_vector(polys, vec, 0, ea);
}
out.ctxts.resize(polys.size(), Ctxt(key));
for (size_t i = 0; i < polys.size(); i++) {
key.Encrypt(out.ctxts[i], polys.at(i));
}
}
void columns_to_vector(Matrix &vec, Matrix const&mat, long from, long to)
{
vec.SetDims((to - from) * mat.NumRows(), 1);
for (long j = from; j < to; j++) {
long offset = (j - from) * mat.NumRows();
for (long i = 0; i < mat.NumRows(); i++) {
vec[offset + i][0] = mat[i][j];
}
}
}
void rows_to_vector(Matrix &vec, Matrix const&mat, long from, long to)
{
assert(from >= 0 && to > from && to <= mat.NumRows());
const long length = (to - from) * mat.NumCols();
vec.SetDims(length, 1);
for (long i = from; i < to; i++) {
long offset = (i - from) * mat.NumCols();
for (long j = 0; j < mat.NumCols(); j++) {
vec[offset + j][0] = mat[i][j];
}
}
}
/// To fully pack the matrix, we concat each row of matrix into a long vector.
void encrypt_matrix_as_vector(EncVec &out,
Matrix const& mat,
FHESecKey const& key,
double *pack_time = nullptr)
{
Matrix vec;
rows_to_vector(vec, mat, 0, mat.NumRows());
encrypt_vector(out, vec, key, pack_time);
}
void randomize(Matrix &mat) {
for (long i = 0; i < mat.NumRows(); i++)
for (long j = 0; j < mat.NumCols(); j++)
mat[i][j] = NTL::RandomBnd(10L);
}
/// Multiply the row-packed encrypted matrix by the plaintext matrix `mat`.
/// A fresh random plaintext share is added to every product ciphertext, so
/// the decrypting party only learns a blinded additive share of the result.
void mat_mult(std::list<Ctxt> &out,
EncVec const& enc_mat,
Matrix const& mat,
EncryptedArray const *ea)
{
const auto row_cnt = mat.NumRows();
const auto col_cnt = mat.NumCols();
long cols_per_ctxt = ea->size() / row_cnt;
assert(cols_per_ctxt >= 1 && "Current version only work for n2 <= ea->size()");
for (long col = 0; col < col_cnt; col += cols_per_ctxt) {
long cols_to_pack = std::min(col_cnt - col, cols_per_ctxt);
Matrix vec;
columns_to_vector(vec, mat, col, col + cols_to_pack);
Matrix rnd;
rnd.SetDims(cols_to_pack * row_cnt, 1);
        for (long rot = 0; rot < cols_to_pack; rot++) { // rotation
long offset = rot * row_cnt;
rotate_vector(vec, offset);
std::vector<NTL::ZZX> polys;
pack_vector(polys, vec, 0, ea); // pack the 0-th column
assert(polys.size() == 1);
for (const auto &ctx : enc_mat.ctxts) {
Ctxt tmp(ctx);
tmp.multByConstant(polys[0]); // multiplication
randomize(rnd);
std::vector<NTL::ZZX> rnd_poly;
pack_vector(rnd_poly, rnd, 0, ea);
assert(rnd_poly.size() == 1);
tmp.addConstant(rnd_poly[0]); // adding the random share
out.push_back(std::move(tmp));
}
}
}
}
struct ClientBenchmark {
std::vector<double> pack_times;
std::vector<double> enc_times;
std::vector<double> unpack_times;
std::vector<double> dec_times;
std::vector<double> total_times;
int ctx_sent, ctx_recv;
};
ClientBenchmark clt_ben;
struct ServerBenchmark {
std::vector<double> eval_times;
};
ServerBenchmark srv_ben;
void play_client(tcp::iostream &conn,
FHESecKey &sk,
FHEcontext &context,
const long n1,
const long n2,
const long n3)
{
sk.convertToSymmetric();
FHEPubKey ek(sk);
conn << ek; // send evaluation key
const EncryptedArray *ea = context.ea;
const long l = ea->size();
const long d = ea->getDegree();
NTL::SetSeed(NTL::to_ZZ(123)); //use fixed seed for debugging
Matrix A, B;
A.SetDims(n1, n2);
B.SetDims(n2, n3);
randomize(A);
randomize(B);
EncVec enc_vec;
double pack_time;
double enc_time;
do {
AutoTimer timer(&enc_time);
encrypt_matrix_as_vector(enc_vec, A, sk, &pack_time);
} while(0);
clt_ben.pack_times.push_back(pack_time);
clt_ben.enc_times.push_back(enc_time);
conn << static_cast<int64_t>(enc_vec.ctxts.size()) << std::endl;
for (const auto &ctx : enc_vec.ctxts)
conn << ctx;
clt_ben.ctx_sent = enc_vec.ctxts.size();
int64_t result_ctx_cnt;
conn >> result_ctx_cnt;
clt_ben.ctx_recv = result_ctx_cnt;
std::vector<Ctxt> result(result_ctx_cnt, Ctxt(ek));
for (size_t i = 0; i < result_ctx_cnt; i++)
conn >> result[i];
double eval_time = 0.;
conn >> eval_time;
srv_ben.eval_times.push_back(eval_time);
double dec_time;
double unpack_time;
std::vector<long> slots;
NTL::ZZX decrypted;
for (const auto &ctx : result) {
double one_dec_time;
double one_unpack_time;
{
AutoTimer timer(&one_dec_time);
if (!ctx.isCorrect())
std::cerr << "decryption might fail" << std::endl;
sk.Decrypt(decrypted, ctx);
}
{
AutoTimer timer(&one_unpack_time);
ea->decode(slots, decrypted);
}
dec_time += one_dec_time;
unpack_time += one_unpack_time;
}
clt_ben.dec_times.push_back(dec_time);
clt_ben.unpack_times.push_back(unpack_time);
}
void play_server(tcp::iostream &conn,
const long n1,
const long n2,
const long n3) {
FHEcontext context = receive_context(conn);
FHEPubKey evk(context);
conn >> evk;
NTL::SetSeed(NTL::to_ZZ(123)); //use fixed seed for debugging
Matrix A, B;
A.SetDims(n1, n2);
B.SetDims(n2, n3);
randomize(A);
randomize(B);
int64_t ctx_cnt;
conn >> ctx_cnt;
EncVec enc_vec;
enc_vec.ctxts.resize(ctx_cnt, Ctxt(evk));
for (size_t i = 0; i < ctx_cnt; i++) // receive ciphertexts from the client.
conn >> enc_vec.ctxts[i];
double eval_time;
std::list<Ctxt> result;
{
AutoTimer timer(&eval_time);
mat_mult(result, enc_vec, B, context.ea);
}
srv_ben.eval_times.push_back(eval_time);
conn << static_cast<int64_t>(result.size()) << std::endl;
for (const auto &ctx : result) // sending result back
conn << ctx;
conn << eval_time; // send back eval time for statistics
}
int run_client(std::string const& addr, long port,
long n1, long n2, long n3) {
const long m = 8192;
const long p = 65537;
const long r = 1;
const long L = 3;
NTL::zz_p::init(p);
FHEcontext context(m, p, r);
context.bitsPerLevel = 20; // trial and error to find this value.
buildModChain(context, L);
std::cerr << "kappa = " << context.securityLevel() << std::endl;
std::cerr << "slot = " << context.ea->size() << std::endl;
std::cerr << "degree = " << context.ea->getDegree() << std::endl;
FHESecKey sk(context);
sk.GenSecKey(64);
for (long t = 0; t < REPEATS; t++) {
tcp::iostream conn(addr, std::to_string(port));
if (!conn) {
std::cerr << "Can not connect to server!" << std::endl;
return -1;
}
/// send FHEcontext obj
send_context(conn, context);
double all_time;
do {
AutoTimer time(&all_time);
play_client(conn, sk, context, n1, n2, n3);
} while(0);
clt_ben.total_times.push_back(all_time);
conn.close();
}
return 1;
}
int run_server(long port, long n1, long n2, long n3) {
boost::asio::io_service ios;
tcp::endpoint endpoint(tcp::v4(), port);
tcp::acceptor acceptor(ios, endpoint);
for (long run = 0; run < REPEATS; run++) {
tcp::iostream conn;
boost::system::error_code err;
acceptor.accept(*conn.rdbuf(), err);
if (!err) {
play_server(conn, n1, n2, n3);
}
}
return 0;
}
int main(int argc, char *argv[]) {
ArgMapping argmap;
long role = -1;
long n1 = 8;
long n2 = 8;
long n3 = 8;
std::string addr = "127.0.0.1";
long port = 12345;
argmap.arg("N", n1, "n1");
argmap.arg("M", n2, "n2");
argmap.arg("D", n3, "n3");
argmap.arg("R", role, "role. 0 for server and 1 for client");
argmap.arg("a", addr, "server address");
argmap.arg("p", port, "port");
argmap.parse(argc, argv);
if (role == 0) {
run_server(port, n1, n2, n3);
} else if (role == 1) {
int st = run_client(addr, port, n1, n2, n3);
auto times = mean_std(clt_ben.pack_times);
printf("%.3f %.3f ", times.first, times.second);
times = mean_std(clt_ben.enc_times);
printf("%.3f %.3f ", times.first, times.second);
times = mean_std(clt_ben.dec_times);
printf("%.3f %.3f ", times.first, times.second);
times = mean_std(clt_ben.unpack_times);
printf("%.3f %.3f ", times.first, times.second);
times = mean_std(clt_ben.total_times);
printf("%.3f %.3f ", times.first, times.second);
times = mean_std(srv_ben.eval_times);
printf("%.3f %.3f ", times.first, times.second);
printf("%d %d\n", clt_ben.ctx_sent, clt_ben.ctx_recv);
} else {
argmap.usage("General Matrix Multiplication for |N*M| * |M*D|");
return -1;
}
}
#if 0
int main(int argc, char *argv[]) {
ArgMapping argmap;
long n1 = 8;
long n2 = 8;
long n3 = 8;
argmap.arg("N", n1, "n1");
argmap.arg("M", n2, "n2");
argmap.arg("K", n3, "n3");
argmap.parse(argc, argv);
Matrix A, B;
A.SetDims(n1, n2);
randomize(A);
B.SetDims(n2, n3);
randomize(B);
Matrix AB = mul(A, B);
FHEcontext context(8192, 8191, 1);
std::cout << "slots " << context.ea->size() << std::endl;
buildModChain(context, 4);
std::cout << "security level " << context.securityLevel() << std::endl;
FHESecKey sk(context);
sk.GenSecKey(64);
auto ea = context.ea;
EncVec enc_vec;
double pack_time;
double enc_time;
{
AutoTimer timer(&enc_time);
encrypt_matrix_as_vector(enc_vec, A, sk, &pack_time);
}
std::list<Ctxt> result;
double eval_time;
{
AutoTimer timer(&eval_time);
mat_mult(result, enc_vec, B, ea);
}
double dec_time;
{
AutoTimer timer(&dec_time);
std::vector<long> slots;
for (auto &ctx : result) {
ea->decrypt(ctx, sk, slots);
}
}
printf("%.3f %.3f %.3f %zd %zd\n",
enc_time, dec_time, eval_time,
enc_vec.ctxts.size(), result.size());
return 0;
}
#endif
| {"hexsha": "ce43fbc2979c61026c432f9c88b97580afeb46dd", "size": 11274, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/MiniONN.cpp", "max_stars_repo_name": "Vampsj/SMP", "max_stars_repo_head_hexsha": "ec332ed29bc33685d050478090e0a679ddef0e4d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/MiniONN.cpp", "max_issues_repo_name": "Vampsj/SMP", "max_issues_repo_head_hexsha": "ec332ed29bc33685d050478090e0a679ddef0e4d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MiniONN.cpp", "max_forks_repo_name": "Vampsj/SMP", "max_forks_repo_head_hexsha": "ec332ed29bc33685d050478090e0a679ddef0e4d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7156398104, "max_line_length": 99, "alphanum_fraction": 0.6163739578, "num_tokens": 3453} |
(* Definitions and theory of natural numbers that is useful in cryptographi proofs. *)
Set Implicit Arguments.
Require Export Arith.
Require Export Omega.
Require Export Arith.Div2.
Require Export Coq.Numbers.Natural.Peano.NPeano.
Require Import Coq.NArith.BinNat.
Lemma mult_same_r : forall n1 n2 n3,
n3 > 0 ->
n1 * n3 = n2 * n3 ->
n1 = n2.
induction n1; destruct n2; intuition; simpl in *.
remember (n2 * n3) as x.
omega.
remember (n1 * n3) as x.
omega.
f_equal.
eapply IHn1; eauto.
eapply plus_reg_l. eauto.
Qed.
Lemma mult_same_l : forall n3 n1 n2,
n3 > 0 ->
n3 * n1 = n3 * n2 ->
n1 = n2.
intuition.
eapply mult_same_r; eauto.
rewrite mult_comm.
rewrite (mult_comm n2 n3).
trivial.
Qed.
Lemma mult_gt_0 : forall n1 n2,
n1 > 0 ->
n2 > 0 ->
n1 * n2 > 0.
destruct n1; intuition; simpl in *.
remember (n1 * n2) as x.
omega.
Qed.
Lemma minus_eq_compat : forall n1 n2 n3 n4,
n1 = n2 ->
n3 = n4 ->
n1 - n3 = n2 - n4.
intuition.
Qed.
Lemma plus_eq_compat : forall n1 n2 n3 n4,
n1 = n2 ->
n3 = n4 ->
n1 + n3 = n2 + n4.
intuition.
Qed.
Lemma minus_diag_eq : forall n1 n2,
n1 = n2 ->
n1 - n2 = 0.
intuition.
Qed.
Lemma le_eq : forall n1 n2,
n1 = n2 ->
n1 <= n2.
intuition.
Qed.
Lemma minus_add_assoc : forall n1 n2 n3,
(n3 <= n2)%nat ->
(n1 + (n2 - n3) = n1 + n2 - n3)%nat.
intuition.
Qed.
Class nz (a : nat) := {
agz : a > 0
}.
Instance nz_nat : forall (n : nat), nz (S n).
intuition.
econstructor.
omega.
Defined.
Definition posnat := {n : nat | n > 0}.
Definition posnatToNat(p : posnat) :=
match p with
| exist n _ => n
end.
Inductive posnatEq : posnat -> posnat -> Prop :=
| posnatEq_intro :
forall (n1 n2 : nat) pf1 pf2,
n1 = n2 ->
posnatEq (exist _ n1 pf1) (exist _ n2 pf2).
Definition posnatMult(p1 p2 : posnat) : posnat :=
match (p1, p2) with
| (exist n1 pf1, exist n2 pf2) =>
(exist (fun n => n > 0) (n1 * n2) (mult_gt_0 pf1 pf2))
end.
Lemma posnatMult_comm : forall p1 p2,
(posnatEq (posnatMult p1 p2) (posnatMult p2 p1)).
intuition.
unfold posnatMult.
destruct p1; destruct p2.
econstructor.
apply mult_comm.
Qed.
Coercion posnatToNat : posnat >-> nat.
Lemma posnat_pos : forall (p : posnat),
p > 0.
intuition.
destruct p.
unfold posnatToNat.
trivial.
Qed.
Instance nz_posnat : forall (p : posnat),
nz p.
intuition.
econstructor.
eapply posnat_pos.
Qed.
Definition natToPosnat(n : nat)(pf : nz n) :=
(exist (fun x => x > 0) n agz).
Notation "'pos' x" := (@natToPosnat x _) (at level 40).
Fixpoint expnat n1 n2 :=
match n2 with
| 0 => 1
| S n2' =>
n1 * (expnat n1 n2')
end.
Theorem expnat_pos : forall x n,
x > 0 ->
expnat x n > 0.
induction n; intuition; simpl in *.
remember (x * expnat x n) as y.
assert (y <> 0); try omega.
intuition; subst.
apply mult_is_O in H1.
destruct H1; omega.
Qed.
Lemma div2_le : forall n,
le (div2 n) n.
intuition.
eapply NPeano.div2_decr.
omega.
Qed.
Lemma div2_ge_double : forall n,
n >= (div2 n) + (div2 n).
intuition.
destruct (Even.even_odd_dec n).
rewrite (even_double n) at 1.
unfold double.
omega.
trivial.
rewrite (odd_double n) at 1.
unfold double.
omega.
trivial.
Qed.
Local Open Scope N_scope.
Definition modNat (n : nat)(p : posnat) : nat :=
N.to_nat ((N.of_nat n) mod (N.of_nat p)).
Lemma modNat_plus : forall n1 n2 p,
(modNat (n1 + n2) p = modNat ((modNat n1 p) + n2) p)%nat.
unfold modNat.
intuition.
rewrite Nnat.Nat2N.inj_add.
rewrite <- N.add_mod_idemp_l.
f_equal.
rewrite <- (Nnat.Nat2N.id n2) at 2.
rewrite Nnat.Nat2N.inj_add.
repeat rewrite Nnat.N2Nat.id.
trivial.
destruct p.
simpl.
destruct x;
simpl.
omega.
Lemma Npos_nz : forall p,
Npos p <> N0.
destruct p; intuition; simpl in *.
inversion H.
inversion H.
inversion H.
Qed.
apply Npos_nz.
Qed.
Lemma modNat_arg_eq : forall (p : posnat),
modNat p p = O.
intuition.
unfold modNat.
rewrite N.mod_same.
trivial.
unfold N.of_nat, posnatToNat.
destruct p.
destruct x.
omega.
apply Npos_nz.
Qed.
Lemma of_nat_ge_0 : forall n,
0 <= N.of_nat n.
intuition.
unfold N.of_nat.
destruct n.
intuition.
simpl.
unfold N.le.
case_eq ((0 ?= N.pos (Pos.of_succ_nat n))); intuition;
try discriminate.
Qed.
Lemma of_posnat_gt_0 : forall (p : posnat),
0 < N.of_nat p.
intuition.
unfold N.of_nat, posnatToNat.
destruct p.
destruct x.
omega.
destruct x; intuition; simpl in *.
case_eq (N.compare 0 1)%N; intuition.
inversion H.
inversion H.
case_eq (N.compare 0 (N.pos (Pos.succ (Pos.of_succ_nat x))))%N; intuition.
inversion H.
inversion H.
Qed.
Lemma modNat_lt : forall x p, (modNat x p < p)%nat.
intuition.
unfold modNat.
assert (N.of_nat x mod N.of_nat p < N.of_nat p)%N.
apply N.mod_bound_pos.
apply of_nat_ge_0.
apply of_posnat_gt_0.
specialize (Nnat.N2Nat.inj_compare); intuition.
rewrite <- (Nnat.Nat2N.id p) at 2.
apply nat_compare_lt.
rewrite <- H0.
apply N.compare_lt_iff.
trivial.
Qed.
Lemma modNat_eq : forall (n : posnat) x, (x < n -> modNat x n = x)%nat.
intuition.
unfold modNat.
rewrite N.mod_small.
apply Nnat.Nat2N.id.
specialize (Nnat.N2Nat.inj_compare); intuition.
specialize (N.compare_lt_iff (N.of_nat x) (N.of_nat n)); intuition.
apply H2.
rewrite H0.
repeat rewrite Nnat.Nat2N.id.
apply nat_compare_lt.
trivial.
Qed.
Definition modNatAddInverse (n : nat)(p : posnat) :=
(p - (modNat n p))%nat.
Lemma modNatAddInverse_correct_gen : forall x y p,
modNat x p = modNat y p ->
modNat (x + modNatAddInverse y p) p = O.
intuition.
unfold modNatAddInverse.
rewrite <- H.
rewrite modNat_plus.
rewrite minus_add_assoc.
rewrite (plus_comm).
rewrite <- minus_add_assoc.
rewrite minus_diag.
rewrite plus_0_r.
apply modNat_arg_eq.
trivial.
assert (modNat x p < p)%nat.
apply modNat_lt.
omega.
Qed.
Lemma modNatAddInverse_correct : forall n p,
modNat (n + modNatAddInverse n p) p = O.
intuition.
eapply modNatAddInverse_correct_gen.
trivial.
Qed.
Lemma modNat_correct : forall x (p : posnat),
exists k, (x = k * p + modNat x p)%nat.
intuition.
unfold modNat in *.
assert (p > 0)%nat.
eapply posnat_pos.
assert (posnatToNat p <> 0)%nat.
omega.
assert (N.of_nat p <> 0%N).
intuition.
eapply H0.
rewrite <- Nnat.Nat2N.id.
rewrite <- (Nnat.Nat2N.id p).
f_equal.
trivial.
exists (N.to_nat (N.of_nat x / N.of_nat p)).
rewrite N.mod_eq; trivial.
rewrite <- (Nnat.Nat2N.id p) at 2.
rewrite <- Nnat.N2Nat.inj_mul.
rewrite <- Nnat.N2Nat.inj_add.
rewrite N.mul_comm.
remember (N.of_nat p * (N.of_nat x / N.of_nat p)) as z.
rewrite N.add_sub_assoc.
rewrite N.add_comm.
rewrite N.add_sub.
rewrite Nnat.Nat2N.id.
trivial.
subst.
eapply N.mul_div_le.
trivial.
Qed.
Lemma modNat_divides : forall x p,
modNat x p = O ->
exists k, (x = k * p)%nat.
intuition.
destruct (modNat_correct x p).
rewrite H in H0.
econstructor.
rewrite plus_0_r in H0.
eauto.
Qed.
Local Open Scope nat_scope.
Lemma modNatAddInverse_sum_0 : forall x y p,
modNat (x + (modNatAddInverse y p)) p = O ->
modNat x p = modNat y p.
intuition.
assert (modNat x p < p).
eapply modNat_lt.
assert (modNat y p < p).
eapply modNat_lt.
rewrite modNat_plus in H.
unfold modNatAddInverse in *.
rewrite minus_add_assoc in H; intuition.
rewrite plus_comm in H.
apply modNat_divides in H.
destruct H.
remember (modNat x p) as a.
remember (modNat y p) as b.
assert (p + a >= p).
omega.
assert (p + a < 2 * p)%nat.
omega.
assert (p + a - b < 2 * p).
omega.
assert (p + a - b > 0).
omega.
assert (x0 * p > 0).
omega.
assert (x0 * p < 2 * p).
omega.
destruct x0.
omega.
destruct x0.
simpl in H.
rewrite plus_0_r in H.
omega.
assert (p > 0).
eapply posnat_pos.
simpl in H7.
remember (x0 * p)%nat as c.
omega.
Qed.
Lemma modNat_correct_if : forall x y z (p : posnat),
x * p + y = z ->
modNat z p = modNat y p.
induction x; intuition; simpl in *.
subst.
trivial.
assert (x * p + (y + p) = z).
omega.
apply IHx in H0.
rewrite H0.
rewrite plus_comm.
rewrite modNat_plus.
rewrite modNat_arg_eq.
rewrite plus_0_l.
trivial.
Qed.
Lemma modNat_mult : forall x (p : posnat),
modNat (x * p) p = 0.
induction x; intuition; simpl in *.
rewrite modNat_plus.
rewrite modNat_arg_eq.
rewrite plus_0_l.
eauto.
Qed.
Lemma modNat_add_same_l : forall x y z p,
modNat (x + y) p = modNat (x + z) p ->
modNat y p = modNat z p.
induction x; intuition; simpl in *.
assert (S (x + y) = x + S y).
omega.
rewrite H0 in H.
clear H0.
assert (S (x + z) = x + S z).
omega.
rewrite H0 in H.
clear H0.
apply IHx in H.
destruct (modNat_correct (S y) p).
destruct (modNat_correct (S z) p).
rewrite H in H0.
assert (S y - x0 * p = modNat (S z) p).
omega.
assert (S z - x1 * p = modNat (S z) p).
omega.
rewrite <- H2 in H3.
assert (z - x1 * p = y - x0 * p).
omega.
assert (x1 * p + y = x0 * p + z).
omega.
apply modNat_correct_if in H5.
rewrite modNat_plus in H5.
rewrite modNat_mult in H5.
rewrite plus_0_l in H5.
auto.
Qed.
Lemma modNat_add_same_r : forall x y z p,
modNat (y + x) p = modNat (z + x) p ->
modNat y p = modNat z p.
intuition.
eapply (modNat_add_same_l x y z).
rewrite plus_comm.
rewrite H.
rewrite plus_comm.
trivial.
Qed.
Lemma expnat_base_S : forall n k,
((expnat k n) + n * (expnat k (pred n)) <= expnat (S k) n)%nat.
induction n; intuition.
simpl in *.
eapply le_trans.
Focus 2.
eapply plus_le_compat.
eapply IHn.
eapply mult_le_compat.
eapply le_refl.
eapply IHn.
rewrite mult_plus_distr_l.
repeat rewrite mult_assoc.
repeat rewrite plus_assoc.
eapply plus_le_compat.
rewrite plus_comm.
eapply plus_le_compat.
rewrite <- (plus_0_r (expnat k n)) at 1.
eapply plus_le_compat.
omega.
intuition.
intuition.
rewrite (mult_comm k n).
rewrite <- (mult_assoc n).
destruct n; simpl; intuition.
Qed.
Lemma expnat_base_S_same : forall n,
n > 0 ->
(2 * (expnat n n) <= expnat (S n) n)%nat.
intuition.
simpl in *.
rewrite plus_0_r.
eapply le_trans.
Focus 2.
eapply expnat_base_S.
destruct n; simpl.
omega.
intuition.
Qed.
Lemma sqrt_le_lin_gen : forall a b,
(a <= b ->
sqrt a <= b)%nat.
intuition.
eapply le_trans.
eapply Nat.sqrt_le_lin.
trivial.
Qed.
Lemma div2_le_mono : forall n1 n2,
(n1 <= n2 ->
div2 n1 <= div2 n2)%nat.
induction n1; intuition.
destruct n2.
omega.
destruct (Even.even_odd_dec n1).
destruct (Even.even_odd_dec n2).
repeat rewrite <- even_div2; trivial.
eapply IHn1.
omega.
rewrite <- even_div2; trivial.
rewrite <- odd_div2; trivial.
econstructor.
eapply IHn1.
omega.
destruct (Even.even_odd_dec n2).
destruct (lt_dec n1 n2).
assert (n1 <= (S n2))%nat.
omega.
destruct n2.
omega.
rewrite <- odd_div2; trivial.
rewrite <- even_div2.
rewrite <- odd_div2.
eapply le_n_S.
eapply IHn1.
omega.
inversion e.
trivial.
trivial.
assert (n1 = n2).
omega.
subst.
exfalso.
eapply Even.not_even_and_odd; eauto.
rewrite <- odd_div2; trivial.
rewrite <- odd_div2; trivial.
eapply le_n_S.
eapply IHn1.
omega.
Qed.
Lemma div2_ge : forall n n',
n >= n' ->
forall x,
(n' = 2 * x)%nat ->
div2 n >= x.
induction 1; intuition; subst; simpl in *.
specialize (div2_double x); intuition; simpl in *.
rewrite H.
omega.
destruct m.
omega.
destruct (Even.even_odd_dec m).
rewrite even_div2.
assert (div2 (S m) >= x).
eapply IHle.
trivial.
omega.
trivial.
rewrite odd_div2.
eapply IHle.
trivial.
trivial.
Qed.
Instance expnat_nz : forall k n (p : nz n),
nz (expnat n k).
intuition.
induction k; intuition; simpl in *.
econstructor.
omega.
econstructor.
edestruct IHk; eauto.
destruct p.
eapply mult_gt_0; intuition.
Qed.
Lemma expnat_2_ge_1 : forall n,
(1 <= expnat 2 n)%nat.
induction n; intuition; simpl in *.
omega.
Qed.
Lemma le_expnat_2 : forall n,
(n <= expnat 2 n)%nat.
induction n; intuition; simpl in *.
rewrite plus_0_r.
assert (S n = 1 + n)%nat.
omega.
rewrite H.
eapply plus_le_compat.
eapply expnat_2_ge_1.
trivial.
Qed.
Lemma expnat_1 : forall k,
expnat 1%nat k = 1%nat.
induction k; intuition; simpl in *.
rewrite plus_0_r.
trivial.
Qed.
Theorem expnat_base_le :
forall k n1 n2,
n1 <= n2 ->
expnat n1 k <=
expnat n2 k.
induction k; intuition; simpl in *.
eapply mult_le_compat; intuition.
Qed.
Theorem expnat_double_le :
forall k n,
n >= 2 ->
expnat n (S k) >= 2 * expnat n k.
induction k; intuition; simpl in *.
omega.
rewrite plus_0_r.
rewrite <- mult_plus_distr_l.
eapply mult_le_compat.
trivial.
rewrite <- plus_0_r at 1.
rewrite <- plus_assoc.
eapply IHk.
trivial.
Qed.
Theorem nat_half_plus :
forall x,
x > 1 ->
exists a b,
a > 0 /\ b <= 1 /\ x = 2 * a + b.
induction x; intuition; simpl in *.
omega.
destruct (eq_nat_dec x 1); subst.
exists 1.
exists 0.
intuition; omega.
edestruct (IHx).
omega.
destruct H0.
intuition.
destruct x1.
rewrite plus_0_r in H3.
exists x0.
exists 1.
subst.
intuition; omega.
exists (S x0).
exists 0.
subst.
intuition.
Qed.
Theorem log2_div2 :
forall x y,
S y = log2 x ->
log2 (div2 x) = y.
intuition.
specialize (Nat.log2_double); intuition.
destruct (@nat_half_plus x).
eapply Nat.log2_lt_cancel.
rewrite Nat.log2_1.
omega.
destruct H1.
intuition.
subst.
destruct x1.
rewrite plus_0_r in *.
rewrite div2_double.
rewrite H0 in H.
omega.
omega.
destruct x1.
rewrite plus_comm.
rewrite div2_double_plus_one.
rewrite Nat.log2_succ_double in H.
omega.
omega.
omega.
Qed.
Lemma log2_0 :
log2 0 = 0.
trivial.
Qed.
Theorem expnat_0 :
forall k,
k > 0 ->
expnat 0 k = 0.
induction k; intuition; simpl in *.
Qed.
Theorem expnat_plus :
forall k1 k2 n,
expnat n (k1 + k2) = expnat n k1 * expnat n k2.
induction k1; simpl in *; intuition.
rewrite IHk1.
rewrite mult_assoc.
trivial.
Qed.
Theorem expnat_ge_1 :
forall k n,
n > 0 ->
1 <= expnat n k.
induction k; intuition; simpl in *.
rewrite <- mult_1_r at 1.
eapply mult_le_compat.
omega.
eauto.
Qed.
Theorem expnat_exp_le :
forall n2 n4 n,
(n2 > 0 \/ n > 0) ->
n2 <= n4 ->
expnat n n2 <= expnat n n4.
induction n2; destruct n4; simpl in *; intuition.
rewrite <- mult_1_l at 1.
eapply mult_le_compat.
omega.
eapply expnat_ge_1; trivial.
destruct (eq_nat_dec n 0); subst.
simpl; intuition.
eapply mult_le_compat; intuition.
Qed.
Lemma mult_lt_compat :
forall a b c d,
a < b ->
c < d ->
a * c < b * d.
intuition.
eapply le_lt_trans.
eapply mult_le_compat.
assert (a <= b).
omega.
eapply H1.
eapply le_refl.
eapply mult_lt_compat_l.
trivial.
omega.
Qed.
Theorem orb_same_eq_if :
forall a b c,
(a = false -> b = c) ->
orb a b = orb a c.
intuition.
destruct a; trivial; intuition.
Qed. | {"author": "FreeAndFair", "repo": "RLA", "sha": "4295e4bb700ebbfe69affeb35dda7ed42273c3a1", "save_path": "github-repos/coq/FreeAndFair-RLA", "path": "github-repos/coq/FreeAndFair-RLA/RLA-4295e4bb700ebbfe69affeb35dda7ed42273c3a1/src/fcf/StdNat.v"} |
import Base.map
# Utility structure for collections of samples.
mutable struct Particles{C}
calls::Vector{C}
lws::Vector{Float64}
lmle::Float64
end
map(fn::Function, ps::Particles) = map(fn, ps.calls)
include("inference/is.jl")
include("inference/pf.jl")
include("inference/mh.jl")
include("inference/vi.jl")
include("inference/hmc.jl")
const hmc = hamiltonian_monte_carlo
const mh = metropolis_hastings
const mh! = metropolis_hastings!
const is = importance_sampling
const advi = automatic_differentiation_variational_inference
# ------------ Documentation (IS) ------------ #
@doc(
"""
Samples from the model prior.
```julia
particles, normalized_weights = importance_sampling(observations::ConstrainedSelection,
num_samples::Int,
model::Function,
args::Tuple)
```
Samples from a programmer-provided proposal function.
```julia
particles, normalized_weights = importance_sampling(observations::ConstrainedSelection,
num_samples::Int,
model::Function,
args::Tuple,
proposal::Function,
proposal_args::Tuple)
```
Run importance sampling on the posterior over unconstrained addresses and values. Returns an instance of `Particles` and normalized weights.
""", importance_sampling)
# ------------ Documentation (PF) ------------ #
@doc(
"""
```julia
particles = initialize_filter(observations::ConstrainedHierarchicalSelection,
num_particles::Int,
fn::Function,
args::Tuple)
```
Instantiate a set of particles using a call to `importance_sampling`.
""", initialize_filter)
@doc(
"""
```julia
filter_step!(observations::ConstrainedHierarchicalSelection,
ps::Particles,
new_args::Tuple)
```
Perform a single filter step from an instance `ps` of `Particles`, applying the constraints specified by `observations`.
```julia
filter_step!(observations::ConstrainedHierarchicalSelection,
ps::Particles,
new_args::Tuple,
proposal::Function,
proposal_args::Tuple)
```
Perform a single filter step using a custom proposal function, applying the constraints specified by `observations`.
""", filter_step!)
@doc(
"""
```julia
check_ess_resample!(ps::Particles)
```
Checks the effective sample size using `ess`, then resamples from an existing instance of `Particles` by mutation in place.
""", check_ess_resample!)
# ------------ Documentation (MH) ------------ #
@doc(
"""
```julia
call, accepted, metropolis_hastings(sel::UnconstrainedSelection,
call::HierarchicalCallSite)
```
Perform a Metropolis-Hastings step by proposing new choices using the prior at addressed specified by `sel`. Returns a call site, as well as a Boolean value `accepted` to indicate if the proposal was accepted or rejected.
```julia
call, accepted = metropolis_hastings(sel::UnconstrainedSelection,
call::HierarchicalCallSite,
proposal::Function,
proposal_args::Tuple)
```
Perform a Metropolis-Hastings step by proposing new choices using a custom proposal at addressed specified by `sel`. Returns a call site, as well as a Boolean value `accepted` to indicate if the proposal was accepted or rejected.
""", metropolis_hastings)
# ------------ Documentation (VI) ------------ #
@doc(
"""
```julia
params, elbows, call_sites = advi(sel::K,
iters::Int,
v_mod::Function,
v_args::Tuple,
mod::Function,
args::Tuple;
opt = ADAM(),
gs_samples = 100) where K <: ConstrainedSelection
```
Given a selection `sel`, perform _automatic-differentiation variational inference_ with a proposal model `v_mod`. The result is a new set of trained parameters `params` for the variational model, the history of ELBO estimates `elbows`, and the call sites `calls` produced by the gradient estimator computation.
""", advi)
| {"hexsha": "53652d93c6cd235c25c795cce8a37686f66c5351", "size": 4579, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/inference.jl", "max_stars_repo_name": "mschauer/Jaynes.jl", "max_stars_repo_head_hexsha": "f76ec08b4e4eb517ae55c52232b0f6ec0914a469", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/inference.jl", "max_issues_repo_name": "mschauer/Jaynes.jl", "max_issues_repo_head_hexsha": "f76ec08b4e4eb517ae55c52232b0f6ec0914a469", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-10T14:49:08.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-10T14:49:08.000Z", "max_forks_repo_path": "src/inference.jl", "max_forks_repo_name": "mschauer/Jaynes.jl", "max_forks_repo_head_hexsha": "f76ec08b4e4eb517ae55c52232b0f6ec0914a469", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7734375, "max_line_length": 310, "alphanum_fraction": 0.5876829002, "num_tokens": 870} |
[STATEMENT]
lemma HT_Wait: "HT(Wait(n)) = Wait(n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. HT (Wait n) = Wait n
[PROOF STEP]
by (rel_auto) | {"llama_tokens": 69, "file": "UTP_utp_examples_utp_simple_time", "length": 1} |
include("straight_roadways.jl")
include("vehicles.jl") | {"hexsha": "c99c7151f6992e90f21981ce6de6ef657fe9b0d5", "size": 54, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/1d/main.jl", "max_stars_repo_name": "wxuejing/AutoViz.jl", "max_stars_repo_head_hexsha": "91a6b57949f5d839bda55ad18d81667d6930da09", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/1d/main.jl", "max_issues_repo_name": "wxuejing/AutoViz.jl", "max_issues_repo_head_hexsha": "91a6b57949f5d839bda55ad18d81667d6930da09", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/1d/main.jl", "max_forks_repo_name": "wxuejing/AutoViz.jl", "max_forks_repo_head_hexsha": "91a6b57949f5d839bda55ad18d81667d6930da09", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0, "max_line_length": 31, "alphanum_fraction": 0.7962962963, "num_tokens": 15} |
[STATEMENT]
lemma ld_alt[simp]: "n > 0 \<Longrightarrow> ld n = Max {i. 2 ^ i \<le> n}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < n \<Longrightarrow> ld n = Max {i. 2 ^ i \<le> n}
[PROOF STEP]
proof (safe intro!: Max_eqI[symmetric])
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. 0 < n \<Longrightarrow> finite {i. 2 ^ i \<le> n}
2. \<And>y. \<lbrakk>0 < n; 2 ^ y \<le> n\<rbrakk> \<Longrightarrow> y \<le> ld n
3. 0 < n \<Longrightarrow> 2 ^ ld n \<le> n
[PROOF STEP]
assume "n > 0"
[PROOF STATE]
proof (state)
this:
0 < n
goal (3 subgoals):
1. 0 < n \<Longrightarrow> finite {i. 2 ^ i \<le> n}
2. \<And>y. \<lbrakk>0 < n; 2 ^ y \<le> n\<rbrakk> \<Longrightarrow> y \<le> ld n
3. 0 < n \<Longrightarrow> 2 ^ ld n \<le> n
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 < n
[PROOF STEP]
show "2 ^ ld n \<le> n"
[PROOF STATE]
proof (prove)
using this:
0 < n
goal (1 subgoal):
1. 2 ^ ld n \<le> n
[PROOF STEP]
by (induct n rule: ld.induct) auto
[PROOF STATE]
proof (state)
this:
2 ^ ld n \<le> n
goal (2 subgoals):
1. 0 < n \<Longrightarrow> finite {i. 2 ^ i \<le> n}
2. \<And>y. \<lbrakk>0 < n; 2 ^ y \<le> n\<rbrakk> \<Longrightarrow> y \<le> ld n
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. 0 < n \<Longrightarrow> finite {i. 2 ^ i \<le> n}
2. \<And>y. \<lbrakk>0 < n; 2 ^ y \<le> n\<rbrakk> \<Longrightarrow> y \<le> ld n
[PROOF STEP]
fix y
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. 0 < n \<Longrightarrow> finite {i. 2 ^ i \<le> n}
2. \<And>y. \<lbrakk>0 < n; 2 ^ y \<le> n\<rbrakk> \<Longrightarrow> y \<le> ld n
[PROOF STEP]
assume "2 ^ y \<le> n"
[PROOF STATE]
proof (state)
this:
2 ^ y \<le> n
goal (2 subgoals):
1. 0 < n \<Longrightarrow> finite {i. 2 ^ i \<le> n}
2. \<And>y. \<lbrakk>0 < n; 2 ^ y \<le> n\<rbrakk> \<Longrightarrow> y \<le> ld n
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
2 ^ y \<le> n
[PROOF STEP]
show "y \<le> ld n"
[PROOF STATE]
proof (prove)
using this:
2 ^ y \<le> n
goal (1 subgoal):
1. y \<le> ld n
[PROOF STEP]
proof (induct n arbitrary: y rule: ld.induct)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>y. 2 ^ y \<le> 0 \<Longrightarrow> y \<le> ld 0
2. \<And>y. 2 ^ y \<le> Suc 0 \<Longrightarrow> y \<le> ld (Suc 0)
3. \<And>va y. \<lbrakk>\<And>y. 2 ^ y \<le> Suc (Suc va) div 2 \<Longrightarrow> y \<le> ld (Suc (Suc va) div 2); 2 ^ y \<le> Suc (Suc va)\<rbrakk> \<Longrightarrow> y \<le> ld (Suc (Suc va))
[PROOF STEP]
case (3 z)
[PROOF STATE]
proof (state)
this:
2 ^ ?y \<le> Suc (Suc z) div 2 \<Longrightarrow> ?y \<le> ld (Suc (Suc z) div 2)
2 ^ y \<le> Suc (Suc z)
goal (3 subgoals):
1. \<And>y. 2 ^ y \<le> 0 \<Longrightarrow> y \<le> ld 0
2. \<And>y. 2 ^ y \<le> Suc 0 \<Longrightarrow> y \<le> ld (Suc 0)
3. \<And>va y. \<lbrakk>\<And>y. 2 ^ y \<le> Suc (Suc va) div 2 \<Longrightarrow> y \<le> ld (Suc (Suc va) div 2); 2 ^ y \<le> Suc (Suc va)\<rbrakk> \<Longrightarrow> y \<le> ld (Suc (Suc va))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
2 ^ ?y \<le> Suc (Suc z) div 2 \<Longrightarrow> ?y \<le> ld (Suc (Suc z) div 2)
2 ^ y \<le> Suc (Suc z)
[PROOF STEP]
have "y - 1 \<le> ld (Suc (Suc z) div 2)"
[PROOF STATE]
proof (prove)
using this:
2 ^ ?y \<le> Suc (Suc z) div 2 \<Longrightarrow> ?y \<le> ld (Suc (Suc z) div 2)
2 ^ y \<le> Suc (Suc z)
goal (1 subgoal):
1. y - 1 \<le> ld (Suc (Suc z) div 2)
[PROOF STEP]
by (cases y) simp_all
[PROOF STATE]
proof (state)
this:
y - 1 \<le> ld (Suc (Suc z) div 2)
goal (3 subgoals):
1. \<And>y. 2 ^ y \<le> 0 \<Longrightarrow> y \<le> ld 0
2. \<And>y. 2 ^ y \<le> Suc 0 \<Longrightarrow> y \<le> ld (Suc 0)
3. \<And>va y. \<lbrakk>\<And>y. 2 ^ y \<le> Suc (Suc va) div 2 \<Longrightarrow> y \<le> ld (Suc (Suc va) div 2); 2 ^ y \<le> Suc (Suc va)\<rbrakk> \<Longrightarrow> y \<le> ld (Suc (Suc va))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
y - 1 \<le> ld (Suc (Suc z) div 2)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
y - 1 \<le> ld (Suc (Suc z) div 2)
goal (1 subgoal):
1. y \<le> ld (Suc (Suc z))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
y \<le> ld (Suc (Suc z))
goal (2 subgoals):
1. \<And>y. 2 ^ y \<le> 0 \<Longrightarrow> y \<le> ld 0
2. \<And>y. 2 ^ y \<le> Suc 0 \<Longrightarrow> y \<le> ld (Suc 0)
[PROOF STEP]
qed (auto simp: le_eq_less_or_eq)
[PROOF STATE]
proof (state)
this:
y \<le> ld n
goal (1 subgoal):
1. 0 < n \<Longrightarrow> finite {i. 2 ^ i \<le> n}
[PROOF STEP]
qed simp | {"llama_tokens": 2189, "file": "Formula_Derivatives_WS1S_Formula", "length": 20} |
import numpy as np
import cv2
def detect_shadow(img_bgr):
kernel = np.ones((5, 5), np.uint8)
height, width, depth = img_bgr.shape
black_img = np.zeros((height, width, 1), dtype="uint8")
img_hsv: np.ndarray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(src=img_hsv, lowerb=np.array(
[0, 34, 83]), upperb=np.array([179, 255, 255]))
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
dilation = cv2.dilate(closing, kernel, iterations=1)
contours, hierarchy = cv2.findContours(
dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
c = max(contours, key=cv2.contourArea)
masked_largest_shadow = cv2.fillPoly(
black_img, pts=[c], color=(255, 255, 255))
return masked_largest_shadow
| {"hexsha": "11b342e146b5c9b70d9babb0c6cb0289f394ed1d", "size": 837, "ext": "py", "lang": "Python", "max_stars_repo_path": "trajectory-extraction/object_detection/shadow_detection.py", "max_stars_repo_name": "JudithVerstegen/scarabs-abm", "max_stars_repo_head_hexsha": "09cd43ae43e0faccb1a725037d226a29cd390fe2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-23T10:46:34.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-23T10:46:34.000Z", "max_issues_repo_path": "trajectory-extraction/object_detection/shadow_detection.py", "max_issues_repo_name": "JudithVerstegen/scarabs-abm", "max_issues_repo_head_hexsha": "09cd43ae43e0faccb1a725037d226a29cd390fe2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trajectory-extraction/object_detection/shadow_detection.py", "max_forks_repo_name": "JudithVerstegen/scarabs-abm", "max_forks_repo_head_hexsha": "09cd43ae43e0faccb1a725037d226a29cd390fe2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-09T08:22:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-09T08:22:11.000Z", "avg_line_length": 34.875, "max_line_length": 66, "alphanum_fraction": 0.688172043, "include": true, "reason": "import numpy", "num_tokens": 258} |
"""
Test Autodock Vina Utility Functions.
"""
import os
import numpy as np
import unittest
from deepchem.utils import vina_utils
from deepchem.utils import rdkit_utils
class TestVinaUtils(unittest.TestCase):
def setUp(self):
# TODO test more formats for ligand
current_dir = os.path.dirname(os.path.realpath(__file__))
self.docked_ligands = os.path.join(current_dir, 'data',
'1jld_ligand_docked.pdbqt')
def test_load_docked_ligand(self):
docked_ligands, scores = vina_utils.load_docked_ligands(self.docked_ligands)
assert len(docked_ligands) == 9
assert len(scores) == 9
for ligand, score in zip(docked_ligands, scores):
xyz = rdkit_utils.get_xyz_from_mol(ligand)
assert score < 0 # This is a binding free energy
assert np.count_nonzero(xyz) > 0
def test_prepare_inputs(self):
pdbid = '3cyx'
ligand_smiles = 'CC(C)(C)NC(O)C1CC2CCCCC2C[NH+]1CC(O)C(CC1CCCCC1)NC(O)C(CC(N)O)NC(O)C1CCC2CCCCC2N1'
protein, ligand = vina_utils.prepare_inputs(
pdbid, ligand_smiles, pdb_name=pdbid)
assert np.isclose(protein.GetNumAtoms(), 1415, atol=3)
assert np.isclose(ligand.GetNumAtoms(), 124, atol=3)
protein, ligand = vina_utils.prepare_inputs(pdbid + '.pdb',
'ligand_' + pdbid + '.pdb')
assert np.isclose(protein.GetNumAtoms(), 1415, atol=3)
assert np.isclose(ligand.GetNumAtoms(), 124, atol=3)
os.remove(pdbid + '.pdb')
os.remove('ligand_' + pdbid + '.pdb')
os.remove('tmp.pdb')
| {"hexsha": "7c0a147ffcfa477678f2a7ca52e2115493b95d81", "size": 1567, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepchem/utils/test/test_vina_utils.py", "max_stars_repo_name": "cjgalvin/deepchem", "max_stars_repo_head_hexsha": "64993a129e7f0f78fed9500298b1828ac8a0757a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-05-29T19:18:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-25T05:44:05.000Z", "max_issues_repo_path": "deepchem/utils/test/test_vina_utils.py", "max_issues_repo_name": "cjgalvin/deepchem", "max_issues_repo_head_hexsha": "64993a129e7f0f78fed9500298b1828ac8a0757a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2017-02-23T19:39:22.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-31T22:21:18.000Z", "max_forks_repo_path": "deepchem/utils/test/test_vina_utils.py", "max_forks_repo_name": "cjgalvin/deepchem", "max_forks_repo_head_hexsha": "64993a129e7f0f78fed9500298b1828ac8a0757a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-09-22T00:53:53.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-22T00:53:53.000Z", "avg_line_length": 32.6458333333, "max_line_length": 103, "alphanum_fraction": 0.6687938736, "include": true, "reason": "import numpy", "num_tokens": 466} |
"""Tests for locomotion.tasks.two_tap."""
import multi_gpu
import functools
from unittest.mock import patch
from absl.testing import absltest
import numpy as np
import os
DEMO_PATH = "../demo/markerless_mouse_1"
os.chdir(DEMO_PATH)
CONFIG_PATH = "../../tests/configs/config_mousetest.yaml"
MULTI_INSTANCE_CONFIG_PATH = "../../tests/configs/config_mousetest_multi_instance.yaml"
DANNCE_PATH = "../../tests/configs/label3d_dannce.mat"
class MultiGpuTest(absltest.TestCase):
def test_dannce_predict_help_message(self):
os.system("dannce-predict-multi-gpu --help")
def test_com_predict_help_message(self):
os.system("com-predict-multi-gpu --help")
def test_dannce_predict_batch_params(self):
handler = multi_gpu.MultiGpuHandler(
CONFIG_PATH,
n_samples_per_gpu=100,
verbose=False,
test=True,
dannce_file=DANNCE_PATH,
)
batch_params, _ = handler.submit_dannce_predict_multi_gpu()
self.assertTrue(os.path.exists(handler.batch_param_file))
self.assertTrue(len(batch_params) == 10)
def test_dannce_predict_batch_params_multi_instance(self):
handler = multi_gpu.MultiGpuHandler(
MULTI_INSTANCE_CONFIG_PATH,
n_samples_per_gpu=100,
verbose=False,
test=True,
dannce_file=DANNCE_PATH,
)
batch_params, _ = handler.submit_dannce_predict_multi_gpu()
print(batch_params)
self.assertTrue(len(batch_params) == 20)
def test_dannce_inference_submission(self):
with patch("sys.argv", ["dannce-inference", MULTI_INSTANCE_CONFIG_PATH, MULTI_INSTANCE_CONFIG_PATH, "--test=True"]):
multi_gpu.submit_inference()
def test_com_predict_batch_params(self):
handler = multi_gpu.MultiGpuHandler(
CONFIG_PATH,
n_samples_per_gpu=100,
verbose=False,
test=True,
dannce_file=DANNCE_PATH,
)
batch_params, _ = handler.submit_com_predict_multi_gpu()
self.assertTrue(os.path.exists(handler.batch_param_file))
self.assertTrue(len(batch_params) == 180)
def test_raises_error_if_no_dannce_file(self):
# Move to a directory in which there is no dannce.mat file
os.chdir("..")
with self.assertRaises(FileNotFoundError):
handler = multi_gpu.MultiGpuHandler(
CONFIG_PATH, n_samples_per_gpu=100, verbose=False, test=True
)
def test_dannce_predict_multi_gpu_cli(self):
cmd = (
"dannce-predict-multi-gpu %s --test=True --verbose=False --dannce-file=%s"
% (
CONFIG_PATH,
DANNCE_PATH,
)
)
os.system(cmd)
def test_com_predict_multi_gpu_cli(self):
cmd = (
"com-predict-multi-gpu %s --test=True --verbose=False --dannce-file=%s"
% (
CONFIG_PATH,
DANNCE_PATH,
)
)
os.system(cmd)
if __name__ == "__main__":
absltest.main()
| {"hexsha": "8c38af4bc655047d812e1a8372a59968b9d21f8d", "size": 3102, "ext": "py", "lang": "Python", "max_stars_repo_path": "cluster/multi_gpu_test.py", "max_stars_repo_name": "tqxli/dannce-pytorch", "max_stars_repo_head_hexsha": "7676f4f7bfc7f9ce7dabf39e55d02823f3f18c1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-21T19:49:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T19:49:18.000Z", "max_issues_repo_path": "cluster/multi_gpu_test.py", "max_issues_repo_name": "tqxli/dannce-pytorch", "max_issues_repo_head_hexsha": "7676f4f7bfc7f9ce7dabf39e55d02823f3f18c1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cluster/multi_gpu_test.py", "max_forks_repo_name": "tqxli/dannce-pytorch", "max_forks_repo_head_hexsha": "7676f4f7bfc7f9ce7dabf39e55d02823f3f18c1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3125, "max_line_length": 124, "alphanum_fraction": 0.637653127, "include": true, "reason": "import numpy", "num_tokens": 681} |
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
! This file was ported from Lean 3 source module data.finset.powerset
! leanprover-community/mathlib commit cc70d9141824ea8982d1562ce009952f2c3ece30
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Finset.Lattice
import Mathbin.Data.Multiset.Powerset
/-!
# The powerset of a finset
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
namespace Finset
open Function Multiset
variable {α : Type _} {s t : Finset α}
/-! ### powerset -/
section Powerset
#print Finset.powerset /-
/-- When `s` is a finset, `s.powerset` is the finset of all subsets of `s` (seen as finsets). -/
def powerset (s : Finset α) : Finset (Finset α) :=
⟨s.1.powerset.pmap Finset.mk fun t h => nodup_of_le (mem_powerset.1 h) s.Nodup,
s.Nodup.powerset.pmap fun a ha b hb => congr_arg Finset.val⟩
#align finset.powerset Finset.powerset
-/
#print Finset.mem_powerset /-
@[simp]
theorem mem_powerset {s t : Finset α} : s ∈ powerset t ↔ s ⊆ t := by
cases s <;> simp only [powerset, mem_mk, mem_pmap, mem_powerset, exists_prop, exists_eq_right] <;>
rw [← val_le_iff]
#align finset.mem_powerset Finset.mem_powerset
-/
#print Finset.coe_powerset /-
@[simp, norm_cast]
theorem coe_powerset (s : Finset α) :
(s.powerset : Set (Finset α)) = coe ⁻¹' (s : Set α).powerset :=
by
ext
simp
#align finset.coe_powerset Finset.coe_powerset
-/
#print Finset.empty_mem_powerset /-
@[simp]
theorem empty_mem_powerset (s : Finset α) : ∅ ∈ powerset s :=
mem_powerset.2 (empty_subset _)
#align finset.empty_mem_powerset Finset.empty_mem_powerset
-/
#print Finset.mem_powerset_self /-
@[simp]
theorem mem_powerset_self (s : Finset α) : s ∈ powerset s :=
mem_powerset.2 Subset.rfl
#align finset.mem_powerset_self Finset.mem_powerset_self
-/
#print Finset.powerset_nonempty /-
theorem powerset_nonempty (s : Finset α) : s.powerset.Nonempty :=
⟨∅, empty_mem_powerset _⟩
#align finset.powerset_nonempty Finset.powerset_nonempty
-/
#print Finset.powerset_mono /-
@[simp]
theorem powerset_mono {s t : Finset α} : powerset s ⊆ powerset t ↔ s ⊆ t :=
⟨fun h => mem_powerset.1 <| h <| mem_powerset_self _, fun st u h =>
mem_powerset.2 <| Subset.trans (mem_powerset.1 h) st⟩
#align finset.powerset_mono Finset.powerset_mono
-/
#print Finset.powerset_injective /-
theorem powerset_injective : Injective (powerset : Finset α → Finset (Finset α)) :=
injective_of_le_imp_le _ fun s t => powerset_mono.1
#align finset.powerset_injective Finset.powerset_injective
-/
#print Finset.powerset_inj /-
@[simp]
theorem powerset_inj : powerset s = powerset t ↔ s = t :=
powerset_injective.eq_iff
#align finset.powerset_inj Finset.powerset_inj
-/
#print Finset.powerset_empty /-
@[simp]
theorem powerset_empty : (∅ : Finset α).powerset = {∅} :=
rfl
#align finset.powerset_empty Finset.powerset_empty
-/
#print Finset.powerset_eq_singleton_empty /-
@[simp]
theorem powerset_eq_singleton_empty : s.powerset = {∅} ↔ s = ∅ := by
rw [← powerset_empty, powerset_inj]
#align finset.powerset_eq_singleton_empty Finset.powerset_eq_singleton_empty
-/
#print Finset.card_powerset /-
/-- **Number of Subsets of a Set** -/
@[simp]
theorem card_powerset (s : Finset α) : card (powerset s) = 2 ^ card s :=
(card_pmap _ _ _).trans (card_powerset s.1)
#align finset.card_powerset Finset.card_powerset
-/
#print Finset.not_mem_of_mem_powerset_of_not_mem /-
theorem not_mem_of_mem_powerset_of_not_mem {s t : Finset α} {a : α} (ht : t ∈ s.powerset)
(h : a ∉ s) : a ∉ t := by
apply mt _ h
apply mem_powerset.1 ht
#align finset.not_mem_of_mem_powerset_of_not_mem Finset.not_mem_of_mem_powerset_of_not_mem
-/
#print Finset.powerset_insert /-
theorem powerset_insert [DecidableEq α] (s : Finset α) (a : α) :
powerset (insert a s) = s.powerset ∪ s.powerset.image (insert a) :=
by
ext t
simp only [exists_prop, mem_powerset, mem_image, mem_union, subset_insert_iff]
by_cases h : a ∈ t
· constructor
· exact fun H => Or.inr ⟨_, H, insert_erase h⟩
· intro H
cases H
· exact subset.trans (erase_subset a t) H
· rcases H with ⟨u, hu⟩
rw [← hu.2]
exact subset.trans (erase_insert_subset a u) hu.1
· have : ¬∃ u : Finset α, u ⊆ s ∧ insert a u = t := by simp [Ne.symm (ne_insert_of_not_mem _ _ h)]
simp [Finset.erase_eq_of_not_mem h, this]
#align finset.powerset_insert Finset.powerset_insert
-/
/- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (t «expr ⊆ » s) -/
#print Finset.decidableExistsOfDecidableSubsets /-
/-- For predicate `p` decidable on subsets, it is decidable whether `p` holds for any subset. -/
instance decidableExistsOfDecidableSubsets {s : Finset α} {p : ∀ (t) (_ : t ⊆ s), Prop}
[∀ (t) (h : t ⊆ s), Decidable (p t h)] : Decidable (∃ (t : _)(h : t ⊆ s), p t h) :=
decidable_of_iff (∃ (t : _)(hs : t ∈ s.powerset), p t (mem_powerset.1 hs))
⟨fun ⟨t, _, hp⟩ => ⟨t, _, hp⟩, fun ⟨t, hs, hp⟩ => ⟨t, mem_powerset.2 hs, hp⟩⟩
#align finset.decidable_exists_of_decidable_subsets Finset.decidableExistsOfDecidableSubsets
-/
/- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (t «expr ⊆ » s) -/
#print Finset.decidableForallOfDecidableSubsets /-
/-- For predicate `p` decidable on subsets, it is decidable whether `p` holds for every subset. -/
instance decidableForallOfDecidableSubsets {s : Finset α} {p : ∀ (t) (_ : t ⊆ s), Prop}
[∀ (t) (h : t ⊆ s), Decidable (p t h)] : Decidable (∀ (t) (h : t ⊆ s), p t h) :=
decidable_of_iff (∀ (t) (h : t ∈ s.powerset), p t (mem_powerset.1 h))
⟨fun h t hs => h t (mem_powerset.2 hs), fun h _ _ => h _ _⟩
#align finset.decidable_forall_of_decidable_subsets Finset.decidableForallOfDecidableSubsets
-/
#print Finset.decidableExistsOfDecidableSubsets' /-
/-- A version of `finset.decidable_exists_of_decidable_subsets` with a non-dependent `p`.
Typeclass inference cannot find `hu` here, so this is not an instance. -/
def decidableExistsOfDecidableSubsets' {s : Finset α} {p : Finset α → Prop}
(hu : ∀ (t) (h : t ⊆ s), Decidable (p t)) : Decidable (∃ (t : _)(h : t ⊆ s), p t) :=
@Finset.decidableExistsOfDecidableSubsets _ _ _ hu
#align finset.decidable_exists_of_decidable_subsets' Finset.decidableExistsOfDecidableSubsets'
-/
#print Finset.decidableForallOfDecidableSubsets' /-
/-- A version of `finset.decidable_forall_of_decidable_subsets` with a non-dependent `p`.
Typeclass inference cannot find `hu` here, so this is not an instance. -/
def decidableForallOfDecidableSubsets' {s : Finset α} {p : Finset α → Prop}
(hu : ∀ (t) (h : t ⊆ s), Decidable (p t)) : Decidable (∀ (t) (h : t ⊆ s), p t) :=
@Finset.decidableForallOfDecidableSubsets _ _ _ hu
#align finset.decidable_forall_of_decidable_subsets' Finset.decidableForallOfDecidableSubsets'
-/
end Powerset
section Ssubsets
variable [DecidableEq α]
#print Finset.ssubsets /-
/-- For `s` a finset, `s.ssubsets` is the finset comprising strict subsets of `s`. -/
def ssubsets (s : Finset α) : Finset (Finset α) :=
erase (powerset s) s
#align finset.ssubsets Finset.ssubsets
-/
#print Finset.mem_ssubsets /-
@[simp]
theorem mem_ssubsets {s t : Finset α} : t ∈ s.ssubsets ↔ t ⊂ s := by
rw [ssubsets, mem_erase, mem_powerset, ssubset_iff_subset_ne, and_comm]
#align finset.mem_ssubsets Finset.mem_ssubsets
-/
#print Finset.empty_mem_ssubsets /-
theorem empty_mem_ssubsets {s : Finset α} (h : s.Nonempty) : ∅ ∈ s.ssubsets :=
by
rw [mem_ssubsets, ssubset_iff_subset_ne]
exact ⟨empty_subset s, h.ne_empty.symm⟩
#align finset.empty_mem_ssubsets Finset.empty_mem_ssubsets
-/
/- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (t «expr ⊂ » s) -/
#print Finset.decidableExistsOfDecidableSsubsets /-
/-- For predicate `p` decidable on ssubsets, it is decidable whether `p` holds for any ssubset. -/
instance decidableExistsOfDecidableSsubsets {s : Finset α} {p : ∀ (t) (_ : t ⊂ s), Prop}
[∀ (t) (h : t ⊂ s), Decidable (p t h)] : Decidable (∃ t h, p t h) :=
decidable_of_iff (∃ (t : _)(hs : t ∈ s.ssubsets), p t (mem_ssubsets.1 hs))
⟨fun ⟨t, _, hp⟩ => ⟨t, _, hp⟩, fun ⟨t, hs, hp⟩ => ⟨t, mem_ssubsets.2 hs, hp⟩⟩
#align finset.decidable_exists_of_decidable_ssubsets Finset.decidableExistsOfDecidableSsubsets
-/
/- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (t «expr ⊂ » s) -/
#print Finset.decidableForallOfDecidableSsubsets /-
/-- For predicate `p` decidable on ssubsets, it is decidable whether `p` holds for every ssubset. -/
instance decidableForallOfDecidableSsubsets {s : Finset α} {p : ∀ (t) (_ : t ⊂ s), Prop}
[∀ (t) (h : t ⊂ s), Decidable (p t h)] : Decidable (∀ t h, p t h) :=
decidable_of_iff (∀ (t) (h : t ∈ s.ssubsets), p t (mem_ssubsets.1 h))
⟨fun h t hs => h t (mem_ssubsets.2 hs), fun h _ _ => h _ _⟩
#align finset.decidable_forall_of_decidable_ssubsets Finset.decidableForallOfDecidableSsubsets
-/
#print Finset.decidableExistsOfDecidableSsubsets' /-
/-- A version of `finset.decidable_exists_of_decidable_ssubsets` with a non-dependent `p`.
Typeclass inference cannot find `hu` here, so this is not an instance. -/
def decidableExistsOfDecidableSsubsets' {s : Finset α} {p : Finset α → Prop}
(hu : ∀ (t) (h : t ⊂ s), Decidable (p t)) : Decidable (∃ (t : _)(h : t ⊂ s), p t) :=
@Finset.decidableExistsOfDecidableSsubsets _ _ _ _ hu
#align finset.decidable_exists_of_decidable_ssubsets' Finset.decidableExistsOfDecidableSsubsets'
-/
#print Finset.decidableForallOfDecidableSsubsets' /-
/-- A version of `finset.decidable_forall_of_decidable_ssubsets` with a non-dependent `p`.
Typeclass inference cannot find `hu` here, so this is not an instance. -/
def decidableForallOfDecidableSsubsets' {s : Finset α} {p : Finset α → Prop}
(hu : ∀ (t) (h : t ⊂ s), Decidable (p t)) : Decidable (∀ (t) (h : t ⊂ s), p t) :=
@Finset.decidableForallOfDecidableSsubsets _ _ _ _ hu
#align finset.decidable_forall_of_decidable_ssubsets' Finset.decidableForallOfDecidableSsubsets'
-/
end Ssubsets
section PowersetLen
#print Finset.powersetLen /-
/-- Given an integer `n` and a finset `s`, then `powerset_len n s` is the finset of subsets of `s`
of cardinality `n`. -/
def powersetLen (n : ℕ) (s : Finset α) : Finset (Finset α) :=
⟨(s.1.powersetLen n).pmap Finset.mk fun t h => nodup_of_le (mem_powersetLen.1 h).1 s.2,
s.2.powersetLen.pmap fun a ha b hb => congr_arg Finset.val⟩
#align finset.powerset_len Finset.powersetLen
-/
#print Finset.mem_powersetLen /-
/-- **Formula for the Number of Combinations** -/
theorem mem_powersetLen {n} {s t : Finset α} : s ∈ powersetLen n t ↔ s ⊆ t ∧ card s = n := by
cases s <;> simp [powerset_len, val_le_iff.symm] <;> rfl
#align finset.mem_powerset_len Finset.mem_powersetLen
-/
#print Finset.powersetLen_mono /-
@[simp]
theorem powersetLen_mono {n} {s t : Finset α} (h : s ⊆ t) : powersetLen n s ⊆ powersetLen n t :=
fun u h' => mem_powersetLen.2 <| And.imp (fun h₂ => Subset.trans h₂ h) id (mem_powersetLen.1 h')
#align finset.powerset_len_mono Finset.powersetLen_mono
-/
#print Finset.card_powersetLen /-
/-- **Formula for the Number of Combinations** -/
@[simp]
theorem card_powersetLen (n : ℕ) (s : Finset α) : card (powersetLen n s) = Nat.choose (card s) n :=
(card_pmap _ _ _).trans (card_powersetLen n s.1)
#align finset.card_powerset_len Finset.card_powersetLen
-/
#print Finset.powersetLen_zero /-
@[simp]
theorem powersetLen_zero (s : Finset α) : Finset.powersetLen 0 s = {∅} :=
by
ext; rw [mem_powerset_len, mem_singleton, card_eq_zero]
refine'
⟨fun h => h.2, fun h => by
rw [h]
exact ⟨empty_subset s, rfl⟩⟩
#align finset.powerset_len_zero Finset.powersetLen_zero
-/
#print Finset.powersetLen_empty /-
@[simp]
theorem powersetLen_empty (n : ℕ) {s : Finset α} (h : s.card < n) : powersetLen n s = ∅ :=
Finset.card_eq_zero.mp (by rw [card_powerset_len, Nat.choose_eq_zero_of_lt h])
#align finset.powerset_len_empty Finset.powersetLen_empty
-/
/- warning: finset.powerset_len_eq_filter -> Finset.powersetLen_eq_filter is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {n : Nat} {s : Finset.{u1} α}, Eq.{succ u1} (Finset.{u1} (Finset.{u1} α)) (Finset.powersetLen.{u1} α n s) (Finset.filter.{u1} (Finset.{u1} α) (fun (x : Finset.{u1} α) => Eq.{1} Nat (Finset.card.{u1} α x) n) (fun (a : Finset.{u1} α) => Nat.decidableEq (Finset.card.{u1} α a) n) (Finset.powerset.{u1} α s))
but is expected to have type
forall {α : Type.{u1}} {n : Nat} {s : Finset.{u1} α}, Eq.{succ u1} (Finset.{u1} (Finset.{u1} α)) (Finset.powersetLen.{u1} α n s) (Finset.filter.{u1} (Finset.{u1} α) (fun (x : Finset.{u1} α) => Eq.{1} Nat (Finset.card.{u1} α x) n) (fun (a : Finset.{u1} α) => instDecidableEqNat (Finset.card.{u1} α a) n) (Finset.powerset.{u1} α s))
Case conversion may be inaccurate. Consider using '#align finset.powerset_len_eq_filter Finset.powersetLen_eq_filterₓ'. -/
theorem powersetLen_eq_filter {n} {s : Finset α} :
powersetLen n s = (powerset s).filterₓ fun x => x.card = n :=
by
ext
simp [mem_powerset_len]
#align finset.powerset_len_eq_filter Finset.powersetLen_eq_filter
#print Finset.powersetLen_succ_insert /-
theorem powersetLen_succ_insert [DecidableEq α] {x : α} {s : Finset α} (h : x ∉ s) (n : ℕ) :
powersetLen n.succ (insert x s) = powersetLen n.succ s ∪ (powersetLen n s).image (insert x) :=
by
rw [powerset_len_eq_filter, powerset_insert, filter_union, ← powerset_len_eq_filter]
congr
rw [powerset_len_eq_filter, image_filter]
congr 1
ext t
simp only [mem_powerset, mem_filter, Function.comp_apply, and_congr_right_iff]
intro ht
have : x ∉ t := fun H => h (ht H)
simp [card_insert_of_not_mem this, Nat.succ_inj']
#align finset.powerset_len_succ_insert Finset.powersetLen_succ_insert
-/
#print Finset.powersetLen_nonempty /-
theorem powersetLen_nonempty {n : ℕ} {s : Finset α} (h : n ≤ s.card) : (powersetLen n s).Nonempty :=
by
classical
induction' s using Finset.induction_on with x s hx IH generalizing n
· rw [card_empty, le_zero_iff] at h
rw [h, powerset_len_zero]
exact Finset.singleton_nonempty _
· cases n
· simp
· rw [card_insert_of_not_mem hx, Nat.succ_le_succ_iff] at h
rw [powerset_len_succ_insert hx]
refine' nonempty.mono _ ((IH h).image (insert x))
convert subset_union_right _ _
#align finset.powerset_len_nonempty Finset.powersetLen_nonempty
-/
#print Finset.powersetLen_self /-
@[simp]
theorem powersetLen_self (s : Finset α) : powersetLen s.card s = {s} :=
by
ext
rw [mem_powerset_len, mem_singleton]
constructor
· exact fun ⟨hs, hc⟩ => eq_of_subset_of_card_le hs hc.ge
· rintro rfl
simp
#align finset.powerset_len_self Finset.powersetLen_self
-/
/- warning: finset.pairwise_disjoint_powerset_len -> Finset.pairwise_disjoint_powersetLen is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Finset.{u1} α), Pairwise.{0} Nat (fun (i : Nat) (j : Nat) => Disjoint.{u1} (Finset.{u1} (Finset.{u1} α)) (Finset.partialOrder.{u1} (Finset.{u1} α)) (Finset.orderBot.{u1} (Finset.{u1} α)) (Finset.powersetLen.{u1} α i s) (Finset.powersetLen.{u1} α j s))
but is expected to have type
forall {α : Type.{u1}} (s : Finset.{u1} α), Pairwise.{0} Nat (fun (i : Nat) (j : Nat) => Disjoint.{u1} (Finset.{u1} (Finset.{u1} α)) (Finset.partialOrder.{u1} (Finset.{u1} α)) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} (Finset.{u1} α)) (Finset.powersetLen.{u1} α i s) (Finset.powersetLen.{u1} α j s))
Case conversion may be inaccurate. Consider using '#align finset.pairwise_disjoint_powerset_len Finset.pairwise_disjoint_powersetLenₓ'. -/
theorem pairwise_disjoint_powersetLen (s : Finset α) :
Pairwise fun i j => Disjoint (s.powersetLen i) (s.powersetLen j) := fun i j hij =>
Finset.disjoint_left.mpr fun x hi hj =>
hij <| (mem_powersetLen.mp hi).2.symm.trans (mem_powersetLen.mp hj).2
#align finset.pairwise_disjoint_powerset_len Finset.pairwise_disjoint_powersetLen
#print Finset.powerset_card_disjUnionᵢ /-
theorem powerset_card_disjUnionᵢ (s : Finset α) :
Finset.powerset s =
(range (s.card + 1)).disjUnionₓ (fun i => powersetLen i s)
(s.pairwise_disjoint_powersetLen.set_pairwise _) :=
by
refine' ext fun a => ⟨fun ha => _, fun ha => _⟩
· rw [mem_disj_Union]
exact
⟨a.card, mem_range.mpr (Nat.lt_succ_of_le (card_le_of_subset (mem_powerset.mp ha))),
mem_powerset_len.mpr ⟨mem_powerset.mp ha, rfl⟩⟩
· rcases mem_disj_Union.mp ha with ⟨i, hi, ha⟩
exact mem_powerset.mpr (mem_powerset_len.mp ha).1
#align finset.powerset_card_disj_Union Finset.powerset_card_disjUnionᵢ
-/
#print Finset.powerset_card_bunionᵢ /-
theorem powerset_card_bunionᵢ [DecidableEq (Finset α)] (s : Finset α) :
Finset.powerset s = (range (s.card + 1)).bunionᵢ fun i => powersetLen i s := by
simpa only [disj_Union_eq_bUnion] using powerset_card_disj_Union s
#align finset.powerset_card_bUnion Finset.powerset_card_bunionᵢ
-/
/- warning: finset.powerset_len_sup -> Finset.powerset_len_sup is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (u : Finset.{u1} α) (n : Nat), (LT.lt.{0} Nat Nat.hasLt n (Finset.card.{u1} α u)) -> (Eq.{succ u1} (Finset.{u1} α) (Finset.sup.{u1, u1} (Finset.{u1} α) (Finset.{u1} α) (Lattice.toSemilatticeSup.{u1} (Finset.{u1} α) (Finset.lattice.{u1} α (fun (a : α) (b : α) => _inst_1 a b))) (Finset.orderBot.{u1} α) (Finset.powersetLen.{u1} α (Nat.succ n) u) (id.{succ u1} (Finset.{u1} α))) u)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (u : Finset.{u1} α) (n : Nat), (LT.lt.{0} Nat instLTNat n (Finset.card.{u1} α u)) -> (Eq.{succ u1} (Finset.{u1} α) (Finset.sup.{u1, u1} (Finset.{u1} α) (Finset.{u1} α) (Lattice.toSemilatticeSup.{u1} (Finset.{u1} α) (Finset.instLatticeFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b))) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Finset.powersetLen.{u1} α (Nat.succ n) u) (id.{succ u1} (Finset.{u1} α))) u)
Case conversion may be inaccurate. Consider using '#align finset.powerset_len_sup Finset.powerset_len_supₓ'. -/
theorem powerset_len_sup [DecidableEq α] (u : Finset α) (n : ℕ) (hn : n < u.card) :
(powersetLen n.succ u).sup id = u := by
apply le_antisymm
· simp_rw [Finset.sup_le_iff, mem_powerset_len]
rintro x ⟨h, -⟩
exact h
· rw [sup_eq_bUnion, le_iff_subset, subset_iff]
cases' (Nat.succ_le_of_lt hn).eq_or_lt with h' h'
· simp [h']
· intro x hx
simp only [mem_bUnion, exists_prop, id.def]
obtain ⟨t, ht⟩ : ∃ t, t ∈ powerset_len n (u.erase x) := powerset_len_nonempty _
· refine' ⟨insert x t, _, mem_insert_self _ _⟩
rw [← insert_erase hx, powerset_len_succ_insert (not_mem_erase _ _)]
exact mem_union_right _ (mem_image_of_mem _ ht)
· rw [card_erase_of_mem hx]
exact Nat.le_pred_of_lt hn
#align finset.powerset_len_sup Finset.powerset_len_sup
#print Finset.powersetLen_card_add /-
@[simp]
theorem powersetLen_card_add (s : Finset α) {i : ℕ} (hi : 0 < i) : s.powersetLen (s.card + i) = ∅ :=
Finset.powersetLen_empty _ (lt_add_of_pos_right (Finset.card s) hi)
#align finset.powerset_len_card_add Finset.powersetLen_card_add
-/
#print Finset.map_val_val_powersetLen /-
@[simp]
theorem map_val_val_powersetLen (s : Finset α) (i : ℕ) :
(s.powersetLen i).val.map Finset.val = s.1.powersetLen i := by
simp [Finset.powersetLen, map_pmap, pmap_eq_map, map_id']
#align finset.map_val_val_powerset_len Finset.map_val_val_powersetLen
-/
#print Finset.powersetLen_map /-
theorem powersetLen_map {β : Type _} (f : α ↪ β) (n : ℕ) (s : Finset α) :
powersetLen n (s.map f) = (powersetLen n s).map (mapEmbedding f).toEmbedding :=
eq_of_veq <|
Multiset.map_injective (@eq_of_veq _) <| by
simp_rw [map_val_val_powerset_len, map_val, Multiset.map_map, Function.comp,
RelEmbedding.coeFn_toEmbedding, map_embedding_apply, map_val, ← Multiset.map_map _ val,
map_val_val_powerset_len, Multiset.powersetLen_map]
#align finset.powerset_len_map Finset.powersetLen_map
-/
end PowersetLen
end Finset
| {"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/Data/Finset/Powerset.lean"} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 23 12:11:15 2020
Modified from the cornstover biorefinery constructed in Cortes-Peña et al., 2020,
with the fermentation system modified to produce 2,3-butanediol instead of the original ethanol.
[1] Cortes-Peña et al., BioSTEAM: A Fast and Flexible Platform for the Design,
Simulation, and Techno-Economic Analysis of Biorefineries under Uncertainty.
ACS Sustainable Chem. Eng. 2020, 8 (8), 3302–3310.
https://doi.org/10.1021/acssuschemeng.9b07040.
All units are explicitly defined here for transparency and easy reference
@author: sarangbhagwat
"""
# %% Setup
import numpy as np
import pandas as pd
import thermosteam as tmo
from biorefineries.TAL.chemicals_data import TAL_chemicals
_kg_per_ton = 907.18474
# Chemical Engineering Plant Cost Index from Chemical Engineering Magazine
# (https://www.chemengonline.com/the-magazine/)
#!!! BioSTEAM has it all: `bst.units.design_tools.CEPCI_by_year`
# CEPCI = {1997: 386.5,
# 1998: 389.5,
# 2007: 525.4,
# 2009: 521.9,
# 2010: 550.8,
# 2011: 585.7,
# 2012: 584.6,
# 2013: 567.3,
# 2014: 576.1,
# 2016: 541.7}
# %%
# =============================================================================
# Function to find the split ratios for Splitters, assume 0 for chemicals not specified in splits
# =============================================================================
def find_split(IDs, flow0, flow1, chemical_groups):
# Add 1e-6 to avoid flow0 and flow1 both being 0
flow0 = np.asarray(flow0) + 1e-6
flow1 = np.asarray(flow1) + 1e-6
splits = flow0/(flow0 + flow1)
thermo = tmo.settings.get_thermo()
chemicals = thermo.chemicals
array = np.zeros(chemicals.size)
for ID, split in zip(IDs, splits):
if ID in chemical_groups:
array[chemicals.get_index(chemical_groups[ID])] = split
else:
array[chemicals.index(ID)] = split
# WWTsludge is removed from the cell mass group
array[chemicals.index('WWTsludge')] = array[chemicals.index('FermMicrobe')]
return array
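# Minimal usage sketch for find_split (hypothetical flows; left commented out
# because calling it requires a thermo package to already be set via
# tmo.settings.set_thermo, since find_split looks up indices from the active thermo):
#     split = find_split(('Glucose', 'Xylose'), (100, 50), (10, 5), {})
#     # -> ~100/110 for glucose, ~50/55 for xylose, 0 for all other chemicals,
#     #    with the FermMicrobe split copied to WWTsludge as done above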
IDs = ('Ethanol', 'H2O', 'Glucose', 'Xylose', 'OtherSugars',
'SugarOligomers', 'OrganicSolubleSolids', 'InorganicSolubleSolids', 'Ammonia', 'AceticAcid',
'SulfuricAcid', 'Furfurals', 'OtherOrganics', 'CO2', 'CH4',
'O2', 'N2', 'COSOxNOxH2S', 'Glucan', 'Xylan',
'OtherStructuralCarbohydrates', 'Acetate', 'Lignin', 'Protein', 'CellMass',
'OtherInsolubleSolids')
streams = {}
streams['stream_535'] = (177, 329030, 502, 1022, 2094,
1552, 15808, 2513, 0, 0,
0, 513, 1348, 0, 0,
0, 0, 0, 25, 8,
2, 0, 250, 69, 19,
92)
streams['stream_571'] = (6, 12797, 19, 49, 81,
60, 612, 97, 0, 0,
0, 19, 52, 0, 0,
1, 1, 0, 1230, 415,
94, 0, 12226, 3376, 925,
4489)
streams['stream_611'] = (15, 356069, 42, 85, 175,
130, 2387, 110, 633, 5,
0, 70, 113, 181, 3,
1, 0, 300, 6, 2,
0, 0, 64, 18, 280,
23)
streams['stream_612'] = (1, 27158, 3, 7, 13,
10, 182, 8, 48, 0,
0, 5, 9, 14, 0,
0, 0, 23, 19, 6,
1, 0, 186, 51, 813,
68)
streams['stream_616'] = (1, 109098, 3, 6, 13,
9, 187, 1068, 46, 0,
0, 5, 8, 14, 0,
1, 1, 31, 1, 0,
0, 0, 13, 3, 80,
5)
streams['stream_623'] = (0, 7708, 0, 0, 1,
1, 13, 75, 3, 0,
0, 0, 1, 1, 0,
0, 0, 2, 25, 8,
2, 0, 250, 52, 1523,
92)
streams['stream_624'] = (0, 381300, 0, 1, 1,
1, 79, 4828, 3, 0,
0, 0, 1, 6, 0,
3, 5, 44, 0, 0,
0, 0, 0, 0, 0,
0)
streams['stream_625'] = (1, 2241169, 2, 3, 7,
6, 466, 28378, 16, 0,
0, 3, 7, 38, 0,
17, 32, 259, 194, 65,
15, 0, 1925, 90, 19778,
707)
stream_626 = (0,) + (376324,) + (0,) * (len(IDs)-2)
streams['stream_626'] = stream_626
streams['stream_627'] = (0, 4967, 0, 1, 1,
1, 79, 2828, 3, 0,
0, 0, 1, 3, 0,
0, 0, 44, 0, 0,
0, 0, 0, 0, 0,
0)
splits_df = pd.DataFrame.from_dict(streams)
splits_df.index = IDs
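# These tabulated design-report stream flows are meant to be consumed pairwise
# by find_split when setting Splitter splits in the system script; a hedged
# sketch of the intended call (chemical_groups is defined alongside the
# chemicals, not in this file):
#     cell_mass_split = find_split(splits_df.index,
#                                  splits_df['stream_571'],
#                                  splits_df['stream_535'],
#                                  chemical_groups)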
# %%
# =============================================================================
# Function to get feedstock flow by giving dry weight composition and moisture content
# =============================================================================
def get_feedstock_flow(dry_composition, moisture_content, dry_flow):
dry_array = TAL_chemicals.kwarray(dry_composition)
wet_flow = dry_flow / (1-moisture_content)
moisture_array = TAL_chemicals.kwarray(dict(Water=moisture_content))
feedstock_flow = wet_flow * (dry_array*(1-moisture_content)+moisture_array)
return feedstock_flow
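# Worked example (illustrative numbers): at 20% moisture, 1000 kg/hr of dry
# feedstock is 1000/(1-0.2) = 1250 kg/hr wet, of which 250 kg/hr is water and
# the dry composition is applied to the remaining 1000 kg/hr.
assert abs(get_feedstock_flow(dict(Glucan=1.0), 0.2, 1000.).sum() - 1250.) < 1e-6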
dry_composition = dict(
Glucan=0.3505, Xylan=0.1953, Lignin=0.1576, Ash=0.0493, Acetate=0.0181,
Protein=0.0310, Arabinan=0.0238, Galactan=0.0143, Mannan=0.0060,
Sucrose=0.0077, Extract=0.1465,
)
moisture_content = 0.2
dry_feedstock_flow = 2205 * _kg_per_ton / 24
# dry_feedstock_flow = 1188.9732935254162 * _kg_per_ton / 24
# dry_feedstock_flow = 500 * _kg_per_ton / 24
baseline_feedflow = get_feedstock_flow(dry_composition, moisture_content,
dry_feedstock_flow)
# %%
# =============================================================================
# Functions to compute chemical loading and adjust recycle flows to maintain
# a certain ratio for Esterification and Hydrolysis reactor
# =============================================================================
def compute_extra_chemical(feed, recycle, reactants_ID, chemical_ID, ratios):
reactants_in_feed = feed.imol[reactants_ID]
reactants_in_recycle = recycle.imol[reactants_ID]
chemical_needed = (ratios*(reactants_in_feed+reactants_in_recycle)).sum()
chemical_extra = (feed.imol[chemical_ID]+recycle.imol[chemical_ID]) - chemical_needed
return chemical_extra
def adjust_recycle(feed, recycle, reactants_ID, chemical_ID, ratios):
feed_chemical_needed = (feed.imol[reactants_ID]*ratios).sum() \
- feed.imol[chemical_ID]
recycle_chemical_extra = recycle.imol[chemical_ID] \
- (recycle.imol[reactants_ID]*ratios).sum()
split = feed_chemical_needed / recycle_chemical_extra
effluent = feed.copy()
recycle_recycled = recycle.copy()
recycle_recycled.mol *= split
recycle_discarded = recycle.copy()
recycle_discarded.mol *= (1 - split)
effluent.mix_from([feed, recycle_recycled])
return effluent, recycle_discarded
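# Hedged sketch of how the two helpers above are meant to be used together in
# a unit specification (stream names and the 1.5:1 chemical:reactant molar
# ratio are hypothetical):
#     ratios = np.array([1.5])
#     extra = compute_extra_chemical(feed, recycle, ('AceticAcid',), 'Ethanol', ratios)
#     if extra < 0:  # feed + recycle are short on the chemical; make up fresh
#         fresh.imol['Ethanol'] = -extra
#     else:          # recycle carries excess; recycle only the balancing part
#         effluent, purge = adjust_recycle(feed, recycle, ('AceticAcid',), 'Ethanol', ratios)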
# %%
# =============================================================================
# Function to output chemical properties
# =============================================================================
def get_chemical_properties(chemicals, T, P, output=False):
formulas = [chemical.formula for chemical in chemicals]
MWs = [chemical.MW for chemical in chemicals]
Hfs = [chemical.Hf for chemical in chemicals]
HHVs = [chemical.HHV for chemical in chemicals]
LHVs = [chemical.LHV for chemical in chemicals]
phases = []
Tbs = []
Psats = []
Vs = []
Cns = []
mus = []
kappas = []
for chemical in chemicals:
if chemical.locked_state:
phases.append(chemical.phase_ref)
Tbs.append('NA')
            # `except Exception` (rather than a bare except) so keyboard
            # interrupts still propagate; chemicals lacking a model get a blank cell
            try: Psats.append(chemical.Psat(T=T, P=P))
            except Exception: Psats.append('')
            try: Vs.append(chemical.V(T=T, P=P))
            except Exception: Vs.append('')
            try: Cns.append(chemical.Cn(T=T))
            except Exception: Cns.append('')
            try: mus.append(chemical.mu(T=T, P=P))
            except Exception: mus.append('')
            try: kappas.append(chemical.kappa(T=T, P=P))
            except Exception: kappas.append('')
        else:
            ref_phase = chemical.get_phase(T=T, P=P)
            phases.append(f'variable, ref={ref_phase}')
            Tbs.append(chemical.Tb)
            try: Psats.append(chemical.Psat(T=T, P=P))
            except Exception: Psats.append('')
            try: Vs.append(chemical.V(ref_phase, T=T, P=P))
            except Exception: Vs.append('')
            try: Cns.append(chemical.Cn(ref_phase, T=T))
            except Exception: Cns.append('')
            try: mus.append(chemical.mu(ref_phase, T=T, P=P))
            except Exception: mus.append('')
            try: kappas.append(chemical.kappa(ref_phase, T=T, P=P))
            except Exception: kappas.append('')
properties = pd.DataFrame(
{'ID': chemicals.IDs,
'formula': formulas,
'MW': MWs,
'HHV': HHVs,
'LHV': LHVs,
'Hf': Hfs,
'phase': phases,
'boiling point': Tbs,
'Psat': Psats,
'V': Vs,
'Cn': Cns,
'mu': mus,
'kappa': kappas}
)
    if output:
        properties.to_excel('chemical_properties.xlsx', sheet_name='properties')
    return properties  # also hand the table back so callers need not write to disk
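# Example call at ambient conditions (T in K, P in Pa; writes
# chemical_properties.xlsx in the working directory when output=True):
#     props = get_chemical_properties(TAL_chemicals, T=298.15, P=101325, output=True)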
# %%
# =============================================================================
# Function for quick result checking
# =============================================================================
def get_sugar_conc(stream, sugars=()):
fermentable_sugar = 0
for sugar in sugars:
fermentable_sugar += stream.imass[sugar]
fermentable_sugar_conc = fermentable_sugar/stream.F_vol
return fermentable_sugar_conc | {"hexsha": "fc87088c5315b853a73e0fe22daf1d8dd2bdee57", "size": 10198, "ext": "py", "lang": "Python", "max_stars_repo_path": "BioSTEAM 2.x.x/biorefineries/TAL/utils.py", "max_stars_repo_name": "yoelcortes/Bioindustrial-Complex", "max_stars_repo_head_hexsha": "d39edfec88e443ef7a62218ca0215e3b105f4b96", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-03T21:04:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-09T01:15:48.000Z", "max_issues_repo_path": "BioSTEAM 2.x.x/biorefineries/TAL/utils.py", "max_issues_repo_name": "yoelcortes/Bioindustrial-Complex", "max_issues_repo_head_hexsha": "d39edfec88e443ef7a62218ca0215e3b105f4b96", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-01-03T21:31:27.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-28T13:53:56.000Z", "max_forks_repo_path": "BioSTEAM 2.x.x/biorefineries/TAL/utils.py", "max_forks_repo_name": "yoelcortes/Bioindustrial-Complex", "max_forks_repo_head_hexsha": "d39edfec88e443ef7a62218ca0215e3b105f4b96", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-01-07T14:04:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-08T23:05:25.000Z", "avg_line_length": 36.1631205674, "max_line_length": 97, "alphanum_fraction": 0.5107864287, "include": true, "reason": "import numpy", "num_tokens": 2921} |
[STATEMENT]
lemma ereal_leq_imp_neg_leq [mono_intros]:
fixes x y::ereal
assumes "x \<le> y"
shows "-y \<le> -x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - y \<le> - x
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
x \<le> y
goal (1 subgoal):
1. - y \<le> - x
[PROOF STEP]
by auto | {"llama_tokens": 148, "file": "Gromov_Hyperbolicity_Library_Complements", "length": 2} |
cBHEADER**********************************************************************
c Copyright (c) 2008, Lawrence Livermore National Security, LLC.
c Produced at the Lawrence Livermore National Laboratory.
c This file is part of HYPRE. See file COPYRIGHT for details.
c
c HYPRE is free software; you can redistribute it and/or modify it under the
c terms of the GNU Lesser General Public License (as published by the Free
c Software Foundation) version 2.1 dated February 1999.
c
c $Revision: 1.5 $
cEHEADER**********************************************************************
c***********************************************************************
c Routines to test struct_ls fortran interfaces
c***********************************************************************
c***********************************************************************
c HYPRE_StructBiCGSTAB routines
c***********************************************************************
c***********************************************************************
c fhypre_structbicgstabcreate
c***********************************************************************
subroutine fhypre_structbicgstabcreate(fcomm, fsolver)
integer ierr
integer fcomm
integer*8 fsolver
call HYPRE_StructBiCGSTABCreate(fcomm, fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabcreate: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabdestroy
c***********************************************************************
subroutine fhypre_structbicgstabdestroy(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructBiCGSTABDestroy(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabdestroy: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabsetup
c***********************************************************************
subroutine fhypre_structbicgstabsetup(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructBiCGSTABSetup(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabsetup: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabsolve
c***********************************************************************
subroutine fhypre_structbicgstabsolve(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructBiCGSTABSolve(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabsolve: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabsettol
c***********************************************************************
subroutine fhypre_structbicgstabsettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructBiCGSTABSetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabsettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabsetmaxiter
c***********************************************************************
subroutine fhypre_structbicgstabsetmaxiter(fsolver, fmaxiter)
integer ierr
integer fmaxiter
integer*8 fsolver
call HYPRE_StructBiCGSTABSetMaxIter(fsolver, fmaxiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabsetmaxiter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabsetprecond
c***********************************************************************
subroutine fhypre_structbicgstabsetprecond(fsolver, fprecond_id,
1 fprecond_solver)
integer ierr
integer*8 fsolver
      integer fprecond_id
integer*8 fprecond_solver
call HYPRE_StructBiCGSTABSetPrecond(fsolver, fprecond_id,
1 fprecond_solver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabsetprecond: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabsetlogging
c***********************************************************************
subroutine fhypre_structbicgstabsetlogging(fsolver, flogging)
integer ierr
integer flogging
integer*8 fsolver
call HYPRE_StructBiCGSTABSetLogging(fsolver, flogging, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabsetlogging: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabsetprintlevel
c***********************************************************************
subroutine fhypre_structbicgstabsetprintle(fsolver, fprintlev)
integer ierr
integer fprintlev
integer*8 fsolver
call HYPRE_StructBiCGSTABSetPrintLev(fsolver, fprintlev, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabsetprintle: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabgetnumiterations
c***********************************************************************
subroutine fhypre_structbicgstabgetnumiter(fsolver, fnumiter)
integer ierr
integer fnumiter
integer*8 fsolver
call HYPRE_StructBiCGSTABGetNumItera(fsolver, fnumiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabgetnumiter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabgetresidual
c***********************************************************************
subroutine fhypre_structbicgstabgetresidua(fsolver, fresidual)
integer ierr
integer*8 fsolver
double precision fresidual
call HYPRE_StructBiCGSTABGetResidual(fsolver, fresidual, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabgetresidua: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structbicgstabgetfinalrelativeresidualnorm
c***********************************************************************
subroutine fhypre_structbicgstabgetfinalre(fsolver, fnorm)
integer ierr
integer*8 fsolver
double precision fnorm
      call HYPRE_StructBiCGSTABGetFinalRel(fsolver, fnorm, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structbicgstabgetfinalre: err = ', ierr
endif
return
end
c***********************************************************************
c HYPRE_StructGMRES routines
c***********************************************************************
c***********************************************************************
c fhypre_structgmrescreate
c***********************************************************************
subroutine fhypre_structgmrescreate(fcomm, fsolver)
integer ierr
integer fcomm
integer*8 fsolver
call HYPRE_StructGMRESCreate(fcomm, fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmrescreate: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structgmresdestroy
c***********************************************************************
subroutine fhypre_structgmresdestroy(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructGMRESDestroy(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmresdestroy: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structgmressetup
c***********************************************************************
subroutine fhypre_structgmressetup(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructGMRESSetup(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmressetup: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structgmressolve
c***********************************************************************
subroutine fhypre_structgmressolve(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructGMRESSolve(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmressolve: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structgmressettol
c***********************************************************************
subroutine fhypre_structgmressettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructGMRESSetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmressettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structgmressetmaxiter
c***********************************************************************
subroutine fhypre_structgmressetmaxiter(fsolver, fmaxiter)
integer ierr
integer fmaxiter
integer*8 fsolver
call HYPRE_StructGMRESSetMaxIter(fsolver, fmaxiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmressetmaxiter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structgmressetprecond
c***********************************************************************
subroutine fhypre_structgmressetprecond(fsolver, fprecond_id,
1 fprecond_solver)
integer ierr
integer fprecond_id
integer*8 fsolver
integer*8 fprecond_solver
call HYPRE_StructGMRESSetPrecond(fsolver, fprecond_id,
1 fprecond_solver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmressetprecond: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structgmressetlogging
c***********************************************************************
subroutine fhypre_structgmressetlogging(fsolver, flogging)
integer ierr
integer flogging
integer*8 fsolver
call HYPRE_StructGMRESSetLogging(fsolver, flogging, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmressetlogging: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structgmressetprintlevel
c***********************************************************************
subroutine fhypre_structgmressetprintlevel(fsolver, fprintlevel)
integer ierr
integer fprintlevel
integer*8 fsolver
      call HYPRE_StructGMRESSetPrintLevel(fsolver, fprintlevel, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmressetprintlevel: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structgmresgetnumiterations
c***********************************************************************
subroutine fhypre_structgmresgetnumiterati(fsolver, fnumiters)
integer ierr
integer fnumiters
integer*8 fsolver
call HYPRE_StructGMRESGetNumIteratio(fsolver, fnumiters, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmresgetnumiterati: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structgmresgetfinalrelativeresidualnorm
c***********************************************************************
subroutine fhypre_structgmresgetfinalrelat(fsolver, fnorm)
integer ierr
integer*8 fsolver
double precision fnorm
call HYPRE_StructGMRESGetFinalRelati(fsolver, fnorm, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structgmresgetfinalrelat: err = ', ierr
endif
return
end
c***********************************************************************
c HYPRE_StructHybrid routines
c***********************************************************************
c***********************************************************************
c fhypre_structhybridcreate
c***********************************************************************
subroutine fhypre_structhybridcreate(fcomm, fsolver)
integer ierr
integer fcomm
integer*8 fsolver
call HYPRE_StructHybridCreate(fcomm, fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridcreate: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybriddestroy
c***********************************************************************
subroutine fhypre_structhybriddestroy(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructHybridDestroy(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybriddestroy: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetup
c***********************************************************************
subroutine fhypre_structhybridsetup(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructHybridSetup(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetup: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsolve
c***********************************************************************
subroutine fhypre_structhybridsolve(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructHybridSolve(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsolve: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetsolvertype
c***********************************************************************
subroutine fhypre_structhybridsetsolvertyp(fsolver, fsolver_typ)
integer ierr
integer fsolver_typ
integer*8 fsolver
call HYPRE_StructHybridSetSolverType(fsolver, fsolver_typ, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetsolvertyp: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetstopcrit
c***********************************************************************
subroutine fhypre_structhybridsetstopcrit(fsolver, fstop_crit)
integer ierr
integer fstop_crit
integer*8 fsolver
call HYPRE_StructHybridSetStopCrit(fsolver, fstop_crit, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetstopcrit: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetkdim
c***********************************************************************
subroutine fhypre_structhybridsetkdim(fsolver, fkdim)
integer ierr
integer fkdim
integer*8 fsolver
call HYPRE_StructHybridSetKDim(fsolver, fkdim, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetkdim: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsettol
c***********************************************************************
subroutine fhypre_structhybridsettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructHybridSetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetconvergencetol
c***********************************************************************
subroutine fhypre_structhybridsetconvergen(fsolver, fcftol)
integer ierr
integer*8 fsolver
double precision fcftol
call HYPRE_StructHybridSetConvergenc(fsolver, fcftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetconvergen: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetpcgabsolutetolfactor
c***********************************************************************
subroutine fhypre_structhybridsetpcgabsolu(fsolver, fpcgtol)
integer ierr
integer*8 fsolver
double precision fpcgtol
call HYPRE_StructHybridSetPCGAbsolut(fsolver, fpcgtol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetpcgabsolu: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetdscgmaxiter
c***********************************************************************
subroutine fhypre_structhybridsetdscgmaxit(fsolver, fdscgmaxitr)
integer ierr
integer fdscgmaxitr
integer*8 fsolver
call HYPRE_StructHybridSetDSCGMaxIte(fsolver, fdscgmaxitr, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetdscgmaxit: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetpcgmaxiter
c***********************************************************************
subroutine fhypre_structhybridsetpcgmaxite(fsolver, fpcgmaxitr)
integer ierr
integer fpcgmaxitr
integer*8 fsolver
call HYPRE_StructHybridSetPCGMaxIter(fsolver, fpcgmaxitr, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetpcgmaxite: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsettwonorm
c***********************************************************************
subroutine fhypre_structhybridsettwonorm(fsolver, ftwonorm)
integer ierr
integer ftwonorm
integer*8 fsolver
call HYPRE_StructHybridSetTwoNorm(fsolver, ftwonorm, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsettwonorm: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetrelchange
c***********************************************************************
subroutine fhypre_structhybridsetrelchange(fsolver, frelchng)
integer ierr
integer frelchng
integer*8 fsolver
call HYPRE_StructHybridSetRelChange(fsolver, frelchng, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetrelchange: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetprecond
c***********************************************************************
subroutine fhypre_structhybridsetprecond(fsolver, fprecond_id,
1 fprecond)
integer ierr
integer fprecond_id
integer*8 fsolver
integer*8 fprecond
call HYPRE_StructHybridSetPrecond(fsolver, fprecond_id, fprecond,
1 ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetprecond: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetlogging
c***********************************************************************
subroutine fhypre_structhybridsetlogging(fsolver, flogging)
integer ierr
integer flogging
integer*8 fsolver
call HYPRE_StructHybridSetLogging(fsolver, flogging, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetlogging: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridsetprintlevel
c***********************************************************************
subroutine fhypre_structhybridsetprintleve(fsolver, fprntlvl)
integer ierr
integer fprntlvl
integer*8 fsolver
call HYPRE_StructHybridSetPrintLevel(fsolver, fprntlvl, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridsetprintleve: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridgetnumiterations
c***********************************************************************
subroutine fhypre_structhybridgetnumiterat(fsolver, fnumits)
integer ierr
integer fnumits
integer*8 fsolver
call HYPRE_StructHybridGetNumIterati(fsolver, fnumits, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridgetnumiterat: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridgetdscgnumiterations
c***********************************************************************
subroutine fhypre_structhybridgetdscgnumit(fsolver, fdscgnumits)
integer ierr
integer fdscgnumits
integer*8 fsolver
call HYPRE_StructHybridGetDSCGNumIte(fsolver, fdscgnumits, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridgetdscgnumit: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridgetpcgnumiterations
c***********************************************************************
subroutine fhypre_structhybridgetpcgnumite(fsolver, fpcgnumits)
integer ierr
integer fpcgnumits
integer*8 fsolver
call HYPRE_StructHybridGetPCGNumIter(fsolver, fpcgnumits, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridgetpcgnumite: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structhybridgetfinalrelativeresidualnorm
c***********************************************************************
subroutine fhypre_structhybridgetfinalrela(fsolver, fnorm)
integer ierr
integer*8 fsolver
double precision fnorm
call HYPRE_StructHybridGetFinalRelat(fsolver, fnorm, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structhybridgetfinalrela: err = ', ierr
endif
return
end
c***********************************************************************
c HYPRE_StructInterpreter routines
c***********************************************************************
c***********************************************************************
c fhypre_structvectorsetrandomvalues
c***********************************************************************
subroutine fhypre_structvectorsetrandomvalu(fvector, fseed)
integer ierr
integer fseed
integer*8 fvector
call hypre_StructVectorSetRandomValu(fvector, fseed, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structvectorsetrandomvalues: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsetrandomvalues
c***********************************************************************
subroutine fhypre_structsetrandomvalues(fvector, fseed)
integer ierr
integer fseed
integer*8 fvector
call hypre_StructSetRandomValues(fvector, fseed, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsetrandomvalues: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsetupinterpreter
c***********************************************************************
subroutine fhypre_structsetupinterpreter(fi)
integer ierr
integer*8 fi
call HYPRE_StructSetupInterpreter(fi, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsetupinterpreter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsetupmatvec
c***********************************************************************
subroutine fhypre_structsetupmatvec(fmv)
integer ierr
integer*8 fmv
call HYPRE_StructSetupMatvec(fmv, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsetupmatvec: err = ', ierr
endif
return
end
c***********************************************************************
c HYPRE_StructJacobi routines
c***********************************************************************
c***********************************************************************
c fhypre_structjacobicreate
c***********************************************************************
subroutine fhypre_structjacobicreate(fcomm, fsolver)
integer ierr
integer fcomm
integer*8 fsolver
call HYPRE_StructJacobiCreate(fcomm, fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobicreate: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobidestroy
c***********************************************************************
subroutine fhypre_structjacobidestroy(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructJacobiDestroy(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobidestroy: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobisetup
c***********************************************************************
subroutine fhypre_structjacobisetup(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructJacobiSetup(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobisetup: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobisolve
c***********************************************************************
subroutine fhypre_structjacobisolve(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructJacobiSolve(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobisolve: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobisettol
c***********************************************************************
subroutine fhypre_structjacobisettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructJacobiSetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobisettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobigettol
c***********************************************************************
subroutine fhypre_structjacobigettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructJacobiGetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobigettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobisetmaxiter
c***********************************************************************
subroutine fhypre_structjacobisetmaxiter(fsolver, fmaxiter)
integer ierr
integer fmaxiter
integer*8 fsolver
call HYPRE_StructJacobiSetMaxIter(fsolver, fmaxiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobisetmaxiter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobigetmaxiter
c***********************************************************************
subroutine fhypre_structjacobigetmaxiter(fsolver, fmaxiter)
integer ierr
integer fmaxiter
integer*8 fsolver
call HYPRE_StructJacobiGetMaxIter(fsolver, fmaxiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobigetmaxiter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobisetzeroguess
c***********************************************************************
subroutine fhypre_structjacobisetzeroguess(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructJacobiSetZeroGuess(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobisetzeroguess: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobigetzeroguess
c***********************************************************************
subroutine fhypre_structjacobigetzeroguess(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructJacobiGetZeroGuess(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobigetzeroguess: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobisetnonzeroguess
c***********************************************************************
subroutine fhypre_structjacobisetnonzerogu(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructJacobiSetNonZeroGue(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobisetnonzerogu: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobigetnumiterations
c***********************************************************************
subroutine fhypre_structjacobigetnumiterat(fsolver, fnumiters)
integer ierr
integer fnumiters
integer*8 fsolver
call HYPRE_StructJacobiGetNumIterati(fsolver, fnumiters, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobigetnumiterat: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structjacobigetfinalrelativeresidualnorm
c***********************************************************************
subroutine fhypre_structjacobigetfinalrela(fsolver, fnorm)
integer ierr
integer*8 fsolver
double precision fnorm
call HYPRE_StructJacobiGetFinalRelat(fsolver, fnorm, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structjacobigetfinalrela: err = ', ierr
endif
return
end
c***********************************************************************
c HYPRE_StructPCG routines
c***********************************************************************
c***********************************************************************
c fhypre_structpcgcreate
c***********************************************************************
subroutine fhypre_structpcgcreate(fcomm, fsolver)
integer ierr
integer fcomm
integer*8 fsolver
call HYPRE_StructPCGCreate(fcomm, fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgcreate: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcgdestroy
c***********************************************************************
subroutine fhypre_structpcgdestroy(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructPCGDestroy(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgdestroy: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcgsetup
c***********************************************************************
subroutine fhypre_structpcgsetup(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructPCGSetup(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgsetup: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcgsolve
c***********************************************************************
subroutine fhypre_structpcgsolve(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructPCGSolve(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgsolve: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcgsettol
c***********************************************************************
subroutine fhypre_structpcgsettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructPCGSetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgsettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcgsetmaxiter
c***********************************************************************
subroutine fhypre_structpcgsetmaxiter(fsolver, fmaxiter)
integer ierr
integer fmaxiter
integer*8 fsolver
call HYPRE_StructPCGSetMaxIter(fsolver, fmaxiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgsetmaxiter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcgsettwonorm
c***********************************************************************
subroutine fhypre_structpcgsettwonorm(fsolver, ftwonorm)
integer ierr
integer ftwonorm
integer*8 fsolver
call HYPRE_StructPCGSetTwoNorm(fsolver, ftwonorm, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgsettwonorm: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcgsetrelchange
c***********************************************************************
subroutine fhypre_structpcgsetrelchange(fsolver, frelchng)
integer ierr
integer frelchng
integer*8 fsolver
call HYPRE_StructPCGSetRelChange(fsolver, frelchng, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgsetrelchange: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcgsetprecond
c***********************************************************************
subroutine fhypre_structpcgsetprecond(fsolver, fprecond_id,
1 fprecond)
integer ierr
integer fprecond_id
integer*8 fsolver
integer*8 fprecond
call HYPRE_StructPCGSetPrecond(fsolver, fprecond_id, fprecond,
1 ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgsetprecond: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcgsetlogging
c***********************************************************************
subroutine fhypre_structpcgsetlogging(fsolver, flogging)
integer ierr
integer flogging
integer*8 fsolver
call HYPRE_StructPCGSetLogging(fsolver, flogging, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgsetlogging: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcgsetprintlevel
c***********************************************************************
subroutine fhypre_structpcgsetprintlevel(fsolver, fprntlvl)
integer ierr
integer fprntlvl
integer*8 fsolver
call HYPRE_StructPCGSetPrintLevel(fsolver, fprntlvl, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcgsetprintlevel: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcggetnumiterations
c***********************************************************************
subroutine fhypre_structpcggetnumiteration(fsolver, fnumiters)
integer ierr
integer fnumiters
integer*8 fsolver
call HYPRE_StructPCGGetNumIterations(fsolver, fnumiters, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpcggetnumiteration: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpcggetfinalrelativeresidualnorm
c***********************************************************************
subroutine fhypre_structpcggetfinalrelativ(fsolver, fnorm)
integer ierr
integer*8 fsolver
double precision fnorm
call HYPRE_StructPCGGetFinalRelative(fsolver, fnorm, ierr)
if (ierr .ne. 0) then
         print *, 'fhypre_structpcggetfinalrelativ: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structdiagscalesetup
c***********************************************************************
subroutine fhypre_structdiagscalesetup(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructDiagScaleSetup(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structdiagscalesetup: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structdiagscale
c***********************************************************************
subroutine fhypre_structdiagscale(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructDiagScale(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structdiagscale: err = ', ierr
endif
return
end
c***********************************************************************
c HYPRE_StructPFMG routines
c***********************************************************************
c***********************************************************************
c fhypre_structpfmgcreate
c***********************************************************************
subroutine fhypre_structpfmgcreate(fcomm, fsolver)
integer ierr
integer fcomm
integer*8 fsolver
call HYPRE_StructPFMGCreate(fcomm, fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgcreate: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgdestroy
c***********************************************************************
subroutine fhypre_structpfmgdestroy(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructPFMGDestroy(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgdestroy: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetup
c***********************************************************************
subroutine fhypre_structpfmgsetup(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructPFMGSetup(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetup: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsolve
c***********************************************************************
subroutine fhypre_structpfmgsolve(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructPFMGSolve(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsolve: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsettol
c***********************************************************************
subroutine fhypre_structpfmgsettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructPFMGSetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggettol
c***********************************************************************
subroutine fhypre_structpfmggettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructPFMGGetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetmaxiter
c***********************************************************************
subroutine fhypre_structpfmgsetmaxiter(fsolver, fmaxiter)
integer ierr
integer fmaxiter
integer*8 fsolver
call HYPRE_StructPFMGSetMaxIter(fsolver, fmaxiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetmaxiter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetmaxiter
c***********************************************************************
subroutine fhypre_structpfmggetmaxiter(fsolver, fmaxiter)
integer ierr
integer fmaxiter
integer*8 fsolver
call HYPRE_StructPFMGGetMaxIter(fsolver, fmaxiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetmaxiter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetmaxlevels
c***********************************************************************
subroutine fhypre_structpfmgsetmaxlevels(fsolver, fmaxlevels)
integer ierr
integer fmaxlevels
integer*8 fsolver
call HYPRE_StructPFMGSetMaxLevels(fsolver, fmaxlevels, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetmaxlevels: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetmaxlevels
c***********************************************************************
subroutine fhypre_structpfmggetmaxlevels(fsolver, fmaxlevels)
integer ierr
integer fmaxlevels
integer*8 fsolver
call HYPRE_StructPFMGGetMaxLevels(fsolver, fmaxlevels, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetmaxlevels: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetrelchange
c***********************************************************************
subroutine fhypre_structpfmgsetrelchange(fsolver, frelchange)
integer ierr
integer frelchange
integer*8 fsolver
call HYPRE_StructPFMGSetRelChange(fsolver, frelchange, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetrelchange: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetrelchange
c***********************************************************************
subroutine fhypre_structpfmggetrelchange(fsolver, frelchange)
integer ierr
integer frelchange
integer*8 fsolver
call HYPRE_StructPFMGGetRelChange(fsolver, frelchange, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetrelchange: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetzeroguess
c***********************************************************************
subroutine fhypre_structpfmgsetzeroguess(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructPFMGSetZeroGuess(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetzeroguess: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetzeroguess
c***********************************************************************
subroutine fhypre_structpfmggetzeroguess(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructPFMGGetZeroGuess(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetzeroguess: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetnonzeroguess
c***********************************************************************
subroutine fhypre_structpfmgsetnonzerogues(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructPFMGSetNonZeroGuess(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetnonzerogues: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetnumiterations
c***********************************************************************
subroutine fhypre_structpfmggetnumiteratio(fsolver, fnumiters)
integer ierr
integer fnumiters
integer*8 fsolver
call HYPRE_StructPFMGGetNumIteration(fsolver, fnumiters, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetnumiteratio: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetfinalrelativeresidualnorm
c***********************************************************************
subroutine fhypre_structpfmggetfinalrelati(fsolver, fnorm)
integer ierr
integer*8 fsolver
double precision fnorm
call HYPRE_StructPFMGGetFinalRelativ(fsolver, fnorm, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetfinalrelati: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetskiprelax
c***********************************************************************
subroutine fhypre_structpfmgsetskiprelax(fsolver, fskiprelax)
integer ierr
integer fskiprelax
integer*8 fsolver
call HYPRE_StructPFMGSetSkipRelax(fsolver, fskiprelax, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetskiprelax: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetskiprelax
c***********************************************************************
subroutine fhypre_structpfmggetskiprelax(fsolver, fskiprelax)
integer ierr
integer fskiprelax
integer*8 fsolver
call HYPRE_StructPFMGGetSkipRelax(fsolver, fskiprelax, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetskiprelax: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetrelaxtype
c***********************************************************************
subroutine fhypre_structpfmgsetrelaxtype(fsolver, frelaxtype)
integer ierr
integer frelaxtype
integer*8 fsolver
call HYPRE_StructPFMGSetRelaxType(fsolver, frelaxtype, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetrelaxtype: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetrelaxtype
c***********************************************************************
subroutine fhypre_structpfmggetrelaxtype(fsolver, frelaxtype)
integer ierr
integer frelaxtype
integer*8 fsolver
call HYPRE_StructPFMGGetRelaxType(fsolver, frelaxtype, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetrelaxtype: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetraptype
c***********************************************************************
subroutine fhypre_structpfmgsetraptype(fsolver, fraptype)
integer ierr
integer fraptype
integer*8 fsolver
call HYPRE_StructPFMGSetRAPType(fsolver, fraptype, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetraptype: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetraptype
c***********************************************************************
subroutine fhypre_structpfmggetraptype(fsolver, fraptype)
integer ierr
integer fraptype
integer*8 fsolver
call HYPRE_StructPFMGGetRAPType(fsolver, fraptype, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetraptype: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetnumprerelax
c***********************************************************************
subroutine fhypre_structpfmgsetnumprerelax(fsolver,
1 fnumprerelax)
integer ierr
integer fnumprerelax
integer*8 fsolver
call HYPRE_StructPFMGSetNumPreRelax(fsolver, fnumprerelax, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetnumprerelax: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetnumprerelax
c***********************************************************************
subroutine fhypre_structpfmggetnumprerelax(fsolver,
1 fnumprerelax)
integer ierr
integer fnumprerelax
integer*8 fsolver
call HYPRE_StructPFMGGetNumPreRelax(fsolver, fnumprerelax, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetnumprerelax: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetnumpostrelax
c***********************************************************************
subroutine fhypre_structpfmgsetnumpostrela(fsolver,
1 fnumpostrelax)
integer ierr
integer fnumpostrelax
integer*8 fsolver
call HYPRE_StructPFMGSetNumPostRelax(fsolver, fnumpostrelax, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetnumpostrela: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetnumpostrelax
c***********************************************************************
subroutine fhypre_structpfmggetnumpostrela(fsolver,
1 fnumpostrelax)
integer ierr
integer fnumpostrelax
integer*8 fsolver
call HYPRE_StructPFMGGetNumPostRelax(fsolver, fnumpostrelax, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetnumpostrela: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetdxyz
c***********************************************************************
subroutine fhypre_structpfmgsetdxyz(fsolver, fdxyz)
integer ierr
integer*8 fsolver
double precision fdxyz
call HYPRE_StructPFMGSetDxyz(fsolver, fdxyz, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetdxyz: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetlogging
c***********************************************************************
subroutine fhypre_structpfmgsetlogging(fsolver, flogging)
integer ierr
integer flogging
integer*8 fsolver
call HYPRE_StructPFMGSetLogging(fsolver, flogging, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetlogging: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetlogging
c***********************************************************************
subroutine fhypre_structpfmggetlogging(fsolver, flogging)
integer ierr
integer flogging
integer*8 fsolver
call HYPRE_StructPFMGGetLogging(fsolver, flogging, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetlogging: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmgsetprintlevel
c***********************************************************************
subroutine fhypre_structpfmgsetprintlevel(fsolver, fprintlevel)
integer ierr
integer fprintlevel
integer*8 fsolver
call HYPRE_StructPFMGSetPrintLevel(fsolver, fprintlevel, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmgsetprintlevel: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structpfmggetprintlevel
c***********************************************************************
subroutine fhypre_structpfmggetprintlevel(fsolver, fprintlevel)
integer ierr
integer fprintlevel
integer*8 fsolver
call HYPRE_StructPFMGGetPrintLevel(fsolver, fprintlevel, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structpfmggetprintlevel: err = ', ierr
endif
return
end
c***********************************************************************
c HYPRE_StructSMG routines
c***********************************************************************
c***********************************************************************
c fhypre_structsmgcreate
c***********************************************************************
subroutine fhypre_structsmgcreate(fcomm, fsolver)
integer ierr
integer fcomm
integer*8 fsolver
call HYPRE_StructSMGCreate(fcomm, fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgcreate: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgdestroy
c***********************************************************************
subroutine fhypre_structsmgdestroy(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructSMGDestroy(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgdestroy: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsetup
c***********************************************************************
subroutine fhypre_structsmgsetup(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructSMGSetup(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsetup: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsolve
c***********************************************************************
subroutine fhypre_structsmgsolve(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructSMGSolve(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsolve: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsetmemoryuse
c***********************************************************************
subroutine fhypre_structsmgsetmemoryuse(fsolver, fmemuse)
integer ierr
integer fmemuse
integer*8 fsolver
call HYPRE_StructSMGSetMemoryUse(fsolver, fmemuse, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsetmemoryuse: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggetmemoryuse
c***********************************************************************
subroutine fhypre_structsmggetmemoryuse(fsolver, fmemuse)
integer ierr
integer fmemuse
integer*8 fsolver
call HYPRE_StructSMGGetMemoryUse(fsolver, fmemuse, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggetmemoryuse: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsettol
c***********************************************************************
subroutine fhypre_structsmgsettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructSMGSetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggettol
c***********************************************************************
subroutine fhypre_structsmggettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructSMGGetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsetmaxiter
c***********************************************************************
subroutine fhypre_structsmgsetmaxiter(fsolver, fmaxiter)
integer ierr
integer fmaxiter
integer*8 fsolver
call HYPRE_StructSMGSetMaxIter(fsolver, fmaxiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsetmaxiter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggetmaxiter
c***********************************************************************
subroutine fhypre_structsmggetmaxiter(fsolver, fmaxiter)
integer ierr
integer fmaxiter
integer*8 fsolver
call HYPRE_StructSMGGetMaxIter(fsolver, fmaxiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggetmaxiter: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsetrelchange
c***********************************************************************
subroutine fhypre_structsmgsetrelchange(fsolver, frelchange)
integer ierr
integer frelchange
integer*8 fsolver
call HYPRE_StructSMGSetRelChange(fsolver, frelchange, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsetrelchange: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggetrelchange
c***********************************************************************
subroutine fhypre_structsmggetrelchange(fsolver, frelchange)
integer ierr
integer frelchange
integer*8 fsolver
call HYPRE_StructSMGGetRelChange(fsolver, frelchange, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggetrelchange: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsetzeroguess
c***********************************************************************
subroutine fhypre_structsmgsetzeroguess(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructSMGSetZeroGuess(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsetzeroguess: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggetzeroguess
c***********************************************************************
subroutine fhypre_structsmggetzeroguess(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructSMGGetZeroGuess(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggetzeroguess: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsetnonzeroguess
c***********************************************************************
subroutine fhypre_structsmgsetnonzeroguess(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructSMGSetNonZeroGuess(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsetnonzeroguess: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggetnumiterations
c***********************************************************************
subroutine fhypre_structsmggetnumiteration(fsolver, fnumiters)
integer ierr
integer fnumiters
integer*8 fsolver
call HYPRE_StructSMGGetNumIterations(fsolver, fnumiters, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggetnumiteration: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggetfinalrelativeresidualnorm
c***********************************************************************
subroutine fhypre_structsmggetfinalrelativ(fsolver, fnorm)
integer ierr
integer*8 fsolver
double precision fnorm
call HYPRE_StructSMGGetFinalRelative(fsolver, fnorm, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggetfinalrelativ: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsetnumprerelax
c***********************************************************************
subroutine fhypre_structsmgsetnumprerelax(fsolver, fnumprerelax)
integer ierr
integer fnumprerelax
integer*8 fsolver
call HYPRE_StructSMGSetNumPreRelax(fsolver, fnumprerelax, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsetnumprerelax: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggetnumprerelax
c***********************************************************************
subroutine fhypre_structsmggetnumprerelax(fsolver, fnumprerelax)
integer ierr
integer fnumprerelax
integer*8 fsolver
call HYPRE_StructSMGGetNumPreRelax(fsolver, fnumprerelax, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggetnumprerelax: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsetnumpostrelax
c***********************************************************************
subroutine fhypre_structsmgsetnumpostrelax(fsolver, fnumpstrlx)
integer ierr
integer fnumpstrlx
integer*8 fsolver
call HYPRE_StructSMGSetNumPostRelax(fsolver, fnumpstrlx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsetnumpostrelax: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggetnumpostrelax
c***********************************************************************
subroutine fhypre_structsmggetnumpostrelax(fsolver, fnumpstrlx)
integer ierr
integer fnumpstrlx
integer*8 fsolver
call HYPRE_StructSMGGetNumPostRelax(fsolver, fnumpstrlx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggetnumpostrelax: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsetlogging
c***********************************************************************
subroutine fhypre_structsmgsetlogging(fsolver, flogging)
integer ierr
integer flogging
integer*8 fsolver
call HYPRE_StructSMGSetLogging(fsolver, flogging, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsetlogging: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggetlogging
c***********************************************************************
subroutine fhypre_structsmggetlogging(fsolver, flogging)
integer ierr
integer flogging
integer*8 fsolver
call HYPRE_StructSMGGetLogging(fsolver, flogging, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggetlogging: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmgsetprintlevel
c***********************************************************************
subroutine fhypre_structsmgsetprintlevel(fsolver, fprintlevel)
integer ierr
integer fprintlevel
integer*8 fsolver
call HYPRE_StructSMGSetPrintLevel(fsolver, fprintlevel, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmgsetprintlevel: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsmggetprintlevel
c***********************************************************************
subroutine fhypre_structsmggetprintlevel(fsolver, fprintlevel)
integer ierr
integer fprintlevel
integer*8 fsolver
call HYPRE_StructSMGGetPrintLevel(fsolver, fprintlevel, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsmggetprintlevel: err = ', ierr
endif
return
end
c***********************************************************************
c HYPRE_StructSparseMSG routines
c***********************************************************************
c***********************************************************************
c fhypre_structsparsemsgcreate
c***********************************************************************
subroutine fhypre_structsparsemsgcreate(fcomm, fsolver)
integer ierr
integer fcomm
integer*8 fsolver
call HYPRE_StructSparseMSGCreate(fcomm, fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgcreate: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgdestroy
c***********************************************************************
subroutine fhypre_structsparsemsgdestroy(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructSparseMSGDestroy(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgdestroy: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetup
c***********************************************************************
subroutine fhypre_structsparsemsgsetup(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructSparseMSGSetup(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetup: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsolve
c***********************************************************************
subroutine fhypre_structsparsemsgsolve(fsolver, fA, fb, fx)
integer ierr
integer*8 fsolver
integer*8 fA
integer*8 fb
integer*8 fx
call HYPRE_StructSparseMSGSolve(fsolver, fA, fb, fx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsolve: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetjump
c***********************************************************************
subroutine fhypre_structsparsemsgsetjump(fsolver, fjump)
integer ierr
integer fjump
integer*8 fsolver
call HYPRE_StructSparseMSGSetJump(fsolver, fjump, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetjump: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsettol
c***********************************************************************
subroutine fhypre_structsparsemsgsettol(fsolver, ftol)
integer ierr
integer*8 fsolver
double precision ftol
call HYPRE_StructSparseMSGSetTol(fsolver, ftol, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsettol: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetmaxiter
c***********************************************************************
subroutine fhypre_structsparsemsgsetmaxite(fsolver, fmaxiter)
integer ierr
integer fmaxiter
integer*8 fsolver
call HYPRE_StructSparseMSGSetMaxIter(fsolver, fmaxiter, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetmaxite: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetrelchange
c***********************************************************************
subroutine fhypre_structsparsemsgsetrelcha(fsolver, frelchange)
integer ierr
integer frelchange
integer*8 fsolver
call HYPRE_StructSparseMSGSetRelChan(fsolver, frelchange, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetrelcha: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetzeroguess
c***********************************************************************
subroutine fhypre_structsparsemsgsetzerogu(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructSparseMSGSetZeroGue(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetzerogu: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetnonzeroguess
c***********************************************************************
subroutine fhypre_structsparsemsgsetnonzer(fsolver)
integer ierr
integer*8 fsolver
call HYPRE_StructSparseMSGSetNonZero(fsolver, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetnonzer: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsggetnumiterations
c***********************************************************************
subroutine fhypre_structsparsemsggetnumite(fsolver, fniters)
integer ierr
integer fniters
integer*8 fsolver
call HYPRE_StructSparseMSGGetNumIter(fsolver, fniters, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsggetnumite: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsggetfinalrelativeresidualnorm
c***********************************************************************
subroutine fhypre_structsparsemsggetfinalr(fsolver, fnorm)
integer ierr
integer*8 fsolver
double precision fnorm
call HYPRE_StructSparseMSGGetFinalRe(fsolver, fnorm, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsggetfinalr: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetrelaxtype
c***********************************************************************
subroutine fhypre_structsparsemsgsetrelaxt(fsolver, frelaxtype)
integer ierr
integer frelaxtype
integer*8 fsolver
call HYPRE_StructSparseMSGSetRelaxTy(fsolver, frelaxtype, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetrelaxt: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetnumprerelax
c***********************************************************************
subroutine fhypre_structsparsemsgsetnumpre(fsolver, fnprelax)
integer ierr
integer fnprelax
integer*8 fsolver
call HYPRE_StructSparseMSGSetNumPreR(fsolver, fnprelax, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetnumpre: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetnumpostrelax
c***********************************************************************
subroutine fhypre_structsparsemsgsetnumpos(fsolver, fnpstrlx)
integer ierr
integer fnpstrlx
integer*8 fsolver
call HYPRE_StructSparseMSGSetNumPost(fsolver, fnpstrlx, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetnumpos: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetnumfinerelax
c***********************************************************************
subroutine fhypre_structsparsemsgsetnumfin(fsolver, fnfine)
integer ierr
integer fnfine
integer*8 fsolver
call HYPRE_StructSparseMSGSetNumFine(fsolver, fnfine, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetnumfin: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetlogging
c***********************************************************************
subroutine fhypre_structsparsemsgsetloggin(fsolver, flogging)
integer ierr
integer flogging
integer*8 fsolver
call HYPRE_StructSparseMSGSetLogging(fsolver, flogging, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetloggin: err = ', ierr
endif
return
end
c***********************************************************************
c fhypre_structsparsemsgsetprintlevel
c***********************************************************************
subroutine fhypre_structsparsemsgsetprintl(fsolver, fprntlvl)
integer ierr
integer fprntlvl
integer*8 fsolver
call HYPRE_StructSparseMSGSetPrintLe(fsolver, fprntlvl, ierr)
if (ierr .ne. 0) then
print *, 'fhypre_structsparsemsgsetprintl: err = ', ierr
endif
return
end
| {"hexsha": "38404f3e5628247921d8c7fdd373ecdbf5600ad9", "size": 79700, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "linux_packages/source/hypre-2.9.0b/src/test/fstruct_ls.f", "max_stars_repo_name": "pangkeji/warp3d", "max_stars_repo_head_hexsha": "8b273b337e557f734298940a63291697cd561d02", "max_stars_repo_licenses": ["NCSA"], "max_stars_count": 75, "max_stars_repo_stars_event_min_datetime": "2015-07-06T18:14:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T02:54:32.000Z", "max_issues_repo_path": "src/ilPSP/layer_0/3rd_party/Hypre2.9.0b/src/test/fstruct_ls.f", "max_issues_repo_name": "leyel/BoSSS", "max_issues_repo_head_hexsha": "39f58a1a64a55e44f51384022aada20a5b425230", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2017-04-07T18:09:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T01:48:33.000Z", "max_forks_repo_path": "src/ilPSP/layer_0/3rd_party/Hypre2.9.0b/src/test/fstruct_ls.f", "max_forks_repo_name": "leyel/BoSSS", "max_forks_repo_head_hexsha": "39f58a1a64a55e44f51384022aada20a5b425230", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2015-05-24T23:24:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-13T22:07:45.000Z", "avg_line_length": 31.427444795, "max_line_length": 78, "alphanum_fraction": 0.4676787955, "num_tokens": 17589} |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utilities for downloading and converting datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, glob
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
LABELS_FILENAME = 'labels.txt'
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR,'utils'))
def decode_raw(encoded_string, org_dtype, org_shape):
out_tensor = tf.reshape(tf.decode_raw(encoded_string, org_dtype),
org_shape) # Shape information is lost
return out_tensor
def int64_feature(values):
"""Returns a TF-Feature of int64s.
Args:
values: A scalar or list of values.
Returns:
A TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def bytes_feature(values):
"""Returns a TF-Feature of bytes.
Args:
values: A string.
Returns:
A TF-Feature.
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def float_feature(values):
"""Returns a TF-Feature of floats.
Args:
    values: A scalar or list of values.
Returns:
A TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def pl_bxm_to_tfexample(points, object_label, sg_all_bidxmaps, bidxmaps_flat, fmap_neighbor_idis, no_bidxmap=False):
assert points.dtype == np.float32
assert sg_all_bidxmaps.dtype == np.int32
assert bidxmaps_flat.dtype == np.int32
assert fmap_neighbor_idis.dtype == np.float32
points_bin = points.tobytes()
points_shape_bin = np.array(points.shape, np.int32).tobytes()
sg_all_bidxmaps_bin = sg_all_bidxmaps.tobytes()
sg_all_bidxmaps_shape_bin = np.array(sg_all_bidxmaps.shape, np.int32).tobytes()
bidxmaps_flat_bin = bidxmaps_flat.tobytes()
bidxmaps_flat_shape_bin = np.array(bidxmaps_flat.shape, np.int32).tobytes()
fmap_neighbor_idis_bin = fmap_neighbor_idis.tobytes()
fmap_neighbor_idis_shape_bin = np.array(fmap_neighbor_idis.shape, np.int32).tobytes()
feature_map = {
'points/encoded': bytes_feature(points_bin),
'points/shape': bytes_feature(points_shape_bin),
'object/label': int64_feature(object_label) }
feature_map1 = {
'sg_all_bidxmaps/encoded': bytes_feature(sg_all_bidxmaps_bin),
'sg_all_bidxmaps/shape': bytes_feature(sg_all_bidxmaps_shape_bin),
'bidxmaps_flat/encoded': bytes_feature(bidxmaps_flat_bin),
'bidxmaps_flat/shape': bytes_feature(bidxmaps_flat_shape_bin),
'fmap_neighbor_idis/encoded': bytes_feature(fmap_neighbor_idis_bin),
'fmap_neighbor_idis/shape': bytes_feature(fmap_neighbor_idis_shape_bin) }
if not no_bidxmap:
feature_map.update(feature_map1)
example = tf.train.Example(features=tf.train.Features(feature=feature_map))
return example
def data_meta_to_tfexample(point_idxs):
data_eles = ['xyz', 'nxnynz', 'color', 'intensity']
feature_map = {}
point_idxs_bin = {}
for ele in data_eles:
if ele not in point_idxs:
point_idxs_bin[ele] = np.array([],np.int32).tobytes()
else:
point_idxs_bin[ele] = np.array(point_idxs[ele],np.int32).tobytes()
feature_map['point_idxs/%s'%(ele)] = bytes_feature( point_idxs_bin[ele] )
  example = tf.train.Example(features=tf.train.Features(feature=feature_map))
  return example
def write_pl_bxm_tfrecord(bxm_tfrecord_writer, tfrecord_meta_writer,\
datasource_name, points, point_idxs, object_labels,\
sg_all_bidxmaps, bidxmaps_flat, fmap_neighbor_idis, \
no_bidxmap=False):
if tfrecord_meta_writer!=None:
example = data_meta_to_tfexample(point_idxs)
    tfrecord_meta_writer.write(example.SerializeToString())
num_gblocks = sg_all_bidxmaps.shape[0]
assert num_gblocks == points.shape[0]
for j in range(num_gblocks):
    example = pl_bxm_to_tfexample(points[j], object_labels[j], sg_all_bidxmaps[j], bidxmaps_flat[j], fmap_neighbor_idis[j], no_bidxmap=no_bidxmap)
bxm_tfrecord_writer.write(example.SerializeToString())
def pc_normalize(points):
has_normal = points.shape[-1].value == 6
points_xyz = points[:,0:3]
if has_normal:
points_normal = points[:,3:6]
centroid = tf.reduce_mean(points_xyz, axis=0)
points_xyz -= centroid
m = tf.reduce_max(tf.sqrt(tf.reduce_sum(tf.pow(points_xyz, 2),axis=1)))
points_xyz = points_xyz / m
if has_normal:
points_normed = tf.concat([points_xyz, points_normal], -1)
else:
points_normed = points_xyz
return points_normed
def parse_pl_record(tfrecord_serialized, is_training, data_shaps=None, bsg=None):
from aug_data_tf import aug_main, aug_views
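  # Parse one serialized tf.train.Example into (features dict, object label).
  # Optionally applies augmentation (training) or multi-view replication
  # (evaluation), plus on-line multi-scale grouping when a bsg object is given.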
#if data_shaps!=None:
# from aug_data_tf import aug_data, tf_Rz
# R = tf_Rz(1)
# import pdb; pdb.set_trace() # XXX BREAKPOINT
feature_map = {
'object/label': tf.FixedLenFeature([], tf.int64),
'points/shape': tf.FixedLenFeature([], tf.string),
'points/encoded': tf.FixedLenFeature([], tf.string),
}
tfrecord_features = tf.parse_single_example(tfrecord_serialized,
features=feature_map,
name='pl_features')
object_label = tf.cast(tfrecord_features['object/label'], tf.int32)
object_label = tf.expand_dims(object_label,0)
points = tf.decode_raw(tfrecord_features['points/encoded'], tf.float32)
if data_shaps == None:
points_shape = tf.decode_raw(tfrecord_features['points/shape'], tf.int32)
else:
points_shape = data_shaps['points']
  # the points tensor was flattened out, so we have to reconstruct its shape
points = tf.reshape(points, points_shape)
#if data_shaps != None:
# points = pc_normalize(points)
# ------------------------------------------------
# data augmentation
features = {}
b_bottom_centers_mm = []
if is_training:
if data_shaps != None and data_shaps['aug_types']!='none':
points, b_bottom_centers_mm, augs = aug_main(points, b_bottom_centers_mm,
data_shaps['aug_types'],
data_shaps['data_metas']['data_idxs'])
#features['augs'] = augs
else:
if data_shaps!=None and 'eval_views' in data_shaps and data_shaps['eval_views'] > 1:
#features['eval_views'] = data_shaps['eval_views']
points, b_bottom_centers_mm, augs = aug_views(points, b_bottom_centers_mm,
data_shaps['eval_views'],
data_shaps['data_metas']['data_idxs'])
features['points'] = points
# ------------------------------------------------
  # grouping and sampling on-line (inside the input pipeline)
if bsg!=None:
grouped_pindex, vox_index, grouped_xyz, empty_mask, bot_cen_top, nblock_valid, others = \
bsg.grouping_multi_scale(points[:,0:3])
num_scale = len(grouped_xyz)
for s in range(num_scale+1):
features['empty_mask_%d'%(s)] = tf.cast(empty_mask[s], tf.int8)
features['vox_index_%d'%(s)] = vox_index[s]
if s==num_scale:
continue
features['grouped_pindex_%d'%(s)] = grouped_pindex[s]
features['grouped_xyz_%d'%(s)] = grouped_xyz[s]
features['bot_cen_top_%d'%(s)] = bot_cen_top[s]
for k in range(len(others[s]['name'])):
name = others[s]['name'][k]+'_%d'%(s)
if name not in features:
features[name] = []
features[name].append( others[s]['value'][k] )
return features, object_label
def parse_pl_record_withbmap(tfrecord_serialized, is_training, data_net_configs=None):
from aug_data_tf import aug_main, aug_views
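  # Same as parse_pl_record, but additionally decodes the precomputed
  # block-index maps (sg_all_bidxmaps, bidxmaps_flat, fmap_neighbor_idis)
  # when the network configuration requires them.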
#if data_net_configs!=None:
# from aug_data_tf import aug_data, tf_Rz
# R = tf_Rz(1)
# import pdb; pdb.set_trace() # XXX BREAKPOINT
feature_map = {
'object/label': tf.FixedLenFeature([], tf.int64),
'points/shape': tf.FixedLenFeature([], tf.string),
'points/encoded': tf.FixedLenFeature([], tf.string),
'sg_all_bidxmaps/encoded': tf.FixedLenFeature([], tf.string),
'sg_all_bidxmaps/shape': tf.FixedLenFeature([], tf.string),
'bidxmaps_flat/encoded': tf.FixedLenFeature([], tf.string),
'bidxmaps_flat/shape': tf.FixedLenFeature([], tf.string),
'fmap_neighbor_idis/encoded': tf.FixedLenFeature([], tf.string),
'fmap_neighbor_idis/shape': tf.FixedLenFeature([], tf.string),
}
tfrecord_features = tf.parse_single_example(tfrecord_serialized,
features=feature_map,
name='pl_features')
object_label = tf.cast(tfrecord_features['object/label'], tf.int32)
object_label = tf.expand_dims(object_label,0)
points = tf.decode_raw(tfrecord_features['points/encoded'], tf.float32)
if data_net_configs == None:
points_shape = tf.decode_raw(tfrecord_features['points/shape'], tf.int32)
else:
points_shape = data_net_configs['points']
  # the points tensor was flattened out, so we have to reconstruct its shape
points = tf.reshape(points, points_shape)
#if data_net_configs != None:
# points = pc_normalize(points)
# ------------------------------------------------
# do not need for single scale net
is_need_bidmap = data_net_configs==None or \
( 'block_params' in data_net_configs and len(data_net_configs['block_params']['filters'])>1 )
if not is_need_bidmap:
features = {}
b_bottom_centers_mm = []
if is_training:
if data_net_configs != None and data_net_configs['aug_types']!='none':
points, b_bottom_centers_mm, augs = aug_main(points, b_bottom_centers_mm,
data_net_configs['aug_types'],
data_net_configs['data_idxs'])
#features['augs'] = augs
else:
if data_net_configs!=None and 'eval_views' in data_net_configs and data_net_configs['eval_views'] > 1:
#features['eval_views'] = data_net_configs['eval_views']
points, b_bottom_centers_mm, augs = aug_views(points, b_bottom_centers_mm,
data_net_configs['eval_views'],
data_net_configs['data_idxs'])
features['points'] = points
return features, object_label
# ------------------------------------------------
sg_all_bidxmaps = tf.decode_raw(tfrecord_features['sg_all_bidxmaps/encoded'], tf.int32)
if data_net_configs == None:
sg_all_bidxmaps_shape = tf.decode_raw(tfrecord_features['sg_all_bidxmaps/shape'], tf.int32)
else:
sg_all_bidxmaps_shape = data_net_configs['sg_all_bidxmaps']
sg_all_bidxmaps = tf.reshape(sg_all_bidxmaps, sg_all_bidxmaps_shape)
if data_net_configs != None:
sg_bidxmaps, b_bottom_centers_mm = extract_sg_bidxmap(
sg_all_bidxmaps, data_net_configs['sg_bm_extract_idx'])
bidxmaps_flat = tf.decode_raw(tfrecord_features['bidxmaps_flat/encoded'], tf.int32)
if data_net_configs == None:
bidxmaps_flat_shape = tf.decode_raw(tfrecord_features['bidxmaps_flat/shape'], tf.int32)
else:
bidxmaps_flat_shape = data_net_configs['bidxmaps_flat']
bidxmaps_flat = tf.reshape(bidxmaps_flat, bidxmaps_flat_shape)
fmap_neighbor_idis = tf.decode_raw(tfrecord_features['fmap_neighbor_idis/encoded'], tf.float32)
if data_net_configs == None:
fmap_neighbor_idis_shape = tf.decode_raw(tfrecord_features['fmap_neighbor_idis/shape'], tf.int32)
else:
fmap_neighbor_idis_shape = data_net_configs['fmap_neighbor_idis']
fmap_neighbor_idis = tf.reshape(fmap_neighbor_idis, fmap_neighbor_idis_shape)
# ------------------------------------------------
features = {}
features['bidxmaps_flat'] = bidxmaps_flat
features['fmap_neighbor_idis'] = fmap_neighbor_idis
if is_training:
if data_net_configs != None and data_net_configs['aug_types']!='none':
points, b_bottom_centers_mm, augs = aug_main(points, b_bottom_centers_mm,
data_net_configs['aug_types'],
data_net_configs['data_idxs'])
#features['augs'] = augs
else:
if data_net_configs!=None and data_net_configs['eval_views'] > 1:
#features['eval_views'] = data_net_configs['eval_views']
points, b_bottom_centers_mm, augs = aug_views(points, b_bottom_centers_mm,
data_net_configs['eval_views'],
data_net_configs['data_idxs'])
if data_net_configs != None:
features['sg_bidxmaps'] = sg_bidxmaps
features['b_bottom_centers_mm'] = b_bottom_centers_mm
else:
features['sg_all_bidxmaps'] = sg_all_bidxmaps
features['points'] = points
return features, object_label
def extract_sg_bidxmap(sg_all_bidxmaps, sg_bm_extract_idx):
cascade_num = sg_bm_extract_idx.shape[0] - 1
sg_bidxmaps = {}
b_bottom_centers_mm = {}
for k in range(cascade_num):
start = sg_bm_extract_idx[k]
end = sg_bm_extract_idx[k+1]
sg_bidxmap_k = sg_all_bidxmaps[ start[0]:end[0],0:end[1] ]
bot_cen_top_mm = sg_all_bidxmaps[ start[0]:end[0],end[1]:end[1]+6 ]
sg_bidxmaps[k] = sg_bidxmap_k
b_bottom_centers_mm[k] = bot_cen_top_mm
return sg_bidxmaps, b_bottom_centers_mm
def get_dataset_summary(DATASET_NAME, path, loss_lw_gama=2):
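  # Return the cached summary (sample count and per-class histogram) when one
  # exists; otherwise scan every tfrecord under `path` to build and cache it.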
dataset_summary = read_dataset_summary(path)
if dataset_summary['intact']:
print('dataset_summary intact, no need to read')
get_label_num_weights(dataset_summary, loss_lw_gama)
return dataset_summary
filenames = glob.glob(os.path.join(path,'*.tfrecord'))
assert len(filenames) > 0
from datasets.all_datasets_meta.datasets_meta import DatasetsMeta
  datasets_meta = DatasetsMeta(DATASET_NAME)
  num_classes = datasets_meta.num_classes
with tf.Graph().as_default():
dataset = tf.data.TFRecordDataset(filenames,
compression_type="",
buffer_size=1024*100,
num_parallel_reads=1)
batch_size = 50
is_training = False
dataset = dataset.prefetch(buffer_size=batch_size)
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
lambda value: parse_pl_record(value, is_training),
batch_size=batch_size,
num_parallel_batches=1,
drop_remainder=False))
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
get_next = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
m = 0
n = 0
label_hist = np.zeros(num_classes)
try:
print('start reading all the dataset to get summary')
while(True):
features, object_label = sess.run(get_next)
label_hist += np.histogram(object_label, range(num_classes+1))[0]
if m%10==0:
print('%d %d'%(m,n))
m += 1
n += object_label.size
#if n==batch_size:
# print(features['points'][0])
# print(object_label)
# #for i in range(batch_size):
# # plyfn = '/tmp/tfrecord_%d.ply'%(i)
# import ply_util
# # ply_util.create_ply(features['points'][i], plyfn)
      except tf.errors.OutOfRangeError:  # raised when the dataset is exhausted
print('Total: %d %d'%(m,n))
print(label_hist)
dataset_summary = {}
dataset_summary['size'] = n
dataset_summary['label_hist'] = label_hist
write_dataset_summary(dataset_summary, path)
get_label_num_weights(dataset_summary, loss_lw_gama)
return dataset_summary
def get_label_num_weights(dataset_summary, loss_lw_gama):
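  # Derive inverse-frequency class weights, weight[c] = gama * mean(hist) / hist[c],
  # so under-represented classes receive proportionally larger loss weights.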
if loss_lw_gama<0:
return
IsPlot = False
label_hist = dataset_summary['label_hist']
mean = np.mean(label_hist)
weight = mean / label_hist
weights = {}
  #gamas = [loss_lw_gama, 1, 2, 5, 10, 20]  # alternative gamas for the optional comparison plot
gamas = [loss_lw_gama]
for gama in gamas:
weights[gama] = gama * weight
dataset_summary['label_num_weights'] = weights[loss_lw_gama]
if IsPlot:
import matplotlib.pyplot as plt
for gama in gamas:
plt.plot(label_hist, weights[gama], '.', label=str(gama))
plt.legend()
plt.show()
def write_dataset_summary(dataset_summary, data_dir):
import pickle, shutil
summary_path = os.path.join(data_dir, 'summary.pkl')
dataset_summary['intact'] = True
  with open(summary_path, 'wb') as sf:  # binary mode for pickle
pickle.dump(dataset_summary, sf)
print(summary_path)
print_script = os.path.join(BASE_DIR,'print_pkl.py')
shutil.copyfile(print_script,os.path.join(data_dir,'print_pkl.py'))
def read_dataset_summary(data_dir):
import pickle
summary_path = os.path.join(data_dir, 'summary.pkl')
if not os.path.exists(summary_path):
dataset_summary = {}
dataset_summary['intact'] = False
return dataset_summary
  dataset_summary = pickle.load(open(summary_path, 'rb'))
return dataset_summary
def merge_tfrecord( filenames, merged_filename ):
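  # Read all input tfrecords in batches and rewrite the raw serialized
  # examples into a single merged tfrecord file.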
with tf.Graph().as_default():
dataset = tf.data.TFRecordDataset(filenames,
compression_type="",
buffer_size=1024*100,
num_parallel_reads=5)
batch_size = 50
is_training = False
dataset = dataset.prefetch(buffer_size=batch_size)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator().get_next()
num_blocks = 0
with tf.Session() as sess:
with tf.python_io.TFRecordWriter(merged_filename) as tfrecord_writer:
print('merging tfrecord: {}'.format(merged_filename))
while True:
try:
ds = sess.run(iterator)
for ds_i in ds:
tfrecord_writer.write(ds_i)
num_blocks += len(ds)
if num_blocks%100==0:
print('merging {} blocks'.format(num_blocks))
          except tf.errors.OutOfRangeError:  # all input records consumed
print('totally {} blocks, merge tfrecord OK:\n\t{}'.format(num_blocks,merged_filename))
break
if __name__ == '__main__':
#test_encode_raw()
DATASET_NAME = 'MODELNET40'
path = '/home/z/Research/dynamic_pointnet/data/MODELNET40H5F/Merged_tfrecord/6_mgs1_gs2_2-mbf-neg_fmn14_mvp1-1024_240_1-64_27_256-0d2_0d4-0d1_0d2-pd3-2M2pp'
get_dataset_summary(DATASET_NAME, path)
#merge_tfrecord()
| {"hexsha": "c1975dcbeb8d81bc081b72e840b756cce035be80", "size": 19077, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/dataset_utils.py", "max_stars_repo_name": "xuyongzhi/SparseVoxelNet", "max_stars_repo_head_hexsha": "2b8338c3291880ee1ef7739580eeaefb737f6164", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/dataset_utils.py", "max_issues_repo_name": "xuyongzhi/SparseVoxelNet", "max_issues_repo_head_hexsha": "2b8338c3291880ee1ef7739580eeaefb737f6164", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/dataset_utils.py", "max_forks_repo_name": "xuyongzhi/SparseVoxelNet", "max_forks_repo_head_hexsha": "2b8338c3291880ee1ef7739580eeaefb737f6164", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-31T04:12:34.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-31T04:12:34.000Z", "avg_line_length": 38.9326530612, "max_line_length": 158, "alphanum_fraction": 0.6657755412, "include": true, "reason": "import numpy", "num_tokens": 4697} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import numpy as np
from common import write_csv
from device import Oscilloscope, SignalGenerator
from gui import GPIBArgumentParser, DialogMode
from logging import getLogger, INFO, StreamHandler, NullHandler
root_logger = getLogger()
root_logger.addHandler(StreamHandler())
root_logger.setLevel(INFO)
logger = getLogger(__name__)
logger.addHandler(NullHandler())
def parse_arguments():
parser = GPIBArgumentParser('Frequency sweep Oscilloscope monitor')
parser.add_argument('output directory', './data/oscilloscope',
browse_mode=DialogMode.DIRECTORY,
help='Path to output directory.')
parser.add_argument('start frequency', 0.0, type=float,
help='Sweep frequency from this value. (Hz)')
parser.add_argument('end frequency', 1.0, type=float,
help='Sweep frequency to this value. (Hz)')
parser.add_argument('sample frequency', 1, type=int,
help='The number of sample frequencies.')
parser.add_argument('cumulative time', 1.0, type=float,
                        help='Oscilloscope signal cumulative time. (s)')
parser.add_device('oscilloscope', Oscilloscope)
parser.add_device('signal generator', SignalGenerator)
args = parser.parse_args()
return args
def main():
args = parse_arguments()
dst_dir = args['output directory']
os.makedirs(dst_dir, exist_ok=True)
osc = args['device']['oscilloscope']
sig = args['device']['signal generator']
freq = np.linspace(args.get('start frequency'),
args.get('end frequency'),
args.get('sample frequency'), endpoint=True)
wait_time = args.get('cumulative time')
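    # Sweep: for each sample frequency, drive the signal generator, let the
    # oscilloscope accumulate for the configured time, then write each
    # captured channel (with its time axis) to a CSV file.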
for f in freq:
logger.info('Set frequency: %.4e Hz' % f)
sig.set_frequency(f)
sig.start()
osc.start()
time.sleep(wait_time)
osc.stop()
sig.stop()
data = osc.get_data()
labels = list()
columns = list()
for label, v in data.items():
if v[0].shape[0] == 0:
continue
if 'time' not in labels:
labels.append('time')
columns.append(v[0])
labels.append(label)
columns.append(v[1])
comments = ('Frequency: %.4e Hz' % f,
'Cumulative time: %.2f s' % wait_time)
        filename = 'oscilloscope_monitor_%.5e_Hz.dat' % f
write_csv(os.path.join(dst_dir, filename), columns, labels, comments)
if __name__ == '__main__':
main()
| {"hexsha": "7bc2143d70467684eafd9ace4401771762da4e9d", "size": 2657, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/frequency_sweep_oscilloscpoe_monitor.py", "max_stars_repo_name": "heptaliane/my_measurements_scripts", "max_stars_repo_head_hexsha": "0c977a1677d7881a33863ab376cab48a387a7d52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/frequency_sweep_oscilloscpoe_monitor.py", "max_issues_repo_name": "heptaliane/my_measurements_scripts", "max_issues_repo_head_hexsha": "0c977a1677d7881a33863ab376cab48a387a7d52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/frequency_sweep_oscilloscpoe_monitor.py", "max_forks_repo_name": "heptaliane/my_measurements_scripts", "max_forks_repo_head_hexsha": "0c977a1677d7881a33863ab376cab48a387a7d52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8953488372, "max_line_length": 77, "alphanum_fraction": 0.6078283779, "include": true, "reason": "import numpy", "num_tokens": 584} |
"""Tests for the attribute .X"""
import numpy as np
from scipy import sparse
from anndata import AnnData
from anndata.utils import asarray
import pytest
from anndata.tests.helpers import gen_adata, assert_equal
UNLABELLED_ARRAY_TYPES = [
pytest.param(sparse.csr_matrix, id="csr"),
pytest.param(sparse.csc_matrix, id="csc"),
pytest.param(asarray, id="ndarray"),
]
SINGULAR_SHAPES = [
pytest.param(shape, id=str(shape)) for shape in [(1, 10), (10, 1), (1, 1)]
]
@pytest.mark.parametrize("shape", SINGULAR_SHAPES)
@pytest.mark.parametrize("orig_array_type", UNLABELLED_ARRAY_TYPES)
@pytest.mark.parametrize("new_array_type", UNLABELLED_ARRAY_TYPES)
def test_setter_singular_dim(shape, orig_array_type, new_array_type):
# https://github.com/theislab/anndata/issues/500
adata = gen_adata(shape, X_type=orig_array_type)
adata.X = new_array_type(np.ones(shape))
np.testing.assert_equal(asarray(adata.X), 1)
###############################
# Tests for `adata.X is None` #
###############################
def test_set_x_is_none():
# test setter and getter
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]), dict(o1=[1, 2], o2=[3, 4]))
adata.X = None
assert adata.X is None
def test_del_set_equiv_X():
"""Tests that `del adata.X` is equivalent to `adata.X = None`"""
# test setter and deleter
orig = gen_adata((10, 10))
copy = orig.copy()
del orig.X
copy.X = None
assert orig.X is None
assert_equal(orig, copy)
# Check that deleting again is still fine
del orig.X
assert orig.X is None
def test_init_X_as_none():
# test initialiser
shape = (3, 5)
adata = AnnData(None, uns=dict(test=np.array((3, 3))), shape=shape)
assert adata.X is None
assert adata.shape == shape
@pytest.mark.parametrize("shape", SINGULAR_SHAPES + [pytest.param((5, 3), id="(5, 3)")])
def test_transpose_with_X_as_none(shape):
adata = gen_adata(shape, X_type=lambda x: None)
adataT = adata.transpose()
assert_equal(adataT.shape, shape[::-1])
assert_equal(adataT.obsp.keys(), adata.varp.keys())
assert_equal(adataT.T, adata)
| {"hexsha": "0cade171239ef8bf7acef707e8482d556c1071fb", "size": 2134, "ext": "py", "lang": "Python", "max_stars_repo_path": "anndata/tests/test_x.py", "max_stars_repo_name": "michalk8/anndata", "max_stars_repo_head_hexsha": "664e32b0aa6625fe593370d37174384c05abfd4e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 262, "max_stars_repo_stars_event_min_datetime": "2017-11-10T11:43:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T20:24:24.000Z", "max_issues_repo_path": "anndata/tests/test_x.py", "max_issues_repo_name": "michalk8/anndata", "max_issues_repo_head_hexsha": "664e32b0aa6625fe593370d37174384c05abfd4e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 654, "max_issues_repo_issues_event_min_datetime": "2017-11-22T13:26:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T04:31:15.000Z", "max_forks_repo_path": "anndata/tests/test_x.py", "max_forks_repo_name": "michalk8/anndata", "max_forks_repo_head_hexsha": "664e32b0aa6625fe593370d37174384c05abfd4e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 137, "max_forks_repo_forks_event_min_datetime": "2017-12-28T14:33:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T02:39:53.000Z", "avg_line_length": 28.0789473684, "max_line_length": 88, "alphanum_fraction": 0.6691658857, "include": true, "reason": "import numpy,from scipy", "num_tokens": 601} |
# Detect objects using tensorflow-gpu served by zerorpc.
#
# This needs to be called from a zerorpc client with an array of alarm frame image paths.
# Image paths must be in the form of:
# '/nvr/zoneminder/events/BackPorch/18/06/20/19/20/04/00224-capture.jpg'.
#
# This program should be run in the 'od' virtual python environment, i.e.,
# $ /home/lindo/.virtualenvs/od/bin/python ./obj_detect_server.py
#
# This is part of the smart-zoneminder project.
#
# Copyright (c) 2018, 2019 Lindo St. Angel
import numpy as np
import tensorflow as tf
import json
import zerorpc
from PIL import Image
# Object detection imports.
from object_detection.utils import label_map_util
# For tensorrt optimized models...
import tensorflow.contrib.tensorrt as trt
# Debug.
#import warnings
#warnings.simplefilter('default')
# Get configuration.
with open('./config.json') as fp:
config = json.load(fp)['objDetServer']
# Tensorflow object detection file system paths.
PATH_TO_MODEL = config['modelPath']
PATH_TO_LABEL_MAP = config['labelMapPath']
# Max number of classes for TF object detection.
NUM_CLASSES = config['numClasses']
# If consecutive ZoneMinder image frames are found then skip this many after the first.
CON_IMG_SKIP = config['conseqImagesToSkip']
# Minimum score for valid TF object detection.
MIN_SCORE_THRESH = config['minScore']
# Crop image to minimize processing (at some expense of accuracy).
# In pixels.
CROP_IMAGE_WIDTH = config['cropImageWidth']
CROP_IMAGE_HEIGHT = config['cropImageHeight']
# Heartbeat interval for zerorpc client in ms.
# This must match the zerorpc client config.
ZRPC_HEARTBEAT = config['zerorpcHeartBeat']
# IPC (or TCP) socket for zerorpc.
# This must match the zerorpc client config.
ZRPC_PIPE = config['zerorpcPipe']
# Load frozen Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_MODEL, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Only grow the memory usage as required.
# See https://www.tensorflow.org/guide/using_gpu#allowing-gpu-memory-growth
# (Named tf_config to avoid shadowing the JSON config dict loaded above.)
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
sess = tf.Session(config=tf_config, graph=detection_graph)
with detection_graph.as_default():
    sess.run(tf.global_variables_initializer())  # no-op for a frozen graph, but the op must live in the session's graph
# Load label map.
label_map = label_map_util.load_labelmap(PATH_TO_LABEL_MAP)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Helper code.
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
# zerorpc server.
class DetectRPC(object):
def detect(self, test_image_paths):
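        # Run detection over a list of alarm-frame image paths and return a
        # JSON array of {image, labels}. Consecutive frames from the same
        # monitor and event are skipped, reusing the previous frame's labels.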
with detection_graph.as_default():
objects_in_image = []
old_labels = []
frame_num = 0
monitor = ''
(img_width, img_height) = (CROP_IMAGE_WIDTH, CROP_IMAGE_HEIGHT)
for image_path in test_image_paths:
# If consecutive frames then repeat last label to minimize processing.
# Image paths must be in the form of:
# '/nvr/zoneminder/events/BackPorch/18/06/20/19/20/04/00224-capture.jpg'.
old_frame_num = frame_num
old_monitor = monitor
try:
frame_num = int((image_path.split('/')[-1]).split('-')[0])
monitor = image_path.split('/')[4]
except (ValueError, IndexError):
print("Could not derive information from image path.")
continue
# Only apply skip logic if frames are from the same monitor.
if monitor == old_monitor:
# Only apply skip logic if alarm frames are from the same event.
# Intra-event frames are monotonically increasing.
frame_diff = frame_num - old_frame_num
if frame_diff > 0:
# Skip CON_IMG_SKIP frames after the first one.
if frame_diff <= CON_IMG_SKIP:
objects_in_image.append({'image': image_path, 'labels': old_labels})
print('monitor {} old_monitor {} frame_num {} old_frame_num {}'
.format(monitor,old_monitor,frame_num,old_frame_num))
print('Consecutive frame {}, skipping detect and copying previous labels.'
.format(frame_num))
continue
with Image.open(image_path) as image:
# Resize to minimize tf processing.
# Note: resize will slightly lower accuracy. 640 x 480 seems like a good balance.
image_resize = image.resize((img_width, img_height))
# Convert image to numpy array
image_np = load_image_into_numpy_array(image_resize)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Define input node.
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Define output nodes.
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# This contains class scores for the detections.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
# This contains classes for the detections.
classes = detection_graph.get_tensor_by_name('detection_classes:0')
# This specifies the number of valid boxes per image in the batch.
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Get labels and scores of detected objects.
labels = []
(im_width, im_height) = image.size # use original image size for box coords
for index, value in enumerate(classes[0]):
if scores[0, index] > MIN_SCORE_THRESH:
object_dict = {}
object_dict['id'] = category_index.get(value)['id']
object_dict['name'] = category_index.get(value)['name']
object_dict['score'] = float(scores[0, index])
ymin = boxes[0, index][0] * im_height
xmin = boxes[0, index][1] * im_width
ymax = boxes[0, index][2] * im_height
xmax = boxes[0, index][3] * im_width
object_dict['box'] = {'ymin': ymin, 'xmin': xmin, 'ymax': ymax, 'xmax': xmax}
labels.append(object_dict)
old_labels = labels
objects_in_image.append({'image': image_path, 'labels': labels})
return json.dumps(objects_in_image)
# Streaming server.
@zerorpc.stream
def detect_stream(self, test_image_paths):
(img_width, img_height) = (CROP_IMAGE_WIDTH, CROP_IMAGE_HEIGHT)
with detection_graph.as_default():
for image_path in test_image_paths:
with Image.open(image_path) as image:
# Resize to minimize tf processing.
# Note: resize will slightly lower accuracy. 640 x 480 seems like a good balance.
image_resize = image.resize((img_width, img_height))
# Convert image to numpy array
image_np = load_image_into_numpy_array(image_resize)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Get labels and scores of detected objects.
                    labels = []
                    for index, value in enumerate(classes[0]):
                        if scores[0, index] > MIN_SCORE_THRESH:
                            # Copy the entry so the shared category_index is not
                            # mutated and each detection gets its own dict.
                            object_dict = dict(category_index.get(value))
                            object_dict['score'] = float(scores[0, index])
                            object_dict['box'] = boxes[0, index].tolist()
                            labels.append(object_dict)
yield json.dumps({'image': image_path, 'labels': labels})
s = zerorpc.Server(DetectRPC(), heartbeat=ZRPC_HEARTBEAT)
s.bind(ZRPC_PIPE)
s.run() | {"hexsha": "418cd4f0da51cf45d38eed66282a7b98af2a17bb", "size": 9749, "ext": "py", "lang": "Python", "max_stars_repo_path": "obj-detect/obj_detect_server.py", "max_stars_repo_name": "EmpireofKings/smart-zoneminder", "max_stars_repo_head_hexsha": "78c62bd9cf730e0081741f7fe7a51339e0a5c46b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "obj-detect/obj_detect_server.py", "max_issues_repo_name": "EmpireofKings/smart-zoneminder", "max_issues_repo_head_hexsha": "78c62bd9cf730e0081741f7fe7a51339e0a5c46b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "obj-detect/obj_detect_server.py", "max_forks_repo_name": "EmpireofKings/smart-zoneminder", "max_forks_repo_head_hexsha": "78c62bd9cf730e0081741f7fe7a51339e0a5c46b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-11T04:17:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-11T04:17:51.000Z", "avg_line_length": 47.0966183575, "max_line_length": 122, "alphanum_fraction": 0.621499641, "include": true, "reason": "import numpy", "num_tokens": 2030} |
#include "streamTrace.hpp"
#include "writeRinex.hpp"
#include "acsConfig.hpp"
#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/log/trivial.hpp>
#include <algorithm>
#include <fstream>
#include <math.h>
void recordRinexObservations(
RinexOutput& rinexOutput,
ObsList& obsList)
{
bool foundNewObs = false;
for (auto& obs : obsList)
{
E_Sys sys = obs.Sat.sys;
auto& obsCodeDesc = rinexOutput.codesPerSys[sys];
for (auto& [ftype,sigsList] : obs.SigsLists)
for (auto& sig : sigsList)
{
if (sig.code == +E_ObsCode::NONE)
continue;
if (std::find(obsCodeDesc.begin(), obsCodeDesc.end(), pair<E_ObsCode,E_ObsDesc>(sig.code,E_ObsDesc::C)) == obsCodeDesc.end()
&& sig.P != 0
&& acsConfig.rinex_obs_print_C_code)
{
foundNewObs = true;
obsCodeDesc.push_back({sig.code, E_ObsDesc::C});
}
if (std::find(obsCodeDesc.begin(), obsCodeDesc.end(), pair<E_ObsCode,E_ObsDesc>(sig.code,E_ObsDesc::L)) == obsCodeDesc.end()
&& sig.L != 0
&& acsConfig.rinex_obs_print_L_code)
{
foundNewObs = true;
obsCodeDesc.push_back({sig.code, E_ObsDesc::L});
}
if (std::find(obsCodeDesc.begin(), obsCodeDesc.end(), pair<E_ObsCode,E_ObsDesc>(sig.code,E_ObsDesc::D)) == obsCodeDesc.end()
&& sig.D != 0
&& acsConfig.rinex_obs_print_D_code)
{
foundNewObs = true;
obsCodeDesc.push_back({sig.code, E_ObsDesc::D});
}
double sn_raw = (sig.snr-0.5) / 4; //todo aaron, check this
if (std::find(obsCodeDesc.begin(), obsCodeDesc.end(), pair<E_ObsCode,E_ObsDesc>(sig.code,E_ObsDesc::S)) == obsCodeDesc.end()
&& sn_raw != 0
&& acsConfig.rinex_obs_print_S_code)
{
foundNewObs = true;
obsCodeDesc.push_back({sig.code, E_ObsDesc::S});
}
}
if (obsCodeDesc.size() == 0)
rinexOutput.codesPerSys.erase(rinexOutput.codesPerSys.find(sys));
}
//first create if non existing
{
std::fstream maker (rinexOutput.fileName, std::ios::app);
}
std::fstream rinexStream(rinexOutput.fileName);
rinexStream.seekp(0, std::ios::end);
long endFilePos = rinexStream.tellp();
if (endFilePos == 0)
{
if (obsList.empty())
{
BOOST_LOG_TRIVIAL(error) << "Writing RINEX file header with no observations.";
return;
}
rinexOutput.headerTimePos = 0;
// Write the RINEX header.
GTime now = obsList[0].time;
char tempStr[20];
time2str(now, tempStr, 0);
string timeDate(tempStr);
boost::replace_all(timeDate, "/", "");
boost::replace_all(timeDate, ":", "");
timeDate += " UTC";
double rnxver = 3.05;
const char sys[] = "M: Mixed";
const char prog[] = "PEA v1";
const char runby[] = "Geoscience Australia";
tracepdeex(0,rinexStream,"%9.2f%-11s%-20s%-20s%-20s\n",
rnxver,
"",
"OBSERVATION DATA",
sys,
"RINEX VERSION / TYPE");
tracepdeex(0,rinexStream,"%-20.20s%-20.20s%-20.20s%-20s\n",
prog,
runby,
timeDate.c_str(),
"PGM / RUN BY / DATE");
tracepdeex(0,rinexStream,"%-60.60s%-20s\n",
rinexOutput.snx.sitecode,
"MARKER NAME");
tracepdeex(0,rinexStream,"%-20.20s%-40.40s%-20s\n",
rinexOutput.snx.monuid,
"",
"MARKER NUMBER");
//TODO Add marker type as RINEX version is greater than 2.99
//tracepdeex(0,rinexStream,"%-20.20s%-40.40s%-20s\n",rinexOutput.snx.,"","MARKER TYPE");
tracepdeex(0,rinexStream,"%-20.20s%-40.40s%-20s\n",
"",
acsConfig.analysis_center,
"OBSERVER / AGENCY");
tracepdeex(0, rinexStream, "%-20.20s%-20.20s%-20.20s%-20s\n",
rinexOutput.snx.recsn,
rinexOutput.snx.rectype,
rinexOutput.snx.recfirm,
"REC # / TYPE / VERS");
tracepdeex(0, rinexStream, "%-20.20s%-20.20s%-20.20s%-20s\n",
rinexOutput.snx.antsn,
rinexOutput.snx.anttype,
"",
"ANT # / TYPE");
tracepdeex(0, rinexStream, "%14.4f%14.4f%14.4f%-18s%-20s\n",
rinexOutput.snx.pos.x(),
rinexOutput.snx.pos.y(),
rinexOutput.snx.pos.z(),
"",
"APPROX POSITION XYZ");
tracepdeex(0, rinexStream, "%14.4f%14.4f%14.4f%-18s%-20s\n",
rinexOutput.snx.ecc[2],
rinexOutput.snx.ecc[1],
rinexOutput.snx.ecc[0],
"",
"ANTENNA: DELTA H/E/N");
rinexOutput.headerObsPos = rinexStream.tellp();
}
if ( endFilePos == 0
|| foundNewObs)
{
rinexStream.seekp(rinexOutput.headerObsPos);
const char label[] = "SYS / # / OBS TYPES";
int numSysLines = 0;
for (auto &[sys, obsCodeDesc] : rinexOutput.codesPerSys)
{
auto dummySat = SatSys(sys, 0);
char sys_c = dummySat.sysChar();
if (sys_c == '-')
{
BOOST_LOG_TRIVIAL(error) << "Writing RINEX file undefined system.";
return;
}
tracepdeex(0, rinexStream, "%c %3d", sys_c, obsCodeDesc.size());
int obsCodeCnt = 0;
for (auto& [obsCode, obsDesc] : obsCodeDesc)
{
obsCodeCnt++;
auto obsDescStr = obsDesc._to_string();
auto obsCodeStr = obsCode._to_string();
char obsStr[4];
obsStr[0] = obsDescStr[0];
obsStr[1] = obsCodeStr[1];
obsStr[2] = obsCodeStr[2];
obsStr[3] = 0;
				if  ( obsCodeCnt % 13 == 1
					&&obsCodeCnt      != 1)
{
tracepdeex(0, rinexStream, " ");
}
tracepdeex(0, rinexStream, " %3s", obsStr);
if (obsCodeCnt % 13 == 0)
{
// After 13 observations make a new line.
tracepdeex(0, rinexStream, " %-20s\n", label);
numSysLines++;
}
}
if (obsCodeCnt % 13 != 0)
{
// less than 13 entries and a new line is required.
while (obsCodeCnt % 13 != 0)
{
obsCodeCnt++;
tracepdeex(0, rinexStream, " %3s", "");
}
tracepdeex(0, rinexStream, " %-20s\n", label);
numSysLines++;
}
}
while (numSysLines < 2 * E_Sys::NUM_SYS)
{
//add some lines to be filled in later to allow for the maximum number expected
tracepdeex(0, rinexStream, "%-60.60s%-20s\n", "", "COMMENT");
numSysLines++;
}
}
char tsys[] = "GPS"; // PEA internal time is GPS.
double ep[6];
time2epoch(obsList[0].time, ep);
if (rinexOutput.headerTimePos == 0)
{
tracepdeex(0, rinexStream, "%10.3f%50s%-20s\n",
acsConfig.epoch_interval,
"",
"INTERVAL");
tracepdeex(0,rinexStream," %04.0f%6.0f%6.0f%6.0f%6.0f%13.7f %-12s%-20s\n",
ep[0],
ep[1],
ep[2],
ep[3],
ep[4],
ep[5],
tsys,
"TIME OF FIRST OBS");
rinexOutput.headerTimePos = rinexStream.tellp();
}
rinexStream.seekp(rinexOutput.headerTimePos);
tracepdeex(0, rinexStream, " %04.0f%6.0f%6.0f%6.0f%6.0f%13.7f %-12s%-20s\n",
ep[0],
ep[1],
ep[2],
ep[3],
ep[4],
ep[5],
tsys,
"TIME OF LAST OBS");
if (endFilePos == 0)
{
tracepdeex(0, rinexStream, "%-60.60s%-20s\n",
"",
"END OF HEADER");
}
// Write the RINEX body.
rinexStream.seekp(0, std::ios::end);
// flag epoch flag (0:ok,1:power failure,>1:event flag)
int flag = 0;
tracepdeex(0,rinexStream,"> %04.0f %02.0f %02.0f %02.0f %02.0f%11.7f %d%3d%21s\n",
ep[0],
ep[1],
ep[2],
ep[3],
ep[4],
ep[5],
flag,
obsList.size(),
"");
for (auto& obs : obsList)
{
tracepdeex(0, rinexStream, "%s", obs.Sat.id().c_str());
auto& obsCodeDesc = rinexOutput.codesPerSys[obs.Sat.sys];
for (auto& [obsCode, obsDesc] : obsCodeDesc)
{
if (obsCode == +E_ObsCode::NONE)
continue;
bool foundObsPair = false;
for (auto& [ftype, sigList] : obs.SigsLists)
for (auto& sig : sigList)
{
if (sig.code != obsCode)
continue;
// if it locates the E_ObsCode then it will always locate E_ObsDesc.
if (foundObsPair)
{
BOOST_LOG_TRIVIAL(error) << "Writing RINEX file duplicated observation.";
break;
}
else
{
foundObsPair = true;
}
double sn_raw = (sig.snr - 0.5) / 4;
int sn_rnx = std::min(std::max((int) std::round(sn_raw / 6.0), 1), 9);
switch (obsDesc)
{
case E_ObsDesc::C:
//tracepdeex(0,rinexStream,"%14.3f %d",sig.P,sSI);
if (sig.P == 0) tracepdeex(0, rinexStream, "%14.3s ", "");
else tracepdeex(0, rinexStream, "%14.3f ", sig.P);
break;
case E_ObsDesc::L:
if (sig.L == 0) tracepdeex(0, rinexStream, "%14.3s ", "");
else tracepdeex(0, rinexStream, "%14.3f%d%d", sig.L, (uint)sig.LLI, sn_rnx);
break;
case E_ObsDesc::D:
if (sig.D == 0) tracepdeex(0, rinexStream, "%14.3s ", "");
else tracepdeex(0, rinexStream, "%14.3f ", sig.D);
break;
case E_ObsDesc::S:
if (sn_raw == 0) tracepdeex(0, rinexStream, "%14.3s ", "");
else tracepdeex(0, rinexStream, "%14.3f ", sn_raw);
break;
default :
BOOST_LOG_TRIVIAL(error) << "Writing RINEX unknown/unused observation code.";
break;
}
}
if (foundObsPair == false)
{
// Observation code and description not in observation.
tracepdeex(0, rinexStream, "%14.3s ", "");
}
}
rinexStream << "\n";
}
}
[STATEMENT]
lemma map_graph_inv' [simp]:
"graph_map' (map_graph f) = Some f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. graph_map' (map_graph f) = Some f
[PROOF STEP]
by (simp add: graph_map'_def)
import numpy as np
from scipy.stats import median_test
from lemonadefashion_flask_monitoringdashboard.core.reporting.questions.report_question import (
ReportAnswer,
ReportQuestion,
)
from lemonadefashion_flask_monitoringdashboard.database import session_scope
from lemonadefashion_flask_monitoringdashboard.database.request import get_latencies_sample
class MedianLatencyReportAnswer(ReportAnswer):
def __init__(
self,
is_significant,
latencies_sample=None,
baseline_latencies_sample=None,
percentual_diff=None,
median=None,
baseline_median=None,
):
super().__init__('MEDIAN_LATENCY')
self._is_significant = is_significant
self._baseline_latencies_sample = baseline_latencies_sample
self._latencies_sample = latencies_sample
self._percentual_diff = percentual_diff
self._baseline_median = baseline_median
self._median = median
def meta(self):
return dict(
latencies_samples=dict(
baseline=self._baseline_latencies_sample,
comparison=self._latencies_sample
),
median=self._median,
baseline_median=self._baseline_median,
percentual_diff=self._percentual_diff,
)
def is_significant(self):
return self._is_significant
class MedianLatency(ReportQuestion):
def get_answer(self, endpoint, requests_criterion, baseline_requests_criterion):
with session_scope() as session:
latencies_sample = get_latencies_sample(session, endpoint.id,
requests_criterion)
baseline_latencies_sample = get_latencies_sample(
session, endpoint.id, baseline_requests_criterion
)
if len(latencies_sample) == 0 or len(baseline_latencies_sample) == 0:
return MedianLatencyReportAnswer(
is_significant=False,
latencies_sample=latencies_sample,
baseline_latencies_sample=baseline_latencies_sample,
)
median = float(np.median(latencies_sample))
baseline_median = float(np.median(baseline_latencies_sample))
percentual_diff = (median - baseline_median) / baseline_median * 100
            # Mood's median test: a small p-value indicates the two samples are
            # unlikely to share the same median.
            _, p, _, _ = median_test(latencies_sample, baseline_latencies_sample)
            is_significant = abs(float(percentual_diff)) > 0 and float(p) < 0.05
return MedianLatencyReportAnswer(
is_significant=is_significant,
percentual_diff=percentual_diff,
# Sample latencies
latencies_sample=latencies_sample,
baseline_latencies_sample=baseline_latencies_sample,
# Latency medians
median=median,
baseline_median=baseline_median,
)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% GKS User Guide -- LaTeX Source %
% %
% Chapter 1 %
% %
% The following external EPS files are referenced: %
% hbbatch.eps, hbookc11.eps %
% %
% Editor: Michel Goossens / CN-AS %
% Last Mod.: 14 July 1992 12:30 mg %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{\protect\label{sec:intro}Introduction}
As a matter of policy, a recommendation was made by HEPCCC to base
future HEP computer graphics applications on the ISO standard
Graphical Kernel System,
GKS \cite{bib-gks2d} and GKS-3D \cite{bib-gks3d}.
GKS had been in use already at CERN for 2D applications, but the decision
to use GKS also for 3D work led to a new tender operation
being carried out
in 1986. This resulted in the firm GTS-GRAL, of Darmstadt,
being selected to provide new implementations of both GKS and GKS-3D
(called {\it GKSGRAL} and {\it GKSGRAL-3D}).
These have been installed on all the major CERN
operating systems (VM/CMS, UNICOS, VMS, AEGIS and UNIX) and
the contract with GTS-GRAL includes
provision for institutes affiliated to CERN to obtain a licence for the
use of the software at very favourable rates.
GKSGRAL drivers are available for a large range of graphics terminals.
Users should be aware of the testing and maintenance implications
of supporting such a large number of
operating-system/driver combinations.
\section{What is GKS (the Graphical Kernel System)}
The Graphical Kernel System (GKS) \cite{bib-gks2d} is a document produced
by the International Standards Organization (ISO) which defines a common
interface to interactive computer graphics for application programs.
GKS has been designed by a group of experts representing the national
standards institutions of most major industrialized countries.
The full standard provides functional specifications for some
200 subroutines which perform graphics input and output
in a device independent way.
Application programs can thus move freely between different graphics
devices and different host computers.
For the first time graphics programs have become genuinely portable.
However, one should point out that GKS itself is not portable.
Individual GKS implementations will vary substantially as they have to
support different graphics devices on different computers.
Moreover, GKS is a {\it kernel} system, and thus does not include
an arbitrary collection of functions to produce histograms or
contour plots, etc. Such facilities are regarded as applications
which sit on top of the basic graphics package and, at CERN,
they are provided by the Graphical Extensions to the NAG Library
\cite{bib-nagref}, or the HPLOT package \cite{bib-HPLOT}.
In order to allow particular applications to choose a graphics package
with the appropriate capability, GKS has been defined to have
different {\it levels}. The level structure has two dimensions,
one for output (0, 1, or 2) and one for input (a, b, or c).
Higher levels include the capabilities of lower levels.
In the United States, ANSI has also defined a level 'm', for very simple
applications, which sits below output level '0'.
Most implementations provide all output (level '2') and intermediate
input (level 'b'). The reason input level 'c' is not usually supported
is that it requires asynchronous input facilities not found in all
operating systems.
\index{FORTRAN binding}
The GKS functions have been defined independently from a specific
programming language, and {\it bindings} to individual languages are
subject to separate standards efforts which have been undertaken
for all the major languages.
The FORTRAN binding is defined by \cite{bib-gksftn}.
The Graphical Kernel System for two dimensional graphics was
adopted as an ISO standard in 1985, and since that date work has been
in progress to define a three dimensional super-set \cite{bib-gks3d}
which was accepted as an International Standard during 1988.
The FORTRAN binding to GKS-3D has also been published as a
Draft International Standard \cite{bib-gksftn3}.
The GKS functions are separated into those which pass values to GKS for
control, setting or output, and those which inquire about
status information. There are 8 distinct classes:
\begin{center}
\begin{tabular}{ll}
1. Control functions&5. Transformations\\
2. Output Attributes&6. Input functions\\
3. Output Primitives&7. Metafile functions\\
4. Segment functions&8. Inquiry functions\\
\end{tabular}
\end{center}
\begin{note}
{\bf Throughout this document many values are specified using}
{\bf GKS Enumerated Types}. The symbolic names for these all
begin with 'G' and are enclosed in single quotes within the text.
(But do {\bf not} include the quotes in FORTRAN calls!)
They have been coded into an {\it Include File},
called ENUM.INC, which is listed in Appendix on Page~\pageref{sec:hdenum}.
\end{note}
\section{Major Differences to Earlier Graphics Interfaces}
In some respects GKS is a major departure from earlier software
interfaces to graphics display systems, and several new concepts have
been introduced. The following is a brief list of the new features,
which will be more fully described within the body of the document.
\begin{OL}
\index{current point}
\item GKS has no concept of a {\it current point}. This idea dates
from when most graphics devices were pen plotters, in which case the
current point corresponded clearly to the position of the pen.
However, it would be less obvious where one would define the current
point when drawing a filled polygon on a raster display.
Thus, all GKS primitives which require
a starting point specify that point in the function call.
\item In most cases earlier software interfaces could only control a single
device at a time, and if some means was provided to 'capture' the graphics
output for storage, then this was handled in a special way.
GKS simplifies this situation considerably by defining
{\it Workstations}, which correspond to 'virtual devices'.
GKS maps each workstation onto a real piece of hardware, but to the
application programmer they are all handled in the same way, and
several workstations can be active simultaneously.
There is a workstation which writes the output it receives onto a
{\it Metafile} for storage and/or transmission over a network
and this, also, is handled just like any other workstation.
\item GKS incorporates the concept of a {\it Logical Device} to
control input. There are six classes of Logical Input Devices, and these
are mapped onto the actual physical devices in use, without the application
programmer having to write special code if, for example, (s)he is using
a mouse instead of a tablet. This helps to make application code much
more portable. The following logical devices are of particular interest:
\begin{OL}
\item The Logical {\it Locator} Device, which returns the locator position
{\it in the user's coordinate system}, not that of the physical device.
\item The Logical {\it Pick} Device, which tells the application
at which object the user is pointing, without having to provide the code to
work it out.
\item The Logical {\it Choice} Device, which allows the user to
choose one option out of a set.
\end{OL}
\item GKS provides a mechanism to group the graphics output commands into
{\it Segments}, where they may be stored for later use.
Thus, an image stored in a segment could be made invisible and then visible
again without the application having to re-draw it.
Information sent to a workstation to produce an image which is not
stored is called {\it non-retained data}.
\item Finally, GKS provides a whole host of functions allowing the application
to {\it inquire} at run-time the state and/or capabilities of a
workstation or the implementation of GKS itself.
This is a very important feature for allowing code to be written in a
portable way, and still to make the best possible use of the environment in
which it is running.
\end{OL}
\section{\protect\label{sec:metint}Computer Graphics Metafiles (GKSM and CGM)}
A graphics metafile is a computer file containing a set of data records
describing a graphical image. It may be used for
\begin{OL}
\item Transporting graphical information between different types of computers.
\item Transporting graphical information from one site to another.
(by magnetic tape for example)
\item Transporting graphical information from one application to another.
\item Device spooling, e.g. for a plotter.
\end{OL}
\index{Appendix E metafile}
As yet there is no official ISO standard for writing a GKS Metafile (GKSM).
However, the ISO GKS Functional Description assumes the existence of one and,
in Appendix E of the document, a metafile format is described
and its use is recommended.
A GKS metafile created using this format is known as an Appendix E metafile.
Unfortunately, not all implementations follow the Appendix E format,
and so GKSM metafiles created by different GKS packages may be incompatible.
In fact, even different examples of Appendix E metafiles may be
incompatible due to variations in the file record structures, etc.
\index{Appendix E metafile!Computer Graphics Metafile}
\index{Appendix E metafile!CGM}
The Computer Graphics Metafile (CGM) has been produced by a separate
standards group within ISO and, when implementations become available,
it will be an alternative to the Appendix E metafile for the storage
and transmission of complete pictures.
One should note that because CGM is an independent standard
compatible with GKS, it is expected to become adopted as the
picture interchange format used between all graphics standards.
\section{The Computer Graphics Virtual Device Interface (CGI)}
\index{logical workstations}
One of the new features introduced by GKS was the concept of a
{\it Logical Workstation}.
This provides an interface layer within the graphics package below
which are situated a set of workstation drivers.
The workstations simulate in software any features which are not provided
by a particular display, and so simplify the problem of driving
new devices. To the applications programmer all workstations
have (more-or-less) similar capabilities, although their response
time will clearly be faster if these capabilities are built into the hardware,
rather than emulated. However, GKS defines only the interface as
seen by the application, and not the internal workstation interface.
This means that it is not possible to move workstation drivers
from one implementation of GKS to another.
There are difficulties in defining this interface because, if the
level is too high, then most workstation drivers must incorporate
a great deal of code. If on the other hand the level is too low,
then the software will not be able to make use of advanced features
in high-performance display systems.
There is thus is a trade-off to be made, which is currently
decided upon by the designers of each graphics package.
The goal of the CGI \cite{bib-cgiref} is to standardize this interface.
However, it is proving to be a lengthy business,
because there are difficulties to persuade all the
parties involved to compromise.
It should be mentioned that one of the goals of CGI is to allow
the functionality of the graphics system to be split between multiple
CPUs. For example, the features defined by the CGI could either be
built into a graphics terminal, or programmed into a Personal Workstation.
This type of functionality has become loosely termed
{\it Networked Graphics}.
\index{networked graphics}
\index{X-Window}
In this realm CGI may be overtaken by events, because a networked graphics
system called {\it X-Window} is fast becoming a 'de facto' standard.
X-Window comes from MIT, and is an outcome of the Athena Project
financed by DEC and IBM.
\section{Overview of Basic Facilities available at CERN}
\index{MGKS}
\index{PLOT10/GKS}
The following graphics services and facilities are supported at CERN
and will be described in more detail within this manual.
Note that PLOT10/GKS and MGKS are no longer supported.
\subsection{GKSGRAL}
\index{GKSGRAL}
\index{GKSGRAL-3D}
Both GKS (2D) and GKS-3D libraries from GTS-GRAL ({\it GKSGRAL} and
{\it GKSGRAL-3D}) are available for general use
on IBM VM/CMS, VAX VMS, CRAY UNICOS, APOLLO AEGIS, and UNIX.
However, in the case of UNIX, compiled libraries are only available from CERN
for those machines on which they may be produced.
As of March, 1990, the PROduction GKSGRAL version
is 7.4/3.2, and the GKSGRAL-3D version is 2.0.
\subsection{Include Files}
\index{include files!gtsdev}
\index{include files!enum}
To aid program writing two {\it INCLUDE FILES} are available:
\begin{DLtt}{123456}
\item[GTSDEV]This file contains a set of parameters
defining the GKS Workstation Types available on GKSGRAL.
\item[ENUM]This file contains a set of parameters
defining the GKS Enumeration Types. {\bf It is highly recommended}
that these are used in preference to directly coding in collections of
integers, the meaning of which is immediately forgotten.
\end{DLtt}
Although the use of include files is not standard FORTRAN-77,
this facility is supported by the compilers on all the major CERN systems.
To produce standard FORTRAN code the text of the include file must be inserted
into the application program.
\subsection{GKS Metafile Display and Editing}
\index{GRVIEW}
\index{GKSTV}
\index{GKSED}
\index{metafiles}
\index{editor}
GKS Appendix E metafiles may be interpreted on display screens
using the interactive command {\bf GRVIEW}, which is available on
IBM, VAX and APOLLO systems. GRVIEW also allows metafiles to be edited,
and combines features from the old commands GKSTV and GKSED, which
it replaces.
\subsection{Hardcopy and Conversion Facilities}
\index{metafiles}
\index{hardcopy}
\index{conversion}
\index{postscript}
\index{GRPLOT}
\index{GRCONV}
\index{GKSVT}
\index{GKSCP}
\index{GKSX87}
\index{GKS3812}
\index{GKSSGML}
\index{VERSATEC}
\index{XEROX}
\index{IBM3812}
GKS Appendix E metafiles may be interpreted onto a range of hardcopy
devices using the command {\bf GRPLOT}, or converted to
another format using {\bf GRCONV}. These two commands have replaced
the old utilities GKSVT, GKSCP, GKSX87, GKS3812, and GKSSGML.
GRCONV runs locally to convert a GKS Appendix~E metafile into a PostScript
file or a file of Tektronix 4014 escape sequences. The command also may
be used to convert a metafile into IBM~3812 format for inclusion
in documents produced by SGML.
GRPLOT plots a metafile on a device specified by a parameter.
Supported devices include the computer centre monochrome and colour
Versatec plotters, the Xerox 4050s, and the IBM~3812s.
Apart from Appendix~E metafiles, it is possible to produce PostScript
or Tektronix 4014 output files, either directly from GKS, via GRVIEW,
or via GRCONV.
PostScript files may be printed on any local PostScript printer,
although they are less versatile than a metafile and may not be edited or
plotted on any other type of hardcopy device. Various laser printers
accept files of Tektronix 4014 escape codes as input.
\subsection{Mainframe Graphics using PCs}
\index{PCs}
\index{Emulators}
\index{TeemTalk}
After evaluation of several graphics emulators, the TeemTalk package
from Pericom has been selected as being suitable to allow IBM compatible
PCs to be used as graphics terminals. TeemTalk emulates Tektronix
4107/4207 colour terminals which are supported by GKS on all CERN
time-sharing systems, and it is possible for CERN to order licences at
very favorable rates.
\Lit{$==>$} ??? Those people who would like to obtain a licence should contact
R.~Cailliau or F.~Ovett of AS-MI.
\subsection{Documentation}
\index{documentation}
Apart from this {\bf GKS/GKS-3D at CERN}, there exists
\Lit{$==>$} To complete ...
the {\bf GRVIEW, GRCONV and GRPLOT; Metafile Utility User's Guide}
\cite{bib-grref}, + ...,
all of which are available from the UCO.
Copies of the {\bf GKSGRAL Reference Manual} \cite{bib-gtsref} and the
{\bf GKSGRAL-3D Reference Manual} \cite{bib-gtsref3} may be borrowed
from the Computer Science Library, purchased from GTS-GRAL,
or purchased from CERN under an agreement with GTS-GRAL
(see Appendix on Page~\pageref{sec:gtsdist} for details).
{\bf Note that the GKSGRAL manuals are covered by copyright.}
\index{HELP}
\index{FIND}
On-Line help files are available using the {\bf FIND~xxx} command on
IBM, the {\bf HELP~xxx} command on VMS,
and via the command {\bf HELP~CERN~xxx} on APOLLO.
Keywords (xxx) are:
GRAPHICS, GKS, GRPLOT, GRCONV, GRVIEW, and GKSPACK.
\subsection{The User Consultancy Office (UCO)}
\index{UCO}
\index{User Consultancy Office}
General graphics inquiries should be directed to the User Consultancy Office
on the ground floor of Building~513 (Tel:~4952,
Electronic Mail: \Lit{[email protected]}).
\chapter{\protect\label{sec:gkspr}GKS Primer}
\section{Workstations}
\index{workstation}
\subsection{Definition}
GKS defines the concept of an abstract {\it graphical workstation}
which provide the logical interface through which the application
program controls the physical devices.
A workstation can have one {\it display surface} and/or a collection
of {\it logical input devices}.
Thus, a particular interactive workstation may
belong to one of three classes: Output-Only, Input-Only,
or Input-Output. (There are a further two classes of
workstation: Metafile-Input and Metafile-Output.)
GKS allows several workstations to be open simultaneously,
for example, one for an interactive graphics terminal
and one for a metafile or hardcopy device.
The actual maximum number of simultaneously open workstations is
installation dependent; in the GTS-GRAL implementation it is set to 15.
GKS allows the appearance of output primitives to vary between
workstations in order to take advantage of their differing
capabilities. However, it is possible to inquire at run-time what
the actual capabilities are, and to design one's code appropriately.
The inquiry functions are too numerous to list in this Primer,
and the reader is referred to one of the texts in the bibliography.
\subsection{Opening and Closing GKS}
Before any workstation is {\it opened} or {\it activated},
GKS itself must be {\it opened}.
This must be the first call in any GKS program
as it performs the initialization.
The last call should be to {\it close} (exit from) GKS.
\index{GKS routine!{\protect\tt GOPKS}}
\index{GKS routine!{\protect\tt GCLKS}}
\index{error file}
\begin{XMP}
CALL GOPKS(ERRFIL, BUF)
.......
CALL GCLKS
\end{XMP}
ERRFIL defines the FORTRAN logical unit number to which all error
messages will be sent. BUF is the amount of memory space to be
used. (This parameter is ignored by the GKSGRAL and GKSGRAL-3D implementations.)
\subsection{Opening and Closing a Workstation}
\index{workstation!opening/closing}
\index{workstation!identifier}
Each workstation is identified in the application program by a
unique number, the {\it workstation identifier}. This number is used
in routines which communicate with the device, i.e. when output is to be
sent to it, or when it is to be cleared, etc. In order
to establish a connection between the application program and
a workstation, the workstation has to be {\it opened}.
When the workstation is {\it closed},
the connection is released, segments stored in Workstation Dependent
Segment Storage are lost, and no further interactions are possible.
\index{GKS routine!{\protect\tt GOPWK}}
\index{GKS routine!{\protect\tt GCLWK}}
\index{connection identifier}
\index{workstation!connection identifier}
\index{workstation!type}
The calls to open and close a workstation are:
\begin{XMP}
CALL GOPWK(WKID, CONID, WTYPE)
and
CALL GCLWK(WKID)
\end{XMP}
The workstation identifier WKID is a positive integer which the
application chooses, and is typically a number like 1,2,3...
The integer CONID is the Connection Identifier which connects the
workstation to an I/O channel. It is usually a FORTRAN logical unit
number, but depends on the workstation and the host computer being used.
Thus, the value of CONID must be obtained from the
workstation-dependent documentation.
The {\it Workstation Type} (WTYPE) is an integer which specifies
which type of workstation should be opened,
e.g. a Tektronix 4014 or a Versatec plotter.
Workstation Types are implementation dependent.
A list of workstation types in use at CERN will be found in the
appendix.
\subsection{Activating and Deactivating a Workstation}
\index{workstation!activation}
When primitives and attributes are output they are sent to all
{\it open} and {\it active} workstations.
When a workstation is {\it deactivated} no further
output is sent to it. At the end of the application program all open
workstations must be deactivated and closed before GKS itself can be closed.
\index{GKS routine!{\protect\tt GACWK}}
\index{GKS routine!{\protect\tt GDAWK}}
To activate or deactivate the workstation the calls are:
\begin{XMP}
CALL GACWK(WKID)
CALL GDAWK(WKID)
\end{XMP}
Note that Input-Only and Metafile-Input workstations may not be activated.
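Putting these calls together, the overall structure of a minimal GKS
program is sketched below. The values of CONID and WTYPE are
installation-dependent placeholders which must be taken from the
workstation documentation (see also the GTSDEV include file):
\begin{XMP}
CALL GOPKS(ERRFIL, BUF)
CALL GOPWK(WKID, CONID, WTYPE)
CALL GACWK(WKID)
....... output primitives and attributes .......
CALL GDAWK(WKID)
CALL GCLWK(WKID)
CALL GCLKS
\end{XMP}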
\subsection{Clearing the Workstation}
\index{workstation!update}
The application has control over clearing the display
surface at any time by calling:
\index{GKS routine!{\protect\tt GCLRWK}}
\begin{XMP}
CALL GCLRWK(WKID, COFL)
\end{XMP}
The integer COFL controls the way in which the update happens.
If COFL is set to 'GALWAY' (see note at bottom of page 1),
then the workstation is cleared even if there was no previous output.
If COFL is set to 'GCONDI', then the drawing surface is cleared only
if there has been previous output.
Clearing the workstation deletes all WDSS segments stored on that
workstation (segments are described in Chapter on Page~\pageref{sec:seghdr}),
and on a hardcopy device the paper is advanced.
For metafile interpretation by the CERN utilities this
function is used to indicate 'end of picture'.
Note that the workstation is cleared automatically when
it is opened. Some implementations also clear the workstation when it is
closed, but this is not mandated in the standard.
\subsection{\protect\label{sec:defsta}Deferral States}
\index{deferral states}
GKS allows the programmer to {\it defer} sending output to a
workstation by calling the function Set Deferral State.
For example, it might be more efficient to send
primitives in batches, rather than one at a time.
Thus, 'As Soon As Possible' often may not be as {\it fast} as possible!
For most users of GKS the default values should be
acceptable (but note that these are device-dependent).
The call is:
\index{GKS routine!{\protect\tt GSDS}}
\begin{XMP}
CALL GSDS(WKID, DEFMOD, REGMOD)
\end{XMP}
The second argument, DEFMOD, controls when output should be sent to
the workstation specified by WKID, and can take the following values:
\begin{DLtt}{123456}
\item['GASAP']
send output As Soon As Possible
\item['GBNIG']
send output Before Next Interaction Globally. This makes sure
that the workstation is up-to-date before the next input action
on {\it any} open workstation
\item['GBNIL']
send output Before Next Interaction Locally. This makes sure
that the workstation is up-to-date before the next input action
on the workstation specified in the call
\item['GASTI']
At Some TIme allows GKS to choose some opportune moment to
bring the workstation up-to-date
\end{DLtt}
The third argument, REGMOD, controls what should happen on
those workstations which require the image on the display to be
regenerated in some circumstances.
For example, to delete a segment on a direct view storage tube display,
such as a TEKTRONIX 4014, would require the screen
to be cleared and then the remaining contents re-drawn.
\index{implicit regeneration}
This is called {\it implicit regeneration}.
REGMOD can take the following values:
\begin{DLtt}{123456}
\item['GSUPPD']
Re-drawing is suppressed until an update is forced by calling
the Update Workstation function, GUWK, or by re-drawing all the
segments with GRSGWK.
\item['GALLOW']
Immediate re-drawing is allowed.
\end{DLtt}
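For example, to make sure that a workstation is brought up-to-date before
each input action on it, whilst suppressing implicit regeneration, one
might call:
\begin{XMP}
CALL GSDS(WKID, GBNIL, GSUPPD)
\end{XMP}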
\subsection{Updating the Workstation}
Because of the deferral mode set, not all the output may yet have been sent
to the display. The routine
\index{workstation!update}
\index{GKS routine!{\protect\tt GUWK}}
\begin{XMP}
CALL GUWK(WKID, REGFL)
\end{XMP}
\index{non-retained data}
\index{implicit regeneration}
is provided to bring the contents of the workstation WKID up-to-date.
The argument REGFL can take the values 'GPOSTP' or 'GPERFO'.
If 'GPOSTP' is specified, then all deferred actions are performed
at some time.
If 'GPERFO' is specified, then all deferred actions are performed,
the workstation transformation is updated,
the display surface is cleared, and all segments are
re-drawn as necessary. If an implicit regeneration of the image
has to be performed then non-retained data (primitives not stored
in segments) will be lost.
Note that a call to GUWK will {\bf not} necessarily clear
the workstation and re-draw all segments from scratch.
However, this can be achieved by calling the function GRSGWK
(see section on Page~\pageref{sec:grsgwk}).
\subsection{Sending Messages/Prompts}
\index{messages}
\index{prompts}
In order to display messages and prompts on the display surface
the following routine is provided:
\index{GKS routine!{\protect\tt GMSG}}
\begin{XMP}
CALL GMSG(WKID, STR)
\end{XMP}
WKID specifies on which workstation the text string STR should appear.
Where and how the message is written out is both implementation
and workstation-dependent.
\section{\protect\label{sec:dprim}The Drawing Primitives}
\index{primitives!2D}
Drawing primitives are the basic elements of graphics output, such as
lines, text etc.
GKS supports the six output primitives which are described below:
{\it polyline, polymarker, fill area, text, cell array},
and the {\it generalised drawing primitive}.
Each primitive has a corresponding set of {\it attributes}
e.g. line type, colour index, character height, etc.
The appearance of an individual primitive is governed by the
attributes in force at the time when the primitive is passed to GKS,
and attribute setting will be described in the following chapter.
A number of example programs may be found in Appendix on Page~\pageref{sec:exmpref}
which illustrate the use of the following features.
\begin{figure}[h]
\caption{Examples of some GKS Primitives}
\label{fig:prims}
\end{figure}
\begin{note}
The Polyline, Polymarker, and Fill Area primitives are specified by an
array of points which may not be arbitrarily large, and the maximum
size of this array is implementation-dependent. The GKSGRAL
implementation at CERN restricts the number of points in a Fill Area to
300. However, calls to Polyline or Polymarker can handle more points
than this by internally sub-dividing the array.
\end{note}
\subsection{Polyline}
\index{polyline}
This draws line segments between two or more points using the
currently set attributes for line style, polyline colour index and
line width scale factor. The call is:
\index{GKS routine!{\protect\tt GPL}}
\begin{XMP}
CALL GPL(N, PXA, PYA)
\end{XMP}
where N is the number of points, and PXA(N), PYA(N) are real
arrays containing the X and Y values of the points in the
application program's own coordinate system (called the
{\it World Coordinates System}).
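For example, the following fragment draws a triangle by repeating the
first point at the end of the arrays in order to close the polygon:
\begin{XMP}
REAL PXA(4), PYA(4)
DATA PXA /0.2, 0.8, 0.5, 0.2/
DATA PYA /0.2, 0.2, 0.8, 0.2/
CALL GPL(4, PXA, PYA)
\end{XMP}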
\subsection{Polymarker}
\index{polymarker}
This marks a sequence of points with the marker symbol selected by the
currently set attributes for polymarker colour index, marker type and
marker size scale factor. The marker size to which the scale factor
is applied is workstation-dependent.
The call is:
\index{GKS routine!{\protect\tt GPM}}
\begin{XMP}
CALL GPM(N, PXA, PYA)
\end{XMP}
where N, PXA and PYA have the same meanings as for GPL.
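For example, to mark the three vertices of the triangle drawn in the
polyline example above:
\begin{XMP}
CALL GPM(3, PXA, PYA)
\end{XMP}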
\subsection{Text}
\index{text}
Text is displayed using the current attribute settings for text colour
index, text font and precision, character-up vector, text alignment,
character expansion factor, character spacing, text path and
character height. The call is:
\index{GKS routine!{\protect\tt GTX}}
\begin{XMP}
CALL GTX(PX, PY, STR)
\end{XMP}
where PX and PY are real numbers which define in World Coordinates the
starting position of the text string STR.
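For example, to write a string starting at the point (0.4, 0.5) in
World Coordinates:
\begin{XMP}
CALL GTX(0.4, 0.5, 'HELLO GKS')
\end{XMP}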
\subsection{Fill Area}
\index{fill area}
Fill Area draws an area which is specified by a polygon.
The interior of the polygon can be either not filled (hollow), filled
with a solid colour, or filled with various patterns.
The fill area style and colour is specified by the current setting of
the fill area colour index, fill area style and fill area style index
attributes. The call is:
\index{GKS routine!{\protect\tt GFA}}
\begin{XMP}
CALL GFA(N, PXA, PYA)
\end{XMP}
where N, PXA, PYA have the same meaning as for GPL and GPM.
If the first and last points are different, they are joined by a line.
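For example, re-using the arrays from the polyline example, a solid
triangle may be produced as follows (GSFAIS, which selects the interior
style, is described in the next section):
\begin{XMP}
CALL GSFAIS(GSOLID)
CALL GFA(3, PXA, PYA)
\end{XMP}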
\subsection{Cell Array}
\index{cell array}
The Cell Array is an array of rectangular cells with individual
colours. It serves for passing raster images to GKS.
Note that in general, colour values within the Cell Array do not
correspond to picture elements (pixels) on a hardware display,
and that all primitives, including Cell Arrays, are subject to
the usual GKS transformations.
The call is:
\index{GKS routine!{\protect\tt GCA}}
\begin{XMP}
CALL GCA(PX,PY,QX,QY,DIMX,DIMY,ISC,ISR,DX,DY,COLIA)
\end{XMP}
Neither the Cell Array primitive nor the function call arguments will be
described in detail in this Primer, and the reader is referred to the GKSGRAL
manual, or one of the texts in the bibliography, for more information.
\subsection{Generalised Drawing Primitive}
\index{generalised drawing primitive}
\index{GDP}
Although at present GKS defines only six types of graphics primitives,
the standard allows for these to be augmented within particular
implementations by using {\it Generalised Drawing Primitives} (GDPs).
Thus, the Generalised Drawing Primitive may be thought
of as a 'standard way to be non-standard', and in fact a mechanism
exists for implementors of GKS to register GDPs with the International
Standards Organization group responsible for GKS.
The intention is that these registered GDPs will be incorporated as new
primitives within future revisions of the standard.
However, note that the use of GDPs may make programs
{\bf non-portable to other GKS implementations}. On the other
hand, they do permit the GKS driver to make use of special hardware
features, circle drawing for example, if these features are available.
GKSGRAL supports 6 GDPs:
\index{circle GDP}
\index{arc GDP}
\index{ellipse GDP}
\index{Bezier curve GDP}
\index{cubic curve GDP}
\begin{OL}
\item Circle
\item Circular Arc
\item Ellipse
\item Elliptical Arc
\item Bezier Curve defined by Bezier Polygon
\item Cubic Curve defined by Interpolation Points
\end{OL}
The call is:
\index{GKS routine!{\protect\tt GGDP}}
\begin{XMP}
CALL GGDP(N, PXA, PYA, PRIMID, IDR, DATREC)
\end{XMP}
See the GKSGRAL Manual for more details.
\section{\protect\label{sec:attrbs}The Output Attributes}
\index{attributes}
Before actually outputting any primitives, the application will want to
specify the exact way in which the drawing appears on the display
surface. This {\it rendering} process is controlled
by {\it output attributes}.
Output primitives have geometric and non-geometric attributes.
Geometric attributes, such as the character height,
affect the size and shape of a primitive, whereas non-geometric
attributes are qualities such as colour, line style, etc.
Output attributes affect the appearance or rendering of primitives at the
moment when the primitives are sent to GKS, and attributes are said to be
{\it bound} to primitives at this time.
Thus, modification of an attribute has no effect on primitives which
have been output already.
GKS attributes are said to be {\it modal} in character because,
after setting an attribute, GKS is in a mode in which the value of that
attribute will be bound to all primitives of the appropriate type which
follow. Setting a different value for the attribute would then
change the mode.
\subsection{Attribute Bundles}
\index{bundled attributes}
\index{individual attributes}
\index{bundle index}
There are two ways of specifying attributes:
{\it Bundled} and {\it Individual}.
Attributes may be set individually by calling the appropriate routines
one at a time. As an example, for a polyline one could set the
line style, the line width, and the colour by calling the routines
GSLN, GSLWSC, and GPLCI, before calling GPL to output the polyline.
Setting attributes individually will ensure that
the primitives to which they are bound appear the same on all
workstations, assuming that the workstations have sufficient capability.
However, if attributes are set using bundles, then the results will
be workstation-dependent.
Bundled attributes are assigned by selecting a {\it bundle index}
for a particular type of primitive using the routines given in section
on Page~\pageref{sec:setbnd}. The bundle index points to an entry in the
appropriate workstation {\it bundle table}.
Hence, each workstation has a bundle table for every primitive type,
and each entry in a bundle table contains a pre-defined set of
attributes for that particular primitive.
For example, the first entry in the polyline bundle table may contain
the attributes {\it dotted} and {\it red}, the second may
contain {\it solid, double thickness} and {\it blue}, etc.
Note that attribute bundles do
{\bf not} contain geometric attributes.
Beginners are advised to ignore attribute bundles and to set each
attribute individually. However, read the next section on
Aspect Source Flags before trying out a program.
As an example of why one might make use of attribute bundles,
consider an application which sometimes uses a colour terminal and
sometimes a monochrome one.
By drawing polylines with, say, bundle table index 5, the actual
appearance of the polylines will depend on the contents of
polyline bundle 5 on the two workstations. Thus, the application
can arrange to distinguish the polylines by using a particular
colour on the colour terminal, and a particular dash pattern
on the monochrome terminal, without making changes to the body of the
code which draws the primitives.
By using attribute bundles to specify attributes, and assuming that
the primitives have been stored in segments
(segments are described in Chapter on Page~\pageref{sec:seghdr}),
the application can also change the appearance of primitives
{\it after} they have been output to a workstation by
re-defining the contents of the bundle table.
This effect can not be achieved if the
attributes are set individually without deleting and re-drawing
the primitives.
\subsubsection{Aspect Source Flags}
\index{ASFs}
\index{aspect source flags}
To control whether a primitive attribute should be set individually,
or using a bundle table index, each primitive has a set of attributes
called the {\it Aspect Source Flags} (ASFs);
one flag for each primitive attribute.
If the ASF for a particular attribute is set to 'GBUNDL',
then the primitive will be bound to the attributes in the bundle table
entry pointed to by the bundle index currently in force.
If the ASF for a particular attribute is set to 'GINDIV',
then the primitive will be bound to the current individual attribute values
for that type of primitive.
Unfortunately, the committee which designed GKS could not agree on
whether the default setting for the ASFs should be bundled or individual.
Thus, American implementations, such as PLOT10/GKS, tend to default
the ASFs to set attributes individually, whilst European implementations,
such as GKSGRAL, tend to default ASFs to bundled attributes.
In order to be safe, {\bf users of GKS are advised to set their own default
values for the ASFs} as soon as they open GKS.
This can be done by calling:
\index{GKS routine!{\protect\tt GSASF}}
\begin{XMP}
CALL GSASF(ASFLST)
\end{XMP}
where ASFLST is an array of 13 integers, one for each attribute,
which must be set to 'GBUNDL' or 'GINDIV'.
The bundle table index for each attribute is given in parentheses
in the attribute list below.
An example program which calls GSASF may be
found in Appendix on Page~\pageref{sec:expfa}.
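For instance, to request that all thirteen aspects be taken from the
individual attribute settings (using the parameter GINDIV from the ENUM
include file):
\begin{XMP}
INTEGER ASFLST(13)
DATA ASFLST /13*GINDIV/
CALL GSASF(ASFLST)
\end{XMP}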
\subsubsection{Defining Attribute Bundles}
It is possible to define the entries in the bundle tables by calling one
of the following routines:
\index{GKS routine!{\protect\tt GSPLR}}
\index{GKS routine!{\protect\tt GSPMR}}
\index{GKS routine!{\protect\tt GSTXR}}
\index{GKS routine!{\protect\tt GSFAR}}
\index{GKS routine!{\protect\tt GSPAR}}
Set {\bf polyline} representation:
\begin{XMP}
GSPLR(WKID, PLI, LN, LWSC, PLCI)
\end{XMP}
Set {\bf polymarker} representation
\begin{XMP}
GSPMR(WKID, PMI, MK, MKSC, PMCI)
\end{XMP}
Set {\bf text} representation
\begin{XMP}
GSTXR(WKID, TXI, TXF, TXP, CHXP, CHSP, TXCI)
\end{XMP}
Set {\bf fill area} representation
\begin{XMP}
GSFAR(WKID, FAI, FAIS, FASI, FACI)
\end{XMP}
Set {\bf pattern} representation
\begin{XMP}
GSPAR(WKID, PAI, DIMX, DIMY, NCS, NRS, DX, DY, PACI)
\end{XMP}
As arguments, each routine requires the workstation identifier (WKID)
and bundle index (PLI, etc.) to be set, plus a value for each of the
non-geometric attributes for that particular primitive,
and which are listed below.
Details of GSPAR will not be given in this Primer; see the GKSGRAL manual
or one of the references in the bibliography for more information.
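As an example of these routines, the following call defines polyline
bundle 5 on workstation WKID to be a dotted line of nominal width drawn
with colour index 2 (which colour actually appears depends on the
colour table of the workstation):
\begin{XMP}
CALL GSPLR(WKID, 5, GLDOT, 1.0, 2)
\end{XMP}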
\subsection{\protect\label{sec:attlst}The List of GKS Attributes}
\index{attributes!list of}
\subsubsection{Individual Attributes}
\begin{UL}
\item {\bf POLYLINE}
\index{polyline}
\begin{DLtt}{123456}
\item[LN]
\index{GKS routine!{\protect\tt GSLN}}
(integer) the polyline line style (ASF 1). Set by GSLN(LN).
\item[LWSC]
\index{GKS routine!{\protect\tt GSLWSC}}
(real) the line width scale factor (ASF 2). Set by GSLWSC(LWSC).
\item[PLCI]
\index{GKS routine!{\protect\tt GSPLCI}}
(integer) the polyline colour index (ASF 3). Set by GSPLCI(PLCI).
\end{DLtt}
\item {\bf POLYMARKER}
\index{polymarker}
\begin{DLtt}{123456}
\item[MK]
\index{GKS routine!{\protect\tt GSMK}}
(integer) the polymarker type (ASF 4). Set by GSMK(MK).
\item[MKSC]
\index{GKS routine!{\protect\tt GSMKSC}}
(real) the marker size scale factor (ASF 5). Set by GSMKSC(MKSC).
\item[PMCI]
\index{GKS routine!{\protect\tt GSPMCI}}
(integer) the polymarker colour index (ASF 6). Set by GSPMCI(PMCI).
\end{DLtt}
\item {\bf TEXT}
\index{text}
\begin{DLtt}{123456}
\item[TXF]
\index{GKS routine!{\protect\tt GSTXFP}}
(integer) the text font (ASF 7). Set by GSTXFP(TXF, TXP).
\item[TXP]
(enumerated) the text precision (ASF 7). Set by GSTXFP(TXF, TXP).
\item[CHXP]
\index{GKS routine!{\protect\tt GSCHXP}}
(real) the character expansion factor (ASF 8). Set by GSCHXP(CHXP).
\item[CHSP]
\index{GKS routine!{\protect\tt GSCHSP}}
(real) the character spacing (ASF 9). Set by GSCHSP(CHSP).
\item[TXCI]
\index{GKS routine!{\protect\tt GSTXCI}}
(integer) the text colour index (ASF 10). Set by GSTXCI(TXCI).
\item[CHUP]
\index{GKS routine!{\protect\tt GSCHUP}}
(real) the character up vector. Set by GSCHUP(CHUX, CHUY).
\item[TXAL]
\index{GKS routine!{\protect\tt GSTXAL}}
(enumerated) the text alignment. Set by GSTXAL(TXALH, TXALV).
\item[TXP]
\index{GKS routine!{\protect\tt GSTXP}}
(enumerated) the text path. Set by GSTXP(TXP).
\item[CHH]
\index{GKS routine!{\protect\tt GSCHH}}
(real) the character height. Set by GSCHH(CHH).
\end{DLtt}
\item {\bf FILL AREA}
\index{fill area}
\begin{DLtt}{123456}
\item[FAIS]
\index{GKS routine!{\protect\tt GSFAIS}}
(enumerated) the fill area interior style (ASF 11). Set by GSFAIS(FAIS).
\item[FASI]
\index{GKS routine!{\protect\tt GSFASI}}
(integer) the fill area style index (ASF 12). Set by GSFASI(FASI).
\item[FACI]
\index{GKS routine!{\protect\tt GSFACI}}
(integer) the fill area colour index (ASF 13). Set by GSFACI(FACI).
\end{DLtt}
\item {\bf PATTERN}
\begin{DLtt}{123456}
\index{pattern}
\item[PA]
\index{GKS routine!{\protect\tt GSPA}}
(real) the pattern size. Set by GSPA(PASZX, PASZY).
\item[PARF]
\index{GKS routine!{\protect\tt GSPARF}}
(real) the pattern reference point. Set by GSPARF(RFX, RFY).
\end{DLtt}
\end{UL}
\subsubsection{\protect\label{sec:setbnd}Bundled Attributes}
\begin{UL}
\item {\bf POLYLINE}
\index{polyline}
\begin{DLtt}{123456}
\item[PLI]
\index{GKS routine!{\protect\tt GSPLI}}
(integer) the polyline bundle index. Set by GSPLI(PLI).
\end{DLtt}
\item {\bf POLYMARKER}
\index{polymarker}
\begin{DLtt}{123456}
\item[PMI]
\index{GKS routine!{\protect\tt GSPMI}}
(integer) the polymarker bundle index. Set by GSPMI(PMI).
\end{DLtt}
\item {\bf TEXT}
\index{text}
\begin{DLtt}{123456}
\item[TXI]
\index{GKS routine!{\protect\tt GSTXI}}
(integer) the text bundle index. Set by GSTXI(TXI).
\end{DLtt}
\item {\bf FILL AREA}
\index{fill area}
\begin{DLtt}{123456}
\item[FAI]
\index{GKS routine!{\protect\tt GSFAI}}
(integer) the fill area bundle index. Set by GSFAI(FAI).
\end{DLtt}
\end{UL}
\subsection{Specifying Line Styles For Polylines}
\index{polyline styles}
It is possible to draw polylines {\it solid, dashed, dotted}
or {\it dashed-dotted} ('GLSOLI', 'GLDASH', 'GLDOT', 'GLDASD').
For example:
\index{GKS routine!{\protect\tt GSLN}}
\begin{XMP}
CALL GSLN(GLDASH) - sets the line style to dashed
\end{XMP}
The different line styles available are shown in \ref{fig:linstyl}.
\begin{figure}[h]
\caption{GKS line styles}
\label{fig:linstyl}
\end{figure}
It is also possible to specify a scale factor which modifies the nominal
width of lines on the workstation. For example:
\index{GKS routine!{\protect\tt GSLWSC}}
\begin{XMP}
CALL GSLWSC(2.0)
\end{XMP}
should double the nominal line width. Note, however, that this (Real) parameter
is implementation-dependent, and may be ignored by terminal drivers as it
is both difficult and expensive to emulate in software if the device
does not support the feature in hardware.
\subsection{Specifying Marker Types for Polymarkers}
\index{polymarker types}
The five GKS Marker types, {\it point, plus, asterisk, circle}
and {\it cross} ('GPOINT', 'GPLUS', 'GAST', 'GOMARK', 'GXMARK'),
are demonstrated in \ref{fig:mark}.
For example:
\index{GKS routine!{\protect\tt GSMK}}
\begin{XMP}
CALL GSMK(GPOINT) - sets the marker type to point
\end{XMP}
\begin{figure}[h]
\caption{GKS marker types}
\label{fig:mark}
\end{figure}
Markers may be scaled in size by calling GSMKSC. For example:
\index{GKS routine!{\protect\tt GSMKSC}}
\begin{XMP}
CALL GSMKSC(3.5)
\end{XMP}
will scale the following markers by 3.5 times. The implementation of this
function is workstation-dependent. In particular, markers drawn on terminals
by hardware may only exist in certain fixed sizes.
\subsection{Specifying Fill Areas}
\index{fill area}
\index{hatching}
Many applications need shaded or coloured areas as well as lines and points.
GKS allows one to draw such an area by specifying an array of points
representing a closed polygon. If the last point in the array is not the
same as the first, these two will be joined.
The interior style of the area can be set to one of the four values
{\it hollow, solid, pattern}, and {\it hatch},
which are demonstrated in \ref{fig:fill}
('GHOLLO', 'GSOLID', 'GPATTR', 'GHATCH').
Examples for setting fill area interior style are:
\index{GKS routine!{\protect\tt GSFAIS}}
\begin{XMP}
CALL GSFAIS(GHOLLO)
or
CALL GSFAIS(GSOLID)
\end{XMP}
For interior style {\it hollow} only the boundary polygon is drawn, as a solid line.
For style {\it solid} the interior is completely filled with a uniform
colour, as specified by the fill area colour index set by calling GSFACI.
\index{GKS routine!{\protect\tt GSFACI}}
Workstations for devices which support area filling of polygons by hardware
should normally make use of this feature.
However, this is not always possible, as some monochrome terminals do not
use the correct algorithm to perform the area fill.
For interior styles {\it hatch} and {\it pattern},
the particular hatch algorithm or pattern used may be chosen by specifying
a fill area style index.
This represents a second level of selection on the way the area is filled,
and the index points into either a hatch or pattern table stored at the
workstation. Thus, hatches and patterns are workstation-dependent.
The fill area style index is set by calling:
\index{GKS routine!{\protect\tt GSFASI}}
\begin{XMP}
CALL GSFASI(FASI)
\end{XMP}
where FASI is an integer value. To find out the effect of setting a particular
index, it is necessary to consult the workstation documentation.
\index{GKS routine!{\protect\tt GUSHTR}}
\index{GKS routine!{\protect\tt GSPARF}}
\index{GKS routine!{\protect\tt GSPAR}}
\index{GKS routine!{\protect\tt GSPA}}
The GKS standard provides calls to modify patterns by setting the pattern
reference point (GSPARF) and pattern size (GSPA).
The pattern representation attribute bundle is set using GSPAR.
The GKSGRAL package also allows the possibility to define one's
own hatch styles by calling the routine GUSHTR,
although this is not a standard GKS feature.
None of these routines will be described further in this Primer,
and the reader is referred to the texts in the bibliography
for more information.
\begin{figure}[h]
\caption{GKS fill area styles}
\label{fig:fill}
\end{figure}
\subsection{Specifying Text}
\index{text}
\index{converting numeric variables}
\index{numeric variables}
Text is the most complex of the GKS primitives because there are
so many different variations. The text attributes are described below.
Note that only the non-geometric attributes may be set in the text
bundle table.
({\bf HINT:} For FORTRAN programmers, it is possible to use the
{\it Internal Write} construct to convert numeric variables to character
strings for output as text primitives, see section on Page~\pageref{sec:refintw}).
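For example, the following sketch (the variable names, the format, and
the text position are arbitrary) converts a real value to a character
string with an internal WRITE and then outputs it with the GTX primitive:
\begin{XMP}
      REAL          VALUE
      CHARACTER*16  LABEL
C
C Convert the number into a character string ...
      VALUE = 12.5
      WRITE(LABEL, '(A, F6.2)') 'E = ', VALUE
C
C ... then output the string as a text primitive
      CALL GTX(0.5, 0.5, LABEL)
\end{XMP}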
\subsubsection{Font and Precision}
\index{text!fonts}
\index{text!precision}
\index{string precision text}
\index{character precision text}
\index{stroke precision text}
The {\it text font} is specified by an integer font number,
and fonts are workstation-dependent.
Any text font which is not supported on a given workstation is defaulted
to 1. This text font is a simple, roman type font produced by stroking
out each character using line segments.
To find out which fonts are provided by a particular implementation
one should consult the relevant reference manual.
The {\it text precision} attribute determines how closely the chosen
font has to obey the specifications of the other text attributes.
The choices for text precision are:
{\it string, char} and {\it stroke}
('GSTRP', 'GCHARP', 'GSTRKP').
In {\it string} precision the text string should be placed as close
as possible to the position specified by the primitive call.
None of the other text attributes need be taken into account
except the approximate size specified by character height.
Thus, if string precision is specified, the implementation is usually free
to make use of hardware characters which can be drawn much faster than
generating the fonts in software.
If {\it char} precision together with a workstation-provided font is
chosen, GKS will try a workstation-dependent approximation.
In {\it stroke} precision GKS has to follow precisely the attribute
specifications, and always defaults to font 1
if the requested font does not exist.
Note that an implementation of GKS {\it may} provide stroke precision
text on a particular workstation even if the application specified only
a lower precision.
Both the text font and the text precision are set by the same routine:
\index{GKS routine!{\protect\tt GSTXFP}}
\begin{XMP}
CALL GSTXFP(FONT, PREC)
\end{XMP}
\begin{figure}[h]
\caption{Text fonts, precisions and up vectors}
\label{fig:fonts}
\end{figure}
\subsubsection{Expansion Factor}
\index{character!expansion factor}
The character expansion factor, a real number, causes each character to
appear 'fatter' or 'thinner' than normal. The default value is 1.0.
The height of the character is not affected, nor is the space
between the characters.
\index{GKS routine!{\protect\tt GSCHXP}}
\begin{XMP}
CALL GSCHXP(CHXP)
\end{XMP}
\subsubsection{Spacing}
\index{character!spacing}
The character spacing attribute defines the amount of extra
blank space to be inserted between adjacent characters of a text string.
This is a real number defined as a fraction of the character height.
The default value is 0.0. A positive character spacing spreads the
letters out, a negative one makes the letters overlap.
\index{GKS routine!{\protect\tt GSCHSP}}
\begin{XMP}
CALL GSCHSP(CHSP)
\end{XMP}
\subsubsection{Character-Up-Vector}
\index{character!up-vector}
The char-up vector defines the orientation of the text.
The text is written from left to right along a line perpendicular to the
char-up vector, which is specified by its X and Y components
(two real numbers):
\index{GKS routine!{\protect\tt GSCHUP}}
\begin{XMP}
CALL GSCHUP(CHUX, CHUY)
\end{XMP}
The effect of choosing different combinations of text font,
text precision and character-up-vector is shown in Figure~\ref{fig:fonts}.
\subsubsection{Alignment}
\index{text!alignment}
The text string as a whole is positioned relative to the point specified
in the GTX primitive call according to the current setting of the text
alignment attribute. The call to GSTXAL has two parameters for
horizontal and vertical alignment.
\index{GKS routine!{\protect\tt GSTXAL}}
\begin{XMP}
CALL GSTXAL(TXALH, TXALV)
\end{XMP}
The horizontal alignment is either {\it normal, left edge, centre},
or the {\it right edge} of the text string
('GAHNOR', 'GALEFT', 'GACENT', 'GARITE').
The vertical alignment is either {\it normal, top, cap line, half,
base line}, or {\it bottom}
('GAVNOR', 'GATOP', 'GACAP', 'GAHALF', 'GABASE', 'GABOTT').
The alignment attribute is illustrated in Figure~\ref{fig:align}.
The selection of {\it normal} for either horizontal or vertical
alignment results in GKS choosing the most appropriate value
depending on the current Text Path.
\begin{figure}[h]
\caption{Text alignment}
\label{fig:align}
\end{figure}
\subsubsection{Text Path}
\index{text!path}
The text path describes the direction in which the text string is written.
The possible values are {\it Right, Left, Up} and {\it Down}
('GRIGHT', 'GLEFT', 'GUP', 'GDOWN').
The default for the text path is perpendicular to the up vector (i.e. 'GRIGHT').
\index{GKS routine!{\protect\tt GSTXP}}
\begin{XMP}
CALL GSTXP(TXP)
\end{XMP}
\subsubsection{Height}
\index{character!height}
The character height is set in {\bf World Coordinates} by calling
GSCHH. Increasing the height automatically causes a corresponding
increase in the width of the character.
\index{GKS routine!{\protect\tt GSCHH}}
\begin{XMP}
CALL GSCHH(CHH)
\end{XMP}
Note that the use of World Coordinates to specify the character height may
cause problems if the normalization transformation chosen has a very large
or very small range in Y. In this case it is possible that when using a
metafile as an output workstation the number of digits required to
specify the character height may be more than the range used to define
real numbers within the metafile. If it would be inconvenient to modify
the range of World Coordinate Y values, then another possible solution to
the problem is to superimpose two viewports, one on top of the other.
All characters may then be drawn on one of the viewports which has been
defined using a more reasonable normalization transformation.
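Putting several of these attributes together, the following sketch
(the values are arbitrary, and the enumeration constants are assumed
to come from the GKS include file) outputs a string rotated to run
vertically upwards:
\begin{XMP}
C Stroke precision, font 1, characters 0.05 WC units high
      CALL GSTXFP(1, GSTRKP)
      CALL GSCHH(0.05)
C Centre the string on the point given to GTX
      CALL GSTXAL(GACENT, GAHALF)
C Up vector along -x, so the string runs vertically upwards
      CALL GSCHUP(-1.0, 0.0)
      CALL GTX(0.5, 0.5, 'TITLE')
\end{XMP}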
\subsection{Specifying Colour}
\index{colour!index}
Colour is specified separately for each primitive type by a colour index value.
\index{GKS routine!{\protect\tt GSPLCI}}
\index{GKS routine!{\protect\tt GSPMCI}}
\index{GKS routine!{\protect\tt GSTXCI}}
\index{GKS routine!{\protect\tt GSFACI}}
\begin{DLtt}{123456}
\item[Polyline]CALL GSPLCI(PLCI)
\item[Polymarker]CALL GSPMCI(PMCI)
\item[Text]CALL GSTXCI(TXCI)
\item[Fill Area]CALL GSFACI(FACI)
\end{DLtt}
The {\it colour index}, instead of defining a colour directly, points to
an entry in a {\it colour look-up table} which is workstation-dependent.
If the colour is an attribute of a primitive, then one can specify a colour
index either as an individual attribute or as part of a bundle table.
The size of the colour table is workstation-dependent, but
the table always contains the entries 0 and 1.
The background colour is 0, and the default foreground colour is 1.
Each entry greater than 1 defines an additional foreground colour.
If a colour index greater than the maximum is
specified, then the default value 1 is taken, which ensures that a program
written using colour will run on a monochrome device.
\index{colour!look-up table}
To set entries in the colour look-up table
one must call the function Set Colour Representation (GSCR).
It takes as parameters the workstation identifier, the colour index
to be set, and a value (from 0 to 1.0) for each of the red, blue, and green
intensities. The workstation then uses the closest available
colour to that which has been requested.
\index{GKS routine!{\protect\tt GSCR}}
\begin{XMP}
CALL GSCR(WKID, INDEX, RED, GREEN, BLUE)
\end{XMP}
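For example, the following sketch (colour index 2 and the intensities
are an arbitrary choice; WKID and the point arrays are assumed to have
been set up already) defines index 2 to be red and draws with it:
\begin{XMP}
C Define colour index 2 to be red on this workstation
      CALL GSCR(WKID, 2, 1.0, 0.0, 0.0)
C Draw a polyline using the newly-defined index
      CALL GSPLCI(2)
      CALL GPL(N, PX, PY)
\end{XMP}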
The highest possible value for the colour index (NCOLI)
depends on the capabilities of the device and can be inquired using
GQCF and specifying the workstation type:
\index{GKS routine!{\protect\tt GQCF}}
\begin{XMP}
CALL GQCF(WTYPE, ERRIND, NCOLI, COLA, NPCI)
\end{XMP}
The parameter COLA ('GMONOC' or 'GCOLOR') indicates whether or not colour
is available, and NPCI specifies the number of pre-defined colour indices.
The colour index can also be used if part of a previously drawn picture
is to be erased. The part to be erased should be re-drawn in
the background colour (index=0).
All other attribute settings and transformations must be
exactly as they were when the picture was produced initially.
However, even in this case, the trick may not work if the primitive
being erased overlays another primitive, and not just the background.
Colour may be part of a fill area pattern, in which case different parts
of the pattern may be defined to be different colours.
For the cell array primitive an array of colours is specified
as part of the primitive itself.
\section{Coordinates and Transformations}
\index{transformations}
\index{coordinate systems}
\index{world coordinates}
\index{normalized device coordinates}
\index{device coordinates}
GKS defines three coordinate systems:
\begin{OL}
\item WORLD Coordinates (WC)
\item NORMALIZED DEVICE Coordinates (NDC)
\item DEVICE Coordinates (DC)
\end{OL}
The application program specifies the coordinates of points in primitive
calls using a cartesian coordinate system with whichever scale
is the most convenient.
This system is called the {\it World Coordinate System}.
The WC range can go from negative to positive infinity along both axes,
which gives the application an unlimited choice of coordinates.
The definition of the WC space is independent of any graphics
output device and can be different for each application as the
requirements dictate.
All world coordinates are transformed by GKS to a 'virtual' device
space, the {\it Normalized Device Coordinate} (NDC) space.
The NDC space ranges from 0 to 1 in both x and y.
This space is device independent and appears identical for all
workstations in the system. A transformation from WC to NDC is called a
{\it Normalization Transformation}.
GKS allows more than one such transformation to be defined,
and so an application may draw each part of a picture in
its own WC system, and then map them into a single NDC space.
See Figures~\ref{fig:windows} and \ref{fig:trafos}.
The third coordinate system is that of the hardware device.
GKS uses specific information about the graphics output device
to transform normalized device coordinates into
coordinates that the device itself understands. Usually the units
in the device dependent space will be metres or raster units.
The translation from NDC to DC is called the
{\it Workstation Transformation}.
\index{window}
\index{viewport}
\index{normal!transformation}
\index{transformation!normalization}
\subsection{\protect\label{sec:nortrn}Normalization Transformation}
This transformation is specified by mapping a {\it Window}
in the application's WC space to a {\it Viewport} in NDC space.
Both the WC window and the NDC viewport are rectangles parallel to the
x,y axes.
They are specified by calls to Set WiNdow and Set ViewPort:
\index{GKS routine!{\protect\tt GSWN}}
\index{GKS routine!{\protect\tt GSVP}}
\begin{XMP}
CALL GSWN (TNR, WXMIN, WXMAX, WYMIN, WYMAX)
CALL GSVP (TNR, VXMIN, VXMAX, VYMIN, VYMAX)
\end{XMP}
\index{aspect ratio}
\index{TNR}
Because there may be several Normalization Transformations, the integer
value TNR indicates to which one the call applies.
The other values are real numbers defining the {\it bottom left} and
{\it top right} corners of the two rectangles. The window is
specified in the WC system, and the viewport in the NDC system. If the
x and y sides of the window and the viewport do not have the same
{\it Aspect Ratio} a distortion effect will take place.
This may or may not be intentional!
As has been mentioned, it is possible to generate a complex picture
with different world coordinates for different parts of the image,
or to place several pictures on different areas of the screen.
This is achieved by specifying multiple normalization transformations.
Each transformation is assigned a transformation number (TNR) for which
the range is 0 to some implementation-dependent maximum (usually about 20).
Thus, the transformation corresponding to each number
specifies a separate window-to-viewport mapping.
If the viewports overlap then their contents will be superimposed.
The default transformation number is 0, for which the WC and NDC
coordinate systems are the same.
Transformation 0 may not be modified, and so its use
is always equivalent to calling:
\begin{XMP}
CALL GSWN (0, 0.0, 1.0, 0.0, 1.0)
CALL GSVP (0, 0.0, 1.0, 0.0, 1.0)
\end{XMP}
The application specifies which normalization transformation is
active at a given time by a call to GSELNT (SELect Normalization
Transformation):
\index{GKS routine!{\protect\tt GSELNT}}
\begin{XMP}
CALL GSELNT(TNR)
\end{XMP}
All primitives created subsequently will be transformed according to this
transformation until a different one is selected.
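For example, the following sketch (the coordinate ranges are arbitrary)
maps a World Coordinate region onto the upper half of NDC space using
transformation number 1, and selects it for subsequent output. Note
that the window (100~x~50) and the viewport (1.0~x~0.5) have the same
aspect ratio, so no distortion takes place:
\begin{XMP}
C Window in WC and viewport in NDC (same aspect ratio)
      CALL GSWN (1, 0.0, 100.0, 0.0, 50.0)
      CALL GSVP (1, 0.0, 1.0, 0.5, 1.0)
C Use this transformation for subsequent primitives
      CALL GSELNT(1)
\end{XMP}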
\begin{figure}[h]
\index{window}
\index{viewport}
\caption{Windows and Viewports}
\label{fig:windows}
\end{figure}
\subsection{\protect\label{sec:wstntfm}Workstation Transformation}
\index{workstation!transformation}
\index{transformation!workstation}
The method of defining which portion of NDC space is to appear on
a specific workstation is similar to the way a viewport is positioned
in NDC space (the Normalization Transformation). The {\it Workstation
Transformation} defines that part of NDC space which will be visible,
and where it will appear on the display surface.
It is set by calling the functions Set WorKstation WiNdow and
Set WorKstation ViewPort:
\index{GKS routine!{\protect\tt GSWKWN}}
\index{GKS routine!{\protect\tt GSWKVP}}
\index{window}
\index{viewport}
\index{workstation!window}
\index{workstation!viewport}
\begin{XMP}
CALL GSWKWN(WKID, WXMIN, WXMAX, WYMIN, WYMAX)
CALL GSWKVP(WKID, VXMIN, VXMAX, VYMIN, VYMAX)
\end{XMP}
Set Workstation Window specifies in NDC coordinates the area of the NDC
space to be output to the device. Set Workstation Viewport specifies in
{\it Display Coordinates} (DC) where on the device the window will appear.
These functions may be called at any time. However, whether or not
the effect is immediately visible is workstation-dependent.
For example, if changing the workstation transformation requires an
implicit image regeneration, then the timing of when it will take place
is affected by the current deferral mode (see section on Page~\pageref{sec:defsta}).
\index{implicit regeneration}
\index{deferral states}
\index{aspect ratio}
The aspect ratio for the workstation window and the workstation viewport
{\bf must always be the same}.
If they are not, then {\bf the specified transformation is ignored},
and the complete workstation window is displayed on the device
in the correct aspect ratio.
The rectangle actually used has its bottom left corner at the bottom
left corner of the specified viewport, and is as large as possible.
The {\bf default setting} for the workstation transformation is
to map the whole unit square of NDC onto the largest square possible for the
workstation. For rectangular displays, this means that not all of
the display surface is used. Conversely, if the workstation
transformation is set so that all of a rectangular display is
used, then either: (1) the normalization transformation can not
preserve the aspect ratio, or: (2) not all of NDC space can be used.
\index{GKS routine!{\protect\tt GQDSP}}
\index{GKS routine!{\protect\tt GQMDS (see GQDSP)}}
To inquire the range of device coordinates corresponding to a particular
workstation type, it is possible to call the function GQDSP
(inQuire Display SPace). (Note: The 3D version is called GQDVOL.)
\begin{XMP}
CALL GQDSP (WTYPE, ERRIND, DCUNIT, RX, RY, LX, LY)
\end{XMP}
The routine returns the units in which the display surface
is measured (DCUNIT), and also the maximum x and y values
in each direction.
Some devices, for example hardcopy plotters, are measured in metres
(DCUNIT='GMETRE'), so the routine will return the actual size of the
device. Other devices, for example graphics terminals,
will be defined in raster or some other units (DCUNIT='GOTHU').
RX, RY are real device coordinates,
and LX, LY are in integer raster units.
As an example of how to use this routine, below is a fragment of code
which allows an application to draw on the whole of a rectangular
display surface with a 'Landscape' aspect ratio
(width~>~height).
See also the example in Appendix on Page~\pageref{sec:ex3dv}.
\begin{XMP}
C Inquire the Workstation Display Surface Size
CALL gqdsp (wtype, errind, dcunit, rx, ry, lx, ly)
C
C Set the Workstation Window and Viewport (assume x > y)
CALL gswkwn(wkid, 0.0, 1.0, 0.0, ry/rx)
CALL gswkvp(wkid, 0.0, rx, 0.0, ry)
C
C Make sure Workstation Transformation is used
CALL guwk (wkid, gperfo)
C
C Set Window and Viewport for Normalization Tfrm 'trn'
CALL gswn (trn, xmn, xmx, ymn, ymx)
CALL gsvp (trn, 0.0, 1.0, 0.0, ry/rx)
C
C Select this Normalization Tfrm for future primitives
CALL gselnt(trn)
\end{XMP}
\begin{figure}[h]
\caption{Normalization and Workstation Transformations}
\label{fig:trafos}
\end{figure}
\subsection{\protect\label{sec:nrmclp}Clipping}
\index{clipping}
\index{GKS routine!{\protect\tt GSCLIP}}
Clipping is the exclusion of those primitives, or parts of primitives,
which lie outside a specified window or viewport. That is to say, if
clipping to a particular window is enabled, then only those parts of
primitives which lie within the boundaries of the window will be
displayed. Both the normalization transformation viewport (in NDC space)
and the workstation window can be used to clip a picture.
Clipping at the boundary of the normalization transformation viewport
(clipping rectangle) affects the image displayed on ALL workstations.
However, clipping at the boundary of the
workstation window only affects the image on the display surface
belonging to that particular workstation.
This is illustrated in Workstation Viewport 1 in Figure~\ref{fig:windows}.
The application can control whether clipping at the boundary of the
clipping rectangle is enabled or not by calling the routine GSCLIP(VAL).
This routine takes the value 'GNCLIP' (no clip) or 'GCLIP' (clip),
and the default is to clip.
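For example:
\begin{XMP}
CALL GSCLIP(GNCLIP) - switches off clipping
\end{XMP}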
Clipping at the workstation window may not be switched off.
\section{\protect\label{sec:seghdr}Segments}
\index{segments}
GKS provides a way to collect and store together the primitives that
make up all or part of a picture. Such a collection of primitives is
called a {\it segment}, has a unique name, and may be manipulated
as a unit in various ways.
Only one segment can be open at a time, and once a segment has
been closed further output primitives cannot be added to it,
nor can the primitives in it be modified.
\index{non-retained data}
Any {\it non-retained} data output whilst there is no open
segment will be lost if the screen is cleared for some reason.
There are several circumstances when this may be useful.
For example, a message written on the screen may have only limited
validity, or one may wish to output a very complicated image with
too much data to be stored, or which needs only to be displayed once.
However, much of the power of GKS comes from the ability to
interactively modify the image {\it without} always having
to regenerate the complete picture.
To make use of this feature, primitives must be stored in segments.
\subsection{Segment Storage}
\index{segment!storage}
\index{WDSS}
\index{WISS}
Every workstation in a GKS output level 1 or 2 implementation has
associated with it a {\it Workstation Dependent Segment Store}
(WDSS). At the time a segment is created, it is stored in the WDSS of all
open and activated workstations.
The WDSS may be physically located in the Graphics Terminal,
in which case there can be a substantial improvement in performance.
Of course, care must be taken not to exceed the available memory space.
In addition to WDSS, GKS output level 2 implementations
have also a {\it Workstation Independent Segment Store} (WISS).
WISS is a mechanism for storing segments in a
workstation-independent way, and allows segments to be re-used
and manipulated on different workstations at different times.
WISS is treated like any other workstation, just like a terminal
for example, and if it is open and activated when a segment is created,
then that segment will be stored there
as well as on the WDSSs of the other active workstations.
For example, one might store an object in WISS at the same time
as displaying it on a terminal. Then, at a later time, it would
be possible to make a hardcopy of the object by extracting the segment(s)
in which it was stored from the WISS and copying it (them) to a plotter.
A transformation or segment deletion affects the segment wherever it is stored.
Before primitives are stored in WDSS or WISS they first undergo
the normalization transformation currently in force.
Also, when a segment is created, the current clipping rectangle
and clipping flags are stored in the segment; however, clipping is
not performed on the primitives when they are stored, but only when
the contents of the segment are output to a device.
\subsection{Segment Creation, Deletion, and Re-naming}
To use GKS segmentation, first a segment must be created by calling:
\index{GKS routine!{\protect\tt GCRSG}}
\begin{XMP}
CALL GCRSG(SGNA)
\end{XMP}
where SGNA is an integer segment name. A segment called SGNA will
be created on all active workstations, including WISS, and will remain
open and store all primitives and attributes which are output until one calls:
\index{GKS routine!{\protect\tt GCLSG}}
\begin{XMP}
CALL GCLSG
\end{XMP}
Only a single segment may be open at one time.
Segments may be renamed, or deleted on a particular workstation
or all workstations by the following calls:
\index{GKS routine!{\protect\tt GRENSG}}
\index{GKS routine!{\protect\tt GDSGWK}}
\index{GKS routine!{\protect\tt GDSG}}
\begin{XMP}
CALL GRENSG(SGNOLD, SGNNEW)
CALL GDSGWK(WKID, SGNA)
CALL GDSG(SGNA)
\end{XMP}
Once a segment has been deleted the same segment name may be re-used.
Note that the Clear Workstation function deletes all the segments
stored in WDSS on that workstation.
The following fragment of code shows the use of segments.
See also the example in Appendix on Page~\pageref{sec:ex3dv}.
\begin{XMP}
---------
---------
CALL gcrsg(1) ! Create segment 1
CALL objone ! Draw an object
CALL gclsg ! close segment
CALL gcrsg(2) ! Create segment 2
CALL objtwo ! Draw an object
CALL gclsg ! Close segment
---------
---------
---------
CALL guwk(wkid, gperfo) ! Update Workstation
---------
---------
\end{XMP}
\subsection{\protect\label{sec:segnam}Segment Names}
\index{segment!names}
As indicated above, the FORTRAN binding requires segment names to be
integers, and these may be restricted by an implementation to a particular
range of values (1-32763 for GKSGRAL). However, because segment names
are specified by integers does not imply that an implementation has to
make available as many segments as there are names, and the maximum
number of available segments is also an implementation-dependent feature
which may be much smaller than the maximum legal name.
(Thus, one can not assume that an implementation stores name {\it N}
as the {\it N}'th element of an array.)
GKS does not provide inquiry functions to obtain either of these maximum
values, so one must consult the relevant documentation.
The GKS standard provides a function, GQSGUS, to inQuire the set of SeGment
names in USe. However, as it is difficult to work with variable length
sets in FORTRAN, the FORTRAN binding has modified this function to allow
only the inquiry of the name of one element of the set at a time:
\begin{XMP}
CALL GQSGUS(N, ERRIND, NSG, SGNA)
\end{XMP}
where
\begin{DLtt}{123456}
\item[N]is the N'th segment in the set (an input parameter)
\item[ERRIND]is an error number
\item[NSG]is the total number of segments in use
\item[SGNA]is the name of the N'th segment
\end{DLtt}
Thus, in FORTRAN, it is necessary to call GQSGUS in a loop from 1 to NSG
in order to obtain a list of all the segment names in use.
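The following sketch shows such a loop (the output format is arbitrary,
and at least one segment is assumed to exist); the first call simply
obtains the total number of segments in use:
\begin{XMP}
C Obtain the number of segments in use (NSG), then loop
      CALL GQSGUS(1, ERRIND, NSG, SGNA)
      DO 10 N = 1, NSG
         CALL GQSGUS(N, ERRIND, NSG, SGNA)
         WRITE(*,*) 'Segment in use: ', SGNA
   10 CONTINUE
\end{XMP}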
Note that the function:
\begin{XMP}
CALL GQSGWK(WKID, N, ERRIND, NSG, SGNA)
\end{XMP}
works in a similar way to GQSGUS but provides the list of segment names
associated with workstation WKID.
\subsection{\protect\label{sec:segasi}Segment Association, Copying, and Insertion}
\index{segment!association}
\index{segment!copying}
\index{segment!insertion}
To view the contents of segments stored in WISS
it is necessary to transfer them to a real device.
This can be accomplished in three ways:
\begin{DLtt}{123456}
\item[Association]
Associate SeGment to WorKstation retrieves a segment stored in WISS
and sends it to the designated workstation as if this workstation had been
activated when the segment was first created.
It is thus stored in the WDSS of the designated workstation.
Nothing may be added to the segment.
There must be no open segment when this function is called.
\index{GKS routine!{\protect\tt GASGWK}}
\begin{XMP}
CALL GASGWK(WKID, SGNA)
\end{XMP}
\item[Copying]
Copy SeGment to WorKstation transforms the {\it contents}
of the designated WISS segment using the segment transformation
(described below), and then sends the result to the workstation specified.
The clipping volume of the copied segment is also sent to the
workstation. Note that the copied segment itself is not transferred;
its contents are just drawn on the output device {\bf without being
stored in WDSS}.
Thus, there must be no open segment when this function is called.
\index{GKS routine!{\protect\tt GCSGWK}}
\begin{XMP}
CALL GCSGWK(WKID, SGNA)
\end{XMP}
\item[Insertion]
INsert SeGment transforms the {\it contents} of the designated WISS
segment using both the transformation stored in the segment header
{\bf and} then the matrix provided in the call (MTX),
before inserting the results into the output stream as if
they had come directly from the application.
Thus, the output of Insert Segment goes to all activated workstations,
including the WISS, and will be stored in a segment if one is open.
All clipping rectangles and indicators, etc. are ignored.
The transformation matrix, MTX, may be produced using the routine
GEVTM as described below.
\index{GKS routine!{\protect\tt GINSG}}
\begin{XMP}
CALL GINSG(SGNA, MTX)
\end{XMP}
\end{DLtt}
The flow of graphics data between WISS, WDSS and the application
is shown in Figure~\ref{fig:dataflo}.
\begin{figure}[h]
\caption{Data Flow for GKS}
\label{fig:dataflo}
\end{figure}
\subsection{Segment Attributes}
\index{segment!attributes}
The appearance of segments depends on the following segment attributes:
\begin{UL}
\item segment transformation
\item visibility
\item highlighting
\item segment priority
\item detectability
\index{transformation!segment}
\end{UL}
Segment attributes may be modified after the segment has been closed,
and the changes will become visible on all active workstations
on which the segment has been stored. However, the timing of when these
changes take place is workstation-dependent, and may also be
affected by the deferral mode which is in force
(see sections on Page~\pageref{sec:defsta} and on Page~\pageref{grsgwk}).
This is because some workstations may require picture regeneration
to produce the new attribute state on the display.
\subsubsection{\protect\label{sec:segtfm}Segment Transformations}
\index{segment!transformation}
A {\it segment transformation} is a transformation of all the
coordinates within a segment and is performed by a 2~x~3
matrix stored in the segment header. It maps from NDC to NDC. As an
example of the use of segment transformations, consider a circuit
design application which has symbols for transistors, capacitors,
resistors, etc. Such an application would store each symbol in a
separate segment, and then call INsert SeGment specifying a
transformation matrix in order to duplicate a particular symbol at the
positions and orientations required.
When a segment is created GKS sets a default null transformation
which leaves the original coordinates unchanged.
Before setting a transformation it is necessary
to evaluate the transformation matrix by either using one's own
algorithm, or by using the routines:
\index{GKS routine!{\protect\tt GEVTM}}
\index{GKS routine!{\protect\tt GACTM}}
\begin{XMP}
CALL GEVTM(X0, Y0, DX, DY, PHI, FX, FY, SW, MXOUT)
and/or
CALL GACTM(MXIN, X0, Y0, DX, DY, PHI, FX, FY, SW, MXOUT)
\end{XMP}
GEVTM evaluates a matrix (MXOUT), whilst GACTM accumulates changes to
an existing matrix (MXIN).
Both routines require the definition of:
\begin{DLtt}{123456}
\item[X0, Y0]
(real) a fixed reference point about which 2D rotations take place.
\item[DX, DY]
(real) a translation (or shift) vector.
\item[PHI]
(real) an angle of rotation about X0, Y0.
\item[FX, FY]
(real) X and Y scale factors.
\item[SW]
(enumerated) a switch specifying whether the reference point and
shift vector are given in World Coordinates
or Normalized Device Coordinates ('GWC' or 'GNDC').
\end{DLtt}
The transformation is composed in the order: scale, rotate, shift.
In the case of GACTM, the matrix MXIN is pre-concatenated with that
formed from the scale, rotate, and shift parameters, so
MXOUT~=~SHIFT~*~ROTATE~*~SCALE~*~MXIN.
Once the transformation matrix has been evaluated, it may then be
Set in the SeGmenT by calling the routine:
\index{GKS routine!{\protect\tt GSSGT}}
\begin{XMP}
CALL GSSGT(SGNA, MTX)
\end{XMP}
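For example, the following sketch rotates the contents of segment 1 by
45 degrees about the point (0.5,~0.5) and shrinks them to half size.
It is assumed that the rotation angle is given in radians, and that the
constant GNDC comes from the enumeration include file:
\begin{XMP}
      REAL MXOUT(2,3)
C Scale by 0.5 and rotate by 45 degrees (pi/4 radians)
C about the NDC point (0.5, 0.5); no shift
      CALL GEVTM(0.5, 0.5, 0.0, 0.0, 0.7854,
     +           0.5, 0.5, GNDC, MXOUT)
      CALL GSSGT(1, MXOUT)
\end{XMP}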
An example of using a segment transformation may be
found in Appendix on Page~\pageref{sec:ex3dv}.
\subsubsection{Visibility}
\index{segment!visibility}
Segment {\it VISibility} (GSVIS) determines whether or not the
segment is displayed; the default is for the segment to be visible.
The values are 'GINVIS' or 'GVISI'.
As an example of its use, messages or icons could be created in segments
which would normally be invisible, but which could be made visible
at the appropriate time by the application program. The call is:
\index{GKS routine!{\protect\tt GSVIS}}
\begin{XMP}
CALL GSVIS(SGNA, GVISI)
\end{XMP}
Note that if a segment is invisible it is not detectable
(cannot be picked), even if detectability is enabled.
Also, even if a segment has visibility enabled, it may not actually be
drawn if the deferral state is set to something other than 'GASAP'
(see section on Page~\pageref{sec:defsta}).
\subsubsection{Highlighting}
\index{segment!highlighting}
Many display systems have some means for {\it highlighting}
graphical output, e.g. by making it brighter, or by causing it to blink.
The implementation of this attribute is dependent on the device
being used. Note that in order for a Segment to be HighLIghTed (GSHLIT),
it must first be made visible. The default is not highlighted.
The possible values are 'GNORML' or 'GHILIT'. The call is:
\index{GKS routine!{\protect\tt GSHLIT}}
\begin{XMP}
CALL GSHLIT(SGNA, GHILIT)
\end{XMP}
\subsubsection{Priority}
\index{segment!priority}
SeGment {\it Priority}, a real number in the range 0.0 to 1.0,
enables the control of the order in which segments are re-drawn when
the picture has been changed.
It also controls the order in which segments are picked.
If parts of segments overlap, then the segment with the highest
priority will be picked first and displayed on top of all the others.
If two segments with the same priority occupy the same area of the display
surface, then the last one will be drawn on top.
The default priority is 0.0. The call is:
\index{GKS routine!{\protect\tt GSSGP}}
\begin{XMP}
CALL GSSGP(SGNA, PRIOR)
\end{XMP}
The number of segment priority levels available (NSGP) for a particular
workstation, WTYPE, may be inquired using:
\index{GKS routine!{\protect\tt GQSGP}}
\begin{XMP}
CALL GQSGP(WTYPE, ERRIND, NSGP)
\end{XMP}
\subsubsection{Detectability}
\index{segment!detectability}
Segment {\it DeTECtability} controls which segments can be
picked by the operator using a Logical Pick Device, and which ones cannot.
Values are 'GUNDET' or 'GDETEC'.
If a segment is detectable, it can be picked.
The default setting is un-detectable. The call is:
\index{GKS routine!{\protect\tt GSDTEC}}
\begin{XMP}
CALL GSDTEC(SGNA, GDETEC)
\end{XMP}
Note that if a segment is invisible it is not detectable
(cannot be picked), even if detectability is enabled.
\subsection{The Pick Identifier}
\index{pick input}
\index{pick identifier}
It is possible to attach an integer {\it Pick Identifier} to primitives
which are stored in segments using the routine:
\index{GKS routine!{\protect\tt GSPKID}}
\begin{XMP}
CALL GSPKID(PCID)
\end{XMP}
This inserts one or more Pick Identifiers (PCIDs) into a
segment, and these are associated with the subsequent primitives.
If one of these primitives is picked sometime later using a Logical Pick
input device (see section on Page~\pageref{sec:inphdr}), then one of the
pieces of information returned to the application is its PCID.
As the application is free to choose the value of the Pick Identifier,
this provides a second level of naming,
and provides a mechanism to refer back to an application database.
For example, if the identifiers 1 to 4 were attached to the four wheels
of a car, then picking wheel number three would return the identifier
'3' to the application program.
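The following sketch illustrates the idea (WHEEL1 and WHEEL2 are
hypothetical application routines which output the primitives for
each wheel):
\begin{XMP}
      CALL GCRSG(5)      ! Create segment 5
      CALL GSPKID(1)     ! PCID 1 for following primitives
      CALL WHEEL1        ! (hypothetical drawing routine)
      CALL GSPKID(2)     ! PCID 2 for following primitives
      CALL WHEEL2        ! (hypothetical drawing routine)
      CALL GCLSG         ! Close segment
\end{XMP}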
\subsection{\protect\label{sec:grsgwk}Segment Re-drawing}
\index{segment!re-drawing}
It is possible to force all segments within the WDSS on a particular
workstation to be re-drawn by calling the routine
Re-draw SeGments on WorKstation:
\index{GKS routine!{\protect\tt GRSGWK}}
\begin{XMP}
CALL GRSGWK(WKID)
\end{XMP}
\index{non-retained data}
The routine clears the screen, performs all deferred actions,
and re-draws all segments. All non-retained data is lost.
Possible reasons to re-draw all segments are:
\begin{OL}
\item if a segment had been over-written or deleted and it is
desired to regenerate a clean image;
\item if one wishes to remove all non-retained data,
system messages, etc;
\item if, on this workstation, image regeneration is required in order to
display the effect of modifying a segment attribute, and implicit
regeneration is switched off.
\end{OL}
The action of this routine differs from that of
Update WorKstation (GUWK), which may or may not re-draw segments
which have not been changed.
\section{Graphical Input}
\subsection{\protect\label{sec:inphdr}Logical Input Devices}
\index{input}
\index{logical input devices}
\index{input!classes}
\index{input!device numbers}
GKS organizes data that can be input to an applications program
into six types, each related to a {\it Logical Input Device}.
The actual physical input devices are mapped onto these logical
devices, which makes it possible for GKS to organize the different
forms of data in a device-independent way, and thus helps to make the
code more portable.
A logical input device is identified by 3 items:
\begin{OL}
\item a workstation identifier
\item an input class
\item a device number
\end{OL}
The six input classes and the logical input values they provide are:
\begin{DLtt}{123456}
\item[LOCATOR]
\index{locator input}
\index{transformation!locator input}
\index{viewport}
Returns a position (an x,y value) in World Coordinates
and a Normalization Transformation number corresponding to that
used to map back from Normalized Device Coordinates to World Coordinates.
The NT used corresponds to that viewport with the highest
{\it Viewport Input Priority} (set by calling GSVPIP).
{\bf Warning:} {\it If there is no viewport input priority set then
NT 0 is used as default, in which case the coordinates are
returned in NDC.} This may not be what is expected!
\index{GKS routine!{\protect\tt GSVPIP}}
\begin{XMP}
CALL GSVPIP(TNR, RTNR, RELPRI)
\end{XMP}
\begin{DLtt}{123456}
\item[TNR]Transformation Number
\item[RTNR]Reference Transformation Number
\item[RELPRI]
One of the values 'GHIGHR' or 'GLOWER' defined in the Include File,
ENUM.INC, which is listed in the Appendix on Page~\pageref{sec:hdenum}.
\end{DLtt}
\item[STROKE]
\index{stroke input}
Returns a sequence of (x,y) points in World Coordinates
and a Normalization Transformation as for the Locator.
\item[VALUATOR]
\index{valuator input}
Returns a real value, for example, to control some sort
of analogue device.
\item[CHOICE]
\index{choice input}
Returns a non-negative integer which represents a choice from a
selection of several possibilities. This could be implemented as a
menu, for example.
\item[STRING]
\index{string input}
Returns a string of characters from the keyboard.
\item[PICK]
\index{pick input}
Returns a segment name and a pick identifier of an object pointed
at by the user. Thus, the application does not have
to use the locator to return a position, and then try to find out
to which object the position corresponds.
\end{DLtt}
\subsection{Prompt/Echo Types and Triggers}
\index{prompt/echo type}
\index{trigger}
\index{input!prompt/echo type}
\index{input!trigger}
A {\it Prompt} and an {\it Echo} type is defined for
each logical input device.
For example, enabling the pick device might
prompt with a cursor of a particular shape which would track
the tablet or mouse to constitute an echo.
Acceptance of a {\it trigger} by the application,
hitting a key, for example, causes feedback
via an {\it acknowledgment} process.
Thus, the picked object could be made to blink.
Different prompt/echo types can be set when initializing the device.
They are implementation and workstation-dependent,
so see the relevant reference manual for details.
\subsection{Input Modes}
\index{input!modes}
\index{input!request}
\index{input!sample}
\index{input!event}
\index{request input}
\index{sample input}
\index{event input}
Logical input devices can be operated in three modes:
{\it Request}, {\it Sample}, and {\it Event}
('GREQU', 'GSAMPL', 'GEVENT').
In Request mode the application enables a device and then waits
for the user to trigger input, perhaps by pushing a key.
This is similar to issuing a FORTRAN {\it READ}.
In Sample mode the application can {\it measure}
the current value of the device, for example the locator position,
without waiting for a trigger. Finally, in Event mode, the application
can enable a set of several logical devices simultaneously.
Output measures from devices which have been triggered will be
placed in an {\it Event Queue}, from whence they can be
extracted by the application.
Note that in GKS input level 'b' only Request mode input is available,
and that it is not possible to request GKS input
and issue a FORTRAN I/O operation at the same time.
(However, although not required by the standard, the GKSGRAL level 'b'
implementation provides Sample input for some devices.)
\subsection{Request Input Functions}
\index{GKS routine!{\protect\tt GRQCH}}
\index{GKS routine!{\protect\tt GRQLC}}
\index{GKS routine!{\protect\tt GRQST}}
\index{GKS routine!{\protect\tt GRQSK}}
\index{GKS routine!{\protect\tt GRQVL}}
\index{GKS routine!{\protect\tt GRQPK}}
To request input, one of the following calls needs to be made;
a short usage sketch follows the list.
\begin{DLtt}{123456}
\item[Locator]CALL GRQLC(WKID, LCDNR, STAT, TNR, PX, PY)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier
\item[LCDNR](I) The number of the logical locator device (usually 1).
\item[STAT](O) Error status (integer)
\item[TNR](O) The Normalization Transformation number used to
convert the input position to World Coordinates.
\item[PX,PY](O) The returned coordinates in WC.
\end{DLtt}
\item[String]CALL GRQST(WKID, STDNR, STAT, LOSTR, STR)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier
\item[STDNR](I) The number of the logical string device (usually 1).
\item[STAT](O) Error status (integer)
\item[LOSTR](O) string length
\item[STR](O) The returned character string
\end{DLtt}
\item[Stroke]CALL GRQSK(WKID, STDNR, N, STAT, TNR, NPX, PX, PY)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier
\item[STDNR](I) The number of the logical stroke device (usually 1).
\item[N](I) Maximum number of points (size of point arrays).
\item[STAT](O) Error status (integer)
\item[TNR](O) The Normalization Transformation number used to
convert the input position to World Coordinates.
\item[NPX](O) Number of points returned.
\item[PX,PY](O) The returned coordinate arrays (WC).
\end{DLtt}
\item[Valuator]CALL GRQVL(WKID, VLDNR, STAT, VAL)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier
\item[VLDNR](I) The number of the logical valuator device (usually 1).
\item[STAT](O) Error status (integer)
\item[VAL](O) The returned value (real number).
\end{DLtt}
\item[Choice]CALL GRQCH(WKID, CHDNR, STAT, CHNR)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier
\item[CHDNR](I) The number of the logical choice device (usually 1).
\item[STAT](O) Error status (integer)
\item[CHNR](O) The returned choice number (integer).
\end{DLtt}
\item[Pick]CALL GRQPK(WKID, PCDNR, STAT, SGNA, PCID)
\begin{DLtt}{123456}
\item[WKID](I) Workstation identifier.
\item[PCDNR](I) The number of the logical pick device (usually 1).
\item[STAT](O) Error status (integer)
\item[SGNA](O) Picked segment name.
\item[PCID](O) Pick identifier (integer set by GSPKID).
\end{DLtt}
\end{DLtt}
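For example, the following sketch requests a locator position and
draws a marker at the point chosen. The status value GOK, indicating
successful input, is assumed to come from the enumeration include file:
\begin{XMP}
      CALL GRQLC(WKID, 1, STAT, TNR, PX, PY)
      IF (STAT .EQ. GOK) THEN
C Select the NT used for the input, then mark the point
         CALL GSELNT(TNR)
         CALL GPM(1, PX, PY)
      ENDIF
\end{XMP}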
\subsection{Initializing Logical Input Devices}
\index{initializing logical input devices}
For each of the input classes there is an initialization function which
can change the values set as default by GKS for use in Request Mode
(and which should be called after the mode has been set).
The function calls are:
\index{GKS routine!{\protect\tt GINCH}}
\index{GKS routine!{\protect\tt GINLC}}
\index{GKS routine!{\protect\tt GINST}}
\index{GKS routine!{\protect\tt GINSK}}
\index{GKS routine!{\protect\tt GINVL}}
\index{GKS routine!{\protect\tt GINPK}}
\begin{DLtt}{123456}
\item[Initialize locator]CALL GINLC(...)
\item[Initialize string]CALL GINST(...)
\item[Initialize stroke]CALL GINSK(...)
\item[Initialize valuator]CALL GINVL(...)
\item[Initialize choice]CALL GINCH(...)
\item[Initialize pick]CALL GINPK(...)
\end{DLtt}
\index{echo area}
\index{normal!transformation}
\index{transformation!input initialization}
For all the input classes the echo area can be changed
(i.e. the portion of the display surface where the prompt appears and
which can accept input from the operator).
For Locator and Stroke input the initialization function sets the
initial normalization transformation and the initial locator position.
The initialization functions also select the required prompt/echo type.
A detailed description of the initialization functions is outside the scope
of this Primer, so those readers who wish to dig somewhat deeper should
consult the reference manual for the GKS implementation being used.
An example using GKSGRAL may be found in
Appendix on Page~\pageref{sec:iinput}).
\subsection{Input Inquiry Functions}
\index{input!inquiry functions}
There are two types of inquiry functions for GKS input. The first
is {\it Inquire Device State}, and the information is obtained by
calling GQCHS, GQLCS, etc.
The second is {\it Inquire Default Device Data} and the
information is obtained by GQDVL, GQDST etc. There is also a function
GQLI which can inquire the number of available logical input devices.
A detailed description of these functions is outside the scope of this
Primer, and so for more information the reader is referred to the
GKSGRAL manual or one of the texts in the bibliography.
\chapter{GKS Metafiles}
\index{metafiles}
GKS provides metafiles for the storage of graphical information.
Their principal uses are:
\begin{OL}
\item transporting graphical information between computer systems
\item transporting graphical information from one site to another
(by magnetic tape for example)
\item device spooling, e.g. for a plotter
\end{OL}
\index{Appendix E metafile}
There is no official ISO standard for writing a GKS metafile.
However, in Appendix E of the ISO GKS Functional Description
document a metafile format is described, and its use is recommended.
A GKS metafile created using this format is
known as an Appendix E metafile.
Unfortunately, not all implementations follow the Appendix E format,
and so metafiles created by different GKS packages may be incompatible.
In fact, even different examples of Appendix E metafiles may be
incompatible due to variations in the file record lengths, etc.
\section{Writing Metafiles}
A GKS metafile is produced by a standard GKS output-only workstation.
The workstation must first be 'opened' (GOPWK), then 'activated' (GACWK),
and all graphical information following these calls is recorded on the
metafile as a series of items (listed in an appendix of the
{\it GKS/GKS-3D Primer})
until a 'deactivate workstation' is encountered (GDAWK).
Hence the application must control it in the same way as a terminal
or a plotter. Clearly, some of the workstation inquiry functions,
such as Inquire Text Extent, can not be used because this knowledge
depends on the device(s) ultimately chosen to interpret the metafile.
Thus, a GKS metafile does not record a complete 'picture' so much
as all the actions performed to make such a picture. If the application
deletes segments and then draws different ones, all this will be recorded
on the metafile if it is active.
This last point has a number of ramifications. Because the metafile
{\it captures} all the output generated whilst it is active,
it could be used to record a dynamic sequence, like a cartoon,
as long as it is re-interpreted onto a dynamic display system.
However, if the object of the exercise is to interpret the metafile onto
a hardcopy device, it is not clear what the interpreter would be expected
to do with, for example, a Delete Segment operation!
Thus, for this kind of use, the metafile workstation should only be
activated when a finished picture is ready to be output, perhaps by
copying the image from WISS. A classic mistake is to leave the metafile
workstation activated whilst one works interactively on a terminal,
and then to crash when all the available disc space has been used up.
\index{GKS routine!{\protect\tt GOPWK}}
\index{GKS routine!{\protect\tt GACWK}}
\index{GKS routine!{\protect\tt GDAWK}}
\index{GKS routine!{\protect\tt GCLWK}}
\index{conid}
\index{connection identifier}
To open an Appendix E metafile workstation the call is:
\begin{XMP}
CALL GOPWK(WKID, CONID, WTYPE)
\end{XMP}
where the parameter WTYPE specifies the metafile workstation which is
defined in the documentation for the GKS implementation in use
(see Appendix on Page~\pageref{sec:gtstyp} for the GKSGRAL values).
\index{VAX!metafile open}
\index{IBM!metafile open}
The metafile will be written to the logical unit number corresponding
to the connection identifier (conid) in the GOPWK call
(see section on Page~\pageref{sec:conref} for legal values).
On VM/CMS, a conid of XX with no OPEN or FILEDEF statements would result
in a metafile name 'FILE~FTXXF001'.
A convenient way to declare FILEDEFs from a FORTRAN program is to use
the CERN library routine VMCMS (code Z305).
However, Version 2.3 of the VM/CMS FORTRAN compiler provides the
possibility to specify the file name in the OPEN statement,
and this is now the recommended mechanism. A '/' is required
before the filename:
\begin{XMP}
OPEN(UNIT=CONID, FILE='/filename filetype filemode', STATUS='NEW')
\end{XMP}
On VAX/VMS the metafile can be written to a file which should be opened
prior to the call to GOPWK. If there is no OPEN statement, the metafile will
be written to FOR0XX.DAT, where XX is the specified conid, unless
FOR0XX is defined as a logical name.
Under VMS, the OPEN statement should read:
\begin{XMP}
OPEN(UNIT=CONID, FILE='filename.ext', STATUS='NEW')
\end{XMP}
On APOLLO the OPEN statement is mandatory with a format as above.
On the CRAY the OPEN statement is optional. If it is missing, then
the file will be given the name 'fort.N' where N is a number from 1 to
99 corresponding to the connection id. Note that unlike on VMS,
a value of N from 1 to 9 has no leading 0.
If a file name has not been defined via the open statement, then one can
be assigned using the command 'assign~-a~myname~fort.N'.
\section{Shipping Metafiles}
One of the fundamental uses of a metafile is that it can
be stored in a device independent manner and transferred to
other hosts and other sites where it can be subsequently interpreted.
Metafiles at CERN are normal text files written in 80 character records,
which makes transfers between host computers straightforward.
However, it should be mentioned that for transfers from VAX/VMS to VM/CMS
using NFT, the qualifier '/CR' is essential if the file has not been
opened using the parameter CARRIAGECONTROL='LIST'.
The REXX exec below gives an example of an appropriate metafile transfer
using interlink:
\begin{XMP}
/* Interlink metafile transfer */
'exec nft receive vxcern::disk$gg:[userid]gts.met gts metafile a/cr'
\end{XMP}
\section{GRVIEW and the Interpretation of Metafiles}
\index{metafile!interpreter}
\index{interpreting metafiles}
\index{GRVIEW}
\index{editor}
To use the graphical information stored in the metafile it must be
interpreted so that the original picture can be re-created on whatever
device the application requests.
GKS provides three functions in order to interpret metafiles,
and these may be called by applications which require to manipulate
the contents of a metafile explicitly:
\index{GKS routine!{\protect\tt GRDITM}}
\index{GKS routine!{\protect\tt GGTITM}}
\index{GKS routine!{\protect\tt GIITM}}
\begin{DLtt}{123456}
\item[GGTITM]get item from metafile
\item[GRDITM]read item from metafile
\item[GIITM]interpret item from metafile
\end{DLtt}
In order to view a GKS metafile on a terminal a program is available
called {\bf GRVIEW}.
This is an interactive program which allows the user to view and/or edit
the pictures (frames) in one or more metafiles.
The pictures may be viewed sequentially,
or it is possible to skip frames to get to a particular picture.
If the frames have been named using the {\bf GCNAME} function,
then it is possible to search for pictures by name, and also to make
a directory list of all pictures on the metafile.
\index{PostScript}
After viewing a picture the user has the possibility to write it out
onto a GKS metafile, or in various other formats, including PostScript
and Tektronix 4014 escape codes.
This allows sub-sets of the pictures on the original file to be produced.
There is also a feature provided which allows the position and scale of the
Workstation Viewport to be re-set. Thus, the size of the output picture can
be changed to suit better a particular device.
Without being quite so sophisticated, GRVIEW attempts to provide the sort
of capabilities available on commercial products such as {\it MACDRAW},
except that GRVIEW runs on VAX, APOLLO, and IBM computers with any GKS
supported terminal. Having said this, it should be clear that the
'user friendliness' of an editor is much greater on a colour APOLLO
or VAX workstation, or on a Tektronix 4207, than on a machine with only
the capabilities of a Tektronix 4014.
GRVIEW operates in one of three modes, which are selected on the command line:
\begin{OL}
\item VIEWING mode is the default. In this case GRVIEW is used to allow the
pictures on the metafile to be displayed on a graphics terminal.
In this pure viewing mode GRVIEW does not make use of any internal picture
storage and so will run more efficiently.
\item COPY mode also allows the input metafile to be displayed, but in
addition frames may be selected for copying onto an output file.
This may be another metafile, a PostScript file, or a file containing
Tektronix 4014 escape sequences. In copy mode the contents of each picture
may not be changed, but each picture may be named (if it is not already),
scaled in size, and several pictures may be packed onto a single page.
\item EDIT mode allows the pictures on an input metafile to be edited,
or for a completely new picture to be generated from scratch.
\end{OL}
Depending on the system in use, GRVIEW allows parameters to be provided
on the command line, or via an interactive dialogue. The program also
prompts the user to provide any missing information.
The user now {\bf must} specify which terminal type is
being used, because the diversity of incompatible terminals available
does not permit a useful default to be chosen.
There are also features which warn the user if too many GKS errors have
been produced (to avoid filling up the user's file space with messages),
and which allow VM/CMS users to regain control after a pre-set number of
graphics operations as VM has no simple interrupt facility.
\index{HELP}
\index{FIND}
More details of how GRVIEW operates may be found in reference
\cite{bib-grref}, or by typing {\bf HELP~GRVIEW} or
{\bf FIND~GRVIEW}.
\section{GRCONV and the Conversion of Metafiles}
\index{converting metafiles}
\index{GRCONV}
As described in section on Page~\pageref{sec:mhcpref}, metafiles may be used for
the production of hardcopy output via the command {\bf GRPLOT}.
However, there are output devices for which there may not be support within
GRPLOT, or it may be desired to combine pictures into documents existing
in a particular output format or Page Description Language.
For these and other reasons it is often useful to be able to transform
a metafile into a different format, and to accomplish this the utility
{\bf GRCONV} has been written. Note, however, that keeping the
original picture in the form of a metafile is the most flexible, as
this does not place any restrictions on its final use.
GRCONV converts the input metafile to a new format which is stored on one
or more output files. For example, GRCONV can produce as output
normal or Encapsulated PostScript files, files of Tektronix 4014%
\footnote{Will be installed on VM/CMS if there is a demand.}
escape sequences, or bit maps in IBM~3812 format.
The PostScript files may be printed at CERN using the {\bf XPRINT}
command, as well as being used as a mechanism for the transmission of pictures
for printing at external institutes.
Pictures encoded in Encapsulated PostScript format
(see section on Page~\pageref{sec:epsref})
also may be incorporated into documents produced by SGML, BookMaster,
or TeX text processing systems.
The Tektronix 4014 escape sequences may be used to drive some laser printers,
including the DEC LN03, which do not support PostScript.
GRCONV operates in one of three modes: {\bf Interactively}, in which it
prompts for input and allows a selection of pictures chosen by commands from
an alpha-numeric terminal to be converted; {\bf Batch}, in which all the
relevant parameters must be given on the command line and in which the whole
metafile is converted; and {\bf Remote-Batch} which is similar to the
last case but in which the command is executed on a remote machine.
Thus, whilst Interactive and Batch modes run locally, the
Remote-Batch mode allows the input metafile to be converted into a form
which may only be available on the remote host. For example, this feature
allows GRCONV to subsume the old GKSSGML by producing a set of IBM-3812
bitmap files on CERNVM.
\index{HELP}
\index{FIND}
More details of how GRCONV operates may be found in reference
\cite{bib-grref}, or by typing {\bf HELP~GRCONV} or
{\bf FIND~GRCONV}.
\chapter{\protect\label{sec:hcopy}Making Hardcopies}
\index{hardcopy}
It is possible to make hardcopy plots either interactively with GKS or via
a metafile. Some devices accept acetate sheets, which allow transparencies
to be produced for overhead projectors.
The GKS plotter or metafile workstations operate just like any other,
with an important exception. Namely, functions such as 'Delete Segment'
will not undraw what has been output on the paper!
Thus, when making a hardcopy, it is suggested that the picture to be plotted
is produced first interactively on a terminal screen and stored in
WISS (Workstation Independent Segment Storage). During this time
the plotter or metafile workstation should be deactivated.
When the picture is complete, and does not require further modification,
then the plotter or metafile workstation may be activated and the
WISS segments containing the picture sent to it by calling
Associate SeGment to WorKstation (GASGWK).
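As an illustration, the following is a minimal sketch of this procedure.
It assumes that the plotter has already been opened with the (illustrative)
workstation identifier WKPLOT, that the terminal and WISS are open and
active, and that segment name 1 is free:
\begin{XMP}
C Build the picture in a segment; it is stored in WISS and
C displayed on the terminal, whilst the plotter is inactive
CALL GCRSG(1)
C ... output primitives and attributes ...
CALL GCLSG
C Picture complete: activate the plotter, copy the segment
C to it, and deactivate the plotter again
CALL GACWK(WKPLOT)
CALL GASGWK(WKPLOT, 1)
CALL GDAWK(WKPLOT)
\end{XMP}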
\section{Hardcopies via an Interactive Program}
Plotter output may be produced by an interactive program running on a
machine directly connected to a hardcopy device.
For example, GKSGRAL at CERN supports Hewlett Packard pen plotters,
VERSATEC electrostatic and thermal plotters, and any laser printer
\index{VERSATEC}
\index{PostScript}
driven via the PostScript interface. (See the latest version of the
include file {\it GTSDEV} for a complete up-to-date
list of supported devices.)
In the case of VERSATEC or PostScript,
an intermediate plot file is generally produced
which must be queued to the device.
However, use of an HP plotter normally assumes that the device is connected
directly in parallel with the terminal line; it is activated by special
escape sequences sent by the driver. It is also possible, however, to produce
an HP plotter file by specifying a connection identifier of
conid~=~(100~+~n), where n is a small integer greater
than 1. This causes the output to be written to FORTRAN Logical Unit number n,
and the file has the name PLxxxx.PLT, where 'xxxx' indicates the
workstation type. Some laser printers do not support
PostScript but do, in fact, support the Tektronix 4014 protocol.
In this case it is possible to capture the 4014 escape codes
on a file which is later sent to the printer. In any case,
to capture the graphics on a file, first open a file on unit n,
and then open the corresponding GKS
workstation with a conid of (100~+~n).
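For example, the following sketch captures the output on a file attached
to unit 20; the unit number, file name, and the variables WKID and WTYP
are illustrative values only:
\begin{XMP}
INTEGER N
PARAMETER (N = 20)
C Open the plot file on FORTRAN unit N ...
OPEN(UNIT=N, FILE='PLOT.PLT', STATUS='NEW')
C ... then open the workstation with conid = 100 + N
CALL GOPWK(WKID, 100+N, WTYP)
\end{XMP}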
Note that use of a hardcopy device via an interactive program has the
advantage that the application can inquire the specific device
characteristics, and thus tailor the output accordingly.
\section{\protect\label{sec:mhcpref}Hardcopies via a Metafile}
\index{GRPLOT}
\index{GKSVT}
\index{GKSCP}
\index{GKSX87}
\index{GKS3812}
\index{APA 6670}
\index{VERSATEC}
\index{XEROX}
\index{PostScript}
\index{IBM!metafile hardcopy}
\index{IBM!3812}
Using a metafile it is possible to make hardcopy plots on devices connected
to the central computing facilities. The devices currently supported
include:
\Lit{$==>$} To be updated ...
\begin{UL}
\item VERSATEC Model CE 3236E (colour with 36 inch roll paper, 400 dpi)
\item VERSATEC Versacolor Model VE2700 (white and transparent A4 cut sheet)
\item XEROX Model 4050 Laser Printer (A4 cut sheet)
\item IBM 3812 Laser Printers (A4 cut sheet)
\item PostScript printers, such as Apple Laser Writers, etc.
\end{UL}
It is not necessary to be logged on to one of the computer centre machines
to use these services. The command {\bf GRPLOT} may be used to output
metafiles on any of the central plotting devices from all centrally supported
machines at CERN connected via either the DECNET or TCP/IP protocols,
as the command first transfers the metafile over the network
to the correct destination.
The GRPLOT command has replaced the previous
collection of GKSVT, GKSCP, GKSX87, and GKS3812;
rather than having a separate command for each plotter, the output device is
provided as a parameter. The mechanism for naming output devices is the
same as that for the latest version of XPRINT.
\index{HELP}
\index{FIND}
More details of how GRPLOT operates may be found in reference
\cite{bib-grref}, or by typing {\bf HELP~GRPLOT} or
{\bf FIND~GRPLOT}.
\begin{note}
The PostScript and VERSACOLOR colour plotters produce output on
A4 cut sheets. However, the paper and ink donor rolls are expensive.
Thus, users are asked to use the devices only for the final
version of their plots, and not whilst debugging programs.
Plots cannot be released automatically, and users have to release them by
hand using a terminal next to the device
which is installed in the user area on the ground floor of
building 513.
\end{note}
\section{\protect\label{sec:sgmlgra}Mixed Text and Graphics}
\Lit{$==>$} To be updated ...
\subsection{Via SGML/BookMaster}
\index{SGML}
\index{PostScript}
\index{BookMaster}
\index{GRCONV}
A version of the document markup language SGML layered on top of the
IBM BookMaster product \cite{CERNSGM1} is implemented on the central
VM/CMS service and supports the inclusion of pictures into compound
documents.
The syntax of SGML/BookMaster is not identical to that of the original
Waterloo/Script based SGML implementation available at CERN, but the procedure
is similar. Namely,
\begin{OL}
\item Use a graphics application program, including calls to GCNAME,
to produce a metafile.
\item Run GRCONV to produce the set of picture files.
\item Mark-up a document with SGML/BookMaster and associate the picture files
with particular figures within the document.
\item Run SGML/BookMaster on the document.
\item Edit the document as necessary and repeat previous step until
result is satisfactory.
\end{OL}
Note that it is not necessary to repeat the first two steps every time
SGML/BookMaster is used to re-process the document as long as the picture
files are stored on the user's mini-disk.
The GRCONV command can be used to generate the picture files,
but as it is possible to print the resulting document on either IBM-3812
compatible or PostScript printers the user must take care to select
the correct output format.
As it would be cumbersome to require every picture to be stored in
its own metafile and processed separately, GRCONV can handle metafiles
which contain several pictures (separated by 'CLEAR WORKSTATION' items),
and be instructed to produce a separate output file for each picture on the
metafile.
If the user is not working under VM/CMS%
\footnote{Care must be taken to ensure that there is no conflict
in having GRCONV write to a VM/CMS mini-disk which is accessed in
write mode by another process. The user should have a write password
on a mini-disk available for access by GRCONV in write-mode},
GRCONV first transfers the metafile to the user's VM account, and then
interprets it to produce the required set of output files.
Note that GRCONV replaces the command GKSSGML, and has a wider range of
features.
A complete job to print a PostScript picture is given below.
PostScript pictures can be scaled at will, but if a {\bf BoundingBox}
command is present in the Encapsulated PostScript file,
the WIDTH parameter of the {\bf ARTWORK} tag is redundant,
and the size specified by the BoundingBox inside the file, or by the
DEPTH attribute, will be used. In order to avoid conflicts in the X and Y scaling,
users are advised not to specify both DEPTH {\it and} WIDTH.
\begin{XMP}
<!DOCTYPE USERDOC SYSTEM "USERDOC DTD *" [
<!-- Declare the Encapsulated PostScript file -->
<!ENTITY FIGURE1 SYSTEM "MYFIG1$S EPS" CDATA EPS>
]>
<USERDOC>
<PROLOG>
<DOCPROF>
</PROLOG>
<BODY>
<... some text
<ARTWORK NAME=FIGURE1 DEPTH=17CM ALIGN=CENTER>
\end{XMP}
The procedure for output onto an IBM-3812 compatible printer
(IBM-3812 or IBM-3816) is similar, but in this case
the graphics image is stored not in an Encapsulated PostScript
file but in a PSEG3820 image file which cannot be scaled.
\begin{XMP}
<!DOCTYPE USERDOC SYSTEM "USERDOC DTD *" [
<!-- Declare the PSEG file -->
<!ENTITY FIGURE1 SYSTEM "MYFIG1$S PSEG3820 *" NDATA PSEG>
]>
<USERDOC>
<PROLOG>
<DOCPROF>
</PROLOG>
<BODY>
\chapter{Title text}
<ARTWORK NAME=FIGURE1>
\end{XMP}
Note that the figure name {\it FIGURE1} specified by the {\bf NAME}
attribute of the {\bf ARTWORK} tag is defined at the beginning of the
file via the {\bf ENTITY} declaration. Thus, the actual file name
on the user's disk and the name of the figure in the text are decoupled;
one can change the figure used by changing the ENTITY declaration
and without modifying the body of the text.
There should be one picture file and one ENTITY declaration for each figure.
SGML/BookMaster does not place restrictions on the file names used,
but the original CERN implementation of SGML did. Thus, for backwards
compatibility, GRCONV still generates file names conforming
to the old convention: the file name must contain
eight (8) characters terminating in an 'S'.
As described in section on Page~\pageref{sec:gcnref},
if the name supplied by the application
which calls GCNAME provides less than 7 characters, then GCNAME pads the name
out with \Lit{'$'}s. Hence \Lit{'MYFIG1'} becomes \Lit{'MYFIG1$S'}.
If GCNAME was not used by the application to write names for
each figure onto the metafile then GRCONV will generate names
for each picture file automatically. The algorithm used is described
in \cite{bib-grref}.
\subsection{Via Waterloo/Script SGML}
\Lit{$==>$} KEEP it ???
\index{SGML}
\index{GGRCONV}
The original version of the document markup language SGML \cite{bib-sgmlref}
implemented on the central VM/CMS service supports the inclusion of pictures.
However, note that this SGML implementation is being replaced
by a new one layered on top of the IBM BookMaster product (see above).
For each picture to be included when using the IBM-3812
output devices SGML requires two files;
one to define the amount of space to be reserved within the document,
and one containing the actual bit-map. These files must be accessible to
SGML on one of the user's mini-disks.
As described in \cite{bib-sgmlref}, the files may originate from several
sources. This section will discuss how to use the {\bf GRCONV} command to
produce them from a GKS metafile and store them on the specified mini-disk.
The sequence of operations would be:
\begin{OL}
\item Use a graphics application program, including calls to GCNAME,
to produce a metafile.
\item Run GRCONV to produce the set of picture files and store them on
a user disk.
\item Mark-up a document with SGML and associate the picture files
with particular figures within the document.
\item Run SGML on the document.
\item Edit the document as necessary and repeat previous step until
result is satisfactory.
\end{OL}
Note that it is not necessary to repeat the first two steps every time
SGML is used to re-process the document so long as the picture files
are stored on the user's disk.
An example of the SGML syntax necessary to include a figure corresponding
to a pair of picture files would be:
\begin{XMP}
<FIG>
<PICTURE NAME=XXXXXXXS>
<FIGCAP>This is the Figure Caption
</FIG>
\end{XMP}
When SGML encounters the tag 'PICTURE' it reads a file
specified by the 'NAME' attribute.
The name {\bf must} consist of 8 characters ending with an 'S'.
The file type under VM/CMS is 'SGML'. (The name of the corresponding
bit-map file has the same first 7 characters, but the eighth is 'P'.)
The GRCONV command can be used to generate these two files.
In fact, as it would be cumbersome to require every picture to be stored in
its own metafile and processed separately,
GRCONV can handle metafiles which contain several pictures (separated by
'CLEAR WORKSTATION' items). It then produces one pair of output files
for each picture on the metafile.
If the user is not working under VM/CMS, GRCONV first transfers the metafile to
the user's VM account, and then interprets it to produce the required
set of output files. (Care must be taken to ensure that there is no conflict
in having GRCONV write to a VM/CMS mini-disk which is accessed in
write mode by another process. The user should have a write password
on a mini-disk available for access by GRCONV in write-mode.)
Note that GRCONV replaces the command GKSSGML,
and has a wider range of features.
\subsection{Via \LaTeX}
\index{latex@\LaTeX}
As the mechanism used to incorporate pictures into TeX documents depends
not on TeX itself, but on the printer driver used by TeX, no single
mechanism exists to achieve this. Thus, unfortunately, the procedure
varies and is not described here. For details, see \cite{bib-TAC}.
Assuming the hardcopy will be made on a device which supports PostScript
or an IBM-3812, GRCONV should be used to produce an Encapsulated PostScript
or PSEG38PP/PSEG3820 file, as for the SGML/BookMaster case above.
\subsection{\protect\label{sec:gcnref}Naming Pictures}
\Lit{$==>$} Keep ???
\index{GCNAME}
\index{GKS routine!{\protect\tt GCNAME}}
In order to include pictures within a document, it is clearly necessary to
have some way to designate which picture goes where.
Thus, the pictures must be named.
To accomplish this, a routine called GCNAME has been added to the
GKSGRAL and GKSGRAL-3d libraries.
(It is available also for DECGKS, and in source format in GKSPACK.)
GCNAME outputs a user item containing a name string onto the metafile
and, for example, this is used by GRCONV to generate the file names,
and by GRVIEW to allow the user to choose by name the picture to be edited.
GCNAME should be called to name a picture immediately after the previous
picture delimiter (Clear Workstation) and before any primitives or attributes
for the named frame have been written out. The call is:
\begin{XMP}
CALL GCNAME(WKID, NAME)
\end{XMP}
where WKID is the metafile workstation identifier,
and NAME is a FORTRAN variable or constant of type CHARACTER
and with a length of {\bf seven (7)} characters.
The seven characters must correspond to the 'XXXXXXX' preceding the 'S'
used in the picture NAME tag used by SGML.
If less than 7 characters are used then GRCONV
will pad the name out with dollar signs (\Lit{'$'}).
Thus, if a picture was named 'FPROJ' by GCNAME, then the corresponding SGML
NAME tag would need to be \Lit{'FPROJ$$S'}.
This convention is maintained for backwards compatibility, although
it is not required by SGML/BookMaster.
Even though not required for other operating systems, in order
to remain compatible with the file naming scheme used by VM/CMS,
GCNAME restricts the allowed character set to upper-case alphabetic
characters, the digits 0-9, and the dollar sign (\Lit{'$'}).
Lower-case characters are automatically converted to upper-case.
This is also why the number of characters is limited.
In order to avoid getting spurious or empty picture files produced by
GRCONV, application code should issue the Clear Workstation call to
the metafile workstation only if a complete picture really has been written to
the file.
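Putting these rules together, a minimal sketch for writing two named
pictures to a metafile workstation WKID might look as follows; the picture
names are illustrative, and GALWAY is the standard mnemonic for the
Clear Workstation control flag:
\begin{XMP}
C Name the first picture before any of its output
CALL GCNAME(WKID, 'FPROJ$$')
C ... primitives and attributes of the first picture ...
C Delimit the picture only when it really is complete
CALL GCLRWK(WKID, GALWAY)
C Name and draw the second picture
CALL GCNAME(WKID, 'SPROJ$$')
C ... primitives and attributes of the second picture ...
\end{XMP}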
\subsection{\protect\label{sec:epsref}Encapsulated PostScript}
\index{Encapsulated PostScript}
\index{PostScript}
It is possible to use the GKS PostScript driver in order to produce output
files in the format {\bf Encapsulated Postscript}, which is used by
various text-processors (TeX, SGML, BookMaster, etc.) in order to combine
PostScript-encoded data originating from different sources.
Encapsulated Postscript format is similar to standard PostScript but
with some additional header information concerning the picture size.
The PostScript instruction "showpage", which is normally included
at the end of the file in order to tell the interpreter to print the
preceding information, is removed in the Encapsulated format.
This avoids any conflict with the contents of other PostScript files
into which the Encapsulated file may be embedded.
Thus, sending an Encapsulated PostScript file to a printer directly will
not produce any output.
It is possible to use GRCONV to convert a GKS metafile to Encapsulated
PostScript format by specifying the requisite qualifier ('EPSPM', ...).
If the input metafile contains several pictures it is also possible to
use the 'SPLIT' option in order to get one output file per picture.
If a PostScript file is to be produced directly by a GKS application
program then the following applies:
\begin{OL}
\item
Open a GKS PostScript Workstation. The Workstation Type is the same
for both normal and Encapsulated PostScript formats.
\item
As for all drivers, if the connection identifier is less than 100
(see section on Page~\pageref{sec:conref}) then the PostScript instructions
are sent directly to the terminal.
(Useful if the terminal supports Display PostScript).
\item
If the connection identifier is greater than 100 but less than 200
(conid~=~100~+~n), then PostScript instructions will be
written to a file in normal PostScript format opened on unit number 'n'.
The file may include several pictures.
\item
If the connection identifier is greater than 200
(conid~=~200~+~n), then PostScript instructions will be
written to a file in Encapsulated PostScript format opened on unit number 'n'.
Only one picture should be included per file (see the sketch after this list).
\end{OL}
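As a minimal sketch, assuming unit number 21 and a variable WTPS holding
the PostScript workstation type (both chosen for illustration only),
an Encapsulated PostScript file could be produced as follows:
\begin{XMP}
C Open the output file on unit 21, then open the PostScript
C workstation with conid = 200 + 21 (Encapsulated format)
OPEN(UNIT=21, FILE='FIGURE1.EPS', STATUS='NEW')
CALL GOPWK(WKID, 221, WTPS)
CALL GACWK(WKID)
C ... draw exactly one picture ...
CALL GDAWK(WKID)
CALL GCLWK(WKID)
\end{XMP}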
\section{Use of Workstation Transformations to Change Picture Size}
\index{workstation transformation}
\index{picture size}
\index{size}
When using GKS from an application program it is not necessary to
specify a workstation transformation to set the size of the output picture.
In this case, the Normalization Viewport will automatically be made to map to
the largest possible square which fits onto the output device display surface.
This is often acceptable for an interactive graphics session.
However, when making a hardcopy image, especially if this
must fit within a document, then the actual size of the picture may
well be very important.
The GKS Workstation Transformation, which is described fully in
section on Page~\pageref{sec:gkspr}, allows the actual area required
to be specified in metres.
A brief summary will be given here.
The {\it Workstation Transformation} defines which part of the
Normalized Device Coordinate (NDC) space will
be visible, and where it will appear on the display surface.
It is set by calling the functions Set WorKstation WiNdow and
Set WorKstation ViewPort:
\begin{XMP}
\index{GKS routine!{\protect\tt GSWKWN}}
\index{GKS routine!{\protect\tt GSWKVP}}
CALL GSWKWN(WKID, WXMIN, WXMAX, WYMIN, WYMAX)
CALL GSWKVP(WKID, VXMIN, VXMAX, VYMIN, VYMAX)
\end{XMP}
Set Workstation Window specifies in NDC coordinates the area of the NDC
space to be output to the device. Set Workstation Viewport specifies in
{\it Display Coordinates} (DC) where on the device the window will appear.
The aspect ratio for the workstation window and the workstation viewport
{\bf must always be the same}.
If they are not, then {\bf the specified transformation is ignored},
and the complete workstation window is displayed on the device
in the correct aspect ratio.
As an example, suppose that the picture occupies the area (wxmin,~wymin)
to (wxmax,~wymax) in World Coordinates. This may be mapped via the
Normalization Transformation to a Viewport (vxmin,~vymin) to
(vxmax,~vymax), and this does not have to preserve the aspect ratio.
This Viewport must now be output onto an A4 sheet so as to fill the width
of the page, assuming an aspect ratio with width~>~height.
\begin{note}
Although the width of an A4 page is 21cm, most output devices do not
permit the full width to be used. Consult the Workstation Description Table
for the device to find out what is the maximum size.
\end{note}
\begin{XMP}
C Set the Window and Viewport for Normalization Tfrm 'trn'
C and select this Normalization Tfrm for future primitives.
C
CALL gswn (trn, wxmin, wxmax, wymin, wymax)
CALL gsvp (trn, vxmin, vxmax, vymin, vymax)
CALL gselnt(trn)
C
C Set the Workstation Window and Workstation Viewport
C Note that 18.9 cm is the maximum width for PostScript Portrait Mode
C (The Workstation Window to map from the Normalization Viewport)
C
CALL gswkwn(wkid, vxmin, vxmax, vymin, vymax)
CALL gswkvp(wkid, 0.0, 0.189,
* 0.0, 0.189*(vymax-vymin)/(vxmax-vxmin))
\end{XMP}
\chapter{GKS-3D Primer}
\index{GKS-3D}
\begin{note}
Whilst in general the information in this Primer is independent of
a particular GKS implementation, this is not entirely the case for
GKS-3D. The reason is that the goal of the Primer is not simply
to describe the functionality of GKS, but to explain in detail
how it may be used via the FORTRAN language binding.
However, as the binding has not yet been completely finalised for
GKS-3D, this chapter explains its use in terms of a
particular implementation, namely GKSGRAL-3D Version 2.0.
The text of the Draft International Standard for the GKS-3D
FORTRAN binding was published at the end of 1988,
with voting to finish by May, 1989.
Discrepancies between the FORTRAN DIS and GKSGRAL-3D Version 2.0 are minor,
and are mentioned at the appropriate places within the chapter.
Only the setting of the viewing parameters and the aspect source
flags are affected, and the text will be revised after the final
voting on the standard is complete.
The changes between VSN 1.1 and VSN 2.0 of GKSGRAL-3D
are described in Appendix on Page~\pageref{sec:vsn2upd}.
\end{note}
\section{Introduction to GKS-3D}
GKS-3D is a pure super-set of GKS designed to handle 3D graphics in
a compatible way. That is to say, a 2D application written to
the GKS standard is guaranteed to run in a GKS-3D environment
without change. However, apart from the usual GKS functions,
GKS-3D provides additional ones to handle 3D primitives,
3D input, and 3D viewing.
As for GKS, the standard is in multiple parts with the language
bindings separate from functional specification.
The FORTRAN binding is defined in \cite{bib-gksftn3}.
All primitives within GKS-3D are deemed to be three dimensional.
Thus, although an application using GKS-3D may make only 2D function
calls, all the 2D primitives will be turned immediately into the
corresponding 3D versions inside the package by the addition of
a Z coordinate. This has several noticeable effects:
\begin{UL}
\item 2D function calls may be slightly slower than 3D ones (using the
same GKS-3D package), as a 2D call will add a Z coordinate
and then make the corresponding 3D function call.
\item 2D applications running on GKS-3D will take more storage space
in WDSS and WISS.
\item A 2D picture stored on a 3D metafile will require more space
than on a 2D metafile. Also, quite clearly, {\bf such a picture could
not be re-interpreted by reading the metafile into a GKS (2D)
implementation}.
\end{UL}
\section{The Drawing Primitives}
\index{GKS3D!drawing primitives}
\index{primitives!3D}
With one addition, GKS-3D supports the same GKS primitive types as
described in chapter on Page~\pageref{sec:dprim}.
Four of the functions are called in a similar way to that for GKS-2D,
but with the addition of a third coordinate:
\index{GKS routine!{\protect\tt GPL3}}
\index{GKS routine!{\protect\tt GPM3}}
\index{GKS routine!{\protect\tt GFA3}}
\index{GKS routine!{\protect\tt GGDP3}}
\begin{DLtt}{123456}
\item[Polyline 3D]CALL GPL3(N, PXA, PYA, PZA)
\item[Polymarker 3D]CALL GPM3(N, PXA, PYA, PZA)
\item[Fill Area 3D]CALL GFA3(N, PXA, PYA, PZA)
\item[GDP 3D]CALL GGDP3(N, PXA, PYA, PZA, PRIMID, IDR, DATREC)
\end{DLtt}
However, the 3D routines for Cell Array (GCA3) and Text (GTX3) have
calling sequences which are substantially changed.
GCA3 is not described here, and those brave enough to peruse
the standards document will see why. Concerning GTX3, it should be
emphasized that this function is only required if it is desired to place
text on a surface which is not parallel to the X-Y plane,
otherwise the 2D text function (GTX) is sufficient.
For example, one would need to use GTX3 to write 'MIGROS' on the
side of a truck drawn with some random orientation.
\index{text!3D}
\index{GKS routine!{\protect\tt GTX3}}
The function call for 3D Text is:
\begin{XMP}
CALL GTX3(PX, PY, PZ, TDX, TDY, TDZ, STR)
\end{XMP}
where the arguments are as follows:
\begin{DLtt}{123456}
\item[PX, PY, PZ]
Reference Point (start point of text string)
\item[TDX/Y/Z(2)]
Two Text Direction vectors (three real arrays of length 2).
\item[STR]
The character string to be output.
\end{DLtt}
For both GTX and GTX3 the character string, STR, is drawn in a plane.
The difference is that in the 3D case this {\it text plane} can be
oriented in space using two {\it text direction vectors}, U and V,
specified by TDX(1), TDY(1), TDZ(1), and TDX(2), TDY(2), TDZ(2).
The text is drawn in a plane perpendicular to the vector formed from
taking the vector product U~x~V,
and the origin of the local coordinate system in which the geometric
text attributes are measured is defined by the reference point, P.
The X axis of the local text coordinate system is parallel to the
first direction vector, U, and the Y axis is perpendicular
to this direction. Thus, the system reduces to the 2D case if
U and V are defined as (1,~0,~0) and (0,~1,~0).
An example program using 3D Text is given in Appendix on Page~\pageref{sec:ex3dv}.
The additional primitive, called {\it Fill Area Set}
(or {\it Fill Area Set 3}), generates a set of polygonal
areas which may be hollow or filled with a uniform colour,
pattern, or hatch style.
Thus, it allows the application to specify regions with holes,
or disjoint regions which must be treated as a single entity.
There are both 2D and 3D versions of Fill Area Set, and the
2D function will be added to the next revision of GKS.
Unlike the Fill Area primitive, Fill Area Set and Fill Area Set 3
have separate attributes, described below, to control the edge
style of the polygons.
The calls are:
\index{fill area set}
\index{GKS routine!{\protect\tt GFAS}}
\index{GKS routine!{\protect\tt GFAS3}}
\begin{XMP}
CALL GFAS(NPTR, PTR, PX, PY)
CALL GFAS3(NPTR, PTR, PX, PY, PZ)
\end{XMP}
where the arguments are as follows:
\begin{DLtt}{123456}
\item[NPTR]
(integer) Number of fill areas in set
\item[PTR(NPTR)]
(integer) Array of pointers to starting elements of lists in
PX(N), PY(N), PZ(N). Total number of coordinates is PTR(NPTR)-1.
\item[PX/PY/PZ(*)]
(real) List of points for all fill areas of the set.
\end{DLtt}
Thus, all the polygons are packed into the arrays PX(N), PY(N), PZ(N),
and the start point of each polygon is indicated by the pointers in
the array PTR(NPTR).
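As an illustration, the following sketch passes two coplanar triangles to
GFAS3 as a single Fill Area Set. It assumes the pointer convention implied
above, namely that PTR(I) holds the index of the element following the
last point of area I, so that PTR(NPTR)-1 is the total number of points;
the coordinate values are arbitrary.
\begin{XMP}
REAL PX(6), PY(6), PZ(6)
INTEGER PTR(2)
C Two triangles, three points each, packed consecutively
DATA PX /0.1, 0.3, 0.2, 0.5, 0.7, 0.6/
DATA PY /0.1, 0.1, 0.3, 0.1, 0.1, 0.3/
DATA PZ /6*0.0/
C Area 1 ends before element 4, area 2 before element 7
DATA PTR /4, 7/
CALL GFAS3(2, PTR, PX, PY, PZ)
\end{XMP}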
\begin{note}
Although they are specified in a 3D space, the primitives Text,
Cell Array, Fill Area, and Fill Area Set are all {\bf coplanar}.
It is the responsibility of the application program to ensure that the
coordinates supplied fulfil this condition.
What happens if they are not coplanar is implementation-dependent!
\end{note}
\section{The Output Attributes}
\index{GKS3D!attributes}
\index{attributes!3D}
As for the primitives, GKS-3D uses almost the same output attributes as
GKS-2D (described in Chapter on Page~\pageref{sec:attrbs}), but with the
three following additions:
\begin{OL}
\item Extra attributes are required for the new Fill Area Set primitive.
\index{view index}
\index{GKS3D!view index}
\index{attributes!view index}
\item The {\it View Index} attribute.
This is analogous to the Normalization Transformation index attribute,
and specifies which viewing transformation is to be used to process
the primitive.
\item The {\it Hidden Line/Hidden Surface} (HLHSR) attribute specifies
which HLHSR algorithm should be used to process the primitive
(if HLHSR is supported by the workstation).
\end{OL}
The Fill Area Set primitive uses the same attributes as Fill Area to
control the interior of the polygons (see section on Page~\pageref{sec:attlst}),
plus the following which allow independent control of the edges:
\index{fill area set}
\index{GKS routine!{\protect\tt GSEDFG}}
\index{GKS routine!{\protect\tt GSEDT}}
\index{GKS routine!{\protect\tt GSEWSC}}
\index{GKS routine!{\protect\tt GSEDCI}}
\begin{DLtt}{123456}
\item[EDFLAG]
the Fill Area Set edge flag (ASF 1). Set by GSEDFG(EDFLAG).
EDFLAG may take the values 'GON' or 'GOFF'.
\item[EDTYPE]
the Fill Area Set edge type (ASF 2). Set by GSEDT(EDTYPE).
EDTYPE is an integer value which is workstation-dependent.
\item[EDWSF]
the Fill Area Set edge width scale factor (ASF 3). Set by GSEWSC(EDWSF).
The value of EDWSF is a real number which modifies the width of the line
used to draw the edge of the Fill Area Set.
\item[EDCI]
the Fill Area Set edge colour index (ASF 4). Set by GSEDCI(EDCI).
\end{DLtt}
The numbers in brackets are, for the GTS-GRAL implementation,
the indices into the integer array ASFLST(4)
used to set the corresponding Attribute Source Flags by calling:
\index{GKS routine!{\protect\tt GASF3}}
\index{attribute source flags}
\begin{XMP}
CALL GASF3(ASFLST)
\end{XMP}
However, the DIS FORTRAN binding uses GASF3 to set all the
Attribute Source Flags in one go by defining the array ASFLST to be of
length 17, where elements 1 to 13 correspond to the 2D case and the last
four elements are those listed here.
Note that the Fill Area Set
primitive is rendered using two independent sets of attributes,
one for the interior, and one for the edge. This separate set of
edge attributes also has its own attribute bundle, selected by calling
GSEDI, plus a corresponding routine to Set the EDge Representation:
\index{GKS routine!{\protect\tt GSEDI}}
\index{GKS routine!{\protect\tt GSEDR}}
\begin{XMP}
CALL GSEDI(EDI)
and
CALL GSEDR(WKID, EDI, EDFLAG, EDTYPE, EDWSF, EDCI)
\end{XMP}
\section{Viewing in 3D}
\index{GKS3D!viewing}
\index{transformation!3D viewing}
\index{viewing pipeline}
Setting up the {\it Viewing Parameters} is undoubtedly the most
complicated part of any 3D graphics system. When primitives are output
to a workstation they (conceptually) pass through a series of processes
called the {\it Viewing Pipeline} before they finally reach the
display surface. This pipeline is briefly described below in order that
the reader is aware of the complete process (see \ref{fig:pipe}).
The transformations will then be covered in more detail.
\begin{OL}
\item The primitives are transformed by the {\it Normalization
Transformation} from World Coordinates (WC3) to Normalized
Device Coordinates (NDC3), which are always in the range [0.0, 1.0].
This transformation is composed of a translation and change of scale,
but no rotation. GKS-3D allows for the existence of many World
Coordinates systems, and their corresponding Normalization
Transformations are numbered from 0 upwards. Normalization
Transformation 0 always corresponds to the identity matrix.
Normalization in 3D is exactly analogous to the 2D case described
in section on Page~\pageref{sec:nortrn}.
\item Primitives which are stored in segments are also processed by the
{\it Segment Transformation} before proceeding to the next stage.
In the 3D case this requires a 3~x~4 matrix which is
described below. The segment transformation maps NDC3 to NDC3,
and includes scaling, rotation, and translation.
\item Having assembled the components in a unique NDC3 space,
primitives may next be clipped to a box to remove extraneous
information. This is called the {\it Normalization Clip}, and may be
switched on or off using the {\it Normalization Clip Flag}.
\item The primitives are now 'viewed' from some arbitrary direction.
The {\it View Orientation Transformation} performs a rotation only
to take Normalized Device Coordinates to View Reference Coordinates
(VRC3). The application is free to calculate the corresponding matrix
itself, or to use a utility routine which is described below.
\item The {\it View Mapping (Projection) Transformation} next takes
View Reference Coordinates to Normalized Projection Coordinates
(NPC3)
in order to provide parallel or perspective projection of the image.
As for the View Orientation Transformation, the application is free
to calculate the required matrix using its own algorithm, or to call a
utility function.
\begin{figure}[h]
\caption{The GKS-3D Viewing Pipeline}
\label{fig:pipe}
\end{figure}
\item At this point the {\it View Clip} takes place.
It is positioned at this stage in the pipeline so that the clip box may be
defined as a rectangular parallelepiped with its sides parallel to
the axes of the NPC3 system, and thus the clipping algorithm is
more efficient. The View Clip is controlled by three {\it Clip Flags}
which allow clipping to be turned on or off separately for the
front plane, back plane, and window.
\item Finally, the {\it Workstation Transformation} takes
Normalized Projection Coordinates to Display Coordinates (DC3) in order
to position the projected image in the device coordinate space. It
preserves the aspect ratio, and includes a clipping operation which
cannot be disabled. As their clip faces are parallel, the View Clip and
Workstation Clip are usually combined internally for efficiency. DC3
coordinates may be in metres or raster units. The Workstation Window
limits are [0,1]x[0,1]x[0,1].
\end{OL}
A good implementation of the graphics pipeline will attempt to combine
as many as possible of the stages in the pipeline using matrix
concatenation in order to reduce the amount of computation necessary.
\subsection{The Normalization Transformation}
\index{normal!transformation}
\index{transformation!3D normalization}
As in the 2D case, this is specified by a Window volume in World
Coordinates, and a Viewport volume in Normalized Device Coordinates.
The Normalization Clip is controlled as for GKS-2D
(see section on Page~\pageref{sec:nrmclp}).
The calls are:
\index{GKS routine!{\protect\tt GSW3}}
\index{GKS routine!{\protect\tt GSV3}}
\begin{XMP}
CALL GSW3(TNR, WN)
CALL GSV3(TNR, VP)
\end{XMP}
where WN and VP are real arrays of dimension 6 containing (XMIN,
XMAX, YMIN, YMAX, ZMIN, ZMAX).
\subsection{The View Orientation Transformation}
\index{view orientation transformation}
\index{transformation!3D view orientation}
The {\it View Orientation Transformation} algorithm provided by a
utility function in the GKS-3D standard performs a rotation in three
dimensions in order to take Normalized Device Coordinates to View
Reference Coordinates (VRC3), where the axes are labeled U, V and N
(see \ref{fig:vrc}).
The definition of the UVN system requires the application to specify:
\begin{OL}
\item The {\it View Reference Point} (VRP), which is a point on or
near the object to be viewed, and is the origin of the VRC3 system.
\item The {\it View Plane Normal} (VPN), which points from the
View Reference Point towards the eye point. The VPN is the third axis
of the VRC3 system, and the plane perpendicular to the View Plane Normal
through the View Reference Point is called the {\it View Reference Plane}.
\item The {\it View Up Vector} (VUV), which defines the direction
to be considered as 'UP' within the View Plane.
It is the second axis of the VRC3 system.
{\bf Clearly, the View Plane Normal and the View Up Vector must not
be defined to be collinear.}
\end{OL}
\index{window}
\index{viewport}
Thus, in the View Reference system, the axis N is along the
View Plane Normal, the axis V is the projection of the View Up Vector
onto the View Reference Plane, and the axis U is constructed to form the
third axis of a right-handed coordinate system.
The default transformation sets the identity matrix.
The limits of the VRC3 system are [0,1]x[0,1]x[0,1].
\begin{figure}[h]
\caption[The GKS-3D View Reference System]%
{The GKS-3D View Reference System.
The View Reference Point, defined in World Coordinates,
should be situated near the object to be viewed.
The View Plane Normal is directed at the eye point.
}
\label{fig:vrc}
\end{figure}
\subsection{The View Mapping (Projection) Transformation}
\index{view mapping transformation}
\index{projection transformation}
\index{transformation!3D view mapping}
The {\it View Mapping (Projection) Transformation} provided by a
utility function in the GKS-3D standard takes View Reference Coordinates
to Normalized Projection Coordinates (NPC3), and the projection may be
either parallel or perspective (see \ref{fig:proj}).
The default View Mapping Transformation sets the identity matrix.
The transformation maps the {\it View Volume} in VRC3 space to a
rectangular parallelepiped in Normalized Projection Coordinates.
The horizontal and vertical boundaries of the View Volume are specified
by the projectors drawn from the {\it Projection Reference Point}
(the centre of projection) to the corners of the {\it View Window},
which is a rectangle parallel to the View Reference Plane,
and with edges parallel to the U and V axes.
The View Window cuts the N axis of the VRC3 system at the
{\it View Plane Distance} (VPD) from the View Reference Point.
The hither (front) and yon (back) boundaries of the View volume are
specified by the {\it Front Plane} and the {\it Back
Plane}, which are planes parallel to the View Reference Plane at
distances from the View Reference Point called the {\it Front Plane
Distance} (FPD) and the {\it Back Plane Distance} (BPD).
The rectangular parallelepiped in NPC3 to which the View Volume is
mapped, called the {\it Projection Viewport Limits}, is
specified by the two points (XMIN, YMIN, ZMIN) and (XMAX, YMAX, ZMAX).
Although the View Mapping operation maps a volume to a volume,
which is slightly different from the usual idea of projection onto a plane,
a little thought should (hopefully) convince the reader that the effect is
equivalent. Consider looking at the result along the N axis; the effect is
that the contents of a truncated pyramid are distorted to map into a
rectangular volume, and thus objects closer to the viewer will be magnified.
For parallel projection, the projectors are parallel to a line drawn from
the Projection Reference Point to the centre of the View Window,
and thus the PRP should be set in the middle of the View Window
to produce an orthographic projection.
In this case the actual Z value of the PRP is unimportant, so long as it is
not identical to the View Plane Distance.
The {\it View Clip} takes place at the edges of the View
Clipping Limits according to the setting of the clipping indicators.
There are three of these: the x-y clipping indicator, the front clipping
indicator, and the back clipping indicator.
The default clipping limits are set to
[0,1]x[0,1]x[0,1] and all clipping indicators set to clip on ('GCLIP').
It would normally be the case that the View Clipping Limits and the
Projection Viewport Limits were set to the same values, but this is
not a requirement. If not set to be co-incident, there is clearly the
possibility for an application to clip away all of the image by mistake!
\begin{figure}[h]
\caption[The GKS-3D Projection System]%
{The GKS-3D Projection System.
The figure shows the definition of the View Volume with
Perspective Projection.
}
\label{fig:proj}
\end{figure}
\subsection{Setting the 3D View Representation}
\index{view representation in 3D}
Having described the concepts, this section will attempt to explain
how all these parameters are actually set. This is an area of
the GKS-3D standard which was modified fairly late,
and release 2.0 of GKSGRAL-3D still took place before the publication
of the Draft International Standard for the FORTRAN binding
\cite{bib-gksftn3}.
As for the Normalization Transformation, there may be more than one
Viewing Transformation (actually, combined Viewing and Projection
Transformation plus Clip), and the different transformations
are specified by a {\it View Index}.
However, unlike the Normalization Transformation, which applies to all
primitives no matter on which workstation they are displayed,
the Viewing Transformation is workstation-dependent, and so the
same VieW Index (VWI) may produce a different effect on each active display.
The View Index is set using the routine:
\index{GKS routine!{\protect\tt GSVWI}}
\index{view index}
\index{GKS3D!view index}
\index{attributes!view index}
\begin{XMP}
CALL GSVWI(VWI)
\end{XMP}
Following this call all primitives will be transformed according to the
parameters specified by viewing attribute bundle VWI, assuming that
the deferral mode set has allowed the workstation to be brought
up-to-date. The default viewing attributes,
corresponding to VWI~=~0,
define identity matrices for the View Orientation and View Mapping
transformations, and place the clip limits at the boundary of NPC3 space.
As indicated, the attribute values contained in
the viewing attribute bundle specified by VWI must be defined separately
for each workstation using the call:
\index{GKS routine!{\protect\tt GSVWR}}
\begin{XMP}
CALL GSVWR(WKID, VWI, VWM, PRM, VCLP, CLW, CLB, CLF)
\end{XMP}
\begin{DLtt}{123456}
\item[WKID]Workstation Identifier
\item[VWI]The View Index
\item[VWM]The View Orientation Matrix (a~4~x~4 real array),
which may be calculated by the function GEVVWM
\item[PRM]The Projection (View Mapping) Matrix
(a~4~x~4 real array),
which may be calculated by the function GEVPJM
\item[VCLP]The View Clipping Limits (XMIN, XMAX, YMIN, YMAX, ZMIN, ZMAX)
\item[CLW]Clip Indicator for Window Clipping ('GNCLIP', 'GCLIP')
\item[CLB]Clip Indicator for Back Plane Clipping ('GNCLIP', 'GCLIP')
\item[CLF]Clip Indicator for Front Plane Clipping ('GNCLIP', 'GCLIP')
\end{DLtt}
The utility functions provided to evaluate the matrices are EValuate
VieW orientation Matrix and EValuate ProJection (View Mapping) Matrix:
\index{GKS routine!{\protect\tt GEVVWM}}
\begin{XMP}
CALL GEVVWM(VRPX, VRPY, VRPZ, VUPX, VUPY, VUPZ,
VPNX, VPNY, VPNZ, CSW, ERR, VWM)
\end{XMP}
\begin{DLtt}{123456}
\item[VRPX/Y/Z]The View Reference Point in NDC3 or WC3
\item[VUPX/Y/Z]The View Up Vector in NDC3 or WC3
\item[VPNX/Y/Z]The View Plane Normal in NDC3 or WC3
\item[CSW]Switch specifying whether the vectors are given in
World Coordinates or Normalized Device Coordinates ('GWC' or 'GNDC')
\item[ERR](out) Error indicator
\item[VWM](out) View Matrix (a~4~x~4 real array)
\end{DLtt}
\index{GKS routine!{\protect\tt GEVPJM}}
\begin{XMP}
CALL GEVPJM(UMIN, UMAX, VMIN, VMAX, PRVP, PROTYP,
PRPU, PRPV, PRPN, VPD, BPD, FPD, ERR, PRM)
\end{XMP}
\begin{DLtt}{123456}
\item[U, V](MIN,MAX) The limits of the View Window on the
View Reference Plane measured relative to the View Reference Point.
UMIN, VMIN is the bottom left corner, UMAX, VMAX is the top right corner.
\item[PRVP]The Projection Viewport Limits (a 6 element real array
containing XMIN, XMAX, YMIN, YMAX, ZMIN, ZMAX)
\item[PROTYP]The Projection Type flag integer ('GPARL' or 'GPERS')
\item[PRPU/V/N]The Projection Reference Point.
\item[VPD]The View Plane Distance from the View Reference Point
along the N axis.
(Note that the View Reference Point does {\it not} have to be
contained in the View Plane).
\item[BPD, FPD]The Back and Front Plane Distances from the
View Reference Point along the N axis. FPD must be greater than BPD.
\item[ERR](out) Error indicator
\item[PRM](out) Projection Matrix (a~4~x~4 real array)
\end{DLtt}
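Putting these pieces together, the following is a minimal sketch which
defines view index 1 on workstation WKID as an orthographic (parallel)
projection looking down the Z axis at an object centred on the origin.
All numeric values are chosen for illustration, and the mnemonics GWC,
GPARL, and GCLIP are assumed to be the usual integer constants from the
GKS FORTRAN enumeration include file.
\begin{XMP}
REAL VWM(4,4), PRM(4,4), VCLP(6), PRVP(6)
INTEGER ERR
DATA VCLP /0.0, 1.0, 0.0, 1.0, 0.0, 1.0/
DATA PRVP /0.0, 1.0, 0.0, 1.0, 0.0, 1.0/
C
C View Reference Point at the origin, View Up Vector along +Y,
C View Plane Normal along +Z (the eye lies on the positive Z axis)
C
CALL GEVVWM(0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
* 0.0, 0.0, 1.0, GWC, ERR, VWM)
C
C View Window [-1,1]x[-1,1] with the Projection Reference Point
C at its centre (orthographic); View Plane Distance 0.0,
C Back and Front Plane Distances -1.0 and +1.0
C
CALL GEVPJM(-1.0, 1.0, -1.0, 1.0, PRVP, GPARL,
* 0.0, 0.0, 5.0, 0.0, -1.0, 1.0, ERR, PRM)
C
C Install the bundle as view index 1 and select it
C
CALL GSVWR(WKID, 1, VWM, PRM, VCLP, GCLIP, GCLIP, GCLIP)
CALL GSVWI(1)
\end{XMP}
As suggested in the next section, while debugging it may be safer to pass
GNCLIP instead of GCLIP so that clipping is initially switched off.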
\subsection{Workstation Transformation}
\index{workstation!transformation 3D}
\index{transformation!3D workstation}
This is specified by a {\it Workstation Window} volume in
Normalized Projection Coordinates, and a {\it Workstation
Viewport} volume in Device Coordinates.
The Workstation Transformation always preserves the aspect ratio,
and the default transformation will be used if the window or viewport
volumes are not correctly set.
\index{GKS routine!{\protect\tt GSWKW3}}
\index{GKS routine!{\protect\tt GSWKV3}}
\begin{XMP}
CALL GSWKW3(WKID, WKWN)
CALL GSWKV3(WKID, WKVP)
\end{XMP}
where WKWN and WKVP are real arrays of dimension 6 containing
(XMIN, XMAX, YMIN, YMAX, ZMIN, ZMAX)
\index{GKS routine!{\protect\tt GQDVOL}}
To inquire the range of device coordinates corresponding to a particular
workstation type, it is possible to call the function GQDVOL
(inQuire Display VOLume).
\begin{XMP}
CALL GQDVOL (WTYPE, ERRIND, DCUNIT, RX, RY, RZ, LX, LY, LZ)
\end{XMP}
The routine returns the units in which the display surface
is measured (DCUNIT), and also the maximum x, y, and z values
in each direction.
Some devices, for example hardcopy plotters, are measured in metres
(DCUNIT='GMETRE'), so the routine will return the actual size of the
device. Other devices, for example graphics terminals,
will be defined in raster or some other units (DCUNIT='GOTHU').
RX, RY, RZ are real device coordinates,
and LX, LY, LZ are in integer raster units.
The use of GQDVOL is analogous to the use of GQDSP,
which is described in section on Page~\pageref{sec:wstntfm}.
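For example, the following sketch inquires the display volume and maps the
whole NPC cube onto the largest cube which fits on the device; WTYPE, WKID,
and the variable names are illustrative:
\begin{XMP}
REAL WKVP(6), RX, RY, RZ, SIDE
INTEGER ERRIND, DCUNIT, LX, LY, LZ
CALL GQDVOL(WTYPE, ERRIND, DCUNIT, RX, RY, RZ, LX, LY, LZ)
C Take the largest cube which fits into the display volume
SIDE = MIN(RX, RY, RZ)
WKVP(1) = 0.0
WKVP(2) = SIDE
WKVP(3) = 0.0
WKVP(4) = SIDE
WKVP(5) = 0.0
WKVP(6) = SIDE
CALL GSWKV3(WKID, WKVP)
\end{XMP}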
\subsection{But I don't see anything!}
The first (and second and maybe third) time one tries out a 3D application,
even after having checked the code very carefully, there may be nothing
on the screen. The following is a list of possible pitfalls:
\begin{OL}
\item Is the View Reference Point really on or near the object to be viewed?
\item Is it defined in WC or NDC, and does this match the coordinate switch?
\item Is the View Plane Normal pointing the correct way?
A classical bug is to look 180 degrees in the wrong direction.
GKS-3D uses a Right-Handed system, so if the object to be viewed is
at the origin, and is viewed along the Z axis, then one
{\it looks} in the {\bf negative} Z direction, but the VPN
{\it points} in the {\bf positive} Z direction (0.0, 0.0, 1.0).
\begin{note}
As only the direction and not the length of View Plane Normal vector
matters, one can simply use the coordinates of the eye point to specify this
vector, so long as the View Reference Point is placed at the origin.
\end{note}
\item Assuming one wants an orthographic projection, is the Projection
Reference Point in the middle of the View Window?
\item Has the View Window (UMIN, VMIN), (UMAX, VMAX)
{\it really} been set around the object?
\item Have the Front Plane and Back Plane distances been set correctly
in front and behind the object?
\item Is the aspect ratio of the Normalization Viewport volume the same
as that of the View volume?
\item Are the Clipping Limits which define the projection parallelepiped
correctly set within the NPC system limits? Do the Clipping Limits
and Projection Viewport Limits match?
{\bf It is always safer to start with clipping switched off!}
\item Have the Workstation Window and Workstation Viewport been set
to the same aspect ratio?
\end{OL}
\section{Segments}
\index{segment!3D}
\index{transformation!3D segment}
\index{GKS3D!segments}
Segmentation operates in GKS-3D in the same way as for GKS,
described in section on Page~\pageref{sec:segtfm}, except that the segment
transformation matrix is defined to be 3~x~4.
(3~x~3 for scaling and rotation plus 3~x~1 for
translation).
Thus, the 2D utility routines which calculate a segment
transformation matrix, GEVTM and GACTM, are modified as follows:
\index{GKS routine!{\protect\tt GEVTM3}}
\index{GKS routine!{\protect\tt GACTM3}}
\begin{XMP}
CALL GEVTM3(X0, Y0, Z0, DX, DY, DZ,
ROTX, ROTY, ROTZ, FX, FY, FZ, SW, MXOUT)
CALL GACTM3(MXIN, X0, Y0, Z0, DX, DY, DZ,
ROTX, ROTY, ROTZ, FX, FY, FZ, SW, MXOUT)
\end{XMP}
Similarly to the 2D case, GEVTM3 evaluates a matrix (MXOUT),
whilst GACTM3 accumulates changes to an existing matrix (MXIN).
Both routines require the definition of:
\begin{DLtt}{123456}
\item[X0, Y0, Z0]
(real) a fixed reference point about which 3D rotations take place.
\item[DX, DY, DZ]
(real) a translation (or shift) vector.
\item[ROTX, Y, Z]
(real) angles of rotation about the X, Y, and Z axes.
\item[FX, FY, FZ]
(real) X, Y, and Z scale factors.
\item[SW]
(enumerated) a switch specifying whether the reference point and
shift vector are given in World Coordinates
or Normalized Device Coordinates ('GWC' or 'GNDC').
\item[MXOUT]
(real) 3~x~4 output matrix composed in the
order: scale, rotate, shift.
In the case of GACTM3, the matrix MXIN is pre-concatenated with that
formed from the scale, rotate, and shift parameters, so
MXOUT~=~SHIFT~*~ROTATE~*~SCALE~*~MXIN.
\end{DLtt}
Once the transformation matrix has been evaluated, it may then be
set in the segment by calling the routine:
\index{GKS routine!{\protect\tt GSSGT3}}
\begin{XMP}
CALL GSSGT3(SGNA, MTX)
\end{XMP}
To INsert a SeGment into the output stream GINSG becomes:
\index{GKS routine!{\protect\tt GINSG3}}
\begin{XMP}
CALL GINSG3(SGNA, MTX)
\end{XMP}
Because GKS-3D is upwards compatible to GKS, one can still use the
2D versions of these routines.
In this case, 2~x~3 matrices will be automatically filled-out
to 3~x~4 by the suitable additions of 0s and 1s.
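For instance, the following sketch rotates segment 5 by 30 degrees about
the Z axis around the fixed point (0.5,~0.5,~0.5), with no shift and unit
scale factors. The segment name is illustrative; the angle is assumed to
be given in radians, as for the 2D routine GEVTM, and GNDC is the usual
coordinate-switch mnemonic.
\begin{XMP}
REAL MX(3,4)
REAL PI
PARAMETER (PI = 3.141593)
C
C 30 degree rotation about the Z axis around (0.5, 0.5, 0.5),
C no shift, unit scale factors
C
CALL GEVTM3(0.5, 0.5, 0.5, 0.0, 0.0, 0.0,
* 0.0, 0.0, 30.0*PI/180.0,
* 1.0, 1.0, 1.0, GNDC, MX)
CALL GSSGT3(5, MX)
\end{XMP}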
\begin{note}
GKS segment transformations are provided in order to orient the
contents of segments with respect to the coordinate system in which
their primitives were originally specified. In most cases {\bf it is
extremely inefficient to modify the transformations in each segment
in order to view a scene from a different direction}. The viewing
transformation should be used for this purpose
\end{note}
\section{Graphical Input}
\index{GKS3D!input}
\index{input!for 3D}
As only the Locator and Stroke logical devices return coordinate
values, only these two have 3D versions of the routines used
for the three input modes. Thus, for request mode, the calls are:
\begin{XMP}
CALL GRQLC3(WKID, LCDNR, STAT, TNR, VWI, PX, PY, PZ)
and
CALL GRQSK3(WKID, SKDNR, STAT, N, TNR, VWI, NP, PX, PY, PZ)
\end{XMP}
Where, apart from the extra dimension, PZ, the only difference to the
2D calls is the addition of the View Index, VWI.
(Note that as for TNR, VWI is an output parameter.)
This corresponds to the index of the viewing attribute bundle used to
convert the position(s) back from NPC3 coordinates to NDC3 coordinates.
Of course, when using a physical 2D device, it is awkward for the user
to provide the third coordinate for 3D input.
Although only Locator and Stroke have 3D functions to obtain input,
all six logical devices have 3D versions of the initialization
functions. For example, these allow the echo areas to be positioned
in space, rather than in a plane. They will not be discussed
further in this Primer.
\section{GKS-3D Metafile}
\index{metafile!for 3D}
\index{GKS3D!metafile}
\index{Appendix E metafile}
\index{CGM}
As for GKS, the GKS-3D standard has an Appendix E metafile.
The logical format of the 2D and 3D Appendix E metafiles are the
same. However, the contents are incompatible, as in one
case points are represented by two values, and in the other by
three values. The two types of metafile are clearly distinguishable
by inspecting the metafile header record. In all other respects,
the control and usage of the 2D and 3D metafiles are the same.
The Computer Graphics Metafile, CGM, will not initially have a 3D
version. Therefore, if a CGM metafile driver is added to GKS-3D,
the output could contain only a 2D projection. This would be sufficient
to make a hardcopy, but only with the viewing parameters chosen
when the metafile was created.
\begin{note}
The GKS-3D Appendix E metafile has never been implemented by
GTS-GRAL. Nevertheless, an additional output workstation is
provided to produce a 2D
metafile by carrying out the viewing and projection operations before
outputting data to the file. This feature is useful for making hardcopies.
\end{note}
\chapter{\protect\label{sec:refhint}General Hints for GKS Use}
\index{hints}
\section{System and Implementation Dependencies}
As mentioned elsewhere in this document certain features of GKS and GKS-3D
are system or implementation dependent. System dependencies are described
in the manual appropriate for the implementation in use.
The principal implementation dependencies to watch for are text fonts and
fill area hatch styles,
but it is also possible to have difficulties by inadvertently trying to
exceed some maximum value, such as a table length.
GKS has many inquiry functions which may be used to discover the current
and/or maximum value of various parameters,
and it is not the intention of the Primer to describe all of them.
However, one should take note of the following routines:
\index{GKS routine!{\protect\tt GQWKM}}
\index{GKS routine!{\protect\tt GQMNTN}}
\index{GKS routine!{\protect\tt GQLWK}}
\index{GKS routine!{\protect\tt GQLWK3}}
\index{GKS routine!{\protect\tt GQSGS}}
\index{GKS routine!{\protect\tt GQSGP}}
\begin{XMP}
CALL GQWKM(ERRIND, MXOPWK, MXACWK, MXWKAS)
CALL GQMNTN(ERRIND, MAXTNR)
CALL GQLWK(WTYPE, ERRIND, MPLBTE, MPMBTE, MTXBTE, MFABTE, MPAI, MCOLI)
or
CALL GQLWK3(WTYPE, ERRIND, MPLBTE, MPMBTE, MTXBTE, MFABTE, MPAI,
*           MEDBTE, MCOLI, MVTE)
CALL GQSGP(WTYPE, ERRIND, NSGP)
\end{XMP}
where the parameters are as follows:
\begin{DLtt}{123456}
\item[WTYPE]workstation type (input parameter)
\item[ERRIND]error number
\item[MXOPWK]maximum number of simultaneously open workstations
\item[MXACWK]maximum number of simultaneously active workstations
\item[MXWKAS]maximum number of workstations associated with a segment
\item[MAXTNR]maximum normalization transformation number
\item[MPLBTE]maximum number of polyline bundle table entries
\item[MPMBTE]maximum number of polymarker bundle table entries
\item[MTXBTE]maximum number of text bundle table entries
\item[MFABTE]maximum number of fill area bundle table entries
\item[MPAI]maximum number of pattern indices
\item[MEDBTE]maximum number of edge bundle table entries
\item[MCOLI]maximum number of colour indices
\item[MVTE]maximum number of view table entries
\item[NSGP]maximum number of segment priorities
\end{DLtt}
There is unfortunately no function provided to inquire the maximum available
number of segments or the maximum available segment name, so one must consult
the relevant documentation.
\section{Integrating separately written modules of code.}
As is the case when any independent software modules are combined into a
single program, care must be taken that on entry a module saves the
state of the current environment and sets up its own defaults.
The original environment must then be restored on exiting from
the module. This is analogous to saving and restoring registers around
a subroutine call: (parts of) the GKS State List and Workstation State
Lists must be saved on entering a graphics module and restored on leaving it.
For example, two modules of graphics code may use the same Normalization
Transformation indices. If module B sets different windows and viewports
than module A, then on re-using routines in A after calling module B the
transformations will no longer produce the expected results.
GKS provides a mechanism to handle this situation in the form of a large set
of inquiry functions. These enable a module of code to inquire at run time the
values of those parameters it intends to modify in order that they may
be correctly restored afterwards. In particular, functions exist
to inquire attribute values and bundle table entries, values of the
aspect source flags, and the normalization and viewing (for GKS-3D)
transformations corresponding to a particular normalization or view index.
As an example:
\begin{XMP}
----
REAL WINDOW(4)
REAL VIEWPT(4)
----
C Inquire current window and viewport for Transformation Number X
C
CALL GQNT(TNX, ERRIND, WINDOW, VIEWPT)
C
C Set this module's own window for Transformation Number X
C
CALL GSWN(TNX, WXMINA, WXMAXA, WYMINA, WYMAXA)
----
C Restore window which was modified for Transformation Number X
C
CALL GSWN(TNX, WINDOW(1), WINDOW(2), WINDOW(3), WINDOW(4))
----
\end{XMP}
If several modules of code which are to be combined use GKS segments then
clearly they must not both attempt to use the same segment names.
Either one can agree before hand on the allocation of a range of names
to each module, or else code can be written which assigns segment names
at run time after checking that the values are not in use
(see section on Page~\pageref{sec:segnam}).
\subsection{Using GKS-3D libraries for GKS (2D) applications}
As GKS-3D is a super-set of GKS it is possible to run a GKS 2D application
linked to a GKS-3D library. However, if a 2D code module is incorporated
into a larger 3D program then the programmer must be aware that primitives
will be bound to the current View Index {\bf as well as} the current
Normalization Transformation Index, possibly entailing 3D transformations
plus view clipping.
Thus, to be sure that the code within the 2D module will behave as if linked
to a 2D library the View Index should be set to 0 (identity matrix) in the
3D code before calling the 2D module.
This cannot be done in the 2D module itself, as otherwise the linker would give
an error when linking to a (2D) GKS library, which does not include the 3D
function GSVWI.
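As a one-line illustration, assuming the 2D code is packaged in a
subroutine MOD2D (a hypothetical name):
\begin{XMP}
C In the 3D code, reset the view index to the identity
C before entering the 2D module
CALL GSVWI(0)
CALL MOD2D
\end{XMP}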
\section{\protect\label{sec:refintw}Plotting numbers as text strings}
For FORTRAN programmers it is possible to use the {\it Internal Write}
construct to convert numeric variables to character strings for output as
text primitives or via GMSG:
\begin{XMP}
REAL rvar
CHARACTER str*11
----
C Set Variable
rvar = v1 * v2 / v3
C Convert to a character string
WRITE(str, '(''RVAR= '',F5.1)') rvar
CALL GMSG(wkid, str)
----
\end{XMP}
\section{\protect\label{sec:gkspref}GKSPACK}
\index{GKSPACK}
GKSPACK contains routines which may be placed into several categories.
\begin{UL}
\item
One category of routines is intended to ease the use of GKS for
those people with simple applications who do not need further GKS features,
NAG graphics users for example. These routines are built on top
of GKS and are not implementation dependant. They include
routines to initialize and stop GKS (GCINIT, GCSTOP),
to ask the user for input (GPRMPT), etc.
\item
The next category of routines provide facilities to allow applications
which must run on several GKS implementations to obtain information
about workstation types and connection identifiers from a data file.
This avoids having to ask the user interactively to supply implementation
dependent values. These routines include GCGIMP, GCGWTC, etc.
\item
Some routines provide access to GKS features which might be awkward to use,
or which may be implementation dependent. For example, use of GDPs,
or the construction of menus. GKSGRAL includes a large number of utility
routines of this type starting with the sentinel characters {\bf GU},
and GKSPACK contains CERN-written emulations of many of them for
use with other GKS implementations.
\item
Another category of routines is for non-standard GKS utilities which allow the
user to perform specific actions not foreseen by the GKS standard, and which may
require internal knowledge of the GKS implementation.
Thus, it may not be possible to implement these routines for GKS
implementations other than the one from GTS-GRAL if access to the source
code is not available.
The principal examples of routines in this category, heavily used
at CERN by PAW for instance, are GCATOG and GCGTOA. These are used to switch
the graphics terminal between graphics and alphanumeric mode in order for
applications to intermix graphics and Fortran I/O on the same device.
\end{UL}
For various technical reasons it has been decided to include entry points
for all routines in GKSPACK in the GTS-GRAL libraries%
\footnote{At the time of preparing this document there
is a possibility that there may be
a delay in introducing a few of the GKSPACK routines into
the Apollo library due to the change over to SR~10.}
maintained by CERN.
This means that users of GKSGRAL do {\bf not} need to link to a separate
library in order to have access to GKSPACK routines, which is a change to the
previous situation when it was necessary to link applications
also with the CERN library 'NGRAFLIB'.
For users of other GKS implementations a PAM file, named GKSPACK,
is available.
Initially, apart from GKSGRAL, this has support only for DECGKS.
The PAM file distributed by the CERN Program Library will be
included also on the GTS-GRAL distribution tape, and is available at CERN
as follows:
\begin{DLtt}{123456}
\item[IBM:]via \Ucom{GIME CERNPAMS}
\item[VXCERN:]in \Lit{CERN_ROOT:[PAM]GKSPACK.PAM}
\item[Apollo:]in \Lit{/cern/pro/pam/gkspack.pam}
\item[UNIX:]in \Lit{$CERN/pro/pam/gkspack.pam}
\end{DLtt}
The compiled version for DECGKS is available at CERN:
\begin{DLtt}{123456}
\item[VXCERN:] \Lit{GKS_ROOT:[LIB]GKSPACK_DEC.OLB}
\end{DLtt}
As mentioned above, GKSPACK includes some routines for which it may not
be possible to produce correctly working versions for implementations
other than GKSGRAL.
For example, this is the situation for GCATOG and GCGTOA with DECGKS.
In these cases GKSPACK contains dummy routines with the correct
calling sequences, but which write out an error message to a file
named GKSPACK.ERR.
In fact, for GCATOG and GCGTOA, calling the dummy routines
when using a terminal%
\footnote{As opposed to using a workstation where the
Fortran and graphics I/O are directed at different windows.}
actually stops the application program as well. This is because if the
application mixes Fortran I/O and graphics without using correctly
working versions of GCATOG and GCGTOA then probably the terminal will
block and need to be re-set.
(If used via VM/CMS this also can block the communications.)
The GKSPACK routines which provide access to the implementation
dependent values for workstation types and connection identifiers
require access to a data file.
Examples of this file are distributed with GKSPACK,
and at CERN are available as follows:
\begin{DLtt}{123456}
\item[IBM]The file is called \Lit{GKS$IMPL DAT}.
A GTS-GRAL GKS version is available on the 'Q' disk, but users may provide
their own if they wish.
\item[VAX]The file is available via the Logical Name
\Lit{GKS_IMPLEMENTATION}.
Both GTS-GRAL and DECGKS versions of the file exist,
and users must assign the one they wish to use.
\begin{XMP}
GKS_ROOT:[DAT]GKS_IMPLEM.GTS
GKS_ROOT:[DAT]GKS_IMPLEM.DEC
\end{XMP}
\item[UNIX]The file is accessed via \Lit{$GKS_IMPLEM},
which must be defined as an environment variable.
A GTS-GRAL GKS version called \Lit{gks_implem.gts} is available in
\Lit{$gkshome}, or in \Lit{/cern/gks/pro/dat}.
\end{DLtt}
An example of such a file would be:
\begin{XMP}
DECGKS
NB WKTYPES 5
PSTSCR 61 9
T4107 82 0
VT340 17 0
VXUIS 41 0
WISS 5 1
\end{XMP}
\subsection{List of routines}
\begin{OL}
\item \Lit{CALL GCATOG(WKID)} or \Lit{CALL GCGTOA(WKID)}
\index{GKSPACK!{\protect\tt GCATOG}}
\index{GKS routine!{\protect\tt GCATOG}}
\index{GKSPACK!{\protect\tt GCGTOA}}
\index{GKS routine!{\protect\tt GCGTOA}}
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID]GKS workstation identifier of the terminal (INTEGER).
\end{DLtt}
\Lit{GCGTOA} and \Lit{GCATOG} work only on GKSGRAL with the CERN-supplied
driver versions as the routines require modifications to the driver code.
The DECGKS version tests the workstation type; if it is VXUIS or VXXW
it does nothing, otherwise it writes an error message on the file
GKSPACK.ERR.
The routines change a terminal from graphics to alpha mode (GCGTOA)
and from alpha to graphics mode (GCATOG). The terminal must be an
activated GKS workstation.
Thus they allow the application to perform FORTRAN I/O to
the terminal during a graphics session. The effect on the terminal
depends on its capabilities. For example, as Pericom PG terminals
do not have a dialog area, calling GCGTOA causes the bell to ring
and the program then halts until the operator presses return.
This gives the user time to look at the image drawn in graphics
mode before switching the screen to the alpha-numeric bit plane.
However, on terminals with a dialog area (e.g. Pericom
MG series and Tektronix 4107 compatible terminals),
the FORTRAN I/O will appear immediately overlayed with the graphics.
After the FORTRAN I/O has been performed the application
{\bf must} call GCATOG to reset the graphics environment.
Note that GCGTOA returns immediately, without waiting for a user
action. Thus, if one wishes the program to wait until the user has read
a message and is ready to continue, then it is essential to include
a READ statement, as the following program fragment illustrates:
\begin{XMP}
CALL GCGTOA (WKID)
WRITE(*,*) ' GIVE X COORDINATE: '
C Wait for a reply
READ (*,*) X
CALL GCATOG (WKID)
\end{XMP}
\item {\bf CALL GCCDWN(WKID, RWIN, TITLE)}
\index{GKSPACK!{\protect\tt GCCDWN}}
\index{GKS routine!{\protect\tt GCCDWN}}
This routine is described fully in section on Page~\pageref{sec:vstnref}.
The DECGKS version is a dummy.
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID (I)]Workstation Identifier
\item[RWIN (R*4)]Window Size
\item[TITLE (C)]Window title
\end{DLtt}
\item {\bf CALL GCINIT(IDTERM, TERMTP, IDFILE, LUFILE, KERFIL)}
\index{GKSPACK!{\protect\tt GCINIT}}
\index{GKS routine!{\protect\tt GCINIT}}
{\bf Input:}
\begin{DLtt}{123456}
\item[IDTERM]GKS workstation ID for terminal (INTEGER).
If $\leq0$ or in batch no terminal is assigned.
\item[TERMTP]GKS terminal workstation type (INTEGER).
Note that this number depends on the terminal {\it and} the
GKS implementation you are using.
\item[IDFILE]GKS workstation ID for disk file output (INTEGER).
No file is output if $\leq0$.
\item[LUFILE]FORTRAN logical unit number for disk file output (INTEGER).
Not used if IDFILE $\leq0$.
\item[KERFIL]FORTRAN logical unit number for the GKS error file (INTEGER).
\end{DLtt}
GCINIT provides an easy initialization of GKS for interactive or
batch applications.
If TERMTP=0 GCINIT will prompt the interactive user for the
terminal type and connection ID.
LUFILE and KERFIL are Fortran logical unit numbers and no file names
are assigned as these can easily be provided by the user.
Note that the logical unit numbers 91, 92, 93 are reserved for
GKSGRAL and, to avoid nasty surprises, do not use 5, 6 or 7.
The current values for LUFILE and their effect are:
\begin{XMP}
LUFILE<=0 An interactive dialog will guide the user.
0<LUFILE<100 GKS Appendix E metafile on unit LUFILE.
100<LUFILE<200 PostScript file on unit LUFILE-100.
200<LUFILE<300 Encapsulated PostScript file on LUFILE-200.
1000<LUFILE<1100 Tektronix 4014 style file on unit LUFILE-1000
\end{XMP}
For example, LUFILE=109 would produce PostScript output on the
Fortran file defined by logical unit=9.
\begin{note}
Output of Tektronix 4014 escape codes is available under VMS and Aegis.
It can be provided under VM/CMS if requested.
\end{note}
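As an illustration, the following call (the workstation identifiers 1 and 2
and the error file unit 11 are arbitrary choices) prompts the user for the
terminal type and produces PostScript output on Fortran unit 9:
\begin{XMP}
C Terminal on wkid 1, type prompted for; PostScript on unit 9
C (LUFILE=109) via wkid 2; GKS error file on unit 11
      CALL GCINIT (1, 0, 2, 109, 11)
\end{XMP}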
\item {\bf CALL GCGIMP (MAXTYP, GKSNWT, GKSSYN, GKSWCN, GKSWTP)}
\index{GKSPACK!{\protect\tt GCGIMP}}
\index{GKS routine!{\protect\tt GCGIMP}}
Supplies information in implementation file.
See introduction to this section for file naming details.
{\bf Input:}
\begin{DLtt}{123456}
\item[MAXTYP (I)]maximum number of wk types
(i.e. dimension of output arrays)
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[GKSNWT (I)]number of wk types in the implementation file
\item[GKSSYN (C*6)]array (dimension MAXTYP) with the workstation types names
(e.g. "VXUIS~")
\item[GKSWCN (I)]array (dimension MAXTYP) with the workstation connection id's
\item[GKSWTP (I)]array (dimension MAXTYP) with the workstation types integer
values (e.g. 41)
\end{DLtt}
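A minimal sketch of a call, assuming that the implementation file lists
at most 10 workstation types:
\begin{XMP}
      INTEGER GKSNWT, GKSWCN(10), GKSWTP(10)
      CHARACTER*6 GKSSYN(10)
C Read the workstation descriptions from the implementation file
      CALL GCGIMP (10, GKSNWT, GKSSYN, GKSWCN, GKSWTP)
\end{XMP}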
\item {\bf CALL GCIMPL(IMPLEM)}
\index{GKSPACK!{\protect\tt GCIMPL}}
\index{GKS routine!{\protect\tt GCIMPL}}
Supplies implementation name string.
{\bf Output:}
\begin{DLtt}{123456}
\item[IMPLEM (C*6)]Name of implementation
\end{DLtt}
\item {\bf CALL GCGWTC(WKTYP, CONID)}
\index{GKSPACK!{\protect\tt GCGWTC}}
\index{GKS routine!{\protect\tt GCGWTC}}
Asks the user to enter interactively a workstation type, and
provides the list of the available workstation types if the user enters '?'.
Automatically returns to the program the connection id to be used
for the selected workstation type.
{\bf Output:}
\begin{DLtt}{123456}
\item[WKTYP (I)]Workstation Type
\item[CONID (I)]Connection Id
\end{DLtt}
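For example, a program could let the user choose the device at run time
and open it directly (the workstation identifier 1 is an arbitrary choice):
\begin{XMP}
      CALL GCGWTC (WKTYP, CONID)
      CALL GOPWK (1, CONID, WKTYP)
      CALL GACWK (1)
\end{XMP}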
\item {\bf CALL GCNAME(WKID, STRING)}
\index{GKSPACK!{\protect\tt GCNAME}}
\index{GKS routine!{\protect\tt GCNAME}}
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID (I)]GKS workstation identifier of the metafile
receiving the frame (INTEGER).
\item[STRING (C*7)]The name of the frame (CHARACTER*(*)).
\end{DLtt}
GCNAME enables the user to give the name 'STRING' to the current frame
when output to a metafile for inclusion in compound documents.
The name 'STRING' may also be used by GRVIEW and GRCONV
to select frames on the metafile.
Users of this facility {\bf must} place the call to GCNAME before
outputting any primitives to a new frame.
For examples of its use, see on Page~\pageref{sec:gcnref} and on Page~\pageref{sgmlgra}.
{\bf Restrictions:} Only the first 7 characters of 'STRING' are used.
If fewer than 7 characters are given, 'STRING' will be padded with
\Lit{'$'}s (dollars).
For VM/CMS system reasons, the character set for 'STRING' can
only contain upper-case alphabetic, the digits 0-9, and the dollar
sign (\Lit{'$'}), so lower-case characters are converted to upper-case.
\item {\bf CALL GCQWKN(WKID, IERR, CONID, WKNAM)}
\index{GKSPACK!{\protect\tt GCQWKN}}
\index{GKS routine!{\protect\tt GCQWKN}}
Analog to the routine GQWKC but returns a workstation type name
instead of a workstation type integer value.
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID (I)]Workstation Id
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[IERR (I)]value returned by GQWKC
\item[CONID (I)]Connection Id
\item[WKNAM (C*6)]Workstation Name (e.g. "VXUIS~")
\end{DLtt}
\item {\bf CALL GCSDWN(WKTYP, RWIN, TITLE)}
\index{GKSPACK!{\protect\tt GCSDWN}}
\index{GKS routine!{\protect\tt GCSDWN}}
This routine is described fully in section on Page~\pageref{sec:vstnref}.
The DECGKS version is a dummy.
{\bf Input:}
\begin{DLtt}{123456}
\item[WKTYP (I)]Workstation Type
\item[RWIN (R*4)]Window Size
\item[TITLE (C)]Window title
\end{DLtt}
\item {\bf CALL GCSTOP}
\index{GKSPACK!{\protect\tt GCSTOP}}
\index{GKS routine!{\protect\tt GCSTOP}}
GCSTOP deactivates and closes all GKS workstations and closes GKS.
\item {\bf CALL GCWTPC(WKTYP, WKTSYN)}
\index{GKSPACK!{\protect\tt GCWTPC}}
\index{GKS routine!{\protect\tt GCWTPC}}
Returns the workstation type name corresponding to a workstation
type integer value.
{\bf Input:}
\begin{DLtt}{123456}
\item[WKTYP (I)]Workstation Type (e.g. 41 for DECGKS)
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[WKTSYN (C*6)]Workstation Type Name (e.g. "VXUIS~")
\end{DLtt}
\item {\bf CALL GCWTPI(WKTSYN, CONID, WKTYP)}
\index{GKSPACK!{\protect\tt GCWTPI}}
\index{GKS routine!{\protect\tt GCWTPI}}
Get workstation type and connection id corresponding to a given
workstation type name (e.g. 'VXXW').
{\bf Input:}
\begin{DLtt}{123456}
\item[WKTSYN (C*6)]Workstation Type Name
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[WKTYP (I)]Workstation Type
\item[CONID (I)]Connection Id
\end{DLtt}
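A sketch of its use, assuming that the implementation file contains an
entry for the name 'VXUIS':
\begin{XMP}
      CALL GCWTPI ('VXUIS ', CONID, WKTYP)
      CALL GOPWK (1, CONID, WKTYP)
\end{XMP}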
\item {\bf CALL GPRMPT(WKID, PROMPT, LSTRI, REPLY)}
\index{GKSPACK!{\protect\tt GPRMPT}}
\index{GKS routine!{\protect\tt GPRMPT}}
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID]GKS workstation identifier of the terminal (INTEGER).
\item[PROMPT]Application prompt (CHARACTER *(*)).
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[LSTRI]Length of reply (INTEGER).
\item[REPLY]User reply (CHARACTER).
\end{DLtt}
GPRMPT gives a prompt and waits for a reply from the user in a GKS
interactive graphics program.
As an example, GPRMPT could be used to hold a picture on
the screen until the user was ready to view the next one,
or terminate the program.
If the user hits only a Carriage Return then LSTRI=0 and 'REPLY' is not
defined. For example:
\begin{XMP}
CALL GPRMPT(WKID, 'Type RETURN or QUIT', LSTRI, REPLY)
IF(LSTRI.GT.0) THEN
C Call STOPPG to do whatever you want to do on QUIT
      IF (REPLY(1:4) .EQ. 'QUIT') CALL STOPPG
ENDIF
\end{XMP}
Currently the prompt is put into a GKS segment which is deleted
after the reply. If a segment is open when GPRMPT is called,
the prompt will be added to it but it will not be deleted. This
could be confusing and should be avoided by closing an open
segment before calling GPRMPT.
If the workstation is not a terminal or the job is in batch
GPRMPT does nothing.
{\bf Restrictions:} If GPRMPT is used repeatedly within one picture,
the prompts will overprint if the terminal does not have selective erasure.
If long prompts and/or small workstation windows are used the
prompt will be truncated.
\item {\bf CALL GRQSK(WKID, LDSTK, NMAX, ISTATS, IT, NP, PX, PY)}
\index{GKSPACK!{\protect\tt GRQSK}}
\index{GKS routine!{\protect\tt GRQSK}}
Emulates the GTSGRAL request stroke (locator loop)
which requires a button push to input each point.
This is in contrast to the DECGKS implementation of GRQSK
which reads the current cursor position in a loop
with fixed time or position intervals.
\begin{note}
If it is intended to use this routine to replace the version of GRQSK in the
GKS library it must be linked ahead of the library.
\end{note}
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID (I)]Workstation Identifier
\item[LDSTK (I)]Stroke logical device
\item[NMAX (I)]Maximum number of points
\end{DLtt}
{\bf Output:}
\begin{DLtt}{123456}
\item[ISTATS (I)]Status
\item[IT (I)]Normalization Transformation Number
\item[NP (I)]Number of points returned
\item[PX (R*NMAX)]X coordinates
\item[PY (R*NMAX)]Y coordinates
\end{DLtt}
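A minimal sketch of a call, assuming stroke device 1 and at most 100 points;
the stroke is echoed back with a polyline if the status indicates valid input:
\begin{XMP}
      REAL PX(100), PY(100)
      CALL GRQSK (WKID, 1, 100, ISTATS, IT, NP, PX, PY)
C Status 1 (GOK) means valid input was obtained
      IF (ISTATS .EQ. 1) CALL GPL (NP, PX, PY)
\end{XMP}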
\item {\bf CALL GUARC(XM,YM,XP,YP,XH,YH,XQ,YQ)}
\index{GKSPACK!{\protect\tt GUARC}}
\index{GKS routine!{\protect\tt GUARC}}
Emulation of GTS-GRAL utility to draw a circular arc defined by 4 points.
{\bf Input:}
\begin{DLtt}{123456}
\item[XM, YM]Mid point of arc
\item[XP, YP]Start point
\item[XH, YH]Point on arc
\item[XQ, YQ]Point on end radius of arc
\end{DLtt}
\item {\bf CALL GUBEZ1(N,XP,YP)}
\index{GKSPACK!{\protect\tt GUBEZ1}}
\index{GKS routine!{\protect\tt GUBEZ1}}
Emulation of GTS-GRAL utility to draw a Bezier curve defined by a
Bezier polygon.
{\bf Input:}
\begin{DLtt}{123456}
\item[N (I)]Dimension of XP, YP
\item[XP, YP]Points on Bezier polygon
\end{DLtt}
\item {\bf CALL GUCIR1(XM,YM,XP,YP)}
\index{GKSPACK!{\protect\tt GUCIR1}}
\index{GKS routine!{\protect\tt GUCIR1}}
Emulation of GTS-GRAL utility to draw a circle defined by
(midpoint, peripheral point).
{\bf Input:}
\begin{DLtt}{123456}
\item[XM, YM]Mid point of circle
\item[XP, YP]Peripheral point on circle
\end{DLtt}
\item {\bf CALL GUCIR2(XM,YM,R)}
\index{GKSPACK!{\protect\tt GUCIR2}}
\index{GKS routine!{\protect\tt GUCIR2}}
Emulation of GTS-GRAL utility to draw a circle defined by
(midpoint, radius).
{\bf Input:}
\begin{DLtt}{123456}
\item[XM, YM]Mid point of circle
\item[R]Radius
\end{DLtt}
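For example, assuming world coordinates in the unit square, a circle of
radius 0.25 centred in the middle of the window could be drawn by:
\begin{XMP}
      CALL GUCIR2 (0.5, 0.5, 0.25)
\end{XMP}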
\item {\bf CALL GUCUR1(N,XP,YP)}
\index{GKSPACK!{\protect\tt GUCUR1}}
\index{GKS routine!{\protect\tt GUCUR1}}
Emulation of GTS-GRAL utility to draw a curve defined by interpolating points.
{\bf Input:}
\begin{DLtt}{123456}
\item[N (I)]Dimension of XP, YP
\item[XP, YP]Points in polygon.
\end{DLtt}
\item {\bf CALL GUELL1(XM,YM,A,B)}
\index{GKSPACK!{\protect\tt GUELL1}}
\index{GKS routine!{\protect\tt GUELL1}}
Emulation of GTS-GRAL utility to draw an ellipse defined by
(midpoint, semi-axes).
An ellipse is drawn with midpoint XM,YM; the length of semi-axis
in the X-direction is A, and in the Y-direction is B.
{\bf Input:}
\begin{DLtt}{123456}
\item[XM, YM] Midpoint of Ellipse
\item[A, B]Semi-axes of ellipse in X and Y directions
\end{DLtt}
\item {\bf CALL GUELL2(XM,YM,A,B,BEGRAD,ENDRAD,ROTATE)}
\index{GKSPACK!{\protect\tt GUELL2}}
\index{GKS routine!{\protect\tt GUELL2}}
Emulation of GTS-GRAL utility to draw an elliptical arc specified by the
midpoint XM,YM, the size of the semi-axes in direction X and Y (A, B),
and \Lit{BEGRAD} and \Lit{ENDRAD} which define the angles of the start and end points.
The ellipse is rotated with angle ROTATE in an anti-clockwise direction.
{\bf Input:}
\begin{DLtt}{123456}
\item[XM, YM] Midpoint of Ellipse
\item[A, B]Semi-axes of ellipse in X and Y directions
\item[BEGRAD]Angle of arc start point
\item[ENDRAD]Angle of arc end point
\item[ROTATE]Angle of anti-clockwise rotation
\end{DLtt}
\item {\bf CALL GUMEN2(WKID,DNR,CHECXL,CHECXH,CHECYL,CHECYH,MENU)}
\index{GKSPACK!{\protect\tt GUMEN2}}
\index{GKS routine!{\protect\tt GUMEN2}}
Emulation of GTS-GRAL utility to define a menu in a given echo area.
{\bf Input:}
\begin{DLtt}{123456}
\item[WKID (I)]Workstation Id
\item[DNR (I)]Device Number
\item[CHECXL]Echo area X Lower Bound
\item[CHECXH]Echo area X Higher Bound
\item[CHECYL]Echo area Y Lower Bound
\item[CHECYH]Echo area Y Higher Bound
\item[MENU (C)]String of menu items separated by ',' and terminated
by '.'.
\end{DLtt}
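A sketch of a menu definition, assuming input device number 1 and an echo
area occupying the left fifth of the display surface:
\begin{XMP}
      CALL GUMEN2 (WKID, 1, 0.0, 0.2, 0.0, 1.0, 'DRAW,ERASE,QUIT.')
\end{XMP}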
\item {\bf CALL GUNERR(N)}
\index{GKSPACK!{\protect\tt GUNERR}}
\index{GKS routine!{\protect\tt GUNERR}}
Dummy routine which writes an error message to the file GTSTODEC.ERR.
{\bf Output:}
\begin{DLtt}{123456}
\item[N]Number of GKS errors which occurred.
\end{DLtt}
\item {\bf CALL GUSIGD(FLAG)}
\index{GKSPACK!{\protect\tt GUSIGD}}
\index{GKS routine!{\protect\tt GUSIGD}}
Dummy routine which writes an error message to the file GTSTODEC.ERR.
{\bf Input:}
\begin{DLtt}{123456}
\item[FLAG (L)]Set (.TRUE.) or reset (.FALSE.) simulation flag.
\end{DLtt}
\end{OL}
\subsection{GKSPACK Error Messages}
\index{GKSPACK!Error Messages}
\begin{DLtt}{123456}
\item[GTSGRAL]
If GOPKS has been called before any call to GKSPACK then any GKSPACK
errors will be written to the GKS error file. Otherwise, if an error occurs
from a call to a GKSPACK routine preceding the GOPKS call, then the error
message will be sent to the screen.
\item[DECGKS]
All errors are written to GKSPACK.ERR.
\end{DLtt}
The list of GKSPACK error messages is as follows:
\begin{DLtt}{123456}
\item[-1]Cannot open implementation file.
The file does not exist or: (VMS) the logical name \Lit{GKS_IMPLEMENTATION}
is not correctly defined; or (UNIX) the environment variable \Lit{GKS_IMPLEM}
is not correctly defined.
\item[-2]Internal error in GCGIMP. The arrays are too small to contain
the information for all the workstations listed in the implementation file.
Contact Graphics Section at CERN (CN/US/GR).
\item[-3]The user arrays are too small to contain the information for all
the workstations listed in the implementation file.
\item[-4]The GKSPACK routine called is a dummy for this GKS implementation.
\item[-5]Error in GCNAME: invalid characters in frame name.
\end{DLtt}
\subsection{GKSPACK Example Program}
\index{GKSPACK!Example Program}
The following complete program illustrates the use of some
routines available in GKSPACK:
\begin{XMP}
PROGRAM DEMOC1
*
* Include file defines GKSGRAL workstation types
*
INCLUDE 'GKS$GTSDEV'
CHARACTER*80 REPLY
REAL XSINX(51),XSINY(51)
REAL XTWOPI
DATA XTWOPI/6.28318/
*
* DEFINE THE WORKSTATION TYPE (Pericom MG600) AND METAFILE
*
CALL GCINIT (1, MG600, 2, 30, 11)
*
* DEFINE AND SELECT A TRANSFORMATION THAT DIRECTS OUTPUT
* TO A WINDOW OF (0.0, 1.0) X (-1.0, 1.0)
*
CALL GCNAME (2, 'TITLE01')
CALL GSWN (1, 0.0, 1.0 ,-1.0, 1.0)
CALL GSELNT (1)
*
* BEGIN OUTPUT
*
DO 10 K=1,51
XSINX(K) = FLOAT(K-1)*0.02
XSINY(K) = SIN(XSINX(K)*XTWOPI)
10 CONTINUE
CALL GPL (51, XSINX, XSINY)
*
* ALL DONE WITH GKS, CLOSE THE SYSTEM
*
CALL GPRMPT (1, 'TYPE RETURN', LSTRI, REPLY)
CALL GCSTOP
END
\end{XMP}
\chapter{\protect\label{sec:gtschap}GKSGRAL and GKSGRAL-3D}
\index{GKSGRAL}
\index{GKSGRAL-3D}
\index{FORTRAN binding}
\index{levels of GKS}
GKSGRAL is a full implementation of the ISO GKS standard.
It is written in FORTRAN, and the application interface follows
the final version of the FORTRAN binding \cite{bib-gksftn}.
The kernel is written to the level '2c', although versions on
some systems (and some of the drivers) only support level '2b'.
In particular, the version on IBM is only '2b'.
\index{input!event mode}
\index{input!sample mode}
The main reason not to support '2c' is that Event Mode requires
the operating and communications systems to support asynchronous
interrupts. However, this feature is not required for Sample Mode.
Thus, although it requires 'bending' the standard slightly,
a pseudo Sample Mode is available on some devices which do not conform
to level '2c' using the following procedure:
\begin{XMP}
C Set string mode to sample
CALL GSSTM (WKID,1,1,0)
C Request the locator position
CALL GRQLC (WKID,1,STAT,TNR,PX,PY)
C Sample the character typed
CALL GSMST (WKID,1,1,NCH,STRING)
C Set string mode back to request
CALL GSSTM (WKID,1,0,1)
\end{XMP}
The current release of GKSGRAL-3D, Version 2.0, follows the final version of
the ISO functional standard. It is also very close to the final FORTRAN binding
apart from some minor differences which will be resolved at a future date.
\section{Device Drivers}
\index{device drivers}
\index{MEGATEK}
\index{IBM!5080}
Both GKSGRAL and GKSGRAL-3D use the same 2D device drivers.
Thus, any 2D device supported by GKSGRAL can also be driven from
GKSGRAL-3D. In addition, GKSGRAL-3D supports the IBM~5080,
TEKTRONIX~4235/6, and the MEGATEK WHIZZARD series (models 72xx and 33xx)
which have 3D hardware transformations.
\index{include files}
The list of devices supported may be found in the include file
GTSDEV reproduced in Appendix on Page~\pageref{sec:gtstyp},
although one should check the latest machine-readable version to see
if there have been any changes.
The precise definition of the facilities provided by each workstation
driver, such as the assignment of keys, number of available colours,
and so on, is given in the Workstation Description Tables.
An abridged version of the most common of these may be found in
Appendix on Page~\pageref{sec:wdtref}.
\section{\protect\label{sec:conref}Connection Identifiers}
\index{conid}
\index{connection identifier}
\index{GKS routine!{\protect\tt GOPWK}}
For terminal connections on VAX VMS and VM/CMS, assuming that there are
no special instructions in the Workstation Description Table for the
device, any number in the range from 1 to 90 may be used for the conid
in calls to GOPWK (but see note below).
On VAX VMS, GKSGRAL uses an automatically-generated logical name to connect
to the terminal, but it is possible to override this with the command:
\begin{XMP}
DEFINE GKS_DEVICE_n TTxx:
\end{XMP}
where \Lit{n} is the conid to be used and \Lit{'TTxx'}
(or \Lit{'TXxx'}, \Lit{'LTxx'}, \Lit{'RTxx'}, \Lit{'NVxx'}, etc.
depending on the type of controller)
is the name of the terminal.
Note that this will only allow use of a terminal other than the one on which
the user is logged in if the user has the requisite privileges,
or if the second terminal is not protected.
Moreover, if the second terminal is not hard-wired, for example, if it is
connected via a Local Area Network terminal server, then one must take care
at the time of use to find out the correct name of the controller and port
to which the physical terminal is connected.
If, for debugging or other purposes, one does not want to get any graphical
output on the terminal (and no graphical input is requested),
then one can connect to the NULL device with the command:
\begin{XMP}
DEFINE GKS_DEVICE_n NL:
\end{XMP}
where n is the conid to be used.
On VMS, it is possible to capture graphics output command sequences from the
HP Plotter, PostScript (see section on Page~\pageref{sec:epsref} for information
on Encapsulated PostScript) and Tektronix 4014 drivers on a file.
To do this, open a file with unit number~=~n and define the
connection identifier to be conid~=~(100~+~n),
where n is a small integer greater than 1.
If there is sufficient interest,
this feature could be extended to other devices.
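For example, to capture PostScript output on Fortran unit 9 one might write
(the file name and the workstation type variable are assumptions):
\begin{XMP}
C Attach unit 9 to the driver via conid 109 (= 100 + 9)
      OPEN (UNIT=9, FILE='MYPLOT.PS', STATUS='NEW')
      CALL GOPWK (WKID, 109, WTYPE)
\end{XMP}
where WTYPE must be set to the appropriate PostScript workstation type.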
\begin{note}
\index{FORTRAN Logical Unit Numbers}
At CERN, the GKSGRAL and GKSGRAL-3D packages reserve FORTRAN
Logical Unit Numbers (and hence conids) {\bf 91-93} for internal use.
Apart from these three numbers the actual range of permissible values
for connection identifiers is in fact 1-100.
\end{note}
\section{\protect\label{sec:impref1}Implementation-Dependent Features}
\index{implementation-dependent features}
\index{text fonts}
\index{fonts}
\index{italic text}
\index{proportionally spaced text}
\index{hatch styles}
\index{fill area styles}
\index{fill area limitation}
\index{maximum parameter values}
Although an ISO standard, GKS still includes some features which vary from
implementation to implementation or device to device.
The principal features to watch are text fonts and fill area hatch styles.
GKSGRAL supports 11 stroke-precision Latin text fonts plus Greek
\index{Greek text font}
and a solid filled font.
These are illustrated in \ref{fig:fonts}. The fonts are available
both in italic and non-italic scripts, as well as proportionally and
non-proportionally spaced.
The font indices are given in Appendix on Page~\pageref{sec:wdtref}.
At CERN, GKSGRAL has been modified to provide 24 Fill Area Hatch Styles
(indices -101 to -124), and these should produce the same result on all
workstations (see \ref{fig:hatch}).
Other parameters with which the package has currently been configured are:
\begin{DLtt}{123456}
\item[Workstations]10 simultaneously open workstations (6 on VM)
\item[Segments]2000 segments, names in range 1-32763
\item[Normalization Transformations]21 (0 to 20)
\item[Viewing Transformations]21 (0 to 20)
\item[Fill Area]The number of points in a Fill Area primitive
is limited to 300.
\item[Polyline]If the number of points in a Polyline is larger
than 300 then they will be split into several primitives.
\end{DLtt}
The actual values of these and other parameters
may be inquired at run-time by calling the routines:
\index{GKS routine!{\protect\tt GQWKM}}
\index{GKS routine!{\protect\tt GQMNTN}}
\index{GKS routine!{\protect\tt GQLWK}}
\index{GKS routine!{\protect\tt GQLWK3}}
\index{GKS routine!{\protect\tt GQSGS}}
\index{GKS routine!{\protect\tt GQSGP}}
\begin{XMP}
CALL GQWKM(ERRIND, MXOPWK, MXACWK, MXWKAS)
CALL GQMNTN(ERRIND, MAXTNR)
CALL GQLWK(WTYPE, ERRIND, MPLBTE, MPMBTE, MTXBTE, MFABTE, MPAI, MCOLI)
or
CALL GQLWK3(WTYPE, ERRIND, MPLBTE, MPMBTE, MTXBTE, MFABTE, MPAI,
MEDBTE, MCOLI, MVTE)
CALL GQSGP(WTYPE, ERRIND, NSGP)
\end{XMP}
where the parameters are as follows:
\begin{DLtt}{123456}
\item[WTYPE]workstation type (input parameter)
\item[ERRIND]error number
\item[MXOPWK]maximum number of simultaneously open workstations
\item[MXACWK]maximum number of simultaneously active workstations
\item[MXWKAS]maximum number of workstations associated with a segment
\item[MAXTNR]maximum normalization transformation number
\item[MPLBTE]maximum number of polyline bundle table entries
\item[MPMBTE]maximum number of polymarker bundle table entries
\item[MTXBTE]maximum number of text bundle table entries
\item[MFABTE]maximum number of fill area bundle table entries
\item[MPAI]maximum number of pattern indices
\item[MEDBTE]maximum number of edge bundle table entries
\item[MCOLI]maximum number of colour indices
\item[MVTE]maximum number of view table entries
\item[NSGP]maximum number of segment priorities
\end{DLtt}
There is unfortunately no function provided to inquire the maximum available
number of segments.
\begin{figure}[h]
\begin{verbatim}
picture name=FONTS$$S
\end{verbatim}
\caption{GTS-GRAL italic, proportionally-spaced fonts}
\label{fig:fonts}
\end{figure}
\begin{figure}[h]
\begin{verbatim}
picture name=HATCH$$S
\end{verbatim}
\caption{CERN-defined hatch patterns}
\label{fig:hatch}
\end{figure}
\section{System-Dependent Considerations}
\subsection{IBM}
\index{IBM!GKSGRAL on}
\index{GKSGRAL!on IBM}
\index{HPLOT}
\index{HIGZ}
\index{GKSPACK}
\index{GRAFLIB}
The recommended way of linking GKS is to use the GKS or GRAFLIB parameters
to the {\bf CERNLIB} command to provide automatic access to just
the GKS library, or to both GKS and various higher level graphics
packages which use GKS, notably the CERN Program Library packages
GKSPACK (J551), HPLOT (Y251) and HIGZ (Q120).
Further details on what follows may be obtained via the command
{\bf FIND~CERNLIB}.
\begin{XMP}
CERNLIB GKS
or
CERNLIB GKS3D
or
CERNLIB GRAFLIB (GTS2D [or (GTS3D]
\end{XMP}
The optional parameter '(GTS2D' or '(GTS3D' gives access to the 2D or 3D
GTS-GRAL libraries. In addition, the CERNLIB command
provides access to three versions of each library: {\bf OLD},
{\bf PRO} (default), and {\bf NEW}.
For example, to access the previous library version use:
{\bf 'GRAFLIB!OLD~(GTS2D'}.
Application programs must use VS FORTRAN.
The optional INCLUDE files for the GKS parameters are kept in the data set
{\bf 'GKSINCL~MACLIB'} on the automatically accessed Q-Disk.
The basic commands and tools to use GKS are:
\begin{DLtt}{123456}
\item[CERNLIB GKS]
To access just GKS (or GKS3D for 3D)
\item[CERNLIB GRAFLIB (GTS2D or GTS3D]
To access the GKS and higher level packages
\item[VFORT gksprog]To compile.
\item[LOAD gksprog (START]To execute.
\end{DLtt}
These last two commands may be replaced by the sequence:
\begin{DLtt}{123456}
\item[VFORT gksprog (GO]To compile and go
\end{DLtt}
In this sequence the CERNLIB Exec makes available the VS FORTRAN libraries,
the CERN program library and the GKS library in the correct
order, as well as other associated files.
Any number of user libraries may be specified.
\index{include!on IBM}
To use the GKS include files one must give the command:
\begin{XMP}
GLOBAL MACLIB GKSINCL
\end{XMP}
before compiling the program. Within the code, the files may be included
by using the construct:
\index{include!enum}
\index{include!gtsdev}
\begin{XMP}
INCLUDE (ENUM)
INCLUDE (GTSDEV)
\end{XMP}
where the first file contains the ENUMeration types, and the second
contains the GTS-GRAL DEVice types. No compiler options are necessary.
An alternative method to access the information in the include files
would be via the PATCHY utility by using the KEEP sequences of
GTSDEV and ENUM stored in the PATCH GKSINCL on the GKSPACK Pam file.
To access this Pam file on IBM type GIME~CERNPAMS.
Full information on GKS under VM can be found by using the command
{\bf FIND~GKS} and for the higher level packages via
{\bf FIND~HPLOT}, {\bf FIND~GKSPACK},
{\bf FIND~NAGLIB}, {\bf FIND~HIGZ},
and {\bf FIND~PAW}.
The example programs listed in the Appendix of the
{\it GKS/GKS-3D Primer} are available on the disk accessed via
{\bf GIME~GKS} with the file names:
\index{exref}
\index{execution!on IBM}
\begin{XMP}
GKSEXn FORTRAN (where n = 1, 6)
\end{XMP}
The GKS and GKS-3D libraries released in October 1989 are compatible with
the IBM XA exploitation mode.
\subsection{Use of Segment Storage on IBM}
The CERN GKS installation uses logical unit 91 for the dynamic segment
storage file.
A small file is created on the mini-disk with the largest free writeable
space, dynamically extended as necessary, and deleted when GKS is closed.
Jobs which crash or fail to call GCLKS will leave the file
GKSDYNAM~FT91F001 on a mini-disk and it is best to delete it.
Applications which create a large number of segments may require
up to 5 cylinders of free disk space which can be put on a temporary
disk for example.
\subsection{Debugging on IBM}
\index{Debugging on IBM}
\index{IBM!debugging}
\index{IBM!DIAL (use of)}
Debugging {\it interactive} graphics programs on VM is very difficult,
but by using two terminals it can be done more comfortably, and one can
also use the interactive debugger. The procedure is as follows:
\begin{OL}
\item Login on the alphanumeric terminal as usual, then inform VM that
the graphics output will be on another terminal by the command:
\index{DIAL}
\begin{XMP}
DEF GRAF 019 3270
\end{XMP}
(Where 019 is simply a free address.)
\item Then on the graphics terminal which probably has to be connected
through index class 125, connect using the DIAL command:
\begin{XMP}
DIAL userid
\end{XMP}
\end{OL}
Now all alphanumeric I/O, debug commands and FORTRAN run-time errors
will be on the alpha terminal, and only the graphics I/O on the DIALled
terminal.
\subsection{VAX/VMS}
\index{VAX!GKSGRAL on}
\index{GKSGRAL!on VAX/VMS}
\index{HPLOT}
\index{HIGZ}
\index{GKSPACK}
\index{GRAFLIB}
The recommended way of linking GKS is to use the GKS or GRAFLIB parameters
to the {\bf CERNLIB} command to define the symbol%
\footnote{These commands also define the logical name \Lit{GKS\_ROOT}.}
'\Lit{LIB$}' which provides access to just
the GKS library, or to both GKS and various higher level graphics
packages which use GKS, notably the CERN Program Library packages
GKSPACK (J551), HPLOT (Y251) and HIGZ (Q120).
Further details on what follows may be obtained via the command
\Ucom{HELP CERNLIB}.
\begin{XMP}
CERNLIB GKS
or
CERNLIB GKS3D
or
CERNLIB GRAFLIB/GTS2D [or /GTS3D]
\end{XMP}
The optional parameter '/GTS2D' or '/GTS3D' gives access to the 2D or 3D
versions of the GTS-GRAL package.
In addition, the CERNLIB command provides access to three versions
of the libraries: {\bf OLD}, {\bf PRO} (default), and {\bf NEW}.
For example, to access the previous version of GRAFLIB use the switch
{\bf 'GRAFLIB/OLD'}.
The current default version of GKS is the version from GTS-GRAL,
and so the switch '/GTS2D' may be omitted.
The default situation is to link to shared versions of the libraries.
For non-shared versions it is necessary to add the extra switch
'/NOSH[AREABLE]'.
The basic commands and tools to use GKS are:
\begin{DLtt}{123456}
\item[CERNLIB GKS or GKS3D]
To access just GKS or GKS3D
\item[CERNLIB GKS/NOSHAREABLE]
To access a non-shared version of GKS or GKS3D
\item[CERNLIB GRAFLIB/GTS2D or /GTS3D]
To access the GKS and higher level packages
\item[FOR gksprog]To compile.
\item[LINK gksprog,'LIB\$']To link.
\item[RUN gksprog.exe]To run.
\end{DLtt}
\index{include!on VAX/VMS}
The include files used when compiling graphics programs which
define the Workstation Types and GKS Parameters (Enumerated Types)
are accessed via the logical names:
\index{include!enum}
\index{include!gtsdev}
\begin{XMP}
INCLUDE 'GKS$GTSDEV'
and
INCLUDE 'GKS$ENUM'
\end{XMP}
and the example programs listed in the appendix of the
{\it GKS/GKS-3D Primer} may be found in:
\index{execution!on VAX}
\begin{XMP}
GKS_ROOT:[DMO]GKSEXn.FOR (n = 1,6)
\end{XMP}
An alternative method to access the information in the include files
would be via the PATCHY utility by using the KEEP sequences of
GTSDEV and ENUM stored in the PATCH~GKSINCL on the GKSPACK Pam file.
On VAX VMS this is stored in:
\begin{XMP}
CERN_ROOT:[PAM]GKSPACK.PAM.
\end{XMP}
\subsection{\protect\label{sec:vstnref}VAXstation features}
\index{VAX!setting window size}
\index{VAXstation}
\index{X-Window}
In the PROduction version (3.2) of GKSGRAL the driver for the VAXStation
uses the UIS interface. To use the X-Window driver (for VAXStations
with DECWindows interface) one has to move to the new version of
GKSGRAL (3.4) which is only available on request.
The UIS driver allows a single process to
open multiple GKS workstations, each corresponding to a new window.
To use this feature it is necessary to call GOPWK once to open each workstation
with a different value in the series of UIS workstation types.
There are also several special features available for users of VAXstations
in order to manipulate the windows:
\begin{UL}
\item The logical name \Lit{GKS_WINDOW} set by the system startup procedure
\Lit{GKSSTART.COM} can point to any directory which contains a copy of
the file WINDOW.DAT. Users can redefine this logical name later
as they wish. However, if a private version of the file WINDOW.DAT
is found in the current working directory, then this one will be used
rather than \Lit{GKS_WINDOW:WINDOW.DAT}.
\item
\index{GKS routine!{\protect\tt GCSDWN}}
The CERN utility routine GCSDWN (Set Display Window) can be used for
VAXstations in order to set inside a program the display window size,
position and title (used by GOPWK), instead of using the values stored
in the file WINDOW.DAT. The function must be called {\bf before}
calling GOPWK. The calling sequence is:
\begin{XMP}
SUBROUTINE GCSDWN (IWTYPE, RWIN, TITLE)
INTEGER IWTYPE
REAL RWIN(4)
CHARACTER*(*) TITLE
\end{XMP}
Where IWTYPE is the workstation type (8601, 8602, etc.),
RWIN is the window size and position, and TITLE contains the name given
to the window by the display manager.
\item
\index{GKS routine!{\protect\tt GCCDWN}}
The CERN utility routine GCCDWN (Change Display Window) can be used for
VAXstations in order to change interactively inside a program the display
window size and position after the window has been created.
The calling sequence is:
\begin{XMP}
SUBROUTINE GCCDWN (WKID, RWIN, CODE)
INTEGER WKID
REAL RWIN(4)
INTEGER CODE
\end{XMP}
Where WKID is the workstation identifier, RWIN is the window size and
position, and CODE takes either the value '1' or '2'.
If CODE=1, then any window size can be defined but the workstation viewport
is not changed, which may change the aspect ratio.
If CODE=2, then there is a restriction placed on the window size that can be
chosen to ensure that the aspect ratio of the window contents remains
unchanged. The function uses the largest window that can fit into the size
the user has chosen and that has the same height/width ratio as the initial
window. The workstation viewport is automatically resized and the
workstation updated.
\end{UL}
For both GCSDWN and GCCDWN, the window size and position have to be given in
the same order as in the file WINDOW.DAT:
\begin{XMP}
RWIN(1) = window size in X in metres
RWIN(2) = window size in Y in metres
RWIN(3) = window position X on screen in metres
RWIN(4) = window position Y on screen in metres
\end{XMP}
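As an illustration, a 20 cm by 15 cm window placed near the bottom-left
corner of the screen might be requested as follows (the workstation type
8602 and the window title are assumptions):
\begin{XMP}
      REAL RWIN(4)
C Window size 0.20 x 0.15 m, positioned 0.01 m from the screen origin
      DATA RWIN /0.20, 0.15, 0.01, 0.01/
      CALL GCSDWN (8602, RWIN, 'MY GKS WINDOW')
\end{XMP}
Remember that GCSDWN must be called {\bf before} GOPWK.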
\subsection{\protect\label{sec:unixlib}UNIX}
\index{Unix!GKSGRAL on}
\index{GKSGRAL!on UNIX}
\begin{note}
{\it File names and directory paths under UNIX}
\footnote{UNIX is a trade mark of AT\&T}
{\it are case sensitive.}
\end{note}
Since October 1989 the CERN GTS-GRAL licence has been extended to include
general use of the company's software on any UNIX platform, including
UNICOS on the CRAY. In addition, from July, 1990, GKSGRAL on Apollo
platforms is supported under UNIX, rather than Aegis. However, affiliated
institutes with a UNIX licence will still need a licence specifically for
Apollo if they wish to receive the Apollo screen drivers.
As the UNIX system runs on many hardware platforms, only a few of which
are available at CERN, it may not be possible for the CERN Program Library
to distribute binary libraries for the particular machine a user requires.
Thus, UNIX users may have to install the software from
\index{Unix!tar file}
\index{tar file}
a TAR file as explained in Appendix on Page~\pageref{sec:unixdis}.
To use GKSGRAL or GKSGRAL-3D one requires access
to the libraries, font files and include files.
These should be made available via environment variables provided
by the system manager. For example, if the C shell is being used,
the following should be included in the .login file:
\begin{XMP}
setenv gkshome /user/gts-gral_root_directory
setenv gkslib $gkshome/gks/libs/gkslib.a
setenv gksdriv $gkshome/gks/libs/gksdriv.a
setenv gks3dlib $gkshome/gks3d/libs/gks3d.a
setenv GKS_FONTS $gkshome/gks/fonts
\end{XMP}
Thus, all variables are defined in terms of \Lit{$gkshome}, which should
be set to point to the local root directory.
Conversely, one can also store the files in a way analogous
to that used for the CERN Program Library, in which case the
environment variables should be set to:
\begin{XMP}
setenv gkslib /cern/gks/pro/lib/gkslib.a
setenv gksdriv /cern/gks/pro/lib/gksdriv.a
setenv gks3dlib /cern/gks/pro/lib/gks3dlib.a
setenv GKS_FONTS /cern/gks/pro/dat
\end{XMP}
Environment variables may be set under the Bourne or Aegis shells
as in the example:
\begin{XMP}
GKS_FONTS=/cern/gks/pro/dat; export GKS_FONTS #Bourne
or
GKS_FONTS := '/cern/gks/pro/dat'; export GKS_FONTS #Aegis.
\end{XMP}
To drive terminals attached via RS232 terminal lines, it is necessary
also to make a logical connection between the Connection Identifier
specified in the call to Open Workstation and the device.
GKSGRAL communicates with a set of devices defined by environment
variables {\it gksch01} to {\it gksch18}, where the numbers
1 to 18 correspond to the connection identifier. Thus, if the
connection identifier is set to '1', and one wishes to perform graphics
on the same terminal used for alpha-numeric commands, then under
the C shell one would need to type:
\begin{XMP}
setenv gksch01 /dev/tty
\end{XMP}
To compile and link a Fortran program 'myprog.f' use the commands:
\begin{XMP}
f77 myprog.f $gkslib $gksdriv -o myprog.exe
\end{XMP}
In this case the f77 command is used both to compile and link the
program. One can also compile and link the program separately.
However, even in this case one should probably use the f77 command to
link, rather than ld. For example, on DECstations the f77 command
automatically includes all the system libraries, whilst ld does not.
\begin{XMP}
f77 -c myprog.f -o myprog.o
\end{XMP}
\begin{note}
In general under UNIX the linker searches libraries in order.
Thus, a reference from a library at the end of the sequence to a routine
stored in an earlier library will result in an unresolved external
reference. This is the case for GKS-3D. A solution is to specify
the earlier library twice:
\begin{XMP}
f77 myprog.o $gks3dlib $gksdriv $gks3dlib -o myprog.exe
\end{XMP}
\end{note}
\index{include!on UNIX}
The include files used when compiling graphics programs which
define the Workstation Types and GKS Parameters (Enumerated Types)
are to be found in the directory \Lit{$gkshome/utl}
and/or, on systems which
follow the CERN library conventions, /cern/gks/pro/utl.
They may be accessed from a FORTRAN program as follows:
\index{include!enum}
\index{include!gtsdev}
\begin{XMP}
INCLUDE '/cern/gks/pro/utl/gks_gtsdev'
and
INCLUDE '/cern/gks/pro/utl/gks_enum'
\end{XMP}
At run time GKS reads in the files defining the software fonts.
These are accessed via an environment variable \Lit{GKS_FONTS} which should
be defined either by the system or user login procedure.
\index{execution!on UNIX}
The example programs listed in the appendix of the
{\it GKS/GKS-3D Primer} are available either in \Lit{$gkshome/dmo}
or in \Lit{/cern/gks/pro/dmo}, with the names gksexN.f (N=1 to 6).
However, one may need to edit them in order to set the desired workstation
type and to use the correct paths to the include files.
On some systems the top directory /cern may not exist, in which case
one should use the environment variable \Lit{$CERN} to point to the root
directory.
\begin{note}
{\it
File names and directory paths under UNIX are case sensitive;
\Lit{$CERN} is not equivalent to \Lit{$cern}!}
\end{note}
A selection of help files, including this one, are to be found
in \Lit{$gkshome/doc}.
\subsection{APOLLO}
\index{Apollo!GKSGRAL on}
\index{GKSGRAL!on APOLLO}
\index{Apollo!GPR}
\index{Apollo!GSR}
From July 1990 onwards only Aegis SR~10 and later system releases
will be supported. Aegis SR~9.7 library versions are obtainable,
but will not be updated. Aegis SR~10 is compatible with UNIX,
and so simplifies support. However, this affects file formats and access control,
as well as the case sensitivity of file names. Although earlier versions of AEGIS
were insensitive to the case of file names, this is no longer true from
SR~10.0 onwards. Thus, for example, the program statement:
\begin{XMP}
INCLUDE '/CERN/GKS/PRO/UTL/GKS_GTSDEV'
\end{XMP}
will not work with SR~10.0. The path name within quotes
{\bf must be in lower case}.
Other points to watch for are that a '/' character is used after the tilde
when accessing files via the naming directory,
and that when using UNIX shells every command creates a new process, so care must
be taken when using scripts to set variables that they run in the context
of the current process.
For example, by typing \Ucom{source my\_script} under the
C~shell, or \Ucom{. my\_script} under the Bourne shell.
Apart from the Appendix E metafile and PostScript drivers, the standard
CERN APOLLO libraries include drivers for both the GPR and GSR graphics
interfaces. For machines such as the DN590, with GSR hardware support,
the GSR driver will produce improved performance. This driver also
allows a single process to open multiple GKS workstations, each
corresponding to a new APOLLO window. The next release of this driver
is scheduled to support level C input, as well as a display surface size
which may be changed dynamically. To use multiple GSR windows from a single
graphics process it is necessary to call GOPWK once to open each workstation
with a different value in the series of GSR workstation types.
At run time GKS reads in the files defining the software fonts (which are
used by all drivers). These are accessed via an environment variable
\Lit{'GKS_FONTS'} which may be created by a command in the
startup or \Lit{.login} files.
At CERN, this environment variable points to the cernlib directory
\Lit{/cern/gks/pro/dat} (or \Lit{.../new/dat} or \Lit{.../old/dat},
depending on the version
required). The fonts are distributed in the directory
\Lit{$gkshome/gks/fonts}.
(In addition to the software fonts used by all drivers, the GSR driver
can use also the hardware fonts described below.)
For use of the GPR interface, the user's home directory should contain a
file (or link) called \Lit{gks_characteristic} which contains set-up
information describing the display. An example of this file may be found in:
\begin{XMP}
/cern/gks/pro/dat/gks_characteristic
\end{XMP}
If the user does not have a link from the naming directory to a
private copy of \Lit{gks_characteristic}, then GKSGRAL will attempt to read a
default version which should be made available by creating the
following link:
\begin{XMP}
crl /dev/gks_characteristic @
/cern/gks/pro/dat/gks_characteristic
\end{XMP}
(As /dev is protected, this must be done from a privileged account.)
A private copy of the file may be edited to give the
desired window size. As an example, the Workstation Type 10002
corresponds to the second line of the file.
The standard versions of this file is distributed in
\Lit{$gkshome/gks/drivers/adgpr}.
The GSR driver requires access to two configuration files, one called
\Lit{gks_workstations.config} which is similar to the GPR
\Lit{gks_characteristic} file containing set-up information describing
the display, and one called \Lit{gks_fonts.config} which lists
the available hardware fonts.
Copies of these files, modified as necessary,
may be stored in (or pointed to by links from) the user's home directory,
or default versions should be made available by creating the links:
\begin{XMP}
crl /sys/node_data/gks_workstations.config @
/cern/gks/pro/dat/gks_workstations.config
and
crl /sys/node_data/gks_fonts.config @
/cern/gks/pro/dat/gks_fonts.config
\end{XMP}
The standard versions of these files are distributed in
\Lit{$gkshome/gks/drivers/adgsr}.
As for other UNIX machines, the libraries are stored either in
\Lit{$gkshome/gks/libs} or /cern/gks/pro/lib, and it is easiest to access
them via environment variables:
\begin{XMP}
setenv gkslib /cern/gks/pro/lib/gkslib_3000.a
setenv gksdriv /cern/gks/pro/lib/gksdriv_3000.a
setenv gks3dlib /cern/gks/pro/lib/gks3d_3000.a
\end{XMP}
Where the '3000' refers to the compiler option used to produce executable modules
targeted at the DN3000 style machines with a Motorola 68020/30/40. One
can replace '3000' by '10000' for the DN10000 library versions.
To compile and link directly to the libraries use:
\begin{XMP}
/com/ftn myprog.ftn -indexl
/com/bind myprog.bin $gkslib $gksdriv -b myprog.exe
\end{XMP}
where the parameter '-indexl' is optional but recommended.
The parameter -save may also be used for safety if variables which must be
available on re-entry to a routine have not been stored in a COMMON BLOCK.
However, use of -save is very detrimental to code optimization,
especially on the DN10000. The Aegis versions of the Fortran compiler
must be used to ensure that the external global symbols use the same
naming conventions as for the libraries (i.e. lower case names with
no trailing underscore).
\index{include!on Apollo}
The include files used when compiling graphics programs which
define the Workstation Types and GKS Parameters (Enumerated Types)
are to be found in the directory '/cern/gks/pro/utl'.
(Distributed in \Lit{$gkshome/utl}.)
They may be accessed from a Fortran program as follows:
\index{include!enum}
\index{include!gtsdev}
\begin{XMP}
INCLUDE '/cern/gks/pro/utl/gks_gtsdev'
and
INCLUDE '/cern/gks/pro/utl/gks_enum'
\end{XMP}
(Under SR~10.2 the Fortran compiler accepts VMS syntax for the
INCLUDE statement.)
\index{Apollo!inlib}
As linking to INLIB (shared) versions of the libraries takes up much less
space, and is much faster, INLIB libraries are defined as follows:
\begin{XMP}
setenv gkslib_inlib /cern/gks/pro/lib/gkslib_3000.inlib
setenv gks3dlib_inlib /cern/gks/pro/lib/gks3d_3000.inlib
\end{XMP}
These may be linked using the command:
\begin{XMP}
\Ucom{/com/bind myprog.o -b myprog.exe -inlib $gkslib_inlib}
or
\Ucom{/com/bind myprog.o -b myprog.exe -inlib $gks3dlib_inlib}
\end{XMP}
\index{execution!on APOLLO}
The example programs listed in the appendix of the
{\it GKS/GKS-3D Primer} may be found in:
\begin{XMP}
/cern/gks/pro/dmo/gksexN.ftn (N = 1,6)
\end{XMP}
They are distributed in \Lit{$gkshome/dmo}.
\subsubsection{APOLLO Models}
APOLLO nodes exist with various CPU options. The latest machine, the DN10000,
will only work with libraries compiled specifically for this hardware.
All other recent APOLLO models use the standard M68020 instruction set,
on which can be run libraries compiled with the '3000' option.
Note that CERN no longer produces libraries compiled with the 'any' switch.
\subsubsection{Apollo Workstation Types}
The following consists of a list of the workstation types for
the GTS-GRAL workstation drivers installed on Apollo.
The full list may be consulted in \Lit{gks_gtsdev}, or in
Appendix on Page~\pageref{sec:gtstyp}.
\begin{DLtt}{123456}
\item[3]WISS
\item[4]Metafile Output
\item[5]Metafile Input
\item[9701-9708]Apollo GSR interface
\item[10002]Apollo DN300, DN3000, Monochrome (GPR interface)
\item[10003]Apollo DN550, DN660, Colour (GPR interface)
\item[10004]Apollo DN3000/4000, Colour (GPR interface)
\item[12201]Postscript colour portrait
\item[12202]Postscript colour landscape
\item[12203]Postscript monochrome portrait
\item[12204]Postscript monochrome landscape
\item[10201]2D-Metafile for 3D GKS
\end{DLtt}
\subsection{CRAY under UNICOS}
\index{Cray!GKSGRAL on}
\index{GKSGRAL!on CRAY}
\index{Unix!on CRAY}
As the CRAY is used as a batch production service,
only the WISS and metafile workstations have been made available in the
GKSGRAL library, which should be accessed via the 'cernlib' command.
Detailed information on use of the 'cernlib' command for the CRAY is
available under VM/CMS by typing: {\bf FIND~CRAY~CERNLIB}.
However, for those users wishing simply to make use of GKS,
then the commands to type are:
\begin{XMP}
cernlib gks
or
cernlib gks3d
\end{XMP}
which create a file \Lit{'LIB$'} in the user's working directory
which contains the required libraries (the case matters).
The cernlib command may take the switches {\bf -o} and {\bf -n}
to allow the selection of {\bf o}ld or {\bf n}ew library versions.
To compile and link the FORTRAN program 'myprog.ftn' use the commands:
\begin{XMP}
cft77 myprog.ftn
segldr -o myprog.exe myprog.o LIB$
\end{XMP}
At run time GKS reads in the files defining the software fonts.
These are accessed via an environment variable \Lit{GKS_FONTS} which is
defined by the system login procedures.
\index{include!on CRAY}
The include files used when compiling graphics programs which
define the Workstation Types and GKS Parameters (Enumerated Types)
are to be found in the directory '/cern/gks/pro/utl'.
They may be accessed from a FORTRAN program as follows:
\index{include!enum}
\index{include!gtsdev}
\begin{XMP}
INCLUDE '/cern/gks/pro/utl/gks_gtsdev'
and
INCLUDE '/cern/gks/pro/utl/gks_enum'
\end{XMP}
The example programs listed in the appendix of the
{\it GKS/GKS-3D Primer} are not available on the CRAY.
\section{\protect\label{sec:decgks}DECGKS: Coexistence with GKSGRAL and Implementation}
One of the advantages of using an ISO standard graphics package
is that users are not limited to the products of a single supplier.
Thus, although the principal GKS implementation supported at CERN
is that of GTS-GRAL, users of Digital Equipment Corporation (DEC)
machines may wish to use the DEC implementation of GKS.
This might be because of the availability of drivers, or because
the performance of DEC software on the company's own machines is
likely to be better than that of third party software suppliers
who are constrained to ensure that their libraries operate in
many different environments.
Whilst there are no major problems in moving between DECGKS and GKSGRAL
there are several implementation dependencies, and these are documented
below. A large number of routines have been added to the set of tools
in GKSPACK (see section on Page~\pageref{sec:gkspref}).
Some of these have been written in order to aid the portability of applications
between different GKS implementations by supplying information
about the Workstation Types and Connection Identifiers of a particular
implementation. Other routines have been provided to emulate extensions of
GKS available in the GKSGRAL implementation. Whilst users of GKSGRAL
will have these routines available in the GKSGRAL library,
users of DECGKS will need to link to an additional library
containing a version of GKSPACK tailored for the DECGKS implementation.
This library is called \Lit{GKSPACK_DEC.OLB}, and on the CERN VAX cluster
may be found in:
\begin{XMP}
GKS_ROOT:[LIB]GKSPACK_DEC.OLB
\end{XMP}
Implementation Dependencies:
\begin{UL}
\item Workstation Types and Connection Ids
Implementations are free to choose whichever Workstation Types and
Connection Identifiers they wish. Thus, those in use by GKSGRAL and
DECGKS do not match. The routines in the library GKSPACK go some way to
alleviating this problem (see section on Page~\pageref{sec:gkspref}).
\item Fonts and Attributes
Neither GKS, nor any other Graphics Standard, defines the shapes of
the characters corresponding to a particular font number. In addition,
a particular implementation may provide access to hardware fonts
on some devices. This also applies to hatch styles and patterns, etc.
The fonts and hatch styles available from GKSGRAL are defined in
section on Page~\pageref{sec:impref1}, and a brief comparison of the two implementations
follows:
\begin{XMP}
GTSGRAL | DECGKS
\end{XMP}
Hardware Fonts:
\begin{XMP}
See wk descr. tables | DECWINDOWS : -101 to -113
| UIS : -200 to -202
\end{XMP}
Software Fonts
\begin{XMP}
-1 to -11: normal,proport. | font 1 = font -1 = DEC GKS
| multinational font
-13 : greek |
-51 : solid filled font |
same font numbers - 100: | -2 to -23: Hershey fonts
idem but italics |
same font numbers - 200: |
idem but monospaced |
same font numbers - 300: |
idem but italics monospaced |
\end{XMP}
Line types
\begin{XMP}
| -1 to -8 DEC specific
\end{XMP}
Marker Types
\begin{XMP}
-101 to -114 GKSGRAL specific | -1 to -13 DEC specific
\end{XMP}
Fill Area Hatch Styles
\begin{XMP}
-101 to -124 (CERN specific) | -1 to -33 (UIS specific)
| -1 to -9 (DECwindows specific)
\end{XMP}
Fill Area Patterns
\begin{XMP}
None | 1 to 196 (UIS specific)
| 1 to 28 (DECwindows specific)
| -29 to -58 (DECwindows specific)
\end{XMP}
\item Data Records
Both the contents and internal format of data records used by
GKSGRAL and DECGKS are different. The format should not affect the majority
of users, who do not need to access data records explicitly.
However, users will be affected by the differences in data record
contents if they make use of facilities to initialize input devices
or use GDPs.
To help solve this problem, higher-level routines have been provided
by GTS-GRAL which hide details of the data record contents.
These include GUARC, GUBEZ1, GUCIR1, GUCIR2, GUCUR1, GUELL1, GUELL2,
and GUMEN2. The library \Lit{GKSPACK_DEC.OLB}, described in section
on Page~\pageref{sec:gkspref}, contains
emulations of these routines which work with DECGKS.
\item Metafiles
Whilst the contents of the GTS-GRAL and DECGKS metafiles are logically
the same, the file formats are not. In order that the CERN metafile
utility programs GRVIEW and GRPLOT may be used with metafiles produced
with DECGKS, an option will be introduced into GRCONV to convert them
to the same format as those written by GTS-GRAL (but not vice versa).
Until this feature is installed, anyone wishing to convert a DECGKS
metafile should contact the UCO.
\item Input
Whilst stroke input requires a trigger for each locator position in the
GTS-GRAL GKS implementation, that of DEC does not, but simply samples
the locator position at fixed time or distance intervals.
Thus, GTS-GRAL's stroke input is more or less equivalent to
calling Request Locator in a loop.
In order to provide functionality equivalent to that of GKSGRAL when
using DECGKS, a CERN-written version of GRQSK may be found in the library
\Lit{GKSPACK_DEC.OLB} (see the example call after this list).
The DECGKS implementation uses separate windows for messages and
also for string, choice, and valuator input.
The window size depends on the echo area specified in GINST, GINCH,
and GINVL.
\end{UL}
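As an illustration, a Request Stroke call in the GKS FORTRAN binding takes
the workstation identifier, the stroke device number, and the maximum number
of points, and returns a status, a normalization transformation number, the
number of points delivered, and their coordinates; the argument names below
are illustrative only:
\begin{XMP}
      CALL GRQSK (WKID, SKDNR, N, STAT, TNR, NP, PX, PY)
\end{XMP}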
| {"hexsha": "f26312715156ec438126c30a98a6747b947728f5", "size": 219442, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "gks/gksch1.tex", "max_stars_repo_name": "berghaus/cernlib-docs", "max_stars_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-24T12:30:01.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-24T12:30:01.000Z", "max_issues_repo_path": "gks/gksch1.tex", "max_issues_repo_name": "berghaus/cernlib-docs", "max_issues_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gks/gksch1.tex", "max_forks_repo_name": "berghaus/cernlib-docs", "max_forks_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.062871382, "max_line_length": 88, "alphanum_fraction": 0.77297418, "num_tokens": 58225} |
import argparse
import math
from urllib.request import urlopen
import sys
import os
import subprocess
import glob
from braceexpand import braceexpand
from types import SimpleNamespace
import os.path
from omegaconf import OmegaConf
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
torch.backends.cudnn.benchmark = False # NR: True is a bit faster, but can lead to OOM. False is more deterministic.
#torch.use_deterministic_algorithms(True) # NR: grid_sampler_2d_backward_cuda does not have a deterministic implementation
from torch_optimizer import DiffGrad, AdamP, RAdam
from perlin_numpy import generate_fractal_noise_2d
# todo: fix this mess
try:
# installed by adding github.com/openai/CLIP to sys.path
from CLIP import clip
except ImportError:
# installed by doing `pip install git+https://github.com/openai/CLIP`
from clip import clip
import kornia
import kornia.augmentation as K
import numpy as np
import imageio
import random
from einops import rearrange
from PIL import ImageFile, Image, PngImagePlugin
default_color_table = []
default_color_table.append([ 0/255.0, 0/255.0, 0/255.0])
default_color_table.append([255/255.0, 255/255.0, 255/255.0])
default_color_table.append([ 63/255.0, 40/255.0, 50/255.0])
default_color_table.append([ 38/255.0, 43/255.0, 68/255.0])
default_color_table.append([ 90/255.0, 105/255.0, 136/255.0])
default_color_table.append([139/255.0, 155/255.0, 180/255.0])
default_color_table.append([ 25/255.0, 60/255.0, 62/255.0])
default_color_table.append([ 38/255.0, 92/255.0, 66/255.0])
default_color_table.append([ 62/255.0, 137/255.0, 72/255.0])
default_color_table.append([ 99/255.0, 199/255.0, 77/255.0])
default_color_table.append([254/255.0, 231/255.0, 97/255.0])
default_color_table.append([254/255.0, 174/255.0, 52/255.0])
default_color_table.append([254/255.0, 174/255.0, 52/255.0])
default_color_table.append([247/255.0, 118/255.0, 34/255.0])
default_color_table.append([184/255.0, 111/255.0, 80/255.0])
default_color_table.append([116/255.0, 63/255.0, 57/255.0])
from scipy.cluster.vq import kmeans2
class ColorLookup(nn.Module):
"""
Maps to fixed color table
"""
def __init__(self, color_table, device, beta=10.0):
super().__init__()
self.beta = beta
if color_table is None:
print("WARNING: using built in palette")
# eventually no table would mean make up your own table?
color_table = default_color_table
print(f"color table has {len(color_table)} entries like {color_table[0:5]}")
self.color_table = torch.FloatTensor(color_table).to(device)
# https://discuss.pytorch.org/t/how-to-find-k-nearest-neighbor-of-a-tensor/51593
def forward(self, z):
B, C, H, W = z.size()
# project and flatten out space, so (B, C, H, W) -> (B*H*W, C)
# reshape z -> (batch, height, width, channel) and flatten
z = rearrange(z, 'b c h w -> b h w c').contiguous()
ind = torch.cdist(z, self.color_table).argmin(axis=-1)
z_q = torch.index_select(self.color_table, 0, ind.flatten()).view(z.shape)
loss = self.beta * torch.mean((z_q.detach()-z)**2) + \
torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
z_q = z + (z_q - z).detach()
# reshape back to match original input shape
z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
return z_q, loss
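# Example usage (illustrative; the device and image shape are arbitrary):
#   lookup = ColorLookup(None, device="cpu")
#   z_q, loss = lookup(torch.rand(1, 3, 64, 64))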
| {"hexsha": "9b7dc3a056a2b2f804d82edc3b7999e32678067f", "size": 3565, "ext": "py", "lang": "Python", "max_stars_repo_path": "colorlookup.py", "max_stars_repo_name": "noble-born/pixray", "max_stars_repo_head_hexsha": "2ec9aa04a4dd0ce6c196f5fd473af3684bb4ef87", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 343, "max_stars_repo_stars_event_min_datetime": "2021-09-09T03:41:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T18:02:37.000Z", "max_issues_repo_path": "colorlookup.py", "max_issues_repo_name": "ohwe/pixray", "max_issues_repo_head_hexsha": "93a4e441d03f1ebc53897ea67973dd8705cc18e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2021-09-12T09:45:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-22T20:57:19.000Z", "max_forks_repo_path": "colorlookup.py", "max_forks_repo_name": "ohwe/pixray", "max_forks_repo_head_hexsha": "93a4e441d03f1ebc53897ea67973dd8705cc18e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 51, "max_forks_repo_forks_event_min_datetime": "2021-09-12T15:04:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T20:01:34.000Z", "avg_line_length": 35.297029703, "max_line_length": 123, "alphanum_fraction": 0.6973352034, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1080} |
function arclen (r,z,crv,n)
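c     arclen: distance along the piecewise-linear curve crv(1:2,1:n),
c     measured from its first point to the point (r,z), which is assumed
c     to lie on one of the segments to within tolerance tol.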
c implicit double precision (a-h,o-z) dp
dimension crv(2,*)
tol=.001
arclen=0.
      if (abs(r-crv(1,1)).le.tol.and.abs(z-crv(2,1)).le.tol) return
do 10 i=2,n
r1=crv(1,i-1)
z1=crv(2,i-1)
r2=crv(1,i)
z2=crv(2,i)
dl=sqrt((r2-r1)**2+(z2-z1)**2)
if (r.lt.min(r1,r2)-tol) go to 10
if (r.gt.max(r1,r2)+tol) go to 10
if (z.lt.min(z1,z2)-tol) go to 10
if (z.gt.max(z1,z2)+tol) go to 10
go to 20
10 arclen=arclen+dl
return
20 dl=sqrt((r-r1)**2+(z-z1)**2)
arclen=arclen+dl
return
end
| {"hexsha": "b5bd308c7ae3722bd75a6c8b8316d31c2cbac411", "size": 663, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/arclen.f", "max_stars_repo_name": "imohame/LabCode", "max_stars_repo_head_hexsha": "b7fca6e58f6c26917ff4a8862ab473da282d027d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/arclen.f", "max_issues_repo_name": "imohame/LabCode", "max_issues_repo_head_hexsha": "b7fca6e58f6c26917ff4a8862ab473da282d027d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/arclen.f", "max_forks_repo_name": "imohame/LabCode", "max_forks_repo_head_hexsha": "b7fca6e58f6c26917ff4a8862ab473da282d027d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-19T08:21:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-18T02:43:24.000Z", "avg_line_length": 27.625, "max_line_length": 79, "alphanum_fraction": 0.4811463047, "num_tokens": 280} |
#pragma once
#include <atomic>
#include <stdexcept>
#include <thread>
#include <cassert>
#include <future>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <boost/date_time.hpp>
#include <boost/variant.hpp>
namespace co
{
namespace impl
{
struct Unset{};
template<typename T>
struct ValueHandling
{
using PlaceholderType = T;
using Type = boost::variant<Unset, PlaceholderType, std::exception_ptr>;
template<typename Callable>
static void setByResult(Type& value, Callable&& f)
{
try
{
value = f();
}
catch(...)
{
value = std::current_exception();
}
}
static T get(Type& value)
{
if (value.which() == 2)
std::rethrow_exception(boost::get<std::exception_ptr>(value));
return boost::get<T>(value);
}
};
struct StatelessT
{
};
template<>
struct ValueHandling<void>
{
using PlaceholderType = StatelessT;
using Type = boost::variant<Unset, PlaceholderType, std::exception_ptr>;
template<typename Callable>
static void setByResult(Type& value, Callable&& f)
{
try
{
f();
value = StatelessT{};
}
catch(...)
{
value = std::current_exception();
}
}
static void get(Type& value)
{
if (value.which() == 2)
std::rethrow_exception(boost::get<std::exception_ptr>(value));
}
};
template<>
struct ValueHandling<StatelessT>
{
using PlaceholderType = StatelessT;
using Type = boost::variant<Unset, PlaceholderType, std::exception_ptr>;
template<typename Callable>
static void setByResult(Type& value, Callable&& f)
{
try
{
value = f();
}
catch(...)
{
value = std::current_exception();
}
}
static void get(Type& value)
{
if (value.which() == 2)
std::rethrow_exception(boost::get<std::exception_ptr>(value));
}
};
template<typename T>
using Value = typename ValueHandling<T>::Type;
template<typename T>
using PlaceholderType = typename ValueHandling<T>::PlaceholderType;
template<typename T>
inline bool isSet(const boost::variant<Unset, T, std::exception_ptr>& val)
{
return val.which() != 0;
}
template<typename T>
inline bool isException(const boost::variant<Unset, T, std::exception_ptr>& val)
{
return val.which() == 2;
}
template<typename Callable>
using ResultType = decltype(std::declval<Callable>()());
template<typename Callable>
using HandlerForResult = ValueHandling<ResultType<Callable>>;
struct ContinuationTask
{
virtual void operator()() = 0;
};
class ConditionVariableTask : public ContinuationTask
{
private:
boost::condition_variable mCond;
boost::mutex mMut;
bool mDataReady{false};
public:
void operator()() override
{
{
boost::unique_lock<boost::mutex> lock(mMut);
mDataReady = true;
}
mCond.notify_one();
}
void wait()
{
boost::unique_lock<boost::mutex> lock(mMut);
mCond.wait(lock, [this]
{
return mDataReady;
});
}
template <class Rep, class Period>
bool wait_for(const std::chrono::duration<Rep, Period>& timeoutDuration)
{
boost::unique_lock<boost::mutex> lock(mMut);
return mCond.wait_for(lock, timeoutDuration, [this]
{
return mDataReady;
});
}
};
constexpr ContinuationTask* EmptyHandle{nullptr};
constexpr uintptr_t InvalidHandle{0x1};
template <typename T>
class LightFutureData
{
public:
LightFutureData() {}
template<typename T1>
LightFutureData(T1&& t)
: continuationPtr{reinterpret_cast<impl::ContinuationTask*>(InvalidHandle)}, value{std::forward<T1>(t)}
{
}
inline bool is_ready() const
{
return continuationPtr.load(std::memory_order_acquire) == reinterpret_cast<impl::ContinuationTask*>(impl::InvalidHandle);
}
inline bool is_ready_weak() const
{
static_assert(sizeof(continuationPtr) == sizeof(impl::InvalidHandle), "std::atomic<> should not add overhead");
return !memcmp(&continuationPtr, &impl::InvalidHandle, sizeof(void*));
}
inline bool await_ready()
{
return is_ready_weak();
}
inline bool await_suspend(ContinuationTask& func)
{
return suspend(func);
}
inline auto await_resume()
{
return get_unchecked();
}
inline auto get_unchecked()
{
assert(is_ready_weak());
return ValueHandling<T>::get(value);
}
auto get_blocking()
{
if (!is_ready_weak())
wait();
return get_unchecked();
}
inline auto await_synchron()
{
return get_blocking();
}
bool suspend(impl::ContinuationTask& func)
{
auto expected = impl::EmptyHandle;
// fail-order changed to memory_order_seq_cst to make MSVC happy
bool res = continuationPtr.compare_exchange_strong(expected, &func, std::memory_order_acq_rel, std::memory_order_seq_cst);
// Suspended for the second time?
assert(res || expected == reinterpret_cast<impl::ContinuationTask*>(impl::InvalidHandle));
return res;
}
void wait()
{
int cnt{0};
do
{
if (is_ready())
return;
std::this_thread::yield();
} while (cnt++ < 4);
impl::ConditionVariableTask task;
if (suspend(task))
task.wait();
}
template <class Rep, class Period>
std::future_status wait_for(const std::chrono::duration<Rep, Period>& timeout_duration)
{
if (is_ready())
return std::future_status::ready;
impl::ConditionVariableTask task;
if (suspend(task))
{
if (!task.wait_for(timeout_duration))
{
auto state = continuationPtr.exchange(impl::EmptyHandle);
if (state == &task)
return std::future_status::timeout;
else
// Suspended for the second time
assert(state == reinterpret_cast<impl::ContinuationTask*>(impl::InvalidHandle));
}
}
return std::future_status::ready;
}
void set_value(T val)
{
value = std::move(val);
on_ready();
}
void set_exception(std::exception_ptr except)
{
value = std::move(except);
on_ready();
}
private:
inline void on_ready()
{
auto continuation = reinterpret_cast<impl::ContinuationTask*>(impl::InvalidHandle);
continuation = continuationPtr.exchange(continuation, std::memory_order_acq_rel);
if (continuation)
{
// check if set two times
assert(continuation != reinterpret_cast<impl::ContinuationTask*>(impl::InvalidHandle));
(*continuation)();
}
}
std::atomic<ContinuationTask*> continuationPtr{nullptr};
Value<T> value{};
};
}
template <typename T>
class future
{
private:
std::shared_ptr<impl::LightFutureData<impl::PlaceholderType<T>>> mData;
future(std::shared_ptr<impl::LightFutureData<impl::PlaceholderType<T>>>&& data) : mData(std::move(data)) {}
public:
future() = default;
static future fromData_(std::shared_ptr<impl::LightFutureData<impl::PlaceholderType<T>>> data)
{
return future{std::move(data)};
}
bool is_ready() const
{
return mData->is_ready();
}
constexpr bool await_ready()
{
return false; // handled by return value of await_suspend()
}
inline T await_resume()
{
return get_unchecked();
}
inline T await_synchron()
{
return get();
}
inline T get_unchecked()
{
return mData->get_unchecked();
}
inline T get()
{
return mData->get_blocking();
}
inline bool valid() const
{
return mData != nullptr;
}
void wait() const
{
return mData->wait();
}
#if false
template <typename FUNC>
auto then(FUNC&& func) -> decltype(func(future<T>&&))
{
auto funcPtr =
std::make_shared<std::decay_t<FUNC>>(std::forward<FUNC>(func));
struct ThenContinuation : public impl::ContinuationTask
{
FUNC func;
std::shared_ptr<ThenContinuation> self;
void operator()() override
{
func();
self.reset();
}
};
}
#endif
template <class Rep, class Period>
std::future_status wait_for(const std::chrono::duration<Rep, Period>& timeout_duration) const
{
return mData->wait_for(timeout_duration);
}
bool await_suspend(impl::ContinuationTask& func) const
{
return mData->suspend(func);
}
};
template <typename T>
class promise
{
private:
std::shared_ptr<impl::LightFutureData<T>> mData;
public:
inline promise() : mData{std::make_shared<impl::LightFutureData<T>>()} {}
future<T> get_future() { return future<T>::fromData_(mData); }
void set_value(T value)
{
mData->set_value(std::move(value));
}
void set_exception(std::exception_ptr except)
{
mData->set_exception(std::move(except));
}
};
template <>
class promise<void>
{
private:
std::shared_ptr<impl::LightFutureData<impl::StatelessT>> mData;
public:
inline promise() : mData{std::make_shared<impl::LightFutureData<impl::StatelessT>>()} {}
future<void> get_future() { return future<void>::fromData_(mData); }
void set_value()
{
mData->set_value(impl::StatelessT{});
}
void set_exception(std::exception_ptr except)
{
mData->set_exception(std::move(except));
}
};
template <typename T>
auto make_ready_future(T&& val)
{
auto data = std::make_shared<impl::LightFutureData<impl::PlaceholderType<T>>>(std::forward<T>(val));
return future<T>::fromData_(std::move(data));
}
inline future<void> make_ready_future()
{
auto data = std::make_shared<impl::LightFutureData<impl::PlaceholderType<void>>>(impl::PlaceholderType<void>{});
return future<void>::fromData_(std::move(data));
}
template <typename T>
auto make_exceptional_future(std::exception_ptr ex)
{
auto data = std::make_shared<impl::LightFutureData<impl::PlaceholderType<T>>>();
data->set_exception(std::move(ex));
return future<T>::fromData_(std::move(data));
}
template <typename T, typename E>
auto make_exceptional_future(E ex)
{
return make_exceptional_future<T>(std::make_exception_ptr(ex));
}
}
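// Illustrative usage (an assumption-level sketch: one producer thread sets
// the value exactly once, one consumer blocks on it):
//
//   co::promise<int> p;
//   co::future<int> f = p.get_future();
//   std::thread worker([&p] { p.set_value(42); });
//   int result = f.get(); // spins briefly, then blocks until the value is set
//   worker.join();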
| {"hexsha": "35dcd02f15094ebd64f55c78103e4f69902140da", "size": 10525, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/co/future.hpp", "max_stars_repo_name": "mrpi/CoAwaitLib", "max_stars_repo_head_hexsha": "374990809ffb588453c8b9c8ab623d1e04b3ccf0", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-07-11T12:14:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-19T12:16:12.000Z", "max_issues_repo_path": "include/co/future.hpp", "max_issues_repo_name": "mrpi/CoAwaitLib", "max_issues_repo_head_hexsha": "374990809ffb588453c8b9c8ab623d1e04b3ccf0", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/co/future.hpp", "max_forks_repo_name": "mrpi/CoAwaitLib", "max_forks_repo_head_hexsha": "374990809ffb588453c8b9c8ab623d1e04b3ccf0", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-04-14T03:57:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-14T03:57:54.000Z", "avg_line_length": 22.3936170213, "max_line_length": 128, "alphanum_fraction": 0.618432304, "num_tokens": 2466} |
# %%
import numpy as np
import matplotlib.pyplot as plt
def reconstruct_with_sinc(ts,fd,t):
    """Whittaker-Shannon reconstruction: rebuild the signal at times t
    from the samples fd taken at uniformly spaced times ts."""
    n, = ts.shape
    dt = ts[1] - ts[0]
    fr = []
    for ti in t:
        # for each time point, sum the contributions of all samples
        sumf = 0.0
        for i in range(n):
            # sinc kernel sin(pi*x)/(pi*x); its limiting value at x = 0 is 1
            x = ti/dt - i
            sumf += fd[i] if x == 0 else fd[i]*np.sin(np.pi*x)/(np.pi*x)
        fr.append(sumf)
    return np.asarray(fr,dtype='f')
# %%
if __name__ == '__main__':
t = np.arange(0.0,0.6,0.001)
fa = 1.0*np.sin(2*np.pi*10*t)+0.2*np.sin(2*np.pi*6*t)
fs = 10 # Hz
ts = np.arange(0.0,0.6,1./fs) # sampling time
fd = 1.0*np.sin(2*np.pi*10*ts)+0.2*np.sin(2*np.pi*6*ts) # sampled data
plt.figure()
fr = reconstruct_with_sinc(ts,fd,t)
plt.plot(t,fa,'b-',ts,fd,'ro',t,fr,'g--')
plt.xlabel('t [sec]')
plt.ylabel('y [V]')
plt.legend(('Original','Sampled','Reconstructed'))
plt.show()
# %%
| {"hexsha": "6212ed665ec7ef37e7be35e17ce2a4320803e8bf", "size": 969, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/reconstruct_with_sinc.py", "max_stars_repo_name": "alexlib/engineering_experiments_measurements_course", "max_stars_repo_head_hexsha": "0b80d90519a2a72547ffd9ef4da2158530016196", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-05-03T09:41:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T12:39:27.000Z", "max_issues_repo_path": "scripts/reconstruct_with_sinc.py", "max_issues_repo_name": "alexlib/engineering_experiments_measurements_course", "max_issues_repo_head_hexsha": "0b80d90519a2a72547ffd9ef4da2158530016196", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-04-22T09:04:13.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-22T09:04:13.000Z", "max_forks_repo_path": "scripts/reconstruct_with_sinc.py", "max_forks_repo_name": "alexlib/engineering_experiments_measurements_course", "max_forks_repo_head_hexsha": "0b80d90519a2a72547ffd9ef4da2158530016196", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-07-02T11:39:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T15:49:42.000Z", "avg_line_length": 24.8461538462, "max_line_length": 74, "alphanum_fraction": 0.5190918473, "include": true, "reason": "import numpy", "num_tokens": 332} |
from tensorflow.keras.layers import Conv2D, Flatten, Dense
from tensorflow.keras.layers import Dropout, Lambda
from tensorflow.keras.layers import MaxPooling2D, Input
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import vgg16
import cv2, logging, datetime, pickle
import numpy as np
import pandas as pd
import helper as utl
def load_filenames(train_n, dev_n, test_n):
"""
Loading file names for train, cv and test sets.
"""
dev_n = train_n + dev_n + 1
test_n = dev_n + test_n + 1
train_imgs = list(map(lambda x: str(x) + '.jpg', np.arange(train_n)))
dev_imgs = list(map(lambda x: str(x) + '.jpg', np.arange(train_n, dev_n)))
test_imgs = list(map(lambda x: str(x) + '.jpg', np.arange(dev_n, test_n)))
return train_imgs, dev_imgs, test_imgs
def load_steering_angles(train_imgs, dev_imgs, test_imgs):
"""
Loading angles for train, cv and test set.
"""
columns = ['imgs', 'angles', 'date', 'time']
data_df = pd.read_csv('data.txt', sep=' |,', engine='python',
header=None, names=columns).set_index('imgs')
train_angles = data_df.loc[train_imgs]['angles'].to_numpy()
dev_angles = data_df.loc[dev_imgs]['angles'].to_numpy()
test_angles = data_df.loc[test_imgs]['angles'].to_numpy()
return train_angles, dev_angles, test_angles
def custom_model(dropout_rate=0.3):
"""
    A hybrid model: frozen lower VGG16 layers act as a feature extractor,
    followed by trainable convolutional and dense layers that regress the
    steering angle.
"""
base_model = vgg16.VGG16(include_top=False, weights='imagenet',
input_shape=utl.INPUT_SHAPE)
for layer in base_model.layers[:7]:
layer.trainable = False
x = Conv2D(256, 5, activation='elu', strides=(2, 2))(base_model.layers[6].output)
x = Conv2D(128, 3, activation='elu', strides=(1, 1))(x)
x = Dropout(dropout_rate)(x)
x = Flatten()(x)
x = Dense(256, activation='elu')(x)
x = Dense(128, activation='elu')(x)
x = Dense(64, activation='elu')(x)
output_layer = Dense(1, activation='linear')(x)
model = Model(inputs=base_model.input, outputs=output_layer)
return model
def train_model(model, train_images, train_angles,
val_images,
val_angles,
batch_size,
nb_epochs,
lr_rate=1e-04):
"""
Training the model.
"""
# Callbacks
log = CSVLogger('log.txt', append=True, separator=';')
checkpoint = ModelCheckpoint('model-best-{epoch:02d}.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='auto')
    # Data generators, built from the function arguments
    train_angles_dict = dict(zip(train_images, train_angles))
    val_angles_dict = dict(zip(val_images, val_angles))
    train_gen = utl.DataGenerator(train_images, train_angles_dict, utl.data_dir,
                            dim=(utl.IMAGE_HEIGHT, utl.IMAGE_WIDTH),
                            batch_size=batch_size,
                            shuffle=True,
                            training=True)
    dev_gen = utl.DataGenerator(val_images, val_angles_dict, utl.data_dir,
                            dim=(utl.IMAGE_HEIGHT, utl.IMAGE_WIDTH),
                            batch_size=batch_size,
                            shuffle=False,
                            training=False)
# Compiling and training the model.
model.compile(loss='mean_squared_error', optimizer=Adam(lr=lr_rate))
model.fit_generator(train_gen,
steps_per_epoch=utl.TRAIN_IMAGES//batch_size,
epochs=nb_epochs,
max_queue_size=30,
validation_data=dev_gen,
                        validation_steps=utl.DEV_IMAGES//batch_size,
callbacks=[checkpoint, log],
verbose=1,
shuffle=True,
use_multiprocessing=True,
workers=10)
if __name__ == '__main__':
train_imgs, dev_imgs, test_imgs = load_filenames(utl.TRAIN_IMAGES, utl.DEV_IMAGES, utl.TEST_IMAGES)
train_angles, dev_angles, test_angles = load_steering_angles(train_imgs, dev_imgs, test_imgs)
pickle.dump(test_imgs, open('test_imgs.pkl', 'wb'))
pickle.dump(test_angles, open('test_angles.pkl', 'wb'))
# initializing the custom model
model = custom_model()
train_model(model, train_imgs, train_angles, dev_imgs, dev_angles, batch_size=64, nb_epochs=15)
| {"hexsha": "eb6591eb6ca0797e9eaaf6a3e9a01e11a082a414", "size": 4773, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_model.py", "max_stars_repo_name": "abhipn/Automate-Driving-Behaviour", "max_stars_repo_head_hexsha": "8c16f9819d3528bebc73724dc4fa0eba6835471e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-08T23:13:08.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-09T21:07:49.000Z", "max_issues_repo_path": "train_model.py", "max_issues_repo_name": "abhipn/Automate-Driving-Behaviour", "max_issues_repo_head_hexsha": "8c16f9819d3528bebc73724dc4fa0eba6835471e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_model.py", "max_forks_repo_name": "abhipn/Automate-Driving-Behaviour", "max_forks_repo_head_hexsha": "8c16f9819d3528bebc73724dc4fa0eba6835471e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1229508197, "max_line_length": 104, "alphanum_fraction": 0.583699979, "include": true, "reason": "import numpy", "num_tokens": 1066} |
[STATEMENT]
lemma sequence_number_increases':
"paodv i \<TTurnstile>\<^sub>A (\<lambda>((\<xi>, _), _, (\<xi>', _)). sn \<xi> \<le> sn \<xi>')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. paodv i \<TTurnstile>\<^sub>A (\<lambda>((\<xi>, uu_), uu_, \<xi>', uu_). sn \<xi> \<le> sn \<xi>')
[PROOF STEP]
by (rule step_invariant_weakenE [OF sequence_number_increases]) (auto dest!: onllD)
 | {"llama_tokens": 169, "file": "AODV_variants_d_fwdrreqs_D_Seq_Invariants", "length": 1}
import pytest
import jax.numpy as np
import jax.random as jr
from vmfg_etc import VonMisesFisherGaussian
SEED = jr.PRNGKey(1325)
@pytest.fixture
def sample_shape():
return (4,5,3) # (B1, B2, D)
@pytest.fixture
def vmfg(sample_shape):
"""Randomly instantiate a VonMisesFisherGuassian distribution object and
use default `center` and `radius` values.
"""
seed_ = iter(jr.split(SEED, 5))
    # Unit directional vectors
mean_direction = jr.normal(next(seed_), sample_shape)
mean_direction /= np.linalg.norm(mean_direction, axis=-1, keepdims=True)
# Non-negative values
    concentration = jr.normal(next(seed_), sample_shape[:-1]) * 10 + 50
concentration = np.maximum(concentration, 5)
scale = 1 / jr.gamma(next(seed_), 1, sample_shape[:-1])
return VonMisesFisherGaussian(mean_direction,
concentration,
scale)
def test_instantiation(sample_shape, vmfg):
batch_shape = sample_shape[:-1]
event_shape = sample_shape[-1:]
# Check shapes
assert vmfg.batch_shape == batch_shape
assert vmfg.event_shape == event_shape
# Check default values for center and radius parameters
assert np.all(vmfg.center == 0)
assert np.all(vmfg.radius == 1)
def test_sample(sample_shape, vmfg):
n = 500
seed = jr.fold_in(SEED, 100)
xs = vmfg.sample(seed=seed, sample_shape=(n,))
assert xs.shape == (n, *sample_shape)
# Pick arbitrary number that lps should be greater than
lps = vmfg.log_prob(xs).mean(axis=0)
assert np.all(lps > -20) | {"hexsha": "3b53dd83b73dd9ae32ac6f91e3600d08467920ac", "size": 1599, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_vmfg.py", "max_stars_repo_name": "ezhang94/vmfg", "max_stars_repo_head_hexsha": "1ee655be6979ddb2c00b689639a4eb3c13b7f83f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_vmfg.py", "max_issues_repo_name": "ezhang94/vmfg", "max_issues_repo_head_hexsha": "1ee655be6979ddb2c00b689639a4eb3c13b7f83f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_vmfg.py", "max_forks_repo_name": "ezhang94/vmfg", "max_forks_repo_head_hexsha": "1ee655be6979ddb2c00b689639a4eb3c13b7f83f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1016949153, "max_line_length": 76, "alphanum_fraction": 0.6654158849, "include": true, "reason": "import jax", "num_tokens": 418} |
from .stationdata import build_station_list
from .stationdata import update_water_levels
from .analysis import poly_deriv
from .analysis import polyfit
import datetime
from floodsystem.datafetcher import fetch_measure_levels
import numpy as np
import matplotlib
def stations_level_over_threshold(stations, tol):
"""A function which returns a list of tuples where each tuple holds a station at which the latest relative
water level is over a given tol value and also the relative water level at that station"""
    floods = []
    update_water_levels(stations)
    for station in stations:
        if station.typical_range_consistent() and station.relative_water_level() > tol:
            floods.append((station.name, station.relative_water_level()))
    floods = sorted(floods, key=lambda tup: tup[1], reverse=True)
return floods
def stations_highest_rel_level(stations, N):
"""A function which returns a list of the N stations at which the water level relative to the typical range
is highest"""
    stations_at_risk = []
    update_water_levels(stations)
    for station in stations:
        if station.typical_range_consistent():
            stations_at_risk.append((station.name, station.relative_water_level()))
    stations_at_risk = sorted(stations_at_risk, key=lambda tup: tup[1], reverse=True)
    return stations_at_risk[0:N]
def Towns_risk_level(stations):
    '''Function which assesses the current risk level of each town using the rate of
    change of nearby river levels and the current river level relative to the typical range'''
    update_water_levels(stations)
floods = []
lowfloods = []
midfloods = []
severe = []
high = []
moderate = []
low = []
for station in stations:
try:
            if station.typical_range_consistent():
                if station.measure_id is not None:
try:
dates, levels = fetch_measure_levels(station.measure_id, datetime.timedelta(days=2))
except KeyError:
continue
if len(levels) == 0 or len(dates) == 0:
continue
hi = polyfit(dates, levels, 4)
deriv = poly_deriv(hi[0])
x = np.array(matplotlib.dates.date2num(dates))
current_der = np.polyval(deriv, x[-1]-x[0])
                    rel_level = station.relative_water_level()
                    if rel_level > 1.15:
                        floods.append(station.name)
                    if rel_level > 1.4:
                        midfloods.append(station.name)
                    if rel_level > 1:
                        lowfloods.append(station.name)
if current_der > 0.6 and station.name in floods:
if station.town in severe:
pass
else:
severe.append(station.town)
elif current_der > 0.6 or station.name in midfloods:
if station.town in high:
pass
else:
high.append(station.town)
elif current_der > 0.4 or station.name in lowfloods:
if station.town in moderate:
pass
else:
moderate.append(station.town)
else:
if station.town in low:
pass
else:
low.append(station.town)
except:
continue
'''print("Severe: ", severe)
print("\n")
print("High: ", high)
print("\n")
print("Moderate: ", moderate)
print("\n")
print("Low: ", low)'''
return severe
| {"hexsha": "8bcd1079269ca8d7691523ec1ad6464a23278fe8", "size": 4226, "ext": "py", "lang": "Python", "max_stars_repo_path": "floodsystem/flood.py", "max_stars_repo_name": "dan7267/1a-flood-risk-project-93", "max_stars_repo_head_hexsha": "d95cee987f5673d637626e1804f719371a25daa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "floodsystem/flood.py", "max_issues_repo_name": "dan7267/1a-flood-risk-project-93", "max_issues_repo_head_hexsha": "d95cee987f5673d637626e1804f719371a25daa8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "floodsystem/flood.py", "max_forks_repo_name": "dan7267/1a-flood-risk-project-93", "max_forks_repo_head_hexsha": "d95cee987f5673d637626e1804f719371a25daa8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.431372549, "max_line_length": 111, "alphanum_fraction": 0.5712257454, "include": true, "reason": "import numpy", "num_tokens": 851} |
import pytest
import os
import numpy as np
from numpy.testing import assert_almost_equal
import time
import random
from termcolor import cprint
from itertools import product
import cProfile
import pstats
from compas.datastructures import Mesh
from compas.geometry import Frame, Transformation
from compas.robots import Joint
from compas_fab.robots import Configuration, AttachedCollisionMesh, CollisionMesh, JointConstraint
from pybullet_planning import link_from_name, get_link_pose, draw_pose, get_bodies, multiply, Pose, Euler, set_joint_positions, \
joints_from_names, quat_angle_between, get_collision_fn, create_obj, unit_pose
from pybullet_planning import wait_if_gui, wait_for_duration, remove_all_debug
from pybullet_planning import plan_cartesian_motion, plan_cartesian_motion_lg, compute_inverse_kinematics
from pybullet_planning import randomize, elapsed_time, BLUE, GREEN, RED, LockRenderer
import ikfast_abb_irb4600_40_255
# from compas_fab.backends import PyBulletClient
from compas_fab_pychoreo.client import PyChoreoClient
from compas_fab_pychoreo.utils import values_as_list
from compas_fab_pychoreo.conversions import pose_from_frame, frame_from_pose
from compas_fab_pychoreo.backend_features.pychoreo_frame_variant_generator import PyChoreoFiniteEulerAngleVariantGenerator
from compas_fab_pychoreo_examples.ik_solver import ik_abb_irb4600_40_255, InverseKinematicsSolver, get_ik_fn_from_ikfast
def compute_circle_path(circle_center=np.array([2, 0, 0.2]), circle_r=0.2, angle_range=(-0.5*np.pi, 0.5*np.pi)):
# generate a circle path to test IK and Cartesian planning
ee_poses = []
n_pt = int(abs(angle_range[1]-angle_range[0]) / (np.pi/180 * 5))
for a in np.linspace(*angle_range, num=n_pt):
pt = circle_center + circle_r*np.array([np.cos(a), np.sin(a), 0])
circ_pose = multiply(Pose(point=pt, euler=Euler(yaw=a+np.pi/2)), Pose(euler=Euler(roll=np.pi*3/4)))
draw_pose(circ_pose, length=0.01)
ee_poses.append(circ_pose)
return ee_poses
def ik_wrapper(compas_fab_ik_fn):
# convert a compas_fab ik solver to a function that conforms with pybullet_planning convention
def fn(pose):
configurations = compas_fab_ik_fn(frame_from_pose(pose))
return [np.array(configuration.values) for configuration in configurations if configuration is not None]
return fn
def compute_trajectory_cost(trajectory, init_conf_val=np.zeros(6)):
cost = np.linalg.norm(init_conf_val - np.array(trajectory.points[0].values))
for traj_pt1, traj_pt2 in zip(trajectory.points[:-1], trajectory.points[1:]):
cost += np.linalg.norm(np.array(traj_pt1.values) - np.array(traj_pt2.values))
return cost
#####################################
@pytest.mark.collision_check_abb
@pytest.mark.parametrize("tool_type", [
('static'),
('actuated'),
])
def test_collision_checker(abb_irb4600_40_255_setup, itj_TC_g1_cms, itj_beam_cm, column_obstacle_cm, base_plate_cm,
itj_tool_changer_grasp_transf, itj_gripper_grasp_transf, itj_beam_grasp_transf, tool_type,
itj_tool_changer_urdf_path, itj_g1_urdf_path,
viewer, diagnosis):
# modified from https://github.com/yijiangh/pybullet_planning/blob/dev/tests/test_collisions.py
urdf_filename, semantics = abb_irb4600_40_255_setup
move_group = 'bare_arm'
ee_touched_link_names = ['link_6']
with PyChoreoClient(viewer=viewer) as client:
with LockRenderer():
robot = client.load_robot(urdf_filename)
robot.semantics = semantics
client.disabled_collisions = robot.semantics.disabled_collisions
if tool_type == 'static':
for _, ee_cm in itj_TC_g1_cms.items():
client.add_collision_mesh(ee_cm)
else:
client.add_tool_from_urdf('TC', itj_tool_changer_urdf_path)
client.add_tool_from_urdf('g1', itj_g1_urdf_path)
# * add static obstacles
client.add_collision_mesh(base_plate_cm)
client.add_collision_mesh(column_obstacle_cm)
ik_joint_names = robot.get_configurable_joint_names(group=move_group)
ik_joint_types = robot.get_joint_types_by_names(ik_joint_names)
flange_link_name = robot.get_end_effector_link_name(group=move_group)
tool0_tf = Transformation.from_frame(client.get_link_frame_from_name(robot, flange_link_name))
tool0_from_tool_changer_base = itj_tool_changer_grasp_transf
tool0_from_gripper_base = itj_gripper_grasp_transf
client.set_object_frame('^{}'.format('TC'), Frame.from_transformation(tool0_tf*tool0_from_tool_changer_base))
client.set_object_frame('^{}'.format('g1'), Frame.from_transformation(tool0_tf*tool0_from_gripper_base))
names = client._get_collision_object_names('^{}'.format('g1')) + \
client._get_collision_object_names('^{}'.format('TC'))
for ee_name in names:
attach_options = {'robot' : robot}
if tool_type == 'actuated':
attached_child_link_name = 'toolchanger_base' if 'TC' in ee_name else 'gripper_base'
attach_options.update({'attached_child_link_name' : attached_child_link_name})
client.add_attached_collision_mesh(AttachedCollisionMesh(CollisionMesh(None, ee_name),
flange_link_name, touch_links=ee_touched_link_names), options=attach_options)
# client._print_object_summary()
# wait_if_gui('EE attached.')
if tool_type == 'actuated':
# lower 0.0008 upper 0.01
tool_bodies = client._get_bodies('^{}'.format('itj_PG500'))
tool_conf = Configuration(values=[0.01, 0.01], types=[Joint.PRISMATIC, Joint.PRISMATIC],
joint_names=['joint_gripper_jaw_l', 'joint_gripper_jaw_r'])
for b in tool_bodies:
client._set_body_configuration(b, tool_conf)
wait_if_gui('Open')
tool_conf = Configuration(values=[0.0008, 0.0008], types=[Joint.PRISMATIC, Joint.PRISMATIC],
joint_names=['joint_gripper_jaw_l', 'joint_gripper_jaw_r'])
for b in tool_bodies:
client._set_body_configuration(b, tool_conf)
wait_if_gui('Close')
cprint('safe start conf', 'green')
conf = Configuration(values=[0.]*6, types=ik_joint_types, joint_names=ik_joint_names)
assert not client.check_collisions(robot, conf, options={'diagnosis':diagnosis})
cprint('joint over limit', 'cyan')
conf = Configuration(values=[0., 0., 1.5, 0, 0, 0], types=ik_joint_types, joint_names=ik_joint_names)
assert client.check_collisions(robot, conf, options={'diagnosis':diagnosis})
cprint('attached gripper-obstacle collision - column', 'cyan')
vals = [-0.33161255787892263, -0.43633231299858238, 0.43633231299858238, -1.0471975511965976, 0.087266462599716474, 0.0]
# conf = Configuration(values=vals, types=ik_joint_types, joint_names=ik_joint_names)
# client.set_robot_configuration(robot, conf)
# wait_if_gui()
assert client.check_collisions(robot, conf, options={'diagnosis':diagnosis})
#* attach beam
client.add_collision_mesh(itj_beam_cm)
tool0_tf = Transformation.from_frame(client.get_link_frame_from_name(robot, flange_link_name))
tool0_from_beam_base = itj_beam_grasp_transf
client.set_object_frame('^{}$'.format('itj_beam_b2'), Frame.from_transformation(tool0_tf*tool0_from_beam_base))
client.add_attached_collision_mesh(AttachedCollisionMesh(CollisionMesh(None, 'itj_beam_b2'),
flange_link_name, touch_links=[]), options={'robot' : robot})
# wait_if_gui('beam attached.')
cprint('attached beam-robot body self collision', 'cyan')
vals = [0.73303828583761843, -0.59341194567807209, 0.54105206811824214, -0.17453292519943295, 1.064650843716541, 1.7278759594743862]
conf = Configuration(values=vals, types=ik_joint_types, joint_names=ik_joint_names)
assert client.check_collisions(robot, conf, options={'diagnosis':diagnosis})
cprint('attached beam-obstacle collision - column', 'cyan')
vals = [0.087266462599716474, -0.19198621771937624, 0.20943951023931956, 0.069813170079773182, 1.2740903539558606, 0.069813170079773182]
conf = Configuration(values=vals, types=ik_joint_types, joint_names=ik_joint_names)
assert client.check_collisions(robot, conf, options={'diagnosis':diagnosis})
cprint('attached beam-obstacle collision - ground', 'cyan')
vals = [-0.017453292519943295, 0.6108652381980153, 0.20943951023931956, 1.7627825445142729, 1.2740903539558606, 0.069813170079773182]
conf = Configuration(values=vals, types=ik_joint_types, joint_names=ik_joint_names)
assert client.check_collisions(robot, conf, options={'diagnosis':diagnosis})
cprint('robot link-obstacle collision - column', 'cyan')
vals = [-0.41887902047863912, 0.20943951023931956, 0.20943951023931956, 1.7627825445142729, 1.2740903539558606, 0.069813170079773182]
conf = Configuration(values=vals, types=ik_joint_types, joint_names=ik_joint_names)
assert client.check_collisions(robot, conf, options={'diagnosis':diagnosis})
cprint('robot link-obstacle collision - ground', 'cyan')
vals = [0.33161255787892263, 1.4660765716752369, 0.27925268031909273, 0.17453292519943295, 0.22689280275926285, 0.54105206811824214]
conf = Configuration(values=vals, types=ik_joint_types, joint_names=ik_joint_names)
assert client.check_collisions(robot, conf, options={'diagnosis':diagnosis})
cprint('Sweeping collision', 'cyan')
vals = [-0.12217304763960307, -0.73303828583761843, 0.83775804095727824, -2.4609142453120048, 1.2391837689159739, -0.85521133347722145]
conf1 = Configuration(values=vals, types=ik_joint_types, joint_names=ik_joint_names)
assert not client.check_collisions(robot, conf1, options={'diagnosis':diagnosis})
# wait_if_gui()
vals = [-0.12217304763960307, -0.73303828583761843, 0.83775804095727824, -2.4958208303518914, -1.5533430342749532, -0.85521133347722145]
conf2 = Configuration(values=vals, types=ik_joint_types, joint_names=ik_joint_names)
assert not client.check_collisions(robot, conf2, options={'diagnosis':diagnosis})
# wait_if_gui()
assert client.check_sweeping_collisions(robot, conf1, conf2, options={'diagnosis':diagnosis, 'line_width':3.0})
wait_if_gui("Finished.")
#####################################
@pytest.mark.frame_gen
def test_frame_variant_generator(viewer):
pose = unit_pose()
frame = frame_from_pose(pose)
options = {'delta_yaw' : np.pi/6, 'yaw_sample_size' : 30}
frame_gen = PyChoreoFiniteEulerAngleVariantGenerator(options).generate_frame_variant
with PyChoreoClient(viewer=viewer) as client:
draw_pose(pose)
cnt = 0
for frame in frame_gen(frame):
draw_pose(pose_from_frame(frame))
cnt += 1
assert cnt == options['yaw_sample_size']
wait_if_gui()
remove_all_debug()
# overwrite class options
cnt = 0
new_options = {'delta_yaw' : np.pi/3, 'yaw_sample_size' : 60}
for frame in frame_gen(frame, new_options):
draw_pose(pose_from_frame(frame))
cnt += 1
assert cnt == new_options['yaw_sample_size']
wait_if_gui()
#####################################
@pytest.mark.circle_cartesian
@pytest.mark.parametrize("planner_ik_conf", [
('IterativeIK', 'default-single'),
('LadderGraph', 'default-single'),
('LadderGraph', 'lobster-analytical'),
('LadderGraph', 'ikfast-analytical')
])
def test_circle_cartesian(fixed_waam_setup, viewer, planner_ik_conf):
urdf_filename, semantics, robotA_tool = fixed_waam_setup
planner_id, ik_engine = planner_ik_conf
move_group = 'robotA'
print('\n')
print('='*10)
cprint('Cartesian planner {} with IK engine {}'.format(planner_id, ik_engine), 'yellow')
with PyChoreoClient(viewer=viewer) as client:
robot = client.load_robot(urdf_filename)
robot.semantics = semantics
robot_uid = client.get_robot_pybullet_uid(robot)
base_link_name = robot.get_base_link_name(group=move_group)
ik_joint_names = robot.get_configurable_joint_names(group=move_group)
tool_link_name = robot.get_end_effector_link_name(group=move_group)
base_frame = robot.get_base_frame(group=move_group)
if ik_engine == 'default-single':
ik_solver = None
elif ik_engine == 'lobster-analytical':
ik_solver = InverseKinematicsSolver(robot, move_group, ik_abb_irb4600_40_255, base_frame, robotA_tool.frame)
elif ik_engine == 'ikfast-analytical':
ikfast_fn = get_ik_fn_from_ikfast(ikfast_abb_irb4600_40_255.get_ik)
ik_solver = InverseKinematicsSolver(robot, move_group, ikfast_fn, base_frame, robotA_tool.frame)
else:
raise ValueError('invalid ik engine name.')
init_conf = Configuration.from_revolute_values(np.zeros(6), ik_joint_names)
# replace default ik function with a customized one
if ik_solver is not None:
client.planner.inverse_kinematics = ik_solver.inverse_kinematics_function()
tool_link = link_from_name(robot_uid, tool_link_name)
robot_base_link = link_from_name(robot_uid, base_link_name)
ik_joints = joints_from_names(robot_uid, ik_joint_names)
# * draw EE pose
tcp_pose = get_link_pose(robot_uid, tool_link)
draw_pose(tcp_pose)
# * generate multiple circles
circle_center = np.array([2, 0, 0.2])
circle_r = 0.2
# full_angle = np.pi
# full_angle = 2*2*np.pi
angle_range = (-0.5*np.pi, 0.5*np.pi)
# total num of path pts, one path point per 5 degree
ee_poses = compute_circle_path(circle_center, circle_r, angle_range)
ee_frames_WCF = [frame_from_pose(ee_pose) for ee_pose in ee_poses]
options = {
'planner_id' : planner_id
}
if planner_id == 'LadderGraph':
client.set_robot_configuration(robot, init_conf)
st_time = time.time()
# options.update({'ik_function' : lambda pose: compute_inverse_kinematics(ikfast_abb_irb4600_40_255.get_ik, pose, sampled=[])})
trajectory = client.plan_cartesian_motion(robot, ee_frames_WCF, group=move_group, options=options)
cprint('W/o frame variant solving time: {}'.format(elapsed_time(st_time)), 'blue')
cprint('Cost: {}'.format(compute_trajectory_cost(trajectory, init_conf_val=init_conf.values)), 'blue')
print('-'*5)
f_variant_options = {'delta_yaw' : np.pi/3, 'yaw_sample_size' : 5}
options.update({'frame_variant_generator' : PyChoreoFiniteEulerAngleVariantGenerator(options=f_variant_options)})
print('With frame variant config: {}'.format(f_variant_options))
client.set_robot_configuration(robot, init_conf)
st_time = time.time()
trajectory = client.plan_cartesian_motion(robot, ee_frames_WCF, group=move_group, options=options)
cprint('{} solving time: {}'.format('With frame variant ' if planner_id == 'LadderGraph' else 'Direct', elapsed_time(st_time)), 'cyan')
cprint('Cost: {}'.format(compute_trajectory_cost(trajectory, init_conf_val=init_conf.values)), 'cyan')
if trajectory is None:
cprint('Client Cartesian planner {} CANNOT find a plan!'.format(planner_id), 'red')
else:
cprint('Client Cartesian planning {} find a plan!'.format(planner_id), 'green')
wait_if_gui('Start sim.')
time_step = 0.03
for traj_pt in trajectory.points:
client.set_robot_configuration(robot, traj_pt)
wait_for_duration(time_step)
wait_if_gui()
#####################################
@pytest.mark.plan_motion
@pytest.mark.parametrize("tool_type", [
('static'),
('actuated'),
])
def test_plan_motion(abb_irb4600_40_255_setup, itj_TC_g1_cms, itj_beam_cm, column_obstacle_cm, base_plate_cm,
itj_tool_changer_grasp_transf, itj_gripper_grasp_transf, itj_beam_grasp_transf, tool_type,
itj_tool_changer_urdf_path, itj_g1_urdf_path,
viewer, diagnosis):
# modified from https://github.com/yijiangh/pybullet_planning/blob/dev/tests/test_collisions.py
urdf_filename, semantics = abb_irb4600_40_255_setup
move_group = 'bare_arm'
ee_touched_link_names = ['link_6']
with PyChoreoClient(viewer=viewer) as client:
with LockRenderer():
robot = client.load_robot(urdf_filename)
robot.semantics = semantics
client.disabled_collisions = robot.semantics.disabled_collisions
if tool_type == 'static':
for _, ee_cm in itj_TC_g1_cms.items():
client.add_collision_mesh(ee_cm)
else:
client.add_tool_from_urdf('TC', itj_tool_changer_urdf_path)
client.add_tool_from_urdf('g1', itj_g1_urdf_path)
# * add static obstacles
client.add_collision_mesh(base_plate_cm)
client.add_collision_mesh(column_obstacle_cm)
ik_joint_names = robot.get_configurable_joint_names(group=move_group)
ik_joint_types = robot.get_joint_types_by_names(ik_joint_names)
flange_link_name = robot.get_end_effector_link_name(group=move_group)
tool0_tf = Transformation.from_frame(client.get_link_frame_from_name(robot, flange_link_name))
tool0_from_tool_changer_base = itj_tool_changer_grasp_transf
tool0_from_gripper_base = itj_gripper_grasp_transf
client.set_object_frame('^{}'.format('TC'), Frame.from_transformation(tool0_tf*tool0_from_tool_changer_base))
client.set_object_frame('^{}'.format('g1'), Frame.from_transformation(tool0_tf*tool0_from_gripper_base))
names = client._get_collision_object_names('^{}'.format('g1')) + \
client._get_collision_object_names('^{}'.format('TC'))
for ee_name in names:
attach_options = {'robot' : robot}
if tool_type == 'actuated':
attached_child_link_name = 'toolchanger_base' if 'TC' in ee_name else 'gripper_base'
attach_options.update({'attached_child_link_name' : attached_child_link_name})
client.add_attached_collision_mesh(AttachedCollisionMesh(CollisionMesh(None, ee_name),
flange_link_name, touch_links=ee_touched_link_names), options=attach_options)
#* attach beam
client.add_collision_mesh(itj_beam_cm)
tool0_tf = Transformation.from_frame(client.get_link_frame_from_name(robot, flange_link_name))
tool0_from_beam_base = itj_beam_grasp_transf
client.set_object_frame('^{}$'.format('itj_beam_b2'), Frame.from_transformation(tool0_tf*tool0_from_beam_base))
client.add_attached_collision_mesh(AttachedCollisionMesh(CollisionMesh(None, 'itj_beam_b2'),
flange_link_name, touch_links=[]), options={'robot' : robot})
wait_if_gui('beam attached.')
vals = [-1.4660765716752369, -0.22689280275926285, 0.27925268031909273, 0.17453292519943295, 0.22689280275926285, -0.22689280275926285]
start_conf = Configuration(values=vals, types=ik_joint_types, joint_names=ik_joint_names)
# client.set_robot_configuration(robot, start_conf)
# wait_if_gui()
# vals = [0.05235987755982989, -0.087266462599716474, -0.05235987755982989, 1.7104226669544429, 0.13962634015954636, -0.43633231299858238]
vals = [0.034906585039886591, 0.68067840827778847, 0.15707963267948966, -0.89011791851710809, -0.034906585039886591, -2.2514747350726849]
end_conf = Configuration(values=vals, types=ik_joint_types, joint_names=ik_joint_names)
# client.set_robot_configuration(robot, end_conf)
# wait_if_gui()
plan_options = {
'diagnosis' : True,
'resolutions' : 0.05
}
goal_constraints = robot.constraints_from_configuration(end_conf, [0.01], [0.01], group=move_group)
st_time = time.time()
trajectory = client.plan_motion(robot, goal_constraints, start_configuration=start_conf, group=move_group, options=plan_options)
print('Solving time: {}'.format(elapsed_time(st_time)))
if trajectory is None:
cprint('Client motion planner CANNOT find a plan!', 'red')
# assert False, 'Client motion planner CANNOT find a plan!'
# TODO warning
else:
cprint('Client motion planning find a plan!', 'green')
wait_if_gui('Start sim.')
time_step = 0.03
for traj_pt in trajectory.points:
client.set_robot_configuration(robot, traj_pt)
wait_for_duration(time_step)
wait_if_gui("Finished.")
#####################################
@pytest.mark.ik_abb
@pytest.mark.parametrize("ik_engine", [
('default-single'),
('lobster-analytical'),
('ikfast-analytical')
])
def test_ik(fixed_waam_setup, viewer, ik_engine):
urdf_filename, semantics, robotA_tool = fixed_waam_setup
move_group = 'robotA'
with PyChoreoClient(viewer=viewer) as client:
robot = client.load_robot(urdf_filename)
robot.semantics = semantics
robot_uid = client.get_robot_pybullet_uid(robot)
base_link_name = robot.get_base_link_name(group=move_group)
ik_joint_names = robot.get_configurable_joint_names(group=move_group)
tool_link_name = robot.get_end_effector_link_name(group=move_group)
base_frame = robot.get_base_frame(group=move_group)
if ik_engine == 'default-single':
ik_solver = None
elif ik_engine == 'lobster-analytical':
ik_solver = InverseKinematicsSolver(robot, move_group, ik_abb_irb4600_40_255, base_frame, robotA_tool.frame)
elif ik_engine == 'ikfast-analytical':
ikfast_fn = get_ik_fn_from_ikfast(ikfast_abb_irb4600_40_255.get_ik)
ik_solver = InverseKinematicsSolver(robot, move_group, ikfast_fn, base_frame, robotA_tool.frame)
else:
raise ValueError('invalid ik engine name.')
ik_joints = joints_from_names(robot_uid, ik_joint_names)
tool_link = link_from_name(robot_uid, tool_link_name)
ee_poses = compute_circle_path()
if ik_solver is not None:
# replace default ik function with a customized one
client.planner.inverse_kinematics = ik_solver.inverse_kinematics_function()
ik_time = 0
failure_cnt = 0
# ik function sanity check
for p in ee_poses:
frame_WCF = frame_from_pose(p)
st_time = time.time()
qs = client.inverse_kinematics(robot, frame_WCF, group=move_group, options={})
ik_time += elapsed_time(st_time)
if qs is None:
cprint('no ik solution found!', 'red')
# assert False, 'no ik solution found!'
failure_cnt += 1
elif isinstance(qs, list):
if not(len(qs) > 0 and any([qv is not None for qv in qs])):
cprint('no ik solution found', 'red')
failure_cnt += 1
if len(qs) > 0:
# cprint('{} solutions found!'.format(len(qs)), 'green')
for q in randomize(qs):
if q is not None:
assert isinstance(q, Configuration)
client.set_robot_configuration(robot, q)
# set_joint_positions(robot_uid, ik_joints, q.values)
tcp_pose = get_link_pose(robot_uid, tool_link)
assert_almost_equal(tcp_pose[0], p[0], decimal=3)
assert_almost_equal(quat_angle_between(tcp_pose[1], p[1]), 0, decimal=3)
elif isinstance(qs, Configuration):
# cprint('Single solutions found!', 'green')
q = qs
# set_joint_positions(robot_uid, ik_joints, q.values)
client.set_robot_configuration(robot, q)
tcp_pose = get_link_pose(robot_uid, tool_link)
assert_almost_equal(tcp_pose[0], p[0], decimal=3)
assert_almost_equal(quat_angle_between(tcp_pose[1], p[1]), 0, decimal=3)
else:
raise ValueError('invalid ik return.')
wait_if_gui('FK - IK agrees.')
cprint('{} | Success {}/{} | Average ik time: {} | avg over {} calls.'.format(ik_engine, len(ee_poses)-failure_cnt, len(ee_poses),
ik_time/len(ee_poses), len(ee_poses)), 'cyan')
###################################################
@pytest.mark.client
def test_client(fixed_waam_setup, viewer):
# https://github.com/gramaziokohler/algorithmic_details/blob/e1d5e24a34738822638a157ca29a98afe05beefd/src/algorithmic_details/accessibility/reachability_map.py#L208-L231
urdf_filename, semantics, _ = fixed_waam_setup
move_group = 'robotA'
with PyChoreoClient(viewer=viewer) as client:
robot = client.load_robot(urdf_filename)
robot.semantics = semantics
robot_uid = client.get_robot_pybullet_uid(robot)
tool_link_name = robot.get_end_effector_link_name(group=move_group)
# * draw EE pose
tool_link = link_from_name(robot_uid, tool_link_name)
tcp_pose = get_link_pose(robot_uid, tool_link)
draw_pose(tcp_pose)
assert robot_uid in get_bodies()
wait_if_gui()
| {"hexsha": "d9aca62ebb06e6b7ff4243cbb5f1c955a7c12cb0", "size": 25821, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_pb_client.py", "max_stars_repo_name": "yijiangh/compas_fab_pychoreo", "max_stars_repo_head_hexsha": "42a03c85331f6bab9383e162c62b099a34593d5f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-14T02:19:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T02:19:50.000Z", "max_issues_repo_path": "tests/test_pb_client.py", "max_issues_repo_name": "yijiangh/compas_fab_pychoreo", "max_issues_repo_head_hexsha": "42a03c85331f6bab9383e162c62b099a34593d5f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-09-17T14:12:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T13:29:21.000Z", "max_forks_repo_path": "tests/test_pb_client.py", "max_forks_repo_name": "yijiangh/compas_fab_pychoreo", "max_forks_repo_head_hexsha": "42a03c85331f6bab9383e162c62b099a34593d5f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.8474903475, "max_line_length": 173, "alphanum_fraction": 0.6847527207, "include": true, "reason": "import numpy,from numpy", "num_tokens": 6391} |
\section*{Week 4: Intangible Assets; Statement of Cash Flows}
\subsection*{Intangible Assets}
Intangible assets include:
\begin{itemize}[noitemsep,topsep=0pt]
\item Intellectual property (Patents, Copyrights, Trademarks)
\item Licenses, Franchise rights
\item Brand value
\item Customer lists
\item Goodwill
\end{itemize}
\subsection*{Statement of Cash Flows}
Financial statements links
$A = L + SE$ \\
$\Delta A = \Delta L + \Delta SE $ \\
$\Delta Cash = - \Delta NonCashAssets + \Delta L + \Delta SE $ \\
It has three sections
\begin{itemize}[noitemsep,topsep=0pt]
\item \textbf{Operating}: Primary business activities.
\item \textbf{Investing}: Acquiring and selling productive assets.
\item \textbf{Financing}: Related to external sources of financing
\end{itemize}
\subsection*{Working Capital (WC)}
$WC = CurrentAssets - CurrentLiabilities$ \\
$Non-Cash-WC = CurrentAssets - Cash - CurrentLiabilities $ \\
CFO : Cash Flow from operations
NI: Net Income
$ CFO = NI - Accruals $ \\
Start with Net Income, then: \\
1) \textbf{Add non cash expenses}: expenses that reduce NI \\
2) \textbf{Add (subtract) gains (losses)} associated with investing activities \\
3) \textbf{Add (subtract)} changes in non-cash WC. e.g. if A/R decreases by \$100, add \$100 to NI. If A/R decreases by by \$100, subtract \$100 to NI. \\
$FreeCashFlow = OperatingCashFlow - CapEx$ \\
Accruals are the difference between net income and cash flow from operations, that is: \\
$Net Income = Cash Flow from Operations + Accruals$ \\
$Accruals = Net Income - Cash Flow from Operations$ \\
Change in Cash = Cash Flow From Operations (CFO) + Cash Flow From Investing + Cash Flow From Financing
$Beg. Retained Earnings + Net Income – Dividends(Divs) = End. Retained Earnings$
| {"hexsha": "1aa4867723f03c6bd5ad6e0a53d6ce6424c5f09f", "size": 1776, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "15.516x/assets/week_04.tex", "max_stars_repo_name": "j053g/cheatsheets", "max_stars_repo_head_hexsha": "22f7a84879c04d44de40467ddcc0f6e551b812c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-12-14T08:49:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T17:26:15.000Z", "max_issues_repo_path": "15.516x/assets/week_04.tex", "max_issues_repo_name": "j053g/cheatsheets", "max_issues_repo_head_hexsha": "22f7a84879c04d44de40467ddcc0f6e551b812c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "15.516x/assets/week_04.tex", "max_forks_repo_name": "j053g/cheatsheets", "max_forks_repo_head_hexsha": "22f7a84879c04d44de40467ddcc0f6e551b812c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6206896552, "max_line_length": 154, "alphanum_fraction": 0.7257882883, "num_tokens": 500} |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 29 2016
Author: Cedric Vallee
"""
import os
import re
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import Scraper as scraper
import Helper as helper
# The naive Bayes classifier from last week was just checking if a word was present or not in the text.
# MultinomialNB will help us create a naive Bayes classifier evaluating a text according to the number of times a word appears.
# The folder Data contains all the financial reports we downloaded during the previous week (sector: Industrial Gooods; code:20)
# We manually added a suffix '_neg' or '_pos' to the name of a report according to its sentiment polarity.
# Function to scrape the MDAs from the reports (inspired by Chong Wee Tan Group 7)
def getMDAfromText(filename,text):
try:
soup = BeautifulSoup(text, "lxml")
fullText = scraper.scrapeByAnchorTag(soup)
if fullText is not None:
print("{0}\tScraped By Anchor".format(filename))
return fullText
fullText = scraper.scrapeByRegex(soup)
if fullText is not None:
print("{0}\tScraped By Regex".format(filename))
return fullText
if fullText is None:
print("{0}\tUnable to scrape".format(filename))
text = ''.join(soup.findAll(text=True))
text.replace("’","'")
helper.writeToDirectoryFile("debug",filename.replace("sec-10-k/",""),text)
return None
except UnicodeEncodeError:
print("{0}\tUnicodeEncodeError".format(filename))
return None
# Function to put the MDAs in a panda frame
def get_dataset(path):
dataset=[]
for filename in os.listdir(path):
if filename.endswith("pos"): # If report is tagged as positive
doc = open(path + filename,"r")
mda = getMDAfromText(filename,doc)
if mda is not None:
soup = BeautifulSoup(mda, "html.parser")
t=soup.get_text()
dataset.append([re.sub('[^0-9a-zA-Z]+', '', t),filename,"pos"])
elif filename.endswith("neg"): # If report is tagged as negative
doc = open(path + filename,"r")
mda = getMDAfromText(filename,doc)
if mda is not None:
soup = BeautifulSoup(mda, "html.parser")
t=soup.get_text()
dataset.append([re.sub('[^0-9a-zA-Z]+', '', t),filename,"neg"])
dataset.append(['The crisis lack deceiving unpredictable bad bad bad','test1','neg'])
dataset.append(['the crisis non compliance losses','test2','neg'])
dataset.append(['We lost thousands of dollars and exited exit','test3','neg'])
dataset.append(['The company is ruined and we filed bankruptcy','test4','neg'])
dataset.append(['We modified our plans since results from last year were bad','test5','neg'])
dataset.append(['niche and unique opportunity to grow and grow develop acquisition acquisitions and contracts good prospects','test6','pos'])
dataset.append(['encouraging results and better outlook promising sales and new market main actor first','test7','pos'])
dataset = pd.DataFrame(dataset)
dataset.columns = ['MD&A_Text','Filename','Sentiment'] # Return the pd.DataFrame obtained from the list
return dataset
# Function to split the dataset into training and testing set
def split(df,test_ratio):
return train_test_split(df, test_size = test_ratio)
### Main function
dataset = get_dataset("../data/")
train, test = split(dataset,0.5)
vectorizer = CountVectorizer(stop_words="english",max_features = 50) # Build a vocabulary that only consider the top max_features ordered by term frequency across the corpus
counts = vectorizer.fit_transform(train['MD&A_Text'].values)
classifier = MultinomialNB(fit_prior="False") # Should we put fit_prior True or False? If False, a uniform prior will be used.
targets = train['Sentiment'].values
classifier.fit(counts, targets)
counts_test = vectorizer.transform(test['MD&A_Text'].values)
predictions = classifier.predict(counts_test)
test['Prediction'] = pd.Series(predictions, index=test.index)
print(test) # Print result dataframe (filenames, actual and predicted sentiments)
tab = pd.crosstab(test['Sentiment'], test['Prediction'], rownames=['Actual'], colnames=['Predicted'], margins=True) # Print confusion matrix
print(tab)
print(classification_report(test['Sentiment'], test['Prediction'])) # Print accuracy, precision, recall, F measure
| {"hexsha": "396e1c0811740c6ea1af83ec8d461cac54abfcd5", "size": 5016, "ext": "py", "lang": "Python", "max_stars_repo_path": "FinancialAnalystV1/main.py", "max_stars_repo_name": "CedricVallee/pythonFinancialAnalyst", "max_stars_repo_head_hexsha": "64c562134de7801aeef3981f4ef4ac5d5b5fd70b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FinancialAnalystV1/main.py", "max_issues_repo_name": "CedricVallee/pythonFinancialAnalyst", "max_issues_repo_head_hexsha": "64c562134de7801aeef3981f4ef4ac5d5b5fd70b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FinancialAnalystV1/main.py", "max_forks_repo_name": "CedricVallee/pythonFinancialAnalyst", "max_forks_repo_head_hexsha": "64c562134de7801aeef3981f4ef4ac5d5b5fd70b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.6990291262, "max_line_length": 173, "alphanum_fraction": 0.6694577352, "include": true, "reason": "import numpy", "num_tokens": 1086} |
// =-=-=-=-=-=-=-
// local includes
#include "s3_archive_operations.hpp"
#include "libirods_s3.hpp"
// =-=-=-=-=-=-=-
// irods includes
#include <msParam.h>
#include <rcConnect.h>
#include <rodsLog.h>
#include <rodsErrorTable.h>
#include <objInfo.h>
#include <rsRegReplica.hpp>
#include <dataObjOpr.hpp>
#include <irods_string_tokenize.hpp>
#include <irods_resource_plugin.hpp>
#include <irods_resource_redirect.hpp>
// =-=-=-=-=-=-=-
// boost includes
#include <boost/lexical_cast.hpp>
#include <boost/algorithm/string.hpp>
extern size_t g_retry_count;
extern size_t g_retry_wait;
extern S3ResponseProperties savedProperties;
namespace irods_s3_archive {
// =-=-=-=-=-=-=-
// interface for file registration
irods::error s3RegisteredPlugin( irods::plugin_context& _ctx) {
return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
}
// =-=-=-=-=-=-=-
// interface for file unregistration
irods::error s3UnregisteredPlugin( irods::plugin_context& _ctx) {
return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
}
// =-=-=-=-=-=-=-
// interface for file modification
irods::error s3ModifiedPlugin( irods::plugin_context& _ctx) {
return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
}
// =-=-=-=-=-=-=-
// interface for POSIX create
irods::error s3FileCreatePlugin( irods::plugin_context& _ctx) {
return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
}
// =-=-=-=-=-=-=-
// interface for POSIX Open
irods::error s3FileOpenPlugin( irods::plugin_context& _ctx) {
return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
}
// =-=-=-=-=-=-=-
// interface for POSIX Read
irods::error s3FileReadPlugin( irods::plugin_context& _ctx,
void* _buf,
const int _len ) {
return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
}
// =-=-=-=-=-=-=-
// interface for POSIX Write
irods::error s3FileWritePlugin( irods::plugin_context& _ctx,
const void* _buf,
const int _len ) {
return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
}
// =-=-=-=-=-=-=-
// interface for POSIX Close
irods::error s3FileClosePlugin( irods::plugin_context& _ctx ) {
return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
}
// =-=-=-=-=-=-=-
// interface for POSIX Unlink
irods::error s3FileUnlinkPlugin(
irods::plugin_context& _ctx) {
// =-=-=-=-=-=-=-
// check incoming parameters
irods::error ret = s3CheckParams( _ctx );
if(!ret.ok()) {
return PASS(ret);
}
size_t retry_count_limit = S3_DEFAULT_RETRY_COUNT;
_ctx.prop_map().get<size_t>(s3_retry_count_size_t, retry_count_limit);
size_t retry_wait = S3_DEFAULT_RETRY_WAIT_SEC;
_ctx.prop_map().get<size_t>(s3_wait_time_sec_size_t, retry_wait);
irods::file_object_ptr file_obj = boost::dynamic_pointer_cast<
irods::file_object>(
_ctx.fco());
std::string repl_policy;
ret = _ctx.prop_map().get<std::string>(
REPL_POLICY_KEY,
repl_policy);
// If the policy is set then determine if we should
// actually unlink the S3 object or not. If several
// iRODS replicas point at the same S3 object we only
// need to unlink in S3 if we are the last S3 registration
if(ret.ok() && REPL_POLICY_VAL == repl_policy) {
try {
std::string vault_path;
ret = _ctx.prop_map().get<std::string>(
irods::RESOURCE_PATH,
vault_path);
if(!ret.ok()) {
std::stringstream msg;
msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] " << ret.result();
return PASSMSG(msg.str(), ret);
}
if(!determine_unlink_for_repl_policy(
_ctx.comm(),
file_obj->logical_path(),
vault_path)) {
return SUCCESS();
}
}
catch(const irods::exception& _e) {
return ERROR(
_e.code(),
_e.what());
}
} // if repl_policy
std::string bucket;
std::string key;
ret = parseS3Path(file_obj->physical_path(), bucket, key, _ctx.prop_map());
if(!ret.ok()) {
return PASS(ret);
}
ret = s3InitPerOperation(_ctx.prop_map());
if(!ret.ok()) {
return PASS(ret);
}
std::string key_id;
std::string access_key;
ret = s3GetAuthCredentials(_ctx.prop_map(), key_id, access_key);
if(!ret.ok()) {
return PASS(ret);
}
S3BucketContext bucketContext;
bzero(&bucketContext, sizeof(bucketContext));
bucketContext.bucketName = bucket.c_str();
bucketContext.protocol = s3GetProto(_ctx.prop_map());
bucketContext.stsDate = s3GetSTSDate(_ctx.prop_map());
bucketContext.uriStyle = S3UriStylePath;
bucketContext.accessKeyId = key_id.c_str();
bucketContext.secretAccessKey = access_key.c_str();
callback_data_t data;
S3ResponseHandler responseHandler = { 0, &responseCompleteCallback };
size_t retry_cnt = 0;
do {
bzero (&data, sizeof (data));
std::string&& hostname = s3GetHostname(_ctx.prop_map());
bucketContext.hostName = hostname.c_str();
data.pCtx = &bucketContext;
S3_delete_object(
&bucketContext,
key.c_str(), 0,
&responseHandler,
&data);
if(data.status != S3StatusOK) {
s3_sleep( retry_wait, 0 );
}
} while((data.status != S3StatusOK) &&
S3_status_is_retryable(data.status) &&
(++retry_cnt < retry_count_limit));
if(data.status != S3StatusOK) {
std::stringstream msg;
msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] ";
msg << " - Error unlinking the S3 object: \"";
msg << file_obj->physical_path();
msg << "\"";
if(data.status >= 0) {
msg << " - \"";
msg << S3_get_status_name((S3Status)data.status);
msg << "\"";
}
return ERROR(S3_FILE_UNLINK_ERR, msg.str());
}
return SUCCESS();
} // s3FileUnlinkPlugin
// =-=-=-=-=-=-=-
// interface for POSIX Stat
irods::error s3FileStatPlugin(
irods::plugin_context& _ctx,
struct stat* _statbuf )
{
irods::error result = SUCCESS();
size_t retry_count_limit = S3_DEFAULT_RETRY_COUNT;
_ctx.prop_map().get<size_t>(s3_retry_count_size_t, retry_count_limit);
size_t retry_wait = S3_DEFAULT_RETRY_WAIT_SEC;
_ctx.prop_map().get<size_t>(s3_wait_time_sec_size_t, retry_wait);
// =-=-=-=-=-=-=-
// check incoming parameters
irods::error ret = s3CheckParams( _ctx );
if((result = ASSERT_PASS(ret, "[resource_name=%s] Invalid parameters or physical path.", get_resource_name(_ctx.prop_map()).c_str())).ok()) {
// =-=-=-=-=-=-=-
// get ref to fco
irods::data_object_ptr _object = boost::dynamic_pointer_cast<irods::data_object>(_ctx.fco());
bzero (_statbuf, sizeof (struct stat));
if(_object->physical_path().find("/", _object->physical_path().size()) != std::string::npos) {
// A directory
_statbuf->st_mode = S_IFDIR;
} else {
irods::error ret;
std::string bucket;
std::string key;
std::string key_id;
std::string access_key;
ret = parseS3Path(_object->physical_path(), bucket, key, _ctx.prop_map());
if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed parsing the S3 bucket and key from the physical path: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(),
_object->physical_path().c_str())).ok()) {
ret = s3InitPerOperation( _ctx.prop_map() );
if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to initialize the S3 system.", get_resource_name(_ctx.prop_map()).c_str())).ok()) {
ret = s3GetAuthCredentials(_ctx.prop_map(), key_id, access_key);
if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get the S3 credentials properties.", get_resource_name(_ctx.prop_map()).c_str())).ok()) {
callback_data_t data;
S3BucketContext bucketContext;
bzero (&bucketContext, sizeof (bucketContext));
bucketContext.bucketName = bucket.c_str();
bucketContext.protocol = s3GetProto(_ctx.prop_map());
bucketContext.stsDate = s3GetSTSDate(_ctx.prop_map());
bucketContext.uriStyle = S3UriStylePath;
bucketContext.accessKeyId = key_id.c_str();
bucketContext.secretAccessKey = access_key.c_str();
S3ResponseHandler headObjectHandler = { &responsePropertiesCallback, &responseCompleteCallback };
size_t retry_cnt = 0;
do {
bzero (&data, sizeof (data));
std::string&& hostname = s3GetHostname(_ctx.prop_map());
bucketContext.hostName = hostname.c_str();
data.pCtx = &bucketContext;
S3_head_object(&bucketContext, key.c_str(), 0, &headObjectHandler, &data);
if (data.status != S3StatusOK) s3_sleep( retry_wait, 0 );
} while ( (data.status != S3StatusOK) && S3_status_is_retryable(data.status) && (++retry_cnt < retry_count_limit ) );
if (data.status != S3StatusOK) {
std::stringstream msg;
msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] ";
msg << " - Error stat'ing the S3 object: \"" << _object->physical_path() << "\"";
if (data.status >= 0) {
msg << " - \"" << S3_get_status_name((S3Status)data.status) << "\"";
}
result = ERROR(S3_FILE_STAT_ERR, msg.str());
}
else {
_statbuf->st_mode = S_IFREG;
_statbuf->st_nlink = 1;
_statbuf->st_uid = getuid ();
_statbuf->st_gid = getgid ();
_statbuf->st_atime = _statbuf->st_mtime = _statbuf->st_ctime = savedProperties.lastModified;
_statbuf->st_size = savedProperties.contentLength;
}
}
}
}
}
}
if( !result.ok() ) {
std::stringstream msg;
msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] "
<< result.result();
rodsLog(LOG_ERROR, msg.str().c_str());
}
return result;
}
// =-=-=-=-=-=-=-
// interface for POSIX Fstat
irods::error s3FileFstatPlugin( irods::plugin_context& _ctx,
struct stat* _statbuf ) {
return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) );
} // s3FileFstatPlugin
// =-=-=-=-=-=-=-
// interface for POSIX lseek
irods::error s3FileLseekPlugin( irods::plugin_context& _ctx,
const size_t _offset,
const int _whence ) {
return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) );
} // wosFileLseekPlugin
// =-=-=-=-=-=-=-
// interface for POSIX mkdir
irods::error s3FileMkdirPlugin( irods::plugin_context& _ctx ) {
return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) );
} // s3FileMkdirPlugin
// =-=-=-=-=-=-=-
// interface for POSIX mkdir
irods::error s3FileRmdirPlugin( irods::plugin_context& _ctx ) {
return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) );
} // s3FileRmdirPlugin
// =-=-=-=-=-=-=-
// interface for POSIX opendir
irods::error s3FileOpendirPlugin( irods::plugin_context& _ctx ) {
return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) );
} // s3FileOpendirPlugin
// =-=-=-=-=-=-=-
// interface for POSIX closedir
irods::error s3FileClosedirPlugin( irods::plugin_context& _ctx) {
return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) );
} // s3FileClosedirPlugin
// =-=-=-=-=-=-=-
// interface for POSIX readdir
irods::error s3FileReaddirPlugin( irods::plugin_context& _ctx,
struct rodsDirent** _dirent_ptr ) {
return ERROR( SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__) );
} // s3FileReaddirPlugin
// =-=-=-=-=-=-=-
// interface for POSIX rename
irods::error s3FileRenamePlugin( irods::plugin_context& _ctx,
const char* _new_file_name )
{
irods::error result = SUCCESS();
irods::error ret;
std::string key_id;
std::string access_key;
// retrieve archive naming policy from resource plugin context
std::string archive_naming_policy = CONSISTENT_NAMING; // default
ret = _ctx.prop_map().get<std::string>(ARCHIVE_NAMING_POLICY_KW, archive_naming_policy); // get plugin context property
if(!ret.ok()) {
std::stringstream msg;
msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] "
<< ret.result();
rodsLog(LOG_ERROR, msg.str().c_str());
}
boost::to_lower(archive_naming_policy);
irods::file_object_ptr object = boost::dynamic_pointer_cast<irods::file_object>(_ctx.fco());
// if archive naming policy is decoupled we're done
if (archive_naming_policy == DECOUPLED_NAMING) {
object->file_descriptor(ENOSYS);
return SUCCESS();
}
ret = s3GetAuthCredentials(_ctx.prop_map(), key_id, access_key);
if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get S3 credential properties.", get_resource_name(_ctx.prop_map()).c_str())).ok()) {
// copy the file to the new location
ret = s3CopyFile(_ctx, object->physical_path(), _new_file_name, key_id, access_key,
s3GetProto(_ctx.prop_map()), s3GetSTSDate(_ctx.prop_map()));
if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to copy file from: \"%s\" to \"%s\".", get_resource_name(_ctx.prop_map()).c_str(),
object->physical_path().c_str(), _new_file_name)).ok()) {
// delete the old file
ret = s3FileUnlinkPlugin(_ctx);
result = ASSERT_PASS(ret, "[resource_name=%s] Failed to unlink old S3 file: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(),
object->physical_path().c_str());
}
}
// issue 1855 (irods issue 4326) - resources must now set physical path
object->physical_path(_new_file_name);
return result;
} // s3FileRenamePlugin
// =-=-=-=-=-=-=-
// interface for POSIX truncate
irods::error s3FileTruncatePlugin(
irods::plugin_context& _ctx )
{
return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
} // s3FileTruncatePlugin
// interface to determine free space on a device given a path
irods::error s3FileGetFsFreeSpacePlugin(
irods::plugin_context& _ctx )
{
return ERROR(SYS_NOT_SUPPORTED, boost::str(boost::format("[resource_name=%s] %s") % get_resource_name(_ctx.prop_map()) % __FUNCTION__));
} // s3FileGetFsFreeSpacePlugin
irods::error s3FileCopyPlugin( int mode, const char *srcFileName,
const char *destFileName)
{
return ERROR(SYS_NOT_SUPPORTED, __FUNCTION__);
}
// =-=-=-=-=-=-=-
// s3StageToCache - This routine is for testing the TEST_STAGE_FILE_TYPE.
// Just copy the file from filename to cacheFilename. optionalInfo info
// is not used.
irods::error s3StageToCachePlugin(
irods::plugin_context& _ctx,
const char* _cache_file_name )
{
irods::error result = SUCCESS();
// =-=-=-=-=-=-=-
// check incoming parameters
irods::error ret = s3CheckParams( _ctx );
if((result = ASSERT_PASS(ret, "[resource_name=%s] Invalid parameters or physical path.", get_resource_name(_ctx.prop_map()).c_str())).ok()) {
struct stat statbuf;
std::string key_id;
std::string access_key;
irods::file_object_ptr object = boost::dynamic_pointer_cast<irods::file_object>(_ctx.fco());
ret = s3FileStatPlugin(_ctx, &statbuf);
if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed stating the file: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(),
object->physical_path().c_str())).ok()) {
if((result = ASSERT_ERROR((statbuf.st_mode & S_IFREG) != 0, S3_FILE_STAT_ERR, "[resource_name=%s] Error stating the file: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(),
object->physical_path().c_str())).ok()) {
if((result = ASSERT_ERROR(object->size() <= 0 || object->size() == static_cast<size_t>(statbuf.st_size), SYS_COPY_LEN_ERR,
"[resource_name=%s] Error for file: \"%s\" inp data size: %ld does not match stat size: %ld.", get_resource_name(_ctx.prop_map()).c_str(),
object->physical_path().c_str(), object->size(), statbuf.st_size)).ok()) {
ret = s3GetAuthCredentials(_ctx.prop_map(), key_id, access_key);
if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get S3 credential properties.", get_resource_name(_ctx.prop_map()).c_str())).ok()) {
ret = s3GetFile( _cache_file_name, object->physical_path(), statbuf.st_size, key_id, access_key, _ctx.prop_map());
result = ASSERT_PASS(ret, "[resource_name=%s] Failed to copy the S3 object: \"%s\" to the cache: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(),
object->physical_path().c_str(), _cache_file_name);
}
}
}
}
}
return result;
} // s3StageToCachePlugin
// =-=-=-=-=-=-=-
// s3SyncToArch - This routine is for testing the TEST_STAGE_FILE_TYPE.
// Just copy the file from cacheFilename to filename. optionalInfo info
// is not used.
irods::error s3SyncToArchPlugin(
irods::plugin_context& _ctx,
const char* _cache_file_name )
{
irods::error result = SUCCESS();
// =-=-=-=-=-=-=-
// check incoming parameters
irods::error ret = s3CheckParams( _ctx );
if((result = ASSERT_PASS(ret, "[resource_name=%s] Invalid parameters or physical path.", get_resource_name(_ctx.prop_map()).c_str())).ok()) {
struct stat statbuf;
int status;
std::string key_id;
std::string access_key;
irods::file_object_ptr object = boost::dynamic_pointer_cast<irods::file_object>(_ctx.fco());
status = stat(_cache_file_name, &statbuf);
int err_status = UNIX_FILE_STAT_ERR - errno;
if((result = ASSERT_ERROR(status >= 0, err_status, "[resource_name=%s] Failed to stat cache file: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(),
_cache_file_name)).ok()) {
if((result = ASSERT_ERROR((statbuf.st_mode & S_IFREG) != 0, UNIX_FILE_STAT_ERR, "[resource_name=%s] Cache file: \"%s\" is not a file.", get_resource_name(_ctx.prop_map()).c_str(),
_cache_file_name)).ok()) {
ret = s3GetAuthCredentials(_ctx.prop_map(), key_id, access_key);
if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get S3 credential properties.", get_resource_name(_ctx.prop_map()).c_str())).ok()) {
// retrieve archive naming policy from resource plugin context
std::string archive_naming_policy = CONSISTENT_NAMING; // default
ret = _ctx.prop_map().get<std::string>(ARCHIVE_NAMING_POLICY_KW, archive_naming_policy); // get plugin context property
if(!ret.ok()) {
std::stringstream msg;
msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] "
<< ret.result();
rodsLog(LOG_ERROR, msg.str().c_str());
}
boost::to_lower(archive_naming_policy);
// if archive naming policy is decoupled
// we use the object's reversed id as S3 key name prefix
if (archive_naming_policy == DECOUPLED_NAMING) {
// extract object name and bucket name from physical path
std::vector< std::string > tokens;
irods::string_tokenize(object->physical_path(), "/", tokens);
std::string bucket_name = tokens.front();
std::string object_name = tokens.back();
// reverse object id
std::string obj_id = boost::lexical_cast<std::string>(object->id());
std::reverse(obj_id.begin(), obj_id.end());
// make S3 key name
std::ostringstream s3_key_name;
s3_key_name << "/" << bucket_name << "/" << obj_id << "/" << object_name;
// update physical path
object->physical_path(s3_key_name.str());
}
ret = s3PutCopyFile(S3_PUTFILE, _cache_file_name, object->physical_path(), statbuf.st_size, key_id, access_key, _ctx.prop_map());
result = ASSERT_PASS(ret, "[resource_name=%s] Failed to copy the cache file: \"%s\" to the S3 object: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(),
_cache_file_name, object->physical_path().c_str());
}
}
}
}
if( !result.ok() ) {
std::stringstream msg;
msg << "[resource_name=" << get_resource_name(_ctx.prop_map()) << "] "
<< result.result();
rodsLog(LOG_ERROR, msg.str().c_str());
}
return result;
} // s3SyncToArchPlugin
// =-=-=-=-=-=-=-
// used to allow the resource to determine which host
// should provide the requested operation
irods::error s3RedirectPlugin(
irods::plugin_context& _ctx,
const std::string* _opr,
const std::string* _curr_host,
irods::hierarchy_parser* _out_parser,
float* _out_vote )
{
irods::error result = SUCCESS();
irods::error ret;
// =-=-=-=-=-=-=-
// check the context validity
ret = _ctx.valid< irods::file_object >();
if((result = ASSERT_PASS(ret, "[resource_name=%s] Invalid resource context.", get_resource_name(_ctx.prop_map()).c_str())).ok()) {
// =-=-=-=-=-=-=-
// check incoming parameters
if((result = ASSERT_ERROR(_opr && _curr_host && _out_parser && _out_vote, SYS_INVALID_INPUT_PARAM,
"[resource_name=%s] One or more NULL pointer arguments.", get_resource_name(_ctx.prop_map()).c_str())).ok()) {
std::string resc_name;
// =-=-=-=-=-=-=-
// cast down the chain to our understood object type
irods::file_object_ptr file_obj = boost::dynamic_pointer_cast< irods::file_object >( _ctx.fco() );
// =-=-=-=-=-=-=-
// get the name of this resource
ret = _ctx.prop_map().get< std::string >( irods::RESOURCE_NAME, resc_name );
if((result = ASSERT_PASS(ret, "[resource_name=%s] Failed to get resource name property.", get_resource_name(_ctx.prop_map()).c_str())).ok() ) {
// =-=-=-=-=-=-=-
// add ourselves to the hierarchy parser by default
_out_parser->add_child( resc_name );
// =-=-=-=-=-=-=-
// test the operation to determine which choices to make
if( irods::OPEN_OPERATION == (*_opr) ) {
// =-=-=-=-=-=-=-
// call redirect determination for 'get' operation
result = s3RedirectOpen(
_ctx.comm(),
_ctx.prop_map(),
file_obj,
resc_name,
(*_curr_host),
(*_out_vote));
} else if( irods::CREATE_OPERATION == (*_opr) ) {
// =-=-=-=-=-=-=-
// call redirect determination for 'create' operation
result = s3RedirectCreate( _ctx.prop_map(), *file_obj, resc_name, (*_curr_host), (*_out_vote) );
}
else {
result = ASSERT_ERROR(false, SYS_INVALID_INPUT_PARAM, "[resource_name=%s] Unknown redirect operation: \"%s\".", get_resource_name(_ctx.prop_map()).c_str(),
_opr->c_str());
}
}
}
}
return result;
} // s3RedirectPlugin
// =-=-=-=-=-=-=-
// code which would rebalance the resource, S3 does not rebalance.
irods::error s3FileRebalance(
irods::plugin_context& _ctx ) {
return SUCCESS();
} // s3FileRebalance
irods::error s3FileNotifyPlugin( irods::plugin_context& _ctx,
const std::string* str ) {
return SUCCESS();
} // s3FileNotifyPlugin
}
| {"hexsha": "a1b90d9cf8101569889841787057bcfca97ab1c2", "size": 29124, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "s3/s3_archive_operations.cpp", "max_stars_repo_name": "korydraughn/irods_resource_plugin_s3", "max_stars_repo_head_hexsha": "8a06e03685982755c008e2d6a97603458995af3e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2016-06-09T09:26:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T13:02:36.000Z", "max_issues_repo_path": "s3/s3_archive_operations.cpp", "max_issues_repo_name": "korydraughn/irods_resource_plugin_s3", "max_issues_repo_head_hexsha": "8a06e03685982755c008e2d6a97603458995af3e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 198.0, "max_issues_repo_issues_event_min_datetime": "2015-02-27T18:37:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:25:58.000Z", "max_forks_repo_path": "s3/s3_archive_operations.cpp", "max_forks_repo_name": "korydraughn/irods_resource_plugin_s3", "max_forks_repo_head_hexsha": "8a06e03685982755c008e2d6a97603458995af3e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 15.0, "max_forks_repo_forks_event_min_datetime": "2015-09-23T18:50:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T14:10:31.000Z", "avg_line_length": 44.6687116564, "max_line_length": 195, "alphanum_fraction": 0.5316577393, "num_tokens": 6477} |
[STATEMENT]
lemma (in Order) Iod_less:
"\<lbrakk>T \<subseteq> carrier D; a \<in> T; b \<in> T\<rbrakk> \<Longrightarrow> (a \<prec>\<^bsub>Iod D T\<^esub> b) = (a \<prec> b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>T \<subseteq> carrier D; a \<in> T; b \<in> T\<rbrakk> \<Longrightarrow> a \<prec>\<^bsub>Iod D T\<^esub> b = a \<prec> b
[PROOF STEP]
apply (simp add:oless_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>T \<subseteq> carrier D; a \<in> T; b \<in> T\<rbrakk> \<Longrightarrow> (a \<preceq>\<^bsub>Iod D T\<^esub> b \<and> a \<noteq> b) = (a \<preceq> b \<and> a \<noteq> b)
[PROOF STEP]
apply (simp add:Iod_le)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done | {"llama_tokens": 327, "file": "Group-Ring-Module_Algebra1", "length": 3} |
import numpy as np
import matplotlib.pyplot as plt
def gradient_pbl(
lidar_profile: np.ndarray,
min_grad: float = -2,
max_grad: float = 0.5,
) -> np.ndarray:
"""Gives the pblh heights given profiles
Args:
lidar_profile (np.ndarray): 2D array of lidar profile
max_grad (float, optional): A max thereshold for the gradient avoiding clouds or other interferences. Defaults to None.
max_height (int, optional): Max height to seek PBL (in meters). Defaults to 3000.
Returns:
np.ndarray: 1D array of pbl heights
"""
safe_profile = lidar_profile.copy()
safe_profile[safe_profile <= 0] = 1e-10
dimension = safe_profile.ndim
if dimension == 1:
gradient = np.gradient(np.log10(safe_profile))
else:
gradient = np.gradient(np.log10(safe_profile))[1]
# gradient2 = np.gradient(gradient)[1]
# num = 0
# final = 300
# plt.plot(gradient[num][10:final])
# plt.plot(np.gradient(gradient[num][10:final]))
# plt.show()
if max_grad is not None:
gradient[gradient < min_grad] = 0
gradient[gradient > 0] = 0
# gradient2[gradient2 > 0] = 0
# plt.plot(gradient[num][10:final])
# plt.plot(gradient2[num][10:final])
# plt.show()
if max_grad is not None:
gradient[gradient > max_grad] = 0
min_axis = 0 if dimension == 1 else 1
mins = np.argmin(gradient, axis=min_axis)
return mins
def variance_pbl(
lidar_profile: np.ndarray,
window_size: int = 10,
) -> np.ndarray:
window_number = lidar_profile.shape[0] // window_size
window_element = np.arange(window_number) * window_size
variance = np.zeros([window_number, lidar_profile.shape[1]])
for i, window in enumerate(window_element):
start = window
end = start + window_size
temp_var = np.var(lidar_profile[start:end, :], axis=0)
variance[i, :] = temp_var
# var_window = np.var(lidar_profile[start:end], axis=0)
# variance = np.hstack([variance, var_window])
variance_vote = np.argmax(variance, axis=1)
return window_element, variance_vote
def haar(array_shape: int, a: float = 1, b: float = 1 / 2) -> np.ndarray:
x = np.arange(0, array_shape)
return np.piecewise(
x,
[
np.logical_and(b - a / 2 <= x, x <= b),
np.logical_and(b <= x, x <= b + a / 2),
],
[1, -1, 0],
)
def wavelet_pbl(
lidar_profile: np.ndarray,
a: int = 4,
) -> np.ndarray:
def single_row_wavelet(row: np.ndarray) -> np.ndarray:
res = np.zeros(row.shape)
for [idx], _ in np.ndenumerate(row):
_fn = row * haar(row.shape[0], a, (2 * idx + 1) / 2)
_int = np.sum(_fn)
res[idx] += _int
res[0:a] = np.nan
res[-a:] = np.nan
return res / np.sqrt(a)
wavelets = np.apply_along_axis(single_row_wavelet, 1, lidar_profile)
wavelet_vote = np.nanargmax(wavelets, axis=1)
return wavelet_vote
| {"hexsha": "dd6043274a122f2029ff3de14eb4702a65896b86", "size": 3017, "ext": "py", "lang": "Python", "max_stars_repo_path": "lidar_pbl/core/methods.py", "max_stars_repo_name": "jdlar1/lidar-pbl", "max_stars_repo_head_hexsha": "6eb605c25719b77abe6e6f676f098e47c0d91292", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lidar_pbl/core/methods.py", "max_issues_repo_name": "jdlar1/lidar-pbl", "max_issues_repo_head_hexsha": "6eb605c25719b77abe6e6f676f098e47c0d91292", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lidar_pbl/core/methods.py", "max_forks_repo_name": "jdlar1/lidar-pbl", "max_forks_repo_head_hexsha": "6eb605c25719b77abe6e6f676f098e47c0d91292", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2347826087, "max_line_length": 127, "alphanum_fraction": 0.6092144514, "include": true, "reason": "import numpy", "num_tokens": 850} |
module Dbcritic.Check.PrimaryKey
import Control.IOExcept
import Dbcritic.Check
import Dbcritic.Libpq
mkIssue : String -> Issue
mkIssue table =
let
identifier = [ table ]
description = "The table ‘" ++ table ++ "’ is missing a primary key constraint."
problems = [ "Rows cannot be individually addressed when updating or deleting them."
, "Rows cannot be individually addressed by potential foreign keys."
, "Some tools expect tables to have primary keys to function properly." ]
solutions = [ "Create a primary key constraint on ‘" ++ table ++ "’." ]
in
MkIssue identifier description problems solutions IsNonEmpty IsNonEmpty IsNonEmpty
export
checkPrimaryKey : Check
checkPrimaryKey = MkCheck name help inspect
where
name = "primary_key"
help = "Check that each table has a primary key constraint."
inspectRow : List (Maybe String) -> IOExcept String Issue
inspectRow [Just table] = pure (mkIssue table)
inspectRow _ = ioe_fail "checkPrimaryKey: Bad result"
inspect : PgConnection -> IOExcept String (List Issue)
inspect conn = do
res <- pgExecute conn """
SELECT relname
FROM pg_class
WHERE relkind = 'r' AND
NOT EXISTS
( SELECT
FROM pg_constraint
WHERE conrelid = pg_class.oid AND
contype = 'p' ) AND
relnamespace <> regnamespace 'information_schema' AND
relnamespace <> regnamespace 'pg_catalog'
ORDER BY relname ASC
"""
traverse inspectRow (pgGrid res)
| {"hexsha": "914c2c661b2cbf7d81aeeaaa8aa6c77ede445f49", "size": 1734, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "Dbcritic/Check/PrimaryKey.idr", "max_stars_repo_name": "channable/dbcritic", "max_stars_repo_head_hexsha": "a58bb730ff94f45a502bf769d302ef570a23dcba", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 144, "max_stars_repo_stars_event_min_datetime": "2021-07-07T12:17:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T17:50:21.000Z", "max_issues_repo_path": "Dbcritic/Check/PrimaryKey.idr", "max_issues_repo_name": "channable/dbcritic", "max_issues_repo_head_hexsha": "a58bb730ff94f45a502bf769d302ef570a23dcba", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-07-08T11:59:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-03T15:25:01.000Z", "max_forks_repo_path": "Dbcritic/Check/PrimaryKey.idr", "max_forks_repo_name": "channable/dbcritic", "max_forks_repo_head_hexsha": "a58bb730ff94f45a502bf769d302ef570a23dcba", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-24T05:06:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-24T05:06:56.000Z", "avg_line_length": 37.6956521739, "max_line_length": 95, "alphanum_fraction": 0.6020761246, "num_tokens": 363} |
from ..BaseClass import BaseSpOptHeuristicSolver
from warnings import warn
from sklearn.cluster import (
AffinityPropagation,
AgglomerativeClustering,
KMeans,
MiniBatchKMeans,
SpectralClustering,
)
class WardSpatial(BaseSpOptHeuristicSolver):
""" Agglomerative clustering using Ward linkage with a spatial connectivity constraint.
Parameters
----------
gdf : geopandas.GeoDataFrame, required
Geodataframe containing original data
w : libpysal.weights.W, required
Weights object created from given data
attrs_name : list, required
Strings for attribute names (cols of ``geopandas.GeoDataFrame``).
n_clusters : int, optional, default: 5
The number of clusters to form.
clustering_kwds: dictionary, optional, default: dict()
Other parameters about clustering could be used in sklearn.cluster.AgglometariveClustering.
Returns
-------
labels_ : numpy.array
Cluster labels for observations.
Examples
--------
>>> import numpy as np
>>> import libpysal
>>> import geopandas as gpd
>>> from spopt import WardSpatial
Read the data.
>>> pth = libpysal.examples.get_path('airbnb_Chicago 2015.shp')
>>> chicago = gpd.read_file(pth)
Initialize the parameters.
>>> w = libpysal.weights.Queen.from_dataframe(chicago)
>>> attrs_name = ['num_spots']
>>> n_clusters = 8
Run the skater algorithm.
>>> model = WardSpatial(chicago, w, attrs_name, n_clusters)
>>> model.solve()
Get the region IDs for unit areas.
>>> model.labels_
Show the clustering results.
>>> chicago['ward_new'] = model.labels_
>>> chicago.plot(column='ward_new', categorical=True, figsize=(12,8), edgecolor='w')
"""
def __init__(self, gdf, w, attrs_name, n_clusters=5, clustering_kwds=dict()):
self.gdf = gdf
self.w = w
self.attrs_name = attrs_name
self.n_clusters = n_clusters
self.clustering_kwds = clustering_kwds
def solve(self):
"""Solve the Ward"""
data = self.gdf
X = data[self.attrs_name].values
model = AgglomerativeClustering(
n_clusters=self.n_clusters, connectivity=self.w.sparse, linkage="ward", **self.clustering_kwds
)
model.fit(X)
self.labels_ = model.labels_
| {"hexsha": "5048f011226d67481396405105d04cc8be8cd9e1", "size": 2563, "ext": "py", "lang": "Python", "max_stars_repo_path": "spopt/region/ward.py", "max_stars_repo_name": "fiendskrah/spopt", "max_stars_repo_head_hexsha": "b0f4b682f9246670241c415c4023fcb3e596c372", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "spopt/region/ward.py", "max_issues_repo_name": "fiendskrah/spopt", "max_issues_repo_head_hexsha": "b0f4b682f9246670241c415c4023fcb3e596c372", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spopt/region/ward.py", "max_forks_repo_name": "fiendskrah/spopt", "max_forks_repo_head_hexsha": "b0f4b682f9246670241c415c4023fcb3e596c372", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5591397849, "max_line_length": 106, "alphanum_fraction": 0.607881389, "include": true, "reason": "import numpy", "num_tokens": 572} |
!! N-dimensional system of array encapsulation.
!
! This file is part of LIBPFASST.
!
!> Module to define and encapsulation for a system of N-dimensional arrays
!!
!! When a new solution is created by a PFASST level, this encapsulation
!! uses the levels 'arr_shape' attribute to create a new multi-component array with that
!! shape. Thus, the 'arr_shape' attributes of the PFASST levels should be
!! set appropriately. The last component of arr_shape is the number of components in the system
!!
!! For example, before calling pf_pfasst_run we can
!! set the arr_shape of the coarsest level by doing:
!!
!! allocate(pf%levels(1)%arr_shape(3))
!! pf%levels(1)%arr_shape = [ nx, ny, 3 ]
!!
!! Which would imply that a 3 component system of two-dimensional solutions.
!!
!! The helper routines get_array1d, get_array2d, get_array3d, etc can be used to
!! extract pointers to a component of encapsulated system
!! performing any copies or the whole system if desired.
!!
module pf_mod_ndsysarray
use iso_c_binding
use pf_mod_dtype
use pf_mod_stop
implicit none
!> Type to create and destroy systems of N-dimensional arrays
type, extends(pf_factory_t) :: pf_ndsysarray_factory_t
contains
procedure :: create_single => ndsysarray_create_single
procedure :: create_array => ndsysarray_create_array
procedure :: destroy_single => ndsysarray_destroy_single
procedure :: destroy_array => ndsysarray_destroy_array
end type pf_ndsysarray_factory_t
!> Type for system of N-dimensional arrays, extends the abstract encap type
type, extends(pf_encap_t) :: pf_ndsysarray_t
integer :: ndim ! The spatial dimension of each component in system
integer :: ncomp ! The number of components in the system
integer :: ndof ! The number of variables in each component
integer, allocatable :: arr_shape(:)
real(pfdp), allocatable :: flatarray(:)
contains
procedure :: setval => ndsysarray_setval
procedure :: copy => ndsysarray_copy
procedure :: norm => ndsysarray_norm
procedure :: pack => ndsysarray_pack
procedure :: unpack => ndsysarray_unpack
procedure :: axpy => ndsysarray_axpy
procedure :: eprint => ndsysarray_eprint
end type pf_ndsysarray_t
contains
!> Subroutine to allocate the array and set the size parameters
subroutine ndsysarray_build(q, arr_shape)
class(pf_encap_t), intent(inout) :: q
integer, intent(in ) :: arr_shape(:)
integer :: ierr
select type (q)
class is (pf_ndsysarray_t)
allocate(q%arr_shape(SIZE(arr_shape)),stat=ierr)
if (ierr /=0) call pf_stop(__FILE__,__LINE__,'allocate fail, error=',ierr)
q%ndim = SIZE(arr_shape)-1
q%ncomp = arr_shape(q%ndim+1)
q%ndof = product(arr_shape(1:q%ndim))
q%arr_shape = arr_shape
allocate(q%flatarray(product(arr_shape)),stat=ierr)
if (ierr /=0) call pf_stop(__FILE__,__LINE__,'allocate fail, error=',ierr)
end select
end subroutine ndsysarray_build
!> Subroutine to create a single array
subroutine ndsysarray_create_single(this, x, level_index, lev_shape)
class(pf_ndsysarray_factory_t), intent(inout) :: this
class(pf_encap_t), intent(inout), allocatable :: x
integer, intent(in ) :: level_index
integer, intent(in ) :: lev_shape(:)
integer :: ierr
allocate(pf_ndsysarray_t::x,stat=ierr)
if (ierr /=0) call pf_stop(__FILE__,__LINE__,'allocate fail, error=',ierr)
call ndsysarray_build(x, lev_shape)
end subroutine ndsysarray_create_single
!> Subroutine to create an array of arrays
subroutine ndsysarray_create_array(this, x, n, level_index, lev_shape)
class(pf_ndsysarray_factory_t), intent(inout) :: this
class(pf_encap_t), intent(inout), allocatable :: x(:)
integer, intent(in ) :: n
integer, intent(in ) :: level_index
integer, intent(in ) :: lev_shape(:)
integer :: i,ierr
allocate(pf_ndsysarray_t::x(n),stat=ierr)
if (ierr /=0) call pf_stop(__FILE__,__LINE__,'allocate fail, error=',ierr)
do i = 1, n
call ndsysarray_build(x(i), lev_shape)
end do
end subroutine ndsysarray_create_array
!!$
!> Subroutine to destroy array (simple)
subroutine ndsysarray_destroy(encap)
class(pf_encap_t), intent(inout) :: encap
type(pf_ndsysarray_t), pointer :: ndsysarray_obj
ndsysarray_obj => cast_as_ndsysarray(encap)
deallocate(ndsysarray_obj%arr_shape)
deallocate(ndsysarray_obj%flatarray)
nullify(ndsysarray_obj)
end subroutine ndsysarray_destroy
!> Subroutine to destroy an single array
subroutine ndsysarray_destroy_single(this, x)
class(pf_ndsysarray_factory_t), intent(inout) :: this
class(pf_encap_t), intent(inout), allocatable :: x
select type (x)
class is (pf_ndsysarray_t)
deallocate(x%arr_shape)
deallocate(x%flatarray)
end select
deallocate(x)
end subroutine ndsysarray_destroy_single
!> Subroutine to destroy an array of arrays
subroutine ndsysarray_destroy_array(this, x)
class(pf_ndsysarray_factory_t), intent(inout) :: this
class(pf_encap_t), intent(inout), allocatable :: x(:)
integer :: i
select type(x)
class is (pf_ndsysarray_t)
do i = 1,SIZE(x)
deallocate(x(i)%arr_shape)
deallocate(x(i)%flatarray)
end do
end select
deallocate(x)
end subroutine ndsysarray_destroy_array
!> The following are the base subroutines that all encapsulations must provide
!!
!> Subroutine to set array to a scalare value.
subroutine ndsysarray_setval(this, val, flags)
class(pf_ndsysarray_t), intent(inout) :: this
real(pfdp), intent(in ) :: val
integer, intent(in ), optional :: flags
this%flatarray = val
end subroutine ndsysarray_setval
!> Subroutine to copy an array
subroutine ndsysarray_copy(this, src, flags)
class(pf_ndsysarray_t), intent(inout) :: this
class(pf_encap_t), intent(in ) :: src
integer, intent(in ), optional :: flags
select type(src)
type is (pf_ndsysarray_t)
this%flatarray = src%flatarray
class default
call pf_stop(__FILE__,__LINE__,'invalid type')
end select
end subroutine ndsysarray_copy
!> Subroutine to pack an array into a flat array for sending
subroutine ndsysarray_pack(this, z, flags)
class(pf_ndsysarray_t), intent(in ) :: this
real(pfdp), intent( out) :: z(:)
integer, intent(in ), optional :: flags
z = this%flatarray
end subroutine ndsysarray_pack
!> Subroutine to unpack a flatarray after receiving
subroutine ndsysarray_unpack(this, z, flags)
class(pf_ndsysarray_t), intent(inout) :: this
real(pfdp), intent(in ) :: z(:)
integer, intent(in ), optional :: flags
this%flatarray = z
end subroutine ndsysarray_unpack
!> Subroutine to define the norm of the array (here the max norm)
function ndsysarray_norm(this, flags) result (norm)
class(pf_ndsysarray_t), intent(in ) :: this
integer, intent(in ), optional :: flags
real(pfdp) :: norm
norm = maxval(abs(this%flatarray))
end function ndsysarray_norm
!> Subroutine to compute y = a x + y where a is a scalar and x and y are arrays
subroutine ndsysarray_axpy(this, a, x, flags)
class(pf_ndsysarray_t), intent(inout) :: this
class(pf_encap_t), intent(in ) :: x
real(pfdp), intent(in ) :: a
integer, intent(in ), optional :: flags
if (a .eq. 0.0_pfdp) return
select type(x)
type is (pf_ndsysarray_t)
if (a .eq. 1.0_pfdp) then
this%flatarray = x%flatarray + this%flatarray
elseif (a .eq. -1.0_pfdp) then
this%flatarray = -x%flatarray + this%flatarray
else
this%flatarray = a*x%flatarray + this%flatarray
end if
class default
call pf_stop(__FILE__,__LINE__,'invalid type')
end select
end subroutine ndsysarray_axpy
!> Subroutine to print the array to the screen (mainly for debugging purposes)
subroutine ndsysarray_eprint(this,flags)
class(pf_ndsysarray_t), intent(inout) :: this
integer, intent(in ), optional :: flags
! Just print the first few values
print *, this%flatarray(1:10)
!print *, this%flatarray
end subroutine ndsysarray_eprint
function cast_as_ndsysarray(encap_polymorph) result(ndsysarray_obj)
class(pf_encap_t), intent(in), target :: encap_polymorph
type(pf_ndsysarray_t), pointer :: ndsysarray_obj
select type(encap_polymorph)
type is (pf_ndsysarray_t)
ndsysarray_obj => encap_polymorph
end select
end function cast_as_ndsysarray
!> Helper function to return the array part or the whole thing
function get_array1d(x,n,flags) result(r)
class(pf_encap_t), target,intent(in) :: x
integer, intent(in) :: n
integer, intent(in ), optional :: flags
real(pfdp), pointer :: r(:)
select type (x)
type is (pf_ndsysarray_t)
if (n .eq. 0) then ! Return pointer to whole array
if (x%ncomp .eq. 1 .and. x%ndim .eq. 1) then
r => x%flatarray
else
call pf_stop(__FILE__,__LINE__,'bad dimension, must be 1. ndim=',x%ndim)
end if
else ! Return pointer to nth component
if (x%ndim .eq. 1) then
r => x%flatarray(x%ndof*(n-1)+1:x%ndof*n)
else
call pf_stop(__FILE__,__LINE__,'bad dimension, must be 1. ndim=',x%ndim)
end if
end if
end select
end function get_array1d
function get_array2d(x,n,flags) result(r)
class(pf_encap_t), target,intent(in) :: x
integer, intent(in) :: n
integer, intent(in ), optional :: flags
real(pfdp), pointer :: r(:,:)
select type (x)
type is (pf_ndsysarray_t)
if (n .eq. 0) then
if (x%ndim .eq. 1) then ! Return pointer to whole array
r(1:x%arr_shape(1),1:x%arr_shape(2)) => x%flatarray
else
call pf_stop(__FILE__,__LINE__,'bad dimension, must be 1. ndim=',x%ndim)
end if
else ! Return pointer to nth component
if (x%ndim .eq. 2) then
r(1:x%arr_shape(1),1:x%arr_shape(2)) => x%flatarray(x%ndof*(n-1)+1:x%ndof*n)
else
call pf_stop(__FILE__,__LINE__,'bad dimension, must be 2. ndim=',x%ndim)
end if
endif
end select
end function get_array2d
function get_array3d(x,n,flags) result(r)
class(pf_encap_t), target,intent(in) :: x
integer, intent(in) :: n
integer, intent(in ), optional :: flags
real(pfdp), pointer :: r(:,:,:)
select type (x)
type is (pf_ndsysarray_t)
if (n .eq. 0) then ! Return pointer to whole array
if (x%ndim .eq. 2) then
r(1:x%arr_shape(1),1:x%arr_shape(2),1:x%arr_shape(3)) => x%flatarray
else
call pf_stop(__FILE__,__LINE__,'bad dimension, must be 2. ndim=',x%ndim)
end if
else ! Return pointer to nth component
if (x%ndim .eq. 3) then
r(1:x%arr_shape(1),1:x%arr_shape(2),1:x%arr_shape(3)) => x%flatarray(x%ndof*(n-1)+1:x%ndof*n)
else
call pf_stop(__FILE__,__LINE__,'bad dimension, must be 3. ndim=',x%ndim)
end if
end if
end select
end function get_array3d
function get_array4d(x,n,flags) result(r)
class(pf_encap_t), target,intent(in) :: x
integer, intent(in) :: n
integer, intent(in ), optional :: flags
real(pfdp), pointer :: r(:,:,:,:)
select type (x)
type is (pf_ndsysarray_t)
if (n .eq. 0) then ! Return pointer to whole array
if (x%ndim .eq. 3) then
r(1:x%arr_shape(1),1:x%arr_shape(2),1:x%arr_shape(3),1:x%arr_shape(4)) => x%flatarray
else
call pf_stop(__FILE__,__LINE__,'bad dimension, must be 3. ndim=',x%ndim)
end if
else ! Return pointer to nth component
if (x%ndim .eq. 4) then
r(1:x%arr_shape(1),1:x%arr_shape(2),1:x%arr_shape(3),1:x%arr_shape(4)) => x%flatarray(x%ndof*(n-1)+1:x%ndof*n)
else
call pf_stop(__FILE__,__LINE__,'bad dimension, must be 4. ndim=',x%ndim)
end if
end if
end select
end function get_array4d
end module pf_mod_ndsysarray
| {"hexsha": "04d5aa27e6759221a40735ffb2ef3ca02f684c12", "size": 12962, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/pf_ndsysarray_encap.f90", "max_stars_repo_name": "wavefunction91/LibPFASST", "max_stars_repo_head_hexsha": "8bf49000d4e613496e824bb98f368527044f7064", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-05-22T11:02:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T16:16:33.000Z", "max_issues_repo_path": "src/pf_ndsysarray_encap.f90", "max_issues_repo_name": "wavefunction91/LibPFASST", "max_issues_repo_head_hexsha": "8bf49000d4e613496e824bb98f368527044f7064", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-01-18T01:46:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-29T16:32:52.000Z", "max_forks_repo_path": "src/pf_ndsysarray_encap.f90", "max_forks_repo_name": "wavefunction91/LibPFASST", "max_forks_repo_head_hexsha": "8bf49000d4e613496e824bb98f368527044f7064", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-02-09T09:38:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-25T23:00:05.000Z", "avg_line_length": 37.5710144928, "max_line_length": 123, "alphanum_fraction": 0.6269094276, "num_tokens": 3610} |
#include "generator/hierarchy.hpp"
#include "indexer/feature_algo.hpp"
#include "geometry/mercator.hpp"
#include "geometry/rect2d.hpp"
#include "base/assert.hpp"
#include "base/stl_helpers.hpp"
#include <algorithm>
#include <cmath>
#include <fstream>
#include <functional>
#include <iomanip>
#include <iterator>
#include <limits>
#include <numeric>
#include <boost/geometry.hpp>
#include <boost/geometry/geometries/register/point.hpp>
#include <boost/geometry/geometries/register/ring.hpp>
#include <boost/geometry/multi/geometries/register/multi_point.hpp>
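// Teach Boost.Geometry to treat m2::PointD as a cartesian 2D point and
// std::vector<m2::PointD> as a ring, so the geometry algorithms below work on them directly.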
BOOST_GEOMETRY_REGISTER_POINT_2D(m2::PointD, double, boost::geometry::cs::cartesian, x, y);
BOOST_GEOMETRY_REGISTER_RING(std::vector<m2::PointD>);
using namespace feature;
namespace generator
{
namespace hierarchy
{
namespace
{
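// Returns the intersection area of |lhs| and |rhs| as a percentage of the smaller ring's area.
// Returns 0.0 when the rings do not intersect at all.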
double CalculateOverlapPercentage(std::vector<m2::PointD> const & lhs,
std::vector<m2::PointD> const & rhs)
{
if (!boost::geometry::intersects(lhs, rhs))
return 0.0;
using BoostPolygon = boost::geometry::model::polygon<m2::PointD>;
std::vector<BoostPolygon> coll;
boost::geometry::intersection(lhs, rhs, coll);
auto const min = std::min(boost::geometry::area(lhs), boost::geometry::area(rhs));
CHECK_GREATER(min, 0.0, (min));
auto const binOp = [](double x, BoostPolygon const & y) { return x + boost::geometry::area(y); };
auto const sum = std::accumulate(std::cbegin(coll), std::cend(coll), 0.0, binOp);
return sum * 100 / min;
}
} // namespace
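// Default filter: accepts every feature.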
bool FilterFeatureDefault(feature::FeatureBuilder const &) { return true; }
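// Captures everything needed for containment tests. For area features the outer ring is
// normalized with boost::geometry::correct() and its area is precomputed once.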
HierarchyPlace::HierarchyPlace(FeatureBuilder const & fb)
: m_id(MakeCompositeId(fb))
, m_name(fb.GetMultilangName())
, m_types(fb.GetTypes())
, m_rect(fb.GetLimitRect())
, m_center(fb.GetKeyPoint())
{
if (fb.IsPoint())
{
m_isPoint = true;
}
else if (fb.IsArea())
{
m_polygon = fb.GetOuterGeometry();
boost::geometry::correct(m_polygon);
m_area = boost::geometry::area(m_polygon);
}
}
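// Containment heuristic: a point place never contains anything; a point is contained if the
// polygon covers it; for two area places, the smaller one must overlap this place by more
// than 80% of the smaller area.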
bool HierarchyPlace::Contains(HierarchyPlace const & smaller) const
{
if (IsPoint())
return false;
if (smaller.IsPoint())
return Contains(smaller.GetCenter());
return smaller.GetArea() <= GetArea() &&
CalculateOverlapPercentage(m_polygon, smaller.m_polygon) > 80.0;
}
bool HierarchyPlace::Contains(m2::PointD const & point) const
{
return boost::geometry::covered_by(point, m_polygon);
}
HierarchyLinker::HierarchyLinker(Node::Ptrs && nodes)
: m_nodes(std::move(nodes)), m_tree(MakeTree4d(m_nodes))
{
}
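// Indexes the nodes by their limit rects in a 4-d tree to make the point queries in
// FindPlaceParent() fast.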
// static
HierarchyLinker::Tree4d HierarchyLinker::MakeTree4d(Node::Ptrs const & nodes)
{
Tree4d tree;
for (auto const & n : nodes)
tree.Add(n, n->GetData().GetLimitRect());
return tree;
}
HierarchyLinker::Node::Ptr HierarchyLinker::FindPlaceParent(HierarchyPlace const & place)
{
Node::Ptr parent = nullptr;
auto minArea = std::numeric_limits<double>::max();
auto const point = place.GetCenter();
m_tree.ForEachInRect({point, point}, [&](auto const & candidateNode) {
    // https://wiki.openstreetmap.org/wiki/Simple_3D_buildings
    // An object tagged 'building:part' is either part of a relation whose outline is tagged
    // 'building', or is contained in an object tagged 'building'. We handle the second case here:
    // we assume a building part lies only inside a building.
static auto const & buildingChecker = ftypes::IsBuildingChecker::Instance();
static auto const & buildingPartChecker = ftypes::IsBuildingPartChecker::Instance();
auto const & candidate = candidateNode->GetData();
if (buildingPartChecker(place.GetTypes()) &&
!(buildingChecker(candidate.GetTypes()) || buildingPartChecker(candidate.GetTypes())))
{
return;
}
// A building part must have children only with 'building:part' type.
if (!buildingPartChecker(place.GetTypes()) && buildingPartChecker(candidate.GetTypes()))
return;
if (place.GetCompositeId() == candidate.GetCompositeId())
return;
if (candidate.GetArea() < minArea && candidate.Contains(place))
{
      // Sometimes two places can have the same geometry. We must check the candidate node and
      // its ancestors to avoid cyclic connections.
auto node = candidateNode;
while (node->HasParent())
{
node = node->GetParent();
if (node->GetData().GetCompositeId() == place.GetCompositeId())
return;
}
parent = candidateNode;
minArea = candidate.GetArea();
}
});
return parent;
}
HierarchyLinker::Node::Ptrs HierarchyLinker::Link()
{
for (auto & node : m_nodes)
{
auto const & place = node->GetData();
auto const parentPlace = FindPlaceParent(place);
if (!parentPlace)
continue;
tree_node::Link(node, parentPlace);
}
return m_nodes;
}
HierarchyEntryEnricher::HierarchyEntryEnricher(std::string const & osm2FtIdsPath,
std::string const & countryFullPath)
: m_featureGetter(countryFullPath)
{
CHECK(m_osm2FtIds.ReadFromFile(osm2FtIdsPath), (osm2FtIdsPath));
}
std::optional<m2::PointD> HierarchyEntryEnricher::GetFeatureCenter(CompositeId const & id) const
{
auto const optIds = m_osm2FtIds.GetFeatureIds(id);
if (optIds.empty())
return {};
// A CompositeId id may correspond to several feature ids. These features can be represented by
// three types of geometry. Logically, their centers coincide, but in practice they don’t,
// because the centers are calculated differently. For example, for an object with a type area,
// the area will be computed using the triangles geometry, but for an object with a type line,
// the area will be computed using the outer geometry of a polygon.
std::unordered_map<std::underlying_type_t<feature::GeomType>, m2::PointD> m;
for (auto optId : optIds)
{
auto const ftPtr = m_featureGetter.GetFeatureByIndex(optId);
if (!ftPtr)
continue;
CHECK(m.emplace(base::Underlying(ftPtr->GetGeomType()), feature::GetCenter(*ftPtr)).second,
(id, optIds));
}
for (auto type : {
base::Underlying(feature::GeomType::Point),
base::Underlying(feature::GeomType::Area),
base::Underlying(feature::GeomType::Line)})
{
if (m.count(type) != 0)
return m[type];
}
return {};
}
HierarchyLinesBuilder::HierarchyLinesBuilder(HierarchyLinker::Node::Ptrs && trees)
: m_trees(std::move(trees))
{
}
void HierarchyLinesBuilder::SetGetMainTypeFunction(GetMainTypeFn const & getMainType)
{
m_getMainType = getMainType;
}
void HierarchyLinesBuilder::SetGetNameFunction(GetNameFn const & getName) { m_getName = getName; }
void HierarchyLinesBuilder::SetCountry(storage::CountryId const & country)
{
m_countryName = country;
}
void HierarchyLinesBuilder::SetHierarchyEntryEnricher(
std::unique_ptr<HierarchyEntryEnricher> && enricher)
{
m_enricher = std::move(enricher);
}
std::vector<HierarchyEntry> HierarchyLinesBuilder::GetHierarchyLines()
{
CHECK(m_getName, ());
CHECK(m_getMainType, ());
std::vector<HierarchyEntry> lines;
for (auto const & tree : m_trees)
{
tree_node::PreOrderVisit(tree, [&](auto const & node) {
lines.emplace_back(Transform(node));
});
}
return lines;
}
m2::PointD HierarchyLinesBuilder::GetCenter(HierarchyLinker::Node::Ptr const & node)
{
auto const & data = node->GetData();
if (!m_enricher)
return data.GetCenter();
auto const optCenter = m_enricher->GetFeatureCenter(data.GetCompositeId());
return optCenter ? *optCenter : data.GetCenter();
}
HierarchyEntry HierarchyLinesBuilder::Transform(HierarchyLinker::Node::Ptr const & node)
{
HierarchyEntry line;
auto const & data = node->GetData();
line.m_id = data.GetCompositeId();
auto const parent = node->GetParent();
if (parent)
line.m_parentId = parent->GetData().GetCompositeId();
line.m_country = m_countryName;
line.m_depth = GetDepth(node);
line.m_name = m_getName(data.GetName());
line.m_type = m_getMainType(data.GetTypes());
line.m_center = GetCenter(node);
return line;
}
HierarchyLinker::Node::Ptrs BuildHierarchy(std::vector<feature::FeatureBuilder> && fbs,
GetMainTypeFn const & getMainType,
std::shared_ptr<FilterInterface> const & filter)
{
base::EraseIf(fbs, [&](auto const & fb) { return !filter->IsAccepted(fb); });
HierarchyLinker::Node::Ptrs places;
places.reserve(fbs.size());
base::Transform(fbs, std::back_inserter(places), [](auto const & fb) {
return tree_node::MakeTreeNode(HierarchyPlace(fb));
});
auto nodes = HierarchyLinker(std::move(places)).Link();
// We leave only the trees.
base::EraseIf(nodes, [](auto const & node) {
return node->HasParent();
});
return nodes;
}
void AddChildrenTo(HierarchyLinker::Node::Ptrs & trees,
std::function<std::vector<HierarchyPlace>(CompositeId const &)> const & fn)
{
for (auto & tree : trees)
{
CHECK(!tree->HasParent(), ());
tree_node::PostOrderVisit(tree, [&](auto const & n) {
auto const id = n->GetData().GetCompositeId();
auto const & places = fn(id);
for (auto place : places)
{
auto const newNode = tree_node::MakeTreeNode(std::move(place));
tree_node::Link(newNode, n);
}
});
}
}
void FlattenBuildingParts(HierarchyLinker::Node::Ptrs & trees)
{
for (auto & tree : trees)
{
CHECK(!tree->HasParent(), ());
std::vector<
std::pair<hierarchy::HierarchyLinker::Node::Ptr, hierarchy::HierarchyLinker::Node::Ptr>>
buildingPartsTrees;
static auto const & buildingPartChecker = ftypes::IsBuildingPartChecker::Instance();
std::function<void(hierarchy::HierarchyLinker::Node::Ptr const &)> visit;
visit = [&](auto const & n) {
if (buildingPartChecker(n->GetData().GetTypes()))
{
CHECK(n->HasParent(), ());
auto building = n->GetParent();
buildingPartsTrees.emplace_back(building, n);
return;
}
CHECK(!buildingPartChecker(n->GetData().GetTypes()), ());
for (auto const & ch : n->GetChildren())
visit(ch);
};
visit(tree);
for (auto const & buildingAndParts : buildingPartsTrees)
{
Unlink(buildingAndParts.second, buildingAndParts.first);
tree_node::PostOrderVisit(buildingAndParts.second, [&](auto const & buildingPartNode) {
CHECK(buildingPartChecker(buildingPartNode->GetData().GetTypes()), ());
buildingPartNode->RemoveChildren();
tree_node::Link(buildingPartNode, buildingAndParts.first);
});
}
}
}
} // namespace hierarchy
} // namespace generator
| {"hexsha": "e7546a5480ed8d3c259bfc24f56aa3539fad8015", "size": 10660, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "generator/hierarchy.cpp", "max_stars_repo_name": "vicpopov/omim", "max_stars_repo_head_hexsha": "664b458998fb0f2405f68ae830c2798e027b2dcc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-07-21T01:24:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-21T01:24:24.000Z", "max_issues_repo_path": "generator/hierarchy.cpp", "max_issues_repo_name": "vicpopov/omim", "max_issues_repo_head_hexsha": "664b458998fb0f2405f68ae830c2798e027b2dcc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generator/hierarchy.cpp", "max_forks_repo_name": "vicpopov/omim", "max_forks_repo_head_hexsha": "664b458998fb0f2405f68ae830c2798e027b2dcc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3703703704, "max_line_length": 103, "alphanum_fraction": 0.6795497186, "num_tokens": 2675} |
"""
Created on Mon Jun 24 10:52:25 2019
Reads a wav file with an SDR IQ capture of FM stations located in:
https://mega.nz/#F!3UUUnSiD!WLhWZ3ff4f4Pi7Ko_zcodQ
Also: https://drive.google.com/open?id=1itb_ePcPeDRXrVBIVL-1Y3wrt8yvpW28
Also generates an IQ stream sampled at 2.4 Msps to simulate a similar spectrum
with sinusoids; this might be useful in an early stage to use a known signal.
@author: f.divruno
"""
import matplotlib.pyplot as plt
import numpy
# ------------ PARAMETERS
def calculate_noise_visibility(bandwidth, int_time, diameter, t_sys, eta):
"""Determine noise rms per visibility
:returns: Sigma [nrows]
"""
k_b = 1.38064852e-23
area = numpy.pi * (diameter / 2.) ** 2
bt = bandwidth * int_time
sigma = (numpy.sqrt(2) * k_b * t_sys) / (area * eta * (numpy.sqrt(bt)))
sigma *= 1e26
return sigma
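# A quick worked example (values taken from the commented-out add_noise call
# further below, not additional specifications): a 35 m dish with
# T_sys = 200 K and eta = 0.9, over a 200 kHz channel and 0.2 s integration:
#   calculate_noise_visibility(2e5, 0.2, 35.0, 200.0, 0.9)
# evaluates the radiometer equation above and returns sigma in Jy.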
def generate_DTV(frequency, times, power=50e3, gain=1e-9):
nchan = len(frequency)
ntimes = len(times)
shape = [ntimes, nchan]
sshape = [ntimes, nchan//2]
bchan = nchan//4
echan = 3*nchan//4
amp = 1e26 * gain * numpy.sqrt(2.0) * power / (max(frequency)-min(frequency))
print("RMS signal per sample = %g Jy" % amp)
signal = numpy.zeros(shape, dtype='complex')
signal[:, bchan:echan] += numpy.random.normal(0.0, amp, sshape) + 1j * numpy.random.normal(0.0, amp, sshape)
return signal
def add_noise(waterfall, bandwidth, int_time, diameter, t_sys, eta):
    """Add receiver noise to the waterfall.
    :returns: waterfall with noise added
    """
    # The specified sensitivity (effective area / T_sys) is roughly 610 m^2 / K in the range 160 - 200 MHz
    # sigma_vis = 2 k T_sys / (area * sqrt(bt)) = 2 k * 512 / (610 * sqrt(bt))
sens = 610
k_b = 1.38064852e-23
bt = bandwidth * int_time
sigma = 2 * 1e26 * k_b / ((sens/512) * (numpy.sqrt(bt)))
print("RMS noise per sample = %g Jy" % sigma)
sshape = waterfall.shape
waterfall += numpy.random.normal(0.0, sigma, sshape) + 1j * numpy.random.normal(0.0, sigma, sshape)
return waterfall
if __name__ == "__main__":
sample_freq = 2e5
frequency = numpy.arange(170.5e6, 184.5e6, sample_freq)
nchan = len(frequency)
tscan = 0.2
times = numpy.arange(0.0, 30.0, 0.2)
ntimes = len(times)
gain = 1e-18
waterfall = generate_DTV(frequency, times, 1.0, gain)
# waterfall = add_noise(waterfall, bandwidth=sample_freq, int_time=tscan, diameter=35.0, t_sys=200, eta=0.9)
plt.clf()
    plt.imshow(numpy.abs(waterfall), origin='lower')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Time (s)')
fticks = numpy.arange(171e6, 185e6, 5e6)
ifticks = numpy.round((fticks-numpy.min(frequency))/(sample_freq)).astype('int')
plt.xticks(ifticks, fticks*1e-6)
tticks = numpy.array([5, 10, 15, 20, 25])
itticks = numpy.round((tticks-numpy.min(times))/tscan).astype('int')
plt.yticks(itticks, tticks)
cbar = plt.colorbar()
plt.title(("Gain towards Perth = %.1f dB" % (10*numpy.log10(gain))))
cbar.set_label('Power Spectral Density (Jy)', rotation=270)
plt.tight_layout()
plt.savefig("DTV_Waterfall.png")
plt.show()
| {"hexsha": "6cb1d4b5595af89892b42aa0b64b0889f73cb796", "size": 3313, "ext": "py", "lang": "Python", "max_stars_repo_path": "DTV/generate_DTV_interference.py", "max_stars_repo_name": "ska-telescope/sim-lowlevel-rfi", "max_stars_repo_head_hexsha": "24ebb5a738321641eefbff0a1d754b1043ff61c0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DTV/generate_DTV_interference.py", "max_issues_repo_name": "ska-telescope/sim-lowlevel-rfi", "max_issues_repo_head_hexsha": "24ebb5a738321641eefbff0a1d754b1043ff61c0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DTV/generate_DTV_interference.py", "max_forks_repo_name": "ska-telescope/sim-lowlevel-rfi", "max_forks_repo_head_hexsha": "24ebb5a738321641eefbff0a1d754b1043ff61c0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5104166667, "max_line_length": 114, "alphanum_fraction": 0.6281316028, "include": true, "reason": "import numpy", "num_tokens": 1054} |
# %%
import pandas as pd
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker.serializers import JSONSerializer
from sagemaker.deserializers import JSONDeserializer
# %%
endpoint_name = "endpoint-cdk-model-test"
predictor = sagemaker.predictor.Predictor(
endpoint_name=endpoint_name,
serializer=JSONSerializer(),
deserializer=JSONDeserializer(),
)
# %%
test_inference_data = pd.read_csv("../data/test_inference_input.csv")
inference_data_json = test_inference_data.to_json(orient="records")
# %%
result = predictor.predict(inference_data_json)
result
# %%
| {"hexsha": "d989a1ebd91e3e72fa642e9296c6c73e05bce730", "size": 609, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/endpoint_test.py", "max_stars_repo_name": "a-barton/cdk-model-test", "max_stars_repo_head_hexsha": "b7a0a88d4e674759967aa983321568b071a33fa6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/endpoint_test.py", "max_issues_repo_name": "a-barton/cdk-model-test", "max_issues_repo_head_hexsha": "b7a0a88d4e674759967aa983321568b071a33fa6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/endpoint_test.py", "max_forks_repo_name": "a-barton/cdk-model-test", "max_forks_repo_head_hexsha": "b7a0a88d4e674759967aa983321568b071a33fa6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.3, "max_line_length": 69, "alphanum_fraction": 0.7865353038, "include": true, "reason": "import sage,from sage", "num_tokens": 144} |
[STATEMENT]
lemma bisimSubstOutputPushRes:
fixes x :: name
and \<Psi> :: 'b
and M :: 'a
and N :: 'a
and P :: "('a, 'b, 'c) psi"
assumes "x \<sharp> M"
and "x \<sharp> N"
shows "\<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P) \<sim>\<^sub>s M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P) \<sim>\<^sub>s M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P
[PROOF STEP]
proof(rule closeSubstI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>\<sigma>. wellFormedSubst \<sigma> \<Longrightarrow> \<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
[PROOF STEP]
fix \<sigma>:: "(name list \<times> 'a list) list"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>\<sigma>. wellFormedSubst \<sigma> \<Longrightarrow> \<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
[PROOF STEP]
assume "wellFormedSubst \<sigma>"
[PROOF STATE]
proof (state)
this:
wellFormedSubst \<sigma>
goal (1 subgoal):
1. \<And>\<sigma>. wellFormedSubst \<sigma> \<Longrightarrow> \<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
[PROOF STEP]
obtain y::name where "y \<sharp> \<Psi>" and "y \<sharp> \<sigma>" and "y \<sharp> P" and "y \<sharp> M" and "y \<sharp> N"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>y. \<lbrakk>y \<sharp> \<Psi>; y \<sharp> \<sigma>; y \<sharp> P; y \<sharp> M; y \<sharp> N\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(generate_fresh "name") (auto simp add: fresh_prod)
[PROOF STATE]
proof (state)
this:
y \<sharp> \<Psi>
y \<sharp> \<sigma>
y \<sharp> P
y \<sharp> M
y \<sharp> N
goal (1 subgoal):
1. \<And>\<sigma>. wellFormedSubst \<sigma> \<Longrightarrow> \<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
[PROOF STEP]
from \<open>wellFormedSubst \<sigma>\<close> \<open>y \<sharp> M\<close> \<open>y \<sharp> \<sigma>\<close>
[PROOF STATE]
proof (chain)
picking this:
wellFormedSubst \<sigma>
y \<sharp> M
y \<sharp> \<sigma>
[PROOF STEP]
have "y \<sharp> M[<\<sigma>>]"
[PROOF STATE]
proof (prove)
using this:
wellFormedSubst \<sigma>
y \<sharp> M
y \<sharp> \<sigma>
goal (1 subgoal):
1. y \<sharp> M[<\<sigma>>]
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<sharp> M[<\<sigma>>]
goal (1 subgoal):
1. \<And>\<sigma>. wellFormedSubst \<sigma> \<Longrightarrow> \<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
y \<sharp> M[<\<sigma>>]
goal (1 subgoal):
1. \<And>\<sigma>. wellFormedSubst \<sigma> \<Longrightarrow> \<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
[PROOF STEP]
from \<open>wellFormedSubst \<sigma>\<close> \<open>y \<sharp> N\<close> \<open>y \<sharp> \<sigma>\<close>
[PROOF STATE]
proof (chain)
picking this:
wellFormedSubst \<sigma>
y \<sharp> N
y \<sharp> \<sigma>
[PROOF STEP]
have "y \<sharp> N[<\<sigma>>]"
[PROOF STATE]
proof (prove)
using this:
wellFormedSubst \<sigma>
y \<sharp> N
y \<sharp> \<sigma>
goal (1 subgoal):
1. y \<sharp> N[<\<sigma>>]
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<sharp> N[<\<sigma>>]
goal (1 subgoal):
1. \<And>\<sigma>. wellFormedSubst \<sigma> \<Longrightarrow> \<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
y \<sharp> M[<\<sigma>>]
y \<sharp> N[<\<sigma>>]
[PROOF STEP]
have "\<Psi> \<rhd> \<lparr>\<nu>y\<rparr>((M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.(([(x, y)] \<bullet> P)[<\<sigma>>])) \<sim> (M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.(\<lparr>\<nu>y\<rparr>(([(x, y)] \<bullet> P)[<\<sigma>>]))"
[PROOF STATE]
proof (prove)
using this:
y \<sharp> M[<\<sigma>>]
y \<sharp> N[<\<sigma>>]
goal (1 subgoal):
1. \<Psi> \<rhd> \<lparr>\<nu>y\<rparr>((M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.[(x, y)] \<bullet> P[<\<sigma>>]) \<sim> (M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.\<lparr>\<nu>y\<rparr>[(x, y)] \<bullet> P[<\<sigma>>]
[PROOF STEP]
by(rule bisimOutputPushRes)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> \<lparr>\<nu>y\<rparr>((M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.[(x, y)] \<bullet> P[<\<sigma>>]) \<sim> (M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.\<lparr>\<nu>y\<rparr>[(x, y)] \<bullet> P[<\<sigma>>]
goal (1 subgoal):
1. \<And>\<sigma>. wellFormedSubst \<sigma> \<Longrightarrow> \<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
[PROOF STEP]
with \<open>y \<sharp> M\<close> \<open>y \<sharp> N\<close> \<open>y \<sharp> P\<close> \<open>x \<sharp> M\<close> \<open>x \<sharp> N\<close> \<open>y \<sharp> \<sigma>\<close> \<open>wellFormedSubst \<sigma>\<close>
[PROOF STATE]
proof (chain)
picking this:
y \<sharp> M
y \<sharp> N
y \<sharp> P
x \<sharp> M
x \<sharp> N
y \<sharp> \<sigma>
wellFormedSubst \<sigma>
\<Psi> \<rhd> \<lparr>\<nu>y\<rparr>((M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.[(x, y)] \<bullet> P[<\<sigma>>]) \<sim> (M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.\<lparr>\<nu>y\<rparr>[(x, y)] \<bullet> P[<\<sigma>>]
[PROOF STEP]
show "\<Psi> \<rhd> (\<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P))[<\<sigma>>] \<sim> (M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P)[<\<sigma>>]"
[PROOF STATE]
proof (prove)
using this:
y \<sharp> M
y \<sharp> N
y \<sharp> P
x \<sharp> M
x \<sharp> N
y \<sharp> \<sigma>
wellFormedSubst \<sigma>
\<Psi> \<rhd> \<lparr>\<nu>y\<rparr>((M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.[(x, y)] \<bullet> P[<\<sigma>>]) \<sim> (M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.\<lparr>\<nu>y\<rparr>[(x, y)] \<bullet> P[<\<sigma>>]
goal (1 subgoal):
1. \<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
[PROOF STEP]
apply(subst alphaRes[of y], simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>y \<sharp> M; y \<sharp> N; y \<sharp> P; x \<sharp> M; x \<sharp> N; y \<sharp> \<sigma>; wellFormedSubst \<sigma>; \<Psi> \<rhd> \<lparr>\<nu>y\<rparr>((M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.[(x, y)] \<bullet> P[<\<sigma>>]) \<sim> (M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.\<lparr>\<nu>y\<rparr>[(x, y)] \<bullet> P[<\<sigma>>]\<rbrakk> \<Longrightarrow> \<Psi> \<rhd> \<lparr>\<nu>y\<rparr>([(x, y)] \<bullet> M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
[PROOF STEP]
apply(subst alphaRes[of y P], simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>y \<sharp> M; y \<sharp> N; y \<sharp> P; x \<sharp> M; x \<sharp> N; y \<sharp> \<sigma>; wellFormedSubst \<sigma>; \<Psi> \<rhd> \<lparr>\<nu>y\<rparr>((M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.[(x, y)] \<bullet> P[<\<sigma>>]) \<sim> (M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.\<lparr>\<nu>y\<rparr>[(x, y)] \<bullet> P[<\<sigma>>]\<rbrakk> \<Longrightarrow> \<Psi> \<rhd> \<lparr>\<nu>y\<rparr>([(x, y)] \<bullet> M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>y\<rparr>([(x, y)] \<bullet> P)[<\<sigma>>]
[PROOF STEP]
by(simp add: eqvts)
[PROOF STATE]
proof (state)
this:
\<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P)[<\<sigma>>] \<sim> M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P[<\<sigma>>]
goal:
No subgoals!
[PROOF STEP]
qed | {"llama_tokens": 3567, "file": "Psi_Calculi_Bisim_Subst", "length": 21} |
using BenchmarkTools
SUITE = BenchmarkGroup()
BCST_A = parse(Int, ENV["BCST_A"])
for i in [0, 1]
k1 = i * BCST_A
SUITE["k1=$k1"] = @benchmarkable nothing
end
| {"hexsha": "b1d553cd6053ad58f230c81ce34c42bec239fc63", "size": 167, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/BenchmarkConfigSweepsTests/src/example1.jl", "max_stars_repo_name": "tkf/BenchmarkConfigSweeps.jl", "max_stars_repo_head_hexsha": "778c6c754eef1c7edc1ab57fb8e4758a68c96742", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-20T08:45:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-20T08:45:54.000Z", "max_issues_repo_path": "test/BenchmarkConfigSweepsTests/src/example1.jl", "max_issues_repo_name": "tkf/BenchmarkConfigSweeps.jl", "max_issues_repo_head_hexsha": "778c6c754eef1c7edc1ab57fb8e4758a68c96742", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-11-05T03:55:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-05T21:59:35.000Z", "max_forks_repo_path": "test/BenchmarkConfigSweepsTests/src/example1.jl", "max_forks_repo_name": "tkf/BenchmarkConfigSweeps.jl", "max_forks_repo_head_hexsha": "778c6c754eef1c7edc1ab57fb8e4758a68c96742", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.5555555556, "max_line_length": 44, "alphanum_fraction": 0.6586826347, "num_tokens": 60} |
import json
import logging
import os
import numpy as np
from draco.learn import data_util, linear
from draco.learn.helper import current_weights
from draco.run import run
from draco.spec import Task
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def absolute_path(p: str) -> str:
return os.path.join(os.path.dirname(__file__), p)
def play(partial_full_data, train_weights=True, output_file=None):
init_weights = current_weights()
if train_weights:
train_dev, _ = data_util.load_data()
X = train_dev.positive - train_dev.negative
clf = linear.train_model(X)
# columns where all X[i] are zero
unused_features = np.nonzero(np.sum(np.abs(X), axis=0) == 0)[0]
        # a feature that never occurs in the training data gets no learnt weight (None)
learnt_weights = [int(x * 1000) if (i not in unused_features) else None
for i, x in enumerate(clf.coef_[0])]
weights = {}
for i, k in enumerate(init_weights):
if learnt_weights[i] is not None:
weights[k] = learnt_weights[i]
else:
weights[k] = 10000 + init_weights[k]
else:
weights = init_weights
pairs = generate_visual_pairs(partial_full_data, weights)
if output_file is not None:
with open(output_file, "w+") as f:
print(f'Writing pairs to {output_file}')
json.dump(pairs, f)
else:
print(json.dumps(pairs))
def generate_visual_pairs(partial_full_data, weights):
# Generate pairs that can be visualized by bug finders
result = {}
result["headers"] = {
"first": {
"title": "Draco",
"subtitle": "Draco Prediction"
},
"second": {
"title": "CQL",
"subtitle": "Compassql Prediction"
}
}
result["specs"] = []
for case in partial_full_data:
partial_spec, full_spec = partial_full_data[case]
draco_rec = run(Task.from_cql(partial_spec), constants=weights)
if draco_rec is None:
logger.warning(f'Could not find a spec for {partial_spec}')
result["specs"].append({
"first": None,
"second": full_spec,
"properties": {
"input": partial_spec
}
})
continue
result["specs"].append({
"first": draco_rec.to_vegalite(),
"second": full_spec,
"properties": {
"input": partial_spec
}
})
return result
if __name__ == '__main__':
# spec_dir = absolute_path("../../data/synthetic")
# dataset = data_util.load_partial_full_data(spec_dir)
# output_file = absolute_path("../../data/spec_pairs/synthetic.json")
# play(dataset, train_weights=True, output_file=output_file)
# spec_dir = absolute_path("../../data/synthetic")
# dataset = data_util.load_partial_full_data(spec_dir)
# output_file = absolute_path("../../data/spec_pairs/synthetic_default_weights.json")
# play(dataset, train_weights=False, output_file=output_file)
# spec_dir = absolute_path("../../data/compassql_examples")
# dataset = data_util.load_partial_full_data(spec_dir)
# output_file = absolute_path("../../data/spec_pairs/draco_cql.json")
# play(dataset, train_weights=True, output_file=output_file)
spec_dir = absolute_path("../../data/compassql_examples")
dataset = data_util.load_partial_full_data(spec_dir)
output_file = absolute_path("../../data/spec_pairs/draco_cql_default_weights.json")
play(dataset, train_weights=False, output_file=output_file)
# open `http://localhost:3000/specviewer?data=spec_pairs/draco_cql_default_weights.json`
| {"hexsha": "307da47ce132e6e32ea6e2c96bbd11fa03900687", "size": 3790, "ext": "py", "lang": "Python", "max_stars_repo_path": "draco/learn/playground.py", "max_stars_repo_name": "ngehlenborg/draco", "max_stars_repo_head_hexsha": "e7e958d9494413e0578053359368b7918a0135de", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "draco/learn/playground.py", "max_issues_repo_name": "ngehlenborg/draco", "max_issues_repo_head_hexsha": "e7e958d9494413e0578053359368b7918a0135de", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "draco/learn/playground.py", "max_forks_repo_name": "ngehlenborg/draco", "max_forks_repo_head_hexsha": "e7e958d9494413e0578053359368b7918a0135de", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3223140496, "max_line_length": 92, "alphanum_fraction": 0.6258575198, "include": true, "reason": "import numpy", "num_tokens": 852} |
[STATEMENT]
lemma min_satisfying_Some:
"min_satisfying P l = Some x \<longrightarrow>
x \<in> set l \<and> P x \<and> (\<forall> x' \<in> set l. x' < x \<longrightarrow> \<not> P x')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. min_satisfying P l = Some x \<longrightarrow> x \<in> set l \<and> P x \<and> (\<forall>x'\<in>set l. x' < x \<longrightarrow> \<not> P x')
[PROOF STEP]
proof (safe)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. min_satisfying P l = Some x \<Longrightarrow> x \<in> set l
2. min_satisfying P l = Some x \<Longrightarrow> P x
3. \<And>x'. \<lbrakk>min_satisfying P l = Some x; x' \<in> set l; x' < x; P x'\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
let ?xs = "filter P l"
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. min_satisfying P l = Some x \<Longrightarrow> x \<in> set l
2. min_satisfying P l = Some x \<Longrightarrow> P x
3. \<And>x'. \<lbrakk>min_satisfying P l = Some x; x' \<in> set l; x' < x; P x'\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
assume "min_satisfying P l = Some x"
[PROOF STATE]
proof (state)
this:
min_satisfying P l = Some x
goal (3 subgoals):
1. min_satisfying P l = Some x \<Longrightarrow> x \<in> set l
2. min_satisfying P l = Some x \<Longrightarrow> P x
3. \<And>x'. \<lbrakk>min_satisfying P l = Some x; x' \<in> set l; x' < x; P x'\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
min_satisfying P l = Some x
[PROOF STEP]
have "set ?xs \<noteq> {}" "x = Min (set ?xs)"
[PROOF STATE]
proof (prove)
using this:
min_satisfying P l = Some x
goal (1 subgoal):
1. set (filter P l) \<noteq> {} &&& x = Min (set (filter P l))
[PROOF STEP]
unfolding min_satisfying_def Let_def
[PROOF STATE]
proof (prove)
using this:
(if filter P l = [] then None else Some (list_min (filter P l))) = Some x
goal (1 subgoal):
1. set (filter P l) \<noteq> {} &&& x = Min (set (filter P l))
[PROOF STEP]
by (auto split: if_splits simp add: filter_empty_conv)
[PROOF STATE]
proof (state)
this:
set (filter P l) \<noteq> {}
x = Min (set (filter P l))
goal (3 subgoals):
1. min_satisfying P l = Some x \<Longrightarrow> x \<in> set l
2. min_satisfying P l = Some x \<Longrightarrow> P x
3. \<And>x'. \<lbrakk>min_satisfying P l = Some x; x' \<in> set l; x' < x; P x'\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
set (filter P l) \<noteq> {}
x = Min (set (filter P l))
[PROOF STEP]
show "x \<in> set l" "P x"
[PROOF STATE]
proof (prove)
using this:
set (filter P l) \<noteq> {}
x = Min (set (filter P l))
goal (1 subgoal):
1. x \<in> set l &&& P x
[PROOF STEP]
using Min_in[of "set ?xs"]
[PROOF STATE]
proof (prove)
using this:
set (filter P l) \<noteq> {}
x = Min (set (filter P l))
\<lbrakk>finite (set (filter P l)); set (filter P l) \<noteq> {}\<rbrakk> \<Longrightarrow> Min (set (filter P l)) \<in> set (filter P l)
goal (1 subgoal):
1. x \<in> set l &&& P x
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
x \<in> set l
P x
goal (1 subgoal):
1. \<And>x'. \<lbrakk>min_satisfying P l = Some x; x' \<in> set l; x' < x; P x'\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
fix x'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x'. \<lbrakk>min_satisfying P l = Some x; x' \<in> set l; x' < x; P x'\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
assume "x' \<in> set l" "P x'" "x' < x"
[PROOF STATE]
proof (state)
this:
x' \<in> set l
P x'
x' < x
goal (1 subgoal):
1. \<And>x'. \<lbrakk>min_satisfying P l = Some x; x' \<in> set l; x' < x; P x'\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
have "x' \<notin> set ?xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x' \<notin> set (filter P l)
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> x' \<notin> set (filter P l) \<Longrightarrow> False
[PROOF STEP]
assume "\<not> ?thesis"
[PROOF STATE]
proof (state)
this:
\<not> x' \<notin> set (filter P l)
goal (1 subgoal):
1. \<not> x' \<notin> set (filter P l) \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> x' \<notin> set (filter P l)
[PROOF STEP]
have "x' \<ge> x"
[PROOF STATE]
proof (prove)
using this:
\<not> x' \<notin> set (filter P l)
goal (1 subgoal):
1. x \<le> x'
[PROOF STEP]
using \<open>x = Min (set ?xs)\<close>
[PROOF STATE]
proof (prove)
using this:
\<not> x' \<notin> set (filter P l)
x = Min (set (filter P l))
goal (1 subgoal):
1. x \<le> x'
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x \<le> x'
goal (1 subgoal):
1. \<not> x' \<notin> set (filter P l) \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<le> x'
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
x \<le> x'
goal (1 subgoal):
1. False
[PROOF STEP]
using \<open>x' < x\<close>
[PROOF STATE]
proof (prove)
using this:
x \<le> x'
x' < x
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x' \<notin> set (filter P l)
goal (1 subgoal):
1. \<And>x'. \<lbrakk>min_satisfying P l = Some x; x' \<in> set l; x' < x; P x'\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x' \<notin> set (filter P l)
[PROOF STEP]
show "False"
[PROOF STATE]
proof (prove)
using this:
x' \<notin> set (filter P l)
goal (1 subgoal):
1. False
[PROOF STEP]
using \<open>x' \<in> set l\<close> \<open>P x'\<close>
[PROOF STATE]
proof (prove)
using this:
x' \<notin> set (filter P l)
x' \<in> set l
P x'
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed | {"llama_tokens": 2464, "file": "Simplex_Simplex_Auxiliary", "length": 30} |
[STATEMENT]
lemma R_join:
assumes "x is R healthy"
and "y is R healthy"
shows "(x \<sqinter> y) is R healthy"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<or> y is R healthy
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<or> y is R healthy
[PROOF STEP]
have "R x = x" and "R y = y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. R x = x &&& R y = y
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
x is R healthy
y is R healthy
goal (1 subgoal):
1. R x = x &&& R y = y
[PROOF STEP]
by (simp_all only: Healthy_def)
[PROOF STATE]
proof (state)
this:
R x = x
R y = y
goal (1 subgoal):
1. x \<or> y is R healthy
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
R x = x
R y = y
goal (1 subgoal):
1. x \<or> y is R healthy
[PROOF STEP]
have "((R x) \<sqinter> (R y)) is R healthy"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. R x \<or> R y is R healthy
[PROOF STEP]
by (auto simp add: design_defs rp_defs fun_eq_iff split: cond_splits)
[PROOF STATE]
proof (state)
this:
R x \<or> R y is R healthy
goal (1 subgoal):
1. x \<or> y is R healthy
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
R x = x
R y = y
R x \<or> R y is R healthy
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
R x = x
R y = y
R x \<or> R y is R healthy
goal (1 subgoal):
1. x \<or> y is R healthy
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x \<or> y is R healthy
goal:
No subgoals!
[PROOF STEP]
qed | {"llama_tokens": 693, "file": "Circus_Reactive_Processes", "length": 11} |
import numpy as np
from math import sqrt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.datasets import make_circles
import matplotlib.pyplot as plt
import pylab as pl
"""
Demonstrates how a linearly nonseparable dataset in R^2 can be made
linearly separable in R^3 after a transformation via an appropriate
kernel function.
Generates a 2D non-separable dataset and projects it to R^3 using
a polynomial kernel [x1, x2] -> [x1, x2, x1^2 + x2^2], where
it is linearly separable.
Usage:
$ python demo_data_transform.py
"""
def randrange(n, vmin, vmax):
return (vmax-vmin)*np.random.rand(n) + vmin
def fn_kernel(x1, x2):
    """ Implements the kernel map phi(x1, x2) = [x1, x2, x1^2 + x2^2] """
return np.array([x1, x2, x1**2.0 + x2**2.0])
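# Quick sanity check of the kernel map (an added illustration, not part of
# the original demo): every point on a circle of radius r maps onto the
# plane z = r^2, which is why a horizontal plane can separate the two rings.
_r = 0.5
assert np.allclose(fn_kernel(_r, 0.0)[2], _r ** 2.0)
assert np.allclose(fn_kernel(0.0, _r)[2], _r ** 2.0)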
""" Generate linearly nonseparable dataset (in R^2) """
n = 200
X, Y = make_circles(n_samples=n, noise=0.07, factor=0.4)
A = X[np.where(Y == 0)]
B = X[np.where(Y == 1)]
X0_orig = A[:, 0]
Y0_orig = A[:, 1]
X1_orig = B[:, 0]
Y1_orig = B[:, 1]
frac0 = len(np.where(Y == 0)[0]) / float(len(Y))
frac1 = len(np.where(Y == 1)[0]) / float(len(Y))
print("Percentage of '0' labels:", frac0)
print("Percentage of '1' labels:", frac1)
A = np.array([fn_kernel(x,y) for x,y in zip(np.ravel(X0_orig), np.ravel(Y0_orig))])
X0 = A[:, 0]
Y0 = A[:, 1]
Z0 = A[:, 2]
A = np.array([fn_kernel(x,y) for x,y in zip(np.ravel(X1_orig), np.ravel(Y1_orig))])
X1 = A[:, 0]
Y1 = A[:, 1]
Z1 = A[:, 2]
def plot_no_decision_boundary():
fig = plt.figure(figsize=(20,8))
ax = fig.add_subplot(122, projection='3d')
ax.scatter(X0, Y0, Z0, c='r', marker='o')
ax.scatter(X1, Y1, Z1, c='b', marker='^')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax.set_title("Data in R^3 (separable)")
# Project data to X/Y plane
ax2d = fig.add_subplot(121)
ax2d.scatter(X0, Y0, c='r', marker='o')
ax2d.scatter(X1, Y1, c='b', marker='^')
ax2d.set_xlabel('X Label')
ax2d.set_ylabel('Y Label')
ax2d.set_title("Data projected to R^2 (nonseparable)")
plt.show()
def plot_decision_boundary():
fig = plt.figure(figsize=(20,8))
ax = fig.add_subplot(121, projection='3d')
ax.scatter(X0, Y0, Z0, c='r', marker='o')
ax.scatter(X1, Y1, Z1, c='b', marker='^')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax.set_title("Data in R^3 (separable w/ hyperplane)")
x = np.arange(-1.25, 1.25, 0.1)
y = np.arange(-1.25, 1.25, 0.1)
X, Y = np.meshgrid(x, y)
Z = np.zeros(X.shape)
Z[:,:] = 0.5
ax.plot_surface(X, Y, Z, color='#09F911')
# Project data to X/Y plane
ax2d = fig.add_subplot(122)
ax2d.scatter(X0, Y0, c='r', marker='o')
ax2d.scatter(X1, Y1, c='b', marker='^')
ax2d.add_patch(pl.Circle((0,0), radius=sqrt(0.5),
fill=False, linestyle='solid', linewidth=4.0,
color='black'))
ax2d.add_patch(pl.Circle((0,0), radius=sqrt(0.5),
fill=False, linestyle='dashed', linewidth=1.5,
color='#09F911'))
ax2d.set_xlabel('X Label')
ax2d.set_ylabel('Y Label')
ax2d.set_title("Data projected to R^2 (hyperplane projection shown)")
plt.show()
def main():
print("...Projecting dataset to R^3 (no decision boundary)...")
plot_no_decision_boundary()
print("...Projecting dataset to R^3 (with decision boundary)...")
plot_decision_boundary()
print("...Done.")
if __name__ == '__main__':
main()
| {"hexsha": "b4878a8d9ba3da2230b850bc17915a4f8686189c", "size": 3532, "ext": "py", "lang": "Python", "max_stars_repo_path": "svm/KernelTrick/code/demo_data_transform.py", "max_stars_repo_name": "uredkar/mymlstat", "max_stars_repo_head_hexsha": "b36a2097d76ca9c8071e60d57331945b978ca6bc", "max_stars_repo_licenses": ["Unlicense", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "svm/KernelTrick/code/demo_data_transform.py", "max_issues_repo_name": "uredkar/mymlstat", "max_issues_repo_head_hexsha": "b36a2097d76ca9c8071e60d57331945b978ca6bc", "max_issues_repo_licenses": ["Unlicense", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "svm/KernelTrick/code/demo_data_transform.py", "max_forks_repo_name": "uredkar/mymlstat", "max_forks_repo_head_hexsha": "b36a2097d76ca9c8071e60d57331945b978ca6bc", "max_forks_repo_licenses": ["Unlicense", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1692307692, "max_line_length": 83, "alphanum_fraction": 0.6121177803, "include": true, "reason": "import numpy", "num_tokens": 1170} |
import numpy as np
import timeit
import lpnorm
import smmprod
from scipy.sparse import coo_matrix as spmatrix
class RobustAlgo(object):
# This class implements the smoothed lp-norm loss function and its gradient
def __init__(self, k, p, mu):
self.k = k
self.p = p
self.mu = mu
def lpnorm(self, X):
# Depending on the cardinality of the input, the cost function is either computed in Python or with an external C module
card = X.size
if card > 1e4:
val = lpnorm.lpnorm_c_openmp(X, self.mu, self.p) / card
else:
val = lpnorm.lpnorm_py_simple(X, self.mu, self.p) / card
zero_offset = self.mu ** (self.p / 2.0)
scaling = (self.mu + 1) ** (self.p / 2.0) - zero_offset
return (val - zero_offset) / scaling
def lpnormgrad(self, X):
grad = lpnorm.lpnormgrad_c_openmp(X, self.mu, self.p) / X.size
zero_offset = self.mu ** (self.p / 2.0)
scaling = (self.mu + 1) ** (self.p / 2.0) - zero_offset
return grad / scaling
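    # The per-entry loss computed by the external lpnorm module appears to be
    # the smoothed lp-norm (x^2 + mu)^(p/2); the offset and scaling above
    # normalise it so that an entry of 0 maps to 0 and an entry of magnitude
    # 1 maps to 1 (an inferred reading of the C module, stated here for
    # orientation):
    #
    #   rho(x) = ((x^2 + mu)^(p/2) - mu^(p/2)) / ((1 + mu)^(p/2) - mu^(p/2))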
class RobustPCA(RobustAlgo):
def __init__(self, X, k, p, mu, kappa=None, samplesize=None, Omega=None, dimensions=None, PCA_INIT=False, SMMPROD=None, CALC_L=False, U_init=None):
super(RobustPCA, self).__init__(k, p, mu)
        self.Xmax = float(X.max())
        self.Xmin = float(X.min())
self.Omega = Omega
# X is a matrix, X_Omega is a vector containing the observed entries of X
self.X = None
self.X_Omega = None
if Omega is None:
# assume X is given as a matrix
self.X = X
self.X_Omega = None
self.m, self.n = X.shape
self.mn = self.m * self.n
self.card_Omega = self.mn
else:
# assume X is given as a vector
self.X = None
self.X_Omega = X
self.m, self.n = dimensions
self.mn = self.m * self.n
self.card_Omega = Omega[0].size
if samplesize is None:
self.card_Psi = self.card_Omega
else:
# cannot sample more than what is observed
            self.card_Psi = int(np.minimum(samplesize, self.card_Omega))
self.card_Omega_not = self.mn - self.card_Omega
self.card_Psi_not = self.mn - self.card_Psi
# kappa is an optional energy factor that weighs an additional cost function term for the energy on the unobserved entries. See RTRMC by Boumal and Absil for the general idea
if kappa is not None:
            self.kappa = float(kappa)
else:
self.kappa = None
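        # (Note: with U having orthonormal columns, ||L||_F^2 = ||U Y||_F^2
        # = ||Y||_F^2, so the energy term kappa/2 * (||Y||_F^2 - ||L_Omega||^2)
        # used below equals kappa/2 times the energy of L on the unobserved
        # entries -- the regularisation idea from RTRMC mentioned above.)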
# if the external parameter enforces selective matrix-matrix product then use this setting, but only if it is an actual subsampling
if SMMPROD is not None:
if self.card_Psi < self.mn:
self.SMMPROD_PSI = SMMPROD
else:
self.SMMPROD_PSI = False
if self.card_Omega < self.mn:
self.SMMPROD_OMEGA = SMMPROD
else:
self.SMMPROD_OMEGA = False
else:
# otherwise run benchmark to measure if SMMPROD is faster than full matrix-matrix product
if self.card_Psi == self.mn:
# if all entries are sampled SMMPROD is slow
self.SMMPROD_PSI = False
else:
# run benchmark for Psi
self.SMMPROD_PSI = self.mmprod_benchmark(self.card_Psi)
if Omega is None or not self.SMMPROD_PSI:
# if all entries are sampled SMMPROD is slow
# since Psi is a subset of Omega, SMMPROD for Omega is slow if SMMPROD for Psi is slow
self.SMMPROD_OMEGA = False
else:
self.SMMPROD_OMEGA = self.mmprod_benchmark(self.card_Omega)
        if self.SMMPROD_PSI:
            print("Using selective matrix multiplication for cost function")
        else:
            print("Using full matrix multiplication for cost function")
        if self.SMMPROD_OMEGA:
            print("Using selective matrix multiplication for gradient")
        else:
            print("Using full matrix multiplication for gradient")
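        # ("Selective" here means smmprod computes only the sampled entries
        # (UY)[i, j] for (i, j) in the index set, at cost O(k * |set|) per
        # product instead of the O(m * n * k) of a full matrix-matrix
        # product, so it pays off when far fewer entries than m*n are used.)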
self.CALC_L = CALC_L
# initialize U and Y
if PCA_INIT:
if self.Omega:
X_full = np.zeros((self.m, self.n))
X_full[Omega] = X
U, _, _ = np.linalg.svd(X_full)
self.U = U[:, : k]
self.Y = np.dot(self.U.T, X_full)
else:
U, _, _ = np.linalg.svd(self.X)
self.U = U[:, : k]
self.Y = np.dot(self.U.T, self.X)
else:
if U_init is not None:
self.U = U_init
else:
self.U, _ = np.linalg.qr(np.random.randn(self.m, self.k))
self.Y = np.zeros((self.k, self.n))
# Psi is the set of samples for each iteration, it is a subset of Omega
self.Psi = None
# ix_Psi denotes the indices of Psi with respect to Omega
self.ix_Psi = None
# X_Psi denotes the vector of sampled entries
self.X_Psi = None
self.L = None
self.update()
def loss_data_selective(self, UY):
return self.lpnorm(self.X_Psi - UY)
def loss_data_full(self, L):
if self.Psi is None:
# compute full residual
return self.lpnorm(self.X - L)
else:
# compute selective entries of the residual
return self.lpnorm(self.X_Psi - L[self.Psi])
def grad_data_selective(self, UY):
values = -self.lpnormgrad(self.X_Omega - UY)
return spmatrix((values, self.Omega), shape=(self.m, self.n))
def grad_data_full(self, L):
if self.Omega is None:
grad = -self.lpnormgrad(self.X - L)
else:
grad = np.zeros((self.m, self.n))
grad[self.Omega] = -self.lpnormgrad(self.X_Omega - L[self.Omega])
return grad
# loss and gradient functions for additional energy term
def loss_energy_full(self, Y, L):
return self.kappa / 2.0 * (np.linalg.norm(Y, 'fro') ** 2 - np.linalg.norm(L[self.Omega]) ** 2) / self.card_Omega_not
def loss_energy_selective(self, Y, UY):
return self.kappa / 2.0 * (np.linalg.norm(Y, 'fro') ** 2 - np.linalg.norm(UY) ** 2) / self.card_Psi_not
def grad_energy_term_selective(self, UY):
values = self.kappa * UY / self.card_Psi_not
return spmatrix((values, self.Omega), shape=(self.m, self.n))
def grad_energy_term_full(self, L):
grad = np.zeros((self.m, self.n))
grad[self.Omega] = self.kappa * L[self.Omega] / self.card_Omega_not
return grad
def get_cost(self, var, varname, VERBOSE=None):
if varname == "U":
U = var
Y = self.Y
elif varname == "Y":
Y = var
U = self.U
else:
U = self.U
Y = self.Y
if self.kappa is not None:
if self.SMMPROD_PSI:
# compute selective entries of the low-rank approximation
UY = smmprod.smmprod_c(U, Y, self.Psi)
loss_data = self.loss_data_selective(UY)
loss_energy = self.loss_energy_selective(Y, UY)
else:
# compute full L
L = np.dot(U, Y)
loss_data = self.loss_data_full(L)
loss_energy = self.loss_energy_full(Y, L)
if VERBOSE is not None:
                print(VERBOSE * "\t" + "loss_data: ", loss_data)
                print(VERBOSE * "\t" + "loss_energy: ", loss_energy)
loss = loss_data + loss_energy
else:
if self.SMMPROD_PSI:
# compute selective entries of the low-rank approximation
UY = smmprod.smmprod_c(U, Y, self.Psi)
loss_data = self.loss_data_selective(UY)
else:
# compute full L
L = np.dot(U, Y)
loss_data = self.loss_data_full(L)
if VERBOSE is not None:
                print(VERBOSE * "\t" + "loss_data: ", loss_data)
loss = loss_data
return loss
def get_full_cost(self):
# this function computes the full cost on all observable positions
if self.card_Psi == self.card_Omega:
cost = self.get_cost(None, "full")
else:
Psi_backup = self.Psi
X_Psi_backup = self.X_Psi
SMMPROD_PSI_backup = self.SMMPROD_PSI
self.Psi = self.Omega
self.X_Psi = self.X_Omega
self.SMMPROD_PSI = self.SMMPROD_OMEGA
cost = self.get_cost(None, "full")
self.Psi = Psi_backup
self.X_Psi = X_Psi_backup
self.SMMPROD_PSI = SMMPROD_PSI_backup
return cost
def get_gradient(self, var, varname):
if varname == "U":
U = var
Y = self.Y
elif varname == "Y":
Y = var
U = self.U
else:
return False
if self.SMMPROD_OMEGA:
# compute selective entries of L
UY = smmprod.smmprod_c(U, Y, self.Omega)
grad_data = self.grad_data_selective(UY)
if self.kappa is not None:
if varname == "U":
return (grad_data - self.grad_energy_term_selective(UY)).dot(Y.T)
elif varname == "Y":
return (grad_data - self.grad_energy_term_selective(UY)).T.dot(U).T + self.kappa/self.card_Omega_not * Y
else:
return False
else:
if varname == "U":
return grad_data.dot(self.Y.T)
elif varname == "Y":
return grad_data.T.dot(self.U).T
else:
return False
else:
# compute full L
L = np.dot(U, Y)
grad_data = self.grad_data_full(L)
if self.kappa is not None:
if varname == "U":
return np.dot(grad_data - self.grad_energy_term_full(L), Y.T)
elif varname == "Y":
return np.dot(U.T, grad_data - self.grad_energy_term_full(L)) + self.kappa / self.card_Omega_not * Y
else:
return False
else:
if varname == "U":
return np.dot(grad_data, self.Y.T)
elif varname == "Y":
return np.dot(self.U.T, grad_data)
else:
return False
def get_variable(self, varname):
if varname == "Y":
return self.Y
elif varname == "U":
return self.U
else:
return False
def set_updated(self, var, varname):
if varname == "U":
self.U = var
elif varname == "Y":
self.Y = var
else:
return False
def update(self):
self.resample()
if self.CALC_L:
self.L = np.dot(self.U, self.Y)
def resample(self):
if self.Omega is None:
# check if all entries are sampled, then no sampling is required and Psi remains None
if self.card_Psi == self.mn:
return
else:
# randomly pick from all possible positions
if (self.mn < 1e6) and (self.card_Psi > 0.1 * self.mn):
# sample exactly if necessary (large amount of samples, thus high risk of duplicates) and cheap (dimensions small enough)
self.Psi = np.unravel_index(np.random.choice(self.mn, self.card_Psi, replace=False), (self.m, self.n))
else:
# sample inexactly, might produce duplicate entries
self.Psi = (np.random.choice(self.m, self.card_Psi, replace=True), np.random.choice(self.n, self.card_Psi, replace=True))
# X is a matrix since no Omega is defined
self.X_Psi = self.X[self.Psi]
else:
# randomly pick from the entries of Omega
if self.card_Psi == self.card_Omega:
self.Psi = self.Omega
self.X_Psi = self.X_Omega
else:
# select random subset
self.ix_Psi = np.random.choice(self.card_Omega, self.card_Psi, replace=False)
self.Psi = (self.Omega[0][self.ix_Psi], self.Omega[1][self.ix_Psi])
self.X_Psi = self.X_Omega[self.ix_Psi]
def mmprod_benchmark(self, card_selection):
# This function benchmarks the actual difference in computation time between full and selective matrix-matrix product
randset = np.unravel_index(np.random.choice(self.mn, card_selection, replace=False), (self.m, self.n))
A = np.random.rand(self.m, self.k)
B = np.random.rand(self.k, self.n)
if self.mn <= 1E5:
repetitions = 1000
elif self.mn <= 1E6:
repetitions = 10
else:
repetitions = 1
time_mmprod = timeit.timeit(lambda: np.dot(A, B), number=repetitions)
time_smmprod = timeit.timeit(lambda: smmprod.smmprod_c(A, B, randset), number=repetitions)
# return True if SMMPROD is faster, False if full matrix product is faster
return time_mmprod > time_smmprod
def print_cost(self, tablevel):
self.get_cost(None, "full", VERBOSE=tablevel)
class RobustSubspaceTracking(RobustAlgo):
def __init__(self, m, k, p, mu, U_init=None):
super(RobustSubspaceTracking, self).__init__(k, p, mu)
self.m = m
self.x = None
self.x_Omega = None
if U_init is not None:
self.U = U_init
else:
self.U, _ = np.linalg.qr(np.random.rand(self.m, self.k))
self.y = np.zeros((k,))
self.l = np.dot(self.U, self.y)
self.Omega = None
def load_sample(self, x, Omega=None):
# x is always 1D, regardless of Omega, but it is safer to make a distinction between x and x_Omega
if Omega is not None:
self.Omega = Omega
self.x_Omega = x
self.x = None
else:
self.x = x
self.Omega = None
self.x_Omega = None
def loss_data(self, l):
if self.Omega is None:
return self.lpnorm(self.x - l)
else:
return self.lpnorm(self.x_Omega - l[self.Omega])
def grad_data(self, l):
if self.Omega is None:
grad = -self.lpnormgrad(self.x - l)
else:
grad = np.zeros((self.m, ))
grad[self.Omega] = -self.lpnormgrad(self.x_Omega - l[self.Omega])
return grad
def get_variable(self, varname):
if varname == "y":
return self.y
if varname == "U":
return self.U
else:
return False
def get_gradient(self, var, varname):
if varname == "y":
y = var
l = np.dot(self.U, y)
return np.dot(self.U.T, self.grad_data(l))
elif varname == "U":
U = var
l = np.dot(U, self.y)
# Returns a tuple of two vectors instead of actually multiplying out the rank 1 matrix!
return self.grad_data(l), self.y
else:
return False
def get_cost(self, var, varname, VERBOSE=None):
if varname == "y":
y = var
U = self.U
elif varname == "U":
y = self.y
U = var
else:
y = self.y
U = self.U
loss_data = self.loss_data(np.dot(U, y))
if VERBOSE is not None:
            print(VERBOSE * "\t" + "loss_data: ", loss_data)
return loss_data
def set_updated(self, x, varname):
if varname == "U":
self.U = x
elif varname == "y":
self.y = x
else:
return False
class RobustSLRA(RobustAlgo):
def __init__(self, x, structure, k, p, mu, rho, Omega=None, PCA_INIT=False, U_init=None, Y_init=None):
super(RobustSLRA, self).__init__(k, p, mu)
self.xmin = x.min()
self.xmax = x.max()
self.x = None
self.x_Omega = None
self.structure = structure
# The structure contains the dimensions of the structured matrix, so no additional dimensions variable is needed
self.m = structure.m
self.n = structure.n
self.mn = self.m * self.n
# the length of the data vector
self.N = structure.N
self.rho = rho
self.Omega = Omega
self.vec_Lambda = np.zeros((self.mn,))
self.vec_grad_Lambda = np.zeros((self.mn,))
if Omega is None:
# assume x is the full data vector
self.x = x
self.x_Omega = None
if x.size != self.N:
                raise ValueError("Input length does not match the structure")
self.card_Omega = self.N
else:
# assume only the known entries of x are given
self.x = None
self.x_Omega = x
self.card_Omega = Omega.size
if PCA_INIT:
# if one wishes to initialize with PCA, the full structured matrix needs to be computed first
if self.Omega:
x_full = np.zeros((self.N,))
x_full[Omega] = x
X_full = structure.struct_from_vec(x_full)
else:
X_full = structure.struct_from_vec(x)
U, _, _ = np.linalg.svd(X_full)
self.U = U[:, : k]
self.Y = np.dot(self.U.T, X_full)
else:
if U_init is not None:
self.U = U_init
else:
self.U, _ = np.linalg.qr(np.random.randn(self.m, self.k))
if Y_init is not None:
self.Y = Y_init
else:
self.Y = np.zeros((self.k, self.n))
self.L = None
self.vec_L = None
self.l = None
# Since the full L appears in vectorized form in the structural constraint it needs to be stored
self.update_L()
def update_L(self):
self.L = np.dot(self.U, self.Y)
self.vec_L = self.L.flatten('F')
self.l = self.structure.S_pinv.dot(self.vec_L)
def update_Lambda(self):
self.vec_Lambda += self.rho * (self.vec_L - self.structure.S.dot(self.structure.S_pinv.dot(self.vec_L)))
self.vec_grad_Lambda = (self.vec_Lambda - self.structure.S.dot(self.structure.S_pinv.dot(self.vec_Lambda))) / self.mn
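    # update_Lambda above reads as the (scaled) dual ascent step of an
    # ADMM-style scheme: vec_L - S S^+ vec_L measures the distance of L from
    # the structured subspace, Lambda accumulates rho times that residual,
    # and its projection is cached as the gradient contribution of the
    # Lagrangian term used in loss_structure / vec_grad_structure below.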
def loss_data(self, vec_L):
# The residual is only computed on the data vector level, not over the full matrix
if self.Omega is None:
return self.lpnorm(self.x - self.structure.S_pinv.dot(vec_L))
else:
return self.lpnorm(self.x_Omega - self.structure.S_pinv.dot(vec_L)[self.Omega])
def grad_data_term(self, vec_L):
if self.Omega is None:
grad = - self.lpnormgrad(self.x - self.structure.S_pinv.dot(vec_L))
else:
grad = np.zeros((self.N,))
grad[self.Omega] = -self.lpnormgrad(self.x_Omega - self.structure.S_pinv.dot(vec_L)[self.Omega])
return grad
def vec_grad_data(self, vec_L):
if self.Omega is None:
lpnormgrad = self.lpnormgrad(self.x - self.structure.S_pinv.dot(vec_L))
else:
lpnormgrad = np.zeros((self.N,))
lpnormgrad[self.Omega] = self.lpnormgrad(self.x_Omega - self.structure.S_pinv.dot(vec_L)[self.Omega])
return -self.structure.S_pinv.T.dot(lpnormgrad)
def loss_structure(self, vec_L):
e = vec_L - self.structure.S.dot(self.structure.S_pinv.dot(vec_L))
loss_lambda_vec = np.dot(self.vec_Lambda, e) / self.mn
loss_structure_vec = self.rho / 2.0 * np.linalg.norm(e) ** 2 / self.mn
return loss_lambda_vec + loss_structure_vec
def vec_grad_structure(self, vec_L):
vec_grad_structure = self.rho * (vec_L - self.structure.S.dot(self.structure.S_pinv.dot(vec_L))) / self.mn
return self.vec_grad_Lambda + vec_grad_structure
def get_cost(self, X, varname, VERBOSE=None):
if varname == "U":
U = X
Y = self.Y
elif varname == "Y":
Y = X
U = self.U
else:
U = self.U
Y = self.Y
# compute full L
vec_L = np.dot(U, Y).flatten('F')
loss_data = self.loss_data(vec_L)
loss_structure = self.loss_structure(vec_L)
if VERBOSE is not None:
            print(VERBOSE * "\t" + "data loss: ", loss_data)
            print(VERBOSE * "\t" + "structure loss: ", loss_structure)
loss = loss_data + loss_structure
return loss
def get_gradient(self, X, varname):
if varname == "U":
U = X
Y = self.Y
elif varname == "Y":
Y = X
U = self.U
else:
return False
vec_L = np.dot(U, Y).flatten('F')
grad = np.reshape(self.vec_grad_data(vec_L) + self.vec_grad_structure(vec_L), (self.m, self.n), order='F')
if varname == "U":
return np.dot(grad, self.Y.T)
elif varname == "Y":
return np.dot(self.U.T, grad)
def get_variable(self, varname):
if varname == "Y":
return self.Y
elif varname == "U":
return self.U
else:
return False
def set_updated(self, var, varname):
if varname == "U":
self.U = var
elif varname == "Y":
self.Y = var
else:
return False
def print_cost(self, tablevel):
self.get_cost(None, "full", VERBOSE=tablevel)
| {"hexsha": "950a2852f6792dca53fa038dd9129132563bd25f", "size": 21871, "ext": "py", "lang": "Python", "max_stars_repo_path": "grslra/problems.py", "max_stars_repo_name": "clemenshage/grslra", "max_stars_repo_head_hexsha": "00f61b4ef08208d12e8e803d10f8ebbe16d8614a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "grslra/problems.py", "max_issues_repo_name": "clemenshage/grslra", "max_issues_repo_head_hexsha": "00f61b4ef08208d12e8e803d10f8ebbe16d8614a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "grslra/problems.py", "max_forks_repo_name": "clemenshage/grslra", "max_forks_repo_head_hexsha": "00f61b4ef08208d12e8e803d10f8ebbe16d8614a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6963087248, "max_line_length": 182, "alphanum_fraction": 0.5468885739, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5444} |
import sys
sys.path.append('')
from espnet2.VC_SRC import melspectrogram, load_wav
import numpy as np
import os


def cal_mel_target(wav_dir):
    # Walk every .wav file under the directory, compute the corresponding mel
    # spectrogram, and save it as a .npy file with the same basename.
    for root, dirs, files in os.walk(wav_dir):
        for f in files:
            if f.endswith(".wav"):
                wav_path = os.path.join(root, f)
                print(wav_path)
                wav = load_wav(wav_path)
                mel_spectrogram = melspectrogram(wav).astype(np.float32)  # compute the mel spectrogram here
                np.save(wav_path[:-4] + '.npy', mel_spectrogram.T, allow_pickle=False)


if __name__ == '__main__':
    data_dir = sys.argv[1]
    # data_dir = "$root_path/tf_project/a2m_VC/models/baseline/data/train"
    cal_mel_target(data_dir)
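
# Usage (inferred from the __main__ block above):
#   python wav2mel.py /path/to/wav_root
# Every foo.wav under that directory gets a foo.npy written next to it,
# holding the transposed mel spectrogram (frames x mel bins, assuming
# melspectrogram returns an (n_mels, n_frames) array).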
| {"hexsha": "a5f392ca60b25f7d42c5976da146e5035bf60e69", "size": 757, "ext": "py", "lang": "Python", "max_stars_repo_path": "espnet2/VC_SRC/evaluation/wav2mel.py", "max_stars_repo_name": "victor45664/espnet", "max_stars_repo_head_hexsha": "0ccacc32d25feddec5270cb3f8e08c24183755d8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-12-24T03:27:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T13:21:09.000Z", "max_issues_repo_path": "espnet2/VC_SRC/evaluation/wav2mel.py", "max_issues_repo_name": "victor45664/espnet", "max_issues_repo_head_hexsha": "0ccacc32d25feddec5270cb3f8e08c24183755d8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "espnet2/VC_SRC/evaluation/wav2mel.py", "max_forks_repo_name": "victor45664/espnet", "max_forks_repo_head_hexsha": "0ccacc32d25feddec5270cb3f8e08c24183755d8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-24T03:33:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-26T03:54:17.000Z", "avg_line_length": 17.6046511628, "max_line_length": 86, "alphanum_fraction": 0.6287978864, "include": true, "reason": "import numpy", "num_tokens": 230} |
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection)


def plot_supercell(orthonormal_positions, atoms2plot):
    '''
    Plot all (or only the desired) atoms in the orthonormalised supercell.
    '''
    op = orthonormal_positions
    # Filter down to the atoms requested for plotting. This must happen here,
    # before the coordinate arrays are built; filtering afterwards can
    # misalign atom labels with their coordinates.
    to_plot = [op[i][:] for i, atom in enumerate(op) if op[i][0] in atoms2plot]
    scatter_x = np.array([to_plot[i][2] for i, atom in enumerate(to_plot)])
    scatter_y = np.array([to_plot[i][3] for i, atom in enumerate(to_plot)])
    scatter_z = np.array([to_plot[i][4] for i, atom in enumerate(to_plot)])
    group = np.array([to_plot[i][0] for i, atom in enumerate(to_plot)])
    color_list = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow']
    cdict = dict(zip(atoms2plot, color_list))  # map atom labels to colours
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for g in np.unique(group):
        ix = np.where(group == g)
        ax.scatter(
            scatter_x[ix],
            scatter_y[ix],
            scatter_z[ix],
            c=cdict[g],
            label=g,
            s=50,     # marker size
            alpha=1)  # opacity
    ax.legend(loc='upper right')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    plt.title('Orthonormalised supercell')
    plt.show()
    return group
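
# Example usage (an added sketch; the row layout [label, ..., x, y, z] with
# coordinates at indices 2-4 is inferred from the indexing above):
#
#     positions = [['Sr', 0, 0.0, 0.0, 0.0], ['Ti', 1, 0.5, 0.5, 0.5]]
#     plot_supercell(positions, ['Sr', 'Ti'])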
| {"hexsha": "6820f57638312a7b308548489c5e6679ebba0141", "size": 1460, "ext": "py", "lang": "Python", "max_stars_repo_path": "rmcalyse/plotting_functions/plot_supercell.py", "max_stars_repo_name": "antgobar/rmcalyse", "max_stars_repo_head_hexsha": "dee40416bfd7cf73a4ede6eb309b484397e11310", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-17T18:09:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-12T12:36:19.000Z", "max_issues_repo_path": "rmcalyse/plotting_functions/plot_supercell.py", "max_issues_repo_name": "antgobar/rmcalyse", "max_issues_repo_head_hexsha": "dee40416bfd7cf73a4ede6eb309b484397e11310", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rmcalyse/plotting_functions/plot_supercell.py", "max_forks_repo_name": "antgobar/rmcalyse", "max_forks_repo_head_hexsha": "dee40416bfd7cf73a4ede6eb309b484397e11310", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-12T12:37:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-12T12:37:24.000Z", "avg_line_length": 30.4166666667, "max_line_length": 82, "alphanum_fraction": 0.6246575342, "include": true, "reason": "import numpy", "num_tokens": 394} |
# importing the necessary libraries
import matplotlib.pyplot as plt
import pandas as pd
import re
import random
import math
import numpy as np

random.seed(10)

"""
Read text data from file and pre-process the text by doing the following:
1. convert to lowercase
2. convert tabs to spaces
3. remove "non-word" characters
Store the resulting "words" into an array.
"""
FILENAME = 'SMSSpamCollection'
with open(FILENAME) as f:
    all_data = f.readlines()

# split into train and test
num_samples = len(all_data)
all_idx = list(range(num_samples))
random.shuffle(all_idx)
idx_limit = int(0.8 * num_samples)
train_idx = all_idx[:idx_limit]
test_idx = all_idx[idx_limit:]
train_examples = [all_data[ii] for ii in train_idx]
test_examples = [all_data[ii] for ii in test_idx]

num_spam_lines = 0
num_ham_lines = 0

# Preprocess train and test examples
train_words = []
train_labels = []
test_words = []
test_labels = []

# train examples
for line in train_examples:
    line = line.strip('\r\n\t ')  # remove trailing spaces, tabs and carriage returns
    line = line.lower()  # lowercase
    line = line.replace("\t", ' ')  # convert tabs to spaces
    line_words = re.findall(r'\w+', line)
    line_words = [xx for xx in line_words if xx != '']  # remove empty words
    label = line_words[0]
    if label == "spam":
        label = 1
        num_spam_lines += 1  # increment the number of spam lines
    else:
        label = 0
        num_ham_lines += 1  # increment the number of ham lines
    line_words = line_words[1:]
    train_words.append(line_words)
    train_labels.append(label)

# test examples
for line in test_examples:
    line = line.strip('\r\n\t ')  # remove trailing spaces, tabs and carriage returns
    line = line.lower()  # lowercase
    line = line.replace("\t", ' ')  # convert tabs to spaces
    line_words = re.findall(r'\w+', line)
    line_words = [xx for xx in line_words if xx != '']  # remove empty words
    label = line_words[0]
    label = 1 if label == 'spam' else 0
    line_words = line_words[1:]
    test_words.append(line_words)
    test_labels.append(label)
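
# Each raw line of SMSSpamCollection is "<label>\t<message>", e.g.
#   ham\tGo until jurong point, crazy.. Available only in bugis n great world
# which the loops above turn into label=0 and
# words=['go', 'until', 'jurong', 'point', 'crazy', ...].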

def nbayes_a():
    spam_words = []
    ham_words = []
    alpha = 0.5
    N = 20000
    for ii in range(len(train_words)):  # we pass through the words in each (train) SMS
        words = train_words[ii]
        label = train_labels[ii]
        if label == 1:
            spam_words += words
        else:
            ham_words += words
    input_words = spam_words + ham_words  # all words in the input vocabulary

    # Count spam and ham occurrences for each word. A word starts at
    # alpha + 1 on first sight, so every observed word ends up with
    # count + alpha (smoothing).
    spam_counts = {}
    ham_counts = {}
    for word in spam_words:
        spam_counts[word] = spam_counts.get(word, alpha) + 1
    for word in ham_words:
        ham_counts[word] = ham_counts.get(word, alpha) + 1

    num_spam = len(spam_words)
    num_ham = len(ham_words)

    # Training model starts here
    p_spam = num_spam_lines / idx_limit  # probability of spam
    p_ham = num_ham_lines / idx_limit  # probability of ham
    p_wordgivenspam = {}  # probability of each word given spam
    p_wordgivenham = {}  # probability of each word given ham
    denominator_spam = num_spam + (alpha * N)
    denominator_ham = num_ham + (alpha * N)
    for word in spam_counts:
        p_wordgivenspam[word] = (spam_counts[word] / denominator_spam)
    for word in ham_counts:
        p_wordgivenham[word] = (ham_counts[word] / denominator_ham)
    # Training model ends here
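    # The model above is multinomial Naive Bayes with Lidstone smoothing:
    #   P(word | class) = (count(word, class) + alpha) / (N_class + alpha * N)
    # where N_class is the number of word tokens in the class and N = 20000
    # plays the role of the vocabulary size. A message is then scored as
    #   P(class | msg) proportional to P(class) * prod_i P(word_i | class).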

    # Model run on test data
    p_spamgivenline = []
    # Calculating the probability of spam given the message
    for i in range(len(test_words)):
        p_spamgivenline.append(p_spam)
        for j in range(len(test_words[i])):
            if test_words[i][j] in p_wordgivenspam.keys():
                p_spamgivenline[i] = p_spamgivenline[i] * p_wordgivenspam[test_words[i][j]]
            else:
                num_spam += 1
                p_wordgivenspam[test_words[i][j]] = alpha / denominator_spam
                p_spamgivenline[i] = p_spamgivenline[i] * p_wordgivenspam[test_words[i][j]]
    p_hamgivenline = []
    # Calculating the probability of ham given the message
    for i in range(len(test_words)):
        p_hamgivenline.append(p_ham)
        for j in range(len(test_words[i])):
            if test_words[i][j] in p_wordgivenham.keys():
                p_hamgivenline[i] = p_hamgivenline[i] * p_wordgivenham[test_words[i][j]]
            else:
                num_ham += 1
                p_wordgivenham[test_words[i][j]] = alpha / denominator_ham
                p_hamgivenline[i] = p_hamgivenline[i] * p_wordgivenham[test_words[i][j]]

    predicted_label = []
    # Comparing the probabilities of spam and ham and appending labels accordingly
    for x in range(len(p_spamgivenline)):
        if (p_hamgivenline[x] > p_spamgivenline[x]):
            predicted_label.append(0)
        else:
            predicted_label.append(1)

    true_pos = 0
    true_neg = 0
    false_pos = 0
    false_neg = 0
    # Calculating true/false positives and negatives
    for x in range(len(predicted_label)):
        if predicted_label[x] == 0 and test_labels[x] == 0:
            true_neg += 1
        elif (predicted_label[x] == 1 and test_labels[x] == 1):
            true_pos += 1
        elif (predicted_label[x] == 0 and test_labels[x] == 1):
            false_neg += 1
        else:
            false_pos += 1
    total = true_neg + true_pos + false_neg + false_pos
    print("\nTesting Accuracy:", (true_pos + true_neg) / total, "\n")

    # Confusion Matrix
    data = {'Positive': pd.Series([true_pos, false_neg, ''], index=['Positive', 'Negative', '(Predicted)']),
            'Negative': pd.Series([false_pos, true_neg, ''], index=['Positive', 'Negative', '(Predicted)']),
            '(True)': pd.Series(['', '', ''], index=['Positive', 'Negative', '(Predicted)'])}
    cm = pd.DataFrame(data)
    print(cm)

    precision = true_pos / (true_pos + false_pos)
    print("\nPrecision:", precision)
    recall = true_pos / (true_pos + false_neg)
    print("\nRecall:", recall)
    f_score = 2 * ((precision * recall) / (precision + recall))
    print("\nF-Score:", f_score)
    print("-----------------------------------------\n")


p_wordgivenspam = {}  # probability of each word given spam
p_wordgivenham = {}  # probability of each word given ham
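
# NOTE (added): these two dicts are module-level and are shared by every call
# to nbayes_b below, so word probabilities cached for one alpha leak into the
# runs for later alpha values; defining them inside nbayes_b would isolate runs.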

def nbayes_b(alpha_i):
    spam_words = []
    ham_words = []
    alpha = alpha_i
    N = 20000
    for ii in range(len(train_words)):  # we pass through the words in each (train) SMS
        words = train_words[ii]
        label = train_labels[ii]
        if label == 1:
            spam_words += words
        else:
            ham_words += words
    input_words = spam_words + ham_words  # all words in the input vocabulary

    # Count spam and ham occurrences for each word (same smoothing as nbayes_a)
    spam_counts = {}
    ham_counts = {}
    for word in spam_words:
        spam_counts[word] = spam_counts.get(word, alpha) + 1
    for word in ham_words:
        ham_counts[word] = ham_counts.get(word, alpha) + 1

    num_spam = len(spam_words)
    num_ham = len(ham_words)

    # Training model starts here
    p_spam = num_spam_lines / idx_limit  # probability of spam
    p_ham = num_ham_lines / idx_limit  # probability of ham
    denominator_spam = num_spam + (alpha * N)
    denominator_ham = num_ham + (alpha * N)
    for word in spam_counts:
        p_wordgivenspam[word] = (spam_counts[word] / denominator_spam)
    for word in ham_counts:
        p_wordgivenham[word] = (ham_counts[word] / denominator_ham)
    # Training model ends here

    # Variables and lists ending in _te are testing variables, while the ones
    # ending in _tr are training variables.
    # Model run on test data
    p_spamgivenline_te = []
    # Calculating the probability of spam given the message
    for i in range(len(test_words)):
        p_spamgivenline_te.append(p_spam)
        for j in range(len(test_words[i])):
            if test_words[i][j] in p_wordgivenspam.keys():
                p_spamgivenline_te[i] = p_spamgivenline_te[i] * p_wordgivenspam[test_words[i][j]]
            else:
                num_spam += 1
                p_wordgivenspam[test_words[i][j]] = alpha / denominator_spam
                p_spamgivenline_te[i] = p_spamgivenline_te[i] * p_wordgivenspam[test_words[i][j]]
    p_hamgivenline_te = []
    # Calculating the probability of ham given the message
    for i in range(len(test_words)):
        p_hamgivenline_te.append(p_ham)
        for j in range(len(test_words[i])):
            if test_words[i][j] in p_wordgivenham.keys():
                p_hamgivenline_te[i] = p_hamgivenline_te[i] * p_wordgivenham[test_words[i][j]]
            else:
                num_ham += 1
                p_wordgivenham[test_words[i][j]] = alpha / denominator_ham
                p_hamgivenline_te[i] = p_hamgivenline_te[i] * p_wordgivenham[test_words[i][j]]
    predicted_label_te = []
    # Comparing the probabilities of spam and ham and appending labels accordingly
    for x in range(len(p_spamgivenline_te)):
        if (p_hamgivenline_te[x] > p_spamgivenline_te[x]):
            predicted_label_te.append(0)
        else:
            predicted_label_te.append(1)
    # End of test data

    for word in spam_counts:
        p_wordgivenspam[word] = (spam_counts[word] / denominator_spam)
    for word in ham_counts:
        p_wordgivenham[word] = (ham_counts[word] / denominator_ham)

    # Model run on training data
    p_spamgivenline_tr = []
    # Calculating the probability of spam given the message
    for i in range(len(train_words)):
        p_spamgivenline_tr.append(p_spam)
        for j in range(len(train_words[i])):
            if train_words[i][j] in p_wordgivenspam.keys():
                p_spamgivenline_tr[i] = p_spamgivenline_tr[i] * p_wordgivenspam[train_words[i][j]]
            else:
                num_spam += 1
                p_wordgivenspam[train_words[i][j]] = alpha / denominator_spam
                p_spamgivenline_tr[i] = p_spamgivenline_tr[i] * p_wordgivenspam[train_words[i][j]]
    p_hamgivenline_tr = []
    # Calculating the probability of ham given the message
    for i in range(len(train_words)):
        p_hamgivenline_tr.append(p_ham)
        for j in range(len(train_words[i])):
            if train_words[i][j] in p_wordgivenham.keys():
                p_hamgivenline_tr[i] = p_hamgivenline_tr[i] * p_wordgivenham[train_words[i][j]]
            else:
                num_ham += 1
                p_wordgivenham[train_words[i][j]] = alpha / denominator_ham
                p_hamgivenline_tr[i] = p_hamgivenline_tr[i] * p_wordgivenham[train_words[i][j]]
    predicted_label_tr = []
    # Comparing the probabilities of spam and ham and appending labels accordingly
    for x in range(len(p_spamgivenline_tr)):
        if (p_hamgivenline_tr[x] > p_spamgivenline_tr[x]):
            predicted_label_tr.append(0)
        else:
            predicted_label_tr.append(1)
    # End of training data

    # Calculating true/false positives and negatives for test data
    true_pos_te = 0
    true_neg_te = 0
    false_pos_te = 0
    false_neg_te = 0
    for x in range(len(predicted_label_te)):
        if predicted_label_te[x] == 0 and test_labels[x] == 0:
            true_neg_te += 1
        elif (predicted_label_te[x] == 1 and test_labels[x] == 1):
            true_pos_te += 1
        elif (predicted_label_te[x] == 0 and test_labels[x] == 1):
            false_neg_te += 1
        else:
            false_pos_te += 1

    # Calculating true/false positives and negatives for training data
    true_pos_tr = 0
    true_neg_tr = 0
    false_pos_tr = 0
    false_neg_tr = 0
    for x in range(len(predicted_label_tr)):
        if predicted_label_tr[x] == 0 and train_labels[x] == 0:
            true_neg_tr += 1
        elif (predicted_label_tr[x] == 1 and train_labels[x] == 1):
            true_pos_tr += 1
        elif (predicted_label_tr[x] == 0 and train_labels[x] == 1):
            false_neg_tr += 1
        else:
            false_pos_tr += 1

    # Accuracy for test data
    total_te = true_neg_te + true_pos_te + false_neg_te + false_pos_te
    accuracy_te = (true_pos_te + true_neg_te) / total_te
    print("\nTesting Accuracy:", accuracy_te, "\n")
    lacc_te.append(accuracy_te)
    # Accuracy for training data
    total_tr = true_neg_tr + true_pos_tr + false_neg_tr + false_pos_tr
    accuracy_tr = (true_pos_tr + true_neg_tr) / total_tr
    print("\nTraining Accuracy:", accuracy_tr, "\n")
    lacc_tr.append(accuracy_tr)

    precision_te = true_pos_te / (true_pos_te + false_pos_te)  # for testing data
    precision_tr = true_pos_tr / (true_pos_tr + false_pos_tr)  # for training data
    recall_te = true_pos_te / (true_pos_te + false_neg_te)  # for testing data
    recall_tr = true_pos_tr / (true_pos_tr + false_neg_tr)  # for training data
    f_score_te = 2 * ((precision_te * recall_te) / (precision_te + recall_te))
    print("\nF-Score for testing:", f_score_te)  # for testing data
    lfscore_te.append(f_score_te)
    f_score_tr = 2 * ((precision_tr * recall_tr) / (precision_tr + recall_tr))
    print("\nF-Score for training:", f_score_tr)  # for training data
    print("-----------------------------------------")
    lfscore_tr.append(f_score_tr)


# The program flow starts here. First call nbayes_a to run question 4a.
nbayes_a()
print("-----------------------------------------")
# Creating all lists required for plotting graphs
lacc_te = []
lacc_tr = []
lfscore_te = []
lfscore_tr = []
# nbayes_b is called for each value of i (alpha = 2 ** i)
for i in range(-5, 1):
    nbayes_b(2 ** i)
# Plotting graph of accuracy against i
i = [-5, -4, -3, -2, -1, 0]
plt.title('Accuracy Measure')
plt.plot(i, lacc_te, label='Test data')
plt.plot(i, lacc_tr, label='Train data')
plt.xlabel('i')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# Plotting graph of F-score against i
plt.title('F-Score Measure')
plt.plot(i, lfscore_te, label='Test data')
plt.plot(i, lfscore_tr, label='Train data')
plt.xlabel('i')
plt.ylabel('F-Score')
plt.legend()
plt.show()
| {"hexsha": "da49a78a925e8b2685b1b2ccd24067e5390bc039", "size": 15114, "ext": "py", "lang": "Python", "max_stars_repo_path": "nbayes.py", "max_stars_repo_name": "yashchitre03/Naive-Bayes-SMS-Classification", "max_stars_repo_head_hexsha": "ac9a5dea982e262a29e107d83887db11b86fb675", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nbayes.py", "max_issues_repo_name": "yashchitre03/Naive-Bayes-SMS-Classification", "max_issues_repo_head_hexsha": "ac9a5dea982e262a29e107d83887db11b86fb675", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nbayes.py", "max_forks_repo_name": "yashchitre03/Naive-Bayes-SMS-Classification", "max_forks_repo_head_hexsha": "ac9a5dea982e262a29e107d83887db11b86fb675", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9861111111, "max_line_length": 124, "alphanum_fraction": 0.61201535, "include": true, "reason": "import numpy", "num_tokens": 3952} |
import cuttsum.events
import cuttsum.corpora
from cuttsum.pipeline import InputStreamResource
from mpi4py import MPI
from cuttsum.misc import enum
from cuttsum.classifiers import NuggetRegressor
import numpy as np
import pandas as pd
import random
import pyvw
from datetime import datetime
from sklearn.feature_extraction import FeatureHasher
from sklearn.metrics.pairwise import cosine_similarity
from itertools import izip
import os
import cuttsum.judgements
from cuttsum.misc import event2semsim

matches_df = cuttsum.judgements.get_merged_dataframe()

tags = enum("READY", "WORKER_START", "WORKER_STOP")


class FeatureMapper(dict):
    def __init__(self):
        self.store = dict()
        self._inv_store = dict()
        self._idx = 0

    def __getitem__(self, key):
        if key not in self.store:
            self.store[key] = self._idx
            self._inv_store[self._idx] = key
            self._idx += 1
        return self.store[key]

    def items(self):
        return self.store.items()

    def inv(self, idx):
        return self._inv_store[idx]
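
# NOTE (added): FeatureMapper assigns the next unused integer index the first
# time a feature name is looked up (fmap["foo"] both registers and returns the
# index), and fmap.inv(idx) recovers the name, giving VW stable feature ids.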

fmap = FeatureMapper()

SELECT = 1
SKIP = 2

basic_cols = ["BASIC length",  # "BASIC char length",
              "BASIC doc position", "BASIC all caps ratio",
              "BASIC upper ratio",
              # "BASIC lower ratio",
              # "BASIC punc ratio",
              "BASIC person ratio",
              "BASIC location ratio",
              "BASIC organization ratio", "BASIC date ratio",
              "BASIC time ratio", "BASIC duration ratio",
              "BASIC number ratio", "BASIC ordinal ratio",
              "BASIC percent ratio",
              "BASIC money ratio",
              # "BASIC set ratio", "BASIC misc ratio"
              ]

query_bw_cols = [
    "Q_sent_query_cov",
    "Q_sent_syn_cov",
    "Q_sent_hyper_cov",
    "Q_sent_hypo_cov",
]

query_fw_cols = [
    "Q_query_sent_cov",
    "Q_syn_sent_cov",
    "Q_hyper_sent_cov",
    "Q_hypo_sent_cov",
]

lm_cols = ["LM domain avg lp",
           "LM gw avg lp"]

sum_cols = [
    "SUM_sbasic_sum",
    "SUM_sbasic_amean",
    # "SUM_sbasic_max",
    "SUM_novelty_gmean",
    "SUM_novelty_amean",
    # "SUM_novelty_max",
    "SUM_centrality",
    "SUM_pagerank",
    "SUM_sem_novelty_gmean",
    "SUM_sem_novelty_amean",
    "SUM_sem_centrality",
    "SUM_sem_pagerank",
]

stream_cols = [
    "STREAM_sbasic_sum",
    "STREAM_sbasic_amean",
    "STREAM_sbasic_max",
    "STREAM_per_prob_sum",
    "STREAM_per_prob_max",
    "STREAM_per_prob_amean",
    "STREAM_loc_prob_sum",
    "STREAM_loc_prob_max",
    "STREAM_loc_prob_amean",
    "STREAM_org_prob_sum",
    "STREAM_org_prob_max",
    "STREAM_org_prob_amean",
    "STREAM_nt_prob_sum",
    "STREAM_nt_prob_max",
    "STREAM_nt_prob_amean",
]

best_feats = set([
    "LM gw avg lp",
    "CACHE_SIM_amean",
    "STREAM_sbasic_max",
    "CACHE_SIM_max",
    "STREAM_loc_prob_amean^df^prob",
    "BASIC ordinal ratio^df^prob",
    "STREAM_per_prob_max",
    "BASIC ordinal ratio",
    "STREAM_sbasic_max^df",
    "SUM_pagerank",
    "BASIC date ratio^df^prob",
    "SUM_sem_centrality^prob",
    "BASIC doc position^df",
    "STREAM_org_prob_amean",
    "STREAM_nt_prob_sum^df",
    "STREAM_loc_prob_sum",
    "STREAM_loc_prob_max^df",
    "STREAM_nt_prob_sum^prob",
    "BASIC location ratio^df",
    "BASIC all caps ratio^prob",
    "BASIC organization ratio^df^prob",
    "SUM_sbasic_sum^df",
    "STREAM_org_prob_sum^prob",
    "BASIC money ratio^prob",
    "CONSTANT",
    "STREAM_loc_prob_max",
    "STREAM_org_prob_amean^prob",
    "STREAM_nt_prob_max^prob",
    "SUM_sbasic_sum^df^prob",
    "STREAM_nt_prob_sum",
    "LM domain avg lp^df",
    "BASIC number ratio^df^prob",
    "CACHE_SEM_SIM_amean",
    "Q_syn_sent_cov",
    "BASIC percent ratio",
    "BASIC time ratio^df^prob",
    "BASIC date ratio^prob",
    "BASIC person ratio^prob",
    "STREAM_sbasic_sum^df",
    "BASIC location ratio^df^prob",
    "BASIC money ratio",
    "BASIC duration ratio^df^prob",
    "BASIC location ratio^prob",
    "BASIC duration ratio^prob",
    "BASIC person ratio^df",
    "STREAM_sbasic_amean",
    "BASIC date ratio",
    "SUM_sem_centrality^df",
    "BASIC time ratio^df",
    "STREAM_sbasic_sum",
])


class Summarizer(pyvw.SearchTask):
    def __init__(self, vw, sch, num_actions):
        pyvw.SearchTask.__init__(self, vw, sch, num_actions)
        # sch.set_options(sch.AUTO_HAMMING_LOSS)
        self._with_scores = False
        self._loss = 1
        self.total_loss = 0
        self.log_time = False
        self.use_best_feats = False
        self.use_i_only = False
        self.use_abs_df = False

    def feat_weight(self, idx):
        if idx >= fmap._idx:
            ridx = idx - fmap._idx
        else:
            ridx = idx
        ex = self.example({"a": [(idx, 1)]})
        w = self.vw.get_weight(ex.feature("a", 0))
        return fmap.inv(ridx), idx, w

    def get_feature_weights(self):
        select_feats = []
        for i in xrange(fmap._idx):
            fname, idx, weight = self.feat_weight(i)
            select_feats.append({"name": fname, "index": idx, "weight": weight})
        select_feats.sort(key=lambda x: x["weight"])
        select_df = pd.DataFrame(select_feats, columns=["name", "index", "weight"])

        next_feats = []
        for i in xrange(fmap._idx, fmap._idx * 2):
            fname, idx, weight = self.feat_weight(i)
            next_feats.append({"name": fname, "index": idx, "weight": weight})
        next_feats.sort(key=lambda x: x["weight"])
        next_df = pd.DataFrame(next_feats, columns=["name", "index", "weight"])

        return select_df, next_df
        # for i, feat in enumerate(self.basic_cols()):
        #     fw.append(("b:" + feat, w))
        # fw.append(("n:bias", self.vw.get_weight(ex.feature("n", 0))))

    def set_loss(self, loss):
        self._loss = loss

    def make_example(self, sent, cache, cache_in, days, x, cache_latent, dfdelta):
        if self.log_time is True:
            days = np.log(2 + days)
        df = sent.to_frame().transpose()
        feats = {
            "b": [(k, df[k].tolist()[0]) for k in basic_cols],
            "c": [(k, df[k].tolist()[0]) for k in sum_cols],
            "q": [(k, df[k].tolist()[0]) for k in query_fw_cols],
            "l": [(k, df[k].tolist()[0]) for k in lm_cols],
            "s": [(k, df[k].tolist()[0]) for k in stream_cols],
            "p": [("probs", sent["probs"])],
            # "t": [("time", days)],
            # "g": ["GAIN_POS" if gain > 0 else "GAIN_ZERO"],
        }

        if cache is None:
            feats["g"] = [("EMPTY", 1)]
        else:
            h = FeatureHasher(input_type="string")
            X_c = h.transform(cache["lemmas stopped"].tolist())
            x_i = h.transform([sent["lemmas stopped"]])
            K = cosine_similarity(X_c, x_i)
            k_u = K.mean()
            k_max = K.max()
            if k_max == 0:
                feats["g"] = [("CACHE_SIM_ZERO", 1)]
            else:
                feats["g"] = [("CACHE_SIM_amean", k_u), ("CACHE_SIM_max", k_max)]
            K_l = cosine_similarity(cache_latent, x)
            k_lu = K_l.mean()
            k_lmax = K_l.max()
            if k_lmax == 0:
                feats["g"].append(("CACHE_SEM_SIM_ZERO", 1))
            else:
                feats["g"].extend([("CACHE_SEM_SIM_amean", k_lu), ("CACHE_SEM_SIM_max", k_lmax)])

        feats["I"] = []
        # for ns in ["b", "c", "q", "l", "s", "g"]:
        #     for feat, val in feats[ns]:
        #         feats["I"].append(("{}^time^prob".format(feat), val * sent["probs"] * days))
        #         feats["I"].append(("{}^prob".format(feat), val * sent["probs"]))
        #         feats["I"].append(("{}^time".format(feat), val * days))
        for ns in ["b", "c", "q", "l", "s", "g"]:
            for feat, val in feats[ns]:
                feats["I"].append(("{}^df^prob".format(feat), val * sent["probs"] * dfdelta))
                feats["I"].append(("{}^prob".format(feat), val * sent["probs"]))
                feats["I"].append(("{}^df".format(feat), val * dfdelta))

        ifeats = {'a': []}
        if self.use_i_only:
            namespaces = ["I"]
        else:
            namespaces = ["b", "c", "q", "l", "s", "g", "p", "I"]
        if self.use_best_feats:
            for ns in namespaces:
                for key, val in feats[ns]:
                    if key not in best_feats:
                        continue
                    ifeats['a'].append((fmap[key], val))
            ifeats['a'].append((fmap["CONSTANT"], 1))
        else:
            for ns in namespaces:
                for key, val in feats[ns]:
                    ifeats['a'].append((fmap[key], val))
            ifeats['a'].append((fmap["CONSTANT"], 1))
        return self.example(ifeats)

    def _run(self, (event, df_stream, X_stream, dfdeltas)):
        nuggets = set()
        cache = None
        cache_in = None
        cache_latent = None
        output = []
        current_dfdelta_idx = 0
        current_dfdelta = 0

        n = 0
        y_int_y_hat = 0
        size_y = 0
        size_y_hat = 0
        loss = 0

        for (_, sent), x in zip(df_stream.iterrows(), X_stream):
            intts = int(sent["timestamp"])
            if intts > dfdeltas[current_dfdelta_idx + 1][0]:
                current_dfdelta_idx += 1
                current_dfdelta = dfdeltas[current_dfdelta_idx][1]
                if self.use_abs_df:
                    current_dfdelta = abs(current_dfdelta)

            days = (datetime.utcfromtimestamp(int(sent["timestamp"])) - event.start).total_seconds() / (60. * 60. * 24.)
            n += 1
            gain = len(sent["nuggets"] - nuggets)
            if self.sch.predict_needs_example():
                examples = self.make_example(sent, cache, cache_in, days, x, cache_latent, current_dfdelta)
            else:
                examples = None

            if gain > 0:
                oracle = SELECT
            else:
                oracle = SKIP

            # Make prediction.
            pred = self.sch.predict(
                examples=examples,
                my_tag=n,
                oracle=oracle,
                condition=[],  # (n-1, "p"), ])
            )
            output.append(pred)

            if pred != oracle:
                if oracle == SELECT:
                    loss += self._loss
                else:
                    loss += 1
            if pred == SELECT and oracle == SELECT:
                y_int_y_hat += 1
                size_y += 1
                size_y_hat += 1
            elif pred == SELECT and oracle == SKIP:
                size_y_hat += 1
            elif pred == SKIP and oracle == SELECT:
                size_y += 1

            # if self._with_scores is True:
            #     print "examining:", sent["pretty text"]
            if pred == SELECT:
                nuggets.update(sent["nuggets"])
                if cache is None:
                    cache = sent.to_frame().transpose()
                    cache_latent = x
                else:
                    cache = pd.concat([cache, sent.to_frame().transpose()])
                    cache_latent = np.vstack([cache_latent, x])
            # else:
            #     if cache_in is None:
            #         cache_in = sent.to_frame().transpose()
            #     else:
            #         cache_in = pd.concat([cache_in, sent.to_frame().transpose()])

        loss = 1 - float(y_int_y_hat) / (size_y + size_y_hat)
        self.sch.loss(loss)
        self.total_loss += loss
        return output
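
    # NOTE (added): the episode loss above is 1 - |y & y_hat| / (|y| + |y_hat|),
    # i.e. one minus half the Dice coefficient between the oracle selections y
    # and the predicted selections y_hat: 0.5 for a perfect match, 1.0 when the
    # two sets are disjoint.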

def start_manager(event_ids, output_dir):
    events = [e for e in cuttsum.events.get_events()
              if e.query_num in event_ids]
    jobs = [(events[0:i] + events[i + 1:], test_event)
            for i, test_event in enumerate(events)]

    comm = MPI.COMM_WORLD  # .Accept(port, info, 0)
    status = MPI.Status()  # get MPI status object

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    w_path = os.path.join(output_dir, "weights.tsv")
    s_path = os.path.join(output_dir, "scores.tsv")
    t_path = os.path.join(output_dir, "summary.tsv")

    n_workers = comm.size - 1

    first_write = True
    with open(w_path, "w") as w_f, open(s_path, "w") as s_f, \
            open(t_path, "w") as t_f:
        while n_workers > 0:
            data = comm.recv(
                source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            source = status.Get_source()
            tag = status.Get_tag()
            print "STATUS", tag, "SOURCE", source
            if tag == tags.READY:
                if len(jobs) > 0:
                    job = jobs.pop(0)
                    comm.send(job, dest=source, tag=tags.WORKER_START)
                else:
                    comm.send(None, dest=source, tag=tags.WORKER_STOP)
                    n_workers -= 1

            if data is not None:
                scores_df, weights_df, summary_df = data
                scores_df.to_csv(s_f, sep="\t", index=False, header=first_write)
                s_f.flush()
                weights_df.to_csv(w_f, sep="\t", index=False, header=first_write)
                w_f.flush()
                summary_df.to_csv(t_f, sep="\t", index=False, header=first_write)
                t_f.flush()
                first_write = False


def get_all_semsim():
    accident_semsim = event2semsim("accident")
    natdis_semsim = event2semsim("earthquake")
    social_semsim = event2semsim("protest")
    terror_semsim = event2semsim("shooting")
    return {
        "accident": accident_semsim,
        "earthquake": natdis_semsim,
        "storm": natdis_semsim,
        "impact event": natdis_semsim,
        "shooting": terror_semsim,
        "hostage": terror_semsim,
        "conflict": terror_semsim,
        "bombing": terror_semsim,
        "protest": social_semsim,
        "riot": social_semsim,
    }


def get_dfdeltas():
    with open("doc_freqs.tsv", "r") as f:
        df = pd.read_csv(f, sep="\t")

    def get(event):
        df_e = df[df["event"] == event.query_id]
        mylist = [[0, 0]] + zip(df_e["hour"].tolist(), df_e["df delta"].tolist())
        return mylist
    return get


def start_worker(sample_size, samples_per_event, gold_probs, iters, l2, log_time,
                 use_best_feats, use_i_only, use_abs_df):
    rank = MPI.COMM_WORLD.Get_rank()
    status = MPI.Status()  # get MPI status object

    job_results = None
    semsims = get_all_semsim()
    dfdeltas = get_dfdeltas()

    while True:
        comm.send(job_results, dest=0, tag=tags.READY)
        data = comm.recv(
            source=0, tag=MPI.ANY_TAG, status=status)
        source = status.Get_source()
        tag = status.Get_tag()

        if tag == tags.WORKER_START:
            training_events, test_event = data
            print "JOBBING", test_event.fs_name()
            job_results = do_work(
                training_events, test_event,
                sample_size, samples_per_event, gold_probs, iters, l2, log_time, semsims, dfdeltas,
                use_best_feats, use_i_only, use_abs_df)
        if tag == tags.WORKER_STOP:
            break


def do_work(training_events, test_event, sample_size, samples_per_event,
            gold_probs, iters, l2, log_time, semsims, dfdeltas,
            use_best_feats, use_i_only, use_abs_df):
    training_streams = []
    summary = []

    for event in training_events:
        df = get_input_stream(event, gold_probs)
        training_streams.append((event, df))

    test_df = get_input_stream(test_event, gold_probs)
    test_X_l = semsims[test_event.type].transform(
        test_df["stems"].apply(lambda x: ' '.join(x)).tolist())
    test_stream = (test_event, test_df, test_X_l, dfdeltas(test_event))

    vw = pyvw.vw(
        ("--l2 {} --search 2 --search_task hook --ring_size 1024 "
         "--search_no_caching --noconstant --quiet").format(l2))

    task = vw.init_search_task(Summarizer)
    task.use_best_feats = use_best_feats
    task.use_i_only = use_i_only
    task.use_abs_df = use_abs_df
    print "use best?", task.use_best_feats
    print "use i only?", task.use_i_only
    print "use abs df?", task.use_abs_df
    task.log_time = log_time

    all_scores = []
    all_weights = []

    instances = []
    for sample in xrange(samples_per_event):
        for event, stream in training_streams:
            while 1:
                sample_stream = ds(stream, sample_size=sample_size)
                if (sample_stream["nuggets"].apply(len) > 0).any():
                    break
            X_l = semsims[event.type].transform(
                sample_stream["stems"].apply(lambda x: ' '.join(x)).tolist())
            instances.append((event, sample_stream, X_l, dfdeltas(event)))

    for n_iter in xrange(1, iters + 1):
        task.total_loss = 0

        # instances = [(event, ds(stream, sample_size=sample_size))
        #              for event, stream in training_streams
        #              for sample in xrange(samples_per_event)]
        random.shuffle(instances)
        for i, inst in enumerate(instances):
            print "{}.{}.{}/{}".format(
                test_event.fs_name(), n_iter, i, len(instances))
            task.learn([inst])

        print "{}.{}.p".format(
            test_event.fs_name(), n_iter)

        train_egain = 0
        train_comp = 0
        train_f1 = 0
        train_loss = 0
        for i, inst in enumerate(instances):
            egain, comp, f1, loss, train_sum = predict(task, inst, n_iter)
            train_egain += egain
            train_comp += comp
            train_f1 += f1
            train_loss += loss
        train_egain = train_egain / float(len(instances))
        train_comp = train_comp / float(len(instances))
        train_f1 = train_f1 / float(len(instances))
        train_loss = train_loss / float(len(instances))
        print "{} {} train loss {}".format(test_event.query_id, n_iter, train_loss)

        pred = task.predict(test_stream)
        select_df, next_df = task.get_feature_weights()
        select_df["class"] = "SELECT"
        select_df["iter"] = n_iter
        next_df["class"] = "NEXT"
        next_df["iter"] = n_iter
        all_weights.append(select_df)
        all_weights.append(next_df)

        pred = ["SELECT" if p == SELECT else "SKIP" for p in pred]

        all_nuggets = set()
        for nuggets in test_stream[1]["nuggets"].tolist():
            all_nuggets.update(nuggets)

        loss = 0
        y_int_y_hat = 0
        size_y = 0
        size_y_hat = 0
        nuggets = set()
        for action, (_, sent) in izip(pred, test_stream[1].iterrows()):
            gain = len(sent["nuggets"] - nuggets)
            if action == "SELECT":
                if gain == 0:
                    loss += 1
                summary.append({
                    "event": test_event.query_id,
                    "iter": n_iter,
                    "update id": sent["update id"],
                    "timestamp": sent["timestamp"],
                    "gain": gain,
                    "nuggets": ",".join(sent["nuggets"]),
                    "update text": sent["pretty text"]
                })
                nuggets.update(sent["nuggets"])
            else:
                if gain > 0:
                    loss += 1

            if gain > 0:
                oracle = "SELECT"
            else:
                oracle = "SKIP"

            if action == "SELECT" and oracle == "SELECT":
                y_int_y_hat += 1
                size_y += 1
                size_y_hat += 1
            elif action == "SELECT" and oracle == "SKIP":
                size_y_hat += 1
            elif action == "SKIP" and oracle == "SELECT":
                size_y += 1

        if size_y_hat == 0:
            print test_event
            print (test_stream[1]["nuggets"].apply(len) > 0).any()
        loss = 1 - float(y_int_y_hat) / (size_y + size_y_hat)
        if len(nuggets) > 0:
            egain = len(nuggets) / sum([1.0 if a == "SELECT" else 0.0 for a in pred])
        else:
            egain = 0
        comp = len(nuggets) / float(len(all_nuggets))

        all_scores.append({"iter": n_iter, "Comp.": comp,
                           "E[gain]": egain, "Loss": loss,
                           "Avg. Train Loss": train_loss,
                           "Avg. Train E[gain]": train_egain,
                           "Avg. Train Comp.": train_comp,
                           "Avg. Train F1": train_f1,
                           })

        print "{}.{}.p E[gain]={:0.6f} Comp.={:0.6f} Train Loss={:0.6f}".format(
            test_event.fs_name(), n_iter, egain, comp, train_loss)

    scores_df = pd.DataFrame(
        all_scores,
        columns=["iter", "E[gain]", "Comp.", "Loss", "Avg. Train Loss",
                 "Avg. Train E[gain]", "Avg. Train Comp.", "Avg. Train F1"])
    weights_df = pd.concat(all_weights)
    weights_df["event"] = test_event.query_id
    scores_df["event"] = test_event.query_id
    summary_df = pd.DataFrame(
        summary,
        columns=["iter", "event", "update id", "timestamp", "gain",
                 "update text", "nuggets"])
    return scores_df, weights_df, summary_df


def get_input_stream(event, gold_probs, extractor="goose", thresh=.8, delay=None, topk=20):
    max_nuggets = 3
    corpus = cuttsum.corpora.get_raw_corpus(event)
    res = InputStreamResource()
    df = pd.concat(
        res.get_dataframes(event, corpus, extractor, thresh, delay, topk))

    selector = (df["n conf"] == 1) & (df["nugget probs"].apply(len) == 0)
    df.loc[selector, "nugget probs"] = df.loc[selector, "nuggets"].apply(lambda x: {n: 1 for n in x})

    df["true probs"] = df["nugget probs"].apply(lambda x: [val for key, val in x.items()] + [0])
    df["true probs"] = df["true probs"].apply(lambda x: np.max(x))
    df.loc[(df["n conf"] == 1) & (df["nuggets"].apply(len) == 0), "true probs"] = 0

    if gold_probs is True:
        df["probs"] = df["true probs"]
    else:
        df["probs"] = NuggetRegressor().predict(event, df)

    df["nuggets"] = df["nugget probs"].apply(
        lambda x: set([key for key, val in x.items() if val > .9]))

    nid2time = {}
    nids = set(matches_df[matches_df["query id"] == event.query_id]["nugget id"].tolist())
    for nid in nids:
        ts = matches_df[matches_df["nugget id"] == nid]["update id"].apply(lambda x: int(x.split("-")[0])).tolist()
        ts.sort()
        nid2time[nid] = ts[0]

    fltr_nuggets = []
    for name, row in df.iterrows():
        fltr_nuggets.append(
            set([nug for nug in row["nuggets"] if nid2time[nug] <= row["timestamp"]]))
    # print df[["nuggets", "timestamp"]].apply(lambda y: print y[0])  # datetime.utcfromtimestamp(int(y["timestamp"]))
    # print nids
    df["nuggets"] = fltr_nuggets
    df["nuggets"] = df["nuggets"].apply(lambda x: x if len(x) <= max_nuggets else set([]))
    return df


def ds(df, sample_size=100):
    I = np.arange(len(df))
    np.random.shuffle(I)
    I = I[:sample_size]
    I = np.sort(I)
    return df.iloc[I]
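
# NOTE (added): ds draws a uniform random subsample of at most sample_size rows
# while preserving the stream's original (temporal) order, e.g.
# ds(stream_df, sample_size=100) keeps up to 100 sentences in sequence.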

def predict(task, event_stream, n_iter):
    pred = task.predict(event_stream)
    pred = ["SELECT" if p == SELECT else "SKIP" for p in pred]
    all_nuggets = set()
    for nuggets in event_stream[1]["nuggets"].tolist():
        all_nuggets.update(nuggets)

    loss = 0
    y_int_y_hat = 0
    size_y = 0
    size_y_hat = 0
    summary = []
    nuggets = set()
    for action, (_, sent) in izip(pred, event_stream[1].iterrows()):
        gain = len(sent["nuggets"] - nuggets)
        if action == "SELECT":
            summary.append({
                "event": event_stream[0].query_id,
                "iter": n_iter,
                "update id": sent["update id"],
                "timestamp": sent["timestamp"],
                "gain": gain,
                "nuggets": ",".join(sent["nuggets"]),
                "update text": sent["pretty text"]
            })
            nuggets.update(sent["nuggets"])

        if gain > 0:
            oracle = "SELECT"
        else:
            oracle = "SKIP"

        if action == "SELECT" and oracle == "SELECT":
            y_int_y_hat += 1
            size_y += 1
            size_y_hat += 1
        elif action == "SELECT" and oracle == "SKIP":
            size_y_hat += 1
        elif action == "SKIP" and oracle == "SELECT":
            size_y += 1

    if size_y + size_y_hat == 0:
        loss = 1
    else:
        loss = 1 - float(y_int_y_hat) / (size_y + size_y_hat)

    if len(nuggets) > 0:
        egain = len(nuggets) / sum([1.0 if a == "SELECT" else 0.0 for a in pred])
    else:
        egain = 0
    comp = len(nuggets) / float(len(all_nuggets))
    f1 = 2 * (egain * comp) / (egain + comp) if egain + comp > 0 else 0

    return egain, comp, f1, loss, summary


if __name__ == u"__main__":

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(u"--event-ids", type=int, nargs=u"+",
                        help=u"event ids to select.")
    parser.add_argument(u"--sample-size", type=int,
                        default=100,
                        help=u"downsample size for each training instance.")
    parser.add_argument(
        u"--samples-per-event", type=int, default=10,
        help=u"number of training instances to make from each event.")
    parser.add_argument(
        u"--gold-probs", type=bool, default=False,
        help=u"Use gold nugget probability feature.")
    parser.add_argument(u"--iters", type=int,
                        default=10,
                        help=u"Training iters")
    parser.add_argument(u"--output-dir", type=str,
                        required=True, help=u"directory to write results.")
    parser.add_argument(u"--l2", type=float,
                        default=0, help=u"l2 weight")
    parser.add_argument(
        u"--best-feats", action="store_true", default=False,
        help=u"Use best features")
    parser.add_argument(
        u"--i-only", action="store_true", default=False,
        help=u"Use interactions only")
    parser.add_argument(
        u"--abs-df", action="store_true", default=False,
        help=u"Use absolute value of df deltas.")
    parser.add_argument(
        u"--log-time", action="store_true", default=False,
        help=u"Use log(t) feature")
    args = parser.parse_args()

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    if size == 1:
        print "Must be run with at least 2 processes!"
        exit()

    if rank == 0:
        start_manager(args.event_ids, args.output_dir)
    else:
        start_worker(args.sample_size, args.samples_per_event,
                     args.gold_probs, args.iters, args.l2, args.log_time,
                     args.best_feats, args.i_only, args.abs_df)
| {"hexsha": "441cffbbd75d7ec327699c07bcdc662ad344d775", "size": 26909, "ext": "py", "lang": "Python", "max_stars_repo_path": "trec2015/sbin/cross-validation/cross-validate-l2s.py", "max_stars_repo_name": "kedz/cuttsum", "max_stars_repo_head_hexsha": "992c21192af03fd2ef863f5ab7d10752f75580fa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2015-09-10T02:22:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-01T16:36:46.000Z", "max_issues_repo_path": "trec2015/sbin/cross-validation/cross-validate-l2s.py", "max_issues_repo_name": "kedz/cuttsum", "max_issues_repo_head_hexsha": "992c21192af03fd2ef863f5ab7d10752f75580fa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trec2015/sbin/cross-validation/cross-validate-l2s.py", "max_forks_repo_name": "kedz/cuttsum", "max_forks_repo_head_hexsha": "992c21192af03fd2ef863f5ab7d10752f75580fa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-04-04T10:44:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T16:37:26.000Z", "avg_line_length": 33.1391625616, "max_line_length": 164, "alphanum_fraction": 0.5469917128, "include": true, "reason": "import numpy", "num_tokens": 7082} |