#!/bin/bash
# Running skydns based on instructions at: https://testdatamanagement.wordpress.com/2015/09/01/running-kubernetes-in-docker-with-dns-on-a-single-node/
# Save the starting directory (do not reuse $PWD itself; cd overwrites it)
ORIG_PWD=`pwd`
BASEDIR=`readlink -e $(dirname ${0})`
cd ${BASEDIR}
KUBECTL='docker exec hyperkube /hyperkube kubectl'
#RUN_SKYDNS="yes"
RUN_SKYDNS="no"
# DNS_ARGUMENTS needs to be passed when Kubernetes is setup.
if [ "${RUN_SKYDNS}" = "yes" ]; then
DNS_ARGUMENTS="--cluster-dns=10.0.0.10 --cluster-domain=cluster.local"
else
DNS_ARGUMENTS=""
fi
wait_until_k8s_ready() {
# Wait until kubernetes is up and fully responsive
while :
do
${KUBECTL} get nodes 2>/dev/null | grep -q '127.0.0.1'
if [ "${?}" = "0" ]; then
break
else
echo "sleeping for 5 seconds (waiting for kubernetes to start)"
sleep 5
fi
done
echo "kubernetes nodes:"
${KUBECTL} get nodes
}
if [ "${RUN_SKYDNS}" = "yes" ]; then
wait_until_k8s_ready
echo "Launch kube2sky..."
docker run -d --net=host gcr.io/google_containers/kube2sky:1.11 --kube_master_url=http://127.0.0.1:8080 --domain=cluster.local
echo ""
echo "Launch SkyDNS..."
docker run -d --net=host gcr.io/google_containers/skydns:2015-03-11-001 --machines=http://localhost:4001 --addr=0.0.0.0:53 --domain=cluster.local
else
true
fi
cd ${ORIG_PWD}
|
#!/bin/bash
# cp -i /etc/kubernetes/admin.conf /root/kube-admin.conf
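# Example usage (hypothetical name; call it however this wrapper file was saved):
#   ./kube-admin.sh get nodes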
kubectl --kubeconfig /root/kube-admin.conf "$@"
|
#!/bin/bash
set -e
if [ "$1" = "/opt/logstash/bin/logstash" ]; then
exec "$1" agent -f /opt/conf/logstash.conf
else
exec "$@"
fi |
#---------#
# Seqdiag #
#---------#
# Watches for files named *.diag in the given directory (recursive) and generates the
# corresponding PNG file.
# $1: the folder to watch (Default: pwd)
# shellcheck disable=SC2034
seqWatch() {
local folder="$1"
[[ -n "$folder" ]] || {
printfc 'Folder not specified; defaulting to the current directory\n' "$YELLOW"
folder="$(pwd)";
}
inotifywait -rm "$folder" -e close_write |
while read path action file; do
if [[ "$file" =~ .*\.diag$ ]]; then
seqdiag "$path$file" --no-transparency -a
fi
done
}
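# Example usage (illustrative path; typically run in the background):
#   seqWatch ~/diagrams &
#   # saving ~/diagrams/login-flow.diag should then regenerate the matching PNG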
# Inits a seqdiag file with the preferences defined in seqdiag.init.
# Uses: $TOOLING
# $1: the file to be created (absolute path)
seqInit() {
local filePath="${1?Missing path to file}"
mkdir -p "$(dirname "$filePath")"
cp "$TOOLING/bashrc/Utils/seqdiag.init" "$(basename "$filePath")"
}
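# Example usage (illustrative path; assumes $TOOLING points at the tooling checkout):
#   seqInit ~/diagrams/checkout-flow.diag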
|
#!/bin/bash
# Author: Eason Yi
# Date: 2017-05-17
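# Remove blank (space-only) lines from the macOS clipboard, write the result back, and print it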
pbpaste|awk '!/^[ ]*$/'|pbcopy|pbpaste
|
#!/usr/bin/env bash
PWD_DIR=$(pwd)
function cleanup {
cd "$PWD_DIR"
}
trap cleanup EXIT
GREP=grep
SED=sed
AWK=awk
MAKE=make
# Fixup ancient Bash
# https://unix.stackexchange.com/q/468579/56041
if [[ -z "$BASH_SOURCE" ]]; then
BASH_SOURCE="$0"
fi
# Fixup for Solaris and friends
if [[ (-d /usr/xpg4/bin) ]]; then
SED=/usr/xpg4/bin/sed
AWK=/usr/xpg4/bin/awk
GREP=/usr/xpg4/bin/grep
elif [[ (-d /usr/bin/posix) ]]; then
SED=/usr/bin/posix/sed
AWK=/usr/bin/posix/awk
GREP=/usr/bin/posix/grep
fi
# Fixup for sed and "illegal byte sequence"
IS_DARWIN=$(uname -s | "$GREP" -i -c darwin)
if [[ "$IS_DARWIN" -ne 0 ]]; then
export LC_ALL=C
fi
# Fixup for Solaris and BSDs
if [[ ! -z $(command -v gmake) ]]; then
MAKE=gmake
else
MAKE=make
fi
# Fixup for missing libtool
if [[ ! -z $(command -v libtoolize) ]]; then
LIBTOOLIZE=$(command -v libtoolize)
elif [[ ! -z $(command -v glibtoolize) ]]; then
LIBTOOLIZE=$(command -v glibtoolize)
elif [[ ! -z $(command -v libtool) ]]; then
LIBTOOLIZE=$(command -v libtool)
elif [[ ! -z $(command -v glibtool) ]]; then
LIBTOOLIZE=$(command -v glibtool)
fi
# Fetch the three required files
if ! wget --no-check-certificate 'https://raw.githubusercontent.com/noloader/cryptopp-autotools/master/Makefile.am' -O Makefile.am; then
echo "Makefile.am download failed"
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1
fi
if ! wget --no-check-certificate 'https://raw.githubusercontent.com/noloader/cryptopp-autotools/master/configure.ac' -O configure.ac; then
echo "configure.ac download failed"
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1
fi
if ! wget --no-check-certificate 'https://raw.githubusercontent.com/noloader/cryptopp-autotools/master/libcryptopp.pc.in' -O libcryptopp.pc.in; then
echo "libcryptopp.pc.in download failed"
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1
fi
mkdir -p m4/
if [[ -z $(command -v autoupdate) ]]; then
echo "Cannot find autoupdate. Things may fail."
fi
if [[ -z "$LIBTOOLIZE" ]]; then
echo "Cannot find libtoolize. Things may fail."
fi
if [[ -z $(command -v autoreconf) ]]; then
echo "Cannot find autoreconf. Things may fail."
fi
echo "Running autoupdate"
if ! autoupdate 2>/dev/null; then
echo "autoupdate failed."
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1
fi
echo "Running libtoolize"
if ! "$LIBTOOLIZE" 2>/dev/null; then
echo "libtoolize failed."
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1
fi
# Run autoreconf twice on failure. Also see
# https://github.com/tracebox/tracebox/issues/57
echo "Running autoreconf"
if ! autoreconf 2>/dev/null; then
echo "autoreconf failed, running again."
if ! autoreconf -fi; then
echo "autoreconf failed, again."
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1
fi
fi
# Sparc need +w
if [[ -e config.sub ]]; then
chmod +w config.sub
fi
if [[ -e config.guess ]]; then
chmod +w config.guess
fi
# Update config.sub config.guess. GNU recommends using the latest for all projects.
echo "Updating config.sub"
wget --no-check-certificate 'https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub' -O config.sub
if [[ -e config.sub ]]; then
chmod +x config.sub
fi
echo "Updating config.guess"
wget --no-check-certificate 'https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess' -O config.guess
if [[ -e config.guess ]]; then
chmod +x config.guess
fi
if ! ./configure; then
echo "configure failed."
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1
fi
"$MAKE" clean 2>/dev/null
if ! "$MAKE" -j2 -f Makefile; then
echo "make failed."
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1
fi
if ! ./cryptest v; then
echo "cryptest v failed."
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1
fi
if ! ./cryptest tv all; then
echo "cryptest tv all failed."
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 1 || return 1
fi
# Return success
[[ "$0" = "${BASH_SOURCE[0]}" ]] && exit 0 || return 0
|
#!/bin/bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This shell script is used to build a cluster and create a namespace from our
# argo workflow
set -o errexit
set -o nounset
set -o pipefail
CLUSTER_NAME="${CLUSTER_NAME}"
ZONE="${GCP_ZONE}"
PROJECT="${GCP_PROJECT}"
NAMESPACE="${DEPLOY_NAMESPACE}"
echo "Activating service-account"
gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS}
echo "Creating GPU cluster"
gcloud --project ${PROJECT} beta container clusters create ${CLUSTER_NAME} \
--zone ${ZONE} \
--machine-type=n1-standard-8 \
--num-nodes=6 \
--cluster-version 1.14
echo "Configuring kubectl"
gcloud --project ${PROJECT} container clusters get-credentials ${CLUSTER_NAME} \
--zone ${ZONE}
|
#!/bin/sh
prog=wave1D_u0_s.py
grep 'if n == 90:' $prog
if [ $? -ne 0 ]; then
echo "insert if n == 90: st.savefig('frame_C%s.pdf' % C) in $prog"
exit
fi
C_values="1.0 0.95 0.2 1.0015"
for C in $C_values; do
python $prog $C
scitools movie output_file=index.html fps=2 frame*.png
scitools movie encoder=convert output_file=movie.gif fps=4 frame*.png
dir=guitar_C$C
rm -rf $dir
mkdir $dir
mv movie.gif index.html frame*.png $dir
done
scitools rename frame_C wave1D_guitar_C frame_C*.pdf
|
export KUBERNETES_SERVICE_HOST=master.serverless-6e97.openshiftworkshop.com
export KUBERNETES_SERVICE_PORT=443
export KUBERNETES_CLIENT_SERVICEACCOUNT_ROOT=$(pwd)/istio
export COOLSTORE_GW_ENDPOINT=http://istio-ingressgateway-istio-system.apps.serverless-6e97.openshiftworkshop.com
#export COOLSTORE_SCENARIOS_ENDPOINT=http://scenarios-coolstore.apps.serverless-6e97.openshiftworkshop.com
export COOLSTORE_SCENARIOS_ENDPOINT=http://localhost:8080
export OPENSHIFT_BUILD_NAMESPACE=coolstore-ng
export BASE_DOMAIN=apps.serverless-6e97.openshiftworkshop.com
export WEB_UI_CUSTOM_PORT=8090
npm run dev |
# Combined file for easier scripting
export MQSI_SIGNAL_EXCLUSIONS=11
export MQSI_NO_CACHE_SUPPORT=1
. /opt/ibm/ace-12/server/bin/mqsiprofile
export LD_LIBRARY_PATH=/lib:/opt/ibm/java/jre/lib/amd64/compressedrefs:/opt/ibm/java/jre/lib/amd64:$LD_LIBRARY_PATH
# Not really ibmjava-related, but still needed
export LD_LIBRARY_PATH=/usr/glibc-compat/zlib-only:$LD_LIBRARY_PATH
|
#!/bin/bash
#
# This file is open source software, licensed to you under the terms
# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
# distributed with this work for additional information regarding copyright
# ownership. You may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# os-release may be missing in container environment by default.
if [ -f "/etc/os-release" ]; then
. /etc/os-release
elif [ -f "/etc/arch-release" ]; then
export ID=arch
else
echo "/etc/os-release missing."
exit 1
fi
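# A typical /etc/os-release provides the ID and VERSION_ID fields consumed below, e.g.:
#   ID=ubuntu
#   VERSION_ID="20.04"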
debian_packages=(
ninja-build
ragel
libhwloc-dev
libnuma-dev
libpciaccess-dev
libcrypto++-dev
libboost-all-dev
libxml2-dev
xfslibs-dev
libgnutls28-dev
liblz4-dev
libsctp-dev
gcc
make
python3
systemtap-sdt-dev
libtool
cmake
libyaml-cpp-dev
libc-ares-dev
stow
g++
libfmt-dev
diffutils
valgrind
doxygen
openssl
pkg-config
)
# seastar doesn't directly depend on these packages. They are
# needed because we want to link seastar statically and pkg-config
# has no way of saying "static seastar, but dynamic transitive
# dependencies". They provide the various .so -> .so.ver symbolic
# links.
transitive=(libtool-ltdl-devel trousers-devel libidn2-devel libunistring-devel)
redhat_packages=(
hwloc-devel
numactl-devel
libpciaccess-devel
cryptopp-devel
libxml2-devel
xfsprogs-devel
gnutls-devel
lksctp-tools-devel
lz4-devel
gcc
g++
make
python3
systemtap-sdt-devel
libtool
cmake
yaml-cpp-devel
c-ares-devel
stow
diffutils
doxygen
openssl
fmt-devel
boost-devel
valgrind-devel
"${transitive[@]}"
)
fedora_packages=(
"${redhat_packages[@]}"
gcc-c++
ninja-build
ragel
boost-devel
fmt-devel
libubsan
libasan
libatomic
valgrind-devel
)
centos7_packages=(
"${redhat_packages[@]}"
ninja-build
ragel
cmake3
rh-mongodb36-boost-devel
devtoolset-9-gcc-c++
devtoolset-9-libubsan
devtoolset-9-libasan
devtoolset-9-libatomic
)
centos8_packages=(
"${redhat_packages[@]}"
ninja-build
ragel
gcc-toolset-9-gcc
gcc-toolset-9-gcc-c++
gcc-toolset-9-libubsan-devel
gcc-toolset-9-libasan-devel
gcc-toolset-9-libatomic-devel
)
# 1) glibc 2.30-3 has sys/sdt.h (systemtap include)
# some old containers may contain glibc older,
# so enforce update on that one.
# 2) if problems with signatures, ensure having fresh
# archlinux-keyring: pacman -Sy archlinux-keyring && pacman -Syyu
# 3) aur installations require having sudo and being
# a sudoer. makepkg does not work otherwise.
arch_packages=(
gcc
ninja
ragel
boost
boost-libs
hwloc
numactl
libpciaccess
crypto++
libxml2
xfsprogs
gnutls
lksctp-tools
lz4
make
libtool
cmake
yaml-cpp
stow
c-ares
pkgconf
fmt
python3
glibc
filesystem
valgrind
openssl
)
opensuse_packages=(
c-ares-devel
cmake
hwloc-devel
libboost_filesystem1_66_0
libboost_filesystem1_66_0-devel
libboost_program_options1_66_0
libboost_program_options1_66_0-devel
libboost_system1_66_0
libboost_system1_66_0-devel
libboost_test1_66_0
libboost_test1_66_0-devel
libboost_thread1_66_0
libboost_thread1_66_0-devel
libcryptopp-devel
libboost_atomic1_66_0
libboost_atomic1_66_0-devel
libboost_date_time1_66_0
libboost_date_time1_66_0-devel
libboost_chrono1_66_0
libboost_chrono1_66_0-devel
libgnutls-devel
libgnutlsxx28
liblz4-devel
libnuma-devel
lksctp-tools-devel
ninja
ragel
xfsprogs-devel
yaml-cpp-devel
libtool
stow
openssl
)
case "$ID" in
ubuntu|debian|pop)
apt-get install -y "${debian_packages[@]}"
;;
fedora)
dnf install -y "${fedora_packages[@]}"
;;
rhel|centos|amzn)
if [ "$VERSION_ID" = "7" ]; then
yum install -y epel-release centos-release-scl scl-utils
yum install -y "${centos7_packages[@]}"
elif [ "${VERSION_ID%%.*}" = "8" ]; then
dnf install -y epel-release
dnf install -y "${centos8_packages[@]} ${arch_packages[@]}"
elif [ "$VERSION_ID" = "2" ]; then
yum install -y epel-release centos-release-scl scl-utils
yum install -y "${centos8_packages[@]} ${arch_packages[@]}"
fi
;;
opensuse-leap)
zypper install -y "${opensuse_packages[@]}"
;;
arch|manjaro)
if [ "$EUID" -eq "0" ]; then
pacman -Sy --needed --noconfirm "${arch_packages[@]}"
else
echo "seastar: running without root. Skipping main dependencies (pacman)." 1>&2
fi
;;
*)
echo "Your system ($ID) is not supported by this script. Please install dependencies manually."
exit 1
;;
esac
|
#!/usr/bin/env bash
set -e
function check_command() {
if ! command -v $1 >/dev/null; then
echo -e "Install \033[1m$1\033[0m"
exit 1
fi
}
check_command mvn
check_command jq
check_command yq
check_command yarn
check_command npm
check_command docker
if [[ "$#" != "1" ]] || [[ ! "$1" =~ ^(patch|minor|major)$ ]]; then
echo "Usage: $0 patch|minor|major"
exit 1
fi
if [[ $(git status --porcelain) ]]; then
echo -e "The repository has changes. Commit first...\033[0;31mAborting!\033[0m"
exit 1
fi
git pull --rebase
cd paperboy-project-generator
yarn
npm version $1
npm publish
cd ../paperboy-core
yarn
yarn build
npm version $1
version=$(cat package.json | jq -r .version)
npm publish
cd ../paperboy-magnolia-module
mvn versions:set -DnewVersion=${version} -DgenerateBackupPoms=false
cd ../paperboy-cli
cat package.json | jq ".version = \"$version\" | .dependencies.\"@neoskop/paperboy\" = \"$version\"" >package.json.new
mv package.json.new package.json
yarn
sed -i.bak "s/version('[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+')/version('$version')/g" paperboy-cli.js
rm -rf paperboy-cli.js.bak
npm publish
cd ../paperboy-push-service
cat package.json | jq ".version = \"$version\"" >package.json.new
mv package.json.new package.json
yarn
yarn build
npm publish
docker build -t neoskop/paperboy-push-service:$version .
docker build -t neoskop/paperboy-push-service:latest .
docker push neoskop/paperboy-push-service:$version
docker push neoskop/paperboy-push-service:latest
cd ../paperboy-docker
sed -i "s/ENV PAPERBOY_VERSION=[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+/ENV PAPERBOY_VERSION=$version/" Dockerfile
docker build -t neoskop/paperboy:$version .
docker build -t neoskop/paperboy:latest .
docker push neoskop/paperboy:$version
docker push neoskop/paperboy:latest
cd ../paperboy-helm
yq eval -i ".version=\"$version\"" ./Chart.yaml
yq eval -i ".appVersion=\"$version\"" ./Chart.yaml
yq eval -i ".image.tag=\"$version\"" ./values.yaml
cd ../
git add .
git commit -m "chore: Bump version to ${version}."
git tag ${version}
git push origin $version
git pull --rebase
git push
helm package paperboy-helm --destination .deploy
cr upload -o neoskop -r paperboy -p .deploy
git checkout gh-pages
cr index -i ./index.yaml -p .deploy -o neoskop -r paperboy -c https://neoskop.github.io/paperboy/
git add index.yaml
git commit -m "chore: Bump version to ${version}."
git push
git checkout master
rm -rf .deploy/
HELM_CHARTS_DIR=../neoskop-helm-charts
[ -d $HELM_CHARTS_DIR ] || git clone git@github.com:neoskop/helm-charts.git $HELM_CHARTS_DIR
cd $HELM_CHARTS_DIR
./update-index.sh
cd - &>/dev/null |
#!/bin/bash
. /HolismHolding/Infra/Scripts/Message.sh
function LinkConnectionStrings()
{
Info "Linking ConnectionStrings.json ...";
sudo ln -s -f /$Organization/Common/ConnectionStrings.json /$Organization/$Repository/ConnectionStrings.json
Divide
} |
#!/usr/bin/env bash
# get run options
while test $# -gt 0; do
case "$1" in
-h|--help)
echo "pac-man$ docker-test - run lambda package"
echo " "
echo "pac-man$ docker-test [options]"
echo " "
echo "options:"
echo "-h, --help show brief help"
echo "-b, --build build lambda package prior to running"
exit 0
;;
-b|--build)
shift
export PACMAN_BUILD=1
;;
*)
break
;;
esac
done
# cd to pac-man directory
cd "$(dirname "$0")"
if [[ -n ${PACMAN_BUILD} && "${PACMAN_BUILD}" == "1" ]]; then
# build lambda package
docker run --rm \
-v ${PWD}:/code \
-v ${HOME}/.cargo/registry:/root/.cargo/registry \
-v ${HOME}/.cargo/git:/root/.cargo/git \
softprops/lambda-rust && \
unzip -o \
target/lambda/release/pac-man.zip \
-d /tmp/lambda && \
echo "Enter Payload Then Press CTRL-D..." && \
docker run \
-i -e DOCKER_LAMBDA_USE_STDIN=1 \
-e AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
-e AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
--rm \
-v /tmp/lambda:/var/task \
lambci/lambda:provided
else
echo "Enter Payload Then Press CTRL-D..." && \
docker run \
-i -e DOCKER_LAMBDA_USE_STDIN=1 \
-e AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
-e AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
--rm \
-v /tmp/lambda:/var/task \
lambci/lambda:provided
fi
|
printf "testing '$1'\n"
printf "testing python... "
ppp_file=$1
py_output=$(python3 $ppp_file 2>&1)
py_exit_code=$?
if [ "$py_exit_code" -eq "0" ]; then
printf "succeeded\n"
else
printf "FAILED!\n"
fi
printf "testing C++... "
cpp_comp_output=$(g++ -x c++ -std=c++14 $ppp_file -o tmp_bin 2>&1)
cpp_comp_exit_code=$?
cpp_run_output=""
cpp_run_exit_code=1
if [ "$cpp_comp_exit_code" -eq "0" ]; then
cpp_run_output=$(./tmp_bin 2>&1)
cpp_run_exit_code=$?
if [ "$cpp_run_exit_code" -eq "0" ]; then
printf "succeeded\n"
else
printf "CRASHED!\n"
fi
rm tmp_bin
else
printf "FAILED TO COMPILE!\n"
fi
if [ "$py_exit_code" -eq "0" ] && [ "$cpp_run_exit_code" -eq "0" ] && [ "$py_output" = "$cpp_run_output" ]; then
printf "Python and C++ outputs match\n"
printf "________\n"
printf " output \__________________________________________\n\n"
printf "$py_output\n"
printf "___________________________________________________\n"
else
if [ "$py_exit_code" -eq "0" ] && [ "$cpp_run_exit_code" -eq "0" ]; then
printf "Python and C++ outputs DO NOT MATCH!\n"
fi
printf "_______________\n"
printf " Python output \___________________________________\n\n"
printf "$py_output\n"
printf "___________________________________________________\n"
if [ "$cpp_comp_exit_code" -ne "0" ]; then
printf "_____________________\n"
printf " C++ compiler output \_____________________________\n\n"
printf "$cpp_comp_output\n"
printf "___________________________________________________\n"
else
printf "____________\n"
printf " C++ output \______________________________________\n\n"
printf "$cpp_run_output\n"
printf "___________________________________________________\n"
fi
fi
printf "\n"
|
#!/bin/bash
export HOME=/root/
source $HOME/.bashrc
source $HOME/conda/bin/activate
conda activate tali
cd $CODE_DIR
git pull
pip install -r $CODE_DIR/requirements.txt
source $CODE_DIR/setup_scripts/setup_base_experiment_disk.sh
source $CODE_DIR/setup_scripts/setup_wandb_credentials.sh
cd $CODE_DIR
fuser -k /dev/nvidia*; \
python $CODE_DIR/run.py \
hydra.verbose=True \
trainer=default \
resume=True \
batch_size=8 \
trainer.gpus=4 \
trainer.auto_scale_batch_size=True \
datamodule.dataset_config.rescan_paths=True \
datamodule.prefetch_factor=3 \
datamodule.num_workers=48 \
model=deci_modus_prime_resnet50 \
datamodule.dataset_config.dataset_size_identifier=base \
datamodule.dataset_config.modality_config.image=True \
datamodule.dataset_config.modality_config.text=True \
datamodule.dataset_config.modality_config.audio=True \
datamodule.dataset_config.modality_config.video=True
|
#!/usr/bin/env bash
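# Continuously re-run the CI suite (script/ci.sh t) in color, beeping when it exits non-zero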
watch --color --beep 'bash script/ci.sh t'
|
#!/bin/sh
# generate sim input
echo "(1/5) generating simulation input data"
cd tests
./sim_input.py sim_points.txt sim_input.h || exit $?
cd ..
# compile simulation
echo "(2/5) compiling the simulation program"
./compile.sh sim || exit $?
# flash target with simulation program
echo "(3/5) flashing the target"
./flash.sh sim || exit $?
# redirect tether output to file
echo "(4/5) running the simulation"
./tether.py --format-csv tests/sim_output.csv || exit $?
# run tests
echo "(5/5) checking the simulation output"
./tests/sim_tests.py tests/sim_output.csv || exit $?
|
#!/bin/bash
# ------------------------------------------------------------------------------
# SCM Breeze - Streamline your SCM workflow.
# Copyright 2011 Nathan Broadbent (http://madebynathan.com). All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License)
# ------------------------------------------------------------------------------
#
# Unit tests for git shell scripts
export scmbDir="$( cd -P "$( dirname "$0" )" && pwd )/../../.."
# Zsh compatibility
if [ -n "${ZSH_VERSION:-}" ]; then shell="zsh"; SHUNIT_PARENT=$0; setopt shwordsplit; fi
# Load test helpers
source "$scmbDir/test/support/test_helper.sh"
# Load functions to test
source "$scmbDir/lib/scm_breeze.sh"
source "$scmbDir/lib/git/repo_index.sh"
# Setup and tear down
#-----------------------------------------------------------------------------
oneTimeSetUp() {
GIT_REPO_DIR=$(mktemp -d -t scm_breeze.XXXXXXXXXX)
GIT_REPOS="/tmp/test_repo_1:/tmp/test_repo_11"
git_status_command="git status"
git_index_file="$GIT_REPO_DIR/.git_index"
silentGitCommands
cd $GIT_REPO_DIR
# Setup test repos in temp repo dir
for repo in github bitbucket source_forge TestCaps; do
mkdir $repo; cd $repo; git init; cd - > /dev/null
done
# Add some nested dirs for testing recursive tab completion
mkdir -p github/videos/octocat/live_action
# Add hidden dir to test that '.git' is filtered, but other hidden dirs are available.
mkdir -p github/.im_hidden
# Setup a test repo with some submodules
# (just a dummy '.gitmodules' file and some nested .git directories)
mkdir submodules_everywhere
cd submodules_everywhere
git init
cat > .gitmodules <<EOF
[submodule "very/nested/directory/red_submodule"]
[submodule "very/nested/directory/green_submodule"]
[submodule "very/nested/directory/blue_submodule"]
EOF
mkdir -p "very/nested/directory"
cd "very/nested/directory"
for repo in red_submodule green_submodule blue_submodule; do
mkdir $repo; cd $repo; git init; cd - > /dev/null
done
# Setup some custom repos outside the main repo dir
IFS=":"
for dir in $GIT_REPOS; do
mkdir -p $dir; cd $dir; git init;
done
unset IFS
verboseGitCommands
cd "$orig_cwd"
}
oneTimeTearDown() {
rm -rf "${GIT_REPO_DIR}"
IFS=":"
for dir in $GIT_REPOS; do rm -rf $dir; done
unset IFS
}
ensureIndex() {
_check_git_index
}
index_no_newlines() {
tr "\\n" " " < $git_index_file
}
#-----------------------------------------------------------------------------
# Unit tests
#-----------------------------------------------------------------------------
test_repo_index_command() {
git_index --rebuild > /dev/null
# Test that all repos are detected, and sorted alphabetically
assertIncludes "$(index_no_newlines)" "bitbucket.*\
blue_submodule.*\
github.*\
green_submodule.*\
red_submodule.*\
source_forge.*\
submodules_everywhere.*\
test_repo_11.*\
test_repo_1"
}
test_check_git_index() {
ensureIndex
echo "should not be regenerated" >> $git_index_file
_check_git_index
# Test that index is not rebuilt unless empty
assertIncludes "$(index_no_newlines)" "should not be regenerated"
rm $git_index_file
# Test the index is rebuilt
_check_git_index
assertTrue "[ -f $git_index_file ]"
}
test_git_index_count() {
assertEquals "10" "$(_git_index_count)"
}
test_repo_list() {
ensureIndex
list=$(git_index --list)
assertIncludes "$list" "bitbucket" || return
assertIncludes "$list" "blue_submodule" || return
assertIncludes "$list" "test_repo_11"
}
# Test matching rules for changing directory
test_git_index_changing_directory() {
ensureIndex
git_index "github"; assertEquals "$GIT_REPO_DIR/github" "$PWD"
git_index "github/"; assertEquals "$GIT_REPO_DIR/github" "$PWD"
git_index "bucket"; assertEquals "$GIT_REPO_DIR/bitbucket" "$PWD"
git_index "testcaps"; assertEquals "$GIT_REPO_DIR/TestCaps" "$PWD"
git_index "green_sub"; assertEquals "$GIT_REPO_DIR/submodules_everywhere/very/nested/directory/green_submodule" "$PWD"
git_index "_submod"; assertEquals "$GIT_REPO_DIR/submodules_everywhere/very/nested/directory/blue_submodule" "$PWD"
git_index "test_repo_1"; assertEquals "/tmp/test_repo_1" "$PWD"
git_index "test_repo_11"; assertEquals "/tmp/test_repo_11" "$PWD"
git_index "test_repo_"; assertEquals "/tmp/test_repo_11" "$PWD"
git_index "github/videos/octocat/live_action"; assertEquals "$GIT_REPO_DIR/github/videos/octocat/live_action" "$PWD"
}
test_git_index_tab_completion() {
# Only run tab completion test for bash
if [[ "$0" == *bash ]]; then
ensureIndex
COMP_CWORD=0
# Test that '--' commands have tab completion
COMP_WORDS="--"
_git_index_tab_completion
assertEquals "Incorrect number of tab-completed '--' commands" "5" "$(tab_completions | wc -w)"
COMP_WORDS="gith"
_git_index_tab_completion
assertIncludes "$(tab_completions)" "github/"
# Test completion for project sub-directories when project ends with '/'
COMP_WORDS="github/"
_git_index_tab_completion
assertIncludes "$(tab_completions)" "github/videos/"
# Check that '.git/' is filtered from completion, but other hidden dirs are available
assertNotIncludes "$(tab_completions)" "github/.git/"
assertIncludes "$(tab_completions)" "github/.im_hidden/"
COMP_WORDS="github/videos/"
_git_index_tab_completion
assertIncludes "$(tab_completions)" "github/videos/octocat/"
# Test that completion checks for other matching projects even if one matches perfectly
COMP_WORDS="test_repo_1"
_git_index_tab_completion
assertIncludes "$(tab_completions)" "test_repo_1/ test_repo_11/"
fi
}
# Test changing to top-level directory (when arg begins with '/')
test_changing_to_top_level_directory() {
mkdir "$GIT_REPO_DIR/gems"
git_index "/gems"
assertEquals "$GIT_REPO_DIR/gems" "$PWD"
}
# load and run shUnit2
# Call this function to run tests
source "$scmbDir/test/support/shunit2"
|
#!/bin/bash
# Boost for compiling 32-bit binaries on 64-bit:
# ./bootstrap.sh
# ./b2 link=static address-model=32 stage
set -eu
function boost-static
{
sed -i 's/^\(oakfoam_LDADD =\) \(.*\) \($(HOARD_LIB).*\)$/\1 -Wl,-Bstatic \2 -Wl,-Bdynamic -pthread \3/' Makefile
}
VER=`cat config.h | sed -n 's/.*PACKAGE_VERSION \"\(.*\)\".*/\1/p'`
PREV_CONFIGURE=`cat config.log | head | sed -n 's/\s*$ //p'`
echo "configure was: $PREV_CONFIGURE"
DEBINPUT="0
[email protected]
5
BSD
6
games
7
i386
"
BOOST_ROOT=/data/opt/boost_1_47_0 $PREV_CONFIGURE --with-web 'CPPFLAGS=-m32' 'LDFLAGS=-m32 -pthread'
boost-static
echo "$DEBINPUT" | sudo checkinstall --nodoc --install=no make install
sudo chmod a+rw oakfoam oakfoam_*.deb
NAME=oakfoam_${VER}_i386
rm -f ${NAME}.tar.gz
mkdir ${NAME}
# BOOST_ROOT=/data/opt/boost_1_47_0 $PREV_CONFIGURE --with-web 'CPPFLAGS=-m32' 'LDFLAGS=-m32 -pthread'
# boost-static
make install DESTDIR=`pwd`/${NAME}
find ${NAME}/ -type f | grep -v 'menu\|applications\|www' | xargs -n1 -I{} mv {} $NAME/
find ${NAME}/ -type d -name www | xargs -n1 -I{} mv {} $NAME/
sed -i '/^cd \.\./d;/^bin=".*/d;s/$bin/\./' ${NAME}/oakfoam-web
mv ${NAME}/oakfoam-web ${NAME}/run.sh
tar -czf ${NAME}.tar.gz ${NAME}/
rm -r ${NAME}/
if [ "`uname -m`" == "x86_64" ]; then
DEBINPUT="0
[email protected]
5
BSD
6
games
"
$PREV_CONFIGURE --with-web
boost-static
make clean
echo "$DEBINPUT" | sudo checkinstall --nodoc --install=no make install
sudo chmod a+rw oakfoam oakfoam_*.deb
NAME=oakfoam_${VER}_amd64
rm -f ${NAME}.tar.gz
mkdir ${NAME}
# $PREV_CONFIGURE --with-web
# boost-static
make install DESTDIR=`pwd`/${NAME}
find ${NAME}/ -type f | grep -v 'menu\|applications\|www' | xargs -n1 -I{} mv {} $NAME/
find ${NAME}/ -type d -name www | xargs -n1 -I{} mv {} $NAME/
sed -i '/^cd \.\./d;/^bin=".*/d;s/$bin/\./' ${NAME}/oakfoam-web
mv ${NAME}/oakfoam-web ${NAME}/run.sh
tar -czf ${NAME}.tar.gz ${NAME}/
rm -r ${NAME}/
make clean
fi
$PREV_CONFIGURE
|
#!/bin/sh
if [ "$USER" != "root" ]
then
echo "This installer must be run with root privileges. Please run sudo $0"
return 1
fi
# Ensure the libraries we use are installed
apt install python3 python3-rpi.gpio python3-requests
addgroup --system doorbot
adduser --system --ingroup gpio doorbot
for N in doorbot.ini.example doorbot.py doorbot.service ringtest.py
do cp $N /home/doorbot
chown doorbot:doorbot /home/doorbot/$N
done
if [ -f /etc/systemd/system/doorbot.service ]
then echo "Unit file already exists, skipping"
else ln /home/doorbot/doorbot.service /etc/systemd/system/
fi
systemctl daemon-reload
|
#!/bin/bash
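# Render every Graphviz .gv file in the current directory to SVG (via the cairo renderer) under ../output/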
for file in *.gv
do
name=${file%%.*}
dot -Tsvg:cairo:cairo $name.gv > ../output/$name.svg
done
|
python transformers/examples/language-modeling/run_language_modeling.py \
--model_type gpt2 \
--tokenizer_name model-configs/1024-config \
--config_name model-configs/1024-config/config.json \
--train_data_file ../data/wikitext-103-raw/wiki.train.raw \
--eval_data_file ../data/wikitext-103-raw/wiki.valid.raw \
--output_dir train-outputs/512+0+512-shuffled-N/13-model \
--do_train --do_eval --evaluate_during_training \
--per_device_train_batch_size 3 --per_device_eval_batch_size 3 \
--num_train_epochs 10 --dataloader_drop_last \
--save_steps 500 --save_total_limit 20 \
--augmented --augmentation_function shuffle_remove_all_but_nouns_first_half \
--train_function augmented_training --eval_function augmented_eval \
--seed 13 |
#!/bin/bash
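# Run a single dieharder test (-d 206) against generator number 27 (-g) with a fixed seed (-S) for reproducibility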
dieharder -d 206 -g 27 -S 844198761
|
#!/usr/bin/env bash
set -eu
set -o pipefail
EULA=${EULA:-false}
HEAP_SIZE=${HEAP_SIZE:-1024}
JVM_OPTS=${JVM_OPTS:-}
RCON_PASSWORD=${RCON_PASSWORD:-}
SERVER_OPTS=${SERVER_OPTS:-}
cd $(pwd)/config
if [ $(ls -1 ../overrides | wc -l) != "0" ]; then
echo "Copying configuration overrides..."
for file in ../overrides/*; do
echo " $(basename ${file})"
cp ${file} .
done
echo "done!"
fi
if [ -n "$RCON_PASSWORD" ]; then
echo "rcon.password=${RCON_PASSWORD}" >> server.properties
fi
echo "Copying configuration defaults..."
for file in ../defaults/*; do
if [ ! -f "$(basename ${file})" ]; then
echo " $(basename ${file})"
cp ${file} .
fi
done
echo "done!"
if ! grep -q eula=true eula.txt; then
if [ "$EULA" != "true" ]; then
echo "You must accept the Minecraft EULA to run the server! Read it at:"
echo "> https://account.mojang.com/documents/minecraft_eula"
echo "and then restart the server with EULA=true to accept the EULA."
exit 1
else
sed -e "/^eula=/ s/=.*$/=${EULA}/" -i"" eula.txt
fi
fi
sed -e "/^(query\.|server-)port=/ s/\d+/25565/" \
-e "/^rcon.port=/ s/\d+/25575/" \
-i"" server.properties
NURSERY_MINIMUM=$((${HEAP_SIZE} / 2))
NURSERY_MAXIMUM=$((${HEAP_SIZE} * 4 / 5))
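# e.g. the default HEAP_SIZE=1024 yields a 512M minimum and 819M maximum nursery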
JVM_OPTS="${JVM_OPTS} -Xms${HEAP_SIZE}M -Xmx${HEAP_SIZE}M -Xmns${NURSERY_MINIMUM}M -Xmnx${NURSERY_MAXIMUM}M"
JVM_OPTS="${JVM_OPTS} -Xgc:concurrentScavenge -Xgc:dnssExpectedTimeRatioMaximum=3 -Xgc:scvNoAdaptiveTenure"
JVM_OPTS="${JVM_OPTS} -Xdisableexplicitjc -Xtune:virtualized -Dlog4j.configurationFile=log4j2.xml"
SERVER_OPTS="--universe ../server --plugins ../plugins ${SERVER_OPTS}"
exec mc-server-runner java ${JVM_OPTS} -jar ../bin/paperclip.jar ${SERVER_OPTS}
|
#!/usr/bin/env bash
export DOKKU_QUIET_OUTPUT=1
export DOKKU_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/dokku"
export DOKKU_VERSION=${DOKKU_VERSION:-"master"}
export PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/bin:$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/dokku:$PATH"
export PLUGIN_COMMAND_PREFIX="s3"
export PLUGIN_PATH="$DOKKU_ROOT/plugins"
export PLUGIN_ENABLED_PATH="$PLUGIN_PATH"
export PLUGIN_AVAILABLE_PATH="$PLUGIN_PATH"
export PLUGIN_CORE_AVAILABLE_PATH="$PLUGIN_PATH"
export S3RVER_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/fixtures"
export PLUGIN_DATA_ROOT="$S3RVER_ROOT"
if [[ "$(uname)" == "Darwin" ]]; then
export PLUGN_URL="https://github.com/dokku/plugn/releases/download/v0.2.1/plugn_0.2.1_darwin_x86_64.tgz"
else
export PLUGN_URL="https://github.com/dokku/plugn/releases/download/v0.2.1/plugn_0.2.1_linux_x86_64.tgz"
fi
mkdir -p "$PLUGIN_DATA_ROOT"
rm -rf "${PLUGIN_DATA_ROOT:?}"/*
flunk() {
{ if [ "$#" -eq 0 ]; then cat -
else echo "$*"
fi
}
return 1
}
assert_equal() {
if [ "$1" != "$2" ]; then
{ echo "expected: $1"
echo "actual: $2"
} | flunk
fi
}
assert_exit_status() {
assert_equal "$status" "$1"
}
assert_success() {
if [ "$status" -ne 0 ]; then
flunk "command failed with exit status $status"
elif [ "$#" -gt 0 ]; then
assert_output "$1"
fi
}
assert_exists() {
if [ ! -f "$1" ]; then
flunk "expected file to exist: $1"
fi
}
assert_contains() {
if [[ "$1" != *"$2"* ]]; then
flunk "expected $2 to be in: $1"
fi
}
assert_output() {
local expected
if [ $# -eq 0 ]; then expected="$(cat -)"
else expected="$1"
fi
assert_equal "$expected" "$output"
}
|
#!/bin/bash
grunt
rm test/res/js/pagenav.js
cp pagenav.js test/res/js/pagenav.js
cp pagenav.min.js test/res/js/pagenav.min.js |
#!/bin/bash
printCommandHelp() {
echo "Command Help:"
echo -e "source patchPeerDeployment.sh <peerSubscriptionID> <peerResourceGroup> <peerAKSClusterName>"
echo
echo "Arguments:"
echo -e "\tpeerSubscriptionID : Subscription ID of AKS-HLF peer template deployment"
echo -e "\tpeerResourceGroup : Resource group of AKS-HLF peer template deployment"
echo -e "\tpeerAKSClusterName : AKS Cluster name of AKS-HLF peer template deployment"
}
PEER_ORG_SUBSCRIPTION=$1
PEER_ORG_RESOURCE_GROUP=$2
PEER_ORG_AKS_NAME=$3
if [ -z $PEER_ORG_SUBSCRIPTION ] || [ -z $PEER_ORG_RESOURCE_GROUP ] || [ -z $PEER_ORG_AKS_NAME ]; then
echo
echo "Peer organization subscription, resource group and AKS cluster name cannot be empty!"
echo
printCommandHelp
return;
fi
if ! command -v az &> /dev/null; then
echo
echo "Command \"az\" not found! Please download Azure CLI for your system."
echo "To setup Azure CLI after installation, run: az login with valid credentials!"
echo
return;
fi
az aks get-credentials --resource-group $PEER_ORG_RESOURCE_GROUP \
--name $PEER_ORG_AKS_NAME \
--subscription $PEER_ORG_SUBSCRIPTION
res=$?
if [ $res -ne 0 ]; then
echo
echo "Switching to AKS cluster config failed with error code: $res!"
echo
printCommandHelp
return
fi
ns=hlf
deployments="$(kubectl get deploy -n $ns -o=jsonpath='{.items[*].metadata.name}')"
for deployment in $deployments; do
resource=deploy/$deployment
if [[ $deployment == peer* ]]; then
echo "Updating" $deployment
kubectl scale -n $ns $resource --replicas=0
kubectl rollout status -n $ns $resource -w
kubectl patch deployment $deployment -n $ns -p \
'{"spec": { "template": { "spec": { "containers": [ { "name":"'$deployment'", "env": [{ "name": "CORE_CHAINCODE_BUILDER", "value": "hlfakstemplateoss.azurecr.io/hyperledger/fabric-ccenv:1.4.4" }, { "name": "CORE_CHAINCODE_GOLANG_RUNTIME", "value": "hlfakstemplateoss.azurecr.io/hyperledger/fabric-baseos:amd64-0.4.18" }, { "name": "CORE_CHAINCODE_NODE_RUNTIME", "value": "hlfakstemplateoss.azurecr.io/hyperledger/fabric-baseimage:amd64-0.4.18" }, { "name": "CORE_CHAINCODE_JAVA_RUNTIME", "value": "" }, { "name": "CORE_CHAINCODE_CAR_RUNTIME", "value": "" }] } ] } } } }'
kubectl scale -n $ns $resource --replicas=1
kubectl rollout status -n $ns $resource -w
fi
done |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/MyFirstCocoaPod/MyFirstCocoaPod.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/MyFirstCocoaPod/MyFirstCocoaPod.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
. inc/common.sh
if ! $XB_BIN --help 2>&1 | grep -q debug-sync; then
echo "Requires --debug-sync support" > $SKIPPED_REASON
exit $SKIPPED_EXIT_CODE
fi
start_server --innodb_log_file_size=1M --innodb_thread_concurrency=1 \
--innodb_log_buffer_size=1M
load_dbase_schema sakila
load_dbase_data sakila
mkdir $topdir/backup
run_cmd_expect_failure $XB_BIN $XB_ARGS --datadir=$mysql_datadir --backup \
--innodb_log_file_size=1M --target-dir=$topdir/backup \
--debug-sync="xtrabackup_copy_logfile_pause" &
job_pid=$!
pid_file=$topdir/backup/xtrabackup_debug_sync
# Wait for xtrabackup to suspend
i=0
while [ ! -r "$pid_file" ]
do
sleep 1
i=$((i+1))
echo "Waited $i seconds for $pid_file to be created"
done
xb_pid=`cat $pid_file`
# Create 4M+ of log data
$MYSQL $MYSQL_ARGS -Ns -e "CREATE TABLE tmp1 ENGINE=InnoDB SELECT * FROM payment" sakila
$MYSQL $MYSQL_ARGS -Ns -e "CREATE TABLE tmp2 ENGINE=InnoDB SELECT * FROM payment" sakila
$MYSQL $MYSQL_ARGS -Ns -e "CREATE TABLE tmp3 ENGINE=InnoDB SELECT * FROM payment" sakila
# Resume the xtrabackup process
vlog "Resuming xtrabackup"
kill -SIGCONT $xb_pid
# wait's return code will be the code returned by the background process
run_cmd wait $job_pid
|
#!/bin/sh
#
# Vivado(TM)
# runme.sh: a Vivado-generated Runs Script for UNIX
# Copyright 1986-2020 Xilinx, Inc. All Rights Reserved.
#
if [ -z "$PATH" ]; then
PATH=/home/varun/tools/XilinX/Vitis/2020.2/bin:/home/varun/tools/XilinX/Vivado/2020.2/ids_lite/ISE/bin/lin64:/home/varun/tools/XilinX/Vivado/2020.2/bin
else
PATH=/home/varun/tools/XilinX/Vitis/2020.2/bin:/home/varun/tools/XilinX/Vivado/2020.2/ids_lite/ISE/bin/lin64:/home/varun/tools/XilinX/Vivado/2020.2/bin:$PATH
fi
export PATH
if [ -z "$LD_LIBRARY_PATH" ]; then
LD_LIBRARY_PATH=
else
LD_LIBRARY_PATH=:$LD_LIBRARY_PATH
fi
export LD_LIBRARY_PATH
HD_PWD='/home/varun/coding/fpga/xylinx/pynq_z1/mpsoc_only_pl_counter/mpsoc_only_pl_counter.runs/synth_1'
cd "$HD_PWD"
HD_LOG=runme.log
/bin/touch $HD_LOG
ISEStep="./ISEWrap.sh"
EAStep()
{
$ISEStep $HD_LOG "$@" >> $HD_LOG 2>&1
if [ $? -ne 0 ]
then
exit
fi
}
EAStep vivado -log counter.vds -m64 -product Vivado -mode batch -messageDb vivado.pb -notrace -source counter.tcl
|
#!/bin/bash
if [[ $target_platform =~ linux.* ]] || [[ $target_platform == win-32 ]] || [[ $target_platform == win-64 ]] || [[ $target_platform == osx-64 ]]; then
export DISABLE_AUTOBREW=1
$R CMD INSTALL --build .
else
mkdir -p $PREFIX/lib/R/library/rgbif
mv * $PREFIX/lib/R/library/rgbif
if [[ $target_platform == osx-64 ]]; then
pushd $PREFIX
for libdir in lib/R/lib lib/R/modules lib/R/library lib/R/bin/exec sysroot/usr/lib; do
pushd $libdir || exit 1
for SHARED_LIB in $(find . -type f -iname "*.dylib" -or -iname "*.so" -or -iname "R"); do
echo "fixing SHARED_LIB $SHARED_LIB"
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5.0-MRO/Resources/lib/libR.dylib "$PREFIX"/lib/R/lib/libR.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libR.dylib "$PREFIX"/lib/R/lib/libR.dylib $SHARED_LIB || true
install_name_tool -change /usr/local/clang4/lib/libomp.dylib "$PREFIX"/lib/libomp.dylib $SHARED_LIB || true
install_name_tool -change /usr/local/gfortran/lib/libgfortran.3.dylib "$PREFIX"/lib/libgfortran.3.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libquadmath.0.dylib "$PREFIX"/lib/libquadmath.0.dylib $SHARED_LIB || true
install_name_tool -change /usr/local/gfortran/lib/libquadmath.0.dylib "$PREFIX"/lib/libquadmath.0.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libgfortran.3.dylib "$PREFIX"/lib/libgfortran.3.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libgcc_s.1.dylib "$PREFIX"/lib/libgcc_s.1.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libiconv.2.dylib "$PREFIX"/sysroot/usr/lib/libiconv.2.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libncurses.5.4.dylib "$PREFIX"/sysroot/usr/lib/libncurses.5.4.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libicucore.A.dylib "$PREFIX"/sysroot/usr/lib/libicucore.A.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libexpat.1.dylib "$PREFIX"/lib/libexpat.1.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libcurl.4.dylib "$PREFIX"/lib/libcurl.4.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libc++.1.dylib "$PREFIX"/lib/libc++.1.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libc++.1.dylib "$PREFIX"/lib/libc++.1.dylib $SHARED_LIB || true
done
popd
done
popd
fi
fi
|
#!/bin/sh
# Script which does some configuration of the system for laptops/desktops
test -e /sbin/rc-update
use_openrc=$?
setupZFSArc(){
# Tune ZFS ARC
###############################################
grep -q "vfs.zfs.arc_max=" "/boot/loader.conf"
if [ $? -eq 0 ] ; then
return 0 #Do not overwrite current ARC settings
fi
# Get system memory in bytes
sysMem=`sysctl hw.physmem | cut -w -f 2`
# Get that in MB
sysMem=`expr $sysMem / 1024 / 1024`
# Set some default zArc sizes based upon RAM of system
if [ $sysMem -lt 1024 ] ; then
zArc="128"
elif [ $sysMem -lt 4096 ] ; then
zArc="256"
else
zArc="512"
fi
echo "# Tune ZFS Arc Size - Change to adjust memory used for disk cache" >> /boot/loader.conf
echo "vfs.zfs.arc_max=\"${zArc}M\"" >> /boot/loader.conf
}
setupPowerd(){
if [ ${use_openrc} -eq 0 ] ; then
rc-update | grep -q powerd
else
grep -q -E 'powerd(xx)?_enable="YES"' /etc/rc.conf
fi
if [ $? -eq 0 ] ; then
#one of the powerd[++] service is already setup
return
fi
p_service="powerd"
if [ ${use_openrc} -eq 0 ] ; then
if [ -e "/usr/local/etc/init.d/powerd++" ] ; then
#The alternative powerd++ service is installed - use that instead
p_service="powerd++"
fi
rc-update add ${p_service} default
else
if [ -e "/usr/local/etc/rc.d/powerdxx" ] ; then
p_service="powerdxx"
fi
sysrc "${p_service}_enable=YES"
fi
}
setupXProfile(){
local _script="/usr/local/bin/setup-xorg-session"
# Check all the .xprofile files in the user home dirs
# And make sure they launch the x session setup script
for _hd in $(ls /usr/home)
do
if [ ! -e "/usr/home/${_hd}/.xprofile" ] ; then continue; fi
grep -q "${_script}" "/usr/home/${_hd}/.xprofile"
if [ $? -ne 0 ] ; then
echo "
if [ -e \"${_script}\" ] ; then
. ${_script}
fi
" >> "/usr/home/${_hd}/.xprofile"
fi
done
#Now make sure the default ~/.xprofile exists and/or is setup
if [ ! -e "/usr/share/skel/dot.xprofile" ] ; then
echo "# Graphical session setup
# Created by Project Trident
# ===================
if [ -e \"${_script}\" ] ; then
. ${_script}
fi
" >> "/usr/share/skel/dot.xprofile"
else
grep -q "${_script}" "/usr/share/skel/dot.xprofile"
if [ $? -ne 0 ] ; then
echo "
if [ -e \"${_script}\" ] ; then
. ${_script}
fi
" >> "/usr/share/skel/dot.xprofile"
fi
fi
}
setupWlan(){
# Check for any new wifi devices to setup
for wnic in `sysctl -n net.wlan.devices 2>/dev/null`
do
#See if this device is already configured
grep -q "wlans_${wnic}" /etc/rc.conf
if [ $? -ne 0 ] ; then
# New wifi device - determine the next number for it
grep -qE "^wlans_" /etc/rc.conf
if [ $? -eq 0 ] ; then
WLANCOUNT=`cat /etc/rc.conf | grep -E "^wlans_" | wc -l | awk '{print $1}'`
else
WLANCOUNT="0"
fi
WLAN="wlan${WLANCOUNT}"
# Save the wlan interface
echo "wlans_${wnic}=\"${WLAN}\"" >> /etc/rc.conf
echo "ifconfig_${WLAN}=\"WPA DHCP\"" >> /etc/rc.conf
echo "ifconfig_${WLAN}_ipv6=\"inet6 accept_rtadv\"" >> /etc/rc.conf
fi
done
}
setupLan(){
for nic in `ifconfig -l`
do
#Ignore loopback devices
echo ${nic} | grep -qE "lo[0-9]"
if [ 0 -eq $? ] ; then continue; fi
#See if this device is already configured
sysrc -ci "ifconfig_${nic}"
if [ $? -ne 0 ] ; then
# New ethernet device
sysrc "ifconfig_${nic}=DHCP"
sysrc "ifconfig_${nic}_ipv6=inet6 accept_rtadv"
fi
done
}
#figure out if this is a laptop, desktop, or VM (VMWare or VirtualBox only at the moment)
pciconf -lv | grep -qiE "(vmware|innotek)"
if [ $? -eq 0 ] ; then
type="vm"
else
devinfo | grep -q acpi_acad0
if [ $? -eq 0 ] ; then
type="laptop"
else
type="desktop"
fi
fi
################################################
# Verify generic init
################################################
if [ ! -d "/usr/home" ] ; then
mkdir /usr/home
fi
# Setup /home link (for people used to Linux, and some applications)
if [ ! -e "/home" ] ; then
ln -s /usr/home /home
fi
#Check/set the ZFS arc size
setupZFSArc
#Turn on power management service (if one is not already setup)
if [ "type" != "vm" ] ; then
setupPowerd
fi
if [ "${type}" = "laptop" ] ; then
# Laptop system
# TO-DO
else
# Desktop system
# TO-DO
fi
#setup the networking interfaces
setupLan
setupWlan
setupXProfile
#Perform the system sanity check
/usr/local/share/trident/scripts/system-sanity-check.sh
#TrueOS 18.06-18.08 Bug Bypass (8/23/18 - Ken Moore)
# - replace "DHCP" with "SYNCDHCP" in the default-installed /etc/rc.conf
#sed -i '' 's|"DHCP|"SYNCDHCP|g' /etc/rc.conf
#sed -i '' 's| DHCP"| SYNCDHCP"|g' /etc/rc.conf
#Now ensure the system services are all setup properly
/usr/local/share/trident/scripts/validate-services.sh /usr/local/etc/trident/required-services /usr/local/etc/trident/recommended-services
|
# bash/zsh git prompt support
#
# Copyright (C) 2006,2007 Shawn O. Pearce <[email protected]>
# Distributed under the GNU General Public License, version 2.0.
#
# This script allows you to see repository status in your prompt.
#
# To enable:
#
# 1) Copy this file to somewhere (e.g. ~/.git-prompt.sh).
# 2) Add the following line to your .bashrc/.zshrc:
# source ~/.git-prompt.sh
# 3a) Change your PS1 to call __git_ps1 as
# command-substitution:
# Bash: PS1='[\u@\h \W$(__git_ps1 " (%s)")]\$ '
# ZSH: setopt PROMPT_SUBST ; PS1='[%n@%m %c$(__git_ps1 " (%s)")]\$ '
# the optional argument will be used as format string.
# 3b) Alternatively, for a slightly faster prompt, __git_ps1 can
# be used for PROMPT_COMMAND in Bash or for precmd() in Zsh
# with two parameters, <pre> and <post>, which are strings
# you would put in $PS1 before and after the status string
# generated by the git-prompt machinery. e.g.
# Bash: PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'
# will show username, at-sign, host, colon, cwd, then
# various status string, followed by dollar and SP, as
# your prompt.
# ZSH: precmd () { __git_ps1 "%n" ":%~$ " "|%s" }
# will show username, pipe, then various status string,
# followed by colon, cwd, dollar and SP, as your prompt.
# Optionally, you can supply a third argument with a printf
# format string to finetune the output of the branch status
#
# The repository status will be displayed only if you are currently in a
# git repository. The %s token is the placeholder for the shown status.
#
# The prompt status always includes the current branch name.
#
# In addition, if you set GIT_PS1_SHOWDIRTYSTATE to a nonempty value,
# unstaged (*) and staged (+) changes will be shown next to the branch
# name. You can configure this per-repository with the
# bash.showDirtyState variable, which defaults to true once
# GIT_PS1_SHOWDIRTYSTATE is enabled.
#
# You can also see if currently something is stashed, by setting
# GIT_PS1_SHOWSTASHSTATE to a nonempty value. If something is stashed,
# then a '$' will be shown next to the branch name.
#
# If you would like to see if there're untracked files, then you can set
# GIT_PS1_SHOWUNTRACKEDFILES to a nonempty value. If there're untracked
# files, then a '%' will be shown next to the branch name. You can
# configure this per-repository with the bash.showUntrackedFiles
# variable, which defaults to true once GIT_PS1_SHOWUNTRACKEDFILES is
# enabled.
#
# If you would like to see the difference between HEAD and its upstream,
# set GIT_PS1_SHOWUPSTREAM="auto". A "<" indicates you are behind, ">"
# indicates you are ahead, "<>" indicates you have diverged and "="
# indicates that there is no difference. You can further control
# behaviour by setting GIT_PS1_SHOWUPSTREAM to a space-separated list
# of values:
#
# verbose show number of commits ahead/behind (+/-) upstream
# name if verbose, then also show the upstream abbrev name
# legacy don't use the '--count' option available in recent
# versions of git-rev-list
# git always compare HEAD to @{upstream}
# svn always compare HEAD to your SVN upstream
#
# You can change the separator between the branch name and the above
# state symbols by setting GIT_PS1_STATESEPARATOR. The default separator
# is SP.
#
# By default, __git_ps1 will compare HEAD to your SVN upstream if it can
# find one, or @{upstream} otherwise. Once you have set
# GIT_PS1_SHOWUPSTREAM, you can override it on a per-repository basis by
# setting the bash.showUpstream config variable.
#
# If you would like to see more information about the identity of
# commits checked out as a detached HEAD, set GIT_PS1_DESCRIBE_STYLE
# to one of these values:
#
# contains relative to newer annotated tag (v1.6.3.2~35)
# branch relative to newer tag or branch (master~4)
# describe relative to older annotated tag (v1.6.3.1-13-gdd42c2f)
# tag relative to any older tag (v1.6.3.1-13-gdd42c2f)
# default exactly matching tag
#
# If you would like a colored hint about the current dirty state, set
# GIT_PS1_SHOWCOLORHINTS to a nonempty value. The colors are based on
# the colored output of "git status -sb" and are available only when
# using __git_ps1 for PROMPT_COMMAND or precmd.
#
# If you would like __git_ps1 to do nothing in the case when the current
# directory is set up to be ignored by git, then set
# GIT_PS1_HIDE_IF_PWD_IGNORED to a nonempty value. Override this on the
# repository level by setting bash.hideIfPwdIgnored to "false".
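#
# A minimal .bashrc snippet combining the options described above
# (illustrative values; adjust to taste):
#
#   source ~/.git-prompt.sh
#   GIT_PS1_SHOWDIRTYSTATE=1
#   GIT_PS1_SHOWSTASHSTATE=1
#   GIT_PS1_SHOWUNTRACKEDFILES=1
#   GIT_PS1_SHOWUPSTREAM="auto"
#   PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'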
# check whether printf supports -v
__git_printf_supports_v=
printf -v __git_printf_supports_v -- '%s' yes >/dev/null 2>&1
# stores the divergence from upstream in $p
# used by GIT_PS1_SHOWUPSTREAM
__git_ps1_show_upstream ()
{
local key value
local svn_remote svn_url_pattern count n
local upstream=git legacy="" verbose="" name=""
svn_remote=()
# get some config options from git-config
local output="$(git config -z --get-regexp '^(svn-remote\..*\.url|bash\.showupstream)$' 2>/dev/null | tr '\0\n' '\n ')"
while read -r key value; do
case "$key" in
bash.showupstream)
GIT_PS1_SHOWUPSTREAM="$value"
if [[ -z "${GIT_PS1_SHOWUPSTREAM}" ]]; then
p=""
return
fi
;;
svn-remote.*.url)
svn_remote[$((${#svn_remote[@]} + 1))]="$value"
svn_url_pattern="$svn_url_pattern\\|$value"
upstream=svn+git # default upstream is SVN if available, else git
;;
esac
done <<< "$output"
# parse configuration values
for option in ${GIT_PS1_SHOWUPSTREAM}; do
case "$option" in
git|svn) upstream="$option" ;;
verbose) verbose=1 ;;
legacy) legacy=1 ;;
name) name=1 ;;
esac
done
# Find our upstream
case "$upstream" in
git) upstream="@{upstream}" ;;
svn*)
# get the upstream from the "git-svn-id: ..." in a commit message
# (git-svn uses essentially the same procedure internally)
local -a svn_upstream
svn_upstream=($(git log --first-parent -1 \
--grep="^git-svn-id: \(${svn_url_pattern#??}\)" 2>/dev/null))
if [[ 0 -ne ${#svn_upstream[@]} ]]; then
svn_upstream=${svn_upstream[${#svn_upstream[@]} - 2]}
svn_upstream=${svn_upstream%@*}
local n_stop="${#svn_remote[@]}"
for ((n=1; n <= n_stop; n++)); do
svn_upstream=${svn_upstream#${svn_remote[$n]}}
done
if [[ -z "$svn_upstream" ]]; then
# default branch name for checkouts with no layout:
upstream=${GIT_SVN_ID:-git-svn}
else
upstream=${svn_upstream#/}
fi
elif [[ "svn+git" = "$upstream" ]]; then
upstream="@{upstream}"
fi
;;
esac
# Find how many commits we are ahead/behind our upstream
if [[ -z "$legacy" ]]; then
count="$(git rev-list --count --left-right \
"$upstream"...HEAD 2>/dev/null)"
else
# produce equivalent output to --count for older versions of git
local commits
if commits="$(git rev-list --left-right "$upstream"...HEAD 2>/dev/null)"
then
local commit behind=0 ahead=0
for commit in $commits
do
case "$commit" in
"<"*) ((behind++)) ;;
*) ((ahead++)) ;;
esac
done
count="$behind $ahead"
else
count=""
fi
fi
# calculate the result
if [[ -z "$verbose" ]]; then
case "$count" in
"") # no upstream
p="" ;;
"0 0") # equal to upstream
p="=" ;;
"0 "*) # ahead of upstream
p=">" ;;
*" 0") # behind upstream
p="<" ;;
*) # diverged from upstream
p="<>" ;;
esac
else
case "$count" in
"") # no upstream
p="" ;;
"0 0") # equal to upstream
p=" u=" ;;
"0 "*) # ahead of upstream
p=" u+${count#0 }" ;;
*" 0") # behind upstream
p=" u-${count% 0}" ;;
*) # diverged from upstream
p=" u+${count#* }-${count% *}" ;;
esac
if [[ -n "$count" && -n "$name" ]]; then
__git_ps1_upstream_name=$(git rev-parse \
--abbrev-ref "$upstream" 2>/dev/null)
if [ "$pcmode" = yes ] && [ "$ps1_expanded" = yes ]; then
p="$p \${__git_ps1_upstream_name}"
else
p="$p ${__git_ps1_upstream_name}"
# not needed anymore; keep user's
# environment clean
unset __git_ps1_upstream_name
fi
fi
fi
}
# Helper function that is meant to be called from __git_ps1. It
# injects color codes into the appropriate gitstring variables used
# to build a gitstring.
__git_ps1_colorize_gitstring ()
{
if [[ -n "${ZSH_VERSION-}" ]]; then
local c_red='%F{red}'
local c_green='%F{green}'
local c_lblue='%F{blue}'
local c_clear='%f'
else
# Using \[ and \] around colors is necessary to prevent
# issues with command line editing/browsing/completion!
local c_red='\[\e[31m\]'
local c_green='\[\e[32m\]'
local c_lblue='\[\e[1;34m\]'
local c_clear='\[\e[0m\]'
fi
local bad_color=$c_red
local ok_color=$c_green
local flags_color="$c_lblue"
local branch_color=""
if [ "$detached" = no ]; then
branch_color="$ok_color"
else
branch_color="$bad_color"
fi
c="$branch_color$c"
z="$c_clear$z"
if [ "$w" = "*" ]; then
w="$bad_color$w"
fi
if [ -n "$i" ]; then
i="$ok_color$i"
fi
if [ -n "$s" ]; then
s="$flags_color$s"
fi
if [ -n "$u" ]; then
u="$bad_color$u"
fi
r="$c_clear$r"
}
# Helper function to read the first line of a file into a variable.
# __git_eread requires 2 arguments, the file path and the name of the
# variable, in that order.
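# For illustration, the call "__git_eread "$g/rebase-merge/head-name" b" used
# further below reads the rebased branch name into the variable b.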
__git_eread ()
{
test -r "$1" && IFS=$'\r\n' read "$2" <"$1"
}
# __git_ps1 accepts 0 or 1 arguments (i.e., format string)
# when called from PS1 using command substitution
# in this mode it prints text to add to bash PS1 prompt (includes branch name)
#
# __git_ps1 requires 2 or 3 arguments when called from PROMPT_COMMAND (pc)
# in that case it _sets_ PS1. The arguments are parts of a PS1 string.
# when two arguments are given, the first is prepended and the second appended
# to the state string when assigned to PS1.
# The optional third parameter will be used as printf format string to further
# customize the output of the git-status string.
# In this mode you can request colored hints using GIT_PS1_SHOWCOLORHINTS=true
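# For illustration (adapt the prompt strings to your own setup), typical calls are:
#   command substitution:  PS1='[\u@\h \W$(__git_ps1 " (%s)")]\$ '
#   PROMPT_COMMAND mode:   PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'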
__git_ps1 ()
{
# preserve exit status
local exit=$?
local pcmode=no
local detached=no
local ps1pc_start='\u@\h:\w '
local ps1pc_end='\$ '
local printf_format=' (%s)'
case "$#" in
2|3) pcmode=yes
ps1pc_start="$1"
ps1pc_end="$2"
printf_format="${3:-$printf_format}"
# set PS1 to a plain prompt so that we can
# simply return early if the prompt should not
# be decorated
PS1="$ps1pc_start$ps1pc_end"
;;
0|1) printf_format="${1:-$printf_format}"
;;
*) return $exit
;;
esac
# ps1_expanded: This variable is set to 'yes' if the shell
# subjects the value of PS1 to parameter expansion:
#
# * bash does unless the promptvars option is disabled
# * zsh does not unless the PROMPT_SUBST option is set
# * POSIX shells always do
#
# If the shell would expand the contents of PS1 when drawing
# the prompt, a raw ref name must not be included in PS1.
# This protects the user from arbitrary code execution via
# specially crafted ref names. For example, a ref named
# 'refs/heads/$(IFS=_;cmd=sudo_rm_-rf_/;$cmd)' might cause the
# shell to execute 'sudo rm -rf /' when the prompt is drawn.
#
# Instead, the ref name should be placed in a separate global
# variable (in the __git_ps1_* namespace to avoid colliding
# with the user's environment) and that variable should be
# referenced from PS1. For example:
#
# __git_ps1_foo=$(do_something_to_get_ref_name)
# PS1="...stuff...\${__git_ps1_foo}...stuff..."
#
# If the shell does not expand the contents of PS1, the raw
# ref name must be included in PS1.
#
# The value of this variable is only relevant when in pcmode.
#
# Assume that the shell follows the POSIX specification and
# expands PS1 unless determined otherwise. (This is more
# likely to be correct if the user has a non-bash, non-zsh
# shell and safer than the alternative if the assumption is
# incorrect.)
#
local ps1_expanded=yes
[ -z "${ZSH_VERSION-}" ] || [[ -o PROMPT_SUBST ]] || ps1_expanded=no
[ -z "${BASH_VERSION-}" ] || shopt -q promptvars || ps1_expanded=no
local repo_info rev_parse_exit_code
repo_info="$(git rev-parse --git-dir --is-inside-git-dir \
--is-bare-repository --is-inside-work-tree \
--short HEAD 2>/dev/null)"
rev_parse_exit_code="$?"
if [ -z "$repo_info" ]; then
return $exit
fi
local short_sha=""
if [ "$rev_parse_exit_code" = "0" ]; then
short_sha="${repo_info##*$'\n'}"
repo_info="${repo_info%$'\n'*}"
fi
local inside_worktree="${repo_info##*$'\n'}"
repo_info="${repo_info%$'\n'*}"
local bare_repo="${repo_info##*$'\n'}"
repo_info="${repo_info%$'\n'*}"
local inside_gitdir="${repo_info##*$'\n'}"
local g="${repo_info%$'\n'*}"
if [ "true" = "$inside_worktree" ] &&
[ -n "${GIT_PS1_HIDE_IF_PWD_IGNORED-}" ] &&
[ "$(git config --bool bash.hideIfPwdIgnored)" != "false" ] &&
git check-ignore -q .
then
return $exit
fi
local r=""
local b=""
local step=""
local total=""
if [ -d "$g/rebase-merge" ]; then
__git_eread "$g/rebase-merge/head-name" b
__git_eread "$g/rebase-merge/msgnum" step
__git_eread "$g/rebase-merge/end" total
if [ -f "$g/rebase-merge/interactive" ]; then
r="|REBASE-i"
else
r="|REBASE-m"
fi
else
if [ -d "$g/rebase-apply" ]; then
__git_eread "$g/rebase-apply/next" step
__git_eread "$g/rebase-apply/last" total
if [ -f "$g/rebase-apply/rebasing" ]; then
__git_eread "$g/rebase-apply/head-name" b
r="|REBASE"
elif [ -f "$g/rebase-apply/applying" ]; then
r="|AM"
else
r="|AM/REBASE"
fi
elif [ -f "$g/MERGE_HEAD" ]; then
r="|MERGING"
elif [ -f "$g/CHERRY_PICK_HEAD" ]; then
r="|CHERRY-PICKING"
elif [ -f "$g/REVERT_HEAD" ]; then
r="|REVERTING"
elif [ -f "$g/BISECT_LOG" ]; then
r="|BISECTING"
fi
if [ -n "$b" ]; then
:
elif [ -h "$g/HEAD" ]; then
# symlink symbolic ref
b="$(git symbolic-ref HEAD 2>/dev/null)"
else
local head=""
if ! __git_eread "$g/HEAD" head; then
return $exit
fi
# is it a symbolic ref?
b="${head#ref: }"
if [ "$head" = "$b" ]; then
detached=yes
b="$(
case "${GIT_PS1_DESCRIBE_STYLE-}" in
(contains)
git describe --contains HEAD ;;
(branch)
git describe --contains --all HEAD ;;
(tag)
git describe --tags HEAD ;;
(describe)
git describe HEAD ;;
(* | default)
git describe --tags --exact-match HEAD ;;
esac 2>/dev/null)" ||
b="$short_sha..."
b="($b)"
fi
fi
fi
if [ -n "$step" ] && [ -n "$total" ]; then
r="$r $step/$total"
fi
local w=""
local i=""
local s=""
local u=""
local c=""
local p=""
if [ "true" = "$inside_gitdir" ]; then
if [ "true" = "$bare_repo" ]; then
c="BARE:"
else
b="GIT_DIR!"
fi
elif [ "true" = "$inside_worktree" ]; then
if [ -n "${GIT_PS1_SHOWDIRTYSTATE-}" ] &&
[ "$(git config --bool bash.showDirtyState)" != "false" ]
then
git diff --no-ext-diff --quiet || w="*"
git diff --no-ext-diff --cached --quiet || i="+"
if [ -z "$short_sha" ] && [ -z "$i" ]; then
i="#"
fi
fi
if [ -n "${GIT_PS1_SHOWSTASHSTATE-}" ] &&
git rev-parse --verify --quiet refs/stash >/dev/null
then
s="$"
fi
if [ -n "${GIT_PS1_SHOWUNTRACKEDFILES-}" ] &&
[ "$(git config --bool bash.showUntrackedFiles)" != "false" ] &&
git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*' >/dev/null 2>/dev/null
then
u="%${ZSH_VERSION+%}"
fi
if [ -n "${GIT_PS1_SHOWUPSTREAM-}" ]; then
__git_ps1_show_upstream
fi
fi
local z="${GIT_PS1_STATESEPARATOR-" "}"
# NO color option unless in PROMPT_COMMAND mode or it's Zsh
if [ -n "${GIT_PS1_SHOWCOLORHINTS-}" ]; then
if [ "$pcmode" = yes ] || [ -n "${ZSH_VERSION-}" ]; then
__git_ps1_colorize_gitstring
fi
fi
b=${b##refs/heads/}
if [ "$pcmode" = yes ] && [ "$ps1_expanded" = yes ]; then
__git_ps1_branch_name=$b
b="\${__git_ps1_branch_name}"
fi
local f="$w$i$s$u"
local gitstring="$c$b${f:+$z$f}$r$p"
if [ "$pcmode" = yes ]; then
if [ "${__git_printf_supports_v-}" != yes ]; then
gitstring=$(printf -- "$printf_format" "$gitstring")
else
printf -v gitstring -- "$printf_format" "$gitstring"
fi
PS1="$ps1pc_start$gitstring$ps1pc_end"
else
printf -- "$printf_format" "$gitstring"
fi
return $exit
}
|
#!/bin/bash
#Copyright (c) 2016, Allgeyer Tobias, Aumann Florian, Borella Jocelyn, Karrenbauer Oliver, Marek Felix, Meissner Pascal, Stroh Daniel, Trautmann Jeremias
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
#2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
#3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# read http://unix.stackexchange.com/questions/17116/prevent-pane-window-from-closing-when-command-completes-tmux
# Starts additional simulation modules that cannot be launched before gazebo is running
# wait for gazebo and rviz
sleep 5
# while [[ ! $(rosservice list | grep gaz) ]]; do sleep 1; done;
# while [[ ! $(xwininfo -root -all | grep rviz) ]]; do sleep 1; done;
# make sure log folder exists, so all modules start safely
export logFolder=~/log
mkdir -p ${logFolder}
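# Note: each module below runs inside `script -c "<roslaunch ...>" -f <logfile>` in its own
# tmux window, so roslaunch output is captured under ${logFolder} while the pane stays open.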
#Starts scene recognition, pose prediction and provides a pane for interfaces with object localization simulation.
tmux new-window -n 'ism'
tmux send-keys -t asr:ism 'script -c "roslaunch --wait asr_recognizer_prediction_ism rp_ism_node.launch" -f '"${logFolder}"'/ism.log' C-m
tmux split-window -t asr:ism
tmux send-keys -t asr:ism.1 'echo Perform service calls to asr_fake_object_recognition from here.' C-m
#Starts next-best-view calculation and world model.
tmux new-window -n 'nbv'
tmux send-keys -t asr:nbv 'script -c "roslaunch --wait asr_next_best_view next_best_view_core_sim.launch" -f '"${logFolder}"'/nbv.log' C-m
tmux split-window -t asr:nbv
tmux send-keys -t asr:nbv.1 'script -c "roslaunch --wait asr_world_model world_model.launch" -f '"${logFolder}"'/world_model.log' C-m
#Starts visualization server to publish the room model.
tmux new-window -n 'viz_server'
tmux send-keys -t asr:viz_server 'script -c "roslaunch --wait asr_visualization_server visualization.launch" -f '"${logFolder}"'/viz_server.log' C-m
#Starts state machine that controls all other components, required for active scene recognition.
tmux new-window -n 'state_machine'
tmux send-keys -t asr:state_machine 'script -c "roslaunch --wait asr_state_machine scene_exploration_sim.launch" -f '"${logFolder}"'/state_machine.log' C-m
#Starts direct_search_manager that handles the direct search
tmux new-window -n 'direct_search_manager'
tmux send-keys -t asr:direct_search_manager 'script -c "roslaunch --wait asr_direct_search_manager direct_search_manager.launch" -f '"${logFolder}"'/direct_search_manager.log' C-m
|
#!/bin/sh
docker push robodomo/icomfort-microservice
|
#!/usr/bin/env bash
set -e
set -o pipefail
# vars
PYTHON=python
PIP=pip
VENV_NAME=
# process options
while getopts "h?3e:" opt; do
case "$opt" in
h|\?)
echo "install.sh parameters"
echo ""
echo "-3 install for Python 3.3+"
echo "-e [environment name] install to a virtual environment"
echo ""
exit 1
;;
3)
PYTHON=python3
PIP=pip3
;;
e)
VENV_NAME=$OPTARG
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
# check to ensure this is not being run directly as root
if [ $(id -u) -eq 0 ]; then
echo "Installation cannot be performed as root or via sudo."
echo "Please install as a regular user."
exit 1
fi
# check for sudo
if hash sudo 2> /dev/null; then
echo "sudo found."
else
echo "sudo not found. Please install sudo first before proceeding."
exit 1
fi
# check that shipyard.py is in cwd
if [ ! -f $PWD/shipyard.py ]; then
echo "shipyard.py not found in $PWD."
echo "Please run install.sh from the same directory as shipyard.py."
exit 1
fi
# check for python
if hash $PYTHON 2> /dev/null; then
echo "Installing for $PYTHON."
else
echo "$PYTHON not found, please install $PYTHON first with your system software installer."
exit 1
fi
# check for anaconda
set +e
ANACONDA=0
$PYTHON -c "from __future__ import print_function; import sys; print(sys.version)" | grep -Ei 'anaconda|continuum'
if [ $? -eq 0 ]; then
# check for conda
if hash conda 2> /dev/null; then
echo "Anaconda environment detected."
else
echo "Anaconda environment detected, but conda command not found."
exit 1
fi
if [ -z $VENV_NAME ]; then
echo "Virtual environment name must be supplied for Anaconda installations."
exit 1
fi
ANACONDA=1
PIP=pip
fi
set -e
# perform some virtual env parameter checks
INSTALL_VENV_BIN=0
if [ ! -z $VENV_NAME ]; then
# check if virtual env, env is not named shipyard
if [ "$VENV_NAME" == "shipyard" ]; then
echo "Virtual environment name cannot be shipyard. Please use a different virtual environment name."
exit 1
fi
# check for virtualenv executable
if [ $ANACONDA -eq 0 ]; then
if hash virtualenv 2> /dev/null; then
echo "virtualenv found."
else
echo "virtualenv not found."
INSTALL_VENV_BIN=1
fi
fi
fi
# try to get /etc/lsb-release
if [ -e /etc/lsb-release ]; then
. /etc/lsb-release
else
if [ -e /etc/os-release ]; then
. /etc/os-release
DISTRIB_ID=$ID
DISTRIB_RELEASE=$VERSION_ID
fi
fi
if [ -z ${DISTRIB_ID+x} ] || [ -z ${DISTRIB_RELEASE+x} ]; then
echo "Unknown DISTRIB_ID or DISTRIB_RELEASE."
echo "Please refer to the Installation documentation for manual installation steps."
exit 1
fi
# lowercase vars
DISTRIB_ID=${DISTRIB_ID,,}
DISTRIB_RELEASE=${DISTRIB_RELEASE,,}
# install requisite packages from distro repo
if [ $DISTRIB_ID == "ubuntu" ] || [ $DISTRIB_ID == "debian" ]; then
sudo apt-get update
if [ $PYTHON == "python" ]; then
PYTHON_PKGS="libpython-dev python-dev"
if [ $ANACONDA -eq 0 ]; then
PYTHON_PKGS="$PYTHON_PKGS python-pip"
fi
else
PYTHON_PKGS="libpython3-dev python3-dev"
if [ $ANACONDA -eq 0 ]; then
PYTHON_PKGS="$PYTHON_PKGS python3-pip"
fi
fi
sudo apt-get install -y --no-install-recommends \
build-essential libssl-dev libffi-dev openssl \
openssh-client rsync $PYTHON_PKGS
elif [ $DISTRIB_ID == "centos" ] || [ $DISTRIB_ID == "rhel" ]; then
if [ $PYTHON == "python" ]; then
PYTHON_PKGS="python-devel"
else
if ! yum list installed epel-release > /dev/null 2>&1; then
echo "epel-release package not installed."
echo "Please install the epel-release package or refer to the Installation documentation for manual installation steps."
exit 1
fi
if ! yum list installed python34 > /dev/null 2>&1; then
echo "python34 epel package not installed."
echo "Please install the python34 epel package or refer to the Installation documentation for manual installation steps."
exit 1
fi
PYTHON_PKGS="python34-devel"
fi
sudo yum install -y gcc openssl-devel libffi-devel openssl \
openssh-clients rsync $PYTHON_PKGS
if [ $ANACONDA -eq 0 ]; then
curl -fSsL https://bootstrap.pypa.io/get-pip.py | sudo $PYTHON
fi
elif [ $DISTRIB_ID == "opensuse" ] || [ $DISTRIB_ID == "sles" ]; then
sudo zypper ref
if [ $PYTHON == "python" ]; then
PYTHON_PKGS="python-devel"
else
PYTHON_PKGS="python3-devel"
fi
sudo zypper -n in gcc libopenssl-devel libffi48-devel openssl \
openssh rsync $PYTHON_PKGS
if [ $ANACONDA -eq 0 ]; then
curl -fSsL https://bootstrap.pypa.io/get-pip.py | sudo $PYTHON
fi
else
echo "Unsupported distribution."
echo "Please refer to the Installation documentation for manual installation steps."
exit 1
fi
# create virtual env if required and install required python packages
if [ ! -z $VENV_NAME ]; then
# install virtual env if required
if [ $INSTALL_VENV_BIN -eq 1 ]; then
sudo $PIP install virtualenv
fi
if [ $ANACONDA -eq 0 ]; then
# create venv if it doesn't exist
virtualenv -p $PYTHON $VENV_NAME
source $VENV_NAME/bin/activate
$PIP install --upgrade pip setuptools
$PIP install --upgrade -r requirements.txt
deactivate
else
# create conda env
set +e
conda create --yes --name $VENV_NAME
set -e
source activate $VENV_NAME
conda install --yes pip
# temporary workaround with pip requirements upgrading setuptools and
# conda pip failing to reference the old setuptools version
set +e
$PIP install --upgrade setuptools
set -e
$PIP install --upgrade -r requirements.txt
source deactivate $VENV_NAME
fi
else
sudo $PIP install --upgrade pip setuptools
$PIP install --upgrade --user -r requirements.txt
fi
# create shipyard script
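# Note: the unquoted EOF heredoc below expands $PWD and $VENV_NAME now, baking them into the
# generated wrapper; the quoted 'EOF' heredocs that follow are written verbatim, so their
# variables are expanded only when the generated shipyard script itself runs.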
cat > shipyard << EOF
#!/usr/bin/env bash
set -e
set -f
BATCH_SHIPYARD_ROOT_DIR=$PWD
VENV_NAME=$VENV_NAME
EOF
cat >> shipyard << 'EOF'
if [ -z $BATCH_SHIPYARD_ROOT_DIR ]; then
echo Batch Shipyard root directory not set.
echo Please rerun the install.sh script.
exit 1
fi
EOF
if [ ! -z $VENV_NAME ]; then
if [ $ANACONDA -eq 0 ]; then
cat >> shipyard << 'EOF'
source $BATCH_SHIPYARD_ROOT_DIR/$VENV_NAME/bin/activate
EOF
else
cat >> shipyard << 'EOF'
source activate $VENV_NAME
EOF
fi
fi
if [ $PYTHON == "python" ]; then
cat >> shipyard << 'EOF'
python $BATCH_SHIPYARD_ROOT_DIR/shipyard.py $*
EOF
else
cat >> shipyard << 'EOF'
python3 $BATCH_SHIPYARD_ROOT_DIR/shipyard.py $*
EOF
fi
if [ ! -z $VENV_NAME ]; then
if [ $ANACONDA -eq 0 ]; then
cat >> shipyard << 'EOF'
deactivate
EOF
else
cat >> shipyard << 'EOF'
source deactivate $VENV_NAME
EOF
fi
fi
chmod 755 shipyard
echo ""
if [ -z $VENV_NAME ]; then
echo '>> Please add $HOME/.local/bin to your $PATH. You can do this '
echo '>> permanently in your shell rc script, e.g., .bashrc for bash shells.'
echo ""
fi
echo ">> Install complete for $PYTHON. Please run Batch Shipyard as: $PWD/shipyard"
|
#!/bin/bash
echo ""
echo "Applying migration AssociatedEnterpriseCheckYourAnswers"
echo "Adding routes to conf/app.routes"
echo "" >> ../conf/app.routes
echo "GET /associatedEnterpriseCheckYourAnswers controllers.AssociatedEnterpriseCheckYourAnswersController.onPageLoad()" >> ../conf/app.routes
echo "Adding messages to conf.messages"
echo "" >> ../conf/messages.en
echo "associatedEnterpriseCheckYourAnswers.title = associatedEnterpriseCheckYourAnswers" >> ../conf/messages.en
echo "associatedEnterpriseCheckYourAnswers.heading = associatedEnterpriseCheckYourAnswers" >> ../conf/messages.en
echo "Migration AssociatedEnterpriseCheckYourAnswers completed"
|
rsync -avzP --update * ericmjl@rous:~/github/protein-convolutional-nets --exclude-from rsync_exclude.txt
rsync -avzP --update ericmjl@rous:~/github/protein-convolutional-nets/* ./ --exclude-from rsync_exclude.txt
|
#!/bin/bash
# LICENSE UPL 1.0
#
# Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
#
# Since: January, 2018
# Author: [email protected], [email protected]
# Description: Add a Grid node and add Oracle Database instance based on following parameters:
# $PUBLIC_HOSTNAME
# $PUBLIC_IP
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
#
####################### Variables and Constants #################
declare -r FALSE=1
declare -r TRUE=0
declare -x GRID_USER='grid' ## Default grid user is grid.
declare -x DB_USER='oracle' ## default oracle user is oracle.
declare -r ETCHOSTS="/etc/hosts" ## /etc/hosts file location.
declare -r RAC_ENV_FILE="/etc/rac_env_vars" ## RACENV FILE NAME
declare -x GIMR_DB_FLAG='false' ## GIMR DB Check by default is false
declare -x DOMAIN ## Domain name will be computed based on hostname -d, otherwise pass it as env variable.
declare -x PUBLIC_IP ## Computed based on Node name.
declare -x PUBLIC_HOSTNAME ## PUBLIC HOSTNAME set based on hostname
declare -x EXISTING_CLS_NODE ## Computed during the program execution.
declare -x EXISTING_CLS_NODES ## You must pass all the existing nodes of the cluster as a comma separated string. Otherwise installation will fail.
declare -x DHCP_CONF='false' ## Pass env variable where value set to true for DHCP based installation.
declare -x NODE_VIP ## Pass it as env variable.
declare -x VIP_HOSTNAME ## Pass as env variable.
declare -x SCAN_NAME ## Pass it as env variable.
declare -x SCAN_IP ## Pass as env variable if you do not have DNS server. Otherwise, do not pass this variable.
declare -x SINGLENIC='false' ## Default value is false as we should use 2 nics if possible for better performance.
declare -x PRIV_IP ## Pass PRIV_IP is not using SINGLE NIC
declare -x CONFIGURE_GNS='false' ## Default value set to false. However, under DSC checks, it is reverted to true.
declare -x COMMON_SCRIPTS ## COMMON SCRIPT Locations. Pass this env variable if you have custom responsefile for grid and other scripts for DB.
declare -x PRIV_HOSTNAME ## if SINGLENIC=true then PRIV and PUB hostname will be same. Otherwise pass it as env variable.
declare -x CMAN_HOSTNAME ## If you want to use connection manager to proxy the DB connections
declare -x CMAN_IP ## CMAN_IP if you want to use connection manager to proxy the DB connections
declare -x OS_PASSWORD ## if not passed as env variable, it will be set to PASSWORD
declare -x GRID_PASSWORD ## if not passed as env variable , it will be set to OS_PASSWORD
declare -x ORACLE_PASSWORD ## if not passed as env variable, it will be set to OS_PASSWORD
declare -x PASSWORD ## If not passed as env variable , it will be set as system generated password
declare -x CLUSTER_TYPE='STANDARD' ## Default installation is STANDARD. You can pass DOMAIN or MEMBERDB.
declare -x GRID_RESPONSE_FILE ## IF you pass this env variable then user based responsefile will be used. default location is COMMON_SCRIPTS.
declare -x SCRIPT_ROOT ## SCRIPT_ROOT will be set as per your COMMON_SCRIPTS. Do not pass env variable SCRIPT_ROOT.
declare -r OSDBA='dba'
declare -r OSASM='asmadmin'
declare -r INSTALL_TYPE='CRS_ADDNODE'
declare -r IPMI_FLAG='false'
declare -r ASM_STORAGE_OPTION='ASM'
declare -r GIMR_ON_NAS='false'
declare -x SCAN_TYPE='LOCAL_SCAN'
declare -x SHARED_SCAN
declare -x DB_ASM_DISKGROUP='DATA'
declare -x CONFIGURE_AFD_FLAG='false'
declare -x CONFIGURE_RHPS_FLAG='false'
declare -x EXECUTE_ROOT_SCRIPT_FLAG='false'
declare -x EXECUTE_ROOT_SCRIPT_METHOD='ROOT'
declare -x IGNORE_CVU_CHECKS='true' ## Ignore CVU Checks
declare -x SECRET_VOLUME='/run/secrets/' ## Secret Volume
declare -x PWD_KEY='pwd.key' ## PWD Key File
declare -x ORACLE_PWD_FILE
declare -x GRID_PWD_FILE
declare -x REMOVE_OS_PWD_FILES='false'
declare -x COMMON_OS_PWD_FILE='common_os_pwdfile.enc'
declare -x CRS_CONFIG_NODES
declare -x ANSIBLE_INSTALL='false'
declare -x RUN_DBCA='true'
progname=$(basename "$0")
###################### Variables and Constants declaration ends here ####################
############Sourcing Env file##########
if [ -f "/etc/rac_env_vars" ]; then
source "/etc/rac_env_vars"
fi
##########Source ENV file ends here####
###################Capture Process id and source functions.sh###############
source "$SCRIPT_DIR/functions.sh"
###########################sourcing of functions.sh ends here##############
####error_exit function sends a TERM signal, which is caught by trap command and returns exit status 15"####
trap '{ exit 15; }' TERM
###########################trap code ends here##########################
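## error_exit and print_message come from functions.sh (not shown here); a minimal sketch of the
## assumed error_exit shape would be:
##   error_exit() { print_message "${1:-"Unknown Error"}"; kill -s TERM $$; }
## i.e. it reports the error and sends SIGTERM, which the trap above turns into exit status 15.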
all_check()
{
check_pub_host_name
check_cls_node_names
check_ip_env_vars
check_passwd_env_vars
check_rspfile_env_vars
check_db_env_vars
}
#####################Function related to public hostname, IP and domain name check begin here ########
check_pub_host_name()
{
local domain_name
local stat
if [ -z "${PUBLIC_IP}" ]; then
PUBLIC_IP=$(dig +short "$(hostname)")
print_message "Public IP is set to ${PUBLIC_IP}"
else
print_message "Public IP is set to ${PUBLIC_IP}"
fi
if [ -z "${PUBLIC_HOSTNAME}" ]; then
PUBLIC_HOSTNAME=$(hostname)
print_message "RAC Node PUBLIC Hostname is set to ${PUBLIC_HOSTNAME}"
else
print_message "RAC Node PUBLIC Hostname is set to ${PUBLIC_HOSTNAME}"
fi
if [ -z "${DOMAIN}" ]; then
domain_name=$(hostname -d)
if [ -z "${domain_name}" ];then
print_message "Domain name is not defined. Setting Domain to 'example.com'"
DOMAIN="example.com"
else
DOMAIN=${domain_name}
fi
else
print_message "Domain is defined to $DOMAIN"
fi
}
############### Function related to public hostname, IP and domain checks ends here ##########
############## Function related to check existing cls nodes begin here #######################
check_cls_node_names()
{
if [ -z "${EXISTING_CLS_NODES}" ]; then
error_exit "For Node Addition, please provide the existing clustered node name."
else
if isStringExist ${EXISTING_CLS_NODES} ${PUBLIC_HOSTNAME}; then
error_exit "EXISTING_CLS_NODES ${EXISTING_CLS_NODES} contains new node name ${PUBLIC_HOSTNAME}"
fi
print_message "Setting Existing Cluster Node for node addition operation. This will be retrieved from ${EXISTING_CLS_NODES}"
EXISTING_CLS_NODE="$( cut -d ',' -f 1 <<< "$EXISTING_CLS_NODES" )"
if [ -z "${EXISTING_CLS_NODE}" ]; then
error_exit " Existing Node Name of the cluster not set or set to empty string"
else
print_message "Existing Node Name of the cluster is set to ${EXISTING_CLS_NODE}"
if resolveip ${EXISTING_CLS_NODE}; then
print_message "Existing Cluster node resolved to IP. Check passed"
else
error_exit "Existing Cluster node does not resolved to IP. Check Failed"
fi
fi
fi
}
############## Function related to check existing cls nodes ends here #######################
check_ip_env_vars ()
{
if [ "${DHCP_CONF}" != 'true' ]; then
print_message "Default setting of AUTO GNS VIP set to false. If you want to use AUTO GNS VIP, please pass DHCP_CONF as an env parameter set to true"
DHCP_CONF=false
if [ -z "${NODE_VIP}" ]; then
error_exit "RAC Node ViP is not set or set to empty string"
else
print_message "RAC VIP set to ${NODE_VIP}"
fi
if [ -z "${VIP_HOSTNAME}" ]; then
error_exit "RAC Node Vip hostname is not set ot set to empty string"
else
print_message "RAC Node VIP hostname is set to ${VIP_HOSTNAME} "
fi
if [ -z ${SCAN_NAME} ]; then
print_message "SCAN_NAME set to the empty string"
else
print_message "SCAN_NAME name is ${SCAN_NAME}"
fi
if resolveip ${SCAN_NAME}; then
print_message "SCAN Name resolving to IP. Check Passed!"
else
error_exit "SCAN Name not resolving to IP. Check Failed!"
fi
if [ -z ${SCAN_IP} ]; then
print_message "SCAN_IP set to the empty string"
else
print_message "SCAN_IP name is ${SCAN_IP}"
fi
fi
if [ "${SINGLENIC}" == 'true' ];then
PRIV_IP=${PUBLIC_IP}
PRIV_HOSTNAME=${PUBLIC_HOSTNAME}
fi
if [ -z "${PRIV_IP}" ]; then
error_exit "RAC Node private ip is not set ot set to empty string"
else
print_message "RAC Node PRIV IP is set to ${PRIV_IP} "
fi
if [ -z "${PRIV_HOSTNAME}" ]; then
error_exit "RAC Node private hostname is not set ot set to empty string"
else
print_message "RAC Node private hostname is set to ${PRIV_HOSTNAME}"
fi
if [ -z ${CMAN_HOSTNAME} ]; then
print_message "CMAN_NAME set to the empty string"
else
print_message "CMAN_HOSTNAME name is ${CMAN_HOSTNAME}"
fi
if [ -z ${CMAN_IP} ]; then
print_message "CMAN_IP set to the empty string"
else
print_message "CMAN_IP name is ${CMAN_IP}"
fi
}
################check ip env vars function ends here ############################
################ Check passwd env vars function begin here ######################
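## The encrypted secrets under ${SECRET_VOLUME} are expected to have been produced with the
## matching openssl command, for example (illustrative file names):
##   openssl enc -e -aes-256-cbc -in common_os_pwdfile -out common_os_pwdfile.enc -pass file:pwd.key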
check_passwd_env_vars()
{
if [ -f "${SECRET_VOLUME}/${COMMON_OS_PWD_FILE}" ]; then
cmd='openssl enc -d -aes-256-cbc -in "${SECRET_VOLUME}/${COMMON_OS_PWD_FILE}" -out /tmp/${COMMON_OS_PWD_FILE} -pass file:"${SECRET_VOLUME}/${PWD_KEY}"'
eval $cmd
if [ $? -eq 0 ]; then
print_message "Password file generated"
else
error_exit "Error occurred during common os password file generation"
fi
read PASSWORD < /tmp/${COMMON_OS_PWD_FILE}
rm -f /tmp/${COMMON_OS_PWD_FILE}
else
print_message "Password is empty string"
PASSWORD=O$(openssl rand -base64 6 | tr -d "=+/")_1
fi
if [ ! -z "${GRID_PWD_FILE}" ]; then
cmd='openssl enc -d -aes-256-cbc -in "${SECRET_VOLUME}/${GRID_PWD_FILE}" -out "/tmp/${GRID_PWD_FILE}" -pass file:"${SECRET_VOLUME}/${PWD_KEY}"'
eval $cmd
if [ $? -eq 0 ]; then
print_message "Password file generated"
else
error_exit "Error occurred during Grid password file generation"
fi
read GRID_PASSWORD < /tmp/${GRID_PWD_FILE}
rm -f /tmp/${GRID_PWD_FILE}
else
GRID_PASSWORD="${PASSWORD}"
print_message "Common OS Password string is set for Grid user"
fi
if [ ! -z "${ORACLE_PWD_FILE}" ]; then
cmd='openssl enc -d -aes-256-cbc -in "${SECRET_VOLUME}/${ORACLE_PWD_FILE}" -out "/tmp/${ORACLE_PWD_FILE}" -pass file:"${SECRET_VOLUME}/${PWD_KEY}"'
eval $cmd
if [ $? -eq 0 ]; then
print_message "Password file generated"
else
error_exit "Error occurred during Oracle password file generation"
fi
read ORACLE_PASSWORD < /tmp/${ORACLE_PWD_FILE}
rm -f /tmp/${ORACLE_PWD_FILE}
else
ORACLE_PASSWORD="${PASSWORD}"
print_message "Common OS Password string is set for Oracle user"
fi
if [ "${REMOVE_OS_PWD_FILES}" == 'true' ]; then
rm -f ${SECRET_VOLUME}/${COMMON_OS_PWD_FILE}
rm -f ${SECRET_VOLUME}/${PWD_KEY}
fi
}
############### Check password env vars function ends here ########################
############### Check grid Response file function begin here ######################
check_rspfile_env_vars ()
{
if [ -z "${GRID_RESPONSE_FILE}" ];then
print_message "GRID_RESPONSE_FILE env variable set to empty. $progname will use standard cluster responsefile"
else
if [ -f $COMMON_SCRIPTS/$GRID_RESPONSE_FILE ];then
cp $COMMON_SCRIPTS/$GRID_RESPONSE_FILE $logdir/$GRID_RESPONSE_FILE
else
error_exit "$COMMON_SCRIPTS/$GRID_RESPONSE_FILE does not exist"
fi
fi
if [ -z "${SCRIPT_ROOT}" ]; then
SCRIPT_ROOT=$COMMON_SCRIPTS
print_message "Location for User script SCRIPT_ROOT set to $COMMON_SCRIPTS"
else
print_message "Location for User script SCRIPT_ROOT set to $SCRIPT_ROOT"
fi
}
############ Check responsefile function end here ######################
########### Check db env vars function begin here #######################
check_db_env_vars ()
{
if [ $CLUSTER_TYPE == 'MEMBERDB' ]; then
print_message "Checking StorageOption for MEMBERDB Cluster"
if [ -z "${STORAGE_OPTIONS_FOR_MEMBERDB}" ]; then
print_message "Storage Options is set to STORAGE_OPTIONS_FOR_MEMBERDB"
else
print_message "Storage Options is set to STORAGE_OPTIONS_FOR_MEMBERDB"
fi
fi
if [ -z "${ORACLE_SID}" ]; then
print_message "ORACLE_SID is not defined"
else
print_message "ORACLE_SID is set to $ORACLE_SID"
fi
}
################# Check db env vars end here ##################################
################ All Check Functions end here #####################################
########################################### SSH Function begin here ########################
setupSSH()
{
local password
local ssh_pid
local stat
if [ -z $CRS_NODES ]; then
CRS_NODES=$PUBLIC_HOSTNAME
fi
IFS=', ' read -r -a CLUSTER_NODES <<< "$EXISTING_CLS_NODES"
EXISTING_CLS_NODES+=",$CRS_NODES"
CLUSTER_NODES=$(echo $EXISTING_CLS_NODES | tr ',' ' ')
print_message "Cluster Nodes are $CLUSTER_NODES"
print_message "Running SSH setup for $GRID_USER user between nodes ${CLUSTER_NODES}"
cmd='su - $GRID_USER -c "$EXPECT $SCRIPT_DIR/$SETUPSSH $GRID_USER \"$GRID_HOME/oui/prov/resources/scripts\" \"${CLUSTER_NODES}\" \"$GRID_PASSWORD\""'
(eval $cmd) &
ssh_pid=$!
wait $ssh_pid
stat=$?
if [ "${stat}" -ne 0 ]; then
error_exit "ssh setup for Grid user failed!, please make sure you have pass the corect password. You need to make sure that password must be same on all the clustered nodes or the nodes set in existing_cls_nodes env variable for $GRID_USER user"
fi
print_message "Running SSH setup for $DB_USER user between nodes ${CLUSTER_NODES[@]}"
cmd='su - $DB_USER -c "$EXPECT $SCRIPT_DIR/$SETUPSSH $DB_USER \"$DB_HOME/oui/prov/resources/scripts\" \"${CLUSTER_NODES}\" \"$ORACLE_PASSWORD\""'
(eval $cmd) &
ssh_pid=$!
wait $ssh_pid
stat=$?
if [ "${stat}" -ne 0 ]; then
error_exit "ssh setup for Oracle user failed!, please make sure you have pass the corect password. You need to make sure that password must be same on all the clustered nodes or the nodes set in existing_cls_nodes env variable for $DB_USER user"
fi
}
checkSSH ()
{
local password
local ssh_pid
local stat
local status
IFS=', ' read -r -a CLUSTER_NODES <<< "$EXISTING_CLS_NODES"
EXISTING_CLS_NODES+=",$PUBLIC_HOSTNAME"
CLUSTER_NODES=$(echo $EXISTING_CLS_NODES | tr ',' ' ')
cmd='su - $GRID_USER -c "ssh -o BatchMode=yes -o ConnectTimeout=5 $GRID_USER@$node echo ok 2>&1"'
echo $cmd
for node in ${CLUSTER_NODES}
do
status=$(eval $cmd)
if [[ $status == ok ]] ; then
print_message "SSH check fine for the $node"
elif [[ $status == "Permission denied"* ]] ; then
error_exit "SSH check failed for the $GRID_USER@$node beuase of permission denied error! SSH setup did not complete sucessfully"
else
error_exit "SSH check failed for the $GRID_USER@$node! Error occurred during SSH setup"
fi
done
status="NA"
cmd='su - $DB_USER -c "ssh -o BatchMode=yes -o ConnectTimeout=5 $DB_USER@$node echo ok 2>&1"'
echo $cmd
for node in ${CLUSTER_NODES}
do
status=$(eval $cmd)
if [[ $status == ok ]] ; then
print_message "SSH check fine for the $DB_USER@$node"
elif [[ $status == "Permission denied"* ]] ; then
error_exit "SSH check failed for the $DB_USER@$node becuase of permission denied error! SSH setup did not complete sucessfully"
else
error_exit "SSH check failed for the $DB_USER@$node! Error occurred during SSH setup"
fi
done
}
###################################### SSH Function End here ####################################
######################Add Node Functions ####################################
runorainstroot()
{
$INVENTORY/orainstRoot.sh
}
runrootsh ()
{
local ORACLE_HOME=$1
local USER=$2
if [ -z $CRS_NODES ]; then
CLUSTER_NODES=$PUBLIC_HOSTNAME
else
IFS=', ' read -r -a CLUSTER_NODES <<< "$CRS_NODES"
fi
print_message "Nodes in the cluster ${CLUSTER_NODES[@]}"
for node in "${CLUSTER_NODES[@]}"; do
cmd='su - $USER -c "ssh $node sudo $ORACLE_HOME/root.sh"'
eval $cmd
done
}
generate_response_file ()
{
cp $SCRIPT_DIR/$ADDNODE_RSP $logdir/$ADDNODE_RSP
chmod 666 $logdir/$ADDNODE_RSP
if [ -z "${GRID_RESPONSE_FILE}" ]; then
if [ -z ${CRS_CONFIG_NODES} ]; then
CRS_CONFIG_NODES="$PUBLIC_HOSTNAME:$VIP_HOSTNAME:HUB"
print_message "Clustered Nodes are set to $CRS_CONFIG_NODES"
fi
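# The sed commands below fill the ###PLACEHOLDER### tokens in the copied addnode responsefile
# template with the runtime values computed above.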
sed -i -e "s|###INVENTORY###|$INVENTORY|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###GRID_BASE###|$GRID_BASE|g" $logdir/$ADDNODE_RSP
sed -i -r "s|###PUBLIC_HOSTNAME###|$PUBLIC_HOSTNAME|g" $logdir/$ADDNODE_RSP
sed -i -r "s|###HOSTNAME_VIP###|$VIP_HOSTNAME|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###INSTALL_TYPE###|$INSTALL_TYPE|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###OSDBA###|$OSDBA|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###OSOPER###|$OSOPER|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###OSASM###|$OSASM|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###SCAN_TYPE###|$SCAN_TYPE|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###SHARED_SCAN_FILE###|$SHARED_SCAN_FILE|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###DB_ASM_DISKGROUP###|$DB_ASM_DISKGROUP|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###CONFIGURE_AFD_FLAG###|$CONFIGURE_AFD_FLAG|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###CONFIGURE_RHPS_FLAG###|$CONFIGURE_RHPS_FLAG|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###EXECUTE_ROOT_SCRIPT_FLAG###|$EXECUTE_ROOT_SCRIPT_FLAG|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###EXECUTE_ROOT_SCRIPT_METHOD###|$EXECUTE_ROOT_SCRIPT_METHOD|g" $logdir/$ADDNODE_RSP
sed -i -e "s|###CRS_CONFIG_NODES###|$CRS_CONFIG_NODES|g" $logdir/$ADDNODE_RSP
fi
}
###### Cluster Verification function #######
CheckRemoteCluster ()
{
local cmd;
local stat;
local node=$EXISTING_CLS_NODE
local oracle_home=$GRID_HOME
local ORACLE_HOME=$GRID_HOME
print_message "Checking Cluster"
cmd='su - $GRID_USER -c "ssh $node \"$ORACLE_HOME/bin/crsctl check crs\""'
eval $cmd
if [ $? -eq 0 ];then
print_message "Cluster Check on remote node passed"
else
error_exit "Cluster Check on remote node failed"
fi
cmd='su - $GRID_USER -c "ssh $node \"$ORACLE_HOME/bin/crsctl check cluster\""'
eval $cmd
if [ $? -eq 0 ]; then
print_message "Cluster Check went fine"
else
error_exit "Cluster Check failed!"
fi
if [ ${GIMR_DB_FLAG} == 'true' ]; then
cmd='su - $GRID_USER -c "ssh $node \"$ORACLE_HOME/bin/srvctl status mgmtdb\""'
eval $cmd
if [ $? -eq 0 ]; then
print_message "MGMTDB Check went fine"
else
error_exit "MGMTDB Check failed!"
fi
fi
cmd='su - $GRID_USER -c "ssh $node \"$ORACLE_HOME/bin/crsctl check crsd\""'
eval $cmd
if [ $? -eq 0 ]; then
print_message "CRSD Check went fine"
else
error_exit "CRSD Check failed!"
fi
cmd='su - $GRID_USER -c "ssh $node \"$ORACLE_HOME/bin/crsctl check cssd\""'
eval $cmd
if [ $? -eq 0 ]; then
print_message "CSSD Check went fine"
else
error_exit "CSSD Check failed!"
fi
cmd='su - $GRID_USER -c "ssh $node \"$ORACLE_HOME/bin/crsctl check evmd\""'
eval $cmd
if [ $? -eq 0 ]; then
print_message "EVMD Check went fine"
else
error_exit "EVMD Check failed"
fi
}
setDevicePermissions ()
{
local cmd
local state=3
if [ -z $CRS_NODES ]; then
CLUSTER_NODES=$PUBLIC_HOSTNAME
else
IFS=', ' read -r -a CLUSTER_NODES <<< "$CRS_NODES"
fi
print_message "Nodes in the cluster ${CLUSTER_NODES[@]}"
for node in "${CLUSTER_NODES[@]}"; do
print_message "Setting Device permissions for RAC Install on $node"
if [ ! -z "${GIMR_DEVICE_LIST}" ];then
print_message "Preapring GIMR Device list"
IFS=', ' read -r -a devices <<< "$GIMR_DEVICE_LIST"
local arr_device=${#devices[@]}
if [ $arr_device -ne 0 ]; then
for device in "${devices[@]}"
do
print_message "Changing Disk permission and ownership"
cmd='su - $GRID_USER -c "ssh $node sudo chown $GRID_USER:asmadmin $device"'
print_message "Command : $cmd execute on $node"
eval $cmd
unset cmd
cmd='su - $GRID_USER -c "ssh $node sudo chmod 660 $device"'
print_message "Command : $cmd execute on $node"
eval $cmd
unset cmd
print_message "Populate Rac Env Vars on Remote Hosts"
cmd='su - $GRID_USER -c "ssh $node sudo echo \"export GIMR_DEVICE_LIST=${GIMR_DEVICE_LIST}\" >> /etc/rac_env_vars"'
print_message "Command : $cmd execute on $node"
eval $cmd
unset cmd
done
fi
fi
if [ ! -z "${ASM_DEVICE_LIST}" ];then
print_message "Preapring ASM Device list"
IFS=', ' read -r -a devices <<< "$ASM_DEVICE_LIST"
local arr_device=${#devices[@]}
if [ $arr_device -ne 0 ]; then
for device in "${devices[@]}"
do
print_message "Changing Disk permission and ownership"
cmd='su - $GRID_USER -c "ssh $node sudo chown $GRID_USER:asmadmin $device"'
print_message "Command : $cmd execute on $node"
eval $cmd
unset cmd
cmd='su - $GRID_USER -c "ssh $node sudo chmod 660 $device"'
print_message "Command : $cmd execute on $node"
eval $cmd
unset cmd
print_message "Populate Rac Env Vars on Remote Hosts"
cmd='su - $GRID_USER -c "ssh $node sudo echo \"export ASM_DEVICE_LIST=${ASM_DEVICE_LIST}\" >> /etc/rac_env_vars"'
print_message "Command : $cmd execute on $node"
eval $cmd
unset cmd
done
fi
fi
done
}
checkCluster ()
{
local cmd;
local stat;
local oracle_home=$GRID_HOME
print_message "Checking Cluster"
cmd='su - $GRID_USER -c "$GRID_HOME/bin/crsctl check crs"'
eval $cmd
if [ $? -eq 0 ];then
print_message "Cluster Check passed"
else
error_exit "Cluster Check failed"
fi
cmd='su - $GRID_USER -c "$GRID_HOME/bin/crsctl check cluster"'
eval $cmd
if [ $? -eq 0 ]; then
print_message "Cluster Check went fine"
else
error_exit "Cluster Check failed!"
fi
if [ ${GIMR_DB_FLAG} == 'true' ]; then
cmd='su - $GRID_USER -c "$GRID_HOME/bin/srvctl status mgmtdb"'
eval $cmd
if [ $? -eq 0 ]; then
print_message "MGMTDB Check went fine"
else
error_exit "MGMTDB Check failed!"
fi
fi
cmd='su - $GRID_USER -c "$GRID_HOME/bin/crsctl check crsd"'
eval $cmd
if [ $? -eq 0 ]; then
print_message "CRSD Check went fine"
else
error_exit "CRSD Check failed!"
fi
cmd='su - $GRID_USER -c "$GRID_HOME/bin/crsctl check cssd"'
eval $cmd
if [ $? -eq 0 ]; then
print_message "CSSD Check went fine"
else
error_exit "CSSD Check failed!"
fi
cmd='su - $GRID_USER -c "$GRID_HOME/bin/crsctl check evmd"'
eval $cmd
if [ $? -eq 0 ]; then
print_message "EVMD Check went fine"
else
error_exit "EVMD Check failed"
fi
print_message "Removing $logdir/cluvfy_check.txt as cluster check has passed"
rm -f $logdir/cluvfy_check.txt
}
checkClusterClass ()
{
print_message "Checking Cluster Class"
local cluster_class
cmd='su - $GRID_USER -c "$GRID_HOME/bin/crsctl get cluster class"'
cluster_class=$(eval $cmd)
print_message "Cluster class is $cluster_class"
CLUSTER_TYPE=$(echo $cluster_class | awk -F \' '{ print $2 }' | awk '{ print $1 }')
}
###### Grid install & Cluster Verification utility Function #######
cluvfyCheck()
{
local node=$EXISTING_CLS_NODE
local responsefile=$logdir/$ADDNODE_RSP
local hostname=$PUBLIC_HOSTNAME
local vip_hostname=$VIP_HOSTNAME
local cmd
local stat
if [ -z $CRS_NODES ]; then
CLUSTER_NODES=$PUBLIC_HOSTNAME
else
IFS=', ' read -r -a CLUSTER_NODES <<< "$CRS_NODES"
fi
if [ -f "$logdir/cluvfy_check.txt" ]; then
print_message "Moving any exisiting cluvfy $logdir/cluvfy_check.txt to $logdir/cluvfy_check_$TIMESTAMP.txt"
mv $logdir/cluvfy_check.txt $logdir/cluvfy_check."$(date +%Y%m%d-%H%M%S)".txt
fi
#cmd='su - $GRID_USER -c "ssh $node \"$GRID_HOME/runcluvfy.sh stage -pre nodeadd -n $hostname -vip $vip_hostname\" | tee -a $logdir/cluvfy_check.txt"'
#eval $cmd
print_message "Nodes in the cluster ${CLUSTER_NODES[@]}"
for cls_node in "${CLUSTER_NODES[@]}"; do
print_message "ssh to the node $node and executing cvu checks on $cls_node"
cmd='su - $GRID_USER -c "ssh $node \"$GRID_HOME/runcluvfy.sh stage -pre nodeadd -n $cls_node\" | tee -a $logdir/cluvfy_check.txt"'
eval $cmd
done
print_message "Checking $logdir/cluvfy_check.txt if there is any failed check."
FAILED_CMDS=$(sed -n -f - $logdir/cluvfy_check.txt << EOF
/.*FAILED.*/ {
p
}
EOF
)
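# The sed program above simply prints every line containing FAILED, i.e. it is equivalent to
# grep 'FAILED' "$logdir/cluvfy_check.txt".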
cat $logdir/cluvfy_check.txt > $STD_OUT_FILE
if [[ ${IGNORE_CVU_CHECKS} == 'true' ]]; then
print_message "CVU Checks are ignored as IGNORE_CVU_CHECKS set to true. It is recommended to set IGNORE_CVU_CHECKS to false and meet all the cvu checks requirement. RAC installation might fail, if there are failed cvu checks."
else
if [[ $FAILED_CMDS =~ .*FAILED*. ]]
then
print_message "cluvfy failed for following \n $FAILED_CMDS"
error_exit "Pre Checks failed for Grid installation, please check $logdir/cluvfy_check.txt"
fi
fi
}
addGridNode ()
{
local node=$EXISTING_CLS_NODE
local responsefile=$logdir/$ADDNODE_RSP
local hostname=$PUBLIC_HOSTNAME
local vip_hostname=$VIP_HOSTNAME
local cmd
local stat
print_message "Copying $responsefile on remote node $node"
cmd='su - $GRID_USER -c "scp $responsefile $node:$logdir"'
eval $cmd
print_message "Running GridSetup.sh on $node to add the node to existing cluster"
cmd='su - $GRID_USER -c "ssh $node \"$GRID_HOME/gridSetup.sh -silent -waitForCompletion -noCopy -skipPrereqs -responseFile $responsefile\" | tee -a $logfile"'
eval $cmd
print_message "Node Addition performed. removing Responsefile"
rm -f $responsefile
cmd='su - $GRID_USER -c "ssh $node \"rm -f $responsefile\""'
#eval $cmd
}
###########DB Node Addition Functions##############
addDBNode ()
{
local node=$EXISTING_CLS_NODE
if [ -z $CRS_NODES ]; then
new_node_hostname=$PUBLIC_HOSTNAME
else
new_node_hostname=$CRS_NODES
fi
local stat=3
local cmd
cmd='su - $DB_USER -c "ssh $node \"$DB_HOME/addnode/addnode.sh \"CLUSTER_NEW_NODES={$new_node_hostname}\" -skipPrereqs -waitForCompletion -ignoreSysPrereqs -noCopy -silent\" | tee -a $logfile"'
eval $cmd
if [ $? -eq 0 ]; then
print_message "Node Addition went fine for $new_node_hostname"
else
error_exit "Node Addition failed for $new_node_hostname"
fi
}
addDBInst ()
{
# Check whether ORACLE_SID is passed on
local HOSTNAME=$PUBLIC_HOSTNAME
local node=$EXISTING_CLS_NODE
local stat=3
local cmd
if [ -z $CRS_NODES ]; then
CLUSTER_NODES=$PUBLIC_HOSTNAME
else
CLUSTER_NODES=$( echo $CRS_NODES | tr ',' ' ' )
fi
if [ -z "${ORACLE_SID}" ];then
error_exit "ORACLE SID is not defined. Cannot Add Instance"
fi
if [ -z "${HOSTNAME}" ]; then
error_exit "Hostname is not defined"
fi
for new_node in "${CLUSTER_NODES[@]}"; do
print_message "Adding DB Instance on $node"
cmd='su - $DB_USER -c "ssh $node \"$DB_HOME/bin/dbca -addInstance -silent -nodeName $new_node -gdbName $ORACLE_SID\" | tee -a $logfile"'
eval $cmd
done
}
checkDBStatus ()
{
local status
if [ -f "/tmp/db_status.txt" ]; then
status=$(cat /tmp/db_status.txt)
else
status="NOT OPEN"
fi
rm -f /tmp/db_status.txt
# SQL Plus execution was successful and database is open
if [ "$status" = "OPEN" ]; then
print_message "#################################################################"
print_message " Oracle Database $ORACLE_SID is up and running on $(hostname) "
print_message "#################################################################"
# Database is not open
else
error_exit "$ORACLE_SID is not up and running on $(hostname)"
fi
}
setremotelistener ()
{
local status
local cmd
if resolveip $CMAN_HOSTNAME; then
print_message "Executing script to set the remote listener"
su - $DB_USER -c "$SCRIPT_DIR/$REMOTE_LISTENER_FILE $ORACLE_SID $SCAN_NAME $CMAN_HOSTNAME.$DOMAIN"
fi
}
########################## DB Functions End here ##########################
###################################
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #
############# MAIN ################
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #
###################################
###### Etc Host and other Checks and setup before proceeding installation #####
all_check
print_message "Setting random password for root/$GRID_USER/$DB_USER user"
print_message "Setting random password for $GRID_USER user"
setpasswd $GRID_USER $GRID_PASSWORD
print_message "Setting random password for $DB_USER user"
setpasswd $DB_USER $ORACLE_PASSWORD
print_message "Setting random password for root user"
setpasswd root $PASSWORD
#### Setting up SSH #######
setupSSH
checkSSH
#### Grid Node Addition #####
print_message "Setting Device permission to grid and asmadmin on all the cluster nodes"
setDevicePermissions
print_message "Checking Cluster Status on $EXISTING_CLS_NODE"
CheckRemoteCluster
print_message "Generating Responsefile for node addition"
generate_response_file
print_message "Running Cluster verification utility for new node $PUBLIC_HOSTNAME on $EXISTING_CLS_NODE"
cluvfyCheck
print_message "Running Node Addition and cluvfy test for node $PUBLIC_HOSTNAME"
addGridNode
print_message "Running root.sh on node $PUBLIC_HOSTNAME"
runrootsh $GRID_HOME $GRID_USER
checkCluster
print_message "Checking Cluster Class"
checkClusterClass
print_message "Running User Script for $GRID_USER user"
su - $GRID_USER -c "$SCRIPT_DIR/$USER_SCRIPTS_FILE $GRID_SCRIPT_ROOT GRID"
###### DB Node Addition ######
if [ "${CLUSTER_TYPE}" != 'Domain' ]; then
if [ "${RUN_DBCA}" == 'true' ]; then
print_message "Performing DB Node addition"
addDBNode
print_message "Running root.sh"
runrootsh $DB_HOME $DB_USER
print_message "Adding DB Instance"
addDBInst
print_message "Checking DB status"
su - $DB_USER -c "$SCRIPT_DIR/$CHECK_DB_FILE $ORACLE_SID"
checkDBStatus
print_message "Running User Script for $DB_USER user"
su - $DB_USER -c "$SCRIPT_DIR/$USER_SCRIPTS_FILE $DB_SCRIPT_ROOT DB"
print_message "Setting Remote Listener"
setremotelistener
fi
fi
echo $TRUE
|
#!/bin/sh
if [ $# -ne 2 ]; then
echo "invalid parameter"
exit 1
fi
DATE=$1
GROUP_ID=$2
which gdate > /dev/null 2>&1
if [ $? -eq 1 ]; then
echo "not found gdate command"
exit 1
fi
gdate +%Y%m%d --date "${DATE}"
if [ $? -eq 1 ]; then
echo "invalid date: ${DATE}"
exit 1
fi
API_URL="http://localhost:9000/programs?date=${DATE}&groupId=${GROUP_ID}"
which jq > /dev/null 2>&1
if [ $? -eq 1 ]; then
echo "not found jq command"
exit 1
fi
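# The jq filter below assumes each returned program object carries start_time and end_time
# (ISO 8601 "date'T'time" strings), channel.name, name and title fields.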
curl "${API_URL}" | jq -r ".[] | \"\(.start_time|split(\"T\")|.[1])〜\(.end_time|split(\"T\")|.[1]) [\(.channel.name)] \(.name) / \(.title)\""
exit 0
|
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/1024+0+512-STWS/model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/1024+0+512-STWS/512+512+512-HPMI-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_within_sentences_high_pmi_first_third_sixth --eval_function last_sixth_eval |
#!/usr/bin/env bash
# Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Exit on non-true return value
set -e
# Exit on reference to uninitialized variable
set -u
set -o pipefail
source $SOURCE_DIR/functions.sh
THIS_DIR="$( cd "$( dirname "$0" )" && pwd )"
prepare $THIS_DIR
if needs_build_package ; then
# Download the dependency from S3
download_dependency $PACKAGE "${PACKAGE_STRING}.tar.gz" $THIS_DIR
setup_package_build $PACKAGE $PACKAGE_VERSION
# Snappy switched to CMake. Detect this and use CMake for newer releases.
if [ -e CMakeLists.txt ]; then
# Snappy's CMake builds either shared or static but not both. Build
# each separately.
mkdir -p build_shared
pushd build_shared
wrap cmake -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=RELEASE \
-DCMAKE_INSTALL_PREFIX=$LOCAL_INSTALL ..
wrap make -C . -j${BUILD_THREADS:-4} install
popd
mkdir -p build_static
pushd build_static
wrap cmake -DCMAKE_BUILD_TYPE=RELEASE -DCMAKE_INSTALL_PREFIX=$LOCAL_INSTALL ..
wrap make -C . -j${BUILD_THREADS:-4} install
popd
else
wrap ./configure --with-pic --prefix=$LOCAL_INSTALL
wrap make -j${BUILD_THREADS:-4} install
fi
finalize_package_build $PACKAGE $PACKAGE_VERSION
fi
|
#!/bin/ash -xe
get_from_event() {
jq -r "$1" "${GITHUB_EVENT_PATH}"
}
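# e.g. get_from_event '.repository.full_name' prints the "owner/repo" slug from the event payload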
if jq --exit-status '.inputs.deployment_id' "$GITHUB_EVENT_PATH" >/dev/null; then
MONOPOLIS_URL="https://github-api.monopolis.cloud/rollout/start/$(get_from_event '.repository.full_name')/$(get_from_event '.inputs.deployment_id')"
echo ::set-output name=environment_map::$(curl --fail -X POST "${MONOPOLIS_URL}" -H "Authorization: Bearer ${GITHUB_TOKEN}")
else
echo ::set-output name=environment_map::"{}"
fi
|
#!/usr/bin/env bash
#shellcheck disable=SC2128
#shellcheck source=/dev/null
set -x
source ../dapp-test-common.sh
source "../x2ethereum/publicTest.sh"
sendAddress="12qyocayNF7Lv6C9qW4avxs2E7U41fKSfv"
sendPriKey="0x4257d8692ef7fe13c68b65d6a52f03933db2fa5ce8faf210b5b8b80c721ced01"
MAIN_HTTP=""
chain33SenderAddr="14KEKbYtKKQm4wMthSK9J4La4nAiidGozt"
# validatorsAddr=["0x92c8b16afd6d423652559c6e266cbe1c29bfd84f", "0x0df9a824699bc5878232c9e612fe1a5346a5a368", "0xcb074cb21cdddf3ce9c3c0a7ac4497d633c9d9f1", "0xd9dab021e74ecf475788ed7b61356056b2095830"]
ethValidatorAddrKeyA="3fa21584ae2e4fd74db9b58e2386f5481607dfa4d7ba0617aaa7858e5025dc1e"
ethValidatorAddrKeyB="a5f3063552f4483cfc20ac4f40f45b798791379862219de9e915c64722c1d400"
ethValidatorAddrKeyC="bbf5e65539e9af0eb0cfac30bad475111054b09c11d668fc0731d54ea777471e"
ethValidatorAddrKeyD="c9fa31d7984edf81b8ef3b40c761f1847f6fcd5711ab2462da97dc458f1f896b"
# chain33 10 bty
chain33Validator1="1H4zzzQEQQR2FxXwppiMRXcvqLvqzxK2nv"
chain33Validator2="1Nq5AhTgVNvYaWQqih8ZQQEaRk3CFhTDHp"
chain33Validator3="16nmxjF58z5oKK9m44cGy241zMSJWPN1Ty"
chain33Validator4="182nAEMxF1JWWxEWdu4jvd68aZhQumS97H"
chain33ValidatorKey1="0x260124d9c619b0088241ffe2f1d7dc56b0b6100c88c342040387cd62b8ba35a3"
chain33ValidatorKey2="0x7812f8c688048943f1c168f8f2f76f44912de1f0ff8b12358b213118081869b2"
chain33ValidatorKey3="0xd44c8f3d8cac5d9c7fef7b0a0bf7be0909372ec6368064f742193de0bddeb2d1"
chain33ValidatorKey4="0xaad36689ca332026d4a4ceee62c8a91bac7bc100906b25a181a7f28b8552b53e"
ethReceiverAddr1="0xa4ea64a583f6e51c3799335b28a8f0529570a635"
ethReceiverAddrKey1="355b876d7cbcb930d5dfab767f66336ce327e082cbaa1877210c1bae89b1df71"
ethReceiverAddr2="0x0c05ba5c230fdaa503b53702af1962e08d0c60bf"
#ethReceiverAddrKey2="9dc6df3a8ab139a54d8a984f54958ae0661f880229bf3bdbb886b87d58b56a08"
maturityDegree=5
#portRelayer=19999
ethUrl=""
CLIA_HTTP=""
CLIB_HTTP=""
CLIC_HTTP=""
CLID_HTTP=""
# $1 sendAddress, $2 balance
function queryExecBalance() {
local resp=""
chain33_QueryExecBalance "${1}" "x2ethereum" "$MAIN_HTTP"
# shellcheck disable=SC2155
local balance=$(echo "$resp" | jq -r ".result" | jq ".[].balance")
if [ "${balance}" != "${2}" ]; then
echo_rst "queryExecBalance" "1" "${balance} != ${2}"
fi
}
# $1 chain33Address, $2 balance
function queryChain33Balance() {
local resp=""
chain33_QueryBalance "${1}" "${MAIN_HTTP}"
# shellcheck disable=SC2155
local balance=$(echo $resp | jq -r ".result.execAccount" | jq ".[].account.balance")
if [ "${balance}" != "${2}" ]; then
echo_rst "queryChain33Balance" "1" "${balance} != ${2}"
fi
}
# $1 req , $2 balance
function queryRelayerBalance() {
chain33_Http "${1}" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "GetBalance" ".result.balance"
if [ "${RETURN_RESP}" != "${2}" ]; then
echo_rst "queryRelayerBalance" "1" "${RETURN_RESP} != ${2}"
copyErrLogs
fi
}
# $1 req , $2 balance
function queryChain33X2ethBalance() {
chain33_Http "${req}" ${MAIN_HTTP} '(.error|not) and (.result != null)' "GetBalance" ".result"
# shellcheck disable=SC2155
local balance=$(echo "${RETURN_RESP}" | jq -r ".res" | jq ".[].balance" | sed 's/\"//g')
if [ "${balance}" != "${2}" ]; then
echo_rst "queryChain33X2ethBalance" "1" "${balance} != ${2}"
fi
}
function start_ebrelayerA() {
docker cp "./x2ethereum/relayer.toml" "${dockerNamePrefix}_ebrelayera_rpc_1":/root/relayer.toml
start_docker_ebrelayer "${dockerNamePrefix}_ebrelayera_rpc_1" "/root/ebrelayer" "./x2ethereum/ebrelayera.log"
sleep 5
}
function StartRelayerAndDeploy() {
echo -e "${GRE}=========== $FUNCNAME begin ===========${NOC}"
cp ../x2ethereum/* ./x2ethereum/
for dockerName in ganachetest ebrelayera ebrelayerb ebrelayerc ebrelayerd; do
line=$(delete_line_show "./x2ethereum/docker-compose-x2ethereum.yml" "${dockerName}:")
sed -i ''"${line}"' a \ \ '${dockerName}'_rpc:' "./x2ethereum/docker-compose-x2ethereum.yml"
done
docker-compose -f ./x2ethereum/docker-compose-x2ethereum.yml up --build -d
sleep 5
# change EthProvider url
dockerAddr=$(get_docker_addr "${dockerNamePrefix}_ganachetest_rpc_1")
ethUrl="http://${dockerAddr}:8545"
# relayer.toml
updata_relayer_a_toml "${dockerAddr}" "${dockerNamePrefix}_ebrelayera_rpc_1" "./x2ethereum/relayer.toml"
line=$(delete_line_show "./x2ethereum/relayer.toml" "localhost:9901")
sed -i ''"${line}"' a JrpcBindAddr=":9901"' "./x2ethereum/relayer.toml"
# start ebrelayer A
start_ebrelayerA
ebrelayeraRpcHost=$(get_docker_addr "${dockerNamePrefix}_ebrelayera_rpc_1")
if [[ ${ebrelayeraRpcHost} == "" ]]; then
echo -e "${RED}ebrelayeraRpcHost a is empty${NOC}"
fi
CLIA_HTTP="http://${ebrelayeraRpcHost}:9901"
#
InitAndDeploy
# BridgeRegistry
local req='{"method":"Manager.ShowBridgeRegistryAddr","params":[{}]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "$FUNCNAME" ".result.addr"
local BridgeRegistry="$RETURN_RESP"
# kill ebrelayer A
kill_docker_ebrelayer "${dockerNamePrefix}_ebrelayera_rpc_1"
sleep 1
# relayer.toml
updata_relayer_toml "${BridgeRegistry}" ${maturityDegree} "./x2ethereum/relayer.toml"
#
start_ebrelayerA
# start ebrelayer B C D
for name in b c d; do
local file="./x2ethereum/relayer$name.toml"
cp './x2ethereum/relayer.toml' "${file}"
#
for deleteName in "deployerPrivateKey" "operatorAddr" "validatorsAddr" "initPowers" "deployerPrivateKey" "deploy"; do
delete_line "${file}" "${deleteName}"
done
sed -i 's/x2ethereum/x2ethereum'${name}'/g' "${file}"
pushHost=$(get_docker_addr "${dockerNamePrefix}_ebrelayer${name}_rpc_1")
line=$(delete_line_show "${file}" "pushHost")
sed -i ''"${line}"' a pushHost="http://'"${pushHost}"':20000"' "${file}"
line=$(delete_line_show "${file}" "pushBind")
sed -i ''"${line}"' a pushBind="'"${pushHost}"':20000"' "${file}"
docker cp "${file}" "${dockerNamePrefix}_ebrelayer${name}_rpc_1":/root/relayer.toml
start_docker_ebrelayer "${dockerNamePrefix}_ebrelayer${name}_rpc_1" "/root/ebrelayer" "./x2ethereum/ebrelayer${name}.log"
done
sleep 5
ebrelayeraRpcHost=$(get_docker_addr "${dockerNamePrefix}_ebrelayera_rpc_1")
CLIA_HTTP="http://${ebrelayeraRpcHost}:9901"
ebrelayeraRpcHost=$(get_docker_addr "${dockerNamePrefix}_ebrelayerb_rpc_1")
CLIB_HTTP="http://${ebrelayeraRpcHost}:9901"
ebrelayeraRpcHost=$(get_docker_addr "${dockerNamePrefix}_ebrelayerc_rpc_1")
CLIC_HTTP="http://${ebrelayeraRpcHost}:9901"
ebrelayeraRpcHost=$(get_docker_addr "${dockerNamePrefix}_ebrelayerd_rpc_1")
CLID_HTTP="http://${ebrelayeraRpcHost}:9901"
docker ps -a
echo -e "${GRE}=========== $FUNCNAME end ===========${NOC}"
}
function InitAndDeploy() {
echo -e "${GRE}=========== $FUNCNAME begin ===========${NOC}"
local req='{"method":"Manager.SetPassphase","params":[{"Passphase":"123456hzj"}]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "SetPassphase" ".result"
local req='{"method":"Manager.Unlock","params":["123456hzj"]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "Unlock" ".result"
local req='{"method":"Manager.DeployContrcts","params":[{}]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "$FUNCNAME" ".result"
echo -e "${GRE}=========== $FUNCNAME end ===========${NOC}"
}
# chain33 validators
function InitChain33Vilators() {
echo -e "${GRE}=========== $FUNCNAME begin ===========${NOC}"
# chain33Validators
chain33_ImportPrivkey "${chain33ValidatorKey1}" "${chain33Validator1}" "tokenAddr" "${MAIN_HTTP}"
chain33_ImportPrivkey "${chain33ValidatorKey2}" "${chain33Validator2}" "tokenAddr" "${MAIN_HTTP}"
chain33_ImportPrivkey "${chain33ValidatorKey3}" "${chain33Validator3}" "tokenAddr" "${MAIN_HTTP}"
chain33_ImportPrivkey "${chain33ValidatorKey4}" "${chain33Validator4}" "tokenAddr" "${MAIN_HTTP}"
# SetConsensusThreshold
tx=$(curl -ksd '{"method":"Chain33.CreateTransaction","params":[{"execer":"x2ethereum","actionName":"SetConsensusThreshold","payload":{"consensusThreshold":"80"}}]}' ${MAIN_HTTP} | jq -r ".result")
chain33_SignAndSendTxWait "$tx" "$sendPriKey" ${MAIN_HTTP} "SetConsensusThreshold"
# add a validator
tx=$(curl -ksd '{"method":"Chain33.CreateTransaction","params":[{"execer":"x2ethereum","actionName":"AddValidator","payload":{"address":"'${chain33Validator1}'","power":"25"}}]}' ${MAIN_HTTP} | jq -r ".result")
chain33_SignAndSendTxWait "$tx" "$sendPriKey" ${MAIN_HTTP} "AddValidator"
tx=$(curl -ksd '{"method":"Chain33.CreateTransaction","params":[{"execer":"x2ethereum","actionName":"AddValidator","payload":{"address":"'${chain33Validator2}'","power":"25"}}]}' ${MAIN_HTTP} | jq -r ".result")
chain33_SignAndSendTxWait "$tx" "$sendPriKey" ${MAIN_HTTP} "AddValidator"
tx=$(curl -ksd '{"method":"Chain33.CreateTransaction","params":[{"execer":"x2ethereum","actionName":"AddValidator","payload":{"address":"'${chain33Validator3}'","power":"25"}}]}' ${MAIN_HTTP} | jq -r ".result")
chain33_SignAndSendTxWait "$tx" "$sendPriKey" ${MAIN_HTTP} "AddValidator"
tx=$(curl -ksd '{"method":"Chain33.CreateTransaction","params":[{"execer":"x2ethereum","actionName":"AddValidator","payload":{"address":"'${chain33Validator4}'","power":"25"}}]}' ${MAIN_HTTP} | jq -r ".result")
chain33_SignAndSendTxWait "$tx" "$sendPriKey" ${MAIN_HTTP} "AddValidator"
# query Validators
chain33_Http '{"method":"Chain33.Query","params":[{"execer":"x2ethereum","funcName":"GetTotalPower","payload":{}}]}' ${MAIN_HTTP} '(.error|not) and (.result != null)' "GetTotalPower" ".result.totalPower"
if [ "${RETURN_RESP}" != "100" ]; then
echo -e "${RED}=========== GetTotalPower err: TotalPower = $RETURN_RESP ===========${NOC}"
fi
# send coins to the x2ethereum exec address
x2eth_addr=$(curl -ksd '{"method":"Chain33.ConvertExectoAddr","params":[{"execname":"x2ethereum"}]}' ${MAIN_HTTP} | jq -r ".result")
chain33_SendToAddress "${sendAddress}" "${x2eth_addr}" 20000000000 "${MAIN_HTTP}"
queryExecBalance "${sendAddress}" "20000000000"
# chain33Validator
chain33_applyCoins "${chain33Validator1}" 1000000000 "${MAIN_HTTP}"
queryChain33Balance "${chain33Validator1}" "1000000000"
chain33_applyCoins "${chain33Validator2}" 1000000000 "${MAIN_HTTP}"
queryChain33Balance "${chain33Validator2}" "1000000000"
chain33_applyCoins "${chain33Validator3}" 1000000000 "${MAIN_HTTP}"
queryChain33Balance "${chain33Validator3}" "1000000000"
chain33_applyCoins "${chain33Validator4}" 1000000000 "${MAIN_HTTP}"
queryChain33Balance "${chain33Validator4}" "1000000000"
echo -e "${GRE}=========== $FUNCNAME end ===========${NOC}"
}
function EthImportKey() {
echo -e "${GRE}=========== $FUNCNAME begin ===========${NOC}"
#
local req='{"method":"Manager.SetPassphase","params":[{"Passphase":"123456hzj"}]}'
chain33_Http "$req" "${CLIB_HTTP}" '(.error|not) and (.result != null)' "SetPassphase" ".result"
chain33_Http "$req" "${CLIC_HTTP}" '(.error|not) and (.result != null)' "SetPassphase" ".result"
chain33_Http "$req" "${CLID_HTTP}" '(.error|not) and (.result != null)' "SetPassphase" ".result"
req='{"method":"Manager.Unlock","params":["123456hzj"]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "Unlock" ".result"
chain33_Http "$req" "${CLIB_HTTP}" '(.error|not) and (.result != null)' "Unlock" ".result"
chain33_Http "$req" "${CLIC_HTTP}" '(.error|not) and (.result != null)' "Unlock" ".result"
chain33_Http "$req" "${CLID_HTTP}" '(.error|not) and (.result != null)' "Unlock" ".result"
req='{"method":"Manager.ImportChain33PrivateKey4EthRelayer","params":["'${chain33ValidatorKey1}'"]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "ImportChain33PrivateKey4EthRelayer" ".result"
req='{"method":"Manager.ImportChain33PrivateKey4EthRelayer","params":["'${chain33ValidatorKey2}'"]}'
chain33_Http "$req" "${CLIB_HTTP}" '(.error|not) and (.result != null)' "ImportChain33PrivateKey4EthRelayer" ".result"
req='{"method":"Manager.ImportChain33PrivateKey4EthRelayer","params":["'${chain33ValidatorKey3}'"]}'
chain33_Http "$req" "${CLIC_HTTP}" '(.error|not) and (.result != null)' "ImportChain33PrivateKey4EthRelayer" ".result"
req='{"method":"Manager.ImportChain33PrivateKey4EthRelayer","params":["'${chain33ValidatorKey4}'"]}'
chain33_Http "$req" "${CLID_HTTP}" '(.error|not) and (.result != null)' "ImportChain33PrivateKey4EthRelayer" ".result"
req='{"method":"Manager.ImportChain33RelayerPrivateKey","params":[{"privateKey":"'${ethValidatorAddrKeyA}'"}]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "ImportChain33RelayerPrivateKey" ".result"
req='{"method":"Manager.ImportChain33RelayerPrivateKey","params":[{"privateKey":"'${ethValidatorAddrKeyB}'"}]}'
chain33_Http "$req" "${CLIB_HTTP}" '(.error|not) and (.result != null)' "ImportChain33RelayerPrivateKey" ".result"
req='{"method":"Manager.ImportChain33RelayerPrivateKey","params":[{"privateKey":"'${ethValidatorAddrKeyC}'"}]}'
chain33_Http "$req" "${CLIC_HTTP}" '(.error|not) and (.result != null)' "ImportChain33RelayerPrivateKey" ".result"
req='{"method":"Manager.ImportChain33RelayerPrivateKey","params":[{"privateKey":"'${ethValidatorAddrKeyD}'"}]}'
chain33_Http "$req" "${CLID_HTTP}" '(.error|not) and (.result != null)' "ImportChain33RelayerPrivateKey" ".result"
echo -e "${GRE}=========== $FUNCNAME end ===========${NOC}"
}
function TestChain33ToEthAssets() {
echo -e "${GRE}=========== $FUNCNAME begin ===========${NOC}"
# create a bridge token on ethereum for chain33 coins.bty
local req='{"method":"Manager.CreateBridgeToken","params":["coins.bty"]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "CreateBridgeToken" ".result.addr"
tokenAddrBty=${RETURN_RESP}
req='{"method":"Manager.GetBalance","params":[{"owner":"'${ethReceiverAddr1}'","tokenAddr":"'${tokenAddrBty}'"}]}'
queryRelayerBalance "$req" "0"
# chain33 lock bty
#shellcheck disable=SC2086
tx=$(curl -ksd '{"method":"Chain33.CreateTransaction","params":[{"execer":"x2ethereum","actionName":"Chain33ToEthLock","payload":{"TokenContract":"'${tokenAddrBty}'","Chain33Sender":"'${sendPriKey}'","EthereumReceiver":"'${ethReceiverAddr1}'","Amount":"500000000","IssuerDotSymbol":"coins.bty","Decimals":"8"}}]}' ${MAIN_HTTP} | jq -r ".result")
chain33_SignAndSendTxWait "$tx" "$sendPriKey" ${MAIN_HTTP} "Chain33ToEthLock"
queryExecBalance "${sendAddress}" "19500000000"
eth_block_wait $((maturityDegree + 2)) "${ethUrl}"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${ethReceiverAddr1}'","tokenAddr":"'${tokenAddrBty}'"}]}'
queryRelayerBalance "$req" "5"
# eth burn
req='{"method":"Manager.Burn","params":[{"ownerKey":"'${ethReceiverAddrKey1}'","tokenAddr":"'${tokenAddrBty}'","chain33Receiver":"'${chain33SenderAddr}'","amount":"500000000"}]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "Burn" ".result"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${ethReceiverAddr1}'","tokenAddr":"'${tokenAddrBty}'"}]}'
queryRelayerBalance "$req" "0"
# wait for ethereum block confirmations
eth_block_wait $((maturityDegree + 2)) "${ethUrl}"
queryExecBalance "${chain33SenderAddr}" "500000000"
echo -e "${GRE}=========== $FUNCNAME end ===========${NOC}"
}
# eth to chain33
# lock assets on ethereum, credit them on chain33, then burn on chain33 to release them back to ethereum
function TestETH2Chain33Assets() {
echo -e "${GRE}=========== $FUNCNAME begin ===========${NOC}"
local req='{"method":"Manager.ShowBridgeBankAddr","params":[{}]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "ShowBridgeBankAddr" ".result.addr"
bridgeBankAddr="${RETURN_RESP}"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${bridgeBankAddr}'","tokenAddr":""}]}'
queryRelayerBalance "$req" "0"
# eth lock 0.1
req='{"method":"Manager.LockEthErc20Asset","params":[{"ownerKey":"'${ethReceiverAddrKey1}'","tokenAddr":"","amount":"100000000000000000","chain33Receiver":"'${sendAddress}'"}]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "LockEthErc20Asset" ".result"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${bridgeBankAddr}'","tokenAddr":""}]}'
queryRelayerBalance "$req" "0.1"
# wait for ethereum block confirmations
eth_block_wait $((maturityDegree + 2)) "${ethUrl}"
req='{"method":"Chain33.Query","params":[{"execer":"x2ethereum","funcName":"GetRelayerBalance","payload":{"tokenSymbol":"eth","address":"'${sendAddress}'","tokenAddr":"0x0000000000000000000000000000000000000000"}}]}'
queryChain33X2ethBalance "${req}" "0.1"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${ethReceiverAddr2}'","tokenAddr":""}]}'
chain33_Http "${req}" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "GetBalance" ".result.balance"
local balance=${RETURN_RESP}
# burn 0.1
tx=$(curl -ksd '{"method":"Chain33.CreateTransaction","params":[{"execer":"x2ethereum","actionName":"Chain33ToEthBurn","payload":{"TokenContract":"0x0000000000000000000000000000000000000000","Chain33Sender":"'${sendPriKey}'","EthereumReceiver":"'${ethReceiverAddr2}'","Amount":"10000000","IssuerDotSymbol":"eth","Decimals":"18"}}]}' ${MAIN_HTTP} | jq -r ".result")
chain33_SignAndSendTxWait "$tx" "$sendPriKey" ${MAIN_HTTP} "Chain33ToEthBurn"
req='{"method":"Chain33.Query","params":[{"execer":"x2ethereum","funcName":"GetRelayerBalance","payload":{"tokenSymbol":"eth","address":"'${sendAddress}'","tokenAddr":"0x0000000000000000000000000000000000000000"}}]}'
queryChain33X2ethBalance "${req}" "0"
eth_block_wait $((maturityDegree + 2)) "${ethUrl}"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${bridgeBankAddr}'","tokenAddr":""}]}'
queryRelayerBalance "$req" "0"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${ethReceiverAddr2}'","tokenAddr":""}]}'
#queryRelayerBalance "$req" "$(echo "${balance}+0.1" | bc)"
queryRelayerBalance "$req" "100.1"
echo -e "${GRE}=========== $FUNCNAME end ===========${NOC}"
}
function TestETH2Chain33Erc20() {
echo -e "${GRE}=========== $FUNCNAME begin ===========${NOC}"
# create an ERC20 token (testc) on ethereum and mint a balance for the test
local req='{"method":"Manager.CreateERC20Token","params":["testc"]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "CreateERC20Token" ".result.addr"
tokenAddr="${RETURN_RESP}"
# mint 1000 testc (100000000000 with 8 decimals)
req='{"method":"Manager.MintErc20","params":[{"owner":"'${ethReceiverAddr1}'","tokenAddr":"'${tokenAddr}'","amount":"100000000000"}]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "MintErc20" ".result.addr"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${ethReceiverAddr1}'","tokenAddr":"'${tokenAddr}'"}]}'
queryRelayerBalance "$req" "1000"
local req='{"method":"Manager.ShowBridgeBankAddr","params":[{}]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "ShowBridgeBankAddr" ".result.addr"
bridgeBankAddr="${RETURN_RESP}"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${bridgeBankAddr}'","tokenAddr":"'${tokenAddr}'"}]}'
queryRelayerBalance "$req" "0"
# lock 100
req='{"method":"Manager.LockEthErc20Asset","params":[{"ownerKey":"'${ethReceiverAddrKey1}'","tokenAddr":"'${tokenAddr}'","amount":"10000000000","chain33Receiver":"'${chain33Validator1}'"}]}'
chain33_Http "$req" "${CLIA_HTTP}" '(.error|not) and (.result != null)' "LockEthErc20Asset" ".result"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${ethReceiverAddr1}'","tokenAddr":"'${tokenAddr}'"}]}'
queryRelayerBalance "$req" "900"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${bridgeBankAddr}'","tokenAddr":"'${tokenAddr}'"}]}'
queryRelayerBalance "$req" "100"
# wait for ethereum block confirmations
eth_block_wait $((maturityDegree + 2)) "${ethUrl}"
req='{"method":"Chain33.Query","params":[{"execer":"x2ethereum","funcName":"GetRelayerBalance","payload":{"tokenSymbol":"testc","address":"'${chain33Validator1}'","tokenAddr":"'${tokenAddr}'"}}]}'
queryChain33X2ethBalance "${req}" "100"
# chain33 burn 100
#shellcheck disable=SC2086
tx=$(curl -ksd '{"method":"Chain33.CreateTransaction","params":[{"execer":"x2ethereum","actionName":"Chain33ToEthBurn","payload":{"TokenContract":"'${tokenAddr}'","Chain33Sender":"'${chain33ValidatorKey1}'","EthereumReceiver":"'${ethReceiverAddr2}'","Amount":"10000000000","IssuerDotSymbol":"testc","Decimals":"8"}}]}' ${MAIN_HTTP} | jq -r ".result")
chain33_SignAndSendTxWait "$tx" "$chain33ValidatorKey1" ${MAIN_HTTP} "Chain33ToEthBurn"
req='{"method":"Chain33.Query","params":[{"execer":"x2ethereum","funcName":"GetRelayerBalance","payload":{"tokenSymbol":"testc","address":"'${chain33Validator1}'","tokenAddr":"'${tokenAddr}'"}}]}'
queryChain33X2ethBalance "${req}" "0"
eth_block_wait $((maturityDegree + 2)) "${ethUrl}"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${ethReceiverAddr2}'","tokenAddr":"'${tokenAddr}'"}]}'
queryRelayerBalance "$req" "100"
req='{"method":"Manager.GetBalance","params":[{"owner":"'${bridgeBankAddr}'","tokenAddr":"'${tokenAddr}'"}]}'
queryRelayerBalance "$req" "0"
echo -e "${GRE}=========== $FUNCNAME end ===========${NOC}"
}
function rpc_test() {
set +e
set -x
chain33_RpcTestBegin x2ethereum
MAIN_HTTP="$1"
dockerNamePrefix="$2"
echo "main_ip=$MAIN_HTTP"
ispara=$(echo '"'"${MAIN_HTTP}"'"' | jq '.|contains("8901")')
if [ "$ispara" == false ]; then
# init
StartRelayerAndDeploy
InitChain33Vilators
EthImportKey
# test
TestChain33ToEthAssets
TestETH2Chain33Assets
TestETH2Chain33Erc20
copyErrLogs
docker-compose -f ./x2ethereum/docker-compose-x2ethereum.yml down
fi
chain33_RpcTestRst x2ethereum "$CASE_ERR"
}
chain33_debug_function rpc_test "$1" "$2"
|
set -e
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $SCRIPT_DIR/set-kubeconfig.sh
open http://127.0.0.1:4040
kubectl port-forward -n kube-system "$(kubectl get -n kube-system pod --selector=weave-scope-component=app -o jsonpath='{.items..metadata.name}')" 4040
|
setup_emsdk()
{
if [ ! -d "./tools/emsdk" ]
then
if [ ! -d "./tools" ]
then
mkdir ./tools
fi
cd ./tools
git clone https://github.com/emscripten-core/emsdk.git
cd ./emsdk
git pull
./emsdk install latest
./emsdk activate latest
cd ../..
fi
}
|
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks=16
#SBATCH --mem=8gb
module load fastqc
module load trimgalore
module load samtools
module load bowtie2
module load bedtools
### Fastqc for untrimmed files
cd /gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq
fastq_untrimmed_1=SRR1731132_1.fastq
fastqc $fastq_untrimmed_1
### Trim Galore
trim_galore --length 24 --stringency 3 $fastq_untrimmed_1
trim_fastq_end1=/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq/SRR1731132_1_trimmed.fq
### Fastqc for trimmed files
fastqc $trim_fastq_end1
### Bowtie2 alignment
cd /gpfs/group/pipkin/hdiao/T_Cell_ChIP/1_bowtie2
bowtie2_index=/gpfs/group/pipkin/hdiao/ref_resources/mm/release102/GRCm38
sam_name=SRR1731132.sam
bowtie2 -p 16 -x $bowtie2_index -U $trim_fastq_end1 -S $sam_name
### Convert/sort/filter
bam_name=SRR1731132.bam
bam_name_srt=SRR1731132_srt.bam
bam_name_srt_dupr=SRR1731132_srt_dupr.bam
flb_bam_name=SRR1731132_srt_dupr_flb.bam
blacklist_bed=/gpfs/group/pipkin/hdiao/ref_resources/mm/mm10_blacklisted_2016_nochr.bed
samtools view -bS $sam_name > $bam_name
samtools sort $bam_name -o $bam_name_srt
samtools rmdup -S $bam_name_srt $bam_name_srt_dupr
bedtools intersect -abam $bam_name_srt_dupr -b $blacklist_bed -v > $flb_bam_name
### Remove intermediate files
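# Only delete the intermediates once the blacklist-filtered BAM exists and is larger
# than ~10 KB, so a failed step does not wipe its inputs.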
filesize=$(stat -c%s $flb_bam_name)
if (( filesize > 10000 ))
then
rm $sam_name
rm $bam_name
rm $bam_name_srt
rm $bam_name_srt_dupr
rm $trim_fastq_end1
fi
|
#!/bin/bash -x
# Exit on error
set -e
# Test code goes here
rm -rf valid_lowisbet valid_lowisbet_?.log
mkdir -p valid_lowisbet
extra_opts="--no-shuffle --seed 1111 --maxi-batch 1 --maxi-batch-sort none"
extra_opts="$extra_opts --dim-emb 64 --dim-rnn 128 --mini-batch 32"
extra_opts="$extra_opts --cost-type ce-mean --disp-label-counts false --clip-norm 0"
# Files for the validation sets are swapped intentionally
$MRT_MARIAN/marian $extra_opts \
-m valid_lowisbet/model.npz -t $MRT_DATA/train.max50.{en,de} -v vocab.en.yml vocab.de.yml \
--disp-freq 10 --valid-freq 30 --after-batches 160 --early-stopping 2 \
--valid-metrics cross-entropy --valid-sets $MRT_DATA/europarl.de-en/toy.bpe.{de,en} --valid-mini-batch 64 \
--valid-log valid_lowisbet_1.log
test -e valid_lowisbet/model.npz
test -e valid_lowisbet/model.npz.yml
test -e valid_lowisbet_1.log
cp valid_lowisbet/model.npz.progress.yml valid_lowisbet/model.npz.progress.yml.bac
cat valid_lowisbet_1.log | $MRT_TOOLS/strip-timestamps.sh | grep "cross-entropy" > valid_lowisbet.out
# Files for the validation sets are swapped intentionally
$MRT_MARIAN/marian $extra_opts \
-m valid_lowisbet/model.npz -t $MRT_DATA/train.max50.{en,de} -v vocab.en.yml vocab.de.yml \
--disp-freq 10 --valid-freq 30 --after-batches 320 --early-stopping 4 \
--valid-metrics cross-entropy --valid-sets $MRT_DATA/europarl.de-en/toy.bpe.{de,en} --valid-mini-batch 64 \
--valid-log valid_lowisbet_2.log
test -e valid_lowisbet/model.npz
test -e valid_lowisbet_2.log
cat valid_lowisbet_2.log | $MRT_TOOLS/strip-timestamps.sh | grep "cross-entropy" >> valid_lowisbet.out
$MRT_TOOLS/diff-nums.py -p 0.1 valid_lowisbet.out valid_lowisbet.expected -o valid_lowisbet.diff
# Exit with success code
exit 0
|
#!/usr/bin/env bash
# --------------------------------------------------------------------------
# Copyright 2013-2016 Sam Deane, Elegant Chaos. All rights reserved.
# This source code is distributed under the terms of Elegant Chaos's
# liberal license: http://www.elegantchaos.com/license/liberal
# --------------------------------------------------------------------------
## This script is an attempt to automate picking up the latest version of the "develop" branch for a module, given that it might be on a detached HEAD at the time.
##
## It performs the following steps:
##
## - rebase on the local develop branch
## - save this to a temporary branch
## - switch to the local develop branch
## - merge in the temporary branch - this should be a fast forward
## - remove the temporary branch
## - rebase on the remote "develop" from origin
## - push the resulting changed branch back to origin
check() {
if [[ $1 != 0 ]]; then
echo "failed: $2"
exit $1
fi
}
status=`git status --porcelain`
if [[ "$status" != "" ]]; then
echo "You have local changes. Commit them first."
exit 1
fi
# we may start on something that isn't the develop branch
# possibly a detached HEAD
# try to apply any changes on top of our local develop
git rebase develop
check $? "rebasing on develop"
# now fast forward develop to the merged place
git checkout -b develop-temp
check $? "making temp branch"
git checkout develop
check $? "switching back to develop"
git merge develop-temp
check $? "merging local changes"
git branch -d develop-temp
check $? "removing temp branch"
# we should now be on a local develop branch incorporating any local changes
echo fetching latest revisions
git fetch
# try to rebase again on top of any remote changes
git rebase
check $? "rebasing on origin/develop"
# if that worked, push back the merged version
git push
check $? "pushing"
|
#!/bin/bash
CUDA_VISIBLE_DEVICES="0" python runner.py
|
#!/bin/bash
rm -rf dist || exit 0;
mkdir dist;
npm run build
cp ./ops/CNAME ./dist
( cd dist
git init
git add .
git commit -m "Deployed to Github Pages"
git push --force [email protected]:anvaka/npmgraph.an.git master:gh-pages
)
|
find . -name '*.go' -exec sed -i 's?k8s.io/kubernetes/plugin/pkg/scheduler?github.com/KubeDevice/kube-scheduler/pkg?g' {} +
|
#!/usr/bin/env bash
#
# MetaCall Configuration Environment Bash Script by Parra Studios
# Configure and install MetaCall environment script utility.
#
# Copyright (C) 2016 - 2021 Vicente Eduardo Ferrer Garcia <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
ROOT_DIR=$(pwd)
RUN_AS_ROOT=0
SUDO_CMD=sudo
INSTALL_APT=1
INSTALL_PYTHON=0
INSTALL_RUBY=0
INSTALL_NETCORE=0
INSTALL_NETCORE2=0
INSTALL_NETCORE5=0
INSTALL_V8=0
INSTALL_NODEJS=0
INSTALL_TYPESCRIPT=0
INSTALL_FILE=0
INSTALL_RPC=0
INSTALL_PORTS=0
INSTALL_CLEAN=0
SHOW_HELP=0
PROGNAME=$(basename $0)
# Install and mark packages to avoid autoremove
sub_apt_install_hold(){
$SUDO_CMD apt-get -y install --no-install-recommends $@
$SUDO_CMD apt-mark hold $@
}
# Base packages
sub_apt(){
echo "configure apt"
cd $ROOT_DIR
$SUDO_CMD apt-get update && $SUDO_CMD apt-get -y install --no-install-recommends wget gpg apt-transport-https
}
# Python
sub_python(){
echo "configure python"
cd $ROOT_DIR
sub_apt_install_hold python3 libpython3.7
}
# Ruby
sub_ruby(){
echo "configure ruby"
cd $ROOT_DIR
# TODO: Remove this when using ruby2.5 (not available yet because it fails on loading a script with a malloc error)
$SUDO_CMD mv /etc/apt/sources.list /etc/apt/sources.list.backup
$SUDO_CMD sh -c "echo \"deb http://ftp.debian.org/debian/ stretch main\" > /etc/apt/sources.list"
$SUDO_CMD sh -c "echo \"deb-src http://ftp.debian.org/debian/ stretch main\" >> /etc/apt/sources.list"
$SUDO_CMD sh -c "echo \"deb http://security.debian.org/debian-security stretch/updates main\" >> /etc/apt/sources.list"
$SUDO_CMD sh -c "echo \"deb-src http://security.debian.org/debian-security stretch/updates main\" >> /etc/apt/sources.list"
$SUDO_CMD sh -c "echo \"deb http://ftp.debian.org/debian/ stretch-updates main\" >> /etc/apt/sources.list"
$SUDO_CMD sh -c "echo \"deb-src http://ftp.debian.org/debian/ stretch-updates main\" >> /etc/apt/sources.list"
$SUDO_CMD apt-get update
# sub_apt_install_hold ruby2.5 libruby2.5
$SUDO_CMD apt-get -y install --no-install-recommends --allow-remove-essential --allow-downgrades libssl1.1 libffi6 zlib1g libyaml-0-2 libgmp10=2:6.1.2+dfsg-1 libreadline7 libxml2 libncurses5 libtinfo5 ruby2.3 libruby2.3
$SUDO_CMD apt-mark hold libssl1.1 libffi6 zlib1g libyaml-0-2 libgmp10 libreadline7 libxml2 libncurses5 libtinfo5 ruby2.3 libruby2.3
# TODO: Remove this when using ruby2.5 (not available yet because it fails on loading a script with a malloc error)
$SUDO_CMD mv /etc/apt/sources.list.backup /etc/apt/sources.list
}
# NetCore
sub_netcore(){
echo "configure netcore"
cd $ROOT_DIR
# Debian Stretch
sub_apt_install_hold libc6 libcurl3 libgcc1 libgssapi-krb5-2 libicu57 \
liblttng-ust0 libssl1.0.2 libstdc++6 libunwind8 libuuid1 zlib1g ca-certificates
# Install .NET Core Runtime 1.x
DOTNET_VERSION=1.1.10
DOTNET_DOWNLOAD_URL=https://dotnetcli.blob.core.windows.net/dotnet/Runtime/$DOTNET_VERSION/dotnet-debian.9-x64.$DOTNET_VERSION.tar.gz
wget $DOTNET_DOWNLOAD_URL -O dotnet.tar.gz
mkdir -p /usr/share/dotnet
tar -zxf dotnet.tar.gz -C /usr/share/dotnet
rm dotnet.tar.gz
ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet
}
# NetCore 2
sub_netcore2(){
echo "configure netcore 2"
cd $ROOT_DIR
# Install NET Core Runtime 2.x
wget https://packages.microsoft.com/config/debian/10/packages-microsoft-prod.deb -O packages-microsoft-prod.deb
$SUDO_CMD dpkg -i packages-microsoft-prod.deb
rm packages-microsoft-prod.deb
$SUDO_CMD apt-get update
sub_apt_install_hold dotnet-runtime-2.2=2.2.8-1
}
# NetCore 5
sub_netcore5(){
echo "configure netcore 5"
cd $ROOT_DIR
# Install NET Core Runtime 5.x
wget https://packages.microsoft.com/config/debian/10/packages-microsoft-prod.deb -O packages-microsoft-prod.deb
$SUDO_CMD dpkg -i packages-microsoft-prod.deb
rm packages-microsoft-prod.deb
$SUDO_CMD apt-get update
sub_apt_install_hold dotnet-runtime-5.0=5.0.9-1
}
# V8
sub_v8(){
echo "configure v8"
# TODO
}
# NodeJS
sub_nodejs(){
echo "configure node"
# Install NodeJS library
sub_apt_install_hold libnode83
}
# TypeScript
sub_typescript(){
echo "configure typescript"
# Nothing needed, node_modules are local to the path,
# runtime is located in /usr/local/lib, and node builtins
# are already compiled in the runtime
}
# File
sub_file(){
echo "configure file"
# Nothing needed
}
# RPC
sub_rpc(){
echo "configure rpc"
sub_apt_install_hold libcurl4
}
# Ports
sub_ports(){
echo "configure ports"
# Nothing needed, there are no dependencies for ports by now
}
# Install
sub_install(){
if [ $RUN_AS_ROOT = 1 ]; then
SUDO_CMD=""
fi
if [ $INSTALL_APT = 1 ]; then
sub_apt
fi
if [ $INSTALL_PYTHON = 1 ]; then
sub_python
fi
if [ $INSTALL_RUBY = 1 ]; then
sub_ruby
fi
if [ $INSTALL_NETCORE = 1 ]; then
sub_netcore
fi
if [ $INSTALL_NETCORE2 = 1 ]; then
sub_netcore2
fi
if [ $INSTALL_NETCORE5 = 1 ]; then
sub_netcore5
fi
if [ $INSTALL_V8 = 1 ]; then
sub_v8
fi
if [ $INSTALL_NODEJS = 1 ]; then
sub_nodejs
fi
if [ $INSTALL_TYPESCRIPT = 1 ]; then
sub_typescript
fi
if [ $INSTALL_FILE = 1 ]; then
sub_file
fi
if [ $INSTALL_RPC = 1 ]; then
sub_rpc
fi
if [ $INSTALL_PORTS = 1 ]; then
sub_ports
fi
if [ $INSTALL_CLEAN = 1 ]; then
sub_clean
fi
echo "install finished in workspace $ROOT_DIR"
}
# Clean dependencies
sub_clean(){
echo "clean dependencies"
$SUDO_CMD apt-get -y remove wget gpg
$SUDO_CMD apt-get -y autoclean
}
# Configuration
sub_options(){
for var in "$@"
do
if [ "$var" = 'root' ]; then
echo "running as root"
RUN_AS_ROOT=1
fi
if [ "$var" = 'base' ]; then
echo "apt selected"
INSTALL_APT=1
fi
if [ "$var" = 'python' ]; then
echo "python selected"
INSTALL_PYTHON=1
fi
if [ "$var" = 'ruby' ]; then
echo "ruby selected"
INSTALL_RUBY=1
fi
if [ "$var" = 'netcore' ]; then
echo "netcore selected"
INSTALL_NETCORE=1
fi
if [ "$var" = 'netcore2' ]; then
echo "netcore 2 selected"
INSTALL_NETCORE2=1
fi
if [ "$var" = 'netcore5' ]; then
echo "netcore 5 selected"
INSTALL_NETCORE5=1
fi
if [ "$var" = 'v8' ]; then
echo "v8 selected"
INSTALL_V8=1
fi
if [ "$var" = 'nodejs' ]; then
echo "nodejs selected"
INSTALL_NODEJS=1
fi
if [ "$var" = 'typescript' ]; then
echo "typescript selected"
INSTALL_TYPESCRIPT=1
fi
if [ "$var" = 'file' ]; then
echo "file selected"
INSTALL_FILE=1
fi
if [ "$var" = 'rpc' ]; then
echo "rpc selected"
INSTALL_RPC=1
fi
if [ "$var" = 'ports' ]; then
echo "ports selected"
INSTALL_PORTS=1
fi
if [ "$var" = 'clean' ]; then
echo "clean selected"
INSTALL_CLEAN=1
fi
done
}
# Help
sub_help() {
echo "Usage: `basename "$0"` list of components"
echo "Components:"
echo " root"
echo " base"
echo " python"
echo " ruby"
echo " netcore"
echo "	netcore2"
echo "	netcore5"
echo " v8"
echo " nodejs"
echo " typescript"
echo " file"
echo " rpc"
echo " ports"
echo " clean"
echo ""
}
case "$#" in
0)
sub_help
;;
*)
sub_options $@
sub_install
;;
esac
|
#
# Copyright (C) 2013 Julian Atienza Herrero <j.atienza at har.mrc.ac.uk>
#
# MEDICAL RESEARCH COUNCIL UK MRC
#
# Harwell Mammalian Genetics Unit
#
# http://www.har.mrc.ac.uk
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
rm -rf `find . -type d -name .svn`
svn delete svn://source.har.mrc.ac.uk/PhenoDCC/exportlibrary/trunk/exportlibrary -m "working copy broken"
svn import exportlibrary svn://source.har.mrc.ac.uk/PhenoDCC/exportlibrary/trunk/exportlibrary -m "first commit after broken working copy"
svn co svn://source.har.mrc.ac.uk/PhenoDCC/exportlibrary/trunk/exportlibrary
svn delete svn://source.har.mrc.ac.uk/PhenoDCC/exportlibrary/trunk/exportlibrary/exportlibrary.utils/src/test/db/
svn delete svn://source.har.mrc.ac.uk/PhenoDCC/exportlibrary/trunk/exportlibrary/exportlibrary.xmlvalidation/hsqldb/ -m "remove test dabases"
svn delete svn://source.har.mrc.ac.uk/PhenoDCC/exportlibrary/trunk/exportlibrary/exportlibrary.xmlvalidation/src/main/generated/ -m "removed generated classes"
|
#!/bin/sh
#NEWROOT=:pserver:$(id -un)@cvs.bh.exept.de:/cvs/stx
if [ -z "$1" ]; then
echo "Common CVS roots:"
# Do not show these to other people, these are useless for them
# anyway
if [ "$USER" = "jv" ]; then
echo " :ext:vrany@exeptn:/cvs/stx "
echo
echo " :ext:[email protected]:/cvs/stx "
echo
fi
cat <<EOF
:pserver:[email protected]:/cvs/stx
(public eXept CVS, synced once a day. Use this if unsure)
:ext:swing.fit.cvut.cz/var/local/cvs
(SWING mirror. Use this if you have shell account
on swing.fit.cvut.cz)
EOF
echo -n "Enter new CVS root (or Ctrl-C to abort): "
read answer
else
answer="$1"
fi
if [ ! -z "$answer" ]; then
echo "$answer" > /tmp/chcvs.$$
find . -name CVS -type d -exec cp /tmp/chcvs.$$ {}/Root \;
rm /tmp/chcvs.$$
else
echo "Nothing changed"
exit 1
fi
|
#!/bin/bash
REGEX="https:\/\/[a-z]{7}.*.iso"
Link=$(wget -qO- www.debian.org | grep -Eo $REGEX)
echo $Link
wget $Link -P ~/
#wget -qO- www.debian.org |
#grep -Eoi '<a [^>]+>' |
#grep -Eo 'href="[^\"]+"' |
#grep -Eo '(http|https)://[^/"]+'
|
#!/bin/bash
kubectl get secret --namespace monitoring grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
export POD_NAME=$(kubectl get pods --namespace monitoring -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=grafana" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace monitoring port-forward $POD_NAME 3000 |
data_directory=../../model_training_data/Fine-ET/RS_NDS_from_2
ckpt_directory=$data_directory/ckpts
dataset_file=../../datasets/sampled_datasets/RS_NDS_from_2.json.gz
mkdir -p $data_directory
mkdir -p $ckpt_directory
# Model parameters
rnn_hidden_neurons=200
keep_prob=0.5
learning_rate=0.002
batch_size=500
char_embedding_size=200
char_rnn_hidden_neurons=50
joint_embedding_size=500
epochs=15 # A number such that approx 19-20 million EMs are used in training
save_checkpoint_after=600000 # Total entities in RS_NDS_from_2: 1291232
# http://nlp.stanford.edu/data/glove.840B.300d.zip
glove_vector_file_path=/hdd1/word_vectors/glove.42B.300d/glove.42B.300d.txt
# Step 1: Generate local variables such as word to number dictionary etc.
#echo "Generate local variables required for model"
#python Fine-ET/src/data_processing/json_to_tfrecord.py prepare_local_variables $dataset_file $glove_vector_file_path unk $data_directory/ --lowercase
# Step 2: Convert Training data into TFRecord format.
#echo "Converting Train data to TFRecord"
#python Fine-ET/src/data_processing/json_to_tfrecord.py afet_data $data_directory/ $dataset_file
# Step 3: Convert development and testing data into TFRecord format.
#echo "Converting Dev and Test data to TFRecord"
#python Fine-ET/src/data_processing/json_to_tfrecord.py afet_data $data_directory/ ../../datasets/1k-WFB-g/fner_dev.json --test_data
#python Fine-ET/src/data_processing/json_to_tfrecord.py afet_data $data_directory/ ../../datasets/1k-WFB-g/fner_test.json --test_data
#python Fine-ET/src/data_processing/json_to_tfrecord.py afet_data $data_directory/ ../../datasets/figer_gold.json --test_data
# Step 4 (Optional): Convert the development and testing data with entities identified by Fine-ED models to TFRecord format. The following files have to first generated by the Fine-ED model trained on the same dataset. Note that, if the Fine-ED model is retrained, these results file needs to be updated.
#echo "Step 4: Pipeline use of FgED results."
#python Fine-ET/src/detect_entities.py ../../datasets/figer_gold.json ../../results/Fine-ED/lstm_crf/RS_NDS_from_2/figer.conll $data_directory/figer_gold_lstm_crf.json
#python Fine-ET/src/detect_entities.py ../../datasets/1k-WFB-g/fner_dev.json ../../results/Fine-ED/lstm_crf/RS_NDS_from_2/fner_dev.conll $data_directory/fner_dev_lstm_crf.json
#python Fine-ET/src/detect_entities.py ../../datasets/1k-WFB-g/fner_test.json ../../results/Fine-ED/lstm_crf/RS_NDS_from_2/fner_test.conll $data_directory/fner_test_lstm_crf.json
#python Fine-ET/src/data_processing/json_to_tfrecord.py afet_data $data_directory/ $data_directory/fner_dev_lstm_crf.json --test_data
#python Fine-ET/src/data_processing/json_to_tfrecord.py afet_data $data_directory/ $data_directory/fner_test_lstm_crf.json --test_data
#python Fine-ET/src/data_processing/json_to_tfrecord.py afet_data $data_directory/ $data_directory/figer_gold_lstm_crf.json --test_data
# Run train test procedure 5 times
for ((i=1; i<=5; i++)); do
# Do not omit '_run_' from the model ckpt name
# format: prefix_run_suffix
model_ckpt_name=checkpoint_run_$i
# echo "Training a FNET model"
# time python Fine-ET/src/main_fnet_train.py $data_directory/ $ckpt_directory/$model_ckpt_name/ 'RS_NDS_from_2.json*.tfrecord' $rnn_hidden_neurons $keep_prob $learning_rate $batch_size $char_embedding_size $char_rnn_hidden_neurons $joint_embedding_size $epochs $save_checkpoint_after --use_mention --use_clean
# echo "Testing a FNET model on dev data."
# python Fine-ET/src/main_fnet_test.py $ckpt_directory/$model_ckpt_name/ $data_directory/fner_dev.json_0.tfrecord
# echo "Testing a FNET model on dev data with entities detected by a Fine-ED model. (Optional)"
# python Fine-ET/src/main_fnet_test.py $ckpt_directory/$model_ckpt_name/ $data_directory/fner_dev_lstm_crf.json_0.tfrecord
# echo "Testing a FNET model on test data."
# python Fine-ET/src/main_fnet_test.py $ckpt_directory/$model_ckpt_name/ $data_directory/fner_test.json_0.tfrecord
# echo "Testing a FNET model on test data with entities detected by a Fine-ED model. (Optional)"
# python Fine-ET/src/main_fnet_test.py $ckpt_directory/$model_ckpt_name/ $data_directory/fner_test_lstm_crf.json_0.tfrecord
# echo "Testing a FNET model on figer gold data."
# python Fine-ET/src/main_fnet_test.py $ckpt_directory/$model_ckpt_name/ $data_directory/figer_gold.json_0.tfrecord
# echo "Testing a FNET model on figer data with entities detected by a Fine-ED model. (Optional)"
# python Fine-ET/src/main_fnet_test.py $ckpt_directory/$model_ckpt_name/ $data_directory/figer_gold_lstm_crf.json_0.tfrecord
# The final_result file contains the result on the development set based on the strict, macro and micro F1 metrics.
# echo "Report results FNER dev data."
# bash Fine-ET/src/scripts/report_result_fnet.bash $ckpt_directory/$model_ckpt_name/fner_dev.json_0.tfrecord/ ../../datasets/1k-WFB-g/fner_dev.json 0 > $ckpt_directory/$model_ckpt_name/fner_dev.json_0.tfrecord/final_result.txt
# echo "Report results FNER dev data with entities detected by a Fine-ED model. (Optional)"
# bash Fine-ET/src/scripts/report_result_fnet.bash $ckpt_directory/$model_ckpt_name/fner_dev_lstm_crf.json_0.tfrecord/ ../../datasets/1k-WFB-g/fner_dev.json 0 > $ckpt_directory/$model_ckpt_name/fner_dev_lstm_crf.json_0.tfrecord/final_result.txt
# The final_result file contains the result on the test set based on the strict, macro and micro F1 metrics.
# echo "Report results FNER eval data."
# bash Fine-ET/src/scripts/report_result_fnet.bash $ckpt_directory/$model_ckpt_name/fner_test.json_0.tfrecord/ ../../datasets/1k-WFB-g/fner_test.json 0 > $ckpt_directory/$model_ckpt_name/fner_test.json_0.tfrecord/final_result.txt
# echo "Report results FNER eval data with entities detected by a Fine-ED model. (Optional)"
# bash Fine-ET/src/scripts/report_result_fnet.bash $ckpt_directory/$model_ckpt_name/fner_test_lstm_crf.json_0.tfrecord/ ../../datasets/1k-WFB-g/fner_test.json 0 > $ckpt_directory/$model_ckpt_name/fner_test_lstm_crf.json_0.tfrecord/final_result.txt
# echo "Report results figer data."
# bash Fine-ET/src/scripts/report_result_fnet.bash $ckpt_directory/$model_ckpt_name/figer_gold.json_0.tfrecord/ ../../datasets/figer_gold.json 0 ../../datasets/label_patch_figer_to_fner.txt > $ckpt_directory/$model_ckpt_name/figer_gold.json_0.tfrecord/final_result.txt
# echo "Report results figer data with entities detected by a Fine-ED model. (Optional)"
# bash Fine-ET/src/scripts/report_result_fnet.bash $ckpt_directory/$model_ckpt_name/figer_gold_lstm_crf.json_0.tfrecord/ ../../datasets/figer_gold.json 0 ../../datasets/label_patch_figer_to_fner.txt > $ckpt_directory/$model_ckpt_name/figer_gold_lstm_crf.json_0.tfrecord/final_result.txt
done
|
#!/bin/bash
set -e
BLUE='\033[1;34m'
NC='\033[0m'
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
python_dir="$script_dir/occlum_instance/image/opt/python-occlum"
cd occlum_instance && rm -rf image
copy_bom -f ../pytorch.yaml --root image --include-dir /opt/occlum/etc/template
if [ ! -d $python_dir ];then
echo "Error: cannot stat '$python_dir' directory"
exit 1
fi
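# Enlarge the enclave resource limits (user space, kernel heap, default mmap) and set
# PYTHONHOME so the PyTorch demo fits inside the Occlum instance; the sizes used below
# are generous defaults rather than hard requirements.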
new_json="$(jq '.resource_limits.user_space_size = "6000MB" |
.resource_limits.kernel_space_heap_size = "256MB" |
.process.default_mmap_size = "4000MB" |
.env.default += ["PYTHONHOME=/opt/python-occlum"]' Occlum.json)" && \
echo "${new_json}" > Occlum.json
occlum build
# Run the python demo
echo -e "${BLUE}occlum run /bin/python3 demo.py${NC}"
occlum run /bin/python3 demo.py
|
# set the android paths globally
export ANDROID_HOME="/opt/android-sdk-linux"
export PATH=$PATH:$ANDROID_HOME/tools
export PATH=$PATH:$ANDROID_HOME/platform-tools
export PATH=$PATH:$ANDROID_HOME/build-tools/24.0.0
|
#!/bin/bash
# Hi!
# If you're reading this, you're probably interested in what's
# going on within this script. We've provided what we hope are useful
# comments inline, as well as color-coded relevant shell output.
# We hope it's useful for you, but if you have any questions or suggestions
# please open an issue on https://github.com/MicrosoftDocs/mslearn-aspnet-core.
#
## Start
cd ~
# dotnet SDK version
declare -x dotnetSdkVersion="3.1.403"
# Module name
declare moduleName="microservices-logging-aspnet-core"
# Any other declarations we need
declare -x gitBranch="live"
declare initScript=https://raw.githubusercontent.com/MicrosoftDocs/mslearn-aspnet-core/$gitBranch/infrastructure/scripts/initenvironment.sh
declare suppressAzureResources=true
declare rootLocation=~/clouddrive
declare editorHomeLocation=$rootLocation/aspnet-learn/src
if [ -d "$rootLocation/aspnet-learn" ]; then
echo "$rootLocation/aspnet-learn/ already exists!"
echo " "
echo "Before running this script, please remove or rename the existing $rootLocation/aspnet-learn/ directory as follows:"
echo "Remove: rm -r $rootLocation/aspnet-learn/"
echo "Rename: mv $rootLocation/aspnet-learn/ ~/clouddrive/new-name-here/ "
echo " "
else
# Backup .bashrc
cp ~/.bashrc ~/.bashrc.bak.$moduleName
# Grab and run initenvironment.sh
. <(wget -q -O - $initScript)
# Download
downloadStarterApp
# Set location to ~/clouddrive
cd $editorHomeLocation
# Launch editor so the user can see the code
code .
# Run eshop-learn quickstart to deploy to AKS
$editorHomeLocation/deploy/k8s/quickstart.sh --resource-group eshop-learn-rg --location centralus
# Create ACR resource
$editorHomeLocation/deploy/k8s/create-acr.sh
# Display URLs to user
cat ~/clouddrive/aspnet-learn/deployment-urls.txt
fi
|
#!/bin/bash
set -e
source $(dirname $0)/lib.sh
req_env_var "
USER $USER
HOME $HOME
ENVLIB $ENVLIB
SCRIPT_BASE $SCRIPT_BASE
CIRRUS_REPO_NAME $CIRRUS_REPO_NAME
CIRRUS_CHANGE_IN_REPO $CIRRUS_CHANGE_IN_REPO
CIRRUS_WORKING_DIR $CIRRUS_WORKING_DIR
"
[[ "$SHELL" =~ "bash" ]] || chsh -s /bin/bash
cd "$CIRRUS_WORKING_DIR" # for clarity of initial conditions
# Verify basic dependencies
for depbin in gcc rsync sha256sum curl make
do
if ! type -P "$depbin" &> /dev/null
then
echo "***** WARNING: $depbin binary not found in $PATH *****"
fi
done
# Setup env. vars common to all tasks/scripts/platforms and
# ensure they return for every following script execution.
MARK="# Added by $0, manual changes will be lost."
touch "$HOME/$ENVLIB"
if ! grep -q "$MARK" "$HOME/$ENVLIB"
then
cp "$HOME/$ENVLIB" "$HOME/${ENVLIB}_original"
# N/B: Single-quote items evaluated every time, double-quotes only once (right now).
for envstr in \
"$MARK" \
"export SRC=\"$CIRRUS_WORKING_DIR\"" \
"export OS_RELEASE_ID=\"$(os_release_id)\"" \
"export OS_RELEASE_VER=\"$(os_release_ver)\"" \
"export OS_REL_VER=\"$(os_release_id)-$(os_release_ver)\"" \
"export BUILT_IMAGE_SUFFIX=\"-$CIRRUS_REPO_NAME-${CIRRUS_CHANGE_IN_REPO:0:8}\""
do
# Make permanent in later shells, and set in current shell
X=$(echo "$envstr" | tee -a "$HOME/$ENVLIB") && eval "$X" && echo "$X"
done
# Do the same for golang env. vars
go env | while read envline
do
X=$(echo "export $envline" | tee -a "$HOME/$ENVLIB") && eval "$X" && echo "$X"
done
show_env_vars
# Nothing further required on image-builder VM
if ((IMAGE_BUILD))
then
exit 0
fi
# Owner/mode may have changed
setup_gopath
case "$OS_REL_VER" in
fedora-29)
install_testing_deps
build_and_replace_conmon
cd "$CRIO_SRC" # cri-o source
echo "Building binaries required for testing"
ooe.sh make test-binaries
echo "Configuring firewall/networking for integration tests"
ooe.sh iptables -F
ooe.sh iptables -t nat -I POSTROUTING -s 127.0.0.1 ! -d 127.0.0.1 -j MASQUERADE
echo "Setting read_only flag to false"
sudo sed -i 's/read_only = true/read_only = false/g' /etc/crio/crio.conf
echo "Removing nodev flag"
sudo sed -i 's/nodev//g' /etc/containers/storage.conf
iptables -L -n -v
;;
*) bad_os_id_ver ;;
esac
# Verify nothing was set empty
# N/B: Some multi-user environment variables are pre-cooked into /etc/environ
# (see setup_gopath in $SCRIPT_BASE/lib.sh)
req_env_var "
OS_RELEASE_ID $OS_RELEASE_ID
OS_RELEASE_VER $OS_RELEASE_VER
OS_REL_VER $OS_REL_VER
BUILT_IMAGE_SUFFIX $BUILT_IMAGE_SUFFIX
"
fi
echo "***** TESTING STARTS: $(date --iso-8601=seconds)"
|
export MALI_LIBS=/scratch/ZCU102_PL/ZynqMP/build/tmp/sysroots/plnx_aarch64/usr/lib
export MALI_INCLUDE=/scratch/ZCU102_PL/ZynqMP/build/tmp/sysroots/plnx_aarch64/usr/include
export ARM_CROSS_COMPILER_PATH=/proj/petalinux/petalinux-v2017.1_daily_latest/petalinux-v2017.1-final/tools/linux-i386/aarch64-linux-gnu/bin/aarch64-linux-gnu-g++
export ARM_CROSS_COMPILER_PATH_C=/proj/petalinux/petalinux-v2017.1_daily_latest/petalinux-v2017.1-final/tools/linux-i386/aarch64-linux-gnu/bin/aarch64-linux-gnu-gcc
export CXXFLAGS=-DENABLE_FBDEV
|
#!/bin/bash -ve
./node_modules/.bin/marionette-mocha \
--host-log stdout \
--host $(pwd)/node_modules/graphene-marionette-runner/host/index.js \
--runtime ./graphene/Contents/MacOS/graphene \
--start-manifest http://localhost:6060/manifest.webapp \
$(find test -name '*_test.js') $@;
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
find . -regex '\.\/[P|T].CL-.*\.sh' -exec {} -o results-Latency/ --csv \;
|
#!/bin/sh
echo "/patched-lib" > /etc/ld.so.conf.d/000-patched-lib.conf && \
mkdir -p "/patched-lib" && \
PATCH_OUTPUT_DIR=/patched-lib /usr/local/bin/patch.sh && \
cd /patched-lib && \
for f in * ; do
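# Strip everything after ".so" (e.g. libfoo.so.440.33 -> libfoo.so) and create the
# unversioned .so/.so.1 symlinks the dynamic linker expects, unless they already exist.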
suffix="${f##*.so}"
name="$(basename "$f" "$suffix")"
[ -h "$name" ] || ln -sf "$f" "$name"
[ -h "$name.1" ] || ln -sf "$f" "$name.1"
done && \
ldconfig
[ "$OLDPWD" ] && cd -
exec "$@"
|
export NGPUS=8
python3 -m torch.distributed.launch --nproc_per_node=$NGPUS tools/train_net.py --config-file configs/e2e_faster_rcnn_DETNAS_COCO_FPN_300M_search.yaml OUTPUT_DIR models/DETNAS_COCO_FPN_300M_1x_search
|
# === === === === ===
# Capture stdout and stderr to log files
## I don't totally understand this line. Sourced from https://stackoverflow.com/a/2364896/100596 as of 2020-07-05:
exec 3>&1 4>&2
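## FDs 3 and 4 hold copies of the original stdout and stderr so they can be restored
## after the redirections below send all output to the log files.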
## Redirect stdout to log
exec 1>/tmp/dotfiles-install-stdout.log
## Redirect stderr to log
exec 2>/tmp/dotfiles-install-stderr.log
# === === === === ===
# Make sure PowerShell is installed
sudo chmod a+x ./posix/require-pwsh.sh
. ./posix/require-pwsh.sh
# === === === === ===
# Make sure Git is installed
sudo chmod a+x ./posix/require-git.sh
. ./posix/require-git.sh
# === === === === ===
# Restore stdout and stderr
exec 1>&3 2>&4
## Close the unused descriptors
exec 3>&- 4>&-
|
#!/usr/bin/env bash
DIR="$(cd "$(dirname "$0")" >/dev/null 2>&1 && pwd)"
GO_SRC="$DIR/.."
rm -rf "$GO_SRC/.go"
|
#!/bin/bash
# Get script arguments
PARAMS=""
while (("$#")); do
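# Split "--flag=value" style arguments into "--flag" "value" so both forms are accepted.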
[[ $1 == --*=* ]] && set -- "${1%%=*}" "${1#*=}" "${@:2}"
case "$1" in
--organization-url)
if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then
organization_url=$2
shift 2
else
echo "Error: Argument for $1 is missing" >&2
exit 1
fi
;;
--project-name)
if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then
project_name=$2
shift 2
else
echo "Error: Argument for $1 is missing" >&2
exit 1
fi
;;
--repository-name)
if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then
repository_name=$2
shift 2
else
echo "Error: Argument for $1 is missing" >&2
exit 1
fi
;;
--pull-request-title)
if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then
pull_request_title=$2
shift 2
else
echo "Error: Argument for $1 is missing" >&2
exit 1
fi
;;
--branch-name)
if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then
branch_name=$2
shift 2
else
echo "Error: Argument for $1 is missing" >&2
exit 1
fi
;;
--source-folder-path)
if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then
source_folder_path=$2
shift 2
else
echo "Error: Argument for $1 is missing" >&2
exit 1
fi
;;
--temporary-branch-name)
if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then
temporary_branch_name=$2
shift 2
else
echo "Error: Argument for $1 is missing" >&2
exit 1
fi
;;
--temporary-folder-path)
if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then
temporary_folder_path=$2
shift 2
else
echo "Error: Argument for $1 is missing" >&2
exit 1
fi
;;
--overwrite-subfolder)
if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then
overwrite_subfolder=$2
shift 2
else
echo "Error: Argument for $1 is missing" >&2
exit 1
fi
;;
-*) # unsupported flags
echo "Error: Unsupported flag $1" >&2
exit 1
;;
*) # preserve positional arguments
PARAMS="$PARAMS ""$1"""
shift
;;
esac
done
eval set -- "$PARAMS"
set -e -o pipefail
echo "Installing Azure DevOps extension..."
az extension add --name "azure-devops"
az devops configure --defaults organization="${organization_url}" project="${project_name}"
echo "Creating folder ${temporary_folder_path}..."
mkdir -p "${temporary_folder_path}"
echo "Cloning branch ${branch_name}..."
clone_url=$(az repos show --repository "${repository_name}" --query "webUrl" --output tsv)
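# Inject the PAT right after "https://" so git can authenticate non-interactively.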
authenticated_clone_url=${clone_url/\/\////$AZURE_DEVOPS_EXT_PAT@}
git clone --branch "${branch_name}" --depth 1 "${authenticated_clone_url}" "${temporary_folder_path}"
echo "Creating temporary branch ${temporary_branch_name} from ${branch_name}..."
git -C "${temporary_folder_path}" checkout -b "${temporary_branch_name}" "${branch_name}"
echo "Overwrite folder set to $overwrite_subfolder; deleting its contents..."
rm -rfv "${temporary_folder_path:?}/${overwrite_subfolder:?}"/*
echo "Copying source folder ${source_folder_path} contents to temporary folder ${temporary_folder_path}..."
cp -r "${source_folder_path}"/* "${temporary_folder_path}"/
echo "Validating that changes exist to be published..."
if [[ ! $(git -C "${temporary_folder_path}" status --porcelain | head -1) ]]; then
echo "No changes exist to be published."
exit 0
fi
echo "Setting git user information..."
git config --global user.email "[email protected]"
git config --global user.name "Azure Devops agent"
echo "Adding changes..."
git -C "${temporary_folder_path}" add --all
echo "Committing changes..."
git -C "${temporary_folder_path}" commit --message "Initial commit"
echo "Pushing changes..."
git -C "${temporary_folder_path}" push --set-upstream origin "${temporary_branch_name}"
echo "Creating pull request..."
az repos pr create --source-branch "${temporary_branch_name}" --target-branch "${branch_name}" --title "${pull_request_title}" --squash --delete-source-branch "true" --repository "${repository_name}"
echo "Deleting temporary folder contents..."
rm -rf "${temporary_folder_path:?}"/* "${temporary_folder_path:?}"/.[!.]*
echo "Execution complete." |
export DETECTRON2_DATASETS=~/datasets
python train_detectron.py --num-gpus 2 \
--config-file ./detectron_configs/COCO-Keypoints/keypoint_rcnn_resnet50_triplet_attention_FPN_1x.yaml
#--eval-only MODEL.WEIGHTS ./output/model_final.pth
|
#!/bin/bash
# Copyright 2021 VMware Tanzu Community Edition contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -e
MY_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
TCE_REPO_PATH=${MY_DIR}/../../../../..
TCE_VERSION="v0.9.1"
echo "Installing TCE ${TCE_VERSION}"
BUILD_OS=$(uname -s | tr '[:upper:]' '[:lower:]')
TCE_RELEASE_TAR_BALL="tce-${BUILD_OS}-amd64-${TCE_VERSION}.tar.gz"
TCE_RELEASE_DIR="tce-${BUILD_OS}-amd64-${TCE_VERSION}"
INSTALLATION_DIR="${MY_DIR}/tce-installation"
"${TCE_REPO_PATH}"/hack/get-tce-release.sh ${TCE_VERSION} "${BUILD_OS}"-amd64
mkdir -p "${INSTALLATION_DIR}"
tar xzvf "${TCE_RELEASE_TAR_BALL}" --directory="${INSTALLATION_DIR}"
"${INSTALLATION_DIR}"/"${TCE_RELEASE_DIR}"/install.sh || { error "Unexpected failure during TCE installation"; exit 1; }
echo "TCE version: "
tanzu standalone-cluster version || { error "Unexpected failure during TCE installation"; exit 1; }
TANZU_DIAGNOSTICS_PLUGIN_DIR=${MY_DIR}/..
TANZU_DIAGNOSTICS_BIN=${MY_DIR}/tanzu-diagnostics-e2e-bin
echo "Entering ${TANZU_DIAGNOSTICS_PLUGIN_DIR} directory to build tanzu diagnostics plugin"
pushd "${TANZU_DIAGNOSTICS_PLUGIN_DIR}"
go build -o "${TANZU_DIAGNOSTICS_BIN}" -v
echo "Finished building tanzu diagnostics plugin. Leaving ${TANZU_DIAGNOSTICS_PLUGIN_DIR}"
popd
|
#!/bin/bash
./configure --prefix=${PREFIX}
make -j${CPU_COUNT}
make check -j${CPU_COUNT}
make install -j${CPU_COUNT}
|
#!/bin/sh
# This script requires google's cloud_sql_proxy on your PATH. One way to set this up:
# $ brew cask install google-cloud-sdk
# $ gcloud components install cloud_sql_proxy
# $ PATH="$PATH:/usr/local/Caskroom/google-cloud-sdk/latest/google-cloud-sdk/bin"
# OR
# $ ln -s /usr/local/Caskroom/google-cloud-sdk/latest/google-cloud-sdk/bin/cloud_sql_proxy /usr/local/bin/cloud_sql_proxy
#
# Use this script to connect to cloud Postgres for a specific Catalog instance.
# For example, to connect to the dev catalog database, run:
# $ ENV=dev ./db-connect.sh
#
# The proxy will continue to run until you quit it using ^C.
#
# The default port used is 5431, to avoid conflicting with a locally running postgres which
# defaults to port 5432. Use the environment variable PORT to override this setting.
: "${ENV:?}"
PORT=${PORT:-5431}
VAULT_PATH="secret/dsde/terra/kernel/${ENV}/${ENV}/catalog/postgres"
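# Build the cloud_sql_proxy -instances value, "<project>:<region>:<name>=tcp:<port>",
# from the connection details stored in Vault.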
INSTANCE=$(vault read -field=data -format=json "${VAULT_PATH}/instance" |
jq -r '"\(.project):\(.region):\(.name)"')=tcp:$PORT
DB_CREDS_DATA=$(vault read -field=data -format=json "${VAULT_PATH}/db-creds")
JDBC_URL=jdbc:postgresql://localhost:$PORT/$(echo "${DB_CREDS_DATA}" |
jq -r '"\(.db)?user=\(.username)&password=\(.password)"')
PSQL_COMMAND=$(echo "${DB_CREDS_DATA}" |
jq -r '"psql postgresql://\(.username):\(.password)@localhost/\(.db)\\?port="')$PORT
echo "Starting a proxy for $ENV. Connect using: \"$JDBC_URL\" or run: \"$PSQL_COMMAND\""
cloud_sql_proxy -instances="${INSTANCE}" -dir=/tmp
|
#!/bin/bash
exec /usr/local/bin/preoomkiller &
while :; do sleep 1; done
|
#!/bin/bash
# Copyright 2017-2020 Authors of Cilium
# SPDX-License-Identifier: Apache-2.0
set -o errexit
set -o pipefail
set -o nounset
MAKER_IMAGE="${MAKER_IMAGE:-docker.io/cilium/image-maker:3e2ea4f151593908c362307a1de22e68610d955c}"
if [ "$#" -ne 1 ] ; then
echo "$0 supports exactly 1 argument"
exit 1
fi
root_dir="$(git rev-parse --show-toplevel)"
if [ -z "${MAKER_CONTAINER+x}" ] ; then
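# Not yet inside the image-maker container: re-exec this script inside it.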
exec docker run --env DOCKER_HUB_PUBLIC_ACCESS_ONLY=true --env QUAY_PUBLIC_ACCESS_ONLY=true --rm --volume "${root_dir}:/src" --workdir /src "${MAKER_IMAGE}" "/src/scripts/$(basename "${0}")" "${1}"
fi
crane digest "${1}" 2> /dev/null
|
#!/usr/bin/env bash
# Copyright 2017 Hainan Xu
# Apache 2.0
# This script rescores lattices with KALDI RNNLM trained on reversed text.
# The input directory should already be rescored with a forward RNNLM, preferably
# with the pruned algorithm, since smaller lattices make rescoring much faster.
# An example of the forward pruned rescoring is at
# egs/swbd/s5c/local/rnnlm/run_tdnn_lstm.sh
# One example script for backward RNNLM rescoring is at
# egs/swbd/s5c/local/rnnlm/run_tdnn_lstm_back.sh
# Begin configuration section.
cmd=run.pl
skip_scoring=false
max_ngram_order=4 # Approximate the lattice-rescoring by limiting the max-ngram-order
# if it's set, it merges histories in the lattice if they share
# the same ngram history and this prevents the lattice from
# exploding exponentially. Details of the n-gram approximation
# method are described in section 2.3 of the paper
# http://www.danielpovey.com/files/2018_icassp_lattice_pruning.pdf
weight=0.5 # Interpolation weight for RNNLM.
normalize=false # If true, we add a normalization step to the output of the RNNLM
# so that it adds up to *exactly* 1. Note that this is not necessary
# as in our RNNLM setup, a properly trained network would automatically
# have its normalization term close to 1. The details of this
# could be found at http://www.danielpovey.com/files/2018_icassp_rnnlm.pdf
scoring_opts=
# End configuration section.
echo "$0 $@" # Print the command line for logging
. ./utils/parse_options.sh
if [ $# != 5 ]; then
echo "Does language model rescoring of lattices (remove old LM, add new LM)"
echo "with Kaldi RNNLM trained on reversed text. See comments in file for details"
echo ""
echo "Usage: $0 [options] <old-lang-dir> <rnnlm-dir> \\"
echo " <data-dir> <input-decode-dir> <output-decode-dir>"
echo " e.g.: $0 data/lang_tg exp/rnnlm_lstm/ data/test \\"
echo " exp/tri3/test_rnnlm_forward exp/tri3/test_rnnlm_bidirection"
echo "options: [--cmd (run.pl|queue.pl [queue opts])]"
exit 1;
fi
[ -f path.sh ] && . ./path.sh;
oldlang=$1
rnnlm_dir=$2
data=$3
indir=$4
outdir=$5
oldlm=$oldlang/G.fst
if [ ! -f $oldlm ]; then
echo "$0: file $oldlm not found; using $oldlang/G.carpa"
oldlm=$oldlang/G.carpa
fi
[ ! -f $oldlm ] && echo "$0: Missing file $oldlm" && exit 1;
[ ! -f $rnnlm_dir/final.raw ] && echo "$0: Missing file $rnnlm_dir/final.raw" && exit 1;
[ ! -f $rnnlm_dir/feat_embedding.final.mat ] && [ ! -f $rnnlm_dir/word_embedding.final.mat ] && echo "$0: Missing word embedding file" && exit 1;
[ ! -f $oldlang/words.txt ] &&\
echo "$0: Missing file $oldlang/words.txt" && exit 1;
! ls $indir/lat.*.gz >/dev/null &&\
echo "$0: No lattices input directory $indir" && exit 1;
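# Sanity-check that the interpolation weight lies in [0, 1]; the awk program exits
# non-zero otherwise.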
awk -v n=$0 -v w=$weight 'BEGIN {if (w < 0 || w > 1) {
print n": Interpolation weight should be in the range of [0, 1]"; exit 1;}}' \
|| exit 1;
normalize_opt=
if $normalize; then
normalize_opt="--normalize-probs=true"
fi
oldlm_command="fstproject --project_output=true $oldlm |"
special_symbol_opts=$(cat $rnnlm_dir/special_symbol_opts.txt)
word_embedding=
if [ -f $rnnlm_dir/word_embedding.final.mat ]; then
word_embedding=$rnnlm_dir/word_embedding.final.mat
else
word_embedding="'rnnlm-get-word-embedding $rnnlm_dir/word_feats.txt $rnnlm_dir/feat_embedding.final.mat -|'"
fi
mkdir -p $outdir/log
nj=`cat $indir/num_jobs` || exit 1;
cp $indir/num_jobs $outdir
# In order to rescore with a backward RNNLM, we first remove the original LM
# scores with lattice-lmrescore, before reversing the lattices
oldlm_weight=$(perl -e "print -1.0 * $weight;")
if [ "$oldlm" == "$oldlang/G.fst" ]; then
$cmd JOB=1:$nj $outdir/log/rescorelm.JOB.log \
lattice-lmrescore --lm-scale=$oldlm_weight \
"ark:gunzip -c $indir/lat.JOB.gz|" "$oldlm_command" ark:- \| \
lattice-reverse ark:- ark:- \| \
lattice-lmrescore-kaldi-rnnlm --lm-scale=$weight $special_symbol_opts \
--max-ngram-order=$max_ngram_order $normalize_opt \
$word_embedding "$rnnlm_dir/final.raw" ark:- ark:- \| \
lattice-reverse ark:- "ark,t:|gzip -c>$outdir/lat.JOB.gz" || exit 1;
else
$cmd JOB=1:$nj $outdir/log/rescorelm.JOB.log \
lattice-lmrescore-const-arpa --lm-scale=$oldlm_weight \
"ark:gunzip -c $indir/lat.JOB.gz|" "$oldlm" ark:- \| \
lattice-reverse ark:- ark:- \| \
lattice-lmrescore-kaldi-rnnlm --lm-scale=$weight $special_symbol_opts \
--max-ngram-order=$max_ngram_order $normalize_opt \
$word_embedding "$rnnlm_dir/final.raw" ark:- ark:- \| \
lattice-reverse ark:- "ark,t:|gzip -c>$outdir/lat.JOB.gz" || exit 1;
fi
if ! $skip_scoring ; then
err_msg="$0: Not scoring because local/score.sh does not exist or not executable."
[ ! -x local/score.sh ] && echo $err_msg && exit 1;
echo local/score.sh --cmd "$cmd" $scoring_opts $data $oldlang $outdir
local/score.sh --cmd "$cmd" $scoring_opts $data $oldlang $outdir
else
echo "$0: Not scoring because --skip-scoring was specified."
fi
exit 0;
|
This dataset is a collection of bash programs from various GitHub repositories and open source projects. The dataset might contain harmful code.