# "Install Saltstack":#saltstack
# "Single host install using the provision.sh script":#single_host
-# "Local testing Arvados in a Vagrant box":#vagrant
# "DNS configuration":#final_steps
# "Initial user and login":#initial_user
+# "Test the installed cluster running a simple workflow":#test_install
h2(#saltstack). Install Saltstack
* User: 'admin'
* Password: 'password'
* Email: 'admin@arva2.arv.local'
+
+h2(#test_install). Test the installed cluster running a simple workflow
+
+The @provision.sh@ script saves a simple example test workflow in the @/tmp/cluster_tests@ directory. If you want to run it, just change to that directory and run:
+
+<notextile>
+<pre><code>cd /tmp/cluster_tests
+./run-test.sh
+</code></pre>
+</notextile>
+
+It will create a test user, upload a small workflow and run it. If everything goes OK, the output should be similar to this (some output was shortened for clarity):
+
+<notextile>
+<pre><code>Creating Arvados Standard Docker Images project
+Arvados project uuid is 'arva2-j7d0g-0prd8cjlk6kfl7y'
+{
+ ...
+ "uuid":"arva2-o0j2j-n4zu4cak5iifq2a",
+ "owner_uuid":"arva2-tpzed-000000000000000",
+ ...
+}
+Uploading arvados/jobs' docker image to the project
+2.1.1: Pulling from arvados/jobs
+8559a31e96f4: Pulling fs layer
+...
+Status: Downloaded newer image for arvados/jobs:2.1.1
+docker.io/arvados/jobs:2.1.1
+2020-11-23 21:43:39 arvados.arv_put[32678] INFO: Creating new cache file at /home/vagrant/.cache/arvados/arv-put/c59256eda1829281424c80f588c7cc4d
+2020-11-23 21:43:46 arvados.arv_put[32678] INFO: Collection saved as 'Docker image arvados jobs:2.1.1 sha256:0dd50'
+arva2-4zz18-1u5pvbld7cvxuy2
+Creating initial user ('admin')
+Setting up user ('admin')
+{
+ "items":[
+ {
+ ...
+ "owner_uuid":"arva2-tpzed-000000000000000",
+ ...
+ "uuid":"arva2-o0j2j-1ownrdne0ok9iox"
+ },
+ {
+ ...
+ "owner_uuid":"arva2-tpzed-000000000000000",
+ ...
+ "uuid":"arva2-o0j2j-1zbeyhcwxc1tvb7"
+ },
+ {
+ ...
+ "email":"admin@arva2.arv.local",
+ ...
+ "owner_uuid":"arva2-tpzed-000000000000000",
+ ...
+ "username":"admin",
+ "uuid":"arva2-tpzed-3wrm93zmzpshrq2",
+ ...
+ }
+ ],
+ "kind":"arvados#HashList"
+}
+Activating user 'admin'
+{
+ ...
+ "email":"admin@arva2.arv.local",
+ ...
+ "username":"admin",
+ "uuid":"arva2-tpzed-3wrm93zmzpshrq2",
+ ...
+}
+Running test CWL workflow
+INFO /usr/bin/cwl-runner 2.1.1, arvados-python-client 2.1.1, cwltool 3.0.20200807132242
+INFO Resolved 'hasher-workflow.cwl' to 'file:///tmp/cluster_tests/hasher-workflow.cwl'
+...
+INFO Using cluster arva2 (https://arva2.arv.local:8443/)
+INFO Upload local files: "test.txt"
+INFO Uploaded to ea34d971b71d5536b4f6b7d6c69dc7f6+50 (arva2-4zz18-c8uvwqdry4r8jao)
+INFO Using collection cache size 256 MiB
+INFO [container hasher-workflow.cwl] submitted container_request arva2-xvhdp-v1bkywd58gyocwm
+INFO [container hasher-workflow.cwl] arva2-xvhdp-v1bkywd58gyocwm is Final
+INFO Overall process status is success
+INFO Final output collection d6c69a88147dde9d52a418d50ef788df+123
+{
+ "hasher_out": {
+ "basename": "hasher3.md5sum.txt",
+ "class": "File",
+ "location": "keep:d6c69a88147dde9d52a418d50ef788df+123/hasher3.md5sum.txt",
+ "size": 95
+ }
+}
+INFO Final process status is success
+</code></pre>
+</notextile>
# "Vagrant":#vagrant
# "DNS configuration":#final_steps
# "Initial user and login":#initial_user
+# "Test the installed cluster running a simple workflow":#test_install
h2(#vagrant). Vagrant
* User: 'admin'
* Password: 'password'
* Email: 'admin@arva2.arv.local'
+
+h2(#test_install). Test the installed cluster running a simple workflow
+
+As documented in the <a href="{{ site.baseurl }}/install/salt-single-host.html">Single Host installation</a> page, you can run a test workflow to verify the installation finished correctly. To do so, you can follow these steps:
+
+<notextile>
+<pre><code>vagrant ssh</code></pre>
+</notextile>
+
+and once in the instance:
+
+<notextile>
+<pre><code>cd /tmp/cluster_tests
+./run-test.sh
+</code></pre>
+</notextile>
config.vm.define "arvados" do |arv|
arv.vm.box = "bento/debian-10"
- arv.vm.hostname = "arva2.arv.local"
+ arv.vm.hostname = "vagrant.local"
+ # CPU/RAM
+ config.vm.provider :virtualbox do |v|
+ v.memory = 2048
+ v.cpus = 2
+ end
+
# Networking
arv.vm.network "forwarded_port", guest: 8443, host: 8443
arv.vm.network "forwarded_port", guest: 25100, host: 25100
arv.vm.network "forwarded_port", guest: 8001, host: 8001
arv.vm.network "forwarded_port", guest: 8000, host: 8000
arv.vm.network "forwarded_port", guest: 3001, host: 3001
- # config.vm.network "private_network", ip: "192.168.33.10"
- # arv.vm.synced_folder "salt_pillars", "/srv/pillars",
- # create: true
arv.vm.provision "shell",
path: "provision.sh",
args: [
+ # "--test",
"--vagrant",
"--ssl-port=8443"
].join(" ")
set -o pipefail
+# capture the directory that the script is running from
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
usage() {
echo >&2
echo >&2 "Usage: $0 [-h] [-h]"
echo >&2
echo >&2 "$0 options:"
- echo >&2 " -v, --vagrant Run in vagrant and use the /vagrant shared dir"
+ echo >&2 " -d, --debug Run salt installation in debug mode"
echo >&2 " -p <N>, --ssl-port <N> SSL port to use for the web applications"
+ echo >&2 " -t, --test Test installation running a CWL workflow"
echo >&2 " -h, --help Display this help and exit"
+ echo >&2 " -v, --vagrant Run in vagrant and use the /vagrant shared dir"
echo >&2
}
arguments() {
# NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
- TEMP=`getopt -o hvp: \
- --long help,vagrant,ssl-port: \
+ TEMP=`getopt -o dhp:tv \
+ --long debug,help,ssl-port:,test,vagrant \
-n "$0" -- "$@"`
if [ $? != 0 ] ; then echo "GNU getopt missing? Use -h for help"; exit 1 ; fi
while [ $# -ge 1 ]; do
case $1 in
+ -d | --debug)
+ LOG_LEVEL="debug"
+ shift
+ ;;
+ -t | --test)
+ TEST="yes"
+ shift
+ ;;
-v | --vagrant)
VAGRANT="yes"
shift
done
}
+LOG_LEVEL="info"
HOST_SSL_PORT=443
+TESTS_DIR="tests"
arguments $@
P_DIR="/srv/pillars"
apt-get update
-apt-get install -y curl git
+apt-get install -y curl git jq
dpkg -l |grep salt-minion
if [ ${?} -eq 0 ]; then
cat > ${S_DIR}/top.sls << EOFTSLS
base:
'*':
+ - example_single_host_host_entries
- example_add_snakeoil_certs
- locale
- nginx.passenger
# Get the formula and dependencies
cd ${F_DIR} || exit 1
for f in postgres arvados nginx docker locale; do
- git clone https://github.com/saltstack-formulas/${f}-formula.git
+ git clone https://github.com/netmanagers/${f}-formula.git
done
if [ "x${BRANCH}" != "x" ]; then
if [ "x${VAGRANT}" = "xyes" ]; then
SOURCE_PILLARS_DIR="/vagrant/${CONFIG_DIR}"
+ TESTS_DIR="/vagrant/${TESTS_DIR}"
else
- SOURCE_PILLARS_DIR="./${CONFIG_DIR}"
+ SOURCE_PILLARS_DIR="${SCRIPT_DIR}/${CONFIG_DIR}"
+ TESTS_DIR="${SCRIPT_DIR}/${TESTS_DIR}"
fi
-# Replace cluster and domain name in the example pillars
+# Replace cluster and domain name in the example pillars and test files
for f in ${SOURCE_PILLARS_DIR}/*; do
- # sed "s/example.net/${DOMAIN}/g; s/fixme/${CLUSTER}/g" \
- sed "s/__DOMAIN__/${DOMAIN}/g;
- s/__CLUSTER__/${CLUSTER}/g;
+ sed "s/__CLUSTER__/${CLUSTER}/g;
+ s/__DOMAIN__/${DOMAIN}/g;
s/__RELEASE__/${RELEASE}/g;
s/__HOST_SSL_PORT__/${HOST_SSL_PORT}/g;
s/__GUEST_SSL_PORT__/${GUEST_SSL_PORT}/g;
${f} > ${P_DIR}/$(basename ${f})
done
-# Let's write an /etc/hosts file that points all the hosts to localhost
-
-echo "127.0.0.2 api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+mkdir -p /tmp/cluster_tests
+# Replace cluster and domain name in the test files
+for f in ${TESTS_DIR}/*; do
+ sed "s/__CLUSTER__/${CLUSTER}/g;
+ s/__DOMAIN__/${DOMAIN}/g;
+ s/__HOST_SSL_PORT__/${HOST_SSL_PORT}/g;
+ s/__INITIAL_USER__/${INITIAL_USER}/g;
+ s/__INITIAL_USER_EMAIL__/${INITIAL_USER_EMAIL}/g;
+ s/__INITIAL_USER_PASSWORD__/${INITIAL_USER_PASSWORD}/g" \
+ ${f} > /tmp/cluster_tests/$(basename ${f})
+done
+chmod 755 /tmp/cluster_tests/run-test.sh
# FIXME! #16992 Temporary fix for psql call in arvados-api-server
if [ -e /root/.psqlrc ]; then
# END FIXME! #16992 Temporary fix for psql call in arvados-api-server
# Now run the install
-salt-call --local state.apply -l debug
+salt-call --local state.apply -l ${LOG_LEVEL}
# FIXME! #16992 Temporary fix for psql call in arvados-api-server
if [ "x${DELETE_PSQL}" = "xyes" ]; then
fi
if [ "x${RESTORE_PSQL}" = "xyes" ]; then
- echo "Restroting .psql file"
+ echo "Restoring .psql file"
mv -v /root/.psqlrc.provision.backup /root/.psqlrc
fi
# END FIXME! #16992 Temporary fix for psql call in arvados-api-server
+
+# If running in a vagrant VM, add default user to docker group
+if [ "x${VAGRANT}" = "xyes" ]; then
+ usermod -a -G docker vagrant
+fi
+
+# Test that the installation finished correctly
+if [ "x${TEST}" = "xyes" ]; then
+ cd /tmp/cluster_tests
+ ./run-test.sh
+fi
### TOKENS
tokens:
- system_root: changeme_system_root_token
- management: changeme_management_token
- rails_secret: changeme_rails_secret_token
- anonymous_user: changeme_anonymous_user_token
+ system_root: changemesystemroottoken
+ management: changememanagementtoken
+ rails_secret: changemerailssecrettoken
+ anonymous_user: changemeanonymoususertoken
### KEYS
secrets:
- blob_signing_key: changeme_blob_signing_key
- workbench_secret_key: changeme_workbench_secret_key
- dispatcher_access_key: changeme_dispatcher_access_key
- dispatcher_secret_key: changeme_dispatcher_secret_key
- keep_access_key: changeme_keep_access_key
- keep_secret_key: changeme_keep_secret_key
+ blob_signing_key: changemeblobsigningkey
+ workbench_secret_key: changemeworkbenchsecretkey
+ dispatcher_access_key: changemedispatcheraccesskey
+  dispatcher_secret_key: changemedispatchersecretkey
+ keep_access_key: changemekeepaccesskey
+ keep_secret_key: changemekeepsecretkey
Login:
Test:
Controller:
ExternalURL: https://__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
InternalURLs:
- http://127.0.0.2:8003: {}
+ http://controller.internal:8003: {}
DispatchCloud:
InternalURLs:
http://__CLUSTER__.__DOMAIN__:9006: {}
Keepproxy:
ExternalURL: https://keep.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
InternalURLs:
- http://127.0.0.2:25100: {}
+ http://keep.internal:25100: {}
Keepstore:
InternalURLs:
http://keep0.__CLUSTER__.__DOMAIN__:25107: {}
RailsAPI:
InternalURLs:
- http://127.0.0.2:8004: {}
+ http://api.internal:8004: {}
WebDAV:
ExternalURL: https://collections.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
InternalURLs:
- http://127.0.0.2:9002: {}
+ http://collections.internal:9002: {}
WebDAVDownload:
ExternalURL: https://download.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
WebShell:
Websocket:
ExternalURL: wss://ws.__CLUSTER__.__DOMAIN__/websocket
InternalURLs:
- http://127.0.0.2:8005: {}
+ http://ws.internal:8005: {}
Workbench1:
ExternalURL: https://workbench.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
Workbench2:
overwrite: true
config:
- server:
- - listen: '127.0.0.2:8004'
+ - listen: 'api.internal:8004'
- server_name: api
- root: /var/www/arvados-api/current/public
- index: index.html index.htm
default: 1
'127.0.0.0/8': 0
upstream controller_upstream:
- - server: '127.0.0.2:8003 fail_timeout=10s'
+ - server: 'controller.internal:8003 fail_timeout=10s'
### SITES
servers:
### STREAMS
http:
upstream keepproxy_upstream:
- - server: '127.0.0.2:25100 fail_timeout=10s'
+ - server: 'keep.internal:25100 fail_timeout=10s'
servers:
managed:
### STREAMS
http:
upstream collections_downloads_upstream:
- - server: '127.0.0.2:9002 fail_timeout=10s'
+ - server: 'collections.internal:9002 fail_timeout=10s'
servers:
managed:
### STREAMS
http:
upstream webshell_upstream:
- - server: '127.0.0.2:4200 fail_timeout=10s'
+ - server: 'shell.internal:4200 fail_timeout=10s'
### SITES
servers:
### STREAMS
http:
upstream websocket_upstream:
- - server: '127.0.0.2:8005 fail_timeout=10s'
+ - server: 'ws.internal:8005 fail_timeout=10s'
servers:
managed:
### STREAMS
http:
upstream workbench_upstream:
- - server: '127.0.0.2:9000 fail_timeout=10s'
+ - server: 'workbench.internal:9000 fail_timeout=10s'
### SITES
servers:
overwrite: true
config:
- server:
- - listen: '127.0.0.2:9000'
+ - listen: 'workbench.internal:9000'
- server_name: workbench
- root: /var/www/arvados-workbench/current/public
- index: index.html index.htm
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+inputfile:
+ class: File
+ path: test.txt
+hasher1_outputname: hasher1.md5sum.txt
+hasher2_outputname: hasher2.md5sum.txt
+hasher3_outputname: hasher3.md5sum.txt
--- /dev/null
+#!/usr/bin/env cwl-runner
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+
+$namespaces:
+ arv: "http://arvados.org/cwl#"
+ cwltool: "http://commonwl.org/cwltool#"
+
+inputs:
+ inputfile: File
+ hasher1_outputname: string
+ hasher2_outputname: string
+ hasher3_outputname: string
+
+outputs:
+ hasher_out:
+ type: File
+ outputSource: hasher3/hasher_out
+
+steps:
+ hasher1:
+ run: hasher.cwl
+ in:
+ inputfile: inputfile
+ outputname: hasher1_outputname
+ out: [hasher_out]
+ hints:
+ ResourceRequirement:
+ coresMin: 1
+ arv:IntermediateOutput:
+ outputTTL: 3600
+ arv:ReuseRequirement:
+ enableReuse: false
+
+ hasher2:
+ run: hasher.cwl
+ in:
+ inputfile: hasher1/hasher_out
+ outputname: hasher2_outputname
+ out: [hasher_out]
+ hints:
+ ResourceRequirement:
+ coresMin: 1
+ arv:IntermediateOutput:
+ outputTTL: 3600
+ arv:ReuseRequirement:
+ enableReuse: false
+
+ hasher3:
+ run: hasher.cwl
+ in:
+ inputfile: hasher2/hasher_out
+ outputname: hasher3_outputname
+ out: [hasher_out]
+ hints:
+ ResourceRequirement:
+ coresMin: 1
+ arv:IntermediateOutput:
+ outputTTL: 3600
+ arv:ReuseRequirement:
+ enableReuse: false
--- /dev/null
+#!/usr/bin/env cwl-runner
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+
+baseCommand: md5sum
+inputs:
+ inputfile:
+ type: File
+ inputBinding:
+ position: 1
+ outputname:
+ type: string
+
+stdout: $(inputs.outputname)
+
+outputs:
+ hasher_out:
+ type: File
+ outputBinding:
+ glob: $(inputs.outputname)
--- /dev/null
+#!/usr/bin/env bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+export ARVADOS_API_TOKEN=changemesystemroottoken
+export ARVADOS_API_HOST=__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+export ARVADOS_API_HOST_INSECURE=true
+
+
+# https://doc.arvados.org/v2.0/install/install-jobs-image.html
+echo "Creating Arvados Standard Docker Images project"
+uuid_prefix=$(arv --format=uuid user current | cut -d- -f1)
+project_uuid=$(arv --format=uuid group create --group "{\"owner_uuid\": \"${uuid_prefix}-tpzed-000000000000000\", \"group_class\":\"project\", \"name\":\"Arvados Standard Docker Images\"}")
+echo "Arvados project uuid is '${project_uuid}'"
+read -rd $'\000' newlink <<EOF; arv link create --link "${newlink}"
+{
+"tail_uuid":"${uuid_prefix}-j7d0g-fffffffffffffff",
+"head_uuid":"${project_uuid}",
+"link_class":"permission",
+"name":"can_read"
+}
+EOF
+
+echo "Uploading arvados/jobs' docker image to the project"
+VERSION="2.1.1"
+arv-keepdocker --pull arvados/jobs ${VERSION} --project-uuid ${project_uuid}
+
+# Create the initial user
+echo "Creating initial user ('__INITIAL_USER__')"
+user=$(arv --format=uuid user create --user '{"email": "__INITIAL_USER_EMAIL__", "username": "__INITIAL_USER__"}')
+echo "Setting up user ('__INITIAL_USER__')"
+arv user setup --uuid ${user}
+echo "Activating user '__INITIAL_USER__'"
+arv user update --uuid ${user} --user '{"is_active": true}'
+
+user_api_token=$(arv api_client_authorization create --api-client-authorization "{\"owner_uuid\": \"${user}\"}" | jq -r .api_token)
+
+echo "Running test CWL workflow"
+# Change to the user's token and run the workflow
+export ARVADOS_API_TOKEN=${user_api_token}
+cwl-runner hasher-workflow.cwl hasher-workflow-job.yml
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+test