inside docker. Cleanup for the local.params file.
Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>
h2(#limitations). Limitations of the single host install
-<b>NOTE: The single host installation is a good choice when evaluating Arvados, but it is not recommended for production use.</b>
+<b>NOTE: The single host installation is a good choice for evaluating Arvados, but it is not recommended for production use.</b>
Using the default configuration, this installation method has a number of limitations:
-* all services run on the same machine, and they will compete for resources.
-* it uses the local machine disk for Keep storage (under the @/tmp@ directory).
-* it installs the @crunch-dispatch-local@ dispatcher, which can run just one concurrent CWL job. This job will be executed on the machine that runs all the Arvados services. Most workflows require at least two concurrent CWL jobs, one for the workflow runner, and one for the payload.
+* all services run on the same machine, and they will compete for resources. This includes any compute jobs.
+* it uses the local machine disk for Keep storage (under the @/tmp@ directory). There may not be a lot of space available.
+* it installs the @crunch-dispatch-local@ dispatcher, which can run just eight concurrent CWL jobs. These jobs will be executed on the same machine that runs all the Arvados services and may well starve them of resources.
It is possible to start with the single host installation method and modify the Arvados configuration file later to address these limitations. For example, you can switch to a "different storage volume setup":{{site.baseurl}}/install/configure-s3-object-storage.html for Keep, and switch to "the cloud dispatcher":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html to provision compute resources dynamically.
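For illustration, switching the example Keep volume shown just below from the @Directory@ driver to S3-compatible object storage might look roughly like the following sketch; the bucket name, region and credentials are placeholders, and the authoritative list of @DriverParameters@ is on the linked storage configuration page.

  __CLUSTER__-nyw5e-000000000000000:
    AccessViaHosts:
      'http://__IP_INT__:25107':
        ReadOnly: false
    Replication: 2
    Driver: S3
    DriverParameters:
      # Placeholder values -- replace with your own bucket and credentials.
      Bucket: example-keep-bucket
      Region: us-east-1
      AccessKeyID: ""
      SecretAccessKey: ""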
# <cluster>-nyw5e-<volume>
__CLUSTER__-nyw5e-000000000000000:
AccessViaHosts:
- 'http://__HOSTNAME_INT__:25107':
+ 'http://__IP_INT__:25107':
ReadOnly: false
Replication: 2
Driver: Directory
Controller:
ExternalURL: 'https://__HOSTNAME_EXT__:__CONTROLLER_EXT_SSL_PORT__'
InternalURLs:
- 'http://__HOSTNAME_INT__:8003': {}
+ 'http://__IP_INT__:8003': {}
Keepproxy:
ExternalURL: 'https://__HOSTNAME_EXT__:__KEEP_EXT_SSL_PORT__'
InternalURLs:
- 'http://__HOSTNAME_INT__:25100': {}
+ 'http://__IP_INT__:25100': {}
Keepstore:
InternalURLs:
- 'http://__HOSTNAME_INT__:25107': {}
+ 'http://__IP_INT__:25107': {}
RailsAPI:
InternalURLs:
- 'http://__HOSTNAME_INT__:8004': {}
+ 'http://__IP_INT__:8004': {}
WebDAV:
ExternalURL: 'https://__HOSTNAME_EXT__:__KEEPWEB_EXT_SSL_PORT__'
InternalURLs:
- 'http://__HOSTNAME_INT__:9003': {}
+ 'http://__IP_INT__:9003': {}
WebDAVDownload:
ExternalURL: 'https://__HOSTNAME_EXT__:__KEEPWEB_EXT_SSL_PORT__'
WebShell:
Websocket:
ExternalURL: 'wss://__HOSTNAME_EXT__:__WEBSOCKET_EXT_SSL_PORT__/websocket'
InternalURLs:
- 'http://__HOSTNAME_INT__:8005': {}
+ 'http://__IP_INT__:8005': {}
Workbench1:
ExternalURL: 'https://__HOSTNAME_EXT__:__WORKBENCH1_EXT_SSL_PORT__'
Workbench2:
pkg:
docker:
use_upstream: package
+ daemon_config: {"dns": ["__IP_INT__"]}
overwrite: true
config:
- server:
- - listen: '__HOSTNAME_INT__:8004'
+ - listen: '__IP_INT__:8004'
- server_name: api
- root: /var/www/arvados-api/current/public
- index: index.html index.htm
default: 1
'127.0.0.0/8': 0
upstream controller_upstream:
- - server: '__HOSTNAME_INT__:8003 fail_timeout=10s'
+ - server: '__IP_INT__:8003 fail_timeout=10s'
### SITES
servers:
### STREAMS
http:
upstream keepproxy_upstream:
- - server: '__HOSTNAME_INT__:25100 fail_timeout=10s'
+ - server: '__IP_INT__:25100 fail_timeout=10s'
servers:
managed:
### STREAMS
http:
upstream collections_downloads_upstream:
- - server: '__HOSTNAME_INT__:9003 fail_timeout=10s'
+ - server: '__IP_INT__:9003 fail_timeout=10s'
servers:
managed:
### STREAMS
http:
upstream webshell_upstream:
- - server: '__HOSTNAME_INT__:4200 fail_timeout=10s'
+ - server: '__IP_INT__:4200 fail_timeout=10s'
### SITES
servers:
### STREAMS
http:
upstream websocket_upstream:
- - server: '__HOSTNAME_INT__:8005 fail_timeout=10s'
+ - server: '__IP_INT__:8005 fail_timeout=10s'
servers:
managed:
### STREAMS
http:
upstream workbench_upstream:
- - server: '__HOSTNAME_INT__:9000 fail_timeout=10s'
+ - server: '__IP_INT__:9000 fail_timeout=10s'
### SITES
servers:
overwrite: true
config:
- server:
- - listen: '__HOSTNAME_INT__:9000'
+ - listen: '__IP_INT__:9000'
- server_name: workbench
- root: /var/www/arvados-workbench/current/public
- index: index.html index.htm
{%- from "arvados/map.jinja" import arvados with context %}
{%- set tpldir = curr_tpldir %}
+# We need the external hostname to resolve to the internal IP for docker. We
+# tell docker to resolve via the local dnsmasq, which reads from /etc/hosts by
+# default.
+arvados_local_access_to_hostname_ext:
+ host.present:
+ - ip: __IP_INT__
+ - names:
+ - __HOSTNAME_EXT__
+
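# After provisioning, the effect of this state (together with the docker
# daemon_config DNS setting above) can be sanity-checked from the host. The
# commands below are only an illustrative example; __HOSTNAME_EXT__ and
# __IP_INT__ stand in for the real values from local.params.
#
#   grep '__HOSTNAME_EXT__' /etc/hosts                   # expect: __IP_INT__  __HOSTNAME_EXT__
#   docker run --rm busybox nslookup __HOSTNAME_EXT__    # should resolve to __IP_INT__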
arvados_test_salt_states_examples_single_host_etc_hosts_host_present:
host.present:
- ip: 127.0.1.1
- names:
- {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
- # FIXME! This just works for our testings.
+ # FIXME! This just works for our testing.
# Won't work if the cluster name != host name
{%- for entry in [
'api',
{%- endfor %}
DNS.8 = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
DNS.9 = '__HOSTNAME_EXT__'
- DNS.10 = '__HOSTNAME_INT__'
+ DNS.10 = '__IP_INT__'
CNF
# The req
HOSTNAME_EXT=""
# The internal IP address for the host. In the example files, only used in the
# single_host/single_hostname example
-HOSTNAME_INT="127.0.1.1"
+IP_INT="127.0.1.1"
# Host SSL port where you want to point your browser to access Arvados
# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
# You can point it to another port if desired
# Hostnames/IPs used for single-host deploys
HOSTNAME_EXT=""
-HOSTNAME_INT="127.0.1.1"
+IP_INT="127.0.1.1"
# Initial user setup
INITIAL_USER=""
s#__CLUSTER__#${CLUSTER}#g;
s#__DOMAIN__#${DOMAIN}#g;
s#__HOSTNAME_EXT__#${HOSTNAME_EXT}#g;
- s#__HOSTNAME_INT__#${HOSTNAME_INT}#g;
+ s#__IP_INT__#${IP_INT}#g;
s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;
s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g;
s#__INITIAL_USER__#${INITIAL_USER}#g;
sed "s#__CLUSTER__#${CLUSTER}#g;
s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g;
s#__DOMAIN__#${DOMAIN}#g;
- s#__HOSTNAME_INT__#${HOSTNAME_INT}#g;
+ s#__IP_INT__#${IP_INT}#g;
s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;
s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g
s#__INITIAL_USER__#${INITIAL_USER}#g;
s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g;
s#__DOMAIN__#${DOMAIN}#g;
s#__HOSTNAME_EXT__#${HOSTNAME_EXT}#g;
- s#__HOSTNAME_INT__#${HOSTNAME_INT}#g;
+ s#__IP_INT__#${IP_INT}#g;
s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;
s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g;
s#__INITIAL_USER__#${INITIAL_USER}#g;
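Each of these substitution lists is a fragment of a larger sed invocation that rewrites one of the example templates into the states/pillars directories. Purely as an illustration (the input and output file names below are hypothetical), a complete invocation has this shape:

sed "s#__CLUSTER__#${CLUSTER}#g;
     s#__DOMAIN__#${DOMAIN}#g;
     s#__HOSTNAME_EXT__#${HOSTNAME_EXT}#g;
     s#__IP_INT__#${IP_INT}#g" \
  example_pillar.sls.template > ${P_DIR}/example_pillar.sls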
# States, extra states
if [ -d "${F_DIR}"/extra/extra ]; then
SKIP_SNAKE_OIL="snakeoil_certs"
-
if [[ "$DEV_MODE" = "yes" || "${SSL_MODE}" == "self-signed" ]] ; then
# In dev mode, we create some snake oil certs that we'll
# use as CUSTOM_CERTS, so we don't skip the states file.
echo " - postgres" >> ${S_DIR}/top.sls
echo " - docker.software" >> ${S_DIR}/top.sls
echo " - arvados" >> ${S_DIR}/top.sls
+ echo " - extra.dns" >> ${S_DIR}/top.sls
# Pillars
echo " - docker" >> ${P_DIR}/top.sls
s#__CERT_KEY__#/etc/letsencrypt/live/${c}.${CLUSTER}.${DOMAIN}/privkey.pem#g" \
${P_DIR}/nginx_${c}_configuration.sls
done
- else
- # Use custom certs (either dev mode or prod)
+ elif [ "${SSL_MODE}" = "bring-your-own" ]; then
+ # Use custom "bring-your-own" certs (either dev mode or prod)
+ grep -q "custom_certs" ${S_DIR}/top.sls || echo " - extra.custom_certs" >> ${S_DIR}/top.sls
grep -q "extra_custom_certs" ${P_DIR}/top.sls || echo " - extra_custom_certs" >> ${P_DIR}/top.sls
# And add the certs in the custom_certs pillar
echo "extra_custom_certs_dir: /srv/salt/certs" > ${P_DIR}/extra_custom_certs.sls
else
# If we add individual roles, make sure we add the repo first
echo " - arvados.repo" >> ${S_DIR}/top.sls
- # We add the custom_certs state
- grep -q "custom_certs" ${S_DIR}/top.sls || echo " - extra.custom_certs" >> ${S_DIR}/top.sls
+ # We add the extra_custom_certs state
+ grep -q "extra_custom_certs" ${S_DIR}/top.sls || echo " - extra.custom_certs" >> ${S_DIR}/top.sls
# And we add the basic part for the certs pillar
if [ "${SSL_MODE}" != "lets-encrypt" ]; then
echo "Arvados project uuid is '${project_uuid}'"
-echo "Uploading arvados/jobs' docker image to the project"
-VERSION="2.1.1"
-arv-keepdocker --pull arvados/jobs "${VERSION}" --project-uuid "${project_uuid}"
-
# Create the initial user
echo "Creating initial user '__INITIAL_USER__'"
user_uuid=$(arv --format=uuid user list --filters '[["email", "=", "__INITIAL_USER_EMAIL__"], ["username", "=", "__INITIAL_USER__"]]')
export ARVADOS_API_TOKEN="${user_api_token}"
echo "Running test CWL workflow"
-cwl-runner --local --debug hasher-workflow.cwl hasher-workflow-job.yml
+cwl-runner --debug hasher-workflow.cwl hasher-workflow-job.yml