arvados-api-package-clean-gems-deps-pkg-removed:
pkg.removed:
- pkgs: {{ arvados.ruby.gems_deps | json }}
- - only_if: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
arvados-api-package-clean-pkg-removed:
pkg.removed:
arvados-api-package-clean-ruby-pkg-removed:
pkg.removed:
- name: {{ arvados.ruby.pkg }}
- - only_if: test "{{ arvados.ruby.manage_ruby | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_ruby | lower }}" = "true"
arvados-api-package-install-gems-deps-pkg-installed:
pkg.installed:
- pkgs: {{ arvados.ruby.gems_deps | unique | json }}
- - only_if: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
{%- for gm in arvados.api.gem.name | unique %}
arvados-api-package-install-gem-{{ gm }}-installed:
{%- from tplroot ~ "/map.jinja" import arvados with context %}
{%- from tplroot ~ "/libtofs.jinja" import files_switch with context %}
-{%- do arvados.cluster.Users.update({'AnonymousUserToken': arvados.cluster.tokens.anonymous_user }) if arvados.cluster.Users.AnonymousUserToken is not defined %}
+{%- do arvados.cluster.Users.update({'AnonymousUserToken': arvados.cluster.tokens.anonymous_user })
+ if arvados.cluster.Users.AnonymousUserToken is not defined %}
include:
- .package
- .package
- ..config
- .service
+ - .resources
--- /dev/null
+# -*- coding: utf-8 -*-
+# vim: ft=sls
+
+include:
+ - .virtual_machines
--- /dev/null
+# -*- coding: utf-8 -*-
+# vim: ft=sls
+
+{#- Get the `tplroot` from `tpldir` #}
+{%- set tplroot = tpldir.split('/')[0] %}
+{%- set sls_config_file = tplroot ~ '.config.file' %}
+{%- from tplroot ~ "/map.jinja" import arvados with context %}
+{%- from tplroot ~ "/libtofs.jinja" import files_switch with context %}
+
+{%- set virtual_machines = arvados.cluster.resources.virtual_machines | default({}) %}
+{%- set api_token = arvados.cluster.tokens.system_root | yaml_encode %}
+{%- set api_host = arvados.cluster.Services.Controller.ExternalURL | regex_replace('^http(s?)://', '', ignorecase=true) %}
+
+include:
+ - ..package
+ - {{ sls_config_file }}
+ - ..service
+
+arvados-controller-resources-virtual-machines-jq-pkg-installed:
+ pkg.installed:
+ - name: jq
+
+{%- for vm, vm_params in virtual_machines.items() %}
+ {%- set vm_name = vm_params.name | default(vm) %}
+
+ {%- set cmd_query_vm_uuid = 'ARVADOS_API_TOKEN=' ~ api_token ~
+ ' ARVADOS_API_HOST=' ~ api_host ~
+ ' arv --short virtual_machine list' ~
+ ' --filters \'[["hostname", "=", "' ~ vm_name ~ '"]]\''
+ %}
+
+# Create the virtual machine record
+arvados-controller-resources-virtual-machines-{{ vm }}-record-cmd-run:
+ cmd.run:
+ - env:
+ - ARVADOS_API_TOKEN: {{ api_token }}
+ - ARVADOS_API_HOST: {{ api_host }}
+ - name: |
+ arv --format=uuid \
+ virtual_machine \
+ create \
+ --virtual-machine '{"hostname":"{{ vm_name }}" }'
+ - unless: |
+ {{ cmd_query_vm_uuid }} | \
+ /bin/grep -qE "[a-z0-9]{5}-2x53u-[a-z0-9]{15}"
+ - require:
+ - pkg: arvados-controller-package-install-pkg-installed
+ - cmd: arvados-controller-service-running-service-ready-cmd-run
+
+# We need to use the UUID generated in the previous command to see if there's a
+# scoped token for it. There's no easy way to pass the value from a shellout
+# to another state, so we store it in a temp file and use that in the next
+# command. Flaky, mostly because the `unless` clause is just checking that
+# the file content is a token uuid :|
+arvados-controller-resources-virtual-machines-{{ vm }}-get-vm_uuid-cmd-run:
+ cmd.run:
+ - name: {{ cmd_query_vm_uuid }} | head -1 | tee /tmp/{{ vm }}
+ - require:
+ - cmd: arvados-controller-resources-virtual-machines-{{ vm }}-record-cmd-run
+ - unless:
+ - /bin/grep -qE "[a-z0-9]{5}-2x53u-[a-z0-9]{15}" /tmp/{{ vm }}
+
+ # There's no direct way to query the scoped_token for a given virtual_machine
+ # so we need to parse the api_client_authorization list through some jq
+ {%- set cmd_query_scoped_token_url = 'VM_UUID=$(cat /tmp/' ~ vm ~ ') && ' ~
+ ' ARVADOS_API_TOKEN=' ~ api_token ~
+ ' ARVADOS_API_HOST=' ~ api_host ~
+ ' arv api_client_authorization list |' ~
+ ' /usr/bin/jq -e \'.items[].scopes[] | select(. == "GET ' ~
+ '/arvados/v1/virtual_machines/\'${VM_UUID}\'/logins")\' && ' ~
+ 'unset VM_UUID'
+ %}
+
+# Create the VM scoped tokens
+arvados-controller-resources-virtual-machines-{{ vm }}-scoped-token-cmd-run:
+ cmd.run:
+ - env:
+ - ARVADOS_API_TOKEN: {{ api_token }}
+ - ARVADOS_API_HOST: {{ api_host }}
+ - name: |
+ VM_UUID=$(cat /tmp/{{ vm }}) &&
+ arv --format=uuid \
+ api_client_authorization \
+ create \
+ --api-client-authorization '{"scopes":["GET /arvados/v1/virtual_machines/'${VM_UUID}'/logins"]}'
+ - unless: {{ cmd_query_scoped_token_url }}
+ - require:
+ - pkg: arvados-controller-package-install-pkg-installed
+ - pkg: arvados-controller-resources-virtual-machines-jq-pkg-installed
+ - cmd: arvados-controller-resources-virtual-machines-{{ vm }}-get-vm_uuid-cmd-run
+
+{%- endfor %}
- sls: {{ sls_config_file }}
- require:
- pkg: arvados-controller-package-install-pkg-installed
+
+# Before being able to create resources, we need the API to be up. When running the
+# formula for the first time, it might still be being configured, so we add this
+# workaround, as suggested at
+# https://github.com/saltstack/salt/issues/19084#issuecomment-70317884
+arvados-controller-service-running-service-ready-cmd-run:
+  cmd.run:
+    - name: |
+        while ! (curl -s {{ arvados.cluster.Services.Controller.ExternalURL }} | \
+        grep -qE "req-[a-z0-9]{20}.{5}error_token"); do
+          echo 'waiting for API to be ready...'
+          sleep 1
+        done
+    - timeout: 120
+    - unless: |
+        curl -s {{ arvados.cluster.Services.Controller.ExternalURL }} | \
+        grep -qE "req-[a-z0-9]{20}.{5}error_token"
+    - require:
+      - service: arvados-controller-service-running-service-running
key: ''
insecure: false
+ resources: {}
+
### THESE ARE THE PACKAGES AND DAEMONS BASIC CONFIGS
##### API
api:
- service: arvados-dispatcher-service-running-service-running
- require:
- file: arvados-dispatcher-service-file-file-managed-crunch-dispatch-local-service
+ - onchanges:
+ - file: arvados-dispatcher-service-file-file-managed-crunch-dispatch-local-service
{%- endif %}
arvados-shell-package-clean-gems-deps-pkg-removed:
pkg.removed:
- pkgs: {{ arvados.ruby.gems_deps | json }}
- - only_if: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
arvados-shell-package-clean-pkg-removed:
pkg.removed:
arvados-shell-package-clean-ruby-pkg-removed:
pkg.removed:
- name: {{ arvados.ruby.pkg }}
- - only_if: test "{{ arvados.ruby.manage_ruby | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_ruby | lower }}" = "true"
arvados-shell-package-clean-gems-deps-pkg-removed:
pkg.removed:
- pkgs: {{ arvados.ruby.gems_deps | json }}
- - only_if: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
arvados-shell-package-clean-pkg-removed:
pkg.removed:
arvados-shell-package-clean-ruby-pkg-removed:
pkg.removed:
- name: {{ arvados.ruby.pkg }}
- - only_if: test "{{ arvados.ruby.manage_ruby | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_ruby | lower }}" = "true"
arvados-shell-package-install-gems-deps-pkg-installed:
pkg.installed:
- pkgs: {{ arvados.ruby.gems_deps | json }}
- - only_if: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
{%- for gm in arvados.shell.gem.name %}
arvados-shell-package-install-gem-{{ gm }}-installed:
arvados-workbench-package-clean-gems-deps-pkg-removed:
pkg.removed:
- pkgs: {{ arvados.ruby.gems_deps | json }}
- - only_if: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
arvados-workbench-package-clean-pkg-removed:
pkg.removed:
arvados-workbench-package-clean-ruby-pkg-removed:
pkg.removed:
- name: {{ arvados.ruby.pkg }}
- - only_if: test "{{ arvados.ruby.manage_ruby | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_ruby | lower }}" = "true"
arvados-workbench-package-install-gems-deps-pkg-installed:
pkg.installed:
- pkgs: {{ arvados.ruby.gems_deps | json }}
- - only_if: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
+ - onlyif: test "{{ arvados.ruby.manage_gems_deps | lower }}" = "true"
arvados-workbench-package-install-pkg-installed:
pkg.installed:
- path: test/integration/keepproxy
- path: test/integration/keepweb
- path: test/integration/controller
- #### workbench,workbench2
+ #### workbench,workbench2,webshell
- name: workbench
driver:
hostname: workbench.fixme.example.net
- example_nginx
- example_nginx_workbench
- example_nginx_workbench2
+ - example_nginx_webshell
pillars_from_files:
# yamllint disable rule:line-length
arvados.sls: test/salt/pillar/arvados.sls
example_nginx.sls: test/salt/pillar/examples/nginx_passenger.sls
example_nginx_workbench.sls: test/salt/pillar/examples/nginx_workbench_configuration.sls
example_nginx_workbench2.sls: test/salt/pillar/examples/nginx_workbench2_configuration.sls
+ example_nginx_webshell.sls: test/salt/pillar/examples/nginx_webshell_configuration.sls
# yamllint enable rule:line-length
verifier:
inspec_tests:
# verify-ca: false
# client_encoding: UTF8
-
tls:
# certificate: ''
# key: ''
keep_access_key: changemekeepaccesskey
keep_secret_key: changemekeepsecretkey
+ ### ARVADOS RESOURCES
+ # This dict allows you to create various resources in the Arvados
+ # database so they're ready to use.
+  # Check the `arvados.api.resources.*` states to see which can be
+  # currently managed
+
+ ### SHELL / WEBSHELL REGISTRATION
+ # In order to use shell nodes via webshell, Arvados needs to know of
+ # their existence and they need to be configured as upstreams in nginx
+ # (see https://doc.arvados.org/v2.0/install/install-webshell.html)
+ # This could be achieved in various ways (ie, through salt mine if you
+  # want them to be dynamically created), but that's outside the scope of
+ # this formula. The following dict is just an example that will be used
+ # by the `arvados.api.resources.virtual_machines` state to add entries
+  # in Arvados' database of the cluster's resources
+ # It's additionally used in the `test/salt/pillar/examples/nginx_webshell_configuration.sls`
+ # pillar to add the corresponding `location` entries in nginx's webshell vhosts & upstreams
+ resources:
+ virtual_machines:
+ shell1:
+ name: webshell1 # if not set, will match the one of the dict key above
+ backend: 1.2.3.4 # upstream host ip/name that has the shell role
+ port: 4200 # port where shellinabox is listening
+ # when no other parameter is set:
+ # `name` will match the name of the key
+ # backend` will match `name`
+ # `port` will default to shellinabox's 4200
+ webshell2: {}
+
### VOLUMES
## This should usually match all your `keepstore` instances
Volumes:
# service:
# name: keepstore
# port: 25107
-# #### GIT-HTTPD
-# githttpd:
-# pkg:
-# name: arvados-git-httpd
-# service:
-# name: arvados-git-httpd
-# port: 9001
# #### SHELL
# shell:
# pkg:
# service:
# name: arvados-ws
# port: 8005
-# #### SSO
-# sso:
-# pkg:
-# name: arvados-sso
-# service:
-# name: arvados-sso
-# port: 8900
# ## SALTSTACK FORMULAS TOFS configuration
# https://template-formula.readthedocs.io/en/latest/TOFS_pattern.html
--- /dev/null
+# frozen_string_literal: true
+
+query_virtual_machines = <<~TEST_VM_CMD
+ su -l kitchen -c \
+ "ARVADOS_API_TOKEN=\\"systemroottokenmushaveatleast32characters\\" \
+ ARVADOS_API_HOST=\\"fixme.example.net\\" \
+ arv virtual_machine list --filters '[[\\"hostname\\", \\"=\\", \\"%s\\"]]'"
+TEST_VM_CMD
+
+query_scoped_token_urls = <<~TEST_STU_CMD
+ su -l kitchen -c \
+ "ARVADOS_API_TOKEN=\\"systemroottokenmushaveatleast32characters\\" \
+ ARVADOS_API_HOST=\\"fixme.example.net\\" \
+ arv api_client_authorization list"
+TEST_STU_CMD
+
+control 'arvados api resources' do
+ impact 0.5
+ title 'should be created'
+
+ %w[
+ webshell1
+ shell.internal
+ webshell3
+ ].each do |vm|
+ describe "virtual machine #{vm}" do
+ subject do
+ command(query_virtual_machines % vm)
+ end
+ its('stdout') { should match(/"uuid":"fixme-2x53u-[a-z0-9_]{15}"/) }
+ its('stdout') { should match(/"hostname":"#{vm}"/) }
+ its('stderr') { should eq '' }
+ its('exit_status') { should eq 0 }
+ end
+
+ describe "scoped token for #{vm}" do
+ subject do
+ command(query_scoped_token_urls % vm)
+ end
+ its('stdout') do
+ should match(
+ %r{"GET /arvados/v1/virtual_machines/fixme-2x53u-[a-z0-9]{15}/logins"}
+ )
+ end
+ its('stderr') { should eq '' }
+ its('exit_status') { should eq 0 }
+ end
+ end
+end
title 'should not exist'
describe file('/etc/arvados/config.yml') do
- it { should_not exist}
+ it { should_not exist }
end
end
it { should be_running }
end
- describe port(9000) do
+ describe port(443) do
proc = case os[:name]
when 'centos'
# Centos ps adds an extra colon and the end of the process
# required to test with snakeoil certs
insecure: true
+ resources:
+ virtual_machines:
+ shell1:
+ name: webshell1
+ backend: 1.2.3.4
+ port: 4200
+ shell.internal: {}
+ webshell3:
+ backend: 4.3.2.1
+ port: 4500
+
### TOKENS
tokens:
system_root: systemroottokenmushaveatleast32characters
---
+# This parameter will be used here to generate a list of upstreams and vhosts.
+# This dict is here for convenience and should be managed some other way, but the
+# different ways of orchestration that can be used for this are outside the scope
+# of this formula and their examples.
+# These upstreams should match those defined in `arvados:cluster:resources:virtual_machines`
+{% set webshell_virtual_machines = {
+ 'shell1': {
+ 'name': 'webshell1',
+ 'backend': '1.2.3.4',
+ 'port': 4200,
+ },
+ 'shell.internal': {},
+ 'webshell3': {
+ 'backend': '4.3.2.1',
+ 'port': 4500,
+ }
+}
+%}
+
### NGINX
nginx:
### SERVER
server:
config:
-
### STREAMS
http:
- upstream webshell_upstream:
- - server: 'shell.internal:4200 fail_timeout=10s'
+ {%- for vm, params in webshell_virtual_machines.items() %}
+ {%- set vm_name = params.name | default(vm) %}
+ {%- set vm_backend = params.backend | default(vm_name) %}
+ {%- set vm_port = params.port | default(4200) %}
+
+ upstream {{ vm_name }}_upstream:
+ - server: '{{ vm_backend }}:{{ vm_port }} fail_timeout=10s'
+
+ {%- endfor %}
### SITES
servers:
- listen:
- 443 http2 ssl
- index: index.html index.htm
- - location /shell.fixme.example.net:
- - proxy_pass: 'http://webshell_upstream'
+ {%- for vm, params in webshell_virtual_machines.items() %}
+ {%- set vm_name = params.name | default(vm) %}
+ - location /{{ vm_name }}:
+ - proxy_pass: 'http://{{ vm_name }}_upstream'
- proxy_read_timeout: 90
- proxy_connect_timeout: 90
- proxy_set_header: 'Host $http_host'
- add_header: "'Access-Control-Allow-Origin' '*'"
- add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
- add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+ {%- endfor %}
- include: 'snippets/ssl_hardening_default.conf'
# - include: 'snippets/letsencrypt.conf'
- include: 'snippets/ssl_snakeoil.conf'
### NGINX
nginx:
- ### SERVER
- server:
- config:
-
- ### STREAMS
- http:
- upstream workbench_upstream:
- - server: 'workbench.internal:9000 fail_timeout=10s'
-
### SITES
servers:
managed:
- server_name: workbench.fixme.example.net
- listen:
- 443 http2 ssl
+ - root: /var/www/arvados-workbench/current/public
+ - passenger_enabled: 'on'
- index: index.html index.htm
- - location /:
- - proxy_pass: 'http://workbench_upstream'
- - proxy_read_timeout: 300
- - proxy_connect_timeout: 90
- - proxy_redirect: 'off'
- - proxy_set_header: X-Forwarded-Proto https
- - proxy_set_header: 'Host $http_host'
- - proxy_set_header: 'X-Real-IP $remote_addr'
- - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
- include: 'snippets/ssl_hardening_default.conf'
# - include: 'snippets/letsencrypt.conf'
- include: 'snippets/ssl_snakeoil.conf'
+ # yamllint disable-line rule:line-length
- access_log: /var/log/nginx/workbench.fixme.example.net.access.log combined
- error_log: /var/log/nginx/workbench.fixme.example.net.error.log
- arvados_workbench_upstream.conf:
- enabled: true
- overwrite: true
- config:
- - server:
- - listen: 'workbench.internal:9000'
- - server_name: workbench
- - root: /var/www/arvados-workbench/current/public
- - index: index.html index.htm
- - passenger_enabled: 'on'
- # yamllint disable-line rule:line-length
- - access_log: /var/log/nginx/workbench.fixme.example.net-upstream.access.log combined
- - error_log: /var/log/nginx/workbench.fixme.example.net-upstream.error.log
--- /dev/null
+These states are examples that might help you automate some Arvados configuration
+tasks. As they require orchestration (ie, API/controller running before being used)
+they're not included in the formula, as this orchestration is outside of its scope.
--- /dev/null
+# -*- coding: utf-8 -*-
+# vim: ft=sls
+
+# This state tries to query the controller using the parameters set in
+# the `arvados.cluster.resources.virtual_machines` pillar, to get the
+# scoped_token for the host and configure the arvados login-sync cron
+# as described in https://doc.arvados.org/v2.0/install/install-shell-server.html
+
+{#- Get the `tplroot` from `tpldir` #}
+{%- set tplroot = tpldir.split('/')[0] %}
+{%- set sls_config_file = tplroot ~ '.config.file' %}
+{%- from tplroot ~ "/map.jinja" import arvados with context %}
+{%- from tplroot ~ "/libtofs.jinja" import files_switch with context %}
+
+{%- set virtual_machines = arvados.cluster.resources.virtual_machines | default({}) %}
+{%- set api_token = arvados.cluster.tokens.system_root | yaml_encode %}
+{%- set api_host = arvados.cluster.Services.Controller.ExternalURL | regex_replace('^http(s?)://', '', ignorecase=true) %}
+
+examples-arvados-shell-cron-add-login-sync-add-jq-pkg-installed:
+ pkg.installed:
+ - name: jq
+
+{%- for vm, vm_params in virtual_machines.items() %}
+  {%- set vm_name = vm_params.name | default(vm) %}
+  {#- fix: `backend` was referenced below but never defined; derive it with the
+      same fallback the pillar documents (backend defaults to the vm name) #}
+  {%- set backend = vm_params.backend | default(vm_name) %}
+
+  # Check if any of the specified virtual_machines parameters corresponds to this instance
+  # It should be an error if we get more than one occurrence
+  {#- NOTE(review): no matching `endif` is visible before the loop's `endfor` —
+      confirm one exists in the full file #}
+  {%- if vm_name in [grains['id'], grains['host'], grains['fqdn'], grains['nodename']] or
+         backend in [grains['id'], grains['host'], grains['fqdn'], grains['nodename']] +
+         grains['ipv4'] + grains['ipv6'] %}
+
+  {%- set cmd_query_vm_uuid = 'arv --short virtual_machine list' ~
+                              ' --filters \'[["hostname", "=", "' ~ vm_name ~ '"]]\''
+  %}
+
+# We need to use the UUID generated in the previous command to see if there's a
+# scoped token for it. There's no easy way to pass the value from a shellout
+# to another state, so we store it in a temp file and use that in the next
+# command. Flaky, mostly because the `unless` clause is just checking that
+# the file content is a token uuid :|
+examples-arvados-shell-cron-add-login-sync-add-{{ vm }}-get-vm_uuid-cmd-run:
+ cmd.run:
+ - env:
+ - ARVADOS_API_TOKEN: {{ api_token }}
+ - ARVADOS_API_HOST: {{ api_host }}
+ - name: {{ cmd_query_vm_uuid }} | head -1 | tee /tmp/{{ vm }}
+ - require:
+ - cmd: examples-arvados-shell-cron-add-login-sync-add-resources-virtual-machines-{{ vm }}-record-cmd-run
+ - unless:
+ - /bin/grep -qE "[a-z0-9]{5}-2x53u-[a-z0-9]{15}" /tmp/{{ vm }}
+
+ # There's no direct way to query the scoped_token for a given virtual_machine
+ # so we need to parse the api_client_authorization list through some jq
+ {%- set cmd_query_scoped_token_url = 'VM_UUID=$(cat /tmp/' ~ vm ~ ') && ' ~
+ 'arv api_client_authorization list | ' ~
+ '/usr/bin/jq -e \'.items[]| select(.scopes[] == "GET ' ~
+ '/arvados/v1/virtual_machines/\'${VM_UUID}\'/logins") | ' ~
+ '.api_token\' | head -1 | tee /tmp/sctk' ~ vm ~ ' && ' ~
+ 'unset VM_UUID'
+ %}
+
+examples-arvados-shell-cron-add-login-sync-add-{{ vm }}-get-scoped_token-cmd-run:
+ cmd.run:
+ - env:
+ - ARVADOS_API_TOKEN: {{ api_token }}
+ - ARVADOS_API_HOST: {{ api_host }}
+ - name: {{ cmd_query_scoped_token_url }}
+ - require:
+ - cmd: examples-arvados-shell-cron-add-login-sync-add-{{ vm }}-get-vm_uuid-cmd-run
+ - unless:
+ - test -s /tmp/sctk{{ vm }}
+
+examples-arvados-shell-cron-add-login-sync-add-{{ vm }}-arvados-host-cron-env-present:
+  cron.env_present:
+    - name: ARVADOS_API_HOST
+    - value: {{ api_host }}
+
+examples-arvados-shell-cron-add-login-sync-add-{{ vm }}-arvados-api-cron-token-env-present:
+  cron.env_present:
+    - name: ARVADOS_API_TOKEN
+    - value: __slot__:salt:cmd.run(cat /tmp/sctk{{ vm }})
+
+# fix: this state previously reused the `…-api-cron-token-env-present` ID above;
+# with duplicate IDs the last definition wins and ARVADOS_API_TOKEN was never set
+examples-arvados-shell-cron-add-login-sync-add-{{ vm }}-arvados-vm-uuid-cron-env-present:
+  cron.env_present:
+    - name: ARVADOS_VIRTUAL_MACHINE_UUID
+    - value: __slot__:salt:cmd.run(cat /tmp/{{ vm }})
+
+# fix: `cron.present` takes its arguments as a list, and the command to run is
+# the `name` argument — the previous dict-style `minute:`/`cmd:` keys were invalid
+examples-arvados-shell-cron-add-login-sync-add-{{ vm }}-arvados-login-sync-cron-present:
+  cron.present:
+    - name: arvados-login-sync
+    - minute: '*/2'
+
+{%- endfor %}
arvados_test_salt_states_examples_single_host_etc_hosts_host_present:
host.present:
- - ip: {{ grains.get('ipv4')[0] }}
+ - ip: {{ grains['ipv4'][1] }}
- names:
- {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
# FIXME! This just works for our testings.
- sls: postgres
arvados_test_salt_states_examples_single_host_snakeoil_certs_certs_permissions_cmd_run:
- cmd.run:
- - name: |
- chown root:ssl-cert {{ arvados_key_file }}
+ file.managed:
+ - name: {{ arvados_key_file }}
+ - owner: root
+ - group: ssl-cert
- require:
- cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_cert_cmd_run
- pkg: arvados_test_salt_states_examples_single_host_snakeoil_certs_ssl_cert_pkg_installed
- service: nginx_service
- require:
- pkg: passenger_install
- - cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_certs_permissions_cmd_run
+ - file: arvados_test_salt_states_examples_single_host_snakeoil_certs_certs_permissions_cmd_run
- require_in:
- file: nginx_config