Merge branch '16263-logincluster-user-list-fix' refs #16263
author     Peter Amstutz <peter.amstutz@curii.com>
           Mon, 13 Apr 2020 14:42:47 +0000 (10:42 -0400)
committer  Peter Amstutz <peter.amstutz@curii.com>
           Mon, 13 Apr 2020 14:42:47 +0000 (10:42 -0400)
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz@curii.com>

83 files changed:
apps/workbench/Gemfile.lock
apps/workbench/config/initializers/actionview_xss_fix.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/javascript_helper_test.rb [new file with mode: 0644]
build/package-build-dockerfiles/centos7/Dockerfile
build/package-build-dockerfiles/debian10/Dockerfile
build/package-build-dockerfiles/debian9/Dockerfile
build/package-build-dockerfiles/ubuntu1604/Dockerfile
build/package-build-dockerfiles/ubuntu1804/Dockerfile
build/run-build-packages.sh
build/run-tests.sh
cmd/arvados-server/arvados-ws.service [moved from services/ws/arvados-ws.service with 94% similarity]
cmd/arvados-server/cmd.go
doc/admin/metrics.html.textile.liquid
go.mod
go.sum
lib/boot/cmd.go
lib/boot/nginx.go
lib/boot/postgresql.go
lib/boot/supervisor.go
lib/config/config.default.yml
lib/config/export.go
lib/config/generated_config.go
lib/controller/federation/conn.go
lib/controller/federation/login_test.go
lib/controller/handler.go
lib/controller/localdb/conn.go
lib/controller/localdb/login.go
lib/controller/localdb/login_google.go [new file with mode: 0644]
lib/controller/localdb/login_google_test.go [moved from lib/controller/localdb/login_test.go with 94% similarity]
lib/controller/localdb/login_pam.go [new file with mode: 0644]
lib/controller/localdb/login_pam_docker_test.go [new file with mode: 0644]
lib/controller/localdb/login_pam_docker_test.sh [new file with mode: 0755]
lib/controller/localdb/login_pam_test.go [new file with mode: 0644]
lib/controller/router/router.go
lib/controller/rpc/conn.go
lib/dispatchcloud/dispatcher.go
lib/install/arvadostest_docker_build.sh [new file with mode: 0755]
lib/install/arvadostest_docker_run.sh [new file with mode: 0755]
lib/install/deps.go [new file with mode: 0644]
lib/install/deps_test.go [new file with mode: 0644]
lib/install/example_from_scratch.sh [new file with mode: 0644]
lib/service/cmd.go
lib/service/cmd_test.go
lib/service/error.go
sdk/go/arvados/api.go
sdk/go/arvados/client.go
sdk/go/arvados/config.go
sdk/go/arvados/config_test.go
sdk/go/arvados/login.go
sdk/go/arvadostest/api.go
sdk/go/health/aggregator.go
sdk/go/httpserver/logger.go
sdk/java-v2/build.gradle
sdk/java-v2/src/main/java/org/arvados/client/api/client/LinksApiClient.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/Link.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/LinkList.java [new file with mode: 0644]
sdk/python/arvados/commands/arv_copy.py
sdk/python/tests/run_test_server.py
sdk/python/tests/test_keep_client.py
services/api/Gemfile.lock
services/keep-balance/server.go
services/keepstore/command.go
services/keepstore/unix_volume_test.go
services/ws/doc.go
services/ws/event.go
services/ws/event_source.go
services/ws/event_source_test.go
services/ws/event_test.go
services/ws/gocheck_test.go
services/ws/handler.go
services/ws/main.go [deleted file]
services/ws/permission.go
services/ws/permission_test.go
services/ws/router.go
services/ws/server.go [deleted file]
services/ws/service.go [new file with mode: 0644]
services/ws/service_test.go [moved from services/ws/server_test.go with 55% similarity]
services/ws/session.go
services/ws/session_v0.go
services/ws/session_v0_test.go
services/ws/session_v1.go
tools/arvbox/lib/arvbox/docker/Dockerfile.base
tools/arvbox/lib/arvbox/docker/service/websockets/run-service

index b02161c598e75772895fe0dc1dccf111c124ac6b..2af9c8b16f0b282ef3abc1a4d7c3880e690f7420 100644 (file)
@@ -122,7 +122,7 @@ GEM
     coffee-script-source (1.12.2)
     commonjs (0.2.7)
     concurrent-ruby (1.1.5)
-    crass (1.0.4)
+    crass (1.0.5)
     deep_merge (1.2.1)
     docile (1.3.1)
     erubis (2.7.0)
@@ -167,7 +167,7 @@ GEM
       railties (>= 4)
       request_store (~> 1.0)
     logstash-event (1.2.02)
-    loofah (2.2.3)
+    loofah (2.3.1)
       crass (~> 1.0.2)
       nokogiri (>= 1.5.9)
     mail (2.7.1)
@@ -195,7 +195,7 @@ GEM
     net-ssh-gateway (2.0.0)
       net-ssh (>= 4.0.0)
     nio4r (2.3.1)
-    nokogiri (1.10.4)
+    nokogiri (1.10.8)
       mini_portile2 (~> 2.4.0)
     npm-rails (0.2.1)
       rails (>= 3.2)
@@ -247,7 +247,7 @@ GEM
       method_source
       rake (>= 0.8.7)
       thor (>= 0.18.1, < 2.0)
-    rake (12.3.2)
+    rake (13.0.1)
     raphael-rails (2.1.2)
     rb-fsevent (0.10.3)
     rb-inotify (0.10.0)
diff --git a/apps/workbench/config/initializers/actionview_xss_fix.rb b/apps/workbench/config/initializers/actionview_xss_fix.rb
new file mode 100644 (file)
index 0000000..3f5e239
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This is related to:
+# * https://github.com/advisories/GHSA-65cv-r6x7-79hv
+# * https://nvd.nist.gov/vuln/detail/CVE-2020-5267
+#
+# Until we upgrade to rails 5.2, this monkeypatch should be enough
+ActionView::Helpers::JavaScriptHelper::JS_ESCAPE_MAP.merge!(
+  {
+    "`" => "\\`",
+    "$" => "\\$"
+  }
+)
+
+module ActionView::Helpers::JavaScriptHelper
+  alias :old_ej :escape_javascript
+  alias :old_j :j
+
+  def escape_javascript(javascript)
+    javascript = javascript.to_s
+    if javascript.empty?
+      result = ""
+    else
+      result = javascript.gsub(/(\\|<\/|\r\n|\342\200\250|\342\200\251|[\n\r"']|[`]|[$])/u, JS_ESCAPE_MAP)
+    end
+    javascript.html_safe? ? result.html_safe : result
+  end
+
+  alias :j :escape_javascript
+end
\ No newline at end of file
diff --git a/apps/workbench/test/unit/helpers/javascript_helper_test.rb b/apps/workbench/test/unit/helpers/javascript_helper_test.rb
new file mode 100644 (file)
index 0000000..9d5a553
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+# Tests XSS vulnerability monkeypatch
+# See: https://github.com/advisories/GHSA-65cv-r6x7-79hv
+class JavascriptHelperTest < ActionView::TestCase
+  def test_escape_backtick
+    assert_equal "\\`", escape_javascript("`")
+  end
+
+  def test_escape_dollar_sign
+    assert_equal "\\$", escape_javascript("$")
+  end
+end
index faaf91f43b8ee64812d0f4afe9627495e388c77d..8ccab49e1e7d3d9e7c557c48758b8b146386db35 100644 (file)
@@ -6,7 +6,7 @@ FROM centos:7
 MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
 
 # Install dependencies.
-RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git perl-ExtUtils-MakeMaker libattr-devel nss-devel libcurl-devel which tar unzip scl-utils centos-release-scl postgresql-devel python-devel python-setuptools fuse-devel xz-libs git python-virtualenv wget
+RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git perl-ExtUtils-MakeMaker libattr-devel nss-devel libcurl-devel which tar unzip scl-utils centos-release-scl postgresql-devel python-devel python-setuptools fuse-devel xz-libs git python-virtualenv wget pam-devel
 
 # Install RVM
 ADD generated/mpapis.asc /tmp/
index ff86262d38597aa99af8714ec5b94f3d11dfdbc6..90dfd36b52f66afb6f49c946df761fcd1651ac53 100644 (file)
@@ -9,7 +9,7 @@ MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
 ENV DEBIAN_FRONTEND noninteractive
 
 # Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip python3-venv python3-dev
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip python3-venv python3-dev libpam-dev
 
 # Install virtualenv
 RUN /usr/bin/pip install 'virtualenv<20'
index 257523a72feb58e8d28e78688ccd7d04859e777a..1a84da280898d3010ea6c8bf5978bc0da648f891 100644 (file)
@@ -9,7 +9,7 @@ MAINTAINER Nico Cesar <nico@curoverse.com>
 ENV DEBIAN_FRONTEND noninteractive
 
 # Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip python3-venv python3-dev
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip python3-venv python3-dev libpam-dev
 
 # Install virtualenv
 RUN /usr/bin/pip install 'virtualenv<20'
index 6b1304265b9d7c98b45a6247340283802e59a4d9..87f7712d50be68aceb65612b33154bc267b0a10c 100644 (file)
@@ -8,7 +8,7 @@ MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
 ENV DEBIAN_FRONTEND noninteractive
 
 # Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev libgnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata python3-venv python3-dev
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev libgnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata python3-venv python3-dev libpam-dev
 
 # Install virtualenv
 RUN /usr/bin/pip install 'virtualenv<20'
index 58bff616038b8ca3b6b3ec91549d7b9059d2b188..a2ec29da1cf3932134b3f524608fbcb0c0b72691 100644 (file)
@@ -8,7 +8,7 @@ MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
 ENV DEBIAN_FRONTEND noninteractive
 
 # Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata python3-venv python3-dev
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata python3-venv python3-dev libpam-dev
 
 # Install virtualenv
 RUN /usr/bin/pip install 'virtualenv<20'
index e38d1fd24e5f56cd19a6715b09aeb43593a8e362..3ba1dcc05e8776fc57a205e2deb79a0224a8e370 100755 (executable)
@@ -308,7 +308,7 @@ package_go_binary services/keepstore keepstore \
     "Keep storage daemon, accessible to clients on the LAN"
 package_go_binary services/keep-web keep-web \
     "Static web hosting service for user data stored in Arvados Keep"
-package_go_binary services/ws arvados-ws \
+package_go_binary cmd/arvados-server arvados-ws \
     "Arvados Websocket server"
 package_go_binary tools/sync-groups arvados-sync-groups \
     "Synchronize remote groups into Arvados from an external source"
@@ -322,10 +322,10 @@ package_go_binary tools/keep-exercise keep-exercise \
 # The Python SDK - Should be built first because it's needed by others
 fpm_build_virtualenv "arvados-python-client" "sdk/python"
 
-# Arvados cwl runner
-fpm_build_virtualenv "arvados-cwl-runner" "sdk/cwl"
+# The Python SDK - Python3 package
+fpm_build_virtualenv "arvados-python-client" "sdk/python" "python3"
 
-# Arvados cwl runner - Python3 package
+# Arvados cwl runner - Only supports Python3 now
 fpm_build_virtualenv "arvados-cwl-runner" "sdk/cwl" "python3"
 
 # The PAM module
@@ -340,9 +340,6 @@ fpm_build_virtualenv "arvados-node-manager" "services/nodemanager"
 # The Arvados crunchstat-summary tool
 fpm_build_virtualenv "crunchstat-summary" "tools/crunchstat-summary"
 
-# The Python SDK - Python3 package
-fpm_build_virtualenv "arvados-python-client" "sdk/python" "python3"
-
 # The Docker image cleaner
 fpm_build_virtualenv "arvados-docker-cleaner" "services/dockercleaner" "python3"
 
index 7a3302de9bc98e968845ae6a42b83d3a51c0d663..0212d1bc0e13e7b6202a04f4da00436a6c278ed1 100755 (executable)
@@ -35,7 +35,7 @@ Options:
 --short        Skip (or scale down) some slow tests.
 --interactive  Set up, then prompt for test/install steps to perform.
 WORKSPACE=path Arvados source tree to test.
-CONFIGSRC=path Dir with config.yml file containing PostgreSQL section for use by tests. (required)
+CONFIGSRC=path Dir with config.yml file containing PostgreSQL section for use by tests.
 services/api_test="TEST=test/functional/arvados/v1/collections_controller_test.rb"
                Restrict apiserver tests to the given file
 sdk/python_test="--test-suite tests.test_keep_locator"
@@ -197,10 +197,8 @@ sanity_checks() {
     [[ -n "${skip[sanity]}" ]] && return 0
     ( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \
         || fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)"
-    [[ -n "$CONFIGSRC" ]] \
-       || fatal "CONFIGSRC environment not set (see: $0 --help)"
-    [[ -s "$CONFIGSRC/config.yml" ]] \
-       || fatal "'$CONFIGSRC/config.yml' is empty or not found (see: $0 --help)"
+    [[ -z "$CONFIGSRC" ]] || [[ -s "$CONFIGSRC/config.yml" ]] \
+       || fatal "CONFIGSRC is $CONFIGSRC but '$CONFIGSRC/config.yml' is empty or not found (see: $0 --help)"
     echo Checking dependencies:
     echo "locale: ${LANG}"
     [[ "$(locale charmap)" = "UTF-8" ]] \
@@ -262,6 +260,9 @@ sanity_checks() {
     echo -n 'libpq libpq-fe.h: '
     find /usr/include -path '*/postgresql/libpq-fe.h' | egrep --max-count=1 . \
         || fatal "No libpq libpq-fe.h. Try: apt-get install libpq-dev"
+    echo -n 'libpam pam_appl.h: '
+    find /usr/include -path '*/security/pam_appl.h' | egrep --max-count=1 . \
+        || fatal "No libpam pam_appl.h. Try: apt-get install libpam-dev"
     echo -n 'postgresql: '
     psql --version || fatal "No postgresql. Try: apt-get install postgresql postgresql-client-common"
     echo -n 'phantomjs: '
@@ -553,8 +554,14 @@ setup_ruby_environment() {
         bundle="$(gem env gempath | cut -f1 -d:)/bin/bundle"
         (
             export HOME=$GEMHOME
-            ("$bundle" version | grep -q 2.0.2) \
-                || gem install --user bundler -v 2.0.2
+            bundlers="$(gem list --details bundler)"
+            versions=(1.11.0 1.17.3 2.0.2)
+            for v in ${versions[@]}; do
+                if ! echo "$bundlers" | fgrep -q "($v)"; then
+                    gem install --user $(for v in ${versions[@]}; do echo bundler:${v}; done)
+                    break
+                fi
+            done
             "$bundle" version | tee /dev/stderr | grep -q 'version 2'
         ) || fatal 'install bundler'
     fi
@@ -590,6 +597,11 @@ setup_virtualenv() {
 }
 
 initialize() {
+    # If dependencies like ruby, go, etc. are installed in
+    # /var/lib/arvados -- presumably by "arvados-server install" --
+    # then we want to use those versions, instead of whatever happens
+    # to be installed in /usr.
+    PATH="/var/lib/arvados/bin:${PATH}"
     sanity_checks
 
     echo "WORKSPACE=$WORKSPACE"
@@ -1056,7 +1068,7 @@ test_sdk/cli() {
 }
 
 test_sdk/java-v2() {
-    cd "$WORKSPACE/sdk/java-v2" && gradle test
+    cd "$WORKSPACE/sdk/java-v2" && gradle test ${testargs[sdk/java-v2]}
 }
 
 test_services/login-sync() {
similarity index 94%
rename from services/ws/arvados-ws.service
rename to cmd/arvados-server/arvados-ws.service
index 36624c78779c02cfde829323551ca9c2cb19eda3..aebc56a79f333b19f061f5f0aadce793e799529c 100644 (file)
@@ -6,6 +6,7 @@
 Description=Arvados websocket server
 Documentation=https://doc.arvados.org/
 After=network.target
+AssertPathExists=/etc/arvados/config.yml
 
 # systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
 StartLimitInterval=0
index a9d927d8734401f76fa173bff7214e0038fc4c68..fcea2223da70d5a174ee74b8281ebd3d20e0b503 100644 (file)
@@ -14,6 +14,8 @@ import (
        "git.arvados.org/arvados.git/lib/controller"
        "git.arvados.org/arvados.git/lib/crunchrun"
        "git.arvados.org/arvados.git/lib/dispatchcloud"
+       "git.arvados.org/arvados.git/lib/install"
+       "git.arvados.org/arvados.git/services/ws"
 )
 
 var (
@@ -25,11 +27,13 @@ var (
                "boot":            boot.Command,
                "cloudtest":       cloudtest.Command,
                "config-check":    config.CheckCommand,
-               "config-dump":     config.DumpCommand,
                "config-defaults": config.DumpDefaultsCommand,
+               "config-dump":     config.DumpCommand,
                "controller":      controller.Command,
                "crunch-run":      crunchrun.Command,
                "dispatch-cloud":  dispatchcloud.Command,
+               "install":         install.Command,
+               "ws":              ws.Command,
        })
 )
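
The two new map entries above wire the "install" and "ws" subcommands into the arvados-server binary's handler table. Below is a minimal sketch of that table-driven dispatch pattern; the Handler interface and multi type are simplified stand-ins, not the actual lib/cmd package.

    package main

    import (
    	"fmt"
    	"os"
    )

    // Handler is the interface each subcommand implements.
    type Handler interface {
    	RunCommand(prog string, args []string) int
    }

    // multi dispatches "prog subcommand args..." to the matching Handler.
    type multi map[string]Handler

    func (m multi) RunCommand(prog string, args []string) int {
    	if len(args) == 0 {
    		fmt.Fprintf(os.Stderr, "usage: %s <command> [args]\n", prog)
    		return 2
    	}
    	h, ok := m[args[0]]
    	if !ok {
    		fmt.Fprintf(os.Stderr, "%s: unknown command %q\n", prog, args[0])
    		return 2
    	}
    	return h.RunCommand(prog+" "+args[0], args[1:])
    }

    type wsCommand struct{}

    func (wsCommand) RunCommand(prog string, args []string) int {
    	fmt.Println(prog, "would start the websocket service here")
    	return 0
    }

    func main() {
    	handlers := multi{"ws": wsCommand{}}
    	os.Exit(handlers.RunCommand(os.Args[0], os.Args[1:]))
    }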
 
index 9616d4add43a44105d78fbf5ff6f4ae9b8e1c3cd..a6a0862c4f1d1383a44a80832b42cebaafd7f569 100644 (file)
@@ -36,7 +36,7 @@ table(table table-bordered table-condensed table-hover).
 |arvados-dispatch-cloud|✓|
 |arvados-git-httpd||
 |arvados-node-manager||
-|arvados-ws||
+|arvados-ws|✓|
 |composer||
 |keepproxy||
 |keepstore|✓|
diff --git a/go.mod b/go.mod
index 2cc5e89eb1fe68c88335e2ba2e2906e1bb2d9c33..4491b359813c00ca2d39af34f4d6587e49290699 100644 (file)
--- a/go.mod
+++ b/go.mod
@@ -36,6 +36,7 @@ require (
        github.com/lib/pq v1.3.0
        github.com/marstr/guid v1.1.1-0.20170427235115-8bdf7d1a087c // indirect
        github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 // indirect
+       github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9
        github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
        github.com/opencontainers/image-spec v1.0.1-0.20171125024018-577479e4dc27 // indirect
        github.com/pelletier/go-buffruneio v0.2.0 // indirect
@@ -52,7 +53,7 @@ require (
        golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
        golang.org/x/net v0.0.0-20190620200207-3b0461eec859
        golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
-       golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd // indirect
+       golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd
        google.golang.org/api v0.13.0
        gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405
        gopkg.in/square/go-jose.v2 v2.3.1
diff --git a/go.sum b/go.sum
index c3904fe84b70b1e79323b18601d989d3527b289d..18cf89b0e17e6130fa2e18cb2b4b067de54d506d 100644 (file)
--- a/go.sum
+++ b/go.sum
@@ -123,6 +123,8 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9 h1:ZivaaKmjs9q90zi6I4gTLW6tbVGtlBjellr3hMYaly0=
+github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9/go.mod h1:np1wUFZ6tyoke22qDJZY40URn9Ae51gX7ljIWXN5TJs=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
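
The go.mod/go.sum additions above pull in github.com/msteinert/pam, the cgo wrapper behind the new PAM login controller (lib/controller/localdb/login_pam.go, whose contents are not shown here); the libpam-dev/pam-devel packages added to the build Dockerfiles supply its C headers. A rough sketch of a username/password check with that library follows, assuming the default "arvados" PAM service name from config.default.yml; the prompt handling and main function are illustrative, not copied from the new controller.

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/msteinert/pam"
    )

    // authenticate runs a PAM conversation for the given service/user/password.
    func authenticate(service, username, password string) error {
    	tx, err := pam.StartFunc(service, username, func(style pam.Style, msg string) (string, error) {
    		switch style {
    		case pam.PromptEchoOff, pam.PromptEchoOn:
    			// PAM is asking for the password (or a similar prompt).
    			return password, nil
    		case pam.ErrorMsg, pam.TextInfo:
    			// Informational messages; nothing to send back.
    			return "", nil
    		default:
    			return "", errors.New("unrecognized PAM message style")
    		}
    	})
    	if err != nil {
    		return err
    	}
    	return tx.Authenticate(0)
    }

    func main() {
    	// "arvados" matches the default Login.PAMService in config.default.yml.
    	if err := authenticate("arvados", "someuser", "wrong-password"); err != nil {
    		fmt.Println("authentication failed:", err)
    		return
    	}
    	fmt.Println("authentication succeeded")
    }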
index 1abc93722d8b872cce0d524bf55f5277e6487a5c..5147e3ac33bb65ea8dc0305f986b30a69d736785 100644 (file)
@@ -6,9 +6,11 @@ package boot
 
 import (
        "context"
+       "errors"
        "flag"
        "fmt"
        "io"
+       "time"
 
        "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/lib/config"
@@ -56,6 +58,8 @@ func (bootCommand) RunCommand(prog string, args []string, stdin io.Reader, stdou
        flags.StringVar(&super.ListenHost, "listen-host", "localhost", "host name or interface address for service listeners")
        flags.StringVar(&super.ControllerAddr, "controller-address", ":0", "desired controller address, `host:port` or `:port`")
        flags.BoolVar(&super.OwnTemporaryDatabase, "own-temporary-database", false, "bring up a postgres server and create a temporary database")
+       timeout := flags.Duration("timeout", 0, "maximum time to wait for cluster to be ready")
+       shutdown := flags.Bool("shutdown", false, "shut down when the cluster becomes ready")
        err = flags.Parse(args)
        if err == flag.ErrHelp {
                err = nil
@@ -77,14 +81,27 @@ func (bootCommand) RunCommand(prog string, args []string, stdin io.Reader, stdou
 
        super.Start(ctx, cfg)
        defer super.Stop()
+
+       var timer *time.Timer
+       if *timeout > 0 {
+               timer = time.AfterFunc(*timeout, super.Stop)
+       }
+
        url, ok := super.WaitReady()
-       if !ok {
+       if timer != nil && !timer.Stop() {
+               err = errors.New("boot timed out")
+               return 1
+       } else if !ok {
+               err = errors.New("boot failed")
                return 1
        }
        // Write controller URL to stdout. Nothing else goes to
        // stdout, so this provides an easy way for a calling script
        // to discover the controller URL when everything is ready.
        fmt.Fprintln(stdout, url)
+       if *shutdown {
+               super.Stop()
+       }
        // Wait for signal/crash + orderly shutdown
        <-super.done
        return 0
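
The -timeout handling added above leans on a property of time.Timer: timer.Stop() returns false once the timer has already fired (and therefore already called super.Stop), which is how the code tells "boot timed out" apart from an ordinary boot failure. A self-contained sketch of the same pattern, with WaitReady replaced by a stub:

    package main

    import (
    	"fmt"
    	"time"
    )

    // waitReady simulates Supervisor.WaitReady: it blocks until the work
    // finishes or stop is signalled, and reports whether it succeeded.
    func waitReady(work time.Duration, stopped <-chan struct{}) bool {
    	select {
    	case <-time.After(work):
    		return true
    	case <-stopped:
    		return false
    	}
    }

    func main() {
    	timeout := 100 * time.Millisecond
    	stopped := make(chan struct{})
    	stop := func() { close(stopped) }

    	// Arrange for stop() to be called if the deadline passes.
    	timer := time.AfterFunc(timeout, stop)

    	ok := waitReady(250*time.Millisecond, stopped) // work takes longer than the timeout
    	if !timer.Stop() {
    		// Stop() returned false: the timer already fired and stop() ran.
    		fmt.Println("boot timed out")
    	} else if !ok {
    		fmt.Println("boot failed")
    	} else {
    		fmt.Println("ready")
    	}
    }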
index 6b2d6777fdf38053e02eb2a676e2eb3bd85ec241..ecbb7a9d3a40f9cfb916f7c89ff3f5841a38ac23 100644 (file)
@@ -26,15 +26,18 @@ func (runNginx) String() string {
 }
 
 func (runNginx) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+       err := super.wait(ctx, createCertificates{})
+       if err != nil {
+               return err
+       }
        vars := map[string]string{
                "LISTENHOST": super.ListenHost,
-               "SSLCERT":    filepath.Join(super.SourcePath, "services", "api", "tmp", "self-signed.pem"), // TODO: root ca
-               "SSLKEY":     filepath.Join(super.SourcePath, "services", "api", "tmp", "self-signed.key"), // TODO: root ca
+               "SSLCERT":    filepath.Join(super.tempdir, "server.crt"),
+               "SSLKEY":     filepath.Join(super.tempdir, "server.key"),
                "ACCESSLOG":  filepath.Join(super.tempdir, "nginx_access.log"),
                "ERRORLOG":   filepath.Join(super.tempdir, "nginx_error.log"),
                "TMPDIR":     super.tempdir,
        }
-       var err error
        for _, cmpt := range []struct {
                varname string
                svc     arvados.Service
index df98904151834a8b22dfbfc429f7e9882ad6e7ae..34ccf04a88dbd68a7822cc75b13da972e32844ee 100644 (file)
@@ -11,7 +11,9 @@ import (
        "fmt"
        "os"
        "os/exec"
+       "os/user"
        "path/filepath"
+       "strconv"
        "strings"
        "time"
 
@@ -34,6 +36,13 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Supervisor) error {
                return err
        }
 
+       iamroot := false
+       if u, err := user.Current(); err != nil {
+               return fmt.Errorf("user.Current(): %s", err)
+       } else if u.Uid == "0" {
+               iamroot = true
+       }
+
        buf := bytes.NewBuffer(nil)
        err = super.RunProgram(ctx, super.tempdir, buf, nil, "pg_config", "--bindir")
        if err != nil {
@@ -42,11 +51,45 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Supervisor) error {
        bindir := strings.TrimSpace(buf.String())
 
        datadir := filepath.Join(super.tempdir, "pgdata")
-       err = os.Mkdir(datadir, 0755)
+       err = os.Mkdir(datadir, 0700)
        if err != nil {
                return err
        }
-       err = super.RunProgram(ctx, super.tempdir, nil, nil, filepath.Join(bindir, "initdb"), "-D", datadir)
+       prog, args := filepath.Join(bindir, "initdb"), []string{"-D", datadir, "-E", "utf8"}
+       if iamroot {
+               postgresUser, err := user.Lookup("postgres")
+               if err != nil {
+                       return fmt.Errorf("user.Lookup(\"postgres\"): %s", err)
+               }
+               postgresUid, err := strconv.Atoi(postgresUser.Uid)
+               if err != nil {
+                       return fmt.Errorf("user.Lookup(\"postgres\"): non-numeric uid?: %q", postgresUser.Uid)
+               }
+               postgresGid, err := strconv.Atoi(postgresUser.Gid)
+               if err != nil {
+                       return fmt.Errorf("user.Lookup(\"postgres\"): non-numeric gid?: %q", postgresUser.Gid)
+               }
+               err = os.Chown(super.tempdir, 0, postgresGid)
+               if err != nil {
+                       return err
+               }
+               err = os.Chmod(super.tempdir, 0710)
+               if err != nil {
+                       return err
+               }
+               err = os.Chown(datadir, postgresUid, 0)
+               if err != nil {
+                       return err
+               }
+               // We can't use "sudo -u" here because it creates an
+               // intermediate process that interferes with our
+               // ability to reliably kill postgres. The setuidgid
+               // program just calls exec without forking, so it
+               // doesn't have this problem.
+               args = append([]string{"postgres", prog}, args...)
+               prog = "setuidgid"
+       }
+       err = super.RunProgram(ctx, super.tempdir, nil, nil, prog, args...)
        if err != nil {
                return err
        }
@@ -55,18 +98,29 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Supervisor) error {
        if err != nil {
                return err
        }
+       if iamroot {
+               err = super.RunProgram(ctx, super.tempdir, nil, nil, "chown", "postgres", datadir+"/server.crt", datadir+"/server.key")
+               if err != nil {
+                       return err
+               }
+       }
 
        port := super.cluster.PostgreSQL.Connection["port"]
 
        super.waitShutdown.Add(1)
        go func() {
                defer super.waitShutdown.Done()
-               fail(super.RunProgram(ctx, super.tempdir, nil, nil, filepath.Join(bindir, "postgres"),
+               prog, args := filepath.Join(bindir, "postgres"), []string{
                        "-l",          // enable ssl
                        "-D", datadir, // data dir
                        "-k", datadir, // socket dir
                        "-p", super.cluster.PostgreSQL.Connection["port"],
-               ))
+               }
+               if iamroot {
+                       args = append([]string{"postgres", prog}, args...)
+                       prog = "setuidgid"
+               }
+               fail(super.RunProgram(ctx, super.tempdir, nil, nil, prog, args...))
        }()
 
        for {
@@ -78,11 +132,15 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Supervisor) error {
                }
                time.Sleep(time.Second / 2)
        }
-       db, err := sql.Open("postgres", arvados.PostgreSQLConnection{
+       pgconn := arvados.PostgreSQLConnection{
                "host":   datadir,
                "port":   port,
                "dbname": "postgres",
-       }.String())
+       }
+       if iamroot {
+               pgconn["user"] = "postgres"
+       }
+       db, err := sql.Open("postgres", pgconn.String())
        if err != nil {
                return fmt.Errorf("db open failed: %s", err)
        }
@@ -96,7 +154,7 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Supervisor) error {
        if err != nil {
                return fmt.Errorf("createuser failed: %s", err)
        }
-       _, err = conn.ExecContext(ctx, `CREATE DATABASE `+pq.QuoteIdentifier(super.cluster.PostgreSQL.Connection["dbname"]))
+       _, err = conn.ExecContext(ctx, `CREATE DATABASE `+pq.QuoteIdentifier(super.cluster.PostgreSQL.Connection["dbname"])+` WITH TEMPLATE template0 ENCODING 'utf8'`)
        if err != nil {
                return fmt.Errorf("createdb failed: %s", err)
        }
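
The comment in the hunk above explains why setuidgid (from daemontools) is preferred over "sudo -u": sudo leaves an intermediate process between the supervisor and postgres, which makes it hard to kill the server reliably, while setuidgid execs the target directly. A minimal sketch of the same argument rewriting, using os/exec in place of the supervisor's RunProgram helper; the paths and flags here are only illustrative.

    package main

    import (
    	"fmt"
    	"os"
    	"os/exec"
    	"os/user"
    )

    // asPostgres rewrites prog/args so the command runs as the "postgres"
    // user when the current process is root: setuidgid drops privileges and
    // then execs the target without forking an intermediate process.
    func asPostgres(prog string, args []string) (string, []string) {
    	if u, err := user.Current(); err == nil && u.Uid == "0" {
    		return "setuidgid", append([]string{"postgres", prog}, args...)
    	}
    	return prog, args
    }

    func main() {
    	prog, args := asPostgres("initdb", []string{"-D", "/tmp/pgdata", "-E", "utf8"})
    	fmt.Println("running:", prog, args)
    	cmd := exec.Command(prog, args...)
    	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
    	if err := cmd.Run(); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    	}
    }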
index bcf87812ab813c39a0bbcc6c801b9069c9dd4c7b..7f5d6a9baae2dd4eaa2b2e66fea9585f7be3bdc1 100644 (file)
@@ -126,7 +126,7 @@ func (super *Supervisor) run(cfg *arvados.Config) error {
        super.setEnv("ARVADOS_CONFIG", super.configfile)
        super.setEnv("RAILS_ENV", super.ClusterType)
        super.setEnv("TMPDIR", super.tempdir)
-       super.prependEnv("PATH", filepath.Join(super.tempdir, "bin")+":")
+       super.prependEnv("PATH", super.tempdir+"/bin:/var/lib/arvados/bin:")
 
        super.cluster, err = cfg.GetCluster("")
        if err != nil {
@@ -182,7 +182,7 @@ func (super *Supervisor) run(cfg *arvados.Config) error {
                runGoProgram{src: "services/keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
                runGoProgram{src: "services/keepstore", svc: super.cluster.Services.Keepstore},
                runGoProgram{src: "services/keep-web", svc: super.cluster.Services.WebDAV},
-               runGoProgram{src: "services/ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{runPostgreSQL{}}},
+               runServiceCommand{name: "ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{runPostgreSQL{}}},
                installPassenger{src: "services/api"},
                runPassenger{src: "services/api", svc: super.cluster.Services.RailsAPI, depends: []supervisedTask{createCertificates{}, runPostgreSQL{}, installPassenger{src: "services/api"}}},
                installPassenger{src: "apps/workbench", depends: []supervisedTask{installPassenger{src: "services/api"}}}, // dependency ensures workbench doesn't delay api startup
@@ -360,7 +360,11 @@ func (super *Supervisor) setupRubyEnv() error {
                        "GEM_HOME=",
                        "GEM_PATH=",
                })
-               cmd := exec.Command("gem", "env", "gempath")
+               gem := "gem"
+               if _, err := os.Stat("/var/lib/arvados/bin/gem"); err == nil {
+                       gem = "/var/lib/arvados/bin/gem"
+               }
+               cmd := exec.Command(gem, "env", "gempath")
                cmd.Env = super.environ
                buf, err := cmd.Output() // /var/lib/arvados/.gem/ruby/2.5.0/bin:...
                if err != nil || len(buf) == 0 {
@@ -406,7 +410,11 @@ func (super *Supervisor) RunProgram(ctx context.Context, dir string, output io.W
        cmdline := fmt.Sprintf("%s", append([]string{prog}, args...))
        super.logger.WithField("command", cmdline).WithField("dir", dir).Info("executing")
 
-       logprefix := strings.TrimPrefix(prog, super.tempdir+"/bin/")
+       logprefix := prog
+       if logprefix == "setuidgid" && len(args) >= 3 {
+               logprefix = args[2]
+       }
+       logprefix = strings.TrimPrefix(logprefix, super.tempdir+"/bin/")
        if logprefix == "bundle" && len(args) > 2 && args[0] == "exec" {
                logprefix = args[1]
        } else if logprefix == "arvados-server" && len(args) > 1 {
index 411296cbea60f4c39122fb72ee8db94d9f31cacf..fcccdd0634e48fe1611f8bf531126f0d8803b081 100644 (file)
@@ -184,12 +184,21 @@ Clusters:
       MaxItemsPerResponse: 1000
 
       # Maximum number of concurrent requests to accept in a single
-      # service process, or 0 for no limit. Currently supported only
-      # by keepstore.
+      # service process, or 0 for no limit.
       MaxConcurrentRequests: 0
 
-      # Maximum number of 64MiB memory buffers per keepstore server
-      # process, or 0 for no limit.
+      # Maximum number of 64MiB memory buffers per Keepstore server process, or
+      # 0 for no limit. When this limit is reached, up to
+      # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
+      # buffers (like GET and PUT) will wait for buffer space to be released.
+      # Any HTTP requests beyond MaxConcurrentRequests will receive an
+      # immediate 503 response.
+      #
+      # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB
+      # * 1.1) fits comfortably in memory. On a host dedicated to running
+      # Keepstore, divide total memory by 88MiB to suggest a suitable value.
+      # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440
+      # kB, compute 7125440 / (88 * 1024)=79 and configure MaxKeepBlobBuffers: 79
       MaxKeepBlobBuffers: 128
 
       # API methods to disable. Disabled methods are not listed in the
@@ -541,6 +550,29 @@ Clusters:
       # work. If false, only the primary email address will be used.
       GoogleAlternateEmailAddresses: true
 
+      # (Experimental) Use PAM to authenticate logins, using the
+      # specified PAM service name.
+      #
+      # Cannot be used in combination with OAuth2 (ProviderAppID) or
+      # Google (GoogleClientID). Cannot be used on a cluster acting as
+      # a LoginCluster.
+      PAM: false
+      PAMService: arvados
+
+      # Domain name (e.g., "example.com") to use to construct the
+      # user's email address if PAM authentication returns a username
+      # with no "@". If empty, use the PAM username as the user's
+      # email address, whether or not it contains "@".
+      #
+      # Note that the email address is used as the primary key for
+      # user records when logging in. Therefore, if you change
+      # PAMDefaultEmailDomain after the initial installation, you
+      # should also update existing user records to reflect the new
+      # domain. Otherwise, next time those users log in, they will be
+      # given new accounts instead of accessing their existing
+      # accounts.
+      PAMDefaultEmailDomain: ""
+
       # The cluster ID to delegate the user database.  When set,
       # logins on this cluster will be redirected to the login cluster
       # (login cluster must appear in RemoteClusters with Proxy: true)
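
The new MaxKeepBlobBuffers comment earlier in this file encodes a sizing rule of thumb: each buffer costs 64 MiB plus roughly 10% overhead, i.e. about 88 MiB, so dividing MemTotal by 88*1024 kB yields a suggested value (7125440 / (88 * 1024) = 79 in the example). A small sketch that applies the same arithmetic to /proc/meminfo; everything other than the 88 MiB factor is illustrative.

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"strconv"
    	"strings"
    )

    func main() {
    	f, err := os.Open("/proc/meminfo")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		return
    	}
    	defer f.Close()

    	scanner := bufio.NewScanner(f)
    	for scanner.Scan() {
    		fields := strings.Fields(scanner.Text())
    		// Looking for a line like: "MemTotal: 7125440 kB"
    		if len(fields) >= 2 && fields[0] == "MemTotal:" {
    			kb, err := strconv.Atoi(fields[1])
    			if err != nil {
    				fmt.Fprintln(os.Stderr, err)
    				return
    			}
    			// 64 MiB per buffer * 1.1 overhead ≈ 88 MiB = 88*1024 kB.
    			fmt.Println("suggested MaxKeepBlobBuffers:", kb/(88*1024))
    			return
    		}
    	}
    }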
index 5973a16a132a399e2988d6b8939bab8f03911308..ded03fc3030c8811a6d12210a6c1f9b57253dfaf 100644 (file)
@@ -134,6 +134,9 @@ var whitelist = map[string]bool{
        "Login.GoogleClientID":                         false,
        "Login.GoogleClientSecret":                     false,
        "Login.GoogleAlternateEmailAddresses":          false,
+       "Login.PAM":                                    true,
+       "Login.PAMService":                             false,
+       "Login.PAMDefaultEmailDomain":                  false,
        "Login.ProviderAppID":                          false,
        "Login.ProviderAppSecret":                      false,
        "Login.LoginCluster":                           true,
index f40093a96c5fa0a7bdeae7dc7b11316a247ed561..4a8d7024fb5a75d9d275adcdd70241c3ace18fcd 100644 (file)
@@ -190,12 +190,21 @@ Clusters:
       MaxItemsPerResponse: 1000
 
       # Maximum number of concurrent requests to accept in a single
-      # service process, or 0 for no limit. Currently supported only
-      # by keepstore.
+      # service process, or 0 for no limit.
       MaxConcurrentRequests: 0
 
-      # Maximum number of 64MiB memory buffers per keepstore server
-      # process, or 0 for no limit.
+      # Maximum number of 64MiB memory buffers per Keepstore server process, or
+      # 0 for no limit. When this limit is reached, up to
+      # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
+      # buffers (like GET and PUT) will wait for buffer space to be released.
+      # Any HTTP requests beyond MaxConcurrentRequests will receive an
+      # immediate 503 response.
+      #
+      # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB
+      # * 1.1) fits comfortably in memory. On a host dedicated to running
+      # Keepstore, divide total memory by 88MiB to suggest a suitable value.
+      # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440
+      # kB, compute 7125440 / (88 * 1024)=79 and configure MaxKeepBlobBuffers: 79
       MaxKeepBlobBuffers: 128
 
       # API methods to disable. Disabled methods are not listed in the
@@ -547,6 +556,29 @@ Clusters:
       # work. If false, only the primary email address will be used.
       GoogleAlternateEmailAddresses: true
 
+      # (Experimental) Use PAM to authenticate logins, using the
+      # specified PAM service name.
+      #
+      # Cannot be used in combination with OAuth2 (ProviderAppID) or
+      # Google (GoogleClientID). Cannot be used on a cluster acting as
+      # a LoginCluster.
+      PAM: false
+      PAMService: arvados
+
+      # Domain name (e.g., "example.com") to use to construct the
+      # user's email address if PAM authentication returns a username
+      # with no "@". If empty, use the PAM username as the user's
+      # email address, whether or not it contains "@".
+      #
+      # Note that the email address is used as the primary key for
+      # user records when logging in. Therefore, if you change
+      # PAMDefaultEmailDomain after the initial installation, you
+      # should also update existing user records to reflect the new
+      # domain. Otherwise, next time those users log in, they will be
+      # given new accounts instead of accessing their existing
+      # accounts.
+      PAMDefaultEmailDomain: ""
+
       # The cluster ID to delegate the user database.  When set,
       # logins on this cluster will be redirected to the login cluster
       # (login cluster must appear in RemoteClusters with Proxy: true)
index 10e1bc5f136d9d9d0ea05df6c37ad7b7f7a0d91f..418b6811beeb82d814c16603e50b694502372522 100644 (file)
@@ -493,6 +493,10 @@ func (conn *Conn) UserBatchUpdate(ctx context.Context, options arvados.UserBatch
        return conn.local.UserBatchUpdate(ctx, options)
 }
 
+func (conn *Conn) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+       return conn.local.UserAuthenticate(ctx, options)
+}
+
 func (conn *Conn) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {
        return conn.chooseBackend(options.UUID).APIClientAuthorizationCurrent(ctx, options)
 }
index 1d6e12e0159f5c73d45ebfda595a10587580a9b5..2de260fdc2493a30857894a85ebef22e7d898670 100644 (file)
@@ -46,6 +46,9 @@ func (s *LoginSuite) TestLogout(c *check.C) {
        s.cluster.Login.GoogleClientID = "zzzzzzzzzzzzzz"
        s.addHTTPRemote(c, "zhome", &arvadostest.APIStub{})
        s.cluster.Login.LoginCluster = "zhome"
+       // s.fed is already set by SetUpTest, but we need to
+       // reinitialize with the above config changes.
+       s.fed = New(s.cluster)
 
        returnTo := "https://app.example.com/foo?bar"
        for _, trial := range []struct {
index e3869261a160b4e42d147574410f15b9641b4e9d..01f2161632bf8e6562f51b4266e43602b90218c6 100644 (file)
@@ -67,6 +67,10 @@ func (h *Handler) CheckHealth() error {
        return err
 }
 
+func (h *Handler) Done() <-chan struct{} {
+       return nil
+}
+
 func neverRedirect(*http.Request, []*http.Request) error { return http.ErrUseLastResponse }
 
 func (h *Handler) setup() {
@@ -79,6 +83,7 @@ func (h *Handler) setup() {
 
        rtr := router.New(federation.New(h.Cluster))
        mux.Handle("/arvados/v1/config", rtr)
+       mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, rtr)
 
        if !h.Cluster.ForceLegacyAPI14 {
                mux.Handle("/arvados/v1/collections", rtr)
index ac092382d42a20c6ec4caad4033ed4a5679d7632..60263455bdb1d02c10a9164c7c235d22a0f90fb7 100644 (file)
@@ -6,7 +6,6 @@ package localdb
 
 import (
        "context"
-       "errors"
 
        "git.arvados.org/arvados.git/lib/controller/railsproxy"
        "git.arvados.org/arvados.git/lib/controller/rpc"
@@ -18,35 +17,26 @@ type railsProxy = rpc.Conn
 type Conn struct {
        cluster     *arvados.Cluster
        *railsProxy // handles API methods that aren't defined on Conn itself
-
-       googleLoginController
+       loginController
 }
 
 func NewConn(cluster *arvados.Cluster) *Conn {
+       railsProxy := railsproxy.NewConn(cluster)
        return &Conn{
-               cluster:    cluster,
-               railsProxy: railsproxy.NewConn(cluster),
+               cluster:         cluster,
+               railsProxy:      railsProxy,
+               loginController: chooseLoginController(cluster, railsProxy),
        }
 }
 
 func (conn *Conn) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
-       if conn.cluster.Login.ProviderAppID != "" {
-               // Proxy to RailsAPI, which hands off to sso-provider.
-               return conn.railsProxy.Logout(ctx, opts)
-       } else {
-               return conn.googleLoginController.Logout(ctx, conn.cluster, conn.railsProxy, opts)
-       }
+       return conn.loginController.Logout(ctx, opts)
 }
 
 func (conn *Conn) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
-       wantGoogle := conn.cluster.Login.GoogleClientID != ""
-       wantSSO := conn.cluster.Login.ProviderAppID != ""
-       if wantGoogle == wantSSO {
-               return arvados.LoginResponse{}, errors.New("configuration problem: exactly one of Login.GoogleClientID and Login.ProviderAppID must be configured")
-       } else if wantGoogle {
-               return conn.googleLoginController.Login(ctx, conn.cluster, conn.railsProxy, opts)
-       } else {
-               // Proxy to RailsAPI, which hands off to sso-provider.
-               return conn.railsProxy.Login(ctx, opts)
-       }
+       return conn.loginController.Login(ctx, opts)
+}
+
+func (conn *Conn) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+       return conn.loginController.UserAuthenticate(ctx, opts)
 }
index 2e50b84f435856dc282be51b4fbe8b5db548431b..ae59849993346afaf6eddf37dd53cfa08c5cce5e 100644 (file)
 package localdb
 
 import (
-       "bytes"
        "context"
-       "crypto/hmac"
-       "crypto/sha256"
-       "encoding/base64"
        "errors"
-       "fmt"
-       "net/url"
-       "strings"
-       "sync"
-       "text/template"
-       "time"
+       "net/http"
 
-       "git.arvados.org/arvados.git/lib/controller/rpc"
        "git.arvados.org/arvados.git/sdk/go/arvados"
-       "git.arvados.org/arvados.git/sdk/go/auth"
-       "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "github.com/coreos/go-oidc"
-       "golang.org/x/oauth2"
-       "google.golang.org/api/option"
-       "google.golang.org/api/people/v1"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
 )
 
-type googleLoginController struct {
-       issuer            string // override OIDC issuer URL (normally https://accounts.google.com) for testing
-       peopleAPIBasePath string // override Google People API base URL (normally set by google pkg to https://people.googleapis.com/)
-       provider          *oidc.Provider
-       mu                sync.Mutex
+type loginController interface {
+       Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error)
+       Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error)
+       UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error)
 }
 
-func (ctrl *googleLoginController) getProvider() (*oidc.Provider, error) {
-       ctrl.mu.Lock()
-       defer ctrl.mu.Unlock()
-       if ctrl.provider == nil {
-               issuer := ctrl.issuer
-               if issuer == "" {
-                       issuer = "https://accounts.google.com"
+func chooseLoginController(cluster *arvados.Cluster, railsProxy *railsProxy) loginController {
+       wantGoogle := cluster.Login.GoogleClientID != ""
+       wantSSO := cluster.Login.ProviderAppID != ""
+       wantPAM := cluster.Login.PAM
+       switch {
+       case wantGoogle && !wantSSO && !wantPAM:
+               return &googleLoginController{Cluster: cluster, RailsProxy: railsProxy}
+       case !wantGoogle && wantSSO && !wantPAM:
+               return &ssoLoginController{railsProxy}
+       case !wantGoogle && !wantSSO && wantPAM:
+               return &pamLoginController{Cluster: cluster, RailsProxy: railsProxy}
+       default:
+               return errorLoginController{
+                       error: errors.New("configuration problem: exactly one of Login.GoogleClientID, Login.ProviderAppID, or Login.PAM must be configured"),
                }
-               provider, err := oidc.NewProvider(context.Background(), issuer)
-               if err != nil {
-                       return nil, err
-               }
-               ctrl.provider = provider
        }
-       return ctrl.provider, nil
 }
 
-func (ctrl *googleLoginController) Logout(ctx context.Context, cluster *arvados.Cluster, railsproxy *railsProxy, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
-       target := opts.ReturnTo
-       if target == "" {
-               if cluster.Services.Workbench2.ExternalURL.Host != "" {
-                       target = cluster.Services.Workbench2.ExternalURL.String()
-               } else {
-                       target = cluster.Services.Workbench1.ExternalURL.String()
-               }
-       }
-       return arvados.LogoutResponse{RedirectLocation: target}, nil
-}
+// Login and Logout are passed through to the wrapped railsProxy;
+// UserAuthenticate is rejected.
+type ssoLoginController struct{ *railsProxy }
 
-func (ctrl *googleLoginController) Login(ctx context.Context, cluster *arvados.Cluster, railsproxy *railsProxy, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
-       provider, err := ctrl.getProvider()
-       if err != nil {
-               return ctrl.loginError(fmt.Errorf("error setting up OpenID Connect provider: %s", err))
-       }
-       redirURL, err := (*url.URL)(&cluster.Services.Controller.ExternalURL).Parse("/login")
-       if err != nil {
-               return ctrl.loginError(fmt.Errorf("error making redirect URL: %s", err))
-       }
-       conf := &oauth2.Config{
-               ClientID:     cluster.Login.GoogleClientID,
-               ClientSecret: cluster.Login.GoogleClientSecret,
-               Endpoint:     provider.Endpoint(),
-               Scopes:       []string{oidc.ScopeOpenID, "profile", "email"},
-               RedirectURL:  redirURL.String(),
-       }
-       verifier := provider.Verifier(&oidc.Config{
-               ClientID: conf.ClientID,
-       })
-       if opts.State == "" {
-               // Initiate Google sign-in.
-               if opts.ReturnTo == "" {
-                       return ctrl.loginError(errors.New("missing return_to parameter"))
-               }
-               me := url.URL(cluster.Services.Controller.ExternalURL)
-               callback, err := me.Parse("/" + arvados.EndpointLogin.Path)
-               if err != nil {
-                       return ctrl.loginError(err)
-               }
-               conf.RedirectURL = callback.String()
-               state := ctrl.newOAuth2State([]byte(cluster.SystemRootToken), opts.Remote, opts.ReturnTo)
-               return arvados.LoginResponse{
-                       RedirectLocation: conf.AuthCodeURL(state.String(),
-                               // prompt=select_account tells Google
-                               // to show the "choose which Google
-                               // account" page, even if the client
-                               // is currently logged in to exactly
-                               // one Google account.
-                               oauth2.SetAuthURLParam("prompt", "select_account")),
-               }, nil
-       } else {
-               // Callback after Google sign-in.
-               state := ctrl.parseOAuth2State(opts.State)
-               if !state.verify([]byte(cluster.SystemRootToken)) {
-                       return ctrl.loginError(errors.New("invalid OAuth2 state"))
-               }
-               oauth2Token, err := conf.Exchange(ctx, opts.Code)
-               if err != nil {
-                       return ctrl.loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
-               }
-               rawIDToken, ok := oauth2Token.Extra("id_token").(string)
-               if !ok {
-                       return ctrl.loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
-               }
-               idToken, err := verifier.Verify(ctx, rawIDToken)
-               if err != nil {
-                       return ctrl.loginError(fmt.Errorf("error verifying ID token: %s", err))
-               }
-               authinfo, err := ctrl.getAuthInfo(ctx, cluster, conf, oauth2Token, idToken)
-               if err != nil {
-                       return ctrl.loginError(err)
-               }
-               ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{cluster.SystemRootToken}})
-               return railsproxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
-                       ReturnTo: state.Remote + "," + state.ReturnTo,
-                       AuthInfo: *authinfo,
-               })
-       }
+func (ctrl *ssoLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+       return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New("username/password authentication is not available"), http.StatusBadRequest)
 }
 
-// Use a person's token to get all of their email addresses, with the
-// primary address at index 0. The provided defaultAddr is always
-// included in the returned slice, and is used as the primary if the
-// Google API does not indicate one.
-func (ctrl *googleLoginController) getAuthInfo(ctx context.Context, cluster *arvados.Cluster, conf *oauth2.Config, token *oauth2.Token, idToken *oidc.IDToken) (*rpc.UserSessionAuthInfo, error) {
-       var ret rpc.UserSessionAuthInfo
-       defer ctxlog.FromContext(ctx).WithField("ret", &ret).Debug("getAuthInfo returned")
+type errorLoginController struct{ error }
 
-       var claims struct {
-               Name     string `json:"name"`
-               Email    string `json:"email"`
-               Verified bool   `json:"email_verified"`
-       }
-       if err := idToken.Claims(&claims); err != nil {
-               return nil, fmt.Errorf("error extracting claims from ID token: %s", err)
-       } else if claims.Verified {
-               // Fall back to this info if the People API call
-               // (below) doesn't return a primary && verified email.
-               if names := strings.Fields(strings.TrimSpace(claims.Name)); len(names) > 1 {
-                       ret.FirstName = strings.Join(names[0:len(names)-1], " ")
-                       ret.LastName = names[len(names)-1]
-               } else {
-                       ret.FirstName = names[0]
-               }
-               ret.Email = claims.Email
-       }
-
-       if !cluster.Login.GoogleAlternateEmailAddresses {
-               if ret.Email == "" {
-                       return nil, fmt.Errorf("cannot log in with unverified email address %q", claims.Email)
-               }
-               return &ret, nil
-       }
-
-       svc, err := people.NewService(ctx, option.WithTokenSource(conf.TokenSource(ctx, token)), option.WithScopes(people.UserEmailsReadScope))
-       if err != nil {
-               return nil, fmt.Errorf("error setting up People API: %s", err)
-       }
-       if p := ctrl.peopleAPIBasePath; p != "" {
-               // Override normal API endpoint (for testing)
-               svc.BasePath = p
-       }
-       person, err := people.NewPeopleService(svc).Get("people/me").PersonFields("emailAddresses,names").Do()
-       if err != nil {
-               if strings.Contains(err.Error(), "Error 403") && strings.Contains(err.Error(), "accessNotConfigured") {
-                       // Log the original API error, but display
-                       // only the "fix config" advice to the user.
-                       ctxlog.FromContext(ctx).WithError(err).WithField("email", ret.Email).Error("People API is not enabled")
-                       return nil, errors.New("configuration error: Login.GoogleAlternateEmailAddresses is true, but Google People API is not enabled")
-               } else {
-                       return nil, fmt.Errorf("error getting profile info from People API: %s", err)
-               }
-       }
-
-       // The given/family names returned by the People API and
-       // flagged as "primary" (if any) take precedence over the
-       // split-by-whitespace result from above.
-       for _, name := range person.Names {
-               if name.Metadata != nil && name.Metadata.Primary {
-                       ret.FirstName = name.GivenName
-                       ret.LastName = name.FamilyName
-                       break
-               }
-       }
-
-       altEmails := map[string]bool{}
-       if ret.Email != "" {
-               altEmails[ret.Email] = true
-       }
-       for _, ea := range person.EmailAddresses {
-               if ea.Metadata == nil || !ea.Metadata.Verified {
-                       ctxlog.FromContext(ctx).WithField("address", ea.Value).Info("skipping unverified email address")
-                       continue
-               }
-               altEmails[ea.Value] = true
-               if ea.Metadata.Primary || ret.Email == "" {
-                       ret.Email = ea.Value
-               }
-       }
-       if len(altEmails) == 0 {
-               return nil, errors.New("cannot log in without a verified email address")
-       }
-       for ae := range altEmails {
-               if ae != ret.Email {
-                       ret.AlternateEmails = append(ret.AlternateEmails, ae)
-                       if i := strings.Index(ae, "@"); i > 0 && strings.ToLower(ae[i+1:]) == strings.ToLower(cluster.Users.PreferDomainForUsername) {
-                               ret.Username = strings.SplitN(ae[:i], "+", 2)[0]
-                       }
-               }
-       }
-       return &ret, nil
+func (ctrl errorLoginController) Login(context.Context, arvados.LoginOptions) (arvados.LoginResponse, error) {
+       return arvados.LoginResponse{}, ctrl.error
 }
-
-func (ctrl *googleLoginController) loginError(sendError error) (resp arvados.LoginResponse, err error) {
-       tmpl, err := template.New("error").Parse(`<h2>Login error:</h2><p>{{.}}</p>`)
-       if err != nil {
-               return
-       }
-       err = tmpl.Execute(&resp.HTML, sendError.Error())
-       return
+func (ctrl errorLoginController) Logout(context.Context, arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+       return arvados.LogoutResponse{}, ctrl.error
 }
-
-func (ctrl *googleLoginController) newOAuth2State(key []byte, remote, returnTo string) oauth2State {
-       s := oauth2State{
-               Time:     time.Now().Unix(),
-               Remote:   remote,
-               ReturnTo: returnTo,
-       }
-       s.HMAC = s.computeHMAC(key)
-       return s
+func (ctrl errorLoginController) UserAuthenticate(context.Context, arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+       return arvados.APIClientAuthorization{}, ctrl.error
 }
 
-type oauth2State struct {
-       HMAC     []byte // hash of other fields; see computeHMAC()
-       Time     int64  // creation time (unix timestamp)
-       Remote   string // remote cluster if requesting a salted token, otherwise blank
-       ReturnTo string // redirect target
-}
-
-func (ctrl *googleLoginController) parseOAuth2State(encoded string) (s oauth2State) {
-       // Errors are not checked. If decoding/parsing fails, the
-       // token will be rejected by verify().
-       decoded, _ := base64.RawURLEncoding.DecodeString(encoded)
-       f := strings.Split(string(decoded), "\n")
-       if len(f) != 4 {
-               return
-       }
-       fmt.Sscanf(f[0], "%x", &s.HMAC)
-       fmt.Sscanf(f[1], "%x", &s.Time)
-       fmt.Sscanf(f[2], "%s", &s.Remote)
-       fmt.Sscanf(f[3], "%s", &s.ReturnTo)
-       return
-}
-
-func (s oauth2State) verify(key []byte) bool {
-       if delta := time.Now().Unix() - s.Time; delta < 0 || delta > 300 {
-               return false
+func noopLogout(cluster *arvados.Cluster, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+       target := opts.ReturnTo
+       if target == "" {
+               if cluster.Services.Workbench2.ExternalURL.Host != "" {
+                       target = cluster.Services.Workbench2.ExternalURL.String()
+               } else {
+                       target = cluster.Services.Workbench1.ExternalURL.String()
+               }
        }
-       return hmac.Equal(s.computeHMAC(key), s.HMAC)
-}
-
-func (s oauth2State) String() string {
-       var buf bytes.Buffer
-       enc := base64.NewEncoder(base64.RawURLEncoding, &buf)
-       fmt.Fprintf(enc, "%x\n%x\n%s\n%s", s.HMAC, s.Time, s.Remote, s.ReturnTo)
-       enc.Close()
-       return buf.String()
-}
-
-func (s oauth2State) computeHMAC(key []byte) []byte {
-       mac := hmac.New(sha256.New, key)
-       fmt.Fprintf(mac, "%x %s %s", s.Time, s.Remote, s.ReturnTo)
-       return mac.Sum(nil)
+       return arvados.LogoutResponse{RedirectLocation: target}, nil
 }
diff --git a/lib/controller/localdb/login_google.go b/lib/controller/localdb/login_google.go
new file mode 100644 (file)
index 0000000..bf1754c
--- /dev/null
@@ -0,0 +1,291 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "bytes"
+       "context"
+       "crypto/hmac"
+       "crypto/sha256"
+       "encoding/base64"
+       "errors"
+       "fmt"
+       "net/http"
+       "net/url"
+       "strings"
+       "sync"
+       "text/template"
+       "time"
+
+       "git.arvados.org/arvados.git/lib/controller/rpc"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
+       "github.com/coreos/go-oidc"
+       "golang.org/x/oauth2"
+       "google.golang.org/api/option"
+       "google.golang.org/api/people/v1"
+)
+
+type googleLoginController struct {
+       Cluster    *arvados.Cluster
+       RailsProxy *railsProxy
+
+       issuer            string // override OIDC issuer URL (normally https://accounts.google.com) for testing
+       peopleAPIBasePath string // override Google People API base URL (normally set by google pkg to https://people.googleapis.com/)
+       provider          *oidc.Provider
+       mu                sync.Mutex
+}
+
+func (ctrl *googleLoginController) getProvider() (*oidc.Provider, error) {
+       ctrl.mu.Lock()
+       defer ctrl.mu.Unlock()
+       if ctrl.provider == nil {
+               issuer := ctrl.issuer
+               if issuer == "" {
+                       issuer = "https://accounts.google.com"
+               }
+               provider, err := oidc.NewProvider(context.Background(), issuer)
+               if err != nil {
+                       return nil, err
+               }
+               ctrl.provider = provider
+       }
+       return ctrl.provider, nil
+}
+
+func (ctrl *googleLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+       return noopLogout(ctrl.Cluster, opts)
+}
+
+func (ctrl *googleLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
+       provider, err := ctrl.getProvider()
+       if err != nil {
+               return loginError(fmt.Errorf("error setting up OpenID Connect provider: %s", err))
+       }
+       redirURL, err := (*url.URL)(&ctrl.Cluster.Services.Controller.ExternalURL).Parse("/login")
+       if err != nil {
+               return loginError(fmt.Errorf("error making redirect URL: %s", err))
+       }
+       conf := &oauth2.Config{
+               ClientID:     ctrl.Cluster.Login.GoogleClientID,
+               ClientSecret: ctrl.Cluster.Login.GoogleClientSecret,
+               Endpoint:     provider.Endpoint(),
+               Scopes:       []string{oidc.ScopeOpenID, "profile", "email"},
+               RedirectURL:  redirURL.String(),
+       }
+       verifier := provider.Verifier(&oidc.Config{
+               ClientID: conf.ClientID,
+       })
+       if opts.State == "" {
+               // Initiate Google sign-in.
+               if opts.ReturnTo == "" {
+                       return loginError(errors.New("missing return_to parameter"))
+               }
+               me := url.URL(ctrl.Cluster.Services.Controller.ExternalURL)
+               callback, err := me.Parse("/" + arvados.EndpointLogin.Path)
+               if err != nil {
+                       return loginError(err)
+               }
+               conf.RedirectURL = callback.String()
+               state := ctrl.newOAuth2State([]byte(ctrl.Cluster.SystemRootToken), opts.Remote, opts.ReturnTo)
+               return arvados.LoginResponse{
+                       RedirectLocation: conf.AuthCodeURL(state.String(),
+                               // prompt=select_account tells Google
+                               // to show the "choose which Google
+                               // account" page, even if the client
+                               // is currently logged in to exactly
+                               // one Google account.
+                               oauth2.SetAuthURLParam("prompt", "select_account")),
+               }, nil
+       } else {
+               // Callback after Google sign-in.
+               state := ctrl.parseOAuth2State(opts.State)
+               if !state.verify([]byte(ctrl.Cluster.SystemRootToken)) {
+                       return loginError(errors.New("invalid OAuth2 state"))
+               }
+               oauth2Token, err := conf.Exchange(ctx, opts.Code)
+               if err != nil {
+                       return loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
+               }
+               rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+               if !ok {
+                       return loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
+               }
+               idToken, err := verifier.Verify(ctx, rawIDToken)
+               if err != nil {
+                       return loginError(fmt.Errorf("error verifying ID token: %s", err))
+               }
+               authinfo, err := ctrl.getAuthInfo(ctx, ctrl.Cluster, conf, oauth2Token, idToken)
+               if err != nil {
+                       return loginError(err)
+               }
+               ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
+               return ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+                       ReturnTo: state.Remote + "," + state.ReturnTo,
+                       AuthInfo: *authinfo,
+               })
+       }
+}
+
+func (ctrl *googleLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+       return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New("username/password authentication is not available"), http.StatusBadRequest)
+}
+
+// getAuthInfo returns a rpc.UserSessionAuthInfo for the
+// authenticated user: name and primary email address from the OIDC
+// ID token claims, plus verified alternate email addresses (and a
+// preferred username) from the Google People API when
+// Login.GoogleAlternateEmailAddresses is enabled.
+func (ctrl *googleLoginController) getAuthInfo(ctx context.Context, cluster *arvados.Cluster, conf *oauth2.Config, token *oauth2.Token, idToken *oidc.IDToken) (*rpc.UserSessionAuthInfo, error) {
+       var ret rpc.UserSessionAuthInfo
+       defer ctxlog.FromContext(ctx).WithField("ret", &ret).Debug("getAuthInfo returned")
+
+       var claims struct {
+               Name     string `json:"name"`
+               Email    string `json:"email"`
+               Verified bool   `json:"email_verified"`
+       }
+       if err := idToken.Claims(&claims); err != nil {
+               return nil, fmt.Errorf("error extracting claims from ID token: %s", err)
+       } else if claims.Verified {
+               // Fall back to this info if the People API call
+               // (below) doesn't return a primary && verified email.
+               if names := strings.Fields(strings.TrimSpace(claims.Name)); len(names) > 1 {
+                       ret.FirstName = strings.Join(names[0:len(names)-1], " ")
+                       ret.LastName = names[len(names)-1]
+               } else if len(names) > 0 {
+                       ret.FirstName = names[0]
+               }
+               ret.Email = claims.Email
+       }
+
+       if !ctrl.Cluster.Login.GoogleAlternateEmailAddresses {
+               if ret.Email == "" {
+                       return nil, fmt.Errorf("cannot log in with unverified email address %q", claims.Email)
+               }
+               return &ret, nil
+       }
+
+       svc, err := people.NewService(ctx, option.WithTokenSource(conf.TokenSource(ctx, token)), option.WithScopes(people.UserEmailsReadScope))
+       if err != nil {
+               return nil, fmt.Errorf("error setting up People API: %s", err)
+       }
+       if p := ctrl.peopleAPIBasePath; p != "" {
+               // Override normal API endpoint (for testing)
+               svc.BasePath = p
+       }
+       person, err := people.NewPeopleService(svc).Get("people/me").PersonFields("emailAddresses,names").Do()
+       if err != nil {
+               if strings.Contains(err.Error(), "Error 403") && strings.Contains(err.Error(), "accessNotConfigured") {
+                       // Log the original API error, but display
+                       // only the "fix config" advice to the user.
+                       ctxlog.FromContext(ctx).WithError(err).WithField("email", ret.Email).Error("People API is not enabled")
+                       return nil, errors.New("configuration error: Login.GoogleAlternateEmailAddresses is true, but Google People API is not enabled")
+               } else {
+                       return nil, fmt.Errorf("error getting profile info from People API: %s", err)
+               }
+       }
+
+       // The given/family names returned by the People API and
+       // flagged as "primary" (if any) take precedence over the
+       // split-by-whitespace result from above.
+       for _, name := range person.Names {
+               if name.Metadata != nil && name.Metadata.Primary {
+                       ret.FirstName = name.GivenName
+                       ret.LastName = name.FamilyName
+                       break
+               }
+       }
+
+       altEmails := map[string]bool{}
+       if ret.Email != "" {
+               altEmails[ret.Email] = true
+       }
+       for _, ea := range person.EmailAddresses {
+               if ea.Metadata == nil || !ea.Metadata.Verified {
+                       ctxlog.FromContext(ctx).WithField("address", ea.Value).Info("skipping unverified email address")
+                       continue
+               }
+               altEmails[ea.Value] = true
+               if ea.Metadata.Primary || ret.Email == "" {
+                       ret.Email = ea.Value
+               }
+       }
+       if len(altEmails) == 0 {
+               return nil, errors.New("cannot log in without a verified email address")
+       }
+       for ae := range altEmails {
+               if ae != ret.Email {
+                       ret.AlternateEmails = append(ret.AlternateEmails, ae)
+                       if i := strings.Index(ae, "@"); i > 0 && strings.ToLower(ae[i+1:]) == strings.ToLower(ctrl.Cluster.Users.PreferDomainForUsername) {
+                               ret.Username = strings.SplitN(ae[:i], "+", 2)[0]
+                       }
+               }
+       }
+       return &ret, nil
+}
+
+func loginError(sendError error) (resp arvados.LoginResponse, err error) {
+       tmpl, err := template.New("error").Parse(`<h2>Login error:</h2><p>{{.}}</p>`)
+       if err != nil {
+               return
+       }
+       err = tmpl.Execute(&resp.HTML, sendError.Error())
+       return
+}
+
+func (ctrl *googleLoginController) newOAuth2State(key []byte, remote, returnTo string) oauth2State {
+       s := oauth2State{
+               Time:     time.Now().Unix(),
+               Remote:   remote,
+               ReturnTo: returnTo,
+       }
+       s.HMAC = s.computeHMAC(key)
+       return s
+}
+
+type oauth2State struct {
+       HMAC     []byte // hash of other fields; see computeHMAC()
+       Time     int64  // creation time (unix timestamp)
+       Remote   string // remote cluster if requesting a salted token, otherwise blank
+       ReturnTo string // redirect target
+}
+
+func (ctrl *googleLoginController) parseOAuth2State(encoded string) (s oauth2State) {
+       // Errors are not checked. If decoding/parsing fails, the
+       // token will be rejected by verify().
+       decoded, _ := base64.RawURLEncoding.DecodeString(encoded)
+       f := strings.Split(string(decoded), "\n")
+       if len(f) != 4 {
+               return
+       }
+       fmt.Sscanf(f[0], "%x", &s.HMAC)
+       fmt.Sscanf(f[1], "%x", &s.Time)
+       fmt.Sscanf(f[2], "%s", &s.Remote)
+       fmt.Sscanf(f[3], "%s", &s.ReturnTo)
+       return
+}
+
+func (s oauth2State) verify(key []byte) bool {
+       if delta := time.Now().Unix() - s.Time; delta < 0 || delta > 300 {
+               return false
+       }
+       return hmac.Equal(s.computeHMAC(key), s.HMAC)
+}
+
+func (s oauth2State) String() string {
+       var buf bytes.Buffer
+       enc := base64.NewEncoder(base64.RawURLEncoding, &buf)
+       fmt.Fprintf(enc, "%x\n%x\n%s\n%s", s.HMAC, s.Time, s.Remote, s.ReturnTo)
+       enc.Close()
+       return buf.String()
+}
+
+func (s oauth2State) computeHMAC(key []byte) []byte {
+       mac := hmac.New(sha256.New, key)
+       fmt.Fprintf(mac, "%x %s %s", s.Time, s.Remote, s.ReturnTo)
+       return mac.Sum(nil)
+}
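
The oauth2State token above is four newline-separated fields (hex HMAC, hex Unix timestamp, remote cluster ID, return URL) encoded with base64.RawURLEncoding and keyed with the cluster's SystemRootToken. A minimal standalone sketch of that encoding, assuming a made-up key and return URL:

    package main

    import (
        "crypto/hmac"
        "crypto/sha256"
        "encoding/base64"
        "fmt"
        "time"
    )

    // encodeState mirrors oauth2State.String()/computeHMAC() above:
    // the HMAC covers time, remote, and returnTo; the token is
    // "hmac\ntime\nremote\nreturnTo" with the first two fields in hex.
    func encodeState(key []byte, remote, returnTo string) string {
        now := time.Now().Unix()
        mac := hmac.New(sha256.New, key)
        fmt.Fprintf(mac, "%x %s %s", now, remote, returnTo)
        raw := fmt.Sprintf("%x\n%x\n%s\n%s", mac.Sum(nil), now, remote, returnTo)
        return base64.RawURLEncoding.EncodeToString([]byte(raw))
    }

    func main() {
        key := []byte("example-system-root-token") // made-up key; the controller uses SystemRootToken
        fmt.Println(encodeState(key, "", "https://workbench.example.com/"))
    }

A token built this way is only accepted by verify() for five minutes (the 300-second window), which bounds replay of a leaked state parameter.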
similarity index 94%
rename from lib/controller/localdb/login_test.go
rename to lib/controller/localdb/login_google_test.go
index db6daa195b226eb5ea36661909358edfeb263dbf..9e16e2e90439a8ab7767930b6c701fe6d6ab604a 100644 (file)
@@ -154,11 +154,11 @@ func (s *LoginSuite) SetUpTest(c *check.C) {
        c.Assert(err, check.IsNil)
 
        s.localdb = NewConn(s.cluster)
-       s.localdb.googleLoginController.issuer = s.fakeIssuer.URL
-       s.localdb.googleLoginController.peopleAPIBasePath = s.fakePeopleAPI.URL
+       s.localdb.loginController.(*googleLoginController).issuer = s.fakeIssuer.URL
+       s.localdb.loginController.(*googleLoginController).peopleAPIBasePath = s.fakePeopleAPI.URL
 
        s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
-       s.localdb.railsProxy = rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+       *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
 }
 
 func (s *LoginSuite) TearDownTest(c *check.C) {
@@ -188,7 +188,7 @@ func (s *LoginSuite) TestGoogleLogin_Start(c *check.C) {
                c.Check(target.Host, check.Equals, issuerURL.Host)
                q := target.Query()
                c.Check(q.Get("client_id"), check.Equals, "test%client$id")
-               state := s.localdb.googleLoginController.parseOAuth2State(q.Get("state"))
+               state := s.localdb.loginController.(*googleLoginController).parseOAuth2State(q.Get("state"))
                c.Check(state.verify([]byte(s.cluster.SystemRootToken)), check.Equals, true)
                c.Check(state.Time, check.Not(check.Equals), 0)
                c.Check(state.Remote, check.Equals, remote)
@@ -223,7 +223,7 @@ func (s *LoginSuite) setupPeopleAPIError(c *check.C) {
                w.WriteHeader(http.StatusForbidden)
                fmt.Fprintln(w, `Error 403: accessNotConfigured`)
        }))
-       s.localdb.googleLoginController.peopleAPIBasePath = s.fakePeopleAPI.URL
+       s.localdb.loginController.(*googleLoginController).peopleAPIBasePath = s.fakePeopleAPI.URL
 }
 
 func (s *LoginSuite) TestGoogleLogin_PeopleAPIDisabled(c *check.C) {
@@ -236,7 +236,7 @@ func (s *LoginSuite) TestGoogleLogin_PeopleAPIDisabled(c *check.C) {
                State: state,
        })
        c.Check(err, check.IsNil)
-       authinfo := s.getCallbackAuthInfo(c)
+       authinfo := getCallbackAuthInfo(c, s.railsSpy)
        c.Check(authinfo.Email, check.Equals, "joe.smith@primary.example.com")
 }
 
@@ -266,7 +266,7 @@ func (s *LoginSuite) TestGoogleLogin_Success(c *check.C) {
        token := target.Query().Get("api_token")
        c.Check(token, check.Matches, `v2/zzzzz-gj3su-.{15}/.{32,50}`)
 
-       authinfo := s.getCallbackAuthInfo(c)
+       authinfo := getCallbackAuthInfo(c, s.railsSpy)
        c.Check(authinfo.FirstName, check.Equals, "Fake User")
        c.Check(authinfo.LastName, check.Equals, "Name")
        c.Check(authinfo.Email, check.Equals, "active-user@arvados.local")
@@ -312,7 +312,7 @@ func (s *LoginSuite) TestGoogleLogin_RealName(c *check.C) {
                State: state,
        })
 
-       authinfo := s.getCallbackAuthInfo(c)
+       authinfo := getCallbackAuthInfo(c, s.railsSpy)
        c.Check(authinfo.FirstName, check.Equals, "Joseph")
        c.Check(authinfo.LastName, check.Equals, "Psmith")
 }
@@ -326,7 +326,7 @@ func (s *LoginSuite) TestGoogleLogin_OIDCRealName(c *check.C) {
                State: state,
        })
 
-       authinfo := s.getCallbackAuthInfo(c)
+       authinfo := getCallbackAuthInfo(c, s.railsSpy)
        c.Check(authinfo.FirstName, check.Equals, "Joe P.")
        c.Check(authinfo.LastName, check.Equals, "Smith")
 }
@@ -355,7 +355,7 @@ func (s *LoginSuite) TestGoogleLogin_AlternateEmailAddresses(c *check.C) {
                State: state,
        })
 
-       authinfo := s.getCallbackAuthInfo(c)
+       authinfo := getCallbackAuthInfo(c, s.railsSpy)
        c.Check(authinfo.Email, check.Equals, "joe.smith@primary.example.com")
        c.Check(authinfo.AlternateEmails, check.DeepEquals, []string{"joe.smith@home.example.com", "joe.smith@work.example.com"})
 }
@@ -384,7 +384,7 @@ func (s *LoginSuite) TestGoogleLogin_AlternateEmailAddresses_Primary(c *check.C)
                Code:  s.validCode,
                State: state,
        })
-       authinfo := s.getCallbackAuthInfo(c)
+       authinfo := getCallbackAuthInfo(c, s.railsSpy)
        c.Check(authinfo.Email, check.Equals, "joe.smith@primary.example.com")
        c.Check(authinfo.AlternateEmails, check.DeepEquals, []string{"joe.smith@alternate.example.com", "jsmith+123@preferdomainforusername.example.com"})
        c.Check(authinfo.Username, check.Equals, "jsmith")
@@ -411,30 +411,12 @@ func (s *LoginSuite) TestGoogleLogin_NoPrimaryEmailAddress(c *check.C) {
                State: state,
        })
 
-       authinfo := s.getCallbackAuthInfo(c)
+       authinfo := getCallbackAuthInfo(c, s.railsSpy)
        c.Check(authinfo.Email, check.Equals, "joe.smith@work.example.com") // first verified email in People response
        c.Check(authinfo.AlternateEmails, check.DeepEquals, []string{"joe.smith@home.example.com"})
        c.Check(authinfo.Username, check.Equals, "")
 }
 
-func (s *LoginSuite) getCallbackAuthInfo(c *check.C) (authinfo rpc.UserSessionAuthInfo) {
-       for _, dump := range s.railsSpy.RequestDumps {
-               c.Logf("spied request: %q", dump)
-               split := bytes.Split(dump, []byte("\r\n\r\n"))
-               c.Assert(split, check.HasLen, 2)
-               hdr, body := string(split[0]), string(split[1])
-               if strings.Contains(hdr, "POST /auth/controller/callback") {
-                       vs, err := url.ParseQuery(body)
-                       c.Check(json.Unmarshal([]byte(vs.Get("auth_info")), &authinfo), check.IsNil)
-                       c.Check(err, check.IsNil)
-                       sort.Strings(authinfo.AlternateEmails)
-                       return
-               }
-       }
-       c.Error("callback not found")
-       return
-}
-
 func (s *LoginSuite) startLogin(c *check.C) (state string) {
        // Initiate login, but instead of following the redirect to
        // the provider, just grab state from the redirect URL.
@@ -463,3 +445,21 @@ func (s *LoginSuite) fakeToken(c *check.C, payload []byte) string {
        c.Logf("fakeToken(%q) == %q", payload, t)
        return t
 }
+
+func getCallbackAuthInfo(c *check.C, railsSpy *arvadostest.Proxy) (authinfo rpc.UserSessionAuthInfo) {
+       for _, dump := range railsSpy.RequestDumps {
+               c.Logf("spied request: %q", dump)
+               split := bytes.Split(dump, []byte("\r\n\r\n"))
+               c.Assert(split, check.HasLen, 2)
+               hdr, body := string(split[0]), string(split[1])
+               if strings.Contains(hdr, "POST /auth/controller/callback") {
+                       vs, err := url.ParseQuery(body)
+                       c.Check(json.Unmarshal([]byte(vs.Get("auth_info")), &authinfo), check.IsNil)
+                       c.Check(err, check.IsNil)
+                       sort.Strings(authinfo.AlternateEmails)
+                       return
+               }
+       }
+       c.Error("callback not found")
+       return
+}
diff --git a/lib/controller/localdb/login_pam.go b/lib/controller/localdb/login_pam.go
new file mode 100644 (file)
index 0000000..01dfc13
--- /dev/null
@@ -0,0 +1,109 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "net/http"
+       "net/url"
+       "strings"
+
+       "git.arvados.org/arvados.git/lib/controller/rpc"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
+       "github.com/msteinert/pam"
+       "github.com/sirupsen/logrus"
+)
+
+type pamLoginController struct {
+       Cluster    *arvados.Cluster
+       RailsProxy *railsProxy
+}
+
+func (ctrl *pamLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+       return noopLogout(ctrl.Cluster, opts)
+}
+
+func (ctrl *pamLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
+       return arvados.LoginResponse{}, errors.New("interactive login is not available")
+}
+
+func (ctrl *pamLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+       errorMessage := ""
+       sentPassword := false
+       tx, err := pam.StartFunc(ctrl.Cluster.Login.PAMService, opts.Username, func(style pam.Style, message string) (string, error) {
+               ctxlog.FromContext(ctx).Debugf("pam conversation: style=%v message=%q", style, message)
+               switch style {
+               case pam.ErrorMsg:
+                       ctxlog.FromContext(ctx).WithField("Message", message).Info("pam.ErrorMsg")
+                       errorMessage = message
+                       return "", nil
+               case pam.TextInfo:
+                       ctxlog.FromContext(ctx).WithField("Message", message).Info("pam.TextInfo")
+                       return "", nil
+               case pam.PromptEchoOn, pam.PromptEchoOff:
+                       sentPassword = true
+                       return opts.Password, nil
+               default:
+                       return "", fmt.Errorf("unrecognized message style %d", style)
+               }
+       })
+       if err != nil {
+               return arvados.APIClientAuthorization{}, err
+       }
+       err = tx.Authenticate(pam.DisallowNullAuthtok)
+       if err != nil {
+               err = fmt.Errorf("PAM: %s", err)
+               if errorMessage != "" {
+                       // Perhaps the error message in the
+                       // conversation is helpful.
+                       err = fmt.Errorf("%s; %q", err, errorMessage)
+               }
+               if sentPassword {
+                       err = fmt.Errorf("%s (with username %q and password)", err, opts.Username)
+               } else {
+                       // This might hint that the username was
+                       // invalid.
+                       err = fmt.Errorf("%s (with username %q; password was never requested by PAM service)", err, opts.Username)
+               }
+               return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(err, http.StatusUnauthorized)
+       }
+       if errorMessage != "" {
+               return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New(errorMessage), http.StatusUnauthorized)
+       }
+       user, err := tx.GetItem(pam.User)
+       if err != nil {
+               return arvados.APIClientAuthorization{}, err
+       }
+       email := user
+       if domain := ctrl.Cluster.Login.PAMDefaultEmailDomain; domain != "" && !strings.Contains(email, "@") {
+               email = email + "@" + domain
+       }
+       ctxlog.FromContext(ctx).WithFields(logrus.Fields{"user": user, "email": email}).Debug("pam authentication succeeded")
+       ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
+       resp, err := ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+               // Send a fake ReturnTo value instead of the caller's
+               // opts.ReturnTo. We won't follow the resulting
+               // redirect target anyway.
+               ReturnTo: ",https://none.invalid",
+               AuthInfo: rpc.UserSessionAuthInfo{
+                       Username: user,
+                       Email:    email,
+               },
+       })
+       if err != nil {
+               return arvados.APIClientAuthorization{}, err
+       }
+       target, err := url.Parse(resp.RedirectLocation)
+       if err != nil {
+               return arvados.APIClientAuthorization{}, err
+       }
+       token := target.Query().Get("api_token")
+       return ctrl.RailsProxy.APIClientAuthorizationCurrent(auth.NewContext(ctx, auth.NewCredentials(token)), arvados.GetOptions{})
+}
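
With Login.PAM enabled, clients obtain a token by POSTing a username and password to the users/authenticate endpoint that this controller method serves (the route is added in the router change below). A sketch of such a client call, with a placeholder controller URL and credentials:

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "net/url"
    )

    func main() {
        // Placeholder controller URL and credentials.
        resp, err := http.PostForm(
            "https://controller.example.com/arvados/v1/users/authenticate",
            url.Values{"username": {"foo"}, "password": {"secret"}})
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := ioutil.ReadAll(resp.Body)
        // On success: an API client authorization document including
        // api_token; on failure: a 401 with a PAM error message.
        fmt.Println(resp.Status, string(body))
    }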
diff --git a/lib/controller/localdb/login_pam_docker_test.go b/lib/controller/localdb/login_pam_docker_test.go
new file mode 100644 (file)
index 0000000..8a02b2c
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Skip this slow test unless invoked as "go test -tags docker".
+// +build docker
+
+package localdb
+
+import (
+       "os"
+       "os/exec"
+
+       check "gopkg.in/check.v1"
+)
+
+func (s *PamSuite) TestLoginLDAPViaPAM(c *check.C) {
+       cmd := exec.Command("bash", "login_pam_docker_test.sh")
+       cmd.Stdout = os.Stderr
+       cmd.Stderr = os.Stderr
+       err := cmd.Run()
+       c.Check(err, check.IsNil)
+}
diff --git a/lib/controller/localdb/login_pam_docker_test.sh b/lib/controller/localdb/login_pam_docker_test.sh
new file mode 100755 (executable)
index 0000000..b8f281b
--- /dev/null
@@ -0,0 +1,193 @@
+#!/bin/bash
+
+# This script demonstrates using LDAP for Arvados user authentication.
+#
+# It configures pam_ldap(5) and arvados controller in a docker
+# container, with pam_ldap configured to authenticate against an
+# OpenLDAP server in a second docker container.
+#
+# After adding a "foo" user entry, it uses curl to check that the
+# Arvados controller's authentication endpoint
+# (/arvados/v1/users/authenticate) accepts the "foo" account's
+# username/password and rejects invalid credentials.
+#
+# It is intended to be run inside .../build/run-tests.sh (in
+# interactive mode: "test lib/controller/localdb -tags=docker
+# -check.f=LDAP -check.vv"). It assumes ARVADOS_TEST_API_HOST points
+# to a RailsAPI server and the desired version of arvados-server is
+# installed in $GOPATH/bin.
+
+set -e -o pipefail
+
+debug=/dev/null
+if [[ -n ${ARVADOS_DEBUG} ]]; then
+    debug=/dev/stderr
+    set -x
+fi
+
+hostname="$(hostname)"
+tmpdir="$(mktemp -d)"
+cleanup() {
+    trap - ERR
+    rm -r ${tmpdir}
+    for h in ${ldapctr} ${ctrlctr}; do
+        if [[ -n ${h} ]]; then
+            docker kill ${h}
+        fi
+    done
+}
+trap cleanup ERR
+
+if [[ -z "$(docker image ls -q osixia/openldap:1.3.0)" ]]; then
+    echo >&2 "Pulling docker image for ldap server"
+    docker pull osixia/openldap:1.3.0
+fi
+
+ldapctr=ldap-${RANDOM}
+echo >&2 "Starting ldap server in docker container ${ldapctr}"
+docker run --rm --detach \
+       -p 389 -p 636 \
+       --name=${ldapctr} \
+       osixia/openldap:1.3.0
+docker logs --follow ${ldapctr} 2>$debug >$debug &
+ldaphostport=$(docker port ${ldapctr} 389/tcp)
+ldapport=${ldaphostport##*:}
+ldapurl="ldap://${hostname}:${ldapport}"
+passwordhash="$(docker exec -i ${ldapctr} slappasswd -s "secret")"
+
+# These are the default admin credentials for osixia/openldap:1.3.0
+adminuser=admin
+adminpassword=admin
+
+cat >"${tmpdir}/zzzzz.yml" <<EOF
+Clusters:
+  zzzzz:
+    PostgreSQL:
+      Connection:
+        client_encoding: utf8
+        host: ${hostname}
+        dbname: arvados_test
+        user: arvados
+        password: insecure_arvados_test
+    ManagementToken: e687950a23c3a9bceec28c6223a06c79
+    SystemRootToken: systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy
+    API:
+      RequestTimeout: 30s
+    TLS:
+      Insecure: true
+    Collections:
+      BlobSigningKey: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
+      TrustAllContent: true
+      ForwardSlashNameSubstitution: /
+    Services:
+      RailsAPI:
+        InternalURLs:
+          "https://${hostname}:${ARVADOS_TEST_API_HOST##*:}/": {}
+      Controller:
+        ExternalURL: http://0.0.0.0:9999/
+        InternalURLs:
+          "http://0.0.0.0:9999/": {}
+    Login:
+      PAM: true
+      # Without this magic PAMDefaultEmailDomain, inserted users would
+      # prevent subsequent database/reset from working (see
+      # database_controller.rb).
+      PAMDefaultEmailDomain: example.com
+    SystemLogs:
+      LogLevel: debug
+EOF
+
+cat >"${tmpdir}/pam_ldap.conf" <<EOF
+base dc=example,dc=org
+ldap_version 3
+uri ${ldapurl}
+pam_password crypt
+binddn cn=${adminuser},dc=example,dc=org
+bindpw ${adminpassword}
+EOF
+
+cat >"${tmpdir}/add_example_user.ldif" <<EOF
+dn: cn=bar,dc=example,dc=org
+objectClass: posixGroup
+objectClass: top
+cn: bar
+gidNumber: 11111
+description: "Example group 'bar'"
+
+dn: uid=foo,dc=example,dc=org
+uid: foo
+cn: foo
+givenName: Foo
+sn: Bar
+mail: foobar@example.org
+objectClass: inetOrgPerson
+objectClass: posixAccount
+objectClass: top
+objectClass: shadowAccount
+shadowMax: 180
+shadowMin: 1
+shadowWarning: 7
+shadowLastChange: 10701
+loginShell: /bin/bash
+uidNumber: 11111
+gidNumber: 11111
+homeDirectory: /home/foo
+userPassword: ${passwordhash}
+EOF
+
+echo >&2 "Adding example user entry user=foo pass=secret (retrying until server comes up)"
+docker run --rm --entrypoint= \
+       -v "${tmpdir}/add_example_user.ldif":/add_example_user.ldif:ro \
+       osixia/openldap:1.3.0 \
+       bash -c "for f in \$(seq 1 5); do if ldapadd -H '${ldapurl}' -D 'cn=${adminuser},dc=example,dc=org' -w '${adminpassword}' -f /add_example_user.ldif; then exit 0; else sleep 2; fi; done; echo 'failed to add user entry'; exit 1"
+
+echo >&2 "Building arvados controller binary to run in container"
+go build -o "${tmpdir}" ../../../cmd/arvados-server
+
+ctrlctr=ctrl-${RANDOM}
+echo >&2 "Starting arvados controller in docker container ${ctrlctr}"
+docker run --detach --rm --name=${ctrlctr} \
+       -p 9999 \
+       -v "${tmpdir}/pam_ldap.conf":/etc/pam_ldap.conf:ro \
+       -v "${tmpdir}/arvados-server":/bin/arvados-server:ro \
+       -v "${tmpdir}/zzzzz.yml":/etc/arvados/config.yml:ro \
+       -v $(realpath "${PWD}/../../.."):/arvados:ro \
+       debian:10 \
+       bash -c "apt update && DEBIAN_FRONTEND=noninteractive apt install -y ldap-utils libpam-ldap && pam-auth-update --package /usr/share/pam-configs/ldap && arvados-server controller"
+docker logs --follow ${ctrlctr} 2>$debug >$debug &
+ctrlhostport=$(docker port ${ctrlctr} 9999/tcp)
+
+echo >&2 "Waiting for arvados controller to come up..."
+for f in $(seq 1 20); do
+    if curl -s "http://${ctrlhostport}/arvados/v1/config" >/dev/null; then
+        break
+    else
+        sleep 1
+    fi
+    echo -n >&2 .
+done
+echo >&2
+echo >&2 "Arvados controller is up at http://${ctrlhostport}"
+
+check_contains() {
+    resp="${1}"
+    str="${2}"
+    if ! echo "${resp}" | fgrep -q "${str}"; then
+        echo >&2 "${resp}"
+        echo >&2 "FAIL: expected in response, but not found: ${str@Q}"
+        return 1
+    fi
+}
+
+echo >&2 "Testing authentication failure"
+resp="$(curl -s --include -d username=foo -d password=nosecret "http://${ctrlhostport}/arvados/v1/users/authenticate" | tee $debug)"
+check_contains "${resp}" "HTTP/1.1 401"
+check_contains "${resp}" '{"errors":["PAM: Authentication failure (with username \"foo\" and password)"]}'
+
+echo >&2 "Testing authentication success"
+resp="$(curl -s --include -d username=foo -d password=secret "http://${ctrlhostport}/arvados/v1/users/authenticate" | tee $debug)"
+check_contains "${resp}" "HTTP/1.1 200"
+check_contains "${resp}" '"api_token":"'
+check_contains "${resp}" '"scopes":["all"]'
+check_contains "${resp}" '"uuid":"zzzzz-gj3su-'
+
+cleanup
diff --git a/lib/controller/localdb/login_pam_test.go b/lib/controller/localdb/login_pam_test.go
new file mode 100644 (file)
index 0000000..5b0e453
--- /dev/null
@@ -0,0 +1,84 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "context"
+       "io/ioutil"
+       "net/http"
+       "os"
+       "strings"
+
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/lib/controller/rpc"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&PamSuite{})
+
+type PamSuite struct {
+       cluster  *arvados.Cluster
+       ctrl     *pamLoginController
+       railsSpy *arvadostest.Proxy
+}
+
+func (s *PamSuite) SetUpSuite(c *check.C) {
+       cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+       c.Assert(err, check.IsNil)
+       s.cluster, err = cfg.GetCluster("")
+       c.Assert(err, check.IsNil)
+       s.cluster.Login.PAM = true
+       s.cluster.Login.PAMDefaultEmailDomain = "example.com"
+       s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+       s.ctrl = &pamLoginController{
+               Cluster:    s.cluster,
+               RailsProxy: rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider),
+       }
+}
+
+func (s *PamSuite) TestLoginFailure(c *check.C) {
+       resp, err := s.ctrl.UserAuthenticate(context.Background(), arvados.UserAuthenticateOptions{
+               Username: "bogususername",
+               Password: "boguspassword",
+       })
+       c.Check(err, check.ErrorMatches, `PAM: Authentication failure \(with username "bogususername" and password\)`)
+       hs, ok := err.(interface{ HTTPStatus() int })
+       if c.Check(ok, check.Equals, true) {
+               c.Check(hs.HTTPStatus(), check.Equals, http.StatusUnauthorized)
+       }
+       c.Check(resp.APIToken, check.Equals, "")
+}
+
+// This test only runs if the ARVADOS_TEST_PAM_CREDENTIALS_FILE env
+// var is set. The credentials file should contain a valid username
+// and password, separated by \n.
+func (s *PamSuite) TestLoginSuccess(c *check.C) {
+       testCredsFile := os.Getenv("ARVADOS_TEST_PAM_CREDENTIALS_FILE")
+       if testCredsFile == "" {
+               c.Skip("no test credentials file given in ARVADOS_TEST_PAM_CREDENTIALS_FILE")
+               return
+       }
+       buf, err := ioutil.ReadFile(testCredsFile)
+       c.Assert(err, check.IsNil)
+       lines := strings.Split(string(buf), "\n")
+       c.Assert(len(lines), check.Equals, 2, check.Commentf("credentials file %s should contain \"username\\npassword\"", testCredsFile))
+       u, p := lines[0], lines[1]
+
+       resp, err := s.ctrl.UserAuthenticate(context.Background(), arvados.UserAuthenticateOptions{
+               Username: u,
+               Password: p,
+       })
+       c.Check(err, check.IsNil)
+       c.Check(resp.APIToken, check.Not(check.Equals), "")
+       c.Check(resp.UUID, check.Matches, `zzzzz-gj3su-.*`)
+       c.Check(resp.Scopes, check.DeepEquals, []string{"all"})
+
+       authinfo := getCallbackAuthInfo(c, s.railsSpy)
+       c.Check(authinfo.Email, check.Equals, u+"@"+s.cluster.Login.PAMDefaultEmailDomain)
+       c.Check(authinfo.AlternateEmails, check.DeepEquals, []string(nil))
+}
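
TestLoginSuccess above is skipped unless ARVADOS_TEST_PAM_CREDENTIALS_FILE names a file containing exactly two lines, username then password, with no trailing newline. A hypothetical setup sketch (path and credentials are made up):

    package main

    import (
        "io/ioutil"
        "os"
    )

    func main() {
        // Two lines, no trailing newline, so the test's strings.Split
        // yields exactly ["testuser", "testpassword"].
        err := ioutil.WriteFile("/tmp/pam-creds", []byte("testuser\ntestpassword"), 0600)
        if err != nil {
            panic(err)
        }
        os.Setenv("ARVADOS_TEST_PAM_CREDENTIALS_FILE", "/tmp/pam-creds")
        // Then run: go test ./lib/controller/localdb -check.f=PamSuite
    }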
index 69d707703852b7fc60acfcb6e86d8fa960e7a5c9..c347e2f795517f74c9f67ec0311ba41d3250dafb 100644 (file)
@@ -303,6 +303,13 @@ func (rtr *router) addRoutes() {
                                return rtr.fed.UserDelete(ctx, *opts.(*arvados.DeleteOptions))
                        },
                },
+               {
+                       arvados.EndpointUserAuthenticate,
+                       func() interface{} { return &arvados.UserAuthenticateOptions{} },
+                       func(ctx context.Context, opts interface{}) (interface{}, error) {
+                               return rtr.fed.UserAuthenticate(ctx, *opts.(*arvados.UserAuthenticateOptions))
+                       },
+               },
        } {
                rtr.addRoute(route.endpoint, route.defaultOpts, route.exec)
        }
@@ -386,21 +393,23 @@ func (rtr *router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        default:
                w.Header().Set("Access-Control-Allow-Origin", "*")
                w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, PUT, POST, PATCH, DELETE")
-               w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type")
+               w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, X-Http-Method-Override")
                w.Header().Set("Access-Control-Max-Age", "86486400")
        }
        if r.Method == "OPTIONS" {
                return
        }
-       r.ParseForm()
-       if m := r.FormValue("_method"); m != "" {
-               r2 := *r
-               r = &r2
-               r.Method = m
-       } else if m = r.Header.Get("X-Http-Method-Override"); m != "" {
-               r2 := *r
-               r = &r2
-               r.Method = m
+       if r.Method == "POST" {
+               r.ParseForm()
+               if m := r.FormValue("_method"); m != "" {
+                       r2 := *r
+                       r = &r2
+                       r.Method = m
+               } else if m = r.Header.Get("X-Http-Method-Override"); m != "" {
+                       r2 := *r
+                       r = &r2
+                       r.Method = m
+               }
        }
        rtr.mux.ServeHTTP(w, r)
 }
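
The change above restricts HTTP method override (the _method form value and the X-Http-Method-Override header, which is also now allowed in CORS preflight responses) to POST requests. A sketch of a client relying on the override, with a placeholder URL, token, and collection UUID:

    package main

    import (
        "net/http"
        "net/url"
        "strings"
    )

    func main() {
        form := url.Values{"collection": {`{"name":"renamed"}`}}
        // Placeholder controller URL, collection UUID, and token.
        req, err := http.NewRequest("POST",
            "https://controller.example.com/arvados/v1/collections/zzzzz-4zz18-xxxxxxxxxxxxxxx",
            strings.NewReader(form.Encode()))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        req.Header.Set("Authorization", "Bearer placeholder-token")
        // Dispatched as a PUT (update) by the handler above; the same
        // header on a GET is now ignored.
        req.Header.Set("X-Http-Method-Override", "PUT")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
    }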
index b5c56dbc4d64897e4f44bbdbdc9c44f2f7931f56..729d8bdde09e7ee05d2766ef0a4d1ee72f01a8d1 100644 (file)
@@ -423,8 +423,15 @@ func (conn *Conn) UserSessionCreate(ctx context.Context, options UserSessionCrea
 }
 
 func (conn *Conn) UserBatchUpdate(ctx context.Context, options arvados.UserBatchUpdateOptions) (arvados.UserList, error) {
-       ep := arvados.APIEndpoint{Method: "PATCH", Path: "arvados/v1/users/batch_update"}
+       ep := arvados.EndpointUserBatchUpdate
        var resp arvados.UserList
        err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
        return resp, err
 }
+
+func (conn *Conn) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+       ep := arvados.EndpointUserAuthenticate
+       var resp arvados.APIClientAuthorization
+       err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+       return resp, err
+}
index 4023896f7933dbbd489387405419097dc083434e..02b6c976aec825f810eab3cca43488c808d5cc4e 100644 (file)
@@ -82,6 +82,11 @@ func (disp *dispatcher) CheckHealth() error {
        return disp.pool.CheckHealth()
 }
 
+// Done implements service.Handler.
+func (disp *dispatcher) Done() <-chan struct{} {
+       return disp.stopped
+}
+
 // Stop dispatching containers and release resources. Typically used
 // in tests.
 func (disp *dispatcher) Close() {
diff --git a/lib/install/arvadostest_docker_build.sh b/lib/install/arvadostest_docker_build.sh
new file mode 100755 (executable)
index 0000000..e0defa8
--- /dev/null
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+set -ex -o pipefail
+
+SRC=$(realpath $(dirname ${BASH_SOURCE[0]})/../..)
+
+ctrname=arvadostest
+ctrbase=${ctrname}
+if [[ "${1}" != "--update" ]] || ! docker images --format={{.Repository}} | grep -x ${ctrbase}; then
+    ctrbase=debian:10
+fi
+
+if docker ps -a --format={{.Names}} | grep -x ${ctrname}; then
+    echo >&2 "container name already in use -- another builder running?"
+    exit 1
+fi
+
+(cd ${SRC}/cmd/arvados-server && go install)
+trap "docker rm --volumes ${ctrname}" ERR
+docker run -it --name ${ctrname} \
+       -v ${GOPATH:-${HOME}/go}/bin/arvados-server:/bin/arvados-server:ro \
+       -v ${SRC}:/src/arvados:ro \
+       -v /tmp \
+       --env http_proxy \
+       --env https_proxy \
+       ${ctrbase} \
+       bash -c "
+set -ex -o pipefail
+arvados-server install -type test
+pg_ctlcluster 11 main start
+cp -a /src/arvados /tmp/
+cd /tmp/arvados
+rm -rf tmp config.yml database.yml services/api/config/database.yml
+mkdir tmp
+build/run-tests.sh WORKSPACE=\$PWD --temp /tmp/arvados/tmp --only x"
+docker commit ${ctrname} ${ctrname}
+trap - ERR
+docker rm --volumes ${ctrname}
diff --git a/lib/install/arvadostest_docker_run.sh b/lib/install/arvadostest_docker_run.sh
new file mode 100755 (executable)
index 0000000..ca53655
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Example:
+#
+# ./arvadostest_docker_build.sh             # build the base image ("arvadostest")
+# ./arvadostest_docker_build.sh --update    # update the base image with current version of `arvados-server install`
+# ./arvadostest_docker_run.sh --interactive # start a container using the previously built base image, copy this source tree into it, and invoke run-tests.sh with the given args
+
+set -ex -o pipefail
+
+declare -a qargs
+for arg in "$@"; do
+    qargs+=("${arg@Q}")
+done
+
+SRC=$(realpath $(dirname ${BASH_SOURCE[0]})/../..)
+
+docker run --rm -it \
+       --privileged \
+       -v /dev/fuse:/dev/fuse \
+       -v ${SRC}:/src/arvados:ro \
+       -v /tmp \
+       --env http_proxy \
+       --env https_proxy \
+       arvadostest \
+       bash -c "
+set -ex -o pipefail
+pg_ctlcluster 11 main start
+cp -a /src/arvados /tmp/
+cd /tmp/arvados
+rm -rf tmp config.yml database.yml services/api/config/database.yml
+mkdir tmp
+go run ./cmd/arvados-server install -type test
+build/run-tests.sh WORKSPACE=\$PWD --temp /tmp/arvados/tmp ${qargs[@]}"
diff --git a/lib/install/deps.go b/lib/install/deps.go
new file mode 100644 (file)
index 0000000..cbcf743
--- /dev/null
@@ -0,0 +1,417 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package install
+
+import (
+       "bufio"
+       "bytes"
+       "context"
+       "errors"
+       "flag"
+       "fmt"
+       "io"
+       "os"
+       "os/exec"
+       "strconv"
+       "strings"
+       "syscall"
+       "time"
+
+       "git.arvados.org/arvados.git/lib/cmd"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/lib/pq"
+)
+
+var Command cmd.Handler = installCommand{}
+
+const devtestDatabasePassword = "insecure_arvados_test"
+
+type installCommand struct{}
+
+func (installCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       logger := ctxlog.New(stderr, "text", "info")
+       ctx := ctxlog.Context(context.Background(), logger)
+       ctx, cancel := context.WithCancel(ctx)
+       defer cancel()
+
+       var err error
+       defer func() {
+               if err != nil {
+                       logger.WithError(err).Info("exiting")
+               }
+       }()
+
+       flags := flag.NewFlagSet(prog, flag.ContinueOnError)
+       flags.SetOutput(stderr)
+       versionFlag := flags.Bool("version", false, "Write version information to stdout and exit 0")
+       clusterType := flags.String("type", "production", "cluster `type`: development, test, or production")
+       err = flags.Parse(args)
+       if err == flag.ErrHelp {
+               err = nil
+               return 0
+       } else if err != nil {
+               return 2
+       } else if *versionFlag {
+               return cmd.Version.RunCommand(prog, args, stdin, stdout, stderr)
+       }
+
+       var dev, test, prod bool
+       switch *clusterType {
+       case "development":
+               dev = true
+       case "test":
+               test = true
+       case "production":
+               prod = true
+       default:
+               err = fmt.Errorf("invalid cluster type %q (must be 'development', 'test', or 'production')", *clusterType)
+               return 2
+       }
+
+       if prod {
+               err = errors.New("production install is not yet implemented")
+               return 1
+       }
+
+       osv, err := identifyOS()
+       if err != nil {
+               return 1
+       }
+
+       listdir, err := os.Open("/var/lib/apt/lists")
+       if err != nil {
+               logger.Warnf("error while checking whether to run apt-get update: %s", err)
+       } else if names, _ := listdir.Readdirnames(1); len(names) == 0 {
+               // Special case for a base docker image where the
+               // package cache has been deleted and all "apt-get
+               // install" commands will fail unless we fetch repos.
+               cmd := exec.CommandContext(ctx, "apt-get", "update")
+               cmd.Stdout = stdout
+               cmd.Stderr = stderr
+               err = cmd.Run()
+               if err != nil {
+                       return 1
+               }
+       }
+
+       if dev || test {
+               debs := []string{
+                       "bison",
+                       "bsdmainutils",
+                       "build-essential",
+                       "ca-certificates",
+                       "cadaver",
+                       "curl",
+                       "cython",
+                       "daemontools", // lib/boot uses setuidgid to drop privileges when running as root
+                       "default-jdk-headless",
+                       "default-jre-headless",
+                       "fuse",
+                       "gettext",
+                       "git",
+                       "gitolite3",
+                       "graphviz",
+                       "haveged",
+                       "iceweasel",
+                       "libattr1-dev",
+                       "libcrypt-ssleay-perl",
+                       "libcurl3-gnutls",
+                       "libcurl4-openssl-dev",
+                       "libfuse-dev",
+                       "libgnutls28-dev",
+                       "libjson-perl",
+                       "libpam-dev",
+                       "libpcre3-dev",
+                       "libpq-dev",
+                       "libpython2.7-dev",
+                       "libreadline-dev",
+                       "libssl-dev",
+                       "libwww-perl",
+                       "libxml2-dev",
+                       "libxslt1.1",
+                       "linkchecker",
+                       "lsof",
+                       "net-tools",
+                       "nginx",
+                       "pandoc",
+                       "perl-modules",
+                       "pkg-config",
+                       "postgresql",
+                       "postgresql-contrib",
+                       "python",
+                       "python3-dev",
+                       "python-epydoc",
+                       "r-base",
+                       "r-cran-testthat",
+                       "sudo",
+                       "virtualenv",
+                       "wget",
+                       "xvfb",
+                       "zlib1g-dev",
+               }
+               switch {
+               case osv.Debian && osv.Major >= 10:
+                       debs = append(debs, "libcurl4")
+               default:
+                       debs = append(debs, "libcurl3")
+               }
+               cmd := exec.CommandContext(ctx, "apt-get", "install", "--yes", "--no-install-recommends")
+               cmd.Args = append(cmd.Args, debs...)
+               cmd.Env = append(os.Environ(), "DEBIAN_FRONTEND=noninteractive")
+               cmd.Stdout = stdout
+               cmd.Stderr = stderr
+               err = cmd.Run()
+               if err != nil {
+                       return 1
+               }
+       }
+
+       os.Mkdir("/var/lib/arvados", 0755)
+       rubyversion := "2.5.7"
+       if haverubyversion, err := exec.Command("/var/lib/arvados/bin/ruby", "-v").CombinedOutput(); err == nil && bytes.HasPrefix(haverubyversion, []byte("ruby "+rubyversion)) {
+               logger.Print("ruby " + rubyversion + " already installed")
+       } else {
+               err = runBash(`
+mkdir -p /var/lib/arvados/tmp
+tmp=/var/lib/arvados/tmp/ruby-`+rubyversion+`
+trap "rm -r ${tmp}" ERR
+wget --progress=dot:giga -O- https://cache.ruby-lang.org/pub/ruby/2.5/ruby-`+rubyversion+`.tar.gz | tar -C /var/lib/arvados/tmp -xzf -
+cd ${tmp}
+./configure --disable-install-doc --prefix /var/lib/arvados
+make -j4
+make install
+/var/lib/arvados/bin/gem install bundler
+rm -r ${tmp}
+`, stdout, stderr)
+               if err != nil {
+                       return 1
+               }
+       }
+
+       if !prod {
+               goversion := "1.14"
+               if havegoversion, err := exec.Command("/usr/local/bin/go", "version").CombinedOutput(); err == nil && bytes.HasPrefix(havegoversion, []byte("go version go"+goversion+" ")) {
+                       logger.Print("go " + goversion + " already installed")
+               } else {
+                       err = runBash(`
+cd /tmp
+wget --progress=dot:giga -O- https://storage.googleapis.com/golang/go`+goversion+`.linux-amd64.tar.gz | tar -C /var/lib/arvados -xzf -
+ln -sf /var/lib/arvados/go/bin/* /usr/local/bin/
+`, stdout, stderr)
+                       if err != nil {
+                               return 1
+                       }
+               }
+
+               pjsversion := "1.9.8"
+               if havepjsversion, err := exec.Command("/usr/local/bin/phantomjs", "--version").CombinedOutput(); err == nil && string(havepjsversion) == "1.9.8\n" {
+                       logger.Print("phantomjs " + pjsversion + " already installed")
+               } else {
+                       err = runBash(`
+PJS=phantomjs-`+pjsversion+`-linux-x86_64
+wget --progress=dot:giga -O- https://bitbucket.org/ariya/phantomjs/downloads/$PJS.tar.bz2 | tar -C /var/lib/arvados -xjf -
+ln -sf /var/lib/arvados/$PJS/bin/phantomjs /usr/local/bin/
+`, stdout, stderr)
+                       if err != nil {
+                               return 1
+                       }
+               }
+
+               geckoversion := "0.24.0"
+               if havegeckoversion, err := exec.Command("/usr/local/bin/geckodriver", "--version").CombinedOutput(); err == nil && strings.Contains(string(havegeckoversion), " "+geckoversion+" ") {
+                       logger.Print("geckodriver " + geckoversion + " already installed")
+               } else {
+                       err = runBash(`
+GD=v`+geckoversion+`
+wget --progress=dot:giga -O- https://github.com/mozilla/geckodriver/releases/download/$GD/geckodriver-$GD-linux64.tar.gz | tar -C /var/lib/arvados/bin -xzf - geckodriver
+ln -sf /var/lib/arvados/bin/geckodriver /usr/local/bin/
+`, stdout, stderr)
+                       if err != nil {
+                               return 1
+                       }
+               }
+
+               nodejsversion := "v8.15.1"
+               if havenodejsversion, err := exec.Command("/usr/local/bin/node", "--version").CombinedOutput(); err == nil && string(havenodejsversion) == nodejsversion+"\n" {
+                       logger.Print("nodejs " + nodejsversion + " already installed")
+               } else {
+                       err = runBash(`
+NJS=`+nodejsversion+`
+wget --progress=dot:giga -O- https://nodejs.org/dist/${NJS}/node-${NJS}-linux-x64.tar.xz | sudo tar -C /var/lib/arvados -xJf -
+ln -sf /var/lib/arvados/node-${NJS}-linux-x64/bin/{node,npm} /usr/local/bin/
+`, stdout, stderr)
+                       if err != nil {
+                               return 1
+                       }
+               }
+
+               gradleversion := "5.3.1"
+               if havegradleversion, err := exec.Command("/usr/local/bin/gradle", "--version").CombinedOutput(); err == nil && strings.Contains(string(havegradleversion), "Gradle "+gradleversion+"\n") {
+                       logger.Print("gradle " + gradleversion + " already installed")
+               } else {
+                       err = runBash(`
+G=`+gradleversion+`
+mkdir -p /var/lib/arvados/tmp
+zip=/var/lib/arvados/tmp/gradle-${G}-bin.zip
+trap "rm ${zip}" ERR
+wget --progress=dot:giga -O${zip} https://services.gradle.org/distributions/gradle-${G}-bin.zip
+unzip -o -d /var/lib/arvados ${zip}
+ln -sf /var/lib/arvados/gradle-${G}/bin/gradle /usr/local/bin/
+rm ${zip}
+`, stdout, stderr)
+                       if err != nil {
+                               return 1
+                       }
+               }
+
+               // The entry in /etc/locale.gen is "en_US.UTF-8"; once
+               // it's installed, locale -a reports it as
+               // "en_US.utf8".
+               wantlocale := "en_US.UTF-8"
+               if havelocales, err := exec.Command("locale", "-a").CombinedOutput(); err == nil && bytes.Contains(havelocales, []byte(strings.Replace(wantlocale+"\n", "UTF-", "utf", 1))) {
+                       logger.Print("locale " + wantlocale + " already installed")
+               } else {
+                       err = runBash(`sed -i 's/^# *\(`+wantlocale+`\)/\1/' /etc/locale.gen && locale-gen`, stdout, stderr)
+                       if err != nil {
+                               return 1
+                       }
+               }
+
+               var pgc struct {
+                       Version       string
+                       Cluster       string
+                       Port          int
+                       Status        string
+                       Owner         string
+                       DataDirectory string
+                       LogFile       string
+               }
+               if pg_lsclusters, err2 := exec.Command("pg_lsclusters", "--no-header").CombinedOutput(); err2 != nil {
+                       err = fmt.Errorf("pg_lsclusters: %s", err2)
+                       return 1
+               } else if pgclusters := strings.Split(strings.TrimSpace(string(pg_lsclusters)), "\n"); len(pgclusters) != 1 {
+                       logger.Warnf("pg_lsclusters returned %d postgresql clusters -- skipping postgresql initdb/startup, hope that's ok", len(pgclusters))
+               } else if _, err = fmt.Sscanf(pgclusters[0], "%s %s %d %s %s %s %s", &pgc.Version, &pgc.Cluster, &pgc.Port, &pgc.Status, &pgc.Owner, &pgc.DataDirectory, &pgc.LogFile); err != nil {
+                       err = fmt.Errorf("error parsing pg_lsclusters output: %s", err)
+                       return 1
+               } else if pgc.Status == "online" {
+                       logger.Infof("postgresql cluster %s-%s is online", pgc.Version, pgc.Cluster)
+               } else {
+                       logger.Infof("postgresql cluster %s-%s is %s; trying to start", pgc.Version, pgc.Cluster, pgc.Status)
+                       cmd := exec.Command("pg_ctlcluster", "--foreground", pgc.Version, pgc.Cluster, "start")
+                       cmd.Stdout = stdout
+                       cmd.Stderr = stderr
+                       err = cmd.Start()
+                       if err != nil {
+                               return 1
+                       }
+                       defer func() {
+                               cmd.Process.Signal(syscall.SIGTERM)
+                               logger.Infof("sent SIGTERM; waiting for postgres to shut down")
+                               cmd.Wait()
+                       }()
+                       for deadline := time.Now().Add(10 * time.Second); ; {
+                               output, err2 := exec.Command("pg_isready").CombinedOutput()
+                               if err2 == nil {
+                                       break
+                               } else if time.Now().After(deadline) {
+                                       err = fmt.Errorf("timed out waiting for pg_isready (%q)", output)
+                                       return 1
+                               } else {
+                                       time.Sleep(time.Second)
+                               }
+                       }
+               }
+
+               if os.Getpid() == 1 {
+                       // We are the init process (presumably in a
+                       // docker container) so although postgresql is
+                       // installed, it's not running, and initdb
+                       // might never have been run.
+               }
+
+               withstuff := "WITH LOGIN SUPERUSER ENCRYPTED PASSWORD " + pq.QuoteLiteral(devtestDatabasePassword)
+               cmd := exec.Command("sudo", "-u", "postgres", "psql", "-c", "ALTER ROLE arvados "+withstuff)
+               cmd.Dir = "/"
+               if err := cmd.Run(); err == nil {
+                       logger.Print("arvados role exists; superuser privileges added, password updated")
+               } else {
+                       cmd := exec.Command("sudo", "-u", "postgres", "psql", "-c", "CREATE ROLE arvados "+withstuff)
+                       cmd.Dir = "/"
+                       cmd.Stdout = stdout
+                       cmd.Stderr = stderr
+                       err = cmd.Run()
+                       if err != nil {
+                               return 1
+                       }
+               }
+       }
+
+       return 0
+}
+
+type osversion struct {
+       Debian bool
+       Ubuntu bool
+       Major  int
+}
+
+func identifyOS() (osversion, error) {
+       var osv osversion
+       f, err := os.Open("/etc/os-release")
+       if err != nil {
+               return osv, err
+       }
+       defer f.Close()
+
+       kv := map[string]string{}
+       scanner := bufio.NewScanner(f)
+       for scanner.Scan() {
+               line := strings.TrimSpace(scanner.Text())
+               if strings.HasPrefix(line, "#") {
+                       continue
+               }
+               toks := strings.SplitN(line, "=", 2)
+               if len(toks) != 2 {
+                       return osv, fmt.Errorf("invalid line in /etc/os-release: %q", line)
+               }
+               k := toks[0]
+               v := strings.Trim(toks[1], `"`)
+               if v == toks[1] {
+                       v = strings.Trim(v, `'`)
+               }
+               kv[k] = v
+       }
+       if err = scanner.Err(); err != nil {
+               return osv, err
+       }
+       switch kv["ID"] {
+       case "ubuntu":
+               osv.Ubuntu = true
+       case "debian":
+               osv.Debian = true
+       default:
+               return osv, fmt.Errorf("unsupported ID in /etc/os-release: %q", kv["ID"])
+       }
+       vstr := kv["VERSION_ID"]
+       if i := strings.Index(vstr, "."); i > 0 {
+               vstr = vstr[:i]
+       }
+       osv.Major, err = strconv.Atoi(vstr)
+       if err != nil {
+               return osv, fmt.Errorf("incomprehensible VERSION_ID in /etc/os-release: %q", kv["VERSION_ID"])
+       }
+       return osv, nil
+}
+
+func runBash(script string, stdout, stderr io.Writer) error {
+       cmd := exec.Command("bash", "-")
+       cmd.Stdin = bytes.NewBufferString("set -ex -o pipefail\n" + script)
+       cmd.Stdout = stdout
+       cmd.Stderr = stderr
+       return cmd.Run()
+}
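Note: the Sscanf call in the postgresql-detection code above assumes the Debian/Ubuntu seven-column output of "pg_lsclusters --no-header" (version, cluster, port, status, owner, data directory, log file). A minimal standalone sketch of that parse, using a made-up sample line:

    package main

    import "fmt"

    func main() {
            // Hypothetical sample line; real output comes from "pg_lsclusters --no-header".
            line := "11 main 5432 online postgres /var/lib/postgresql/11/main /var/log/postgresql/postgresql-11-main.log"
            var version, cluster, status, owner, dataDir, logFile string
            var port int
            fmt.Sscanf(line, "%s %s %d %s %s %s %s",
                    &version, &cluster, &port, &status, &owner, &dataDir, &logFile)
            fmt.Println(version, cluster, port, status) // 11 main 5432 online
    }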
diff --git a/lib/install/deps_test.go b/lib/install/deps_test.go
new file mode 100644 (file)
index 0000000..5dfdbfe
--- /dev/null
@@ -0,0 +1,47 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Skip this slow test unless invoked as "go test -tags docker".
+// Depending on host/network speed, Go's default 10m test timeout
+// might be too short; recommend "go test -timeout 20m -tags docker".
+//
+// +build docker
+
+package install
+
+import (
+       "os"
+       "testing"
+
+       "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&Suite{})
+
+type Suite struct{}
+
+func (*Suite) TestInstallDeps(c *check.C) {
+       tmp := c.MkDir()
+       script := `
+set -x
+tmp="` + tmp + `"
+sourcepath="$(realpath ../..)"
+(cd ${sourcepath} && go build -o ${tmp} ./cmd/arvados-server)
+docker run -i --rm --workdir /arvados \
+       -v ${tmp}/arvados-server:/arvados-server:ro \
+       -v ${sourcepath}:/arvados:ro \
+       -v /arvados/apps/workbench/.bundle \
+       -v /arvados/services/api/.bundle \
+       -v /arvados/services/api/tmp \
+       --env http_proxy \
+       --env https_proxy \
+       debian:10 \
+       bash -c "/arvados-server install -type test && /arvados-server boot -type test -config doc/examples/config/zzzzz.yml -own-temporary-database -shutdown -timeout 9m"
+`
+       c.Check(runBash(script, os.Stdout, os.Stderr), check.IsNil)
+}
diff --git a/lib/install/example_from_scratch.sh b/lib/install/example_from_scratch.sh
new file mode 100644 (file)
index 0000000..03d9b7f
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -e -o pipefail
+
+# Starting with a base debian buster system, like "docker run -it
+# debian:10"...
+
+apt update
+apt upgrade
+apt install --no-install-recommends build-essential ca-certificates git golang
+git clone https://git.arvados.org/arvados.git
+cd arvados
+[[ -e lib/install ]] || git checkout origin/16053-install-deps
+# run the installer from the top of the source tree
+go run ./cmd/arvados-server install -type test
+pg_isready || pg_ctlcluster 11 main start # only needed if there's no init process (as in docker)
+build/run-tests.sh
index 7f2f78ee9a9f7224aac4aacba94148497f292a5e..1e7a9a36edd3a8142192d14bfcfbf12885e1e857 100644 (file)
@@ -29,6 +29,7 @@ import (
 type Handler interface {
        http.Handler
        CheckHealth() error
+       Done() <-chan struct{}
 }
 
 type NewHandlerFunc func(_ context.Context, _ *arvados.Cluster, token string, registry *prometheus.Registry) Handler
@@ -148,9 +149,15 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
                logger.WithError(err).Errorf("error notifying init daemon")
        }
        go func() {
+               // Shut down server if caller cancels context
                <-ctx.Done()
                srv.Close()
        }()
+       go func() {
+               // Shut down server if handler dies
+               <-handler.Done()
+               srv.Close()
+       }()
        err = srv.Wait()
        if err != nil {
                return 1
index 86039c4dd1fa111d2de292676f4773c9bdc203a1..ec7834972c2609aeb5e4cd14099d35367a7e3c09 100644 (file)
@@ -135,6 +135,7 @@ type testHandler struct {
        healthCheck chan bool
 }
 
+func (th *testHandler) Done() <-chan struct{}                            { return nil }
 func (th *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { th.handler.ServeHTTP(w, r) }
 func (th *testHandler) CheckHealth() error {
        ctxlog.FromContext(th.ctx).Info("CheckHealth called")
index c4049f7064d70a5a88c654be10cff478bb0f42f3..a4d7370d1b8d0d6a3b630025a181a56c029ed3b0 100644 (file)
@@ -36,3 +36,15 @@ func (eh errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 func (eh errorHandler) CheckHealth() error {
        return eh.err
 }
+
+// Done returns a closed channel to indicate the service has
+// stopped/failed.
+func (eh errorHandler) Done() <-chan struct{} {
+       return doneChannel
+}
+
+var doneChannel = func() <-chan struct{} {
+       done := make(chan struct{})
+       close(done)
+       return done
+}()
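Note: with the Done() method added to service.Handler, lib/service shuts the HTTP server down when a handler's channel closes (as wired up in the lib/service/cmd.go hunk above), just as it already does when the caller cancels the context. A minimal sketch of a hypothetical handler, not part of this commit, that closes its Done channel when its background work fails:

    package main

    import "net/http"

    // exampleHandler is a hypothetical service.Handler used only for illustration.
    type exampleHandler struct {
            err  error
            done chan struct{}
    }

    func (h *exampleHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {}
    func (h *exampleHandler) CheckHealth() error    { return h.err }
    func (h *exampleHandler) Done() <-chan struct{} { return h.done }

    func newExampleHandler() *exampleHandler {
            h := &exampleHandler{done: make(chan struct{})}
            go func() {
                    // ...background work; on a fatal error, record it for
                    // CheckHealth and close done so lib/service calls srv.Close().
                    close(h.done)
            }()
            return h
    }

    func main() { _ = newExampleHandler() }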
index e60108335163714717c2fa8c9d0ed7760c0bc303..c32f88864f88750c00fe896286e147ccd9d061ce 100644 (file)
@@ -55,7 +55,8 @@ var (
        EndpointUserUnsetup                   = APIEndpoint{"POST", "arvados/v1/users/{uuid}/unsetup", ""}
        EndpointUserUpdate                    = APIEndpoint{"PATCH", "arvados/v1/users/{uuid}", "user"}
        EndpointUserUpdateUUID                = APIEndpoint{"POST", "arvados/v1/users/{uuid}/update_uuid", ""}
-       EndpointUserBatchUpdate               = APIEndpoint{"PATCH", "arvados/v1/users/batch", ""}
+       EndpointUserBatchUpdate               = APIEndpoint{"PATCH", "arvados/v1/users/batch_update", ""}
+       EndpointUserAuthenticate              = APIEndpoint{"POST", "arvados/v1/users/authenticate", ""}
        EndpointAPIClientAuthorizationCurrent = APIEndpoint{"GET", "arvados/v1/api_client_authorizations/current", ""}
 )
 
@@ -144,6 +145,11 @@ type LoginOptions struct {
        State    string `json:"state,omitempty"`  // OAuth2 callback state
 }
 
+type UserAuthenticateOptions struct {
+       Username string `json:"username,omitempty"` // PAM username
+       Password string `json:"password,omitempty"` // PAM password
+}
+
 type LogoutOptions struct {
        ReturnTo string `json:"return_to"` // Redirect to this URL after logging out
 }
@@ -186,5 +192,6 @@ type API interface {
        UserList(ctx context.Context, options ListOptions) (UserList, error)
        UserDelete(ctx context.Context, options DeleteOptions) (User, error)
        UserBatchUpdate(context.Context, UserBatchUpdateOptions) (UserList, error)
+       UserAuthenticate(ctx context.Context, options UserAuthenticateOptions) (APIClientAuthorization, error)
        APIClientAuthorizationCurrent(ctx context.Context, options GetOptions) (APIClientAuthorization, error)
 }
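Note: a minimal sketch of exercising the new users/authenticate endpoint over plain HTTP; the controller URL and credentials below are made-up examples, and the field names match the json tags on UserAuthenticateOptions. On success the response body is an api_client_authorization record whose api_token can be used like any other Arvados token:

    package main

    import (
            "bytes"
            "encoding/json"
            "fmt"
            "net/http"
    )

    func main() {
            body, _ := json.Marshal(map[string]string{
                    "username": "crunch", // example PAM account name
                    "password": "secret", // example password
            })
            // Hypothetical controller URL.
            resp, err := http.Post("https://controller.example/arvados/v1/users/authenticate",
                    "application/json", bytes.NewReader(body))
            if err != nil {
                    panic(err)
            }
            defer resp.Body.Close()
            fmt.Println(resp.Status)
    }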
index 67e3f631174ce86741190a25aa4ce8a63864fd1f..1e2c07e867e84d6d6719fdd4f9298b005a86c6e9 100644 (file)
@@ -186,7 +186,7 @@ func (c *Client) DoAndDecode(dst interface{}, req *http.Request) error {
                return nil
        case isRedirectStatus(resp.StatusCode):
                // Copy the redirect target URL to dst.RedirectLocation.
-               buf, err := json.Marshal(map[string]string{"RedirectLocation": resp.Header.Get("Location")})
+               buf, err := json.Marshal(map[string]string{"redirect_location": resp.Header.Get("Location")})
                if err != nil {
                        return err
                }
index a70980cbde232cc562155bbfb813d0f1cf32afbc..6b83fb96d49e6359e656c3e634a273b6f29c4e16 100644 (file)
@@ -136,6 +136,9 @@ type Cluster struct {
                GoogleClientID                string
                GoogleClientSecret            string
                GoogleAlternateEmailAddresses bool
+               PAM                           bool
+               PAMService                    string
+               PAMDefaultEmailDomain         string
                ProviderAppID                 string
                ProviderAppSecret             string
                LoginCluster                  string
@@ -421,6 +424,24 @@ var errDuplicateInstanceTypeName = errors.New("duplicate instance type name")
 // UnmarshalJSON handles old config files that provide an array of
 // instance types instead of a hash.
 func (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {
+       fixup := func(t InstanceType) (InstanceType, error) {
+               if t.ProviderType == "" {
+                       t.ProviderType = t.Name
+               }
+               if t.Scratch == 0 {
+                       t.Scratch = t.IncludedScratch + t.AddedScratch
+               } else if t.AddedScratch == 0 {
+                       t.AddedScratch = t.Scratch - t.IncludedScratch
+               } else if t.IncludedScratch == 0 {
+                       t.IncludedScratch = t.Scratch - t.AddedScratch
+               }
+
+               if t.Scratch != (t.IncludedScratch + t.AddedScratch) {
+                       return t, fmt.Errorf("InstanceType %q: Scratch != (IncludedScratch + AddedScratch)", t.Name)
+               }
+               return t, nil
+       }
+
        if len(data) > 0 && data[0] == '[' {
                var arr []InstanceType
                err := json.Unmarshal(data, &arr)
@@ -436,19 +457,9 @@ func (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {
                        if _, ok := (*it)[t.Name]; ok {
                                return errDuplicateInstanceTypeName
                        }
-                       if t.ProviderType == "" {
-                               t.ProviderType = t.Name
-                       }
-                       if t.Scratch == 0 {
-                               t.Scratch = t.IncludedScratch + t.AddedScratch
-                       } else if t.AddedScratch == 0 {
-                               t.AddedScratch = t.Scratch - t.IncludedScratch
-                       } else if t.IncludedScratch == 0 {
-                               t.IncludedScratch = t.Scratch - t.AddedScratch
-                       }
-
-                       if t.Scratch != (t.IncludedScratch + t.AddedScratch) {
-                               return fmt.Errorf("%v: Scratch != (IncludedScratch + AddedScratch)", t.Name)
+                       t, err := fixup(t)
+                       if err != nil {
+                               return err
                        }
                        (*it)[t.Name] = t
                }
@@ -464,8 +475,9 @@ func (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {
        *it = InstanceTypeMap(hash)
        for name, t := range *it {
                t.Name = name
-               if t.ProviderType == "" {
-                       t.ProviderType = name
+               t, err := fixup(t)
+               if err != nil {
+                       return err
                }
                (*it)[name] = t
        }
index b984cb5669ce851f2ec1f136a9c96bfb0d06b832..e4d26e03fd3f8101ad339f648b1efbaa56208437 100644 (file)
@@ -45,3 +45,29 @@ func (s *ConfigSuite) TestInstanceTypeSize(c *check.C) {
        c.Check(int64(it.Scratch), check.Equals, int64(4000000000))
        c.Check(int64(it.RAM), check.Equals, int64(4294967296))
 }
+
+func (s *ConfigSuite) TestInstanceTypeFixup(c *check.C) {
+       for _, confdata := range []string{
+               // Current format: map of entries
+               `{foo4: {IncludedScratch: 4GB}, foo8: {ProviderType: foo_8, Scratch: 8GB}}`,
+               // Legacy format: array of entries with key in "Name" field
+               `[{Name: foo4, IncludedScratch: 4GB}, {Name: foo8, ProviderType: foo_8, Scratch: 8GB}]`,
+       } {
+               c.Log(confdata)
+               var itm InstanceTypeMap
+               err := yaml.Unmarshal([]byte(confdata), &itm)
+               c.Check(err, check.IsNil)
+
+               c.Check(itm["foo4"].Name, check.Equals, "foo4")
+               c.Check(itm["foo4"].ProviderType, check.Equals, "foo4")
+               c.Check(itm["foo4"].Scratch, check.Equals, ByteSize(4000000000))
+               c.Check(itm["foo4"].AddedScratch, check.Equals, ByteSize(0))
+               c.Check(itm["foo4"].IncludedScratch, check.Equals, ByteSize(4000000000))
+
+               c.Check(itm["foo8"].Name, check.Equals, "foo8")
+               c.Check(itm["foo8"].ProviderType, check.Equals, "foo_8")
+               c.Check(itm["foo8"].Scratch, check.Equals, ByteSize(8000000000))
+               c.Check(itm["foo8"].AddedScratch, check.Equals, ByteSize(8000000000))
+               c.Check(itm["foo8"].IncludedScratch, check.Equals, ByteSize(0))
+       }
+}
index 75ebc81c142b1ee167bafa7792923513af3c5120..ab7637546289a659651c9648907b0558cfe1c064 100644 (file)
@@ -10,8 +10,8 @@ import (
 )
 
 type LoginResponse struct {
-       RedirectLocation string
-       HTML             bytes.Buffer
+       RedirectLocation string       `json:"redirect_location,omitempty"`
+       HTML             bytes.Buffer `json:"-"`
 }
 
 func (resp LoginResponse) ServeHTTP(w http.ResponseWriter, req *http.Request) {
@@ -26,7 +26,7 @@ func (resp LoginResponse) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 }
 
 type LogoutResponse struct {
-       RedirectLocation string
+       RedirectLocation string `json:"redirect_location,omitempty"`
 }
 
 func (resp LogoutResponse) ServeHTTP(w http.ResponseWriter, req *http.Request) {
index 9019d33cfb8bc167c45cf5e4c1b2fa4107d0c9c3..fa5f53936028504b9dd8f4bcc41f1304dd36656e 100644 (file)
@@ -177,6 +177,10 @@ func (as *APIStub) UserBatchUpdate(ctx context.Context, options arvados.UserBatc
        as.appendCall(as.UserBatchUpdate, ctx, options)
        return arvados.UserList{}, as.Error
 }
+func (as *APIStub) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+       as.appendCall(as.UserAuthenticate, ctx, options)
+       return arvados.APIClientAuthorization{}, as.Error
+}
 func (as *APIStub) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {
        as.appendCall(as.APIClientAuthorizationCurrent, ctx, options)
        return arvados.APIClientAuthorization{}, as.Error
index a0284e8f247a60f8d2fd57b752f37a800d54c222..794adabdd3926b6b04036a6c62b1044f2e8f13d5 100644 (file)
@@ -46,6 +46,10 @@ func (agg *Aggregator) CheckHealth() error {
        return nil
 }
 
+func (agg *Aggregator) Done() <-chan struct{} {
+       return nil
+}
+
 func (agg *Aggregator) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        agg.setupOnce.Do(agg.setup)
        sendErr := func(statusCode int, err error) {
index 8886f9517dfd5983032235e713a000f5615880b7..59981e3e55265be4eed1827d3570391533ac3a30 100644 (file)
@@ -53,10 +53,22 @@ func LogRequests(h http.Handler) http.Handler {
 
                logRequest(w, req, lgr)
                defer logResponse(w, req, lgr)
-               h.ServeHTTP(w, req)
+               h.ServeHTTP(rewrapResponseWriter(w, wrapped), req)
        })
 }
 
+// rewrapResponseWriter returns w, restoring any optional interfaces
+// (currently http.Hijacker) that wrapped provides.
+func rewrapResponseWriter(w http.ResponseWriter, wrapped http.ResponseWriter) http.ResponseWriter {
+       if hijacker, ok := wrapped.(http.Hijacker); ok {
+               return struct {
+                       http.ResponseWriter
+                       http.Hijacker
+               }{w, hijacker}
+       } else {
+               return w
+       }
+}
+
 func Logger(req *http.Request) logrus.FieldLogger {
        return ctxlog.FromContext(req.Context())
 }
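Note: the rewrap step above matters because LogRequests hands the handler a wrapped ResponseWriter, and a plain wrapper hides optional interfaces such as http.Hijacker that connection upgrades (e.g. websockets) rely on. A small illustration of that loss, assuming a wrapper that embeds only http.ResponseWriter:

    package main

    import (
            "fmt"
            "net/http"
    )

    // plainWrapper forwards the ResponseWriter methods but not Hijack, so the
    // type assertion below fails until the value is rewrapped.
    type plainWrapper struct{ http.ResponseWriter }

    func main() {
            var w http.ResponseWriter = plainWrapper{}
            _, canHijack := w.(http.Hijacker)
            fmt.Println(canHijack) // false
    }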
index 89f8a0cbb1b0ee812bb96ddf2ad6581f567867e2..5b09db948aad5fe1f7866b87bdd595cd9d976200 100644 (file)
@@ -21,7 +21,7 @@ dependencies {
     api 'com.typesafe:config:1.3.2'
     
     testImplementation 'junit:junit:4.12'
-    testImplementation 'org.mockito:mockito-core:2.12.0'
+    testImplementation 'org.mockito:mockito-core:3.3.3'
     testImplementation 'org.assertj:assertj-core:3.8.0'
     testImplementation 'com.squareup.okhttp3:mockwebserver:3.9.1'
 }
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/LinksApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/LinksApiClient.java
new file mode 100644 (file)
index 0000000..c64e1fb
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import org.arvados.client.api.model.Link;
+import org.arvados.client.api.model.LinkList;
+import org.arvados.client.config.ConfigProvider;
+
+public class LinksApiClient extends BaseStandardApiClient<Link, LinkList> {
+
+    private static final String RESOURCE = "links";
+
+    public LinksApiClient(ConfigProvider config) {
+        super(config);
+    }
+
+    @Override
+    String getResource() {
+        return RESOURCE;
+    }
+
+    @Override
+    Class<Link> getType() {
+        return Link.class;
+    }
+
+    @Override
+    Class<LinkList> getListType() {
+        return LinkList.class;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/Link.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/Link.java
new file mode 100644 (file)
index 0000000..a24f02a
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "name", "head_kind", "head_uuid", "link_class" })
+public class Link extends Item {
+
+    @JsonProperty("name")
+    private String name;
+    @JsonProperty("head_kind")
+    private String headKind;
+    @JsonProperty("head_uuid")
+    private String headUuid;
+    @JsonProperty("link_class")
+    private String linkClass;
+
+    public String getName() {
+        return name;
+    }
+
+    public String getHeadKind() {
+        return headKind;
+    }
+
+    public String getHeadUuid() {
+        return headUuid;
+    }
+
+    public String getLinkClass() {
+        return linkClass;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public void setHeadKind(String headKind) {
+        this.headKind = headKind;
+    }
+
+    public void setHeadUuid(String headUuid) {
+        this.headUuid = headUuid;
+    }
+
+    public void setLinkClass(String linkClass) {
+        this.linkClass = linkClass;
+    }
+
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/LinkList.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/LinkList.java
new file mode 100644 (file)
index 0000000..5bccbe5
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.util.List;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "items" })
+public class LinkList extends ItemList {
+
+    @JsonProperty("items")
+    private List<Link> items;
+
+    public List<Link> getItems() {
+        return this.items;
+    }
+
+    public void setItems(List<Link> items) {
+        this.items = items;
+    }
+}
index 0ba3f0a483fac785a6d080adf5ea494c480d02b3..5f12b62eebe28bc97a874b907caff735dace3151 100755 (executable)
@@ -8,8 +8,8 @@
 #
 # By default, arv-copy recursively copies any dependent objects
 # necessary to make the object functional in the new instance
-# (e.g. for a pipeline instance, arv-copy copies the pipeline
-# template, input collection, docker images, git repositories). If
+# (e.g. for a workflow, arv-copy copies the workflow,
+# input collections, and docker images). If
 # --no-recursive is given, arv-copy copies only the single record
 # identified by object-uuid.
 #
@@ -86,9 +86,6 @@ def main():
     copy_opts.add_argument(
         '-f', '--force', dest='force', action='store_true',
         help='Perform copy even if the object appears to exist at the remote destination.')
-    copy_opts.add_argument(
-        '--force-filters', action='store_true', default=False,
-        help="Copy pipeline template filters verbatim, even if they act differently on the destination cluster.")
     copy_opts.add_argument(
         '--src', dest='source_arvados', required=True,
         help='The name of the source Arvados instance (required) - points at an Arvados config file. May be either a pathname to a config file, or (for example) "foo" as shorthand for $HOME/.config/arvados/foo.conf.')
@@ -101,18 +98,9 @@ def main():
     copy_opts.add_argument(
         '--no-recursive', dest='recursive', action='store_false',
         help='Do not copy any dependencies. NOTE: if this option is given, the copied object will need to be updated manually in order to be functional.')
-    copy_opts.add_argument(
-        '--dst-git-repo', dest='dst_git_repo',
-        help='The name of the destination git repository. Required when copying a pipeline recursively.')
     copy_opts.add_argument(
         '--project-uuid', dest='project_uuid',
-        help='The UUID of the project at the destination to which the pipeline should be copied.')
-    copy_opts.add_argument(
-        '--allow-git-http-src', action="store_true",
-        help='Allow cloning git repositories over insecure http')
-    copy_opts.add_argument(
-        '--allow-git-http-dst', action="store_true",
-        help='Allow pushing git repositories over insecure http')
+        help='The UUID of the project at the destination to which the collection or workflow should be copied.')
 
     copy_opts.add_argument(
         'object_uuid',
@@ -121,7 +109,7 @@ def main():
     copy_opts.set_defaults(recursive=True)
 
     parser = argparse.ArgumentParser(
-        description='Copy a pipeline instance, template, workflow, or collection from one Arvados instance to another.',
+        description='Copy a workflow or collection from one Arvados instance to another.',
         parents=[copy_opts, arv_cmd.retry_opt])
     args = parser.parse_args()
 
@@ -144,15 +132,6 @@ def main():
         result = copy_collection(args.object_uuid,
                                  src_arv, dst_arv,
                                  args)
-    elif t == 'PipelineInstance':
-        set_src_owner_uuid(src_arv.pipeline_instances(), args.object_uuid, args)
-        result = copy_pipeline_instance(args.object_uuid,
-                                        src_arv, dst_arv,
-                                        args)
-    elif t == 'PipelineTemplate':
-        set_src_owner_uuid(src_arv.pipeline_templates(), args.object_uuid, args)
-        result = copy_pipeline_template(args.object_uuid,
-                                        src_arv, dst_arv, args)
     elif t == 'Workflow':
         set_src_owner_uuid(src_arv.workflows(), args.object_uuid, args)
         result = copy_workflow(args.object_uuid, src_arv, dst_arv, args)
@@ -225,67 +204,6 @@ def check_git_availability():
     except Exception:
         abort('git command is not available. Please ensure git is installed.')
 
-# copy_pipeline_instance(pi_uuid, src, dst, args)
-#
-#    Copies a pipeline instance identified by pi_uuid from src to dst.
-#
-#    If the args.recursive option is set:
-#      1. Copies all input collections
-#           * For each component in the pipeline, include all collections
-#             listed as job dependencies for that component)
-#      2. Copy docker images
-#      3. Copy git repositories
-#      4. Copy the pipeline template
-#
-#    The only changes made to the copied pipeline instance are:
-#      1. The original pipeline instance UUID is preserved in
-#         the 'properties' hash as 'copied_from_pipeline_instance_uuid'.
-#      2. The pipeline_template_uuid is changed to the new template uuid.
-#      3. The owner_uuid of the instance is changed to the user who
-#         copied it.
-#
-def copy_pipeline_instance(pi_uuid, src, dst, args):
-    # Fetch the pipeline instance record.
-    pi = src.pipeline_instances().get(uuid=pi_uuid).execute(num_retries=args.retries)
-
-    if args.recursive:
-        check_git_availability()
-
-        if not args.dst_git_repo:
-            abort('--dst-git-repo is required when copying a pipeline recursively.')
-        # Copy the pipeline template and save the copied template.
-        if pi.get('pipeline_template_uuid', None):
-            pt = copy_pipeline_template(pi['pipeline_template_uuid'],
-                                        src, dst, args)
-
-        # Copy input collections, docker images and git repos.
-        pi = copy_collections(pi, src, dst, args)
-        copy_git_repos(pi, src, dst, args.dst_git_repo, args)
-        copy_docker_images(pi, src, dst, args)
-
-        # Update the fields of the pipeline instance with the copied
-        # pipeline template.
-        if pi.get('pipeline_template_uuid', None):
-            pi['pipeline_template_uuid'] = pt['uuid']
-
-    else:
-        # not recursive
-        logger.info("Copying only pipeline instance %s.", pi_uuid)
-        logger.info("You are responsible for making sure all pipeline dependencies have been updated.")
-
-    # Update the pipeline instance properties, and create the new
-    # instance at dst.
-    pi['properties']['copied_from_pipeline_instance_uuid'] = pi_uuid
-    pi['description'] = "Pipeline copied from {}\n\n{}".format(
-        pi_uuid,
-        pi['description'] if pi.get('description', None) else '')
-
-    pi['owner_uuid'] = args.project_uuid
-
-    del pi['uuid']
-
-    new_pi = dst.pipeline_instances().create(body=pi, ensure_unique_name=True).execute(num_retries=args.retries)
-    return new_pi
 
 def filter_iter(arg):
     """Iterate a filter string-or-list.
@@ -340,82 +258,6 @@ def exception_handler(handler, *exc_types):
     except exc_types as error:
         handler(error)
 
-def migrate_components_filters(template_components, dst_git_repo):
-    """Update template component filters in-place for the destination.
-
-    template_components is a dictionary of components in a pipeline template.
-    This method walks over each component's filters, and updates them to have
-    identical semantics on the destination cluster.  It returns a list of
-    error strings that describe what filters could not be updated safely.
-
-    dst_git_repo is the name of the destination Git repository, which can
-    be None if that is not known.
-    """
-    errors = []
-    for cname, cspec in template_components.items():
-        def add_error(errmsg):
-            errors.append("{}: {}".format(cname, errmsg))
-        if not isinstance(cspec, dict):
-            add_error("value is not a component definition")
-            continue
-        src_repository = cspec.get('repository')
-        filters = cspec.get('filters', [])
-        if not isinstance(filters, list):
-            add_error("filters are not a list")
-            continue
-        for cfilter in filters:
-            if not (isinstance(cfilter, list) and (len(cfilter) == 3)):
-                add_error("malformed filter {!r}".format(cfilter))
-                continue
-            if attr_filtered(cfilter, 'repository'):
-                with exception_handler(add_error, ValueError):
-                    migrate_repository_filter(cfilter, src_repository, dst_git_repo)
-            if attr_filtered(cfilter, 'script_version'):
-                with exception_handler(add_error, ValueError):
-                    migrate_script_version_filter(cfilter)
-    return errors
-
-# copy_pipeline_template(pt_uuid, src, dst, args)
-#
-#    Copies a pipeline template identified by pt_uuid from src to dst.
-#
-#    If args.recursive is True, also copy any collections, docker
-#    images and git repositories that this template references.
-#
-#    The owner_uuid of the new template is changed to that of the user
-#    who copied the template.
-#
-#    Returns the copied pipeline template object.
-#
-def copy_pipeline_template(pt_uuid, src, dst, args):
-    # fetch the pipeline template from the source instance
-    pt = src.pipeline_templates().get(uuid=pt_uuid).execute(num_retries=args.retries)
-
-    if not args.force_filters:
-        filter_errors = migrate_components_filters(pt['components'], args.dst_git_repo)
-        if filter_errors:
-            abort("Template filters cannot be copied safely. Use --force-filters to copy anyway.\n" +
-                  "\n".join(filter_errors))
-
-    if args.recursive:
-        check_git_availability()
-
-        if not args.dst_git_repo:
-            abort('--dst-git-repo is required when copying a pipeline recursively.')
-        # Copy input collections, docker images and git repos.
-        pt = copy_collections(pt, src, dst, args)
-        copy_git_repos(pt, src, dst, args.dst_git_repo, args)
-        copy_docker_images(pt, src, dst, args)
-
-    pt['description'] = "Pipeline template copied from {}\n\n{}".format(
-        pt_uuid,
-        pt['description'] if pt.get('description', None) else '')
-    pt['name'] = "{} copied from {}".format(pt.get('name', ''), pt_uuid)
-    del pt['uuid']
-
-    pt['owner_uuid'] = args.project_uuid
-
-    return dst.pipeline_templates().create(body=pt, ensure_unique_name=True).execute(num_retries=args.retries)
 
 # copy_workflow(wf_uuid, src, dst, args)
 #
@@ -518,53 +360,6 @@ def copy_collections(obj, src, dst, args):
         return type(obj)(copy_collections(v, src, dst, args) for v in obj)
     return obj
 
-def migrate_jobspec(jobspec, src, dst, dst_repo, args):
-    """Copy a job's script to the destination repository, and update its record.
-
-    Given a jobspec dictionary, this function finds the referenced script from
-    src and copies it to dst and dst_repo.  It also updates jobspec in place to
-    refer to names on the destination.
-    """
-    repo = jobspec.get('repository')
-    if repo is None:
-        return
-    # script_version is the "script_version" parameter from the source
-    # component or job.  If no script_version was supplied in the
-    # component or job, it is a mistake in the pipeline, but for the
-    # purposes of copying the repository, default to "master".
-    script_version = jobspec.get('script_version') or 'master'
-    script_key = (repo, script_version)
-    if script_key not in scripts_copied:
-        copy_git_repo(repo, src, dst, dst_repo, script_version, args)
-        scripts_copied.add(script_key)
-    jobspec['repository'] = dst_repo
-    repo_dir = local_repo_dir[repo]
-    for version_key in ['script_version', 'supplied_script_version']:
-        if version_key in jobspec:
-            jobspec[version_key] = git_rev_parse(jobspec[version_key], repo_dir)
-
-# copy_git_repos(p, src, dst, dst_repo, args)
-#
-#    Copies all git repositories referenced by pipeline instance or
-#    template 'p' from src to dst.
-#
-#    For each component c in the pipeline:
-#      * Copy git repositories named in c['repository'] and c['job']['repository'] if present
-#      * Rename script versions:
-#          * c['script_version']
-#          * c['job']['script_version']
-#          * c['job']['supplied_script_version']
-#        to the commit hashes they resolve to, since any symbolic
-#        names (tags, branches) are not preserved in the destination repo.
-#
-#    The pipeline object is updated in place with the new repository
-#    names.  The return value is undefined.
-#
-def copy_git_repos(p, src, dst, dst_repo, args):
-    for component in p['components'].values():
-        migrate_jobspec(component, src, dst, dst_repo, args)
-        if 'job' in component:
-            migrate_jobspec(component['job'], src, dst, dst_repo, args)
 
 def total_collection_size(manifest_text):
     """Return the total number of bytes in this collection (excluding
@@ -590,17 +385,16 @@ def create_collection_from(c, src, dst, args):
     available."""
 
     collection_uuid = c['uuid']
-    del c['uuid']
-
-    if not c["name"]:
-        c['name'] = "copied from " + collection_uuid
+    body = {}
+    for d in ('description', 'manifest_text', 'name', 'portable_data_hash', 'properties'):
+        body[d] = c[d]
 
-    if 'properties' in c:
-        del c['properties']
+    if not body["name"]:
+        body['name'] = "copied from " + collection_uuid
 
-    c['owner_uuid'] = args.project_uuid
+    body['owner_uuid'] = args.project_uuid
 
-    dst_collection = dst.collections().create(body=c, ensure_unique_name=True).execute(num_retries=args.retries)
+    dst_collection = dst.collections().create(body=body, ensure_unique_name=True).execute(num_retries=args.retries)
 
     # Create docker_image_repo+tag and docker_image_hash links
     # at the destination.
@@ -665,7 +459,7 @@ def copy_collection(obj_uuid, src, dst, args):
             c = items[0]
         if not c:
             # See if there is a collection that's in the same project
-            # as the root item (usually a pipeline) being copied.
+            # as the root item (usually a workflow) being copied.
             for i in items:
                 if i.get("owner_uuid") == src_owner_uuid and i.get("name"):
                     c = i
@@ -815,68 +609,6 @@ def select_git_url(api, repo_name, retries, allow_insecure_http, allow_insecure_
     return (git_url, git_config)
 
 
-# copy_git_repo(src_git_repo, src, dst, dst_git_repo, script_version, args)
-#
-#    Copies commits from git repository 'src_git_repo' on Arvados
-#    instance 'src' to 'dst_git_repo' on 'dst'.  Both src_git_repo
-#    and dst_git_repo are repository names, not UUIDs (i.e. "arvados"
-#    or "jsmith")
-#
-#    All commits will be copied to a destination branch named for the
-#    source repository URL.
-#
-#    The destination repository must already exist.
-#
-#    The user running this command must be authenticated
-#    to both repositories.
-#
-def copy_git_repo(src_git_repo, src, dst, dst_git_repo, script_version, args):
-    # Identify the fetch and push URLs for the git repositories.
-
-    (src_git_url, src_git_config) = select_git_url(src, src_git_repo, args.retries, args.allow_git_http_src, "--allow-git-http-src")
-    (dst_git_url, dst_git_config) = select_git_url(dst, dst_git_repo, args.retries, args.allow_git_http_dst, "--allow-git-http-dst")
-
-    logger.debug('src_git_url: {}'.format(src_git_url))
-    logger.debug('dst_git_url: {}'.format(dst_git_url))
-
-    dst_branch = re.sub(r'\W+', '_', "{}_{}".format(src_git_url, script_version))
-
-    # Copy git commits from src repo to dst repo.
-    if src_git_repo not in local_repo_dir:
-        local_repo_dir[src_git_repo] = tempfile.mkdtemp()
-        arvados.util.run_command(
-            ["git"] + src_git_config + ["clone", "--bare", src_git_url,
-             local_repo_dir[src_git_repo]],
-            cwd=os.path.dirname(local_repo_dir[src_git_repo]),
-            env={"HOME": os.environ["HOME"],
-                 "ARVADOS_API_TOKEN": src.api_token,
-                 "GIT_ASKPASS": "/bin/false"})
-        arvados.util.run_command(
-            ["git", "remote", "add", "dst", dst_git_url],
-            cwd=local_repo_dir[src_git_repo])
-    arvados.util.run_command(
-        ["git", "branch", dst_branch, script_version],
-        cwd=local_repo_dir[src_git_repo])
-    arvados.util.run_command(["git"] + dst_git_config + ["push", "dst", dst_branch],
-                             cwd=local_repo_dir[src_git_repo],
-                             env={"HOME": os.environ["HOME"],
-                                  "ARVADOS_API_TOKEN": dst.api_token,
-                                  "GIT_ASKPASS": "/bin/false"})
-
-def copy_docker_images(pipeline, src, dst, args):
-    """Copy any docker images named in the pipeline components'
-    runtime_constraints field from src to dst."""
-
-    logger.debug('copy_docker_images: {}'.format(pipeline['uuid']))
-    for c_name, c_info in pipeline['components'].items():
-        if ('runtime_constraints' in c_info and
-            'docker_image' in c_info['runtime_constraints']):
-            copy_docker_image(
-                c_info['runtime_constraints']['docker_image'],
-                c_info['runtime_constraints'].get('docker_image_tag', 'latest'),
-                src, dst, args)
-
-
 def copy_docker_image(docker_image, docker_image_tag, src, dst, args):
     """Copy the docker image identified by docker_image and
     docker_image_tag from src to dst. Create appropriate
@@ -917,7 +649,7 @@ def git_rev_parse(rev, repo):
 #    the second field of the uuid.  This function consults the api's
 #    schema to identify the object class.
 #
-#    It returns a string such as 'Collection', 'PipelineInstance', etc.
+#    It returns a string such as 'Collection', 'Workflow', etc.
 #
 #    Special case: if handed a Keep locator hash, return 'Collection'.
 #
index 262b9d2a2cbc7f0925867e9a522b8402e0386bd2..22d4f62ea0fd1bf5a8d6718e2d410b79d5377d72 100644 (file)
@@ -430,7 +430,8 @@ def run_ws():
     stop_ws()
     port = internal_port_from_config("Websocket")
     logf = open(_logfilename('ws'), 'a')
-    ws = subprocess.Popen(["ws"],
+    ws = subprocess.Popen(
+        ["arvados-server", "ws"],
         stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
     with open(_pidfile('ws'), 'w') as f:
         f.write(str(ws.pid))
@@ -661,11 +662,22 @@ def setup_config():
     keep_web_dl_port = find_available_port()
     keep_web_dl_external_port = find_available_port()
 
-    dbconf = os.path.join(os.environ["CONFIGSRC"], "config.yml")
-
-    print("Getting config from %s" % dbconf, file=sys.stderr)
-
-    pgconnection = yaml.safe_load(open(dbconf))["Clusters"]["zzzzz"]["PostgreSQL"]["Connection"]
+    configsrc = os.environ.get("CONFIGSRC", None)
+    if configsrc:
+        clusterconf = os.path.join(configsrc, "config.yml")
+        print("Getting config from %s" % clusterconf, file=sys.stderr)
+        pgconnection = yaml.safe_load(open(clusterconf))["Clusters"]["zzzzz"]["PostgreSQL"]["Connection"]
+    else:
+        # assume "arvados-server install -type test" has set up the
+        # conventional db credentials
+        pgconnection = {
+           "client_encoding": "utf8",
+           "host": "localhost",
+           "dbname": "arvados_test",
+           "user": "arvados",
+           "password": "insecure_arvados_test",
+            "template": "template0", # used by RailsAPI when [re]creating the database
+        }
 
     localhost = "127.0.0.1"
     services = {
index 68158d760ee785a501d75e931ac5635109f32c13..27e3cf6330728b8fa523d51d2197885cc1229070 100644 (file)
@@ -832,7 +832,9 @@ class KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase):
         kc = self.keepClient()
         loc = kc.put(self.DATA, copies=1, num_retries=0)
         self.server.setbandwidth(self.BANDWIDTH_LOW_LIM)
-        self.server.setdelays(response=self.TIMEOUT_TIME)
+        # Note the actual delay must be 1s longer than the low speed
+        # limit interval in order for curl to detect it reliably.
+        self.server.setdelays(response=self.TIMEOUT_TIME+1)
         with self.assertTakesGreater(self.TIMEOUT_TIME):
             with self.assertRaises(arvados.errors.KeepReadError):
                 kc.get(loc, num_retries=0)
@@ -846,7 +848,9 @@ class KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase):
         kc = self.keepClient()
         loc = kc.put(self.DATA, copies=1, num_retries=0)
         self.server.setbandwidth(self.BANDWIDTH_LOW_LIM)
-        self.server.setdelays(mid_write=self.TIMEOUT_TIME, mid_read=self.TIMEOUT_TIME)
+        # Note the actual delay must be 1s longer than the low speed
+        # limit interval in order for curl to detect it reliably.
+        self.server.setdelays(mid_write=self.TIMEOUT_TIME+1, mid_read=self.TIMEOUT_TIME+1)
         with self.assertTakesGreater(self.TIMEOUT_TIME):
             with self.assertRaises(arvados.errors.KeepReadError) as e:
                 kc.get(loc, num_retries=0)
index 2c780d4771724aa822247c248f51fedc8a4b4dd5..24d5ad5b64982c1e5d10e8f218336978d1e2b857 100644 (file)
@@ -157,7 +157,7 @@ GEM
     net-ssh-gateway (2.0.0)
       net-ssh (>= 4.0.0)
     nio4r (2.3.1)
-    nokogiri (1.10.2)
+    nokogiri (1.10.8)
       mini_portile2 (~> 2.4.0)
     oauth2 (1.4.1)
       faraday (>= 0.8, < 0.16.0)
@@ -213,7 +213,7 @@ GEM
       method_source
       rake (>= 0.8.7)
       thor (>= 0.18.1, < 2.0)
-    rake (12.3.2)
+    rake (13.0.1)
     rb-fsevent (0.10.3)
     rb-inotify (0.9.10)
       ffi (>= 0.5.0, < 2)
index 05658b5e5d7f17a4dcd9bd099d81d68950f97a8f..9801a3fd45d5d13ec40bf661c59b4de5156cfeed 100644 (file)
@@ -53,6 +53,11 @@ func (srv *Server) CheckHealth() error {
        return nil
 }
 
+// Done implements service.Handler.
+func (srv *Server) Done() <-chan struct{} {
+       return nil
+}
+
 func (srv *Server) run() {
        var err error
        if srv.RunOptions.Once {
index e0509393cff077e119b6ea7b975d10f90115d536..0927b187048315f0c305d5043c448d5ff38a8002 100644 (file)
@@ -132,6 +132,10 @@ func (h *handler) CheckHealth() error {
        return h.err
 }
 
+func (h *handler) Done() <-chan struct{} {
+       return nil
+}
+
 func newHandlerOrErrorHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {
        var h handler
        serviceURL, ok := service.URLFromContext(ctx)
@@ -153,9 +157,8 @@ func (h *handler) setup(ctx context.Context, cluster *arvados.Cluster, token str
        }
        bufs = newBufferPool(h.Logger, h.Cluster.API.MaxKeepBlobBuffers, BlockSize)
 
-       if h.Cluster.API.MaxConcurrentRequests < 1 {
-               h.Cluster.API.MaxConcurrentRequests = h.Cluster.API.MaxKeepBlobBuffers * 2
-               h.Logger.Warnf("API.MaxConcurrentRequests <1 or not specified; defaulting to MaxKeepBlobBuffers * 2 == %d", h.Cluster.API.MaxConcurrentRequests)
+       if h.Cluster.API.MaxConcurrentRequests > 0 && h.Cluster.API.MaxConcurrentRequests < h.Cluster.API.MaxKeepBlobBuffers {
+               h.Logger.Warnf("Possible configuration mistake: not useful to set API.MaxKeepBlobBuffers (%d) higher than API.MaxConcurrentRequests (%d)", h.Cluster.API.MaxKeepBlobBuffers, h.Cluster.API.MaxConcurrentRequests)
        }
 
        if h.Cluster.Collections.BlobSigningKey != "" {
index a60aa416a90b79d36d55a6aacf9c50de827af7bf..7777363b9d13815ab3036ae916a2c0f6989eb95f 100644 (file)
@@ -13,7 +13,6 @@ import (
        "io"
        "io/ioutil"
        "os"
-       "strings"
        "sync"
        "syscall"
        "time"
@@ -168,11 +167,10 @@ func (s *UnixVolumeSuite) TestPutBadVolume(c *check.C) {
        v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
        defer v.Teardown()
 
-       os.Chmod(v.Root, 000)
-       err := v.Put(context.Background(), TestHash, TestBlock)
-       if err == nil {
-               c.Error("Write should have failed")
-       }
+       err := os.RemoveAll(v.Root)
+       c.Assert(err, check.IsNil)
+       err = v.Put(context.Background(), TestHash, TestBlock)
+       c.Check(err, check.IsNil)
 }
 
 func (s *UnixVolumeSuite) TestUnixVolumeReadonly(c *check.C) {
@@ -330,11 +328,14 @@ func (s *UnixVolumeSuite) TestUnixVolumeCompare(c *check.C) {
                c.Errorf("Got err %q, expected %q", err, DiskHashError)
        }
 
-       p := fmt.Sprintf("%s/%s/%s", v.Root, TestHash[:3], TestHash)
-       os.Chmod(p, 000)
-       err = v.Compare(context.Background(), TestHash, TestBlock)
-       if err == nil || strings.Index(err.Error(), "permission denied") < 0 {
-               c.Errorf("Got err %q, expected %q", err, "permission denied")
+       if os.Getuid() == 0 {
+               c.Log("skipping 'permission denied' check when running as root")
+       } else {
+               p := fmt.Sprintf("%s/%s/%s", v.Root, TestHash[:3], TestHash)
+               err = os.Chmod(p, 000)
+               c.Assert(err, check.IsNil)
+               err = v.Compare(context.Background(), TestHash, TestBlock)
+               c.Check(err, check.ErrorMatches, ".*permission denied.*")
        }
 }
 
index 806c3355da6c693350493a7471bc59e270bfb1e3..6a86cbe7a8307e1683dbd09ea506bc8cd79f52e3 100644 (file)
 // Developer info
 //
 // See https://dev.arvados.org/projects/arvados/wiki/Hacking_websocket_server.
-//
-// Usage
-//
-//     arvados-ws [-legacy-ws-config /etc/arvados/ws/ws.yml] [-dump-config]
-//
-// Options
-//
-// -legacy-ws-config path
-//
-// Load legacy configuration from the given file instead of the default
-// /etc/arvados/ws/ws.yml, legacy config overrides the clusterwide config.yml.
-//
-// -dump-config
-//
-// Print the loaded configuration to stdout and exit.
-//
-// Logs
-//
-// Logs are printed to stderr, formatted as JSON.
-//
-// A log is printed each time a client connects or disconnects.
-//
-// Enable additional logs by configuring:
-//
-//     LogLevel: debug
-//
-// Runtime status
-//
-// GET /debug.json responds with debug stats.
-//
-// GET /status.json responds with health check results and
-// activity/usage metrics.
-package main
+package ws
index ae545c092cf8ddece45cfbebdddb542e08de16b4..c989c0ca559b1a1cff472b2cc1bdb95b4fd021ce 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
        "database/sql"
@@ -11,6 +11,7 @@ import (
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "github.com/ghodss/yaml"
+       "github.com/sirupsen/logrus"
 )
 
 type eventSink interface {
@@ -31,6 +32,7 @@ type event struct {
        Serial   uint64
 
        db     *sql.DB
+       logger logrus.FieldLogger
        logRow *arvados.Log
        err    error
        mtx    sync.Mutex
@@ -57,12 +59,12 @@ func (e *event) Detail() *arvados.Log {
                &logRow.CreatedAt,
                &propYAML)
        if e.err != nil {
-               logger(nil).WithField("LogID", e.LogID).WithError(e.err).Error("QueryRow failed")
+               e.logger.WithField("LogID", e.LogID).WithError(e.err).Error("QueryRow failed")
                return nil
        }
        e.err = yaml.Unmarshal(propYAML, &logRow.Properties)
        if e.err != nil {
-               logger(nil).WithField("LogID", e.LogID).WithError(e.err).Error("yaml decode failed")
+               e.logger.WithField("LogID", e.LogID).WithError(e.err).Error("yaml decode failed")
                return nil
        }
        e.logRow = &logRow
index 3a82bf62b3e9351a95d2abe4c56ae942fededa4c..3593c3aebd58ceae6932e9667eca43aba8a8c0cf 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
        "context"
@@ -11,17 +11,20 @@ import (
        "fmt"
        "strconv"
        "sync"
-       "sync/atomic"
        "time"
 
        "git.arvados.org/arvados.git/sdk/go/stats"
        "github.com/lib/pq"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/sirupsen/logrus"
 )
 
 type pgEventSource struct {
        DataSource   string
        MaxOpenConns int
        QueueSize    int
+       Logger       logrus.FieldLogger
+       Reg          *prometheus.Registry
 
        db         *sql.DB
        pqListener *pq.Listener
@@ -30,8 +33,8 @@ type pgEventSource struct {
        mtx        sync.Mutex
 
        lastQDelay time.Duration
-       eventsIn   uint64
-       eventsOut  uint64
+       eventsIn   prometheus.Counter
+       eventsOut  prometheus.Counter
 
        cancel func()
 
@@ -39,18 +42,16 @@ type pgEventSource struct {
        ready     chan bool
 }
 
-var _ debugStatuser = (*pgEventSource)(nil)
-
 func (ps *pgEventSource) listenerProblem(et pq.ListenerEventType, err error) {
        if et == pq.ListenerEventConnected {
-               logger(nil).Debug("pgEventSource connected")
+               ps.Logger.Debug("pgEventSource connected")
                return
        }
 
        // Until we have a mechanism for catching up on missed events,
        // we cannot recover from a dropped connection without
        // breaking our promises to clients.
-       logger(nil).
+       ps.Logger.
                WithField("eventType", et).
                WithError(err).
                Error("listener problem")
@@ -59,6 +60,95 @@ func (ps *pgEventSource) listenerProblem(et pq.ListenerEventType, err error) {
 
 func (ps *pgEventSource) setup() {
        ps.ready = make(chan bool)
+       ps.Reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "ws",
+                       Name:      "queue_len",
+                       Help:      "Current number of events in queue",
+               }, func() float64 { return float64(len(ps.queue)) }))
+       ps.Reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "ws",
+                       Name:      "queue_cap",
+                       Help:      "Event queue capacity",
+               }, func() float64 { return float64(cap(ps.queue)) }))
+       ps.Reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "ws",
+                       Name:      "queue_delay",
+                       Help:      "Queue delay of the last emitted event",
+               }, func() float64 { return ps.lastQDelay.Seconds() }))
+       ps.Reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "ws",
+                       Name:      "sinks",
+                       Help:      "Number of active sinks (connections)",
+               }, func() float64 { return float64(len(ps.sinks)) }))
+       ps.Reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "ws",
+                       Name:      "sinks_blocked",
+                       Help:      "Number of sinks (connections) that are busy and blocking the main event stream",
+               }, func() float64 {
+                       ps.mtx.Lock()
+                       defer ps.mtx.Unlock()
+                       blocked := 0
+                       for sink := range ps.sinks {
+                               blocked += len(sink.channel)
+                       }
+                       return float64(blocked)
+               }))
+       ps.eventsIn = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "ws",
+               Name:      "events_in",
+               Help:      "Number of events received from postgresql notify channel",
+       })
+       ps.Reg.MustRegister(ps.eventsIn)
+       ps.eventsOut = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "ws",
+               Name:      "events_out",
+               Help:      "Number of events sent to client sessions (before filtering)",
+       })
+       ps.Reg.MustRegister(ps.eventsOut)
+
+       maxConnections := prometheus.NewGauge(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "ws",
+               Name:      "db_max_connections",
+               Help:      "Maximum number of open connections to the database",
+       })
+       ps.Reg.MustRegister(maxConnections)
+       openConnections := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "ws",
+               Name:      "db_open_connections",
+               Help:      "Open connections to the database",
+       }, []string{"inuse"})
+       ps.Reg.MustRegister(openConnections)
+
+       updateDBStats := func() {
+               stats := ps.db.Stats()
+               maxConnections.Set(float64(stats.MaxOpenConnections))
+               openConnections.WithLabelValues("0").Set(float64(stats.Idle))
+               openConnections.WithLabelValues("1").Set(float64(stats.InUse))
+       }
+       go func() {
+               <-ps.ready
+               if ps.db == nil {
+                       return
+               }
+               updateDBStats()
+               for range time.Tick(time.Second) {
+                       updateDBStats()
+               }
+       }()
 }
 
 // Close stops listening for new events and disconnects all clients.
@@ -76,8 +166,8 @@ func (ps *pgEventSource) WaitReady() {
 // Run listens for event notifications on the "logs" channel and sends
 // them to all subscribers.
 func (ps *pgEventSource) Run() {
-       logger(nil).Debug("pgEventSource Run starting")
-       defer logger(nil).Debug("pgEventSource Run finished")
+       ps.Logger.Debug("pgEventSource Run starting")
+       defer ps.Logger.Debug("pgEventSource Run finished")
 
        ps.setupOnce.Do(ps.setup)
        ready := ps.ready
@@ -103,15 +193,15 @@ func (ps *pgEventSource) Run() {
 
        db, err := sql.Open("postgres", ps.DataSource)
        if err != nil {
-               logger(nil).WithError(err).Error("sql.Open failed")
+               ps.Logger.WithError(err).Error("sql.Open failed")
                return
        }
        if ps.MaxOpenConns <= 0 {
-               logger(nil).Warn("no database connection limit configured -- consider setting PostgresPool>0 in arvados-ws configuration file")
+               ps.Logger.Warn("no database connection limit configured -- consider setting PostgreSQL.ConnectionPool>0 in the cluster configuration file")
        }
        db.SetMaxOpenConns(ps.MaxOpenConns)
        if err = db.Ping(); err != nil {
-               logger(nil).WithError(err).Error("db.Ping failed")
+               ps.Logger.WithError(err).Error("db.Ping failed")
                return
        }
        ps.db = db
@@ -119,11 +209,11 @@ func (ps *pgEventSource) Run() {
        ps.pqListener = pq.NewListener(ps.DataSource, time.Second, time.Minute, ps.listenerProblem)
        err = ps.pqListener.Listen("logs")
        if err != nil {
-               logger(nil).WithError(err).Error("pq Listen failed")
+               ps.Logger.WithError(err).Error("pq Listen failed")
                return
        }
        defer ps.pqListener.Close()
-       logger(nil).Debug("pq Listen setup done")
+       ps.Logger.Debug("pq Listen setup done")
 
        close(ready)
        // Avoid double-close in deferred func
@@ -141,7 +231,7 @@ func (ps *pgEventSource) Run() {
                        // client_count X client_queue_size.
                        e.Detail()
 
-                       logger(nil).
+                       ps.Logger.
                                WithField("serial", e.Serial).
                                WithField("detail", e.Detail()).
                                Debug("event ready")
@@ -149,9 +239,9 @@ func (ps *pgEventSource) Run() {
                        ps.lastQDelay = e.Ready.Sub(e.Received)
 
                        ps.mtx.Lock()
-                       atomic.AddUint64(&ps.eventsOut, uint64(len(ps.sinks)))
                        for sink := range ps.sinks {
                                sink.channel <- e
+                               ps.eventsOut.Inc()
                        }
                        ps.mtx.Unlock()
                }
@@ -163,11 +253,11 @@ func (ps *pgEventSource) Run() {
        for {
                select {
                case <-ctx.Done():
-                       logger(nil).Debug("ctx done")
+                       ps.Logger.Debug("ctx done")
                        return
 
                case <-ticker.C:
-                       logger(nil).Debug("listener ping")
+                       ps.Logger.Debug("listener ping")
                        err := ps.pqListener.Ping()
                        if err != nil {
                                ps.listenerProblem(-1, fmt.Errorf("pqListener ping failed: %s", err))
@@ -176,7 +266,7 @@ func (ps *pgEventSource) Run() {
 
                case pqEvent, ok := <-ps.pqListener.Notify:
                        if !ok {
-                               logger(nil).Error("pqListener Notify chan closed")
+                               ps.Logger.Error("pqListener Notify chan closed")
                                return
                        }
                        if pqEvent == nil {
@@ -188,12 +278,12 @@ func (ps *pgEventSource) Run() {
                                continue
                        }
                        if pqEvent.Channel != "logs" {
-                               logger(nil).WithField("pqEvent", pqEvent).Error("unexpected notify from wrong channel")
+                               ps.Logger.WithField("pqEvent", pqEvent).Error("unexpected notify from wrong channel")
                                continue
                        }
                        logID, err := strconv.ParseUint(pqEvent.Extra, 10, 64)
                        if err != nil {
-                               logger(nil).WithField("pqEvent", pqEvent).Error("bad notify payload")
+                               ps.Logger.WithField("pqEvent", pqEvent).Error("bad notify payload")
                                continue
                        }
                        serial++
@@ -202,9 +292,10 @@ func (ps *pgEventSource) Run() {
                                Received: time.Now(),
                                Serial:   serial,
                                db:       ps.db,
+                               logger:   ps.Logger,
                        }
-                       logger(nil).WithField("event", e).Debug("incoming")
-                       atomic.AddUint64(&ps.eventsIn, 1)
+                       ps.Logger.WithField("event", e).Debug("incoming")
+                       ps.eventsIn.Inc()
                        ps.queue <- e
                        go e.Detail()
                }
@@ -238,6 +329,9 @@ func (ps *pgEventSource) DB() *sql.DB {
 }
 
 func (ps *pgEventSource) DBHealth() error {
+       if ps.db == nil {
+               return errors.New("database not connected")
+       }
        ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second))
        defer cancel()
        var i int
@@ -252,8 +346,6 @@ func (ps *pgEventSource) DebugStatus() interface{} {
                blocked += len(sink.channel)
        }
        return map[string]interface{}{
-               "EventsIn":     atomic.LoadUint64(&ps.eventsIn),
-               "EventsOut":    atomic.LoadUint64(&ps.eventsOut),
                "Queue":        len(ps.queue),
                "QueueLimit":   cap(ps.queue),
                "QueueDelay":   stats.Duration(ps.lastQDelay),
index 98a9e8b9785b40dbd8f5314bcedb98bd083efe44..b7b8ac3006f3fa6af19de31737af82129dbf8642 100644 (file)
@@ -2,17 +2,17 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
        "database/sql"
        "fmt"
-       "os"
-       "path/filepath"
        "sync"
        "time"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/prometheus/client_golang/prometheus"
        check "gopkg.in/check.v1"
 )
 
@@ -21,7 +21,7 @@ var _ = check.Suite(&eventSourceSuite{})
 type eventSourceSuite struct{}
 
 func testDBConfig() arvados.PostgreSQLConnection {
-       cfg, err := arvados.GetConfig(filepath.Join(os.Getenv("WORKSPACE"), "tmp", "arvados.yml"))
+       cfg, err := arvados.GetConfig(arvados.DefaultConfigFile)
        if err != nil {
                panic(err)
        }
@@ -46,6 +46,8 @@ func (*eventSourceSuite) TestEventSource(c *check.C) {
        pges := &pgEventSource{
                DataSource: cfg.String(),
                QueueSize:  4,
+               Logger:     ctxlog.TestLogger(c),
+               Reg:        prometheus.NewRegistry(),
        }
        go pges.Run()
        sinks := make([]eventSink, 18)
index dc324464ec3d15f4b473b5d9b91f3557c7a90abd..4665dfcd9ee9208fcb71794189ba115d0285fa55 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import check "gopkg.in/check.v1"
 
index ea8dfc30c94c94e19308192c8c6713f745ce3a9b..df1ca7ab31c292280ab8a72c2f56155ef4c68e84 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
        "testing"
@@ -13,3 +13,7 @@ import (
 func TestGocheck(t *testing.T) {
        check.TestingT(t)
 }
+
+func init() {
+       testMode = true
+}
index 913b1ee8000cbd274039483df70bad7896d52df5..912643ad97c6374006b3fd4b00f90d340157d687 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
        "context"
@@ -12,6 +12,7 @@ import (
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/stats"
+       "github.com/sirupsen/logrus"
 )
 
 type handler struct {
@@ -31,12 +32,11 @@ type handlerStats struct {
        EventCount   uint64
 }
 
-func (h *handler) Handle(ws wsConn, eventSource eventSource, newSession func(wsConn, chan<- interface{}) (session, error)) (hStats handlerStats) {
+func (h *handler) Handle(ws wsConn, logger logrus.FieldLogger, eventSource eventSource, newSession func(wsConn, chan<- interface{}) (session, error)) (hStats handlerStats) {
        h.setupOnce.Do(h.setup)
 
        ctx, cancel := context.WithCancel(ws.Request().Context())
        defer cancel()
-       log := logger(ctx)
 
        incoming := eventSource.NewSink()
        defer incoming.Stop()
@@ -53,7 +53,7 @@ func (h *handler) Handle(ws wsConn, eventSource eventSource, newSession func(wsC
 
        sess, err := newSession(ws, queue)
        if err != nil {
-               log.WithError(err).Error("newSession failed")
+               logger.WithError(err).Error("newSession failed")
                return
        }
 
@@ -71,19 +71,19 @@ func (h *handler) Handle(ws wsConn, eventSource eventSource, newSession func(wsC
                        ws.SetReadDeadline(time.Now().Add(24 * 365 * time.Hour))
                        n, err := ws.Read(buf)
                        buf := buf[:n]
-                       log.WithField("frame", string(buf[:n])).Debug("received frame")
+                       logger.WithField("frame", string(buf[:n])).Debug("received frame")
                        if err == nil && n == cap(buf) {
                                err = errFrameTooBig
                        }
                        if err != nil {
                                if err != io.EOF && ctx.Err() == nil {
-                                       log.WithError(err).Info("read error")
+                                       logger.WithError(err).Info("read error")
                                }
                                return
                        }
                        err = sess.Receive(buf)
                        if err != nil {
-                               log.WithError(err).Error("sess.Receive() failed")
+                               logger.WithError(err).Error("sess.Receive() failed")
                                return
                        }
                }
@@ -108,38 +108,38 @@ func (h *handler) Handle(ws wsConn, eventSource eventSource, newSession func(wsC
                        var e *event
                        var buf []byte
                        var err error
-                       log := log
+                       logger := logger
 
                        switch data := data.(type) {
                        case []byte:
                                buf = data
                        case *event:
                                e = data
-                               log = log.WithField("serial", e.Serial)
+                               logger = logger.WithField("serial", e.Serial)
                                buf, err = sess.EventMessage(e)
                                if err != nil {
-                                       log.WithError(err).Error("EventMessage failed")
+                                       logger.WithError(err).Error("EventMessage failed")
                                        return
                                } else if len(buf) == 0 {
-                                       log.Debug("skip")
+                                       logger.Debug("skip")
                                        continue
                                }
                        default:
-                               log.WithField("data", data).Error("bad object in client queue")
+                               logger.WithField("data", data).Error("bad object in client queue")
                                continue
                        }
 
-                       log.WithField("frame", string(buf)).Debug("send event")
+                       logger.WithField("frame", string(buf)).Debug("send event")
                        ws.SetWriteDeadline(time.Now().Add(h.PingTimeout))
                        t0 := time.Now()
                        _, err = ws.Write(buf)
                        if err != nil {
                                if ctx.Err() == nil {
-                                       log.WithError(err).Error("write failed")
+                                       logger.WithError(err).Error("write failed")
                                }
                                return
                        }
-                       log.Debug("sent")
+                       logger.Debug("sent")
 
                        if e != nil {
                                hStats.QueueDelayNs += t0.Sub(e.Ready)
@@ -189,7 +189,7 @@ func (h *handler) Handle(ws wsConn, eventSource eventSource, newSession func(wsC
                                select {
                                case queue <- e:
                                default:
-                                       log.WithError(errQueueFull).Error("terminate")
+                                       logger.WithError(errQueueFull).Error("terminate")
                                        return
                                }
                        }
diff --git a/services/ws/main.go b/services/ws/main.go
deleted file mode 100644 (file)
index 5b42c44..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package main
-
-import (
-       "flag"
-       "fmt"
-       "os"
-
-       "git.arvados.org/arvados.git/lib/config"
-       "git.arvados.org/arvados.git/sdk/go/arvados"
-       "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "github.com/ghodss/yaml"
-       "github.com/sirupsen/logrus"
-)
-
-var logger = ctxlog.FromContext
-var version = "dev"
-
-func configure(log logrus.FieldLogger, args []string) *arvados.Cluster {
-       flags := flag.NewFlagSet(args[0], flag.ExitOnError)
-       dumpConfig := flags.Bool("dump-config", false, "show current configuration and exit")
-       getVersion := flags.Bool("version", false, "Print version information and exit.")
-
-       loader := config.NewLoader(nil, log)
-       loader.SetupFlags(flags)
-       args = loader.MungeLegacyConfigArgs(log, args[1:], "-legacy-ws-config")
-
-       flags.Parse(args)
-
-       // Print version information if requested
-       if *getVersion {
-               fmt.Printf("arvados-ws %s\n", version)
-               return nil
-       }
-
-       cfg, err := loader.Load()
-       if err != nil {
-               log.Fatal(err)
-       }
-
-       cluster, err := cfg.GetCluster("")
-       if err != nil {
-               log.Fatal(err)
-       }
-
-       ctxlog.SetLevel(cluster.SystemLogs.LogLevel)
-       ctxlog.SetFormat(cluster.SystemLogs.Format)
-
-       if *dumpConfig {
-               out, err := yaml.Marshal(cfg)
-               if err != nil {
-                       log.Fatal(err)
-               }
-               _, err = os.Stdout.Write(out)
-               if err != nil {
-                       log.Fatal(err)
-               }
-               return nil
-       }
-       return cluster
-}
-
-func main() {
-       log := logger(nil)
-
-       cluster := configure(log, os.Args)
-       if cluster == nil {
-               return
-       }
-
-       log.Printf("arvados-ws %s started", version)
-       srv := &server{cluster: cluster}
-       log.Fatal(srv.Run())
-}
index 745d28f9523f36ca83afa0b29e9511e6f98176f9..ac895f80e5fd7ae7933558fbfa6e6acb97a6c7b0 100644 (file)
@@ -2,14 +2,16 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
+       "context"
        "net/http"
        "net/url"
        "time"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
 )
 
 const (
@@ -19,7 +21,7 @@ const (
 
 type permChecker interface {
        SetToken(token string)
-       Check(uuid string) (bool, error)
+       Check(ctx context.Context, uuid string) (bool, error)
 }
 
 func newPermChecker(ac arvados.Client) permChecker {
@@ -54,9 +56,9 @@ func (pc *cachingPermChecker) SetToken(token string) {
        pc.cache = make(map[string]cacheEnt)
 }
 
-func (pc *cachingPermChecker) Check(uuid string) (bool, error) {
+func (pc *cachingPermChecker) Check(ctx context.Context, uuid string) (bool, error) {
        pc.nChecks++
-       logger := logger(nil).
+       logger := ctxlog.FromContext(ctx).
                WithField("token", pc.Client.AuthToken).
                WithField("uuid", uuid)
        pc.tidy()
index 5f972551ffe8ffeaa4e11ec81573ae46425591d3..023656c01fd93dc3a912283682ffc9eda59c7e6b 100644 (file)
@@ -2,9 +2,11 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
+       "context"
+
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
        check "gopkg.in/check.v1"
@@ -22,19 +24,19 @@ func (s *permSuite) TestCheck(c *check.C) {
        }
        wantError := func(uuid string) {
                c.Log(uuid)
-               ok, err := pc.Check(uuid)
+               ok, err := pc.Check(context.Background(), uuid)
                c.Check(ok, check.Equals, false)
                c.Check(err, check.NotNil)
        }
        wantYes := func(uuid string) {
                c.Log(uuid)
-               ok, err := pc.Check(uuid)
+               ok, err := pc.Check(context.Background(), uuid)
                c.Check(ok, check.Equals, true)
                c.Check(err, check.IsNil)
        }
        wantNo := func(uuid string) {
                c.Log(uuid)
-               ok, err := pc.Check(uuid)
+               ok, err := pc.Check(context.Background(), uuid)
                c.Check(ok, check.Equals, false)
                c.Check(err, check.IsNil)
        }
@@ -67,7 +69,7 @@ func (s *permSuite) TestCheck(c *check.C) {
        pc.SetToken(arvadostest.ActiveToken)
 
        c.Log("...network error")
-       pc.Client.APIHost = "127.0.0.1:discard"
+       pc.Client.APIHost = "127.0.0.1:9"
        wantError(arvadostest.UserAgreementCollection)
        wantError(arvadostest.FooBarDirCollection)
 
index f8c273c5141b6f76f73b28c3c2c5d995f0df94dd..878c282f8a6c57f17192b777faba760485757b86 100644 (file)
@@ -2,13 +2,11 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
-       "encoding/json"
        "io"
        "net/http"
-       "strconv"
        "sync"
        "sync/atomic"
        "time"
@@ -16,6 +14,7 @@ import (
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "git.arvados.org/arvados.git/sdk/go/health"
+       "github.com/prometheus/client_golang/prometheus"
        "github.com/sirupsen/logrus"
        "golang.org/x/net/websocket"
 )
@@ -28,7 +27,7 @@ type wsConn interface {
 }
 
 type router struct {
-       client         arvados.Client
+       client         *arvados.Client
        cluster        *arvados.Cluster
        eventSource    eventSource
        newPermChecker func() permChecker
@@ -36,33 +35,26 @@ type router struct {
        handler   *handler
        mux       *http.ServeMux
        setupOnce sync.Once
-
-       lastReqID  int64
-       lastReqMtx sync.Mutex
-
-       status routerDebugStatus
-}
-
-type routerDebugStatus struct {
-       ReqsReceived int64
-       ReqsActive   int64
-}
-
-type debugStatuser interface {
-       DebugStatus() interface{}
+       done      chan struct{}
+       reg       *prometheus.Registry
 }
 
 func (rtr *router) setup() {
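+       // Gauge of currently connected sockets, labeled by protocol version
+       // ("0" for the /websocket endpoint, "1" for /arvados/v1/events.ws).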
+       mSockets := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "ws",
+               Name:      "sockets",
+               Help:      "Number of connected sockets",
+       }, []string{"version"})
+       rtr.reg.MustRegister(mSockets)
+
        rtr.handler = &handler{
                PingTimeout: time.Duration(rtr.cluster.API.SendTimeout),
                QueueSize:   rtr.cluster.API.WebsocketClientEventQueue,
        }
        rtr.mux = http.NewServeMux()
-       rtr.mux.Handle("/websocket", rtr.makeServer(newSessionV0))
-       rtr.mux.Handle("/arvados/v1/events.ws", rtr.makeServer(newSessionV1))
-       rtr.mux.Handle("/debug.json", rtr.jsonHandler(rtr.DebugStatus))
-       rtr.mux.Handle("/status.json", rtr.jsonHandler(rtr.Status))
-
+       rtr.mux.Handle("/websocket", rtr.makeServer(newSessionV0, mSockets.WithLabelValues("0")))
+       rtr.mux.Handle("/arvados/v1/events.ws", rtr.makeServer(newSessionV1, mSockets.WithLabelValues("1")))
        rtr.mux.Handle("/_health/", &health.Handler{
                Token:  rtr.cluster.ManagementToken,
                Prefix: "/_health/",
@@ -71,91 +63,50 @@ func (rtr *router) setup() {
                },
                Log: func(r *http.Request, err error) {
                        if err != nil {
-                               logger(r.Context()).WithError(err).Error("error")
+                               ctxlog.FromContext(r.Context()).WithError(err).Error("error")
                        }
                },
        })
 }
 
-func (rtr *router) makeServer(newSession sessionFactory) *websocket.Server {
+func (rtr *router) makeServer(newSession sessionFactory, gauge prometheus.Gauge) *websocket.Server {
+       var connected int64
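+       // connected counts open connections served by this endpoint; the
+       // prometheus gauge mirrors it on connect and disconnect.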
        return &websocket.Server{
                Handshake: func(c *websocket.Config, r *http.Request) error {
                        return nil
                },
                Handler: websocket.Handler(func(ws *websocket.Conn) {
                        t0 := time.Now()
-                       log := logger(ws.Request().Context())
-                       log.Info("connected")
+                       logger := ctxlog.FromContext(ws.Request().Context())
+                       atomic.AddInt64(&connected, 1)
+                       gauge.Set(float64(atomic.LoadInt64(&connected)))
 
-                       stats := rtr.handler.Handle(ws, rtr.eventSource,
+                       stats := rtr.handler.Handle(ws, logger, rtr.eventSource,
                                func(ws wsConn, sendq chan<- interface{}) (session, error) {
-                                       return newSession(ws, sendq, rtr.eventSource.DB(), rtr.newPermChecker(), &rtr.client)
+                                       return newSession(ws, sendq, rtr.eventSource.DB(), rtr.newPermChecker(), rtr.client)
                                })
 
-                       log.WithFields(logrus.Fields{
+                       logger.WithFields(logrus.Fields{
                                "elapsed": time.Now().Sub(t0).Seconds(),
                                "stats":   stats,
-                       }).Info("disconnect")
+                       }).Info("client disconnected")
                        ws.Close()
+                       atomic.AddInt64(&connected, -1)
+                       gauge.Set(float64(atomic.LoadInt64(&connected)))
                }),
        }
 }
 
-func (rtr *router) newReqID() string {
-       rtr.lastReqMtx.Lock()
-       defer rtr.lastReqMtx.Unlock()
-       id := time.Now().UnixNano()
-       if id <= rtr.lastReqID {
-               id = rtr.lastReqID + 1
-       }
-       return strconv.FormatInt(id, 36)
-}
-
-func (rtr *router) DebugStatus() interface{} {
-       s := map[string]interface{}{
-               "HTTP":     rtr.status,
-               "Outgoing": rtr.handler.DebugStatus(),
-       }
-       if es, ok := rtr.eventSource.(debugStatuser); ok {
-               s["EventSource"] = es.DebugStatus()
-       }
-       return s
-}
-
-func (rtr *router) Status() interface{} {
-       return map[string]interface{}{
-               "Clients": atomic.LoadInt64(&rtr.status.ReqsActive),
-               "Version": version,
-       }
-}
-
 func (rtr *router) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        rtr.setupOnce.Do(rtr.setup)
-       atomic.AddInt64(&rtr.status.ReqsReceived, 1)
-       atomic.AddInt64(&rtr.status.ReqsActive, 1)
-       defer atomic.AddInt64(&rtr.status.ReqsActive, -1)
-
-       logger := logger(req.Context()).
-               WithField("RequestID", rtr.newReqID())
-       ctx := ctxlog.Context(req.Context(), logger)
-       req = req.WithContext(ctx)
-       logger.WithFields(logrus.Fields{
-               "remoteAddr":      req.RemoteAddr,
-               "reqForwardedFor": req.Header.Get("X-Forwarded-For"),
-       }).Info("accept request")
        rtr.mux.ServeHTTP(resp, req)
 }
 
-func (rtr *router) jsonHandler(fn func() interface{}) http.Handler {
-       return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-               logger := logger(r.Context())
-               w.Header().Set("Content-Type", "application/json")
-               enc := json.NewEncoder(w)
-               err := enc.Encode(fn())
-               if err != nil {
-                       msg := "encode failed"
-                       logger.WithError(err).Error(msg)
-                       http.Error(w, msg, http.StatusInternalServerError)
-               }
-       })
+func (rtr *router) CheckHealth() error {
+       rtr.setupOnce.Do(rtr.setup)
+       return rtr.eventSource.DBHealth()
+}
+
+func (rtr *router) Done() <-chan struct{} {
+       return rtr.done
 }
diff --git a/services/ws/server.go b/services/ws/server.go
deleted file mode 100644 (file)
index 9747ea1..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package main
-
-import (
-       "net"
-       "net/http"
-       "sync"
-       "time"
-
-       "git.arvados.org/arvados.git/sdk/go/arvados"
-       "github.com/coreos/go-systemd/daemon"
-)
-
-type server struct {
-       httpServer  *http.Server
-       listener    net.Listener
-       cluster     *arvados.Cluster
-       eventSource *pgEventSource
-       setupOnce   sync.Once
-}
-
-func (srv *server) Close() {
-       srv.WaitReady()
-       srv.eventSource.Close()
-       srv.httpServer.Close()
-       srv.listener.Close()
-}
-
-func (srv *server) WaitReady() {
-       srv.setupOnce.Do(srv.setup)
-       srv.eventSource.WaitReady()
-}
-
-func (srv *server) Run() error {
-       srv.setupOnce.Do(srv.setup)
-       return srv.httpServer.Serve(srv.listener)
-}
-
-func (srv *server) setup() {
-       log := logger(nil)
-
-       var listen arvados.URL
-       for listen, _ = range srv.cluster.Services.Websocket.InternalURLs {
-               break
-       }
-       ln, err := net.Listen("tcp", listen.Host)
-       if err != nil {
-               log.WithField("Listen", listen).Fatal(err)
-       }
-       log.WithField("Listen", ln.Addr().String()).Info("listening")
-
-       client := arvados.Client{}
-       client.APIHost = srv.cluster.Services.Controller.ExternalURL.Host
-       client.AuthToken = srv.cluster.SystemRootToken
-       client.Insecure = srv.cluster.TLS.Insecure
-
-       srv.listener = ln
-       srv.eventSource = &pgEventSource{
-               DataSource:   srv.cluster.PostgreSQL.Connection.String(),
-               MaxOpenConns: srv.cluster.PostgreSQL.ConnectionPool,
-               QueueSize:    srv.cluster.API.WebsocketServerEventQueue,
-       }
-
-       srv.httpServer = &http.Server{
-               Addr:           listen.Host,
-               ReadTimeout:    time.Minute,
-               WriteTimeout:   time.Minute,
-               MaxHeaderBytes: 1 << 20,
-               Handler: &router{
-                       cluster:        srv.cluster,
-                       client:         client,
-                       eventSource:    srv.eventSource,
-                       newPermChecker: func() permChecker { return newPermChecker(client) },
-               },
-       }
-
-       go func() {
-               srv.eventSource.Run()
-               log.Info("event source stopped")
-               srv.Close()
-       }()
-
-       if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
-               log.WithError(err).Warn("error notifying init daemon")
-       }
-}
diff --git a/services/ws/service.go b/services/ws/service.go
new file mode 100644 (file)
index 0000000..761e22e
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package ws
+
+import (
+       "context"
+       "fmt"
+
+       "git.arvados.org/arvados.git/lib/cmd"
+       "git.arvados.org/arvados.git/lib/service"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+var testMode = false
+
+var Command cmd.Handler = service.Command(arvados.ServiceNameWebsocket, newHandler)
+
+func newHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {
+       client, err := arvados.NewClientFromConfig(cluster)
+       if err != nil {
+               return service.ErrorHandler(ctx, cluster, fmt.Errorf("error initializing client from cluster config: %s", err))
+       }
+       eventSource := &pgEventSource{
+               DataSource:   cluster.PostgreSQL.Connection.String(),
+               MaxOpenConns: cluster.PostgreSQL.ConnectionPool,
+               QueueSize:    cluster.API.WebsocketServerEventQueue,
+               Logger:       ctxlog.FromContext(ctx),
+               Reg:          reg,
+       }
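+       // Run the event source in the background; done is closed when it
+       // stops, and the router exposes it via Done().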
+       done := make(chan struct{})
+       go func() {
+               eventSource.Run()
+               ctxlog.FromContext(ctx).Error("event source stopped")
+               close(done)
+       }()
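+       // Wait for the initial database connection attempt, and report a
+       // failure via ErrorHandler instead of serving requests with a
+       // broken event source.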
+       eventSource.WaitReady()
+       if err := eventSource.DBHealth(); err != nil {
+               return service.ErrorHandler(ctx, cluster, err)
+       }
+       rtr := &router{
+               cluster:        cluster,
+               client:         client,
+               eventSource:    eventSource,
+               newPermChecker: func() permChecker { return newPermChecker(*client) },
+               done:           done,
+               reg:            reg,
+       }
+       return rtr
+}
similarity index 55%
rename from services/ws/server_test.go
rename to services/ws/service_test.go
index 88279ec9b2de83cd28bc191815bd1fa274cfec80..7213dcad2a9ddbb967991d70a2f9b094ce317b98 100644 (file)
@@ -2,39 +2,61 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
-       "encoding/json"
+       "bytes"
+       "context"
+       "flag"
        "io/ioutil"
        "net/http"
+       "net/http/httptest"
        "os"
+       "strings"
        "sync"
        "time"
 
        "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/lib/service"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/sirupsen/logrus"
        check "gopkg.in/check.v1"
 )
 
-var _ = check.Suite(&serverSuite{})
+var _ = check.Suite(&serviceSuite{})
 
-type serverSuite struct {
+type serviceSuite struct {
+       handler service.Handler
+       reg     *prometheus.Registry
+       srv     *httptest.Server
        cluster *arvados.Cluster
-       srv     *server
        wg      sync.WaitGroup
 }
 
-func (s *serverSuite) SetUpTest(c *check.C) {
+func (s *serviceSuite) SetUpTest(c *check.C) {
        var err error
        s.cluster, err = s.testConfig(c)
        c.Assert(err, check.IsNil)
-       s.srv = &server{cluster: s.cluster}
 }
 
-func (*serverSuite) testConfig(c *check.C) (*arvados.Cluster, error) {
+func (s *serviceSuite) start(c *check.C) {
+       s.reg = prometheus.NewRegistry()
+       s.handler = newHandler(context.Background(), s.cluster, "", s.reg)
+       instrumented := httpserver.Instrument(s.reg, ctxlog.TestLogger(c), s.handler)
+       s.srv = httptest.NewServer(instrumented.ServeAPI(s.cluster.ManagementToken, instrumented))
+}
+
+func (s *serviceSuite) TearDownTest(c *check.C) {
+       if s.srv != nil {
+               s.srv.Close()
+       }
+}
+
+func (*serviceSuite) testConfig(c *check.C) (*arvados.Cluster, error) {
        ldr := config.NewLoader(nil, ctxlog.TestLogger(c))
        cfg, err := ldr.Load()
        if err != nil {
@@ -49,47 +71,30 @@ func (*serverSuite) testConfig(c *check.C) (*arvados.Cluster, error) {
        cluster.SystemRootToken = client.AuthToken
        cluster.TLS.Insecure = client.Insecure
        cluster.PostgreSQL.Connection = testDBConfig()
+       cluster.PostgreSQL.ConnectionPool = 12
        cluster.Services.Websocket.InternalURLs = map[arvados.URL]arvados.ServiceInstance{arvados.URL{Host: ":"}: arvados.ServiceInstance{}}
        cluster.ManagementToken = arvadostest.ManagementToken
        return cluster, nil
 }
 
-// TestBadDB ensures Run() returns an error (instead of panicking or
-// deadlocking) if it can't connect to the database server at startup.
-func (s *serverSuite) TestBadDB(c *check.C) {
+// TestBadDB ensures the server returns an error (instead of panicking
+// or deadlocking) if it can't connect to the database server at
+// startup.
+func (s *serviceSuite) TestBadDB(c *check.C) {
        s.cluster.PostgreSQL.Connection["password"] = "1234"
-
-       var wg sync.WaitGroup
-       wg.Add(1)
-       go func() {
-               err := s.srv.Run()
-               c.Check(err, check.NotNil)
-               wg.Done()
-       }()
-       wg.Add(1)
-       go func() {
-               s.srv.WaitReady()
-               wg.Done()
-       }()
-
-       done := make(chan bool)
-       go func() {
-               wg.Wait()
-               close(done)
-       }()
-       select {
-       case <-done:
-       case <-time.After(10 * time.Second):
-               c.Fatal("timeout")
-       }
+       s.start(c)
+       resp, err := http.Get(s.srv.URL)
+       c.Check(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusInternalServerError)
+       c.Check(s.handler.CheckHealth(), check.ErrorMatches, "database not connected")
 }
 
-func (s *serverSuite) TestHealth(c *check.C) {
-       go s.srv.Run()
-       defer s.srv.Close()
-       s.srv.WaitReady()
+func (s *serviceSuite) TestHealth(c *check.C) {
+       s.start(c)
        for _, token := range []string{"", "foo", s.cluster.ManagementToken} {
-               req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/_health/ping", nil)
+               req, err := http.NewRequest("GET", s.srv.URL+"/_health/ping", nil)
                c.Assert(err, check.IsNil)
                if token != "" {
                        req.Header.Add("Authorization", "Bearer "+token)
@@ -107,30 +112,38 @@ func (s *serverSuite) TestHealth(c *check.C) {
        }
 }
 
-func (s *serverSuite) TestStatus(c *check.C) {
-       go s.srv.Run()
-       defer s.srv.Close()
-       s.srv.WaitReady()
-       req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/status.json", nil)
-       c.Assert(err, check.IsNil)
-       resp, err := http.DefaultClient.Do(req)
-       c.Check(err, check.IsNil)
-       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
-       var status map[string]interface{}
-       err = json.NewDecoder(resp.Body).Decode(&status)
-       c.Check(err, check.IsNil)
-       c.Check(status["Version"], check.Not(check.Equals), "")
+func (s *serviceSuite) TestMetrics(c *check.C) {
+       s.start(c)
+       s.handler.CheckHealth()
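+       // The db stats gauges are updated asynchronously, so poll /metrics
+       // (for up to one second) until db_max_connections is reported.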
+       for deadline := time.Now().Add(time.Second); ; {
+               req, err := http.NewRequest("GET", s.srv.URL+"/metrics", nil)
+               c.Assert(err, check.IsNil)
+               req.Header.Set("Authorization", "Bearer "+s.cluster.ManagementToken)
+               resp, err := http.DefaultClient.Do(req)
+               c.Check(err, check.IsNil)
+               c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+               text, err := ioutil.ReadAll(resp.Body)
+               c.Check(err, check.IsNil)
+               if strings.Contains(string(text), "_db_max_connections 0\n") {
+                       // wait for the first db stats update
+                       if time.Now().After(deadline) {
+                               c.Fatal("timed out")
+                       }
+                       time.Sleep(time.Second / 50)
+                       continue
+               }
+               c.Check(string(text), check.Matches, `(?ms).*\narvados_ws_db_max_connections 12\n.*`)
+               c.Check(string(text), check.Matches, `(?ms).*\narvados_ws_db_open_connections\{inuse="0"\} \d+\n.*`)
+               c.Check(string(text), check.Matches, `(?ms).*\narvados_ws_db_open_connections\{inuse="1"\} \d+\n.*`)
+               break
+       }
 }
 
-func (s *serverSuite) TestHealthDisabled(c *check.C) {
+func (s *serviceSuite) TestHealthDisabled(c *check.C) {
        s.cluster.ManagementToken = ""
-
-       go s.srv.Run()
-       defer s.srv.Close()
-       s.srv.WaitReady()
-
+       s.start(c)
        for _, token := range []string{"", "foo", arvadostest.ManagementToken} {
-               req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/_health/ping", nil)
+               req, err := http.NewRequest("GET", s.srv.URL+"/_health/ping", nil)
                c.Assert(err, check.IsNil)
                req.Header.Add("Authorization", "Bearer "+token)
                resp, err := http.DefaultClient.Do(req)
@@ -139,7 +152,7 @@ func (s *serverSuite) TestHealthDisabled(c *check.C) {
        }
 }
 
-func (s *serverSuite) TestLoadLegacyConfig(c *check.C) {
+func (s *serviceSuite) TestLoadLegacyConfig(c *check.C) {
        content := []byte(`
 Client:
   APIHost: example.com
@@ -175,7 +188,14 @@ ManagementToken: qqqqq
                c.Error(err)
 
        }
-       cluster := configure(logger(nil), []string{"arvados-ws", "-config", tmpfile.Name()})
+       ldr := config.NewLoader(&bytes.Buffer{}, logrus.New())
+       flagset := flag.NewFlagSet("", flag.ContinueOnError)
+       ldr.SetupFlags(flagset)
+       flagset.Parse(ldr.MungeLegacyConfigArgs(ctxlog.TestLogger(c), []string{"-config", tmpfile.Name()}, "-legacy-ws-config"))
+       cfg, err := ldr.Load()
+       c.Check(err, check.IsNil)
+       cluster, err := cfg.GetCluster("")
+       c.Check(err, check.IsNil)
        c.Check(cluster, check.NotNil)
 
        c.Check(cluster.Services.Controller.ExternalURL, check.Equals, arvados.URL{Scheme: "https", Host: "example.com"})
index 53b02146d560fe3eb4d045227277d60a8c6e072b..c0cfbd6d02f6ff37083f426c85084effae45f212 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
        "database/sql"
index b0f40371ffeb0ba12c5d3d1e1326d320fb6dbb51..309352b39edbd329aa031ec0c6194791341acec9 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
        "database/sql"
@@ -14,6 +14,7 @@ import (
        "time"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "github.com/sirupsen/logrus"
 )
 
@@ -59,7 +60,7 @@ func newSessionV0(ws wsConn, sendq chan<- interface{}, db *sql.DB, pc permChecke
                db:          db,
                ac:          ac,
                permChecker: pc,
-               log:         logger(ws.Request().Context()),
+               log:         ctxlog.FromContext(ws.Request().Context()),
        }
 
        err := ws.Request().ParseForm()
@@ -128,7 +129,7 @@ func (sess *v0session) EventMessage(e *event) ([]byte, error) {
        } else {
                permTarget = detail.ObjectUUID
        }
-       ok, err := sess.permChecker.Check(permTarget)
+       ok, err := sess.permChecker.Check(sess.ws.Request().Context(), permTarget)
        if err != nil || !ok {
                return nil, err
        }
index bd70b44459dd79b5f22b0c08074b2d4bf480d76f..7986cc7b08f95598ae4756be0aa1ca3dea2e2f7b 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
        "bytes"
@@ -11,6 +11,7 @@ import (
        "io"
        "net/url"
        "os"
+       "strings"
        "sync"
        "time"
 
@@ -30,17 +31,16 @@ func init() {
 var _ = check.Suite(&v0Suite{})
 
 type v0Suite struct {
-       serverSuite serverSuite
-       token       string
-       toDelete    []string
-       wg          sync.WaitGroup
-       ignoreLogID uint64
+       serviceSuite serviceSuite
+       token        string
+       toDelete     []string
+       wg           sync.WaitGroup
+       ignoreLogID  uint64
 }
 
 func (s *v0Suite) SetUpTest(c *check.C) {
-       s.serverSuite.SetUpTest(c)
-       go s.serverSuite.srv.Run()
-       s.serverSuite.srv.WaitReady()
+       s.serviceSuite.SetUpTest(c)
+       s.serviceSuite.start(c)
 
        s.token = arvadostest.ActiveToken
        s.ignoreLogID = s.lastLogID(c)
@@ -48,7 +48,7 @@ func (s *v0Suite) SetUpTest(c *check.C) {
 
 func (s *v0Suite) TearDownTest(c *check.C) {
        s.wg.Wait()
-       s.serverSuite.srv.Close()
+       s.serviceSuite.TearDownTest(c)
 }
 
 func (s *v0Suite) TearDownSuite(c *check.C) {
@@ -353,8 +353,8 @@ func (s *v0Suite) expectLog(c *check.C, r *json.Decoder) *arvados.Log {
 }
 
 func (s *v0Suite) testClient() (*websocket.Conn, *json.Decoder, *json.Encoder) {
-       srv := s.serverSuite.srv
-       conn, err := websocket.Dial("ws://"+srv.listener.Addr().String()+"/websocket?api_token="+s.token, "", "http://"+srv.listener.Addr().String())
+       srv := s.serviceSuite.srv
+       conn, err := websocket.Dial(strings.Replace(srv.URL, "http", "ws", 1)+"/websocket?api_token="+s.token, "", srv.URL)
        if err != nil {
                panic(err)
        }
index 58f77df430201f79e71f66209711a740dff8a016..60b980d58e2f8f8a9acc67362deb7d7beff21350 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package ws
 
 import (
        "database/sql"
index 0259ba1c5cd6adc5f74eef046c95ee72bac64aae..b6d6c68e31fadd292df47fa6ea9410f979167396 100644 (file)
@@ -9,7 +9,7 @@ ENV DEBIAN_FRONTEND noninteractive
 RUN apt-get update && \
     apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
     postgresql-9.6 postgresql-contrib-9.6 git build-essential runit curl libpq-dev \
-    libcurl4-openssl-dev libssl1.0-dev zlib1g-dev libpcre3-dev \
+    libcurl4-openssl-dev libssl1.0-dev zlib1g-dev libpcre3-dev libpam-dev \
     openssh-server python-setuptools netcat-traditional \
     python-epydoc graphviz bzip2 less sudo virtualenv \
     libpython-dev fuse libfuse-dev python-pip python-yaml \
index d2585a6666c270de61514d007257d648eecd2287..efa2e08a7a7f34c3a04ee4c213931ed37a4f65ab 100755 (executable)
@@ -7,34 +7,14 @@ exec 2>&1
 set -ex -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
-
-if test -s /var/lib/arvados/api_rails_env ; then
-  RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
-else
-  RAILS_ENV=development
-fi
-
 . /usr/local/lib/arvbox/go-setup.sh
 
-flock /var/lib/gopath/gopath.lock go install "git.arvados.org/arvados.git/services/ws"
-install $GOPATH/bin/ws /usr/local/bin/arvados-ws
+(cd /usr/local/bin && ln -sf arvados-server arvados-ws)
 
 if test "$1" = "--only-deps" ; then
     exit
 fi
 
-database_pw=$(cat /var/lib/arvados/api_database_pw)
-
-cat >/var/lib/arvados/arvados-ws.yml <<EOF
-Client:
-  APIHost: $localip:${services[controller-ssl]}
-  Insecure: false
-Postgres:
-  dbname: arvados_$RAILS_ENV
-  user: arvados
-  password: $database_pw
-  host: localhost
-Listen: localhost:${services[websockets]}
-EOF
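+# Use the shared cluster configuration (generated by cluster-config.sh)
+# instead of writing a per-service arvados-ws.yml.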
+/usr/local/lib/arvbox/runsu.sh flock /var/lib/arvados/cluster_config.yml.lock /usr/local/lib/arvbox/cluster-config.sh
 
-exec /usr/local/bin/arvados-ws -config /var/lib/arvados/arvados-ws.yml
+exec /usr/local/lib/arvbox/runsu.sh /usr/local/bin/arvados-ws