15954: Merge branch 'master'
author Tom Clegg <tom@tomclegg.ca>
Thu, 27 Feb 2020 14:25:20 +0000 (09:25 -0500)
committer Tom Clegg <tom@tomclegg.ca>
Thu, 27 Feb 2020 14:25:20 +0000 (09:25 -0500)
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@tomclegg.ca>

32 files changed:
apps/workbench/config/application.rb
build/run-tests.sh
cmd/arvados-server/cmd.go
doc/examples/config/zzzzz.yml [new file with mode: 0644]
go.mod
go.sum
lib/boot/cert.go [new file with mode: 0644]
lib/boot/cmd.go [new file with mode: 0644]
lib/boot/nginx.go [new file with mode: 0644]
lib/boot/passenger.go [new file with mode: 0644]
lib/boot/postgresql.go [new file with mode: 0644]
lib/boot/seed.go [new file with mode: 0644]
lib/boot/service.go [new file with mode: 0644]
lib/boot/supervisor.go [new file with mode: 0644]
lib/config/config.default.yml
lib/config/generated_config.go
lib/controller/federation/conn.go
lib/controller/integration_test.go [new file with mode: 0644]
lib/service/cmd.go
lib/service/log.go [new file with mode: 0644]
sdk/go/arvados/collection.go
sdk/go/auth/auth.go
sdk/go/ctxlog/log.go
sdk/go/health/aggregator.go
sdk/python/tests/nginx.conf
sdk/python/tests/run_test_server.py
services/api/app/controllers/arvados/v1/users_controller.rb
services/api/app/models/api_client_authorization.rb
services/api/config/application.default.yml
services/api/config/application.rb
services/api/lib/config_loader.rb
services/login-sync/Gemfile.lock

diff --git a/apps/workbench/config/application.rb b/apps/workbench/config/application.rb
index 514d57196d3fcc802dbc83a640907bbf166ceb76..2d331c70ad3a13941f933ab09b054fcdf4cd504e 100644 (file)
@@ -19,6 +19,10 @@ require "rails/test_unit/railtie"
 
 Bundler.require(:default, Rails.env)
 
+if Rails.env == 'test'
+  Rails.logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT))
+end
+
 module ArvadosWorkbench
   class Application < Rails::Application
 
diff --git a/build/run-tests.sh b/build/run-tests.sh
index 891faca41944469188afebb53f46291a14639e7c..fff095e6e62e0922917b6ddee9ecd3160a73447f 100755 (executable)
@@ -393,7 +393,7 @@ checkpidfile() {
 
 checkhealth() {
     svc="$1"
-    base=$(python -c "import yaml; print list(yaml.safe_load(file('$ARVADOS_CONFIG'))['Clusters']['zzzzz']['Services']['$1']['InternalURLs'].keys())[0]")
+    base=$("${VENVDIR}/bin/python" -c "import yaml; print list(yaml.safe_load(file('$ARVADOS_CONFIG'))['Clusters']['zzzzz']['Services']['$1']['InternalURLs'].keys())[0]")
     url="$base/_health/ping"
     if ! curl -Ss -H "Authorization: Bearer e687950a23c3a9bceec28c6223a06c79" "${url}" | tee -a /dev/stderr | grep '"OK"'; then
         echo "${url} failed"
@@ -555,7 +555,7 @@ setup_ruby_environment() {
             export HOME=$GEMHOME
             ("$bundle" version | grep -q 2.0.2) \
                 || gem install --user bundler -v 2.0.2
-            "$bundle" version | grep 2.0.2
+            "$bundle" version | tee /dev/stderr | grep -q 'version 2'
         ) || fatal 'install bundler'
     fi
 }
@@ -648,8 +648,13 @@ install_env() {
     . "$VENVDIR/bin/activate"
 
     # Needed for run_test_server.py which is used by certain (non-Python) tests.
-    pip install --no-cache-dir PyYAML future httplib2 \
-        || fatal "`pip install PyYAML future httplib2` failed"
+    (
+        set -e
+        "${VENVDIR}/bin/pip" install PyYAML
+        "${VENV3DIR}/bin/pip" install PyYAML
+        cd "$WORKSPACE/sdk/python"
+        python setup.py install
+    ) || fatal "installing PyYAML and sdk/python failed"
 
     # Preinstall libcloud if using a fork; otherwise nodemanager "pip
     # install" won't pick it up by default.
@@ -890,7 +895,7 @@ bundle_install_trylocal() {
             echo "(Running bundle install again, without --local.)"
             "$bundle" install --no-deployment
         fi
-        "$bundle" package --all
+        "$bundle" package
     )
 }
 
@@ -937,6 +942,7 @@ install_services/login-sync() {
 
 install_services/api() {
     stop_services
+    check_arvados_config "services/api"
     cd "$WORKSPACE/services/api" \
         && RAILS_ENV=test bundle_install_trylocal \
             || return 1
@@ -948,7 +954,7 @@ install_services/api() {
     # database, so that we can drop it. This assumes the current user
     # is a postgresql superuser.
     cd "$WORKSPACE/services/api" \
-        && test_database=$(python -c "import yaml; print yaml.safe_load(file('$ARVADOS_CONFIG'))['Clusters']['zzzzz']['PostgreSQL']['Connection']['dbname']") \
+        && test_database=$("${VENVDIR}/bin/python" -c "import yaml; print yaml.safe_load(file('$ARVADOS_CONFIG'))['Clusters']['zzzzz']['PostgreSQL']['Connection']['dbname']") \
         && psql "$test_database" -c "SELECT pg_terminate_backend (pg_stat_activity.pid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$test_database';" 2>/dev/null
 
     mkdir -p "$WORKSPACE/services/api/tmp/pids"
@@ -972,15 +978,16 @@ install_services/api() {
         && git --git-dir internal.git init \
             || return 1
 
-
-    (cd "$WORKSPACE/services/api"
-     export RAILS_ENV=test
-     if "$bundle" exec rails db:environment:set ; then
-        "$bundle" exec rake db:drop
-     fi
-     "$bundle" exec rake db:setup \
-        && "$bundle" exec rake db:fixtures:load
-    )
+    (
+        set -e
+        cd "$WORKSPACE/services/api"
+        export RAILS_ENV=test
+        if "$bundle" exec rails db:environment:set ; then
+            "$bundle" exec rake db:drop
+        fi
+        "$bundle" exec rake db:setup
+        "$bundle" exec rake db:fixtures:load
+    ) || return 1
 }
 
 declare -a pythonstuff
@@ -1100,7 +1107,7 @@ install_deps() {
     do_install sdk/cli
     do_install sdk/perl
     do_install sdk/python pip
-    do_install sdk/python pip3
+    do_install sdk/python pip "${VENV3DIR}/bin/"
     do_install sdk/ruby
     do_install services/api
     do_install services/arv-git-httpd go
diff --git a/cmd/arvados-server/cmd.go b/cmd/arvados-server/cmd.go
index d93a8e78fd3f216788584872a7d1bd3f3fd675d2..a9d927d8734401f76fa173bff7214e0038fc4c68 100644 (file)
@@ -7,6 +7,7 @@ package main
 import (
        "os"
 
+       "git.arvados.org/arvados.git/lib/boot"
        "git.arvados.org/arvados.git/lib/cloud/cloudtest"
        "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/lib/config"
@@ -21,6 +22,7 @@ var (
                "-version":  cmd.Version,
                "--version": cmd.Version,
 
+               "boot":            boot.Command,
                "cloudtest":       cloudtest.Command,
                "config-check":    config.CheckCommand,
                "config-dump":     config.DumpCommand,
diff --git a/doc/examples/config/zzzzz.yml b/doc/examples/config/zzzzz.yml
new file mode 100644 (file)
index 0000000..c63550e
--- /dev/null
@@ -0,0 +1,12 @@
+Clusters:
+  zzzzz:
+    ManagementToken: e687950a23c3a9bceec28c6223a06c79
+    SystemRootToken: systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy
+    API:
+      RequestTimeout: 30s
+    TLS:
+      Insecure: true
+    Collections:
+      BlobSigningKey: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
+      TrustAllContent: true
+      ForwardSlashNameSubstitution: /
diff --git a/go.mod b/go.mod
index 2f1852734099cb2238c4ba6a70baa477fad3b3a8..9a139448a6118ae73e2a423c2b0d92aabb622622 100644 (file)
--- a/go.mod
+++ b/go.mod
@@ -33,7 +33,7 @@ require (
        github.com/julienschmidt/httprouter v1.2.0
        github.com/karalabe/xgo v0.0.0-20191115072854-c5ccff8648a7 // indirect
        github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5 // indirect
-       github.com/lib/pq v0.0.0-20171126050459-83612a56d3dd
+       github.com/lib/pq v1.3.0
        github.com/marstr/guid v1.1.1-0.20170427235115-8bdf7d1a087c // indirect
        github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 // indirect
        github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
@@ -49,8 +49,8 @@ require (
        github.com/src-d/gcfg v1.3.0 // indirect
        github.com/stretchr/testify v1.4.0 // indirect
        github.com/xanzy/ssh-agent v0.1.0 // indirect
-       golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
-       golang.org/x/net v0.0.0-20190613194153-d28f0bde5980
+       golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
+       golang.org/x/net v0.0.0-20190620200207-3b0461eec859
        golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
        golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd // indirect
        google.golang.org/api v0.13.0
diff --git a/go.sum b/go.sum
index 0a543fde905eb15567464ee84e74aa3629f67d3c..48b23d79653d22e47b2e595682e1e06196d1d99a 100644 (file)
--- a/go.sum
+++ b/go.sum
@@ -111,8 +111,8 @@ github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5/go.mod h1:CT
 github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/lib/pq v0.0.0-20171126050459-83612a56d3dd h1:2RDaVc4/izhWyAvYxNm8c9saSyCDIxefNwOcqaH7pcU=
-github.com/lib/pq v0.0.0-20171126050459-83612a56d3dd/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/marstr/guid v1.1.1-0.20170427235115-8bdf7d1a087c h1:ouxemItv3B/Zh008HJkEXDYCN3BIRyNHxtUN7ThJ5Js=
 github.com/marstr/guid v1.1.1-0.20170427235115-8bdf7d1a087c/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
@@ -174,6 +174,8 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -186,10 +188,13 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw=
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@@ -203,6 +208,7 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -218,6 +224,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c h1:97SnQk1GYRXJgvwZ8fadnxDOWfKvkNQHH3CtZntPSrM=
 golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA=
diff --git a/lib/boot/cert.go b/lib/boot/cert.go
new file mode 100644 (file)
index 0000000..508605b
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+       "context"
+       "io/ioutil"
+       "path/filepath"
+)
+
+// Create a root CA key and use it to make a new server
+// certificate+key pair.
+//
+// In future we'll make one root CA key per host instead of one per
+// cluster, so it only needs to be imported to a browser once for
+// ongoing dev/test usage.
+type createCertificates struct{}
+
+func (createCertificates) String() string {
+       return "certificates"
+}
+
+func (createCertificates) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+       // Generate root key
+       err := super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "genrsa", "-out", "rootCA.key", "4096")
+       if err != nil {
+               return err
+       }
+       // Generate a self-signed root certificate
+       err = super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "req", "-x509", "-new", "-nodes", "-key", "rootCA.key", "-sha256", "-days", "3650", "-out", "rootCA.crt", "-subj", "/C=US/ST=MA/O=Example Org/CN=localhost")
+       if err != nil {
+               return err
+       }
+       // Generate server key
+       err = super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "genrsa", "-out", "server.key", "2048")
+       if err != nil {
+               return err
+       }
+       // Build config file for signing request
+       defaultconf, err := ioutil.ReadFile("/etc/ssl/openssl.cnf")
+       if err != nil {
+               return err
+       }
+       err = ioutil.WriteFile(filepath.Join(super.tempdir, "server.cfg"), append(defaultconf, []byte(`
+[SAN]
+subjectAltName=DNS:localhost,DNS:localhost.localdomain
+`)...), 0777)
+       if err != nil {
+               return err
+       }
+       // Generate signing request
+       err = super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "req", "-new", "-sha256", "-key", "server.key", "-subj", "/C=US/ST=MA/O=Example Org/CN=localhost", "-reqexts", "SAN", "-config", "server.cfg", "-out", "server.csr")
+       if err != nil {
+               return err
+       }
+       // Sign certificate
+       err = super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "x509", "-req", "-in", "server.csr", "-CA", "rootCA.crt", "-CAkey", "rootCA.key", "-CAcreateserial", "-out", "server.crt", "-days", "3650", "-sha256")
+       if err != nil {
+               return err
+       }
+       return nil
+}
diff --git a/lib/boot/cmd.go b/lib/boot/cmd.go
new file mode 100644 (file)
index 0000000..60955df
--- /dev/null
@@ -0,0 +1,88 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+       "context"
+       "flag"
+       "fmt"
+       "io"
+
+       "git.arvados.org/arvados.git/lib/cmd"
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+)
+
+var Command cmd.Handler = bootCommand{}
+
+type supervisedTask interface {
+       // Execute the task. Run should return nil when the task is
+       // done enough to satisfy a dependency relationship (e.g., the
+       // service is running and ready). If the task starts a
+       // goroutine that fails after Run returns (e.g., the service
+       // shuts down), it should call fail().
+       Run(ctx context.Context, fail func(error), super *Supervisor) error
+       String() string
+}
+
+type bootCommand struct{}
+
+func (bootCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       super := &Supervisor{
+               Stderr: stderr,
+               logger: ctxlog.New(stderr, "json", "info"),
+       }
+
+       ctx := ctxlog.Context(context.Background(), super.logger)
+       ctx, cancel := context.WithCancel(ctx)
+       defer cancel()
+
+       var err error
+       defer func() {
+               if err != nil {
+                       super.logger.WithError(err).Info("exiting")
+               }
+       }()
+
+       flags := flag.NewFlagSet(prog, flag.ContinueOnError)
+       flags.SetOutput(stderr)
+       loader := config.NewLoader(stdin, super.logger)
+       loader.SetupFlags(flags)
+       versionFlag := flags.Bool("version", false, "Write version information to stdout and exit 0")
+       flags.StringVar(&super.SourcePath, "source", ".", "arvados source tree `directory`")
+       flags.StringVar(&super.ClusterType, "type", "production", "cluster `type`: development, test, or production")
+       flags.StringVar(&super.ListenHost, "listen-host", "localhost", "host name or interface address for service listeners")
+       flags.StringVar(&super.ControllerAddr, "controller-address", ":0", "desired controller address, `host:port` or `:port`")
+       flags.BoolVar(&super.OwnTemporaryDatabase, "own-temporary-database", false, "bring up a postgres server and create a temporary database")
+       err = flags.Parse(args)
+       if err == flag.ErrHelp {
+               err = nil
+               return 0
+       } else if err != nil {
+               return 2
+       } else if *versionFlag {
+               return cmd.Version.RunCommand(prog, args, stdin, stdout, stderr)
+       } else if super.ClusterType != "development" && super.ClusterType != "test" && super.ClusterType != "production" {
+               err = fmt.Errorf("cluster type must be 'development', 'test', or 'production'")
+               return 2
+       }
+
+       loader.SkipAPICalls = true
+       cfg, err := loader.Load()
+       if err != nil {
+               return 1
+       }
+
+       super.Start(ctx, cfg)
+       defer super.Stop()
+       if url, ok := super.WaitReady(); ok {
+               fmt.Fprintln(stdout, url)
+               // Wait for signal/crash + orderly shutdown
+               <-super.done
+               return 0
+       } else {
+               return 1
+       }
+}
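For reference, a minimal sketch of how the new subcommand might be invoked during development, using only the flags defined in lib/boot/cmd.go above (it is assumed the arvados-server binary has already been built and is on PATH):

    # Bring up a test cluster from the current source tree, with a
    # private PostgreSQL server and a temporary database.
    arvados-server boot -type test -own-temporary-database

    # Or point at an explicit source checkout and listen address.
    arvados-server boot -source ~/arvados -type development -listen-host localhost

When all configured components pass their health checks, the command prints the controller's external URL on stdout and keeps running until interrupted.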
diff --git a/lib/boot/nginx.go b/lib/boot/nginx.go
new file mode 100644 (file)
index 0000000..6b2d677
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+       "context"
+       "fmt"
+       "io/ioutil"
+       "net"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "regexp"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// Run an Nginx process that proxies the supervisor's configured
+// ExternalURLs to the appropriate InternalURLs.
+type runNginx struct{}
+
+func (runNginx) String() string {
+       return "nginx"
+}
+
+func (runNginx) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+       vars := map[string]string{
+               "LISTENHOST": super.ListenHost,
+               "SSLCERT":    filepath.Join(super.SourcePath, "services", "api", "tmp", "self-signed.pem"), // TODO: root ca
+               "SSLKEY":     filepath.Join(super.SourcePath, "services", "api", "tmp", "self-signed.key"), // TODO: root ca
+               "ACCESSLOG":  filepath.Join(super.tempdir, "nginx_access.log"),
+               "ERRORLOG":   filepath.Join(super.tempdir, "nginx_error.log"),
+               "TMPDIR":     super.tempdir,
+       }
+       var err error
+       for _, cmpt := range []struct {
+               varname string
+               svc     arvados.Service
+       }{
+               {"CONTROLLER", super.cluster.Services.Controller},
+               {"KEEPWEB", super.cluster.Services.WebDAV},
+               {"KEEPWEBDL", super.cluster.Services.WebDAVDownload},
+               {"KEEPPROXY", super.cluster.Services.Keepproxy},
+               {"GIT", super.cluster.Services.GitHTTP},
+               {"WORKBENCH1", super.cluster.Services.Workbench1},
+               {"WS", super.cluster.Services.Websocket},
+       } {
+               port, err := internalPort(cmpt.svc)
+               if err != nil {
+                       return fmt.Errorf("%s internal port: %s (%v)", cmpt.varname, err, cmpt.svc)
+               }
+               if ok, err := addrIsLocal(net.JoinHostPort(super.ListenHost, port)); !ok || err != nil {
+                       return fmt.Errorf("addrIsLocal() failed for host %q port %q: %v", super.ListenHost, port, err)
+               }
+               vars[cmpt.varname+"PORT"] = port
+
+               port, err = externalPort(cmpt.svc)
+               if err != nil {
+                       return fmt.Errorf("%s external port: %s (%v)", cmpt.varname, err, cmpt.svc)
+               }
+               if ok, err := addrIsLocal(net.JoinHostPort(super.ListenHost, port)); !ok || err != nil {
+                       return fmt.Errorf("addrIsLocal() failed for host %q port %q: %v", super.ListenHost, port, err)
+               }
+               vars[cmpt.varname+"SSLPORT"] = port
+       }
+       tmpl, err := ioutil.ReadFile(filepath.Join(super.SourcePath, "sdk", "python", "tests", "nginx.conf"))
+       if err != nil {
+               return err
+       }
+       conf := regexp.MustCompile(`{{.*?}}`).ReplaceAllStringFunc(string(tmpl), func(src string) string {
+               if len(src) < 4 {
+                       return src
+               }
+               return vars[src[2:len(src)-2]]
+       })
+       conffile := filepath.Join(super.tempdir, "nginx.conf")
+       err = ioutil.WriteFile(conffile, []byte(conf), 0755)
+       if err != nil {
+               return err
+       }
+       nginx := "nginx"
+       if _, err := exec.LookPath(nginx); err != nil {
+               for _, dir := range []string{"/sbin", "/usr/sbin", "/usr/local/sbin"} {
+                       if _, err = os.Stat(dir + "/nginx"); err == nil {
+                               nginx = dir + "/nginx"
+                               break
+                       }
+               }
+       }
+       super.waitShutdown.Add(1)
+       go func() {
+               defer super.waitShutdown.Done()
+               fail(super.RunProgram(ctx, ".", nil, nil, nginx,
+                       "-g", "error_log stderr info;",
+                       "-g", "pid "+filepath.Join(super.tempdir, "nginx.pid")+";",
+                       "-c", conffile))
+       }()
+       return waitForConnect(ctx, super.cluster.Services.Controller.ExternalURL.Host)
+}
diff --git a/lib/boot/passenger.go b/lib/boot/passenger.go
new file mode 100644 (file)
index 0000000..7ebb36e
--- /dev/null
@@ -0,0 +1,120 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+       "bytes"
+       "context"
+       "fmt"
+       "os"
+       "path/filepath"
+       "strings"
+       "sync"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// Don't trust "passenger-config" (or "bundle install") to handle
+// concurrent installs.
+var passengerInstallMutex sync.Mutex
+
+// Install a Rails application's dependencies, including phusion
+// passenger.
+type installPassenger struct {
+       src     string
+       depends []supervisedTask
+}
+
+func (runner installPassenger) String() string {
+       return "installPassenger:" + runner.src
+}
+
+func (runner installPassenger) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+       err := super.wait(ctx, runner.depends...)
+       if err != nil {
+               return err
+       }
+
+       passengerInstallMutex.Lock()
+       defer passengerInstallMutex.Unlock()
+
+       var buf bytes.Buffer
+       err = super.RunProgram(ctx, runner.src, &buf, nil, "gem", "list", "--details", "bundler")
+       if err != nil {
+               return err
+       }
+       for _, version := range []string{"1.11.0", "1.17.3", "2.0.2"} {
+               if !strings.Contains(buf.String(), "("+version+")") {
+                       err = super.RunProgram(ctx, runner.src, nil, nil, "gem", "install", "--user", "bundler:1.11", "bundler:1.17.3", "bundler:2.0.2")
+                       if err != nil {
+                               return err
+                       }
+                       break
+               }
+       }
+       err = super.RunProgram(ctx, runner.src, nil, nil, "bundle", "install", "--jobs", "4", "--path", filepath.Join(os.Getenv("HOME"), ".gem"))
+       if err != nil {
+               return err
+       }
+       err = super.RunProgram(ctx, runner.src, nil, nil, "bundle", "exec", "passenger-config", "build-native-support")
+       if err != nil {
+               return err
+       }
+       err = super.RunProgram(ctx, runner.src, nil, nil, "bundle", "exec", "passenger-config", "install-standalone-runtime")
+       if err != nil {
+               return err
+       }
+       err = super.RunProgram(ctx, runner.src, nil, nil, "bundle", "exec", "passenger-config", "validate-install")
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+type runPassenger struct {
+       src     string
+       svc     arvados.Service
+       depends []supervisedTask
+}
+
+func (runner runPassenger) String() string {
+       return "runPassenger:" + runner.src
+}
+
+func (runner runPassenger) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+       err := super.wait(ctx, runner.depends...)
+       if err != nil {
+               return err
+       }
+       port, err := internalPort(runner.svc)
+       if err != nil {
+               return fmt.Errorf("bug: no internalPort for %q: %v (%#v)", runner, err, runner.svc)
+       }
+       loglevel := "4"
+       if lvl, ok := map[string]string{
+               "debug":   "5",
+               "info":    "4",
+               "warn":    "2",
+               "warning": "2",
+               "error":   "1",
+               "fatal":   "0",
+               "panic":   "0",
+       }[super.cluster.SystemLogs.LogLevel]; ok {
+               loglevel = lvl
+       }
+       super.waitShutdown.Add(1)
+       go func() {
+               defer super.waitShutdown.Done()
+               err = super.RunProgram(ctx, runner.src, nil, nil, "bundle", "exec",
+                       "passenger", "start",
+                       "-p", port,
+                       "--log-file", "/dev/stderr",
+                       "--log-level", loglevel,
+                       "--no-friendly-error-pages",
+                       "--pid-file", filepath.Join(super.tempdir, "passenger."+strings.Replace(runner.src, "/", "_", -1)+".pid"))
+               fail(err)
+       }()
+       return nil
+}
diff --git a/lib/boot/postgresql.go b/lib/boot/postgresql.go
new file mode 100644 (file)
index 0000000..df98904
--- /dev/null
@@ -0,0 +1,104 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+       "bytes"
+       "context"
+       "database/sql"
+       "fmt"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "strings"
+       "time"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "github.com/lib/pq"
+)
+
+// Run a postgresql server in a private data directory. Set up a db
+// user, database, and TCP listener that match the supervisor's
+// configured database connection info.
+type runPostgreSQL struct{}
+
+func (runPostgreSQL) String() string {
+       return "postgresql"
+}
+
+func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+       err := super.wait(ctx, createCertificates{})
+       if err != nil {
+               return err
+       }
+
+       buf := bytes.NewBuffer(nil)
+       err = super.RunProgram(ctx, super.tempdir, buf, nil, "pg_config", "--bindir")
+       if err != nil {
+               return err
+       }
+       bindir := strings.TrimSpace(buf.String())
+
+       datadir := filepath.Join(super.tempdir, "pgdata")
+       err = os.Mkdir(datadir, 0755)
+       if err != nil {
+               return err
+       }
+       err = super.RunProgram(ctx, super.tempdir, nil, nil, filepath.Join(bindir, "initdb"), "-D", datadir)
+       if err != nil {
+               return err
+       }
+
+       err = super.RunProgram(ctx, super.tempdir, nil, nil, "cp", "server.crt", "server.key", datadir)
+       if err != nil {
+               return err
+       }
+
+       port := super.cluster.PostgreSQL.Connection["port"]
+
+       super.waitShutdown.Add(1)
+       go func() {
+               defer super.waitShutdown.Done()
+               fail(super.RunProgram(ctx, super.tempdir, nil, nil, filepath.Join(bindir, "postgres"),
+                       "-l",          // enable ssl
+                       "-D", datadir, // data dir
+                       "-k", datadir, // socket dir
+                       "-p", super.cluster.PostgreSQL.Connection["port"],
+               ))
+       }()
+
+       for {
+               if ctx.Err() != nil {
+                       return ctx.Err()
+               }
+               if exec.CommandContext(ctx, "pg_isready", "--timeout=10", "--host="+super.cluster.PostgreSQL.Connection["host"], "--port="+port).Run() == nil {
+                       break
+               }
+               time.Sleep(time.Second / 2)
+       }
+       db, err := sql.Open("postgres", arvados.PostgreSQLConnection{
+               "host":   datadir,
+               "port":   port,
+               "dbname": "postgres",
+       }.String())
+       if err != nil {
+               return fmt.Errorf("db open failed: %s", err)
+       }
+       defer db.Close()
+       conn, err := db.Conn(ctx)
+       if err != nil {
+               return fmt.Errorf("db conn failed: %s", err)
+       }
+       defer conn.Close()
+       _, err = conn.ExecContext(ctx, `CREATE USER `+pq.QuoteIdentifier(super.cluster.PostgreSQL.Connection["user"])+` WITH SUPERUSER ENCRYPTED PASSWORD `+pq.QuoteLiteral(super.cluster.PostgreSQL.Connection["password"]))
+       if err != nil {
+               return fmt.Errorf("createuser failed: %s", err)
+       }
+       _, err = conn.ExecContext(ctx, `CREATE DATABASE `+pq.QuoteIdentifier(super.cluster.PostgreSQL.Connection["dbname"]))
+       if err != nil {
+               return fmt.Errorf("createdb failed: %s", err)
+       }
+       return nil
+}
diff --git a/lib/boot/seed.go b/lib/boot/seed.go
new file mode 100644 (file)
index 0000000..650c836
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+       "context"
+)
+
+// Populate a blank database with arvados tables and seed rows.
+type seedDatabase struct{}
+
+func (seedDatabase) String() string {
+       return "seedDatabase"
+}
+
+func (seedDatabase) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+       err := super.wait(ctx, runPostgreSQL{}, installPassenger{src: "services/api"})
+       if err != nil {
+               return err
+       }
+       err = super.RunProgram(ctx, "services/api", nil, nil, "bundle", "exec", "rake", "db:setup")
+       if err != nil {
+               return err
+       }
+       return nil
+}
diff --git a/lib/boot/service.go b/lib/boot/service.go
new file mode 100644 (file)
index 0000000..5afacfe
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+       "context"
+       "errors"
+       "path/filepath"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// Run a service using the arvados-server binary.
+//
+// In future this will bring up the service in the current process,
+// but for now (at least until the subcommand handlers get a shutdown
+// mechanism) it starts a child process using the arvados-server
+// binary, which the supervisor is assumed to have installed in
+// {super.tempdir}/bin/.
+type runServiceCommand struct {
+       name    string           // arvados-server subcommand, e.g., "controller"
+       svc     arvados.Service  // cluster.Services.* entry with the desired InternalURLs
+       depends []supervisedTask // wait for these tasks before starting
+}
+
+func (runner runServiceCommand) String() string {
+       return runner.name
+}
+
+func (runner runServiceCommand) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+       binfile := filepath.Join(super.tempdir, "bin", "arvados-server")
+       err := super.RunProgram(ctx, super.tempdir, nil, nil, binfile, "-version")
+       if err != nil {
+               return err
+       }
+       super.wait(ctx, runner.depends...)
+       for u := range runner.svc.InternalURLs {
+               u := u
+               if islocal, err := addrIsLocal(u.Host); err != nil {
+                       return err
+               } else if !islocal {
+                       continue
+               }
+               super.waitShutdown.Add(1)
+               go func() {
+                       defer super.waitShutdown.Done()
+                       fail(super.RunProgram(ctx, super.tempdir, nil, []string{"ARVADOS_SERVICE_INTERNAL_URL=" + u.String()}, binfile, runner.name, "-config", super.configfile))
+               }()
+       }
+       return nil
+}
+
+// Run a Go service that isn't bundled in arvados-server.
+type runGoProgram struct {
+       src     string           // source dir, e.g., "services/keepproxy"
+       svc     arvados.Service  // cluster.Services.* entry with the desired InternalURLs
+       depends []supervisedTask // wait for these tasks before starting
+}
+
+func (runner runGoProgram) String() string {
+       _, basename := filepath.Split(runner.src)
+       return basename
+}
+
+func (runner runGoProgram) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+       if len(runner.svc.InternalURLs) == 0 {
+               return errors.New("bug: runGoProgram needs non-empty svc.InternalURLs")
+       }
+
+       binfile, err := super.installGoProgram(ctx, runner.src)
+       if err != nil {
+               return err
+       }
+       if ctx.Err() != nil {
+               return ctx.Err()
+       }
+
+       err = super.RunProgram(ctx, super.tempdir, nil, nil, binfile, "-version")
+       if err != nil {
+               return err
+       }
+
+       super.wait(ctx, runner.depends...)
+       for u := range runner.svc.InternalURLs {
+               u := u
+               if islocal, err := addrIsLocal(u.Host); err != nil {
+                       return err
+               } else if !islocal {
+                       continue
+               }
+               super.waitShutdown.Add(1)
+               go func() {
+                       defer super.waitShutdown.Done()
+                       fail(super.RunProgram(ctx, super.tempdir, nil, []string{"ARVADOS_SERVICE_INTERNAL_URL=" + u.String()}, binfile))
+               }()
+       }
+       return nil
+}
diff --git a/lib/boot/supervisor.go b/lib/boot/supervisor.go
new file mode 100644 (file)
index 0000000..e10fa23
--- /dev/null
@@ -0,0 +1,695 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+       "bytes"
+       "context"
+       "crypto/rand"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net"
+       "os"
+       "os/exec"
+       "os/signal"
+       "os/user"
+       "path/filepath"
+       "strings"
+       "sync"
+       "syscall"
+       "time"
+
+       "git.arvados.org/arvados.git/lib/service"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/health"
+       "github.com/sirupsen/logrus"
+)
+
+type Supervisor struct {
+       SourcePath           string // e.g., /home/username/src/arvados
+       SourceVersion        string // e.g., acbd1324...
+       ClusterType          string // e.g., production
+       ListenHost           string // e.g., localhost
+       ControllerAddr       string // e.g., 127.0.0.1:8000
+       OwnTemporaryDatabase bool
+       Stderr               io.Writer
+
+       logger  logrus.FieldLogger
+       cluster *arvados.Cluster
+
+       ctx           context.Context
+       cancel        context.CancelFunc
+       done          chan struct{}
+       healthChecker *health.Aggregator
+       tasksReady    map[string]chan bool
+       waitShutdown  sync.WaitGroup
+
+       tempdir    string
+       configfile string
+       environ    []string // for child processes
+}
+
+func (super *Supervisor) Start(ctx context.Context, cfg *arvados.Config) {
+       super.ctx, super.cancel = context.WithCancel(ctx)
+       super.done = make(chan struct{})
+
+       go func() {
+               sigch := make(chan os.Signal)
+               signal.Notify(sigch, syscall.SIGINT, syscall.SIGTERM)
+               defer signal.Stop(sigch)
+               go func() {
+                       for sig := range sigch {
+                               super.logger.WithField("signal", sig).Info("caught signal")
+                               super.cancel()
+                       }
+               }()
+
+               err := super.run(cfg)
+               if err != nil {
+                       fmt.Fprintln(super.Stderr, err)
+               }
+               close(super.done)
+       }()
+}
+
+func (super *Supervisor) run(cfg *arvados.Config) error {
+       cwd, err := os.Getwd()
+       if err != nil {
+               return err
+       }
+       if !strings.HasPrefix(super.SourcePath, "/") {
+               super.SourcePath = filepath.Join(cwd, super.SourcePath)
+       }
+       super.SourcePath, err = filepath.EvalSymlinks(super.SourcePath)
+       if err != nil {
+               return err
+       }
+
+       super.tempdir, err = ioutil.TempDir("", "arvados-server-boot-")
+       if err != nil {
+               return err
+       }
+       defer os.RemoveAll(super.tempdir)
+       if err := os.Mkdir(filepath.Join(super.tempdir, "bin"), 0777); err != nil {
+               return err
+       }
+
+       // Fill in any missing config keys, and write the resulting
+       // config in the temp dir for child services to use.
+       err = super.autofillConfig(cfg, super.logger)
+       if err != nil {
+               return err
+       }
+       conffile, err := os.OpenFile(filepath.Join(super.tempdir, "config.yml"), os.O_CREATE|os.O_WRONLY, 0777)
+       if err != nil {
+               return err
+       }
+       defer conffile.Close()
+       err = json.NewEncoder(conffile).Encode(cfg)
+       if err != nil {
+               return err
+       }
+       err = conffile.Close()
+       if err != nil {
+               return err
+       }
+       super.configfile = conffile.Name()
+
+       super.environ = os.Environ()
+       super.cleanEnv()
+       super.setEnv("ARVADOS_CONFIG", super.configfile)
+       super.setEnv("RAILS_ENV", super.ClusterType)
+       super.setEnv("TMPDIR", super.tempdir)
+       super.prependEnv("PATH", filepath.Join(super.tempdir, "bin")+":")
+
+       super.cluster, err = cfg.GetCluster("")
+       if err != nil {
+               return err
+       }
+       // Now that we have the config, replace the bootstrap logger
+       // with a new one according to the logging config.
+       loglevel := super.cluster.SystemLogs.LogLevel
+       if s := os.Getenv("ARVADOS_DEBUG"); s != "" && s != "0" {
+               loglevel = "debug"
+       }
+       super.logger = ctxlog.New(super.Stderr, super.cluster.SystemLogs.Format, loglevel).WithFields(logrus.Fields{
+               "PID": os.Getpid(),
+       })
+
+       if super.SourceVersion == "" {
+               // Find current source tree version.
+               var buf bytes.Buffer
+               err = super.RunProgram(super.ctx, ".", &buf, nil, "git", "diff", "--shortstat")
+               if err != nil {
+                       return err
+               }
+               dirty := buf.Len() > 0
+               buf.Reset()
+               err = super.RunProgram(super.ctx, ".", &buf, nil, "git", "log", "-n1", "--format=%H")
+               if err != nil {
+                       return err
+               }
+               super.SourceVersion = strings.TrimSpace(buf.String())
+               if dirty {
+                       super.SourceVersion += "+uncommitted"
+               }
+       } else {
+               return errors.New("specifying a version to run is not yet supported")
+       }
+
+       _, err = super.installGoProgram(super.ctx, "cmd/arvados-server")
+       if err != nil {
+               return err
+       }
+       err = super.setupRubyEnv()
+       if err != nil {
+               return err
+       }
+
+       tasks := []supervisedTask{
+               createCertificates{},
+               runPostgreSQL{},
+               runNginx{},
+               runServiceCommand{name: "controller", svc: super.cluster.Services.Controller, depends: []supervisedTask{runPostgreSQL{}}},
+               runGoProgram{src: "services/arv-git-httpd", svc: super.cluster.Services.GitHTTP},
+               runGoProgram{src: "services/health", svc: super.cluster.Services.Health},
+               runGoProgram{src: "services/keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
+               runGoProgram{src: "services/keepstore", svc: super.cluster.Services.Keepstore},
+               runGoProgram{src: "services/keep-web", svc: super.cluster.Services.WebDAV},
+               runGoProgram{src: "services/ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{runPostgreSQL{}}},
+               installPassenger{src: "services/api"},
+               runPassenger{src: "services/api", svc: super.cluster.Services.RailsAPI, depends: []supervisedTask{createCertificates{}, runPostgreSQL{}, installPassenger{src: "services/api"}}},
+               installPassenger{src: "apps/workbench", depends: []supervisedTask{installPassenger{src: "services/api"}}}, // dependency ensures workbench doesn't delay api startup
+               runPassenger{src: "apps/workbench", svc: super.cluster.Services.Workbench1, depends: []supervisedTask{installPassenger{src: "apps/workbench"}}},
+               seedDatabase{},
+       }
+       if super.ClusterType != "test" {
+               tasks = append(tasks,
+                       runServiceCommand{name: "dispatch-cloud", svc: super.cluster.Services.Controller},
+                       runGoProgram{src: "services/keep-balance"},
+               )
+       }
+       super.tasksReady = map[string]chan bool{}
+       for _, task := range tasks {
+               super.tasksReady[task.String()] = make(chan bool)
+       }
+       for _, task := range tasks {
+               task := task
+               fail := func(err error) {
+                       if super.ctx.Err() != nil {
+                               return
+                       }
+                       super.cancel()
+                       super.logger.WithField("task", task.String()).WithError(err).Error("task failed")
+               }
+               go func() {
+                       super.logger.WithField("task", task.String()).Info("starting")
+                       err := task.Run(super.ctx, fail, super)
+                       if err != nil {
+                               fail(err)
+                               return
+                       }
+                       close(super.tasksReady[task.String()])
+               }()
+       }
+       err = super.wait(super.ctx, tasks...)
+       if err != nil {
+               return err
+       }
+       super.logger.Info("all startup tasks are complete; starting health checks")
+       super.healthChecker = &health.Aggregator{Cluster: super.cluster}
+       <-super.ctx.Done()
+       super.logger.Info("shutting down")
+       super.waitShutdown.Wait()
+       return super.ctx.Err()
+}
+
+func (super *Supervisor) wait(ctx context.Context, tasks ...supervisedTask) error {
+       for _, task := range tasks {
+               ch, ok := super.tasksReady[task.String()]
+               if !ok {
+                       return fmt.Errorf("no such task: %s", task)
+               }
+               super.logger.WithField("task", task.String()).Info("waiting")
+               select {
+               case <-ch:
+                       super.logger.WithField("task", task.String()).Info("ready")
+               case <-ctx.Done():
+                       super.logger.WithField("task", task.String()).Info("task was never ready")
+                       return ctx.Err()
+               }
+       }
+       return nil
+}
+
+func (super *Supervisor) Stop() {
+       super.cancel()
+       <-super.done
+}
+
+func (super *Supervisor) WaitReady() (*arvados.URL, bool) {
+       ticker := time.NewTicker(time.Second)
+       defer ticker.Stop()
+       for waiting := "all"; waiting != ""; {
+               select {
+               case <-ticker.C:
+               case <-super.ctx.Done():
+                       return nil, false
+               }
+               if super.healthChecker == nil {
+                       // not set up yet
+                       continue
+               }
+               resp := super.healthChecker.ClusterHealth()
+               // The overall health check (resp.Health=="OK") might
+               // never pass due to missing components (like
+               // arvados-dispatch-cloud in a test cluster), so
+               // instead we wait for all configured components to
+               // pass.
+               waiting = ""
+               for target, check := range resp.Checks {
+                       if check.Health != "OK" {
+                               waiting += " " + target
+                       }
+               }
+               if waiting != "" {
+                       super.logger.WithField("targets", waiting[1:]).Info("waiting")
+               }
+       }
+       u := super.cluster.Services.Controller.ExternalURL
+       return &u, true
+}
+
+func (super *Supervisor) prependEnv(key, prepend string) {
+       for i, s := range super.environ {
+               if strings.HasPrefix(s, key+"=") {
+                       super.environ[i] = key + "=" + prepend + s[len(key)+1:]
+                       return
+               }
+       }
+       super.environ = append(super.environ, key+"="+prepend)
+}
+
+var cleanEnvPrefixes = []string{
+       "GEM_HOME=",
+       "GEM_PATH=",
+       "ARVADOS_",
+}
+
+func (super *Supervisor) cleanEnv() {
+       var cleaned []string
+       for _, s := range super.environ {
+               drop := false
+               for _, p := range cleanEnvPrefixes {
+                       if strings.HasPrefix(s, p) {
+                               drop = true
+                               break
+                       }
+               }
+               if !drop {
+                       cleaned = append(cleaned, s)
+               }
+       }
+       super.environ = cleaned
+}
+
+func (super *Supervisor) setEnv(key, val string) {
+       for i, s := range super.environ {
+               if strings.HasPrefix(s, key+"=") {
+                       super.environ[i] = key + "=" + val
+                       return
+               }
+       }
+       super.environ = append(super.environ, key+"="+val)
+}
+
+// Remove all but the first occurrence of each env var.
+func dedupEnv(in []string) []string {
+       saw := map[string]bool{}
+       var out []string
+       for _, kv := range in {
+               if split := strings.Index(kv, "="); split < 1 {
+                       panic("invalid environment var: " + kv)
+               } else if saw[kv[:split]] {
+                       continue
+               } else {
+                       saw[kv[:split]] = true
+                       out = append(out, kv)
+               }
+       }
+       return out
+}
+
+func (super *Supervisor) installGoProgram(ctx context.Context, srcpath string) (string, error) {
+       _, basename := filepath.Split(srcpath)
+       bindir := filepath.Join(super.tempdir, "bin")
+       binfile := filepath.Join(bindir, basename)
+       err := super.RunProgram(ctx, filepath.Join(super.SourcePath, srcpath), nil, []string{"GOBIN=" + bindir}, "go", "install", "-ldflags", "-X git.arvados.org/arvados.git/lib/cmd.version="+super.SourceVersion+" -X main.version="+super.SourceVersion)
+       return binfile, err
+}
+
+func (super *Supervisor) setupRubyEnv() error {
+       cmd := exec.Command("gem", "env", "gempath")
+       cmd.Env = super.environ
+       buf, err := cmd.Output() // /var/lib/arvados/.gem/ruby/2.5.0/bin:...
+       if err != nil || len(buf) == 0 {
+               return fmt.Errorf("gem env gempath: %v", err)
+       }
+       gempath := string(bytes.Split(buf, []byte{':'})[0])
+       super.prependEnv("PATH", gempath+"/bin:")
+       super.setEnv("GEM_HOME", gempath)
+       super.setEnv("GEM_PATH", gempath)
+       // Passenger install doesn't work unless $HOME is ~user
+       u, err := user.Current()
+       if err != nil {
+               return err
+       }
+       super.setEnv("HOME", u.HomeDir)
+       return nil
+}
+
+func (super *Supervisor) lookPath(prog string) string {
+       for _, val := range super.environ {
+               if strings.HasPrefix(val, "PATH=") {
+                       for _, dir := range filepath.SplitList(val[5:]) {
+                               path := filepath.Join(dir, prog)
+                               if fi, err := os.Stat(path); err == nil && fi.Mode()&0111 != 0 {
+                                       return path
+                               }
+                       }
+               }
+       }
+       return prog
+}
+
+// Run prog with args, using dir as working directory. If ctx is
+// cancelled while the child is running, RunProgram terminates the
+// child, waits for it to exit, then returns.
+//
+// Child's environment will have our env vars, plus any given in env.
+//
+// Child's stdout will be written to output if non-nil, otherwise the
+// boot command's stderr.
+func (super *Supervisor) RunProgram(ctx context.Context, dir string, output io.Writer, env []string, prog string, args ...string) error {
+       cmdline := fmt.Sprintf("%s", append([]string{prog}, args...))
+       super.logger.WithField("command", cmdline).WithField("dir", dir).Info("executing")
+
+       logprefix := strings.TrimPrefix(prog, super.tempdir+"/bin/")
+       if logprefix == "bundle" && len(args) > 2 && args[0] == "exec" {
+               logprefix = args[1]
+       } else if logprefix == "arvados-server" && len(args) > 1 {
+               logprefix = args[0]
+       }
+       if !strings.HasPrefix(dir, "/") {
+               logprefix = dir + ": " + logprefix
+       }
+
+       cmd := exec.Command(super.lookPath(prog), args...)
+       stdout, err := cmd.StdoutPipe()
+       if err != nil {
+               return err
+       }
+       stderr, err := cmd.StderrPipe()
+       if err != nil {
+               return err
+       }
+       logwriter := &service.LogPrefixer{Writer: super.Stderr, Prefix: []byte("[" + logprefix + "] ")}
+       var copiers sync.WaitGroup
+       copiers.Add(1)
+       go func() {
+               io.Copy(logwriter, stderr)
+               copiers.Done()
+       }()
+       copiers.Add(1)
+       go func() {
+               if output == nil {
+                       io.Copy(logwriter, stdout)
+               } else {
+                       io.Copy(output, stdout)
+               }
+               copiers.Done()
+       }()
+
+       if strings.HasPrefix(dir, "/") {
+               cmd.Dir = dir
+       } else {
+               cmd.Dir = filepath.Join(super.SourcePath, dir)
+       }
+       env = append([]string(nil), env...)
+       env = append(env, super.environ...)
+       cmd.Env = dedupEnv(env)
+
+       exited := false
+       defer func() { exited = true }()
+       go func() {
+               <-ctx.Done()
+               log := ctxlog.FromContext(ctx).WithFields(logrus.Fields{"dir": dir, "cmdline": cmdline})
+               for !exited {
+                       if cmd.Process == nil {
+                               log.Debug("waiting for child process to start")
+                               time.Sleep(time.Second / 2)
+                       } else {
+                               log.WithField("PID", cmd.Process.Pid).Debug("sending SIGTERM")
+                               cmd.Process.Signal(syscall.SIGTERM)
+                               time.Sleep(5 * time.Second)
+                               if !exited {
+                                       stdout.Close()
+                                       stderr.Close()
+                                       log.WithField("PID", cmd.Process.Pid).Warn("still waiting for child process to exit 5s after SIGTERM")
+                               }
+                       }
+               }
+       }()
+
+       err = cmd.Start()
+       if err != nil {
+               return err
+       }
+       copiers.Wait()
+       err = cmd.Wait()
+       if ctx.Err() != nil {
+               // Return "context canceled", instead of the "killed"
+               // error that was probably caused by the context being
+               // canceled.
+               return ctx.Err()
+       } else if err != nil {
+               return fmt.Errorf("%s: error: %v", cmdline, err)
+       }
+       return nil
+}
+
+func (super *Supervisor) autofillConfig(cfg *arvados.Config, log logrus.FieldLogger) error {
+       cluster, err := cfg.GetCluster("")
+       if err != nil {
+               return err
+       }
+       usedPort := map[string]bool{}
+       nextPort := func() string {
+               for {
+                       port, err := availablePort(super.ListenHost)
+                       if err != nil {
+                               panic(err)
+                       }
+                       if usedPort[port] {
+                               continue
+                       }
+                       usedPort[port] = true
+                       return port
+               }
+       }
+       if cluster.Services.Controller.ExternalURL.Host == "" {
+               h, p, err := net.SplitHostPort(super.ControllerAddr)
+               if err != nil {
+                       return err
+               }
+               if h == "" {
+                       h = super.ListenHost
+               }
+               if p == "0" {
+                       p, err = availablePort(h)
+                       if err != nil {
+                               return err
+                       }
+                       usedPort[p] = true
+               }
+               cluster.Services.Controller.ExternalURL = arvados.URL{Scheme: "https", Host: net.JoinHostPort(h, p)}
+       }
+       for _, svc := range []*arvados.Service{
+               &cluster.Services.Controller,
+               &cluster.Services.DispatchCloud,
+               &cluster.Services.GitHTTP,
+               &cluster.Services.Health,
+               &cluster.Services.Keepproxy,
+               &cluster.Services.Keepstore,
+               &cluster.Services.RailsAPI,
+               &cluster.Services.WebDAV,
+               &cluster.Services.WebDAVDownload,
+               &cluster.Services.Websocket,
+               &cluster.Services.Workbench1,
+       } {
+               if svc == &cluster.Services.DispatchCloud && super.ClusterType == "test" {
+                       continue
+               }
+               if svc.ExternalURL.Host == "" && (svc == &cluster.Services.Controller ||
+                       svc == &cluster.Services.GitHTTP ||
+                       svc == &cluster.Services.Keepproxy ||
+                       svc == &cluster.Services.WebDAV ||
+                       svc == &cluster.Services.WebDAVDownload ||
+                       svc == &cluster.Services.Websocket ||
+                       svc == &cluster.Services.Workbench1) {
+                       svc.ExternalURL = arvados.URL{Scheme: "https", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort())}
+               }
+               if len(svc.InternalURLs) == 0 {
+                       svc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{
+                               arvados.URL{Scheme: "http", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort())}: arvados.ServiceInstance{},
+                       }
+               }
+       }
+       if cluster.SystemRootToken == "" {
+               cluster.SystemRootToken = randomHexString(64)
+       }
+       if cluster.ManagementToken == "" {
+               cluster.ManagementToken = randomHexString(64)
+       }
+       if cluster.API.RailsSessionSecretToken == "" {
+               cluster.API.RailsSessionSecretToken = randomHexString(64)
+       }
+       if cluster.Collections.BlobSigningKey == "" {
+               cluster.Collections.BlobSigningKey = randomHexString(64)
+       }
+       if super.ClusterType != "production" && cluster.Containers.DispatchPrivateKey == "" {
+               buf, err := ioutil.ReadFile(filepath.Join(super.SourcePath, "lib", "dispatchcloud", "test", "sshkey_dispatch"))
+               if err != nil {
+                       return err
+               }
+               cluster.Containers.DispatchPrivateKey = string(buf)
+       }
+       if super.ClusterType != "production" {
+               cluster.TLS.Insecure = true
+       }
+       if super.ClusterType == "test" {
+               // Add a second keepstore process.
+               cluster.Services.Keepstore.InternalURLs[arvados.URL{Scheme: "http", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort())}] = arvados.ServiceInstance{}
+
+               // Create a directory-backed volume for each keepstore
+               // process.
+               cluster.Volumes = map[string]arvados.Volume{}
+               for url := range cluster.Services.Keepstore.InternalURLs {
+                       volnum := len(cluster.Volumes)
+                       datadir := fmt.Sprintf("%s/keep%d.data", super.tempdir, volnum)
+                       if _, err = os.Stat(datadir + "/."); err == nil {
+                       } else if !os.IsNotExist(err) {
+                               return err
+                       } else if err = os.Mkdir(datadir, 0777); err != nil {
+                               return err
+                       }
+                       cluster.Volumes[fmt.Sprintf(cluster.ClusterID+"-nyw5e-%015d", volnum)] = arvados.Volume{
+                               Driver:           "Directory",
+                               DriverParameters: json.RawMessage(fmt.Sprintf(`{"Root":%q}`, datadir)),
+                               AccessViaHosts: map[arvados.URL]arvados.VolumeAccess{
+                                       url: {},
+                               },
+                       }
+               }
+       }
+       if super.OwnTemporaryDatabase {
+               cluster.PostgreSQL.Connection = arvados.PostgreSQLConnection{
+                       "client_encoding": "utf8",
+                       "host":            "localhost",
+                       "port":            nextPort(),
+                       "dbname":          "arvados_test",
+                       "user":            "arvados",
+                       "password":        "insecure_arvados_test",
+               }
+       }
+
+       cfg.Clusters[cluster.ClusterID] = *cluster
+       return nil
+}
+
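+// addrIsLocal reports whether addr can be bound on this host,
+// determined by attempting to listen on it.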
+func addrIsLocal(addr string) (bool, error) {
+       listener, err := net.Listen("tcp", addr)
+       if err == nil {
+               listener.Close()
+               return true, nil
+       } else if strings.Contains(err.Error(), "cannot assign requested address") {
+               return false, nil
+       } else {
+               return false, err
+       }
+}
+
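+// randomHexString returns a string of the given length consisting of
+// cryptographically random hex digits.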
+func randomHexString(chars int) string {
+       b := make([]byte, chars/2)
+       _, err := rand.Read(b)
+       if err != nil {
+               panic(err)
+       }
+       return fmt.Sprintf("%x", b)
+}
+
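+// internalPort returns the port of the service's single InternalURL,
+// falling back to the default port for the URL's scheme.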
+func internalPort(svc arvados.Service) (string, error) {
+       if len(svc.InternalURLs) > 1 {
+               return "", errors.New("internalPort() doesn't work with multiple InternalURLs")
+       }
+       for u := range svc.InternalURLs {
+               if _, p, err := net.SplitHostPort(u.Host); err != nil {
+                       return "", err
+               } else if p != "" {
+                       return p, nil
+               } else if u.Scheme == "https" {
+                       return "443", nil
+               } else {
+                       return "80", nil
+               }
+       }
+       return "", fmt.Errorf("service has no InternalURLs")
+}
+
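+// externalPort returns the port of the service's ExternalURL, falling
+// back to the default port for the URL's scheme.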
+func externalPort(svc arvados.Service) (string, error) {
+       if _, p, err := net.SplitHostPort(svc.ExternalURL.Host); err != nil {
+               return "", err
+       } else if p != "" {
+               return p, nil
+       } else if svc.ExternalURL.Scheme == "https" {
+               return "443", nil
+       } else {
+               return "80", nil
+       }
+}
+
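+// availablePort returns a TCP port that is currently free on host,
+// found by listening on port 0 and then closing the listener.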
+func availablePort(host string) (string, error) {
+       ln, err := net.Listen("tcp", net.JoinHostPort(host, "0"))
+       if err != nil {
+               return "", err
+       }
+       defer ln.Close()
+       _, port, err := net.SplitHostPort(ln.Addr().String())
+       if err != nil {
+               return "", err
+       }
+       return port, nil
+}
+
+// waitForConnect tries to connect to addr until a connection
+// succeeds, then returns nil. It gives up and returns ctx.Err() if
+// ctx is canceled first.
+func waitForConnect(ctx context.Context, addr string) error {
+       dialer := net.Dialer{Timeout: time.Second}
+       for ctx.Err() == nil {
+               conn, err := dialer.DialContext(ctx, "tcp", addr)
+               if err != nil {
+                       time.Sleep(time.Second / 10)
+                       continue
+               }
+               conn.Close()
+               return nil
+       }
+       return ctx.Err()
+}
index 41af15073281b51d7a9087195a7ad9aacb9c0cc2..59dabbb26d6f6e093bc1ad94b0559771269c6ed5 100644 (file)
@@ -623,7 +623,7 @@ Clusters:
       # (experimental) cloud dispatcher for executing containers on
       # worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
       # and ends with "\n-----END RSA PRIVATE KEY-----\n".
-      DispatchPrivateKey: none
+      DispatchPrivateKey: ""
 
       # Maximum time to wait for workers to come up before abandoning
       # stale locks from a previous dispatch process.
index 25fa89394a0ba9132b23a29126866c94f2d7bc08..2d8a487b7db2e8cd55f915404f2544351e909b1e 100644 (file)
@@ -629,7 +629,7 @@ Clusters:
       # (experimental) cloud dispatcher for executing containers on
       # worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
       # and ends with "\n-----END RSA PRIVATE KEY-----\n".
-      DispatchPrivateKey: none
+      DispatchPrivateKey: ""
 
       # Maximum time to wait for workers to come up before abandoning
       # stale locks from a previous dispatch process.
index 56f117ee781682aeb09119b448494f263d34046f..279b7a51d5d8d4e57f920721db10ace45268b1d2 100644 (file)
@@ -7,7 +7,6 @@ package federation
 import (
        "bytes"
        "context"
-       "crypto/md5"
        "encoding/json"
        "errors"
        "fmt"
@@ -35,7 +34,7 @@ func New(cluster *arvados.Cluster) *Conn {
        local := localdb.NewConn(cluster)
        remotes := map[string]backend{}
        for id, remote := range cluster.RemoteClusters {
-               if !remote.Proxy {
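+               // Skip self: requests for the local cluster are
+               // handled by the local backend, not proxied.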
+               if !remote.Proxy || id == cluster.ClusterID {
                        continue
                }
                conn := rpc.NewConn(id, &url.URL{Scheme: remote.Scheme, Host: remote.Host}, remote.Insecure, saltedTokenProvider(local, id))
@@ -169,26 +168,6 @@ func rewriteManifest(mt, remoteID string) string {
        })
 }
 
-// this could be in sdk/go/arvados
-func portableDataHash(mt string) string {
-       h := md5.New()
-       blkRe := regexp.MustCompile(`^ [0-9a-f]{32}\+\d+`)
-       size := 0
-       _ = regexp.MustCompile(` ?[^ ]*`).ReplaceAllFunc([]byte(mt), func(tok []byte) []byte {
-               if m := blkRe.Find(tok); m != nil {
-                       // write hash+size, ignore remaining block hints
-                       tok = m
-               }
-               n, err := h.Write(tok)
-               if err != nil {
-                       panic(err)
-               }
-               size += n
-               return nil
-       })
-       return fmt.Sprintf("%x+%d", h.Sum(nil), size)
-}
-
 func (conn *Conn) ConfigGet(ctx context.Context) (json.RawMessage, error) {
        var buf bytes.Buffer
        err := config.ExportJSON(&buf, conn.cluster)
@@ -269,7 +248,7 @@ func (conn *Conn) CollectionGet(ctx context.Context, options arvados.GetOptions)
                        // options.UUID is either hash+size or
                        // hash+size+hints; only hash+size need to
                        // match the computed PDH.
-                       if pdh := portableDataHash(c.ManifestText); pdh != options.UUID && !strings.HasPrefix(options.UUID, pdh+"+") {
+                       if pdh := arvados.PortableDataHash(c.ManifestText); pdh != options.UUID && !strings.HasPrefix(options.UUID, pdh+"+") {
                                err = httpErrorf(http.StatusBadGateway, "bad portable data hash %q received from remote %q (expected %q)", pdh, remoteID, options.UUID)
                                ctxlog.FromContext(ctx).Warn(err)
                                return err
diff --git a/lib/controller/integration_test.go b/lib/controller/integration_test.go
new file mode 100644 (file)
index 0000000..2adb581
--- /dev/null
@@ -0,0 +1,225 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "bytes"
+       "context"
+       "io"
+       "net"
+       "net/url"
+       "os"
+       "path/filepath"
+
+       "git.arvados.org/arvados.git/lib/boot"
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/lib/controller/rpc"
+       "git.arvados.org/arvados.git/lib/service"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadosclient"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/keepclient"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&IntegrationSuite{})
+
+type testCluster struct {
+       super         boot.Supervisor
+       config        arvados.Config
+       controllerURL *url.URL
+}
+
+type IntegrationSuite struct {
+       testClusters map[string]*testCluster
+}
+
+func (s *IntegrationSuite) SetUpSuite(c *check.C) {
+       if forceLegacyAPI14 {
+               c.Skip("heavy integration tests don't run with forceLegacyAPI14")
+               return
+       }
+
+       cwd, _ := os.Getwd()
+       s.testClusters = map[string]*testCluster{
+               "z1111": nil,
+               "z2222": nil,
+               "z3333": nil,
+       }
+       hostport := map[string]string{}
+       for id := range s.testClusters {
+               hostport[id] = func() string {
+                       // TODO: Instead of expecting random ports on
+                       // 127.0.0.11, 22, 33 to be race-safe, try
+                       // different 127.x.y.z until finding one that
+                       // isn't in use.
+                       ln, err := net.Listen("tcp", ":0")
+                       c.Assert(err, check.IsNil)
+                       ln.Close()
+                       _, port, err := net.SplitHostPort(ln.Addr().String())
+                       c.Assert(err, check.IsNil)
+                       return "127.0.0." + id[3:] + ":" + port
+               }()
+       }
+       for id := range s.testClusters {
+               yaml := `Clusters:
+  ` + id + `:
+    Services:
+      Controller:
+        ExternalURL: https://` + hostport[id] + `
+    TLS:
+      Insecure: true
+    Login:
+      # LoginCluster: z1111
+    SystemLogs:
+      Format: text
+    RemoteClusters:
+      z1111:
+        Host: ` + hostport["z1111"] + `
+        Scheme: https
+        Insecure: true
+        Proxy: true
+        ActivateUsers: true
+      z2222:
+        Host: ` + hostport["z2222"] + `
+        Scheme: https
+        Insecure: true
+        Proxy: true
+        ActivateUsers: true
+      z3333:
+        Host: ` + hostport["z3333"] + `
+        Scheme: https
+        Insecure: true
+        Proxy: true
+        ActivateUsers: true
+`
+               loader := config.NewLoader(bytes.NewBufferString(yaml), ctxlog.TestLogger(c))
+               loader.Path = "-"
+               loader.SkipLegacy = true
+               loader.SkipAPICalls = true
+               cfg, err := loader.Load()
+               c.Assert(err, check.IsNil)
+               s.testClusters[id] = &testCluster{
+                       super: boot.Supervisor{
+                               SourcePath:           filepath.Join(cwd, "..", ".."),
+                               ClusterType:          "test",
+                               ListenHost:           "127.0.0." + id[3:],
+                               ControllerAddr:       ":0",
+                               OwnTemporaryDatabase: true,
+                               Stderr:               &service.LogPrefixer{Writer: ctxlog.LogWriter(c.Log), Prefix: []byte("[" + id + "] ")},
+                       },
+                       config: *cfg,
+               }
+               s.testClusters[id].super.Start(context.Background(), &s.testClusters[id].config)
+       }
+       for _, tc := range s.testClusters {
+               au, ok := tc.super.WaitReady()
+               c.Assert(ok, check.Equals, true)
+               u := url.URL(*au)
+               tc.controllerURL = &u
+       }
+}
+
+func (s *IntegrationSuite) TearDownSuite(c *check.C) {
+       for _, c := range s.testClusters {
+               c.super.Stop()
+       }
+}
+
+func (s *IntegrationSuite) conn(clusterID string) *rpc.Conn {
+       return rpc.NewConn(clusterID, s.testClusters[clusterID].controllerURL, true, rpc.PassthroughTokenProvider)
+}
+
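+// clientsWithToken returns a context, arvados.Client, and KeepClient
+// that authenticate to the given cluster using the given token.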
+func (s *IntegrationSuite) clientsWithToken(clusterID string, token string) (context.Context, *arvados.Client, *keepclient.KeepClient) {
+       cl := s.testClusters[clusterID].config.Clusters[clusterID]
+       ctx := auth.NewContext(context.Background(), auth.NewCredentials(token))
+       ac, err := arvados.NewClientFromConfig(&cl)
+       if err != nil {
+               panic(err)
+       }
+       ac.AuthToken = token
+       arv, err := arvadosclient.New(ac)
+       if err != nil {
+               panic(err)
+       }
+       kc := keepclient.New(arv)
+       return ctx, ac, kc
+}
+
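+// userClients logs in a new test user on the given cluster, runs user
+// setup, optionally activates the account, and returns clients
+// authenticated as that user.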
+func (s *IntegrationSuite) userClients(c *check.C, conn *rpc.Conn, rootctx context.Context, clusterID string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient) {
+       login, err := conn.UserSessionCreate(rootctx, rpc.UserSessionCreateOptions{
+               ReturnTo: ",https://example.com",
+               AuthInfo: rpc.UserSessionAuthInfo{
+                       Email:     "user@example.com",
+                       FirstName: "Example",
+                       LastName:  "User",
+                       Username:  "example",
+               },
+       })
+       c.Assert(err, check.IsNil)
+       redirURL, err := url.Parse(login.RedirectLocation)
+       c.Assert(err, check.IsNil)
+       userToken := redirURL.Query().Get("api_token")
+       c.Logf("user token: %q", userToken)
+       ctx, ac, kc := s.clientsWithToken(clusterID, userToken)
+       user, err := conn.UserGetCurrent(ctx, arvados.GetOptions{})
+       c.Assert(err, check.IsNil)
+       _, err = conn.UserSetup(rootctx, arvados.UserSetupOptions{UUID: user.UUID})
+       c.Assert(err, check.IsNil)
+       if activate {
+               _, err = conn.UserActivate(rootctx, arvados.UserActivateOptions{UUID: user.UUID})
+               c.Assert(err, check.IsNil)
+               user, err = conn.UserGetCurrent(ctx, arvados.GetOptions{})
+               c.Assert(err, check.IsNil)
+               c.Logf("user UUID: %q", user.UUID)
+               if !user.IsActive {
+                       c.Fatalf("failed to activate user -- %#v", user)
+               }
+       }
+       return ctx, ac, kc
+}
+
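+// rootClients returns clients authenticated with the given cluster's
+// SystemRootToken.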
+func (s *IntegrationSuite) rootClients(clusterID string) (context.Context, *arvados.Client, *keepclient.KeepClient) {
+       return s.clientsWithToken(clusterID, s.testClusters[clusterID].config.Clusters[clusterID].SystemRootToken)
+}
+
+func (s *IntegrationSuite) TestGetCollectionByPDH(c *check.C) {
+       conn1 := s.conn("z1111")
+       rootctx1, _, _ := s.rootClients("z1111")
+       conn3 := s.conn("z3333")
+       userctx1, ac1, kc1 := s.userClients(c, conn1, rootctx1, "z1111", true)
+
+       // Create the collection to find its PDH (but don't save it
+       // anywhere yet)
+       var coll1 arvados.Collection
+       fs1, err := coll1.FileSystem(ac1, kc1)
+       c.Assert(err, check.IsNil)
+       f, err := fs1.OpenFile("test.txt", os.O_CREATE|os.O_RDWR, 0777)
+       c.Assert(err, check.IsNil)
+       _, err = io.WriteString(f, "IntegrationSuite.TestGetCollectionByPDH")
+       c.Assert(err, check.IsNil)
+       err = f.Close()
+       c.Assert(err, check.IsNil)
+       mtxt, err := fs1.MarshalManifest(".")
+       c.Assert(err, check.IsNil)
+       pdh := arvados.PortableDataHash(mtxt)
+
+       // Looking up the PDH before saving returns 404 if cycle
+       // detection is working.
+       _, err = conn1.CollectionGet(userctx1, arvados.GetOptions{UUID: pdh})
+       c.Assert(err, check.ErrorMatches, `.*404 Not Found.*`)
+
+       // Save the collection on cluster z1111.
+       coll1, err = conn1.CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+               "manifest_text": mtxt,
+       }})
+       c.Assert(err, check.IsNil)
+
+       // Retrieve the collection from cluster z3333.
+       coll, err := conn3.CollectionGet(userctx1, arvados.GetOptions{UUID: pdh})
+       c.Check(err, check.IsNil)
+       c.Check(coll.PortableDataHash, check.Equals, pdh)
+}
index f1f3fd91dbe211a0742db03fec3052853c255b75..7f2f78ee9a9f7224aac4aacba94148497f292a5e 100644 (file)
@@ -12,6 +12,7 @@ import (
        "io"
        "net"
        "net/http"
+       "net/url"
        "os"
        "strings"
 
@@ -58,7 +59,7 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
        var err error
        defer func() {
                if err != nil {
-                       log.WithError(err).Info("exiting")
+                       log.WithError(err).Error("exiting")
                }
        }()
 
@@ -164,6 +165,14 @@ func getListenAddr(svcs arvados.Services, prog arvados.ServiceName, log logrus.F
        if !ok {
                return arvados.URL{}, fmt.Errorf("unknown service name %q", prog)
        }
+
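+       // If $ARVADOS_SERVICE_INTERNAL_URL is set, use it as the
+       // listen address instead of probing the configured
+       // InternalURLs.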
+       if want := os.Getenv("ARVADOS_SERVICE_INTERNAL_URL"); want == "" {
+       } else if url, err := url.Parse(want); err != nil {
+               return arvados.URL{}, fmt.Errorf("$ARVADOS_SERVICE_INTERNAL_URL (%q): %s", want, err)
+       } else {
+               return arvados.URL(*url), nil
+       }
+
        errors := []string{}
        for url := range svc.InternalURLs {
                listener, err := net.Listen("tcp", url.Host)
diff --git a/lib/service/log.go b/lib/service/log.go
new file mode 100644 (file)
index 0000000..7627803
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package service
+
+import (
+       "bytes"
+       "io"
+)
+
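+// LogPrefixer is an io.Writer that inserts Prefix at the start of
+// each line written through it.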
+type LogPrefixer struct {
+       io.Writer
+       Prefix []byte
+       did    bool
+}
+
+func (lp *LogPrefixer) Write(p []byte) (int, error) {
+       if len(p) == 0 {
+               return 0, nil
+       }
+       var out []byte
+       if !lp.did {
+               out = append(out, lp.Prefix...)
+       }
+       lp.did = p[len(p)-1] != '\n'
+       out = append(out, bytes.Replace(p[:len(p)-1], []byte("\n"), append([]byte("\n"), lp.Prefix...), -1)...)
+       out = append(out, p[len(p)-1])
+       _, err := lp.Writer.Write(out)
+       if err != nil {
+               return 0, err
+       }
+       return len(p), nil
+}
index 35fd3fd7406fcd24b0f167d38fc9dd009f0e4d69..030665d77f7a075f289c92d4a715f1d414de9a9b 100644 (file)
@@ -6,7 +6,9 @@ package arvados
 
 import (
        "bufio"
+       "crypto/md5"
        "fmt"
+       "regexp"
        "strings"
        "time"
 
@@ -90,3 +92,28 @@ type CollectionList struct {
        Offset         int          `json:"offset"`
        Limit          int          `json:"limit"`
 }
+
+var (
+       blkRe = regexp.MustCompile(`^ [0-9a-f]{32}\+\d+`)
+       tokRe = regexp.MustCompile(` ?[^ ]*`)
+)
+
+// PortableDataHash computes the portable data hash of the given
+// manifest.
+func PortableDataHash(mt string) string {
+       h := md5.New()
+       size := 0
+       _ = tokRe.ReplaceAllFunc([]byte(mt), func(tok []byte) []byte {
+               if m := blkRe.Find(tok); m != nil {
+                       // write hash+size, ignore remaining block hints
+                       tok = m
+               }
+               n, err := h.Write(tok)
+               if err != nil {
+                       panic(err)
+               }
+               size += n
+               return nil
+       })
+       return fmt.Sprintf("%x+%d", h.Sum(nil), size)
+}
index c2f6a0e8f0885e68a98f7e62a4ee4f17d0d930d2..b6a85e05e786fa1d0ace1715eab1cacdc3e7d0cc 100644 (file)
@@ -16,8 +16,8 @@ type Credentials struct {
        Tokens []string
 }
 
-func NewCredentials() *Credentials {
-       return &Credentials{Tokens: []string{}}
+func NewCredentials(tokens ...string) *Credentials {
+       return &Credentials{Tokens: tokens}
 }
 
 func NewContext(ctx context.Context, c *Credentials) context.Context {
index a17ad8d83614416222f56e0b950061886183822c..acbb11a3611094be4eed0ce13e5c03ffca9e758b 100644 (file)
@@ -60,6 +60,12 @@ func TestLogger(c interface{ Log(...interface{}) }) *logrus.Logger {
        return logger
 }
 
+// LogWriter returns an io.Writer that writes to the given log func,
+// which is typically (*check.C).Log().
+func LogWriter(log func(...interface{})) io.Writer {
+       return &logWriter{log}
+}
+
 // SetLevel sets the current logging level. See logrus for level
 // names.
 func SetLevel(level string) {
index a1ef5e0beb76d8c95cef9c0b9ec5a2dbe8df9eae..a0284e8f247a60f8d2fd57b752f37a800d54c222 100644 (file)
@@ -62,11 +62,14 @@ func (agg *Aggregator) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
                sendErr(http.StatusUnauthorized, errUnauthorized)
                return
        }
-       if req.URL.Path != "/_health/all" {
+       if req.URL.Path == "/_health/all" {
+               json.NewEncoder(resp).Encode(agg.ClusterHealth())
+       } else if req.URL.Path == "/_health/ping" {
+               resp.Write(healthyBody)
+       } else {
                sendErr(http.StatusNotFound, errNotFound)
                return
        }
-       json.NewEncoder(resp).Encode(agg.ClusterHealth())
        if agg.Log != nil {
                agg.Log(req, nil)
        }
@@ -103,6 +106,7 @@ type ServiceHealth struct {
 }
 
 func (agg *Aggregator) ClusterHealth() ClusterHealthResponse {
+       agg.setupOnce.Do(agg.setup)
        resp := ClusterHealthResponse{
                Health:   "OK",
                Checks:   make(map[string]CheckResult),
index 6010ee4bf73e0fc0278c672b41a20c0ecaa35532..b10b3f00825f682f9cbf4c4a498b5f56f8824945 100644 (file)
@@ -17,7 +17,7 @@ http {
   uwsgi_temp_path "{{TMPDIR}}";
   scgi_temp_path "{{TMPDIR}}";
   upstream arv-git-http {
-    server localhost:{{GITPORT}};
+    server {{LISTENHOST}}:{{GITPORT}};
   }
   server {
     listen *:{{GITSSLPORT}} ssl default_server;
@@ -33,7 +33,7 @@ http {
     }
   }
   upstream keepproxy {
-    server localhost:{{KEEPPROXYPORT}};
+    server {{LISTENHOST}}:{{KEEPPROXYPORT}};
   }
   server {
     listen *:{{KEEPPROXYSSLPORT}} ssl default_server;
@@ -52,7 +52,7 @@ http {
     }
   }
   upstream keep-web {
-    server localhost:{{KEEPWEBPORT}};
+    server {{LISTENHOST}}:{{KEEPWEBPORT}};
   }
   server {
     listen *:{{KEEPWEBSSLPORT}} ssl default_server;
@@ -89,10 +89,10 @@ http {
     }
   }
   upstream ws {
-    server localhost:{{WSPORT}};
+    server {{LISTENHOST}}:{{WSPORT}};
   }
   server {
-    listen *:{{WSSPORT}} ssl default_server;
+    listen *:{{WSSSLPORT}} ssl default_server;
     server_name websocket;
     ssl_certificate "{{SSLCERT}}";
     ssl_certificate_key "{{SSLKEY}}";
@@ -106,8 +106,24 @@ http {
       proxy_redirect off;
     }
   }
+  upstream workbench1 {
+    server {{LISTENHOST}}:{{WORKBENCH1PORT}};
+  }
+  server {
+    listen *:{{WORKBENCH1SSLPORT}} ssl default_server;
+    server_name workbench1;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
+    location  / {
+      proxy_pass http://workbench1;
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
   upstream controller {
-    server localhost:{{CONTROLLERPORT}};
+    server {{LISTENHOST}}:{{CONTROLLERPORT}};
   }
   server {
     listen *:{{CONTROLLERSSLPORT}} ssl default_server;
index 9e9b12f98ca6d09ed1fb65eb5c768acf23454bf5..5b75de08fef9afaf9ea83188579b112ca6e2ca27 100644 (file)
@@ -326,15 +326,16 @@ def run(leave_running_atexit=False):
     env.pop('ARVADOS_API_HOST', None)
     env.pop('ARVADOS_API_HOST_INSECURE', None)
     env.pop('ARVADOS_API_TOKEN', None)
+    logf = open(_logfilename('railsapi'), 'a')
     start_msg = subprocess.check_output(
         ['bundle', 'exec',
          'passenger', 'start', '-d', '-p{}'.format(port),
          '--pid-file', pid_file,
-         '--log-file', os.path.join(os.getcwd(), 'log/test.log'),
+         '--log-file', '/dev/stdout',
          '--ssl',
          '--ssl-certificate', 'tmp/self-signed.pem',
          '--ssl-certificate-key', 'tmp/self-signed.key'],
-        env=env)
+        env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
 
     if not leave_running_atexit:
         atexit.register(kill_server_pid, pid_file, passenger_root=api_src_dir)
@@ -606,6 +607,7 @@ def run_nginx():
         return
     stop_nginx()
     nginxconf = {}
+    nginxconf['LISTENHOST'] = 'localhost'
     nginxconf['CONTROLLERPORT'] = internal_port_from_config("Controller")
     nginxconf['CONTROLLERSSLPORT'] = external_port_from_config("Controller")
     nginxconf['KEEPWEBPORT'] = internal_port_from_config("WebDAV")
@@ -616,7 +618,9 @@ def run_nginx():
     nginxconf['GITPORT'] = internal_port_from_config("GitHTTP")
     nginxconf['GITSSLPORT'] = external_port_from_config("GitHTTP")
     nginxconf['WSPORT'] = internal_port_from_config("Websocket")
-    nginxconf['WSSPORT'] = external_port_from_config("Websocket")
+    nginxconf['WSSSLPORT'] = external_port_from_config("Websocket")
+    nginxconf['WORKBENCH1PORT'] = internal_port_from_config("Workbench1")
+    nginxconf['WORKBENCH1SSLPORT'] = external_port_from_config("Workbench1")
     nginxconf['SSLCERT'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.pem')
     nginxconf['SSLKEY'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.key')
     nginxconf['ACCESSLOG'] = _logfilename('nginx_access')
@@ -627,7 +631,7 @@ def run_nginx():
     conffile = os.path.join(TEST_TMPDIR, 'nginx.conf')
     with open(conffile, 'w') as f:
         f.write(re.sub(
-            r'{{([A-Z]+)}}',
+            r'{{([A-Z]+[A-Z0-9]+)}}',
             lambda match: str(nginxconf.get(match.group(1))),
             open(conftemplatefile).read()))
 
@@ -648,6 +652,8 @@ def setup_config():
     controller_external_port = find_available_port()
     websocket_port = find_available_port()
     websocket_external_port = find_available_port()
+    workbench1_port = find_available_port()
+    workbench1_external_port = find_available_port()
     git_httpd_port = find_available_port()
     git_httpd_external_port = find_available_port()
     keepproxy_port = find_available_port()
@@ -683,6 +689,12 @@ def setup_config():
                 "http://%s:%s"%(localhost, websocket_port): {},
             },
         },
+        "Workbench1": {
+            "ExternalURL": "https://%s:%s/" % (localhost, workbench1_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, workbench1_port): {},
+            },
+        },
         "GitHTTP": {
             "ExternalURL": "https://%s:%s" % (localhost, git_httpd_external_port),
             "InternalURLs": {
@@ -712,6 +724,9 @@ def setup_config():
                 "http://%s:%s"%(localhost, keep_web_dl_port): {},
             },
         },
+        "SSO": {
+            "ExternalURL": "http://localhost:3002",
+        },
     }
 
     config = {
@@ -721,6 +736,11 @@ def setup_config():
                 "SystemRootToken": auth_token('system_user'),
                 "API": {
                     "RequestTimeout": "30s",
+                    "RailsSessionSecretToken": "e24205c490ac07e028fd5f8a692dcb398bcd654eff1aef5f9fe6891994b18483",
+                },
+                "Login": {
+                    "ProviderAppID": "arvados-server",
+                    "ProviderAppSecret": "608dbf356a327e2d0d4932b60161e212c2d8d8f5e25690d7b622f850a990cd33",
                 },
                 "SystemLogs": {
                     "LogLevel": ('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug'),
@@ -734,14 +754,22 @@ def setup_config():
                 "Services": services,
                 "Users": {
                     "AnonymousUserToken": auth_token('anonymous'),
+                    "UserProfileNotificationAddress": "arvados@example.com",
                 },
                 "Collections": {
                     "BlobSigningKey": "zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc",
                     "TrustAllContent": True,
                     "ForwardSlashNameSubstitution": "/",
+                    "TrashSweepInterval": "-1s",
                 },
                 "Git": {
-                    "Repositories": "%s/test" % os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'git'),
+                    "Repositories": os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'git', 'test'),
+                },
+                "Containers": {
+                    "JobsAPI": {
+                        "GitInternalDir": os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'internal.git'),
+                    },
+                    "SupportedDockerImageFormats": {"v1": {}},
                 },
                 "Volumes": {
                     "zzzzz-nyw5e-%015d"%n: {
index 1cf3b9d78a6e9d6290e9df1886304ba7124638c5..224f2c0bd464ceb182c4bc0a696a823def99393e 100644 (file)
@@ -45,8 +45,11 @@ class Arvados::V1::UsersController < ApplicationController
   end
 
   def activate
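+    # Accept a user UUID in the :id parameter: if :id contains any
+    # non-digit character, treat it as :uuid.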
+    if params[:id] and params[:id].match(/\D/)
+      params[:uuid] = params.delete :id
+    end
     if current_user.andand.is_admin && params[:uuid]
-      @object = User.find params[:uuid]
+      @object = User.find_by_uuid params[:uuid]
     else
       @object = current_user
     end
index 77fc0a45afb32ff7ea93595a4b97ff66cd128f63..5386cb119a0c9cadbcc2cc0d8edfc5cadd8a1e76 100644 (file)
@@ -111,7 +111,7 @@ class ApiClientAuthorization < ArvadosModel
   def self.check_system_root_token token
     if token == Rails.configuration.SystemRootToken
       return ApiClientAuthorization.new(user: User.find_by_uuid(system_user_uuid),
-                                        uuid: uuid_prefix+"-gj3su-000000000000000",
+                                        uuid: Rails.configuration.ClusterID+"-gj3su-000000000000000",
                                         api_token: token,
                                         api_client: ApiClient.new(is_trusted: true, url_prefix: ""))
     else
index 4e1936b7716c65561d7592debbe9a3d5c5cfb51e..9fd5368c0aa10f83a2ce6f3b8456dc4beb1805b7 100644 (file)
@@ -76,15 +76,3 @@ test:
   action_controller.allow_forgery_protection: false
   action_mailer.delivery_method: :test
   active_support.deprecation: :stderr
-  uuid_prefix: zzzzz
-  sso_app_id: arvados-server
-  sso_app_secret: <%= rand(2**512).to_s(36) %>
-  sso_provider_url: http://localhost:3002
-  secret_token: <%= rand(2**512).to_s(36) %>
-  blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
-  user_profile_notification_address: arvados@example.com
-  workbench_address: https://localhost:3001/
-  git_repositories_dir: <%= Rails.root.join 'tmp', 'git', 'test' %>
-  git_internal_dir: <%= Rails.root.join 'tmp', 'internal.git' %>
-  trash_sweep_interval: -1
-  docker_image_formats: ["v1"]
index f211ec9e0cde5c67160bda1bde97e20cdb7861a8..7901a7fd83897e2b6b5750cdb4f9e83337afb83f 100644 (file)
@@ -40,6 +40,10 @@ if defined?(Bundler)
   end
 end
 
+if Rails.env == 'test'
+  Rails.logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT))
+end
+
 module Server
   class Application < Rails::Application
     # The following is to avoid SafeYAML's warning message
index 522aa73b0a0545a0cb9fa4b58184877581fb7752..cf16993ca51054e220713c24cca1a33510e2a423 100644 (file)
@@ -180,8 +180,13 @@ class ConfigLoader
     end
   end
 
-  def self.parse_duration durstr, cfgkey:
-    duration_re = /-?(\d+(\.\d+)?)(s|m|h)/
+  def self.parse_duration(durstr, cfgkey:)
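+    # Accept a leading "-" so negative durations (e.g. the test
+    # config's TrashSweepInterval: -1s) parse correctly.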
+    sign = 1
+    if durstr[0] == '-'
+      durstr = durstr[1..-1]
+      sign = -1
+    end
+    duration_re = /(\d+(\.\d+)?)(s|m|h)/
     dursec = 0
     while durstr != ""
       mt = duration_re.match durstr
@@ -189,7 +194,7 @@ class ConfigLoader
         raise "#{cfgkey} not a valid duration: '#{durstr}', accepted suffixes are s, m, h"
       end
       multiplier = {s: 1, m: 60, h: 3600}
-      dursec += (Float(mt[1]) * multiplier[mt[3].to_sym])
+      dursec += (Float(mt[1]) * multiplier[mt[3].to_sym] * sign)
       durstr = durstr[mt[0].length..-1]
     end
     return dursec.seconds
index 28cdff6fd240dab21eeaad3c8dec7576622ffc7f..84cde6ba002a991f2fc76d54a7ab20f9b9ca8da9 100644 (file)
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    arvados-login-sync (1.5.0.dev20200114213539)
+    arvados-login-sync (1.5.0.dev20200118023341)
       arvados (~> 1.3.0, >= 1.3.0)
       faraday (< 0.16)
       signet (< 0.12)
@@ -86,4 +86,4 @@ DEPENDENCIES
   rake
 
 BUNDLED WITH
-   1.11
+   1.17.3