Merge remote-tracking branch 'origin/master' into 14484-collection-record-update
author Eric Biagiotti <ebiagiotti@veritasgenetics.com>
Wed, 3 Apr 2019 20:42:25 +0000 (16:42 -0400)
committer Eric Biagiotti <ebiagiotti@veritasgenetics.com>
Wed, 3 Apr 2019 20:42:25 +0000 (16:42 -0400)
refs #14484

Arvados-DCO-1.1-Signed-off-by: Eric Biagiotti <ebiagiotti@veritasgenetics.com>

49 files changed:
build/package-build-dockerfiles/debian8/Dockerfile
build/package-test-dockerfiles/debian8/Dockerfile
build/run-tests.sh
doc/install/arvbox.html.textile.liquid
lib/cloud/azure/azure.go
lib/controller/cmd.go
lib/controller/handler_test.go
lib/controller/proxy.go
lib/crunchstat/crunchstat.go
lib/dispatchcloud/cmd.go
lib/dispatchcloud/dispatcher.go
lib/dispatchcloud/dispatcher_test.go
lib/dispatchcloud/test/stub_driver.go
lib/service/cmd.go
lib/service/cmd_test.go [new file with mode: 0644]
lib/service/error.go [new file with mode: 0644]
sdk/cwl/tests/federation/framework/prepare.cwl
sdk/go/arvados/client.go
sdk/go/arvados/config.go
sdk/python/tests/nginx.conf
sdk/python/tests/run_test_server.py
sdk/python/tests/test_arv_put.py
services/api/config/database.yml.example
services/arv-git-httpd/gitolite_test.go
services/arv-git-httpd/integration_test.go
services/crunch-dispatch-local/crunch-dispatch-local.go
services/crunch-dispatch-local/crunch-dispatch-local_test.go
services/crunch-run/crunchrun.go
services/keep-web/server_test.go
services/keepproxy/keepproxy.go
services/login-sync/Gemfile
services/login-sync/arvados-login-sync.gemspec
services/login-sync/bin/arvados-login-sync
services/login-sync/test/stubs.rb
services/login-sync/test/test_add_user.rb
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/Dockerfile.base
tools/arvbox/lib/arvbox/docker/Dockerfile.dev
tools/arvbox/lib/arvbox/docker/api-setup.sh
tools/arvbox/lib/arvbox/docker/service/nginx/run [changed from symlink to file mode: 0755]
tools/arvbox/lib/arvbox/docker/service/nginx/run-service [deleted file]
tools/arvbox/lib/arvbox/docker/service/slurmctld/log/main/.gitstub [deleted file]
tools/arvbox/lib/arvbox/docker/service/slurmctld/log/run [deleted symlink]
tools/arvbox/lib/arvbox/docker/service/slurmctld/run [deleted file]
tools/arvbox/lib/arvbox/docker/service/slurmd/log/main/.gitstub [deleted file]
tools/arvbox/lib/arvbox/docker/service/slurmd/log/run [deleted symlink]
tools/arvbox/lib/arvbox/docker/service/slurmd/run [deleted file]
tools/arvbox/lib/arvbox/docker/service/sso/run-service
tools/arvbox/lib/arvbox/docker/waitforpostgres.sh

index 3f591cdfa14aceab1ff1b4be1c650192d396131f..ec7ae07d826e440cf653dbc0ebcfb8b5bca255ca 100644 (file)
@@ -5,6 +5,8 @@
 FROM debian:jessie
 MAINTAINER Ward Vandewege <ward@curoverse.com>
 
+RUN perl -ni~ -e 'print unless /jessie-updates/' /etc/apt/sources.list
+
 ENV DEBIAN_FRONTEND noninteractive
 
 # Install dependencies.
index 2168f725a1a5a3d9cf7fcbfafb50b28733870f79..82d679abfba0e7321d9229280ad1e1d10995d5c1 100644 (file)
@@ -5,6 +5,8 @@
 FROM debian:8
 MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
 
+RUN perl -ni~ -e 'print unless /jessie-updates/' /etc/apt/sources.list
+
 ENV DEBIAN_FRONTEND noninteractive
 
 # Install dependencies
index a6858f315f873cf384e0ba3110ae430bf3a49bf1..a37a0f731e5a12c192c3a17094a9891ae920f9a6 100755 (executable)
@@ -85,6 +85,7 @@ lib/dispatchcloud/container
 lib/dispatchcloud/scheduler
 lib/dispatchcloud/ssh_executor
 lib/dispatchcloud/worker
+lib/service
 services/api
 services/arv-git-httpd
 services/crunchstat
@@ -147,6 +148,7 @@ PYTHONPATH=
 GEMHOME=
 PERLINSTALLBASE=
 R_LIBS=
+export LANG=en_US.UTF-8
 
 short=
 only_install=
@@ -188,6 +190,9 @@ sanity_checks() {
     ( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \
         || fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)"
     echo Checking dependencies:
+    echo "locale: ${LANG}"
+    [[ "$(locale charmap)" = "UTF-8" ]] \
+        || fatal "Locale '${LANG}' is broken/missing. Try: echo ${LANG} | sudo tee -a /etc/locale.gen && sudo locale-gen"
     echo -n 'virtualenv: '
     virtualenv --version \
         || fatal "No virtualenv. Try: apt-get install virtualenv (on ubuntu: python-virtualenv)"
@@ -363,6 +368,27 @@ if [[ $NEED_SDK_R == false ]]; then
        echo "R SDK not needed, it will not be installed."
 fi
 
+checkpidfile() {
+    svc="$1"
+    pid="$(cat "$WORKSPACE/tmp/${svc}.pid")"
+    if [[ -z "$pid" ]] || ! kill -0 "$pid"; then
+        tail $WORKSPACE/tmp/${1}*.log
+        echo "${svc} pid ${pid} not running"
+        return 1
+    fi
+    echo "${svc} pid ${pid} ok"
+}
+
+checkdiscoverydoc() {
+    dd="https://${1}/discovery/v1/apis/arvados/v1/rest"
+    if ! (set -o pipefail; curl -fsk "$dd" | grep -q ^{ ); then
+        echo >&2 "ERROR: could not retrieve discovery doc from RailsAPI at $dd"
+        tail -v $WORKSPACE/services/api/log/test.log
+        return 1
+    fi
+    echo "${dd} ok"
+}
+
 start_services() {
     if [[ -n "$ARVADOS_TEST_API_HOST" ]]; then
         return 0
@@ -377,19 +403,29 @@ start_services() {
        rm -f "$WORKSPACE/tmp/api.pid"
     fi
     all_services_stopped=
-    fail=0
+    fail=1
     cd "$WORKSPACE" \
-        && eval $(python sdk/python/tests/run_test_server.py start --auth admin || echo "fail=1; false") \
+        && eval $(python sdk/python/tests/run_test_server.py start --auth admin) \
         && export ARVADOS_TEST_API_HOST="$ARVADOS_API_HOST" \
         && export ARVADOS_TEST_API_INSTALLED="$$" \
+        && checkpidfile api \
+        && checkdiscoverydoc $ARVADOS_API_HOST \
         && python sdk/python/tests/run_test_server.py start_controller \
+        && checkpidfile controller \
         && python sdk/python/tests/run_test_server.py start_keep_proxy \
+        && checkpidfile keepproxy \
         && python sdk/python/tests/run_test_server.py start_keep-web \
+        && checkpidfile keep-web \
         && python sdk/python/tests/run_test_server.py start_arv-git-httpd \
+        && checkpidfile arv-git-httpd \
         && python sdk/python/tests/run_test_server.py start_ws \
-        && eval $(python sdk/python/tests/run_test_server.py start_nginx || echo "fail=1; false") \
+        && checkpidfile ws \
+        && eval $(python sdk/python/tests/run_test_server.py start_nginx) \
+        && checkdiscoverydoc $ARVADOS_API_HOST \
+        && checkpidfile nginx \
+        && export ARVADOS_TEST_PROXY_SERVICES=1 \
         && (env | egrep ^ARVADOS) \
-        || fail=1
+        && fail=0
     deactivate
     if [[ $fail != 0 ]]; then
         unset ARVADOS_TEST_API_HOST
@@ -401,7 +437,7 @@ stop_services() {
     if [[ -n "$all_services_stopped" ]]; then
         return
     fi
-    unset ARVADOS_TEST_API_HOST
+    unset ARVADOS_TEST_API_HOST ARVADOS_TEST_PROXY_SERVICES
     . "$VENVDIR/bin/activate" || return
     cd "$WORKSPACE" \
         && python sdk/python/tests/run_test_server.py stop_nginx \
@@ -694,7 +730,7 @@ do_test() {
         services/api)
             stop_services
             ;;
-        doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
+        gofmt | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
             # don't care whether services are running
             ;;
         *)
@@ -727,7 +763,6 @@ do_test_once() {
         # compilation errors.
         go get -ldflags "-X main.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1" && \
             cd "$GOPATH/src/git.curoverse.com/arvados.git/$1" && \
-            [[ -z "$(gofmt -e -d . | tee /dev/stderr)" ]] && \
             if [[ -n "${testargs[$1]}" ]]
         then
             # "go test -check.vv giturl" doesn't work, but this
@@ -744,6 +779,7 @@ do_test_once() {
             go tool cover -html="$WORKSPACE/tmp/.$covername.tmp" -o "$WORKSPACE/tmp/$covername.html"
             rm "$WORKSPACE/tmp/.$covername.tmp"
         fi
+        [[ $result = 0 ]] && gofmt -e -d *.go
     elif [[ "$2" == "pip" ]]
     then
         tries=0
@@ -894,7 +930,7 @@ install_services/api() {
     # database, so that we can drop it. This assumes the current user
     # is a postgresql superuser.
     cd "$WORKSPACE/services/api" \
-        && test_database=$(python -c "import yaml; print yaml.load(file('config/database.yml'))['test']['database']") \
+        && test_database=$(python -c "import yaml; print yaml.safe_load(file('config/database.yml'))['test']['database']") \
         && psql "$test_database" -c "SELECT pg_terminate_backend (pg_stat_activity.pid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$test_database';" 2>/dev/null
 
     mkdir -p "$WORKSPACE/services/api/tmp/pids"
@@ -953,6 +989,7 @@ gostuff=(
     lib/dispatchcloud/scheduler
     lib/dispatchcloud/ssh_executor
     lib/dispatchcloud/worker
+    lib/service
     sdk/go/arvados
     sdk/go/arvadosclient
     sdk/go/auth
@@ -1000,6 +1037,12 @@ test_doc() {
     )
 }
 
+test_gofmt() {
+    cd "$WORKSPACE" || return 1
+    dirs=$(ls -d */ | egrep -v 'vendor|tmp')
+    [[ -z "$(gofmt -e -d $dirs | tee -a /dev/stderr)" ]]
+}
+
 test_services/api() {
     rm -f "$WORKSPACE/services/api/git-commit.version"
     cd "$WORKSPACE/services/api" \
@@ -1036,17 +1079,17 @@ test_services/nodemanager_integration() {
 
 test_apps/workbench_units() {
     cd "$WORKSPACE/apps/workbench" \
-        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS=-v ${testargs[apps/workbench]}
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_units]}
 }
 
 test_apps/workbench_functionals() {
     cd "$WORKSPACE/apps/workbench" \
-        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS=-v ${testargs[apps/workbench]}
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_functionals]}
 }
 
 test_apps/workbench_integration() {
     cd "$WORKSPACE/apps/workbench" \
-        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS=-v ${testargs[apps/workbench]}
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_integration]}
 }
 
 test_apps/workbench_benchmark() {
@@ -1115,6 +1158,7 @@ test_all() {
         exit_cleanly
     fi
 
+    do_test gofmt
     do_test doc
     do_test sdk/ruby
     do_test sdk/R
@@ -1169,11 +1213,8 @@ for g in "${gostuff[@]}"; do
 done
 for p in "${pythonstuff[@]}"; do
     dir=${p%:py3}
-    if [[ ${dir} = ${p} ]]; then
-        testfuncargs[$p]="$dir pip $VENVDIR/bin/"
-    else
-        testfuncargs[$p]="$dir pip $VENV3DIR/bin/"
-    fi
+    testfuncargs[$dir]="$dir pip $VENVDIR/bin/"
+    testfuncargs[$dir:py3]="$dir pip $VENV3DIR/bin/"
 done
 
 if [[ -z ${interactive} ]]; then
@@ -1185,7 +1226,11 @@ else
     only_install=()
     if [[ -e "$VENVDIR/bin/activate" ]]; then stop_services; fi
     setnextcmd() {
-        if [[ "$nextcmd" != "install deps" ]]; then
+        if [[ "$TERM" = dumb ]]; then
+            # assume emacs, or something, is offering a history buffer
+            # and pre-populating the command will only cause trouble
+            nextcmd=
+        elif [[ "$nextcmd" != "install deps" ]]; then
             :
         elif [[ -e "$VENVDIR/bin/activate" ]]; then
             nextcmd="test lib/cmd"
@@ -1199,30 +1244,34 @@ else
     setnextcmd
     while read -p 'What next? ' -e -i "${nextcmd}" nextcmd; do
         read verb target opts <<<"${nextcmd}"
+        target="${target%/}"
+        target="${target/\/:/:}"
         case "${verb}" in
-            "" | "help")
-                help_interactive
-                ;;
             "exit" | "quit")
                 exit_cleanly
                 ;;
             "reset")
                 stop_services
                 ;;
-            *)
-                target="${target%/}"
-                testargs["$target"]="${opts}"
+            "test" | "install")
                 case "$target" in
+                    "")
+                        help_interactive
+                        ;;
                     all | deps)
                         ${verb}_${target}
                         ;;
                     *)
+                        testargs["$target"]="${opts}"
                         tt="${testfuncargs[${target}]}"
                         tt="${tt:-$target}"
                         do_$verb $tt
                         ;;
                 esac
                 ;;
+            "" | "help" | *)
+                help_interactive
+                ;;
         esac
         if [[ ${#successes[@]} -gt 0 || ${#failures[@]} -gt 0 ]]; then
             report_outcomes
index 8827cf8abfd2b1fcbe73849268d7124f43973adf..2d94d32ac5edbb844eaa9dca37c18d49aef7a689 100644 (file)
@@ -31,17 +31,19 @@ h2. Usage
 $ arvbox
 Arvados-in-a-box                      http://arvados.org
 
-build   <config>      build arvbox Docker image
-rebuild <config>      build arvbox Docker image, no layer cache
-start|run <config>  start arvbox container
-open       open arvbox workbench in a web browser
-shell      enter arvbox shell
-ip         print arvbox docker container ip address
-host       print arvbox published host
-status     print some information about current arvbox
+start|run <config> [tag]  start arvbox container
 stop       stop arvbox container
 restart <config>  stop, then run again
-reboot  <config>  stop, build arvbox Docker image, run
+status     print some information about current arvbox
+ip         print arvbox docker container ip address
+host       print arvbox published host
+shell      enter arvbox shell
+open       open arvbox workbench in a web browser
+root-cert  get copy of root certificate
+update  <config> stop, pull latest image, run
+build   <config> build arvbox Docker image
+reboot  <config> stop, build arvbox Docker image, run
+rebuild <config> build arvbox Docker image, no layer cache
 reset      delete arvbox arvados data (be careful!)
 destroy    delete all arvbox code and data (be careful!)
 log <service> tail log of specified service
@@ -52,6 +54,23 @@ sv <start|stop|restart> <service> change state of service inside arvbox
 clone <from> <to>   clone an arvbox
 </pre>
 
+h2. Install root certificate
+
+Arvbox creates a root certificate to authorize Arvbox services.  Installing the root certificate into your web browser will prevent security errors when accessing Arvbox services.  Every Arvbox instance generates a new root signing key.
+
+# Export the certificate using @arvbox root-cert@
+# Go to the certificate manager in your browser.
+#* In Chrome, this can be found under "Settings &rarr; Advanced &rarr; Manage Certificates" or by entering @chrome://settings/certificates@ in the URL bar.
+#* In Firefox, this can be found under "Preferences &rarr; Privacy & Security" or by entering @about:preferences#privacy@ in the URL bar and then choosing "View Certificates...".
+# Select the "Authorities" tab, then press the "Import" button.  Choose @arvbox-root-cert.pem@
+
+The certificate will be added under the "Arvados testing" organization as "arvbox testing root CA".
+
+To access your Arvbox instance using command line clients (such as arv-get and arv-put) without security errors, install the certificate into the OS certificate storage (instructions for Debian/Ubuntu):
+
+# copy @arvbox-root-cert.pem@ to @/usr/local/share/ca-certificates/@
+# run @/usr/sbin/update-ca-certificates@
+
 h2. Configs
 
 h3. dev
index be7980ae7f77ee8e62f1b6ed69c2ae7fc42ea870..ac7ff14cc2539ff7c1305fc7df393c7e36d0a795 100644 (file)
@@ -341,6 +341,10 @@ func (az *azureInstanceSet) Create(
        az.stopWg.Add(1)
        defer az.stopWg.Done()
 
+       if instanceType.AddedScratch > 0 {
+               return nil, fmt.Errorf("cannot create instance type %q: driver does not implement non-zero AddedScratch (%d)", instanceType.Name, instanceType.AddedScratch)
+       }
+
        name, err := randutil.String(15, "abcdefghijklmnopqrstuvwxyz0123456789")
        if err != nil {
                return nil, err
@@ -645,14 +649,17 @@ func (ai *azureInstance) Destroy() error {
 }
 
 func (ai *azureInstance) Address() string {
-       if ai.nic.IPConfigurations != nil &&
-               len(*ai.nic.IPConfigurations) > 0 &&
-               (*ai.nic.IPConfigurations)[0].InterfaceIPConfigurationPropertiesFormat != nil &&
-               (*ai.nic.IPConfigurations)[0].InterfaceIPConfigurationPropertiesFormat.PrivateIPAddress != nil {
-
-               return *(*ai.nic.IPConfigurations)[0].PrivateIPAddress
+       if iprops := ai.nic.InterfacePropertiesFormat; iprops == nil {
+               return ""
+       } else if ipconfs := iprops.IPConfigurations; ipconfs == nil || len(*ipconfs) == 0 {
+               return ""
+       } else if ipconfprops := (*ipconfs)[0].InterfaceIPConfigurationPropertiesFormat; ipconfprops == nil {
+               return ""
+       } else if addr := ipconfprops.PrivateIPAddress; addr == nil {
+               return ""
+       } else {
+               return *addr
        }
-       return ""
 }
 
 func (ai *azureInstance) RemoteUser() string {
index c1d4657ba47b7801b63bad0222e4a2df71f7881d..f0268091bedb58f412d4e93ba675481d99f5e3ef 100644 (file)
@@ -14,6 +14,6 @@ import (
 
 var Command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
 
-func newHandler(_ context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile) service.Handler {
+func newHandler(_ context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile, _ string) service.Handler {
        return &Handler{Cluster: cluster, NodeProfile: np}
 }
index dfe60d90a5f3119909658149b1017f3b782515f3..96110ea85859b05b362f849475a9d77c91919752 100644 (file)
@@ -50,7 +50,7 @@ func (s *HandlerSuite) SetUpTest(c *check.C) {
                },
        }
        node := s.cluster.NodeProfiles["*"]
-       s.handler = newHandler(s.ctx, s.cluster, &node)
+       s.handler = newHandler(s.ctx, s.cluster, &node, "")
 }
 
 func (s *HandlerSuite) TearDownTest(c *check.C) {
index c01c152352e6b8f101179bf38add3b0574a00c5d..c0b94c2b5f76d604e738c2d9bc43d3a01f8bf5dc 100644 (file)
@@ -32,6 +32,7 @@ var dropHeaders = map[string]bool{
        "Keep-Alive":          true,
        "Proxy-Authenticate":  true,
        "Proxy-Authorization": true,
+       // this line makes gofmt 1.10 and 1.11 agree
        "TE":                true,
        "Trailer":           true,
        "Transfer-Encoding": true, // *-Encoding headers interfer with Go's automatic compression/decompression
index 8afe828196d9ea029e2f66a411b9e9f40225efee..028083fa0d1a23442f527b24f8ce95aacff660f4 100644 (file)
@@ -256,8 +256,13 @@ func (r *Reporter) doMemoryStats() {
        }
        var outstat bytes.Buffer
        for _, key := range wantStats {
-               if val, ok := thisSample.memStat[key]; ok {
-                       outstat.WriteString(fmt.Sprintf(" %d %s", val, key))
+               // Use "total_X" stats (entire hierarchy) if enabled,
+               // otherwise just the single cgroup -- see
+               // https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
+               if val, ok := thisSample.memStat["total_"+key]; ok {
+                       fmt.Fprintf(&outstat, " %d %s", val, key)
+               } else if val, ok := thisSample.memStat[key]; ok {
+                       fmt.Fprintf(&outstat, " %d %s", val, key)
                }
        }
        r.Logger.Printf("mem%s\n", outstat.String())
index 7231e839475639c2aa5e6c720091c15b4d4b5ed7..22ceb8aebe787ae79c1274cc0c714bc39df04640 100644 (file)
@@ -6,6 +6,7 @@ package dispatchcloud
 
 import (
        "context"
+       "fmt"
 
        "git.curoverse.com/arvados.git/lib/cmd"
        "git.curoverse.com/arvados.git/lib/service"
@@ -14,8 +15,17 @@ import (
 
 var Command cmd.Handler = service.Command(arvados.ServiceNameDispatchCloud, newHandler)
 
-func newHandler(ctx context.Context, cluster *arvados.Cluster, _ *arvados.NodeProfile) service.Handler {
-       d := &dispatcher{Cluster: cluster, Context: ctx}
+func newHandler(ctx context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile, token string) service.Handler {
+       ac, err := arvados.NewClientFromConfig(cluster)
+       if err != nil {
+               return service.ErrorHandler(ctx, cluster, np, fmt.Errorf("error initializing client from cluster config: %s", err))
+       }
+       d := &dispatcher{
+               Cluster:   cluster,
+               Context:   ctx,
+               ArvClient: ac,
+               AuthToken: token,
+       }
        go d.Start()
        return d
 }
index 9245d5de30928e038da47172dd526cb6d5ca3b8a..71ff9c784e958fa7927cb3ca57214593d74eecd7 100644 (file)
@@ -46,6 +46,8 @@ type pool interface {
 type dispatcher struct {
        Cluster       *arvados.Cluster
        Context       context.Context
+       ArvClient     *arvados.Client
+       AuthToken     string
        InstanceSetID cloud.InstanceSetID
 
        logger      logrus.FieldLogger
@@ -108,19 +110,21 @@ func (disp *dispatcher) setup() {
 }
 
 func (disp *dispatcher) initialize() {
-       arvClient := arvados.NewClientFromEnv()
+       disp.logger = ctxlog.FromContext(disp.Context)
+
+       disp.ArvClient.AuthToken = disp.AuthToken
+
        if disp.InstanceSetID == "" {
-               if strings.HasPrefix(arvClient.AuthToken, "v2/") {
-                       disp.InstanceSetID = cloud.InstanceSetID(strings.Split(arvClient.AuthToken, "/")[1])
+               if strings.HasPrefix(disp.AuthToken, "v2/") {
+                       disp.InstanceSetID = cloud.InstanceSetID(strings.Split(disp.AuthToken, "/")[1])
                } else {
                        // Use some other string unique to this token
                        // that doesn't reveal the token itself.
-                       disp.InstanceSetID = cloud.InstanceSetID(fmt.Sprintf("%x", md5.Sum([]byte(arvClient.AuthToken))))
+                       disp.InstanceSetID = cloud.InstanceSetID(fmt.Sprintf("%x", md5.Sum([]byte(disp.AuthToken))))
                }
        }
        disp.stop = make(chan struct{}, 1)
        disp.stopped = make(chan struct{})
-       disp.logger = ctxlog.FromContext(disp.Context)
 
        if key, err := ssh.ParsePrivateKey([]byte(disp.Cluster.Dispatch.PrivateKey)); err != nil {
                disp.logger.Fatalf("error parsing configured Dispatch.PrivateKey: %s", err)
@@ -134,8 +138,8 @@ func (disp *dispatcher) initialize() {
        }
        disp.instanceSet = instanceSet
        disp.reg = prometheus.NewRegistry()
-       disp.pool = worker.NewPool(disp.logger, arvClient, disp.reg, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
-       disp.queue = container.NewQueue(disp.logger, disp.reg, disp.typeChooser, arvClient)
+       disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.reg, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
+       disp.queue = container.NewQueue(disp.logger, disp.reg, disp.typeChooser, disp.ArvClient)
 
        if disp.Cluster.ManagementToken == "" {
                disp.httpHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
index b0033353c2e822d1e6883f7d72d117ac8474e84f..00157b75c649226880898c802973e9cd03a82173 100644 (file)
@@ -17,6 +17,7 @@ import (
 
        "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
        "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/ctxlog"
        "golang.org/x/crypto/ssh"
        check "gopkg.in/check.v1"
@@ -81,10 +82,19 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
                                DispatchCloud: arvados.SystemServiceInstance{Listen: ":"},
                        },
                },
+               Services: arvados.Services{
+                       Controller: arvados.Service{ExternalURL: arvados.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}},
+               },
        }
+
+       arvClient, err := arvados.NewClientFromConfig(s.cluster)
+       c.Check(err, check.IsNil)
+
        s.disp = &dispatcher{
-               Cluster: s.cluster,
-               Context: s.ctx,
+               Cluster:   s.cluster,
+               Context:   s.ctx,
+               ArvClient: arvClient,
+               AuthToken: arvadostest.AdminToken,
        }
        // Test cases can modify s.cluster before calling
        // initialize(), and then modify private state before calling
index 02346a97076d7168869266c8078028a667b39f81..873d987327eafed2a53f6d63f0dcc17230dbeb0d 100644 (file)
@@ -245,7 +245,7 @@ func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader,
                }
                for _, name := range []string{"ARVADOS_API_HOST", "ARVADOS_API_TOKEN"} {
                        if stdinKV[name] == "" {
-                               fmt.Fprintf(stderr, "%s env var missing from stdin %q\n", name, stdin)
+                               fmt.Fprintf(stderr, "%s env var missing from stdin %q\n", name, stdinData)
                                return 1
                        }
                }
index d99af0eea15428054fd5adc16596ca89b1de7820..e853da943222aa2182b01f41d12ebb3cbec5193a 100644 (file)
@@ -11,6 +11,7 @@ import (
        "fmt"
        "io"
        "net/http"
+       "net/url"
        "os"
 
        "git.curoverse.com/arvados.git/lib/cmd"
@@ -26,11 +27,12 @@ type Handler interface {
        CheckHealth() error
 }
 
-type NewHandlerFunc func(context.Context, *arvados.Cluster, *arvados.NodeProfile) Handler
+type NewHandlerFunc func(_ context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, token string) Handler
 
 type command struct {
        newHandler NewHandlerFunc
        svcName    arvados.ServiceName
+       ctx        context.Context // enables tests to shutdown service; no public API yet
 }
 
 // Command returns a cmd.Handler that loads site config, calls
@@ -43,6 +45,7 @@ func Command(svcName arvados.ServiceName, newHandler NewHandlerFunc) cmd.Handler
        return &command{
                newHandler: newHandler,
                svcName:    svcName,
+               ctx:        context.Background(),
        }
 }
 
@@ -77,7 +80,8 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
        log = ctxlog.New(stderr, cluster.Logging.Format, cluster.Logging.Level).WithFields(logrus.Fields{
                "PID": os.Getpid(),
        })
-       ctx := ctxlog.Context(context.Background(), log)
+       ctx := ctxlog.Context(c.ctx, log)
+
        profileName := *nodeProfile
        if profileName == "" {
                profileName = os.Getenv("ARVADOS_NODE_PROFILE")
@@ -91,7 +95,25 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
                err = fmt.Errorf("configuration does not enable the %s service on this host", c.svcName)
                return 1
        }
-       handler := c.newHandler(ctx, cluster, profile)
+
+       if cluster.SystemRootToken == "" {
+               log.Warn("SystemRootToken missing from cluster config, falling back to ARVADOS_API_TOKEN environment variable")
+               cluster.SystemRootToken = os.Getenv("ARVADOS_API_TOKEN")
+       }
+       if cluster.Services.Controller.ExternalURL.Host == "" {
+               log.Warn("Services.Controller.ExternalURL missing from cluster config, falling back to ARVADOS_API_HOST(_INSECURE) environment variables")
+               u, err := url.Parse("https://" + os.Getenv("ARVADOS_API_HOST"))
+               if err != nil {
+                       err = fmt.Errorf("ARVADOS_API_HOST: %s", err)
+                       return 1
+               }
+               cluster.Services.Controller.ExternalURL = arvados.URL(*u)
+               if i := os.Getenv("ARVADOS_API_HOST_INSECURE"); i != "" && i != "0" {
+                       cluster.TLS.Insecure = true
+               }
+       }
+
+       handler := c.newHandler(ctx, cluster, profile, cluster.SystemRootToken)
        if err = handler.CheckHealth(); err != nil {
                return 1
        }
@@ -112,6 +134,10 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
        if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
                log.WithError(err).Errorf("error notifying init daemon")
        }
+       go func() {
+               <-ctx.Done()
+               srv.Close()
+       }()
        err = srv.Wait()
        if err != nil {
                return 1
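
For context, a minimal sketch (not part of the commit) of a service entry point written against the updated NewHandlerFunc in lib/service/cmd.go, which now passes the system root token as a fourth argument. The handler type and field names here are hypothetical, and ServiceNameController is reused only for illustration:

package main

import (
	"context"
	"net/http"
	"os"

	"git.curoverse.com/arvados.git/lib/service"
	"git.curoverse.com/arvados.git/sdk/go/arvados"
)

// exampleHandler is a hypothetical service.Handler implementation.
type exampleHandler struct {
	cluster *arvados.Cluster
	token   string // cluster.SystemRootToken, or the ARVADOS_API_TOKEN fallback
}

func (h *exampleHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}

func (h *exampleHandler) CheckHealth() error { return nil }

// newHandler matches the updated NewHandlerFunc signature: the system root
// token is now received as the fourth argument.
func newHandler(_ context.Context, cluster *arvados.Cluster, _ *arvados.NodeProfile, token string) service.Handler {
	return &exampleHandler{cluster: cluster, token: token}
}

var Command = service.Command(arvados.ServiceNameController, newHandler)

func main() {
	os.Exit(Command.RunCommand(os.Args[0], os.Args[1:], os.Stdin, os.Stdout, os.Stderr))
}
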
diff --git a/lib/service/cmd_test.go b/lib/service/cmd_test.go
new file mode 100644 (file)
index 0000000..62960dc
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// package service provides a cmd.Handler that brings up a system service.
+package service
+
+import (
+       "bytes"
+       "context"
+       "fmt"
+       "io/ioutil"
+       "net/http"
+       "os"
+       "testing"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       check "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&Suite{})
+
+type Suite struct{}
+
+func (*Suite) TestCommand(c *check.C) {
+       cf, err := ioutil.TempFile("", "cmd_test.")
+       c.Assert(err, check.IsNil)
+       defer os.Remove(cf.Name())
+       defer cf.Close()
+       fmt.Fprintf(cf, "Clusters:\n zzzzz:\n  SystemRootToken: abcde\n  NodeProfiles: {\"*\": {\"arvados-controller\": {Listen: \":1234\"}}}")
+
+       healthCheck := make(chan bool, 1)
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+
+       cmd := Command(arvados.ServiceNameController, func(ctx context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, token string) Handler {
+               c.Check(ctx.Value("foo"), check.Equals, "bar")
+               c.Check(token, check.Equals, "abcde")
+               return &testHandler{ctx: ctx, healthCheck: healthCheck}
+       })
+       cmd.(*command).ctx = context.WithValue(ctx, "foo", "bar")
+
+       done := make(chan bool)
+       var stdin, stdout, stderr bytes.Buffer
+
+       go func() {
+               cmd.RunCommand("arvados-controller", []string{"-config", cf.Name()}, &stdin, &stdout, &stderr)
+               close(done)
+       }()
+       select {
+       case <-healthCheck:
+       case <-done:
+               c.Error("command exited without health check")
+       }
+       cancel()
+       c.Check(stdout.String(), check.Equals, "")
+       c.Check(stderr.String(), check.Matches, `(?ms).*"msg":"CheckHealth called".*`)
+}
+
+type testHandler struct {
+       ctx         context.Context
+       healthCheck chan bool
+}
+
+func (th *testHandler) ServeHTTP(http.ResponseWriter, *http.Request) {}
+func (th *testHandler) CheckHealth() error {
+       ctxlog.FromContext(th.ctx).Info("CheckHealth called")
+       select {
+       case th.healthCheck <- true:
+       default:
+       }
+       return nil
+}
diff --git a/lib/service/error.go b/lib/service/error.go
new file mode 100644 (file)
index 0000000..8955210
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+import (
+       "context"
+       "net/http"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "github.com/sirupsen/logrus"
+)
+
+// ErrorHandler returns a Handler that reports itself as unhealthy and
+// responds 500 to all requests.  ErrorHandler itself logs the given
+// error once, and the handler logs it again for each incoming
+// request.
+func ErrorHandler(ctx context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, err error) Handler {
+       logger := ctxlog.FromContext(ctx)
+       logger.WithError(err).Error("unhealthy service")
+       return errorHandler{err, logger}
+}
+
+type errorHandler struct {
+       err    error
+       logger logrus.FieldLogger
+}
+
+func (eh errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+       eh.logger.WithError(eh.err).Error("unhealthy service")
+       http.Error(w, "", http.StatusInternalServerError)
+}
+
+func (eh errorHandler) CheckHealth() error {
+       return eh.err
+}
index 03f792c5e1b111c47f8d7acf33c7e544172d7579..f377d7348275bb86f5b2f50013a2bfa34133d500 100644 (file)
@@ -24,6 +24,9 @@ requirements:
       ARVADOS_API_TOKEN: $(inputs.arvados_api_token)
       ARVADOS_API_HOST_INSECURE: $(""+inputs.arvado_api_host_insecure)
   InlineJavascriptRequirement: {}
+hints:
+  DockerRequirement:
+    dockerPull: arvados/jobs
 inputs:
   arvados_api_token: string
   arvado_api_host_insecure: boolean
@@ -45,4 +48,4 @@ outputs:
     type: boolean
     outputBinding:
       outputEval: $(true)
-baseCommand: python2
\ No newline at end of file
+baseCommand: python
index 787e01ab8f7dc8be892e7c754bca4a29cba84b13..cbc2ca72f035f150fce46613fa015d299a9bbd7b 100644 (file)
@@ -69,6 +69,21 @@ var InsecureHTTPClient = &http.Client{
 var DefaultSecureClient = &http.Client{
        Timeout: 5 * time.Minute}
 
+// NewClientFromConfig creates a new Client that uses the endpoints in
+// the given cluster.
+//
+// AuthToken is left empty for the caller to populate.
+func NewClientFromConfig(cluster *Cluster) (*Client, error) {
+       ctrlURL := cluster.Services.Controller.ExternalURL
+       if ctrlURL.Host == "" {
+               return nil, fmt.Errorf("no host in config Services.Controller.ExternalURL: %v", ctrlURL)
+       }
+       return &Client{
+               APIHost:  ctrlURL.Host,
+               Insecure: cluster.TLS.Insecure,
+       }, nil
+}
+
 // NewClientFromEnv creates a new Client that uses the default HTTP
 // client with the API endpoint and credentials given by the
 // ARVADOS_API_* environment variables.
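
A minimal usage sketch (not part of the commit) for the new NewClientFromConfig constructor in sdk/go/arvados/client.go, mirroring how lib/dispatchcloud uses it above; the cluster values and token shown are illustrative:

package main

import (
	"fmt"
	"log"

	"git.curoverse.com/arvados.git/sdk/go/arvados"
)

func main() {
	// Illustrative cluster config; normally loaded from the site config file.
	cluster := &arvados.Cluster{SystemRootToken: "xxxxx-example-token"}
	cluster.Services.Controller.ExternalURL = arvados.URL{Scheme: "https", Host: "ctrl.example:443"}
	cluster.TLS.Insecure = false

	ac, err := arvados.NewClientFromConfig(cluster)
	if err != nil {
		log.Fatalf("error initializing client from cluster config: %s", err)
	}
	// NewClientFromConfig leaves AuthToken empty; the caller fills it in.
	ac.AuthToken = cluster.SystemRootToken

	fmt.Println(ac.APIHost, ac.Insecure) // ctrl.example:443 false
}
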
index 7c87ff0293052762641019f29d7ff442aa09e75d..2965d5ecb0dc8aa89da2354eea231464d9fa202f 100644 (file)
@@ -8,6 +8,7 @@ import (
        "encoding/json"
        "errors"
        "fmt"
+       "net/url"
        "os"
 
        "git.curoverse.com/arvados.git/sdk/go/config"
@@ -58,6 +59,8 @@ type RequestLimits struct {
 type Cluster struct {
        ClusterID          string `json:"-"`
        ManagementToken    string
+       SystemRootToken    string
+       Services           Services
        NodeProfiles       map[string]NodeProfile
        InstanceTypes      InstanceTypeMap
        CloudVMs           CloudVMs
@@ -67,8 +70,43 @@ type Cluster struct {
        PostgreSQL         PostgreSQL
        RequestLimits      RequestLimits
        Logging            Logging
+       TLS                TLS
 }
 
+type Services struct {
+       Controller    Service
+       DispatchCloud Service
+       Health        Service
+       Keepbalance   Service
+       Keepproxy     Service
+       Keepstore     Service
+       Keepweb       Service
+       Nodemanager   Service
+       RailsAPI      Service
+       Websocket     Service
+       Workbench     Service
+}
+
+type Service struct {
+       InternalURLs map[URL]ServiceInstance
+       ExternalURL  URL
+}
+
+// URL is a url.URL that is also usable as a JSON key/value.
+type URL url.URL
+
+// UnmarshalText implements encoding.TextUnmarshaler so URL can be
+// used as a JSON key/value.
+func (su *URL) UnmarshalText(text []byte) error {
+       u, err := url.Parse(string(text))
+       if err == nil {
+               *su = URL(*u)
+       }
+       return err
+}
+
+type ServiceInstance struct{}
+
 type Logging struct {
        Level  string
        Format string
@@ -309,3 +347,9 @@ type SystemServiceInstance struct {
        TLS      bool
        Insecure bool
 }
+
+type TLS struct {
+       Certificate string
+       Key         string
+       Insecure    bool
+}
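
A standalone sketch (not part of the commit) exercising the new arvados.URL type added to sdk/go/arvados/config.go, which implements encoding.TextUnmarshaler so service addresses can appear as config keys and values; the example address is illustrative:

package main

import (
	"fmt"
	"net/url"

	"git.curoverse.com/arvados.git/sdk/go/arvados"
)

func main() {
	var u arvados.URL
	if err := u.UnmarshalText([]byte("https://ctrl.example:443")); err != nil {
		panic(err)
	}
	// arvados.URL is a url.URL under the hood.
	fmt.Println(u.Scheme, u.Host)        // https ctrl.example:443
	fmt.Println((*url.URL)(&u).String()) // https://ctrl.example:443
}
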
index 130d8c964df2fdbc9931394049feb1bcf717dafd..1ef3b00c665e89c61aaa7853c7b0b455c944259a 100644 (file)
@@ -12,6 +12,10 @@ http {
     '"$http_referer" "$http_user_agent"';
   access_log "{{ACCESSLOG}}" customlog;
   client_body_temp_path "{{TMPDIR}}";
+  proxy_temp_path "{{TMPDIR}}";
+  fastcgi_temp_path "{{TMPDIR}}";
+  uwsgi_temp_path "{{TMPDIR}}";
+  scgi_temp_path "{{TMPDIR}}";
   upstream arv-git-http {
     server localhost:{{GITPORT}};
   }
index 7b1f6059aeef07b8ff2a2d03a6d4980f9d5a835f..6687ca491a769140aa8c803a5fd2b1a6ce3b1850 100644 (file)
@@ -582,6 +582,7 @@ def stop_keep(num_servers=2):
 
 def run_keep_proxy():
     if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(_getport('keepproxy'))
         return
     stop_keep_proxy()
 
@@ -738,7 +739,7 @@ def _getport(program):
 def _dbconfig(key):
     global _cached_db_config
     if not _cached_db_config:
-        _cached_db_config = yaml.load(open(os.path.join(
+        _cached_db_config = yaml.safe_load(open(os.path.join(
             SERVICES_SRC_DIR, 'api', 'config', 'database.yml')))
     return _cached_db_config['test'][key]
 
@@ -750,7 +751,7 @@ def _apiconfig(key):
         fullpath = os.path.join(SERVICES_SRC_DIR, 'api', 'config', f)
         if not required and not os.path.exists(fullpath):
             return {}
-        return yaml.load(fullpath)
+        return yaml.safe_load(fullpath)
     cdefault = _load('application.default.yml')
     csite = _load('application.yml', required=False)
     _cached_config = {}
@@ -769,7 +770,7 @@ def fixture(fix):
           yaml_file = yaml_file[0:trim_index]
         except ValueError:
           pass
-        return yaml.load(yaml_file)
+        return yaml.safe_load(yaml_file)
 
 def auth_token(token_name):
     return fixture("api_client_authorizations")[token_name]["api_token"]
index 01a52a5e6681ec07daaf16eb0c0c18a9b7ba2ada..540e06c6c6a0d571e7a269e5eae7c9e8a1989419 100644 (file)
@@ -859,7 +859,7 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
             if not mandatory and not os.path.exists(path):
                 continue
             with open(path) as f:
-                rails_config = yaml.load(f.read())
+                rails_config = yaml.safe_load(f.read())
                 for config_section in ['test', 'common']:
                     try:
                         key = rails_config[config_section]["blob_signing_key"]
index 80876888cf53eb512085dd88641b3388cfe0c814..2ec90050ae74ee88158d19d8a68581f17e3a9bd9 100644 (file)
@@ -15,6 +15,7 @@ test:
   adapter: postgresql
   template: template0
   encoding: utf8
+  collation: en_US.utf8
   database: arvados_test
   username: arvados
   password: xxxxxxxx
@@ -28,7 +29,4 @@ production:
   username: arvados
   password: xxxxxxxx
   host: localhost
-  # For the websockets server, prefer a larger database connection pool size since it
-  # multithreaded and can serve a large number of long-lived clients.  See also
-  # websocket_max_connections configuration option.
   pool: 50
index 0656cbf89ad34661d08bb467c96b2be152a7908f..88cd221cbf8aaed87e4f91a6289e8dd02458cd90 100644 (file)
@@ -52,7 +52,7 @@ func (s *GitoliteSuite) SetUpTest(c *check.C) {
                        APIHost:  arvadostest.APIHost(),
                        Insecure: true,
                },
-               Listen:       ":0",
+               Listen:       "localhost:0",
                GitCommand:   "/usr/share/gitolite3/gitolite-shell",
                GitoliteHome: s.gitoliteHome,
                RepoRoot:     s.tmpRepoRoot,
index 10c69eedd3bf2e2c81cb51ea7c92961d108f1204..53b636dc0e577e75bf5577e66a54059628be8774 100644 (file)
@@ -77,7 +77,7 @@ func (s *IntegrationSuite) SetUpTest(c *check.C) {
                                APIHost:  arvadostest.APIHost(),
                                Insecure: true,
                        },
-                       Listen:          ":0",
+                       Listen:          "localhost:0",
                        GitCommand:      "/usr/bin/git",
                        RepoRoot:        s.tmpRepoRoot,
                        ManagementToken: arvadostest.ManagementToken,
index dcd54e8968e930f1cdb390aa9b0e5c40182c3bdb..ae09c52f213f5d17f94445b9ad3c77cea9a21e99 100644 (file)
@@ -85,14 +85,15 @@ func doMain() error {
        }
        arv.Retries = 25
 
+       ctx, cancel := context.WithCancel(context.Background())
+
        dispatcher := dispatch.Dispatcher{
                Logger:       logger,
                Arv:          arv,
-               RunContainer: run,
+               RunContainer: (&LocalRun{startFunc, make(chan bool, 8), ctx}).run,
                PollPeriod:   time.Duration(*pollInterval) * time.Second,
        }
 
-       ctx, cancel := context.WithCancel(context.Background())
        err = dispatcher.Run(ctx)
        if err != nil {
                return err
@@ -123,7 +124,11 @@ func startFunc(container arvados.Container, cmd *exec.Cmd) error {
        return cmd.Start()
 }
 
-var startCmd = startFunc
+type LocalRun struct {
+       startCmd         func(container arvados.Container, cmd *exec.Cmd) error
+       concurrencyLimit chan bool
+       ctx              context.Context
+}
 
 // Run a container.
 //
@@ -133,14 +138,36 @@ var startCmd = startFunc
 //
 // If the container is in any other state, or is not Complete/Cancelled after
 // crunch-run terminates, mark the container as Cancelled.
-func run(dispatcher *dispatch.Dispatcher,
+func (lr *LocalRun) run(dispatcher *dispatch.Dispatcher,
        container arvados.Container,
        status <-chan arvados.Container) {
 
        uuid := container.UUID
 
        if container.State == dispatch.Locked {
+
+               select {
+               case lr.concurrencyLimit <- true:
+                       break
+               case <-lr.ctx.Done():
+                       return
+               }
+
+               defer func() { <-lr.concurrencyLimit }()
+
+               select {
+               case c := <-status:
+                       // Check for state updates after possibly
+                       // waiting to be ready-to-run
+                       if c.Priority == 0 {
+                               goto Finish
+                       }
+               default:
+                       break
+               }
+
                waitGroup.Add(1)
+               defer waitGroup.Done()
 
                cmd := exec.Command(*crunchRunCommand, uuid)
                cmd.Stdin = nil
@@ -153,7 +180,7 @@ func run(dispatcher *dispatch.Dispatcher,
                // succeed in starting crunch-run.
 
                runningCmdsMutex.Lock()
-               if err := startCmd(container, cmd); err != nil {
+               if err := lr.startCmd(container, cmd); err != nil {
                        runningCmdsMutex.Unlock()
                        dispatcher.Logger.Warnf("error starting %q for %s: %s", *crunchRunCommand, uuid, err)
                        dispatcher.UpdateState(uuid, dispatch.Cancelled)
@@ -194,9 +221,10 @@ func run(dispatcher *dispatch.Dispatcher,
                        delete(runningCmds, uuid)
                        runningCmdsMutex.Unlock()
                }
-               waitGroup.Done()
        }
 
+Finish:
+
        // If the container is not finalized, then change it to "Cancelled".
        err := dispatcher.Arv.Get("containers", uuid, nil, &container)
        if err != nil {
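
The new LocalRun type above uses a buffered channel as a counting semaphore (capacity 8) and gives up waiting if the dispatcher's context is cancelled before a slot opens. A stripped-down sketch of that pattern (not part of the commit), with illustrative names:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	limit := make(chan bool, 8) // same capacity as LocalRun's concurrencyLimit
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			select {
			case limit <- true: // acquire a slot
			case <-ctx.Done(): // dispatcher is shutting down; give up waiting
				return
			}
			defer func() { <-limit }()        // release the slot when done
			time.Sleep(10 * time.Millisecond) // stand-in for running crunch-run
			fmt.Println("finished container", i)
		}(i)
	}
	wg.Wait()
}
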
index 6bae1f40997a8a824284390a18c2da8df8568cdb..41357403f0a01c9092e2ee7503e13943ba4c2cd3 100644 (file)
@@ -73,18 +73,19 @@ func (s *TestSuite) TestIntegration(c *C) {
        dispatcher := dispatch.Dispatcher{
                Arv:        arv,
                PollPeriod: time.Second,
-               RunContainer: func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
-                       run(d, c, s)
-                       cancel()
-               },
        }
 
-       startCmd = func(container arvados.Container, cmd *exec.Cmd) error {
+       startCmd := func(container arvados.Container, cmd *exec.Cmd) error {
                dispatcher.UpdateState(container.UUID, "Running")
                dispatcher.UpdateState(container.UUID, "Complete")
                return cmd.Start()
        }
 
+       dispatcher.RunContainer = func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
+               (&LocalRun{startCmd, make(chan bool, 8), ctx}).run(d, c, s)
+               cancel()
+       }
+
        err = dispatcher.Run(ctx)
        c.Assert(err, Equals, context.Canceled)
 
@@ -175,18 +176,19 @@ func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubRespon
        dispatcher := dispatch.Dispatcher{
                Arv:        arv,
                PollPeriod: time.Second / 20,
-               RunContainer: func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
-                       run(d, c, s)
-                       cancel()
-               },
        }
 
-       startCmd = func(container arvados.Container, cmd *exec.Cmd) error {
+       startCmd := func(container arvados.Container, cmd *exec.Cmd) error {
                dispatcher.UpdateState(container.UUID, "Running")
                dispatcher.UpdateState(container.UUID, "Complete")
                return cmd.Start()
        }
 
+       dispatcher.RunContainer = func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
+               (&LocalRun{startCmd, make(chan bool, 8), ctx}).run(d, c, s)
+               cancel()
+       }
+
        re := regexp.MustCompile(`(?ms).*` + expected + `.*`)
        go func() {
                for i := 0; i < 80 && !re.MatchString(buf.String()); i++ {
index 3925b0b7b1f810c9c451c7e756693ba5875bc252..84b578a3e21ee6a1b9b70f1adf48709154452bb9 100644 (file)
@@ -987,7 +987,7 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                go func() {
                        _, err := io.Copy(response.Conn, stdinRdr)
                        if err != nil {
-                               runner.CrunchLog.Print("While writing stdin collection to docker container %q", err)
+                               runner.CrunchLog.Printf("While writing stdin collection to docker container %q", err)
                                runner.stop(nil)
                        }
                        stdinRdr.Close()
@@ -997,7 +997,7 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                go func() {
                        _, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
                        if err != nil {
-                               runner.CrunchLog.Print("While writing stdin json to docker container %q", err)
+                               runner.CrunchLog.Printf("While writing stdin json to docker container %q", err)
                                runner.stop(nil)
                        }
                        response.CloseWrite()
index 8b689efbdc1f1d731bc2a9dfb106c12e3c214cef..a9830bc1de4715d2cfdaa39049106bcf95cce779 100644 (file)
@@ -266,9 +266,10 @@ func (s *IntegrationSuite) runCurl(c *check.C, token, host, uri string, args ...
        c.Log(fmt.Sprintf("curlArgs == %#v", curlArgs))
        cmd := exec.Command("curl", curlArgs...)
        stdout, err := cmd.StdoutPipe()
-       c.Assert(err, check.Equals, nil)
-       cmd.Stderr = cmd.Stdout
-       go cmd.Start()
+       c.Assert(err, check.IsNil)
+       cmd.Stderr = os.Stderr
+       err = cmd.Start()
+       c.Assert(err, check.IsNil)
        buf := make([]byte, 2<<27)
        n, err := io.ReadFull(stdout, buf)
        // Discard (but measure size of) anything past 128 MiB.
@@ -276,9 +277,9 @@ func (s *IntegrationSuite) runCurl(c *check.C, token, host, uri string, args ...
        if err == io.ErrUnexpectedEOF {
                buf = buf[:n]
        } else {
-               c.Assert(err, check.Equals, nil)
+               c.Assert(err, check.IsNil)
                discarded, err = io.Copy(ioutil.Discard, stdout)
-               c.Assert(err, check.Equals, nil)
+               c.Assert(err, check.IsNil)
        }
        err = cmd.Wait()
        // Without "-f", curl exits 0 as long as it gets a valid HTTP
index fc4783eff9a41f342211fc1aa1e6f67520fc7185..c6fd99b9d8ed2f70b264b342ed041d5062eeb0a8 100644 (file)
@@ -152,7 +152,7 @@ func main() {
                }
                err = f.Sync()
                if err != nil {
-                       log.Fatal("sync(%s): %s", cfg.PIDFile, err)
+                       log.Fatalf("sync(%s): %s", cfg.PIDFile, err)
                }
        }
 
@@ -541,7 +541,7 @@ func (h *proxyHandler) Put(resp http.ResponseWriter, req *http.Request) {
        if locatorIn == "" {
                bytes, err2 := ioutil.ReadAll(req.Body)
                if err2 != nil {
-                       _ = errors.New(fmt.Sprintf("Error reading request body: %s", err2))
+                       err = fmt.Errorf("Error reading request body: %s", err2)
                        status = http.StatusInternalServerError
                        return
                }
index 42d990fa6675a8e6e9c7d8484448254898c7dbfc..420b1528618c10ec4f3b2f2b986060e25dfd2116 100644 (file)
@@ -6,6 +6,6 @@ source 'https://rubygems.org'
 gemspec
 group :test, :performance do
   gem 'minitest', '>= 5.0.0'
-  gem 'mocha', require: false
+  gem 'mocha', '>= 1.5.0', require: false
   gem 'rake'
 end
index b64aab2dc6cb0e189341ab93d175e27d38a659ce..f998a8f35211c89ae81dd89def87d5aef9d46412 100644 (file)
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-if not File.exists?('/usr/bin/git') then
+if not File.exist?('/usr/bin/git') then
   STDERR.puts "\nGit binary not found, aborting. Please install git and run gem build from a checked out copy of the git repository.\n\n"
   exit
 end
index eb680043e4b50bf3f44bbd28a97bd551a39c12de..e00495c04db7db621ba0bf377cbe62072b82feba 100755 (executable)
@@ -108,7 +108,7 @@ begin
                 "-G", groups.join(","),
                 l[:username],
                 out: devnull)
-        STDERR.puts "Account creation failed for #{l[:username]}: $?"
+        STDERR.puts "Account creation failed for #{l[:username]}: #{$?}"
         next
       end
       begin
@@ -121,13 +121,13 @@ begin
 
     @homedir = pwnam[l[:username]].dir
     userdotssh = File.join(@homedir, ".ssh")
-    Dir.mkdir(userdotssh) if !File.exists?(userdotssh)
+    Dir.mkdir(userdotssh) if !File.exist?(userdotssh)
 
     newkeys = "###\n###\n" + keys[l[:username]].join("\n") + "\n###\n###\n"
 
     keysfile = File.join(userdotssh, "authorized_keys")
 
-    if File.exists?(keysfile)
+    if File.exist?(keysfile)
       oldkeys = IO::read(keysfile)
     else
       oldkeys = ""
index d7fab3c0db8fe202ad979f7ed469db1632ebc685..cf69da6efcc6e94d0b877cf84d738dd0b7386fc5 100644 (file)
@@ -3,19 +3,10 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 require 'etc'
-require 'mocha/mini_test'
+require 'mocha/minitest'
 require 'ostruct'
 
 module Stubs
-  # These Etc mocks help only when we run arvados-login-sync in-process.
-
-  def setup
-    super
-    ENV['ARVADOS_VIRTUAL_MACHINE_UUID'] = 'testvm2.shell'
-    Etc.stubs(:to_enum).with(:passwd).returns stubpasswd.map { |x| OpenStruct.new x }
-    Etc.stubs(:to_enum).with(:group).returns stubgroup.map { |x| OpenStruct.new x }
-  end
-
   def stubpasswd
     [{name: 'root', uid: 0}]
   end
@@ -24,10 +15,16 @@ module Stubs
     [{name: 'root', gid: 0}]
   end
 
-  # These child-ENV tricks help only when we run arvados-login-sync as a subprocess.
 
   def setup
     super
+
+    # These Etc mocks help only when we run arvados-login-sync in-process.
+    ENV['ARVADOS_VIRTUAL_MACHINE_UUID'] = 'testvm2.shell'
+    Etc.stubs(:to_enum).with(:passwd).returns stubpasswd.map { |x| OpenStruct.new x }
+    Etc.stubs(:to_enum).with(:group).returns stubgroup.map { |x| OpenStruct.new x }
+
+    # These child-ENV tricks help only when we run arvados-login-sync as a subprocess.
     @env_was = Hash[ENV]
     @tmpdir = Dir.mktmpdir
   end
index 17942c2cffa784993b4338dc64711e56f5e17028..e90c16d64fae900df698c1db9d0cd6814022604b 100644 (file)
@@ -10,17 +10,14 @@ class TestAddUser < Minitest::Test
   include Stubs
 
   def test_useradd_error
+    valid_groups = %w(docker admin fuse).select { |g| Etc.getgrnam(g) rescue false }
     # binstub_new_user/useradd will exit non-zero because its args
     # won't match any line in this empty file:
     File.open(@tmpdir+'/succeed', 'w') do |f| end
     invoke_sync binstubs: ['new_user']
     spied = File.read(@tmpdir+'/spy')
     assert_match %r{useradd -m -c active -s /bin/bash -G (fuse)? active}, spied
-    # BUG(TC): This assertion succeeds only if docker and fuse groups
-    # exist on the host, but is insensitive to the admin group (groups
-    # are quietly ignored by login-sync if they don't exist on the
-    # current host).
-    assert_match %r{useradd -m -c adminroot -s /bin/bash -G (docker)?(,admin)?(,fuse)? adminroot}, spied
+    assert_match %r{useradd -m -c adminroot -s /bin/bash -G #{valid_groups.join(',')} adminroot}, spied
   end
 
   def test_useradd_success
index 8c443fd71afd3ddb2aee089df9bcb745b1c3315d..74933718c76ac8e0e499f62bf3ede740308ce073 100755 (executable)
@@ -285,6 +285,27 @@ run() {
     fi
 }
 
+update() {
+    CONFIG=$1
+    TAG=$2
+
+    if test -n "$TAG"
+    then
+        if test $(echo $TAG | cut -c1-1) != '-' ; then
+           TAG=":$TAG"
+            shift
+        else
+            unset TAG
+        fi
+    fi
+
+    if echo "$CONFIG" | grep 'demo$' ; then
+       docker pull arvados/arvbox-demo$TAG
+    else
+       docker pull arvados/arvbox-dev$TAG
+    fi
+}
+
 stop() {
     if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
         docker stop $ARVBOX_CONTAINER
@@ -383,6 +404,13 @@ case "$subcmd" in
         run $@
         ;;
 
+    update)
+        check $@
+        stop
+       update $@
+        run $@
+        ;;
+
     ip)
         getip
         ;;
@@ -487,10 +515,13 @@ case "$subcmd" in
         fi
         ;;
 
-    install-root-cert)
-       set -x
-       sudo cp $VAR_DATA/root-cert.pem /usr/local/share/ca-certificates/${ARVBOX_CONTAINER}-testing-cert.crt
-       sudo update-ca-certificates
+    root-cert)
+       CERT=$PWD/${ARVBOX_CONTAINER}-root-cert.pem
+       if test -n "$1" ; then
+           CERT="$1"
+       fi
+       docker exec $ARVBOX_CONTAINER cat /var/lib/arvados/root-cert.pem > "$CERT"
+       echo "Certificate copied to $CERT"
        ;;
 
     devenv)
@@ -530,17 +561,19 @@ case "$subcmd" in
     *)
         echo "Arvados-in-a-box                      http://arvados.org"
         echo
-        echo "build   <config>      build arvbox Docker image"
-        echo "rebuild <config>      build arvbox Docker image, no layer cache"
         echo "start|run <config> [tag]  start $ARVBOX_CONTAINER container"
-        echo "open       open arvbox workbench in a web browser"
-        echo "shell      enter arvbox shell"
-        echo "ip         print arvbox docker container ip address"
-        echo "host       print arvbox published host"
-        echo "status     print some information about current arvbox"
         echo "stop       stop arvbox container"
         echo "restart <config>  stop, then run again"
-        echo "reboot  <config>  stop, build arvbox Docker image, run"
+        echo "status     print some information about current arvbox"
+        echo "ip         print arvbox docker container ip address"
+        echo "host       print arvbox published host"
+        echo "shell      enter arvbox shell"
+        echo "open       open arvbox workbench in a web browser"
+        echo "root-cert  get copy of root certificate"
+        echo "update  <config> stop, pull latest image, run"
+        echo "build   <config> build arvbox Docker image"
+        echo "reboot  <config> stop, build arvbox Docker image, run"
+        echo "rebuild <config> build arvbox Docker image, no layer cache"
         echo "reset      delete arvbox arvados data (be careful!)"
         echo "destroy    delete all arvbox code and data (be careful!)"
         echo "log <service> tail log of specified service"
index 1949af435bd2de82c3c9e2398ce58fa873477035..741bd33c4998cab201e6e9e60f0c58a69a3414fd 100644 (file)
@@ -16,11 +16,11 @@ RUN apt-get update && \
     pkg-config libattr1-dev python-llfuse python-pycurl \
     libwww-perl libio-socket-ssl-perl libcrypt-ssleay-perl \
     libjson-perl nginx gitolite3 lsof libreadline-dev \
-    apt-transport-https ca-certificates slurm-wlm \
+    apt-transport-https ca-certificates \
     linkchecker python3-virtualenv python-virtualenv xvfb iceweasel \
     libgnutls28-dev python3-dev vim cadaver cython gnupg dirmngr \
     libsecret-1-dev r-base r-cran-testthat libxml2-dev pandoc \
-    python3-setuptools python3-pip openjdk-8-jdk && \
+    python3-setuptools python3-pip openjdk-8-jdk bsdmainutils && \
     apt-get clean
 
 ENV RUBYVERSION_MINOR 2.3
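This package-list change drops slurm-wlm, matching the removal of the slurmctld/slurmd service scripts further down, and adds bsdmainutils. A quick post-build check, assuming the resulting dev image is tagged arvados/arvbox-dev as the update() helper above expects:

    # slurmctld should no longer resolve; column (from bsdmainutils) should
    # if the image defines its own entrypoint, add --entrypoint bash and adjust
    docker run --rm arvados/arvbox-dev bash -c 'command -v slurmctld; command -v column'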
index bb0ff76fe8f065c1be45338f677cf0e7cd99b8ed..22668253e1bf038c2bcbd297bff85233b92ee430 100644 (file)
@@ -12,5 +12,7 @@ RUN echo "development" > /var/lib/arvados/api_rails_env
 RUN echo "development" > /var/lib/arvados/sso_rails_env
 RUN echo "development" > /var/lib/arvados/workbench_rails_env
 
-RUN mkdir /etc/test-service && ln -sf /var/lib/arvbox/service/postgres /etc/test-service
+RUN mkdir /etc/test-service && \
+    ln -sf /var/lib/arvbox/service/postgres /etc/test-service && \
+    ln -sf /var/lib/arvbox/service/certificate /etc/test-service
 RUN mkdir /etc/devenv-service
\ No newline at end of file
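The dev image's /etc/test-service directory now also links the certificate service, so test runs generate the self-signed server certificate that the nginx config and waitforpostgres.sh below wait for. A minimal sketch of how such service links are consumed, assuming a runit-style supervisor (the actual arvbox entry point is not shown in this diff):

    # hypothetical; whatever supervises /etc/test-service does the equivalent of
    runsvdir -P /etc/test-service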
index 0f283830f5b4e62fec3f59d761bdfb6704163e4e..482934c9151e295b38182081e3b0f4e6be8bc1a5 100755 (executable)
@@ -18,9 +18,6 @@ fi
 
 set -u
 
-if ! test -s /var/lib/arvados/api_uuid_prefix ; then
-    ruby -e 'puts "#{rand(2**64).to_s(36)[0,5]}"' > /var/lib/arvados/api_uuid_prefix
-fi
 uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
 
 if ! test -s /var/lib/arvados/api_secret_token ; then
deleted file mode 120000 (symlink)
index a388c8b67bf16bbb16601007540e58f1372ebc85..0000000000000000000000000000000000000000
+++ /dev/null
@@ -1 +0,0 @@
-/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
new file mode 100755 (executable)
index 0000000000000000000000000000000000000000..4014c5c8b040316c4850df4d788476854d06527c
--- /dev/null
@@ -0,0 +1,126 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cat <<EOF >/var/lib/arvados/nginx.conf
+worker_processes auto;
+pid /var/lib/arvados/nginx.pid;
+
+error_log stderr;
+daemon off;
+user arvbox;
+
+events {
+       worker_connections 64;
+}
+
+http {
+     access_log off;
+     include /etc/nginx/mime.types;
+     default_type application/octet-stream;
+     server {
+            listen ${services[doc]} default_server;
+            listen [::]:${services[doc]} default_server;
+            root /usr/src/arvados/doc/.site;
+            index index.html;
+            server_name _;
+     }
+
+  server {
+    listen 80 default_server;
+    server_name _;
+    return 301 https://\$host\$request_uri;
+  }
+
+  upstream controller {
+    server localhost:${services[controller]};
+  }
+  server {
+    listen *:${services[controller-ssl]} ssl default_server;
+    server_name controller;
+    ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+    ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+    location  / {
+      proxy_pass http://controller;
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
+
+upstream arvados-ws {
+  server localhost:${services[websockets]};
+}
+server {
+  listen *:${services[websockets-ssl]} ssl default_server;
+  server_name           websockets;
+
+  proxy_connect_timeout 90s;
+  proxy_read_timeout    300s;
+
+  ssl                   on;
+  ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+  ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+
+  location / {
+    proxy_pass          http://arvados-ws;
+    proxy_set_header    Upgrade         \$http_upgrade;
+    proxy_set_header    Connection      "upgrade";
+    proxy_set_header Host \$http_host;
+    proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+  }
+}
+
+  upstream workbench2 {
+    server localhost:${services[workbench2]};
+  }
+  server {
+    listen *:${services[workbench2-ssl]} ssl default_server;
+    server_name workbench2;
+    ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+    ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+    location  / {
+      proxy_pass http://workbench2;
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+    location  /sockjs-node {
+      proxy_pass http://workbench2;
+      proxy_set_header    Upgrade         \$http_upgrade;
+      proxy_set_header    Connection      "upgrade";
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+    }
+  }
+
+  upstream keep-web {
+    server localhost:${services[keep-web]};
+  }
+  server {
+    listen *:${services[keep-web-ssl]} ssl default_server;
+    server_name keep-web;
+    ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+    ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+    location  / {
+      proxy_pass http://keep-web;
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
+
+}
+
+EOF
+
+exec nginx -c /var/lib/arvados/nginx.conf
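Compared with the run-service script deleted below, this version adds a "user arvbox;" directive and a plain-HTTP server on port 80 that redirects everything to HTTPS. A quick way to exercise the redirect from the host, assuming the container answers on the address reported by arvbox ip:

    curl -sI "http://$(arvbox ip)/" | grep -i '^location:'
    # expected shape (hostname will vary): Location: https://.../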
diff --git a/tools/arvbox/lib/arvbox/docker/service/nginx/run-service b/tools/arvbox/lib/arvbox/docker/service/nginx/run-service
deleted file mode 100755 (executable)
index cf72ed2..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-exec 2>&1
-set -ex -o pipefail
-
-. /usr/local/lib/arvbox/common.sh
-
-cat <<EOF >/var/lib/arvados/nginx.conf
-worker_processes auto;
-pid /var/lib/arvados/nginx.pid;
-
-error_log stderr;
-daemon off;
-
-events {
-       worker_connections 64;
-}
-
-http {
-     access_log off;
-     include /etc/nginx/mime.types;
-     default_type application/octet-stream;
-     server {
-            listen ${services[doc]} default_server;
-            listen [::]:${services[doc]} default_server;
-            root /usr/src/arvados/doc/.site;
-            index index.html;
-            server_name _;
-     }
-
-  upstream controller {
-    server localhost:${services[controller]};
-  }
-  server {
-    listen *:${services[controller-ssl]} ssl default_server;
-    server_name controller;
-    ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
-    ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
-    location  / {
-      proxy_pass http://controller;
-      proxy_set_header Host \$http_host;
-      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
-      proxy_set_header X-Forwarded-Proto https;
-      proxy_redirect off;
-    }
-  }
-
-upstream arvados-ws {
-  server localhost:${services[websockets]};
-}
-server {
-  listen *:${services[websockets-ssl]} ssl default_server;
-  server_name           websockets;
-
-  proxy_connect_timeout 90s;
-  proxy_read_timeout    300s;
-
-  ssl                   on;
-  ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
-  ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
-
-  location / {
-    proxy_pass          http://arvados-ws;
-    proxy_set_header    Upgrade         \$http_upgrade;
-    proxy_set_header    Connection      "upgrade";
-    proxy_set_header Host \$http_host;
-    proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
-  }
-}
-
-  upstream workbench2 {
-    server localhost:${services[workbench2]};
-  }
-  server {
-    listen *:${services[workbench2-ssl]} ssl default_server;
-    server_name workbench2;
-    ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
-    ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
-    location  / {
-      proxy_pass http://workbench2;
-      proxy_set_header Host \$http_host;
-      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
-      proxy_set_header X-Forwarded-Proto https;
-      proxy_redirect off;
-    }
-    location  /sockjs-node {
-      proxy_pass http://workbench2;
-      proxy_set_header    Upgrade         \$http_upgrade;
-      proxy_set_header    Connection      "upgrade";
-      proxy_set_header Host \$http_host;
-      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
-    }
-  }
-
-  upstream keep-web {
-    server localhost:${services[keep-web]};
-  }
-  server {
-    listen *:${services[keep-web-ssl]} ssl default_server;
-    server_name keep-web;
-    ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
-    ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
-    location  / {
-      proxy_pass http://keep-web;
-      proxy_set_header Host \$http_host;
-      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
-      proxy_set_header X-Forwarded-Proto https;
-      proxy_redirect off;
-    }
-  }
-
-}
-
-EOF
-
-exec nginx -c /var/lib/arvados/nginx.conf
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/main/.gitstub
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/run b/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/run
deleted file mode 120000 (symlink)
index d6aef4a..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmctld/run b/tools/arvbox/lib/arvbox/docker/service/slurmctld/run
deleted file mode 100755 (executable)
index bb500a5..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-exec 2>&1
-set -eux -o pipefail
-
-. /usr/local/lib/arvbox/common.sh
-
-cat > /etc/slurm-llnl/slurm.conf  <<EOF
-ControlMachine=$HOSTNAME
-ControlAddr=$HOSTNAME
-AuthType=auth/munge
-DefaultStorageLoc=/var/log/slurm-llnl
-SelectType=select/cons_res
-SelectTypeParameters=CR_CPU_Memory
-SlurmUser=arvbox
-SlurmdUser=arvbox
-SlurmctldPort=7002
-SlurmctldTimeout=300
-SlurmdPort=7003
-SlurmdSpoolDir=/var/tmp/slurmd.spool
-SlurmdTimeout=300
-StateSaveLocation=/var/tmp/slurm.state
-NodeName=$HOSTNAME
-PartitionName=compute State=UP Default=YES Nodes=$HOSTNAME
-EOF
-
-mkdir -p /var/run/munge
-
-/usr/sbin/munged -f
-
-exec /usr/sbin/slurmctld -v -D
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmd/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/slurmd/log/main/.gitstub
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmd/log/run b/tools/arvbox/lib/arvbox/docker/service/slurmd/log/run
deleted file mode 120000 (symlink)
index d6aef4a..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmd/run b/tools/arvbox/lib/arvbox/docker/service/slurmd/run
deleted file mode 100755 (executable)
index 8656b27..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-exec 2>&1
-set -eux -o pipefail
-
-exec /usr/local/lib/arvbox/runsu.sh /usr/sbin/slurmd -v -D
index af49d4b3c0f829618f6572b800b5eb85597fc779..cbd3b2fbef2089dfd21d0b40e57cce7c130f2677 100755 (executable)
@@ -25,10 +25,10 @@ fi
 
 set -u
 
-if ! test -s /var/lib/arvados/sso_uuid_prefix ; then
-  ruby -e 'puts "#{rand(2**64).to_s(36)[0,5]}"' > /var/lib/arvados/sso_uuid_prefix
+if ! test -s /var/lib/arvados/api_uuid_prefix ; then
+  ruby -e 'puts "x#{rand(2**64).to_s(36)[0,4]}"' > /var/lib/arvados/api_uuid_prefix
 fi
-uuid_prefix=$(cat /var/lib/arvados/sso_uuid_prefix)
+uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
 
 if ! test -s /var/lib/arvados/sso_secret_token ; then
   ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/sso_secret_token
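The API UUID prefix is now generated here rather than in api-setup.sh (see the hunk removed above): an "x" followed by four base-36 characters, so the prefix is always five characters and starts with a letter. A one-off sanity check of the stored value, not part of arvbox itself:

    ruby -e 'pfx = File.read("/var/lib/arvados/api_uuid_prefix").strip; abort("unexpected prefix: " + pfx) unless pfx =~ /\Ax[0-9a-z]{4}\z/; puts pfx'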
index 58f156cb1775dfb9cbab2a25a8f3d5a3d501e472..6bda618ab899e2a8ca1a429bf319f82263995c49 100755 (executable)
@@ -1,8 +1,14 @@
-#!/bin/sh
+#!/bin/bash
 # Copyright (C) The Arvados Authors. All rights reserved.
 #
 # SPDX-License-Identifier: AGPL-3.0
 
+. /usr/local/lib/arvbox/common.sh
+
 while ! psql postgres -c\\du >/dev/null 2>/dev/null ; do
     sleep 1
 done
+
+while ! test -s /var/lib/arvados/server-cert-${localip}.pem ; do
+    sleep 1
+done
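With this change waitforpostgres.sh blocks on two preconditions: postgres answering \du and the per-host server certificate written by the certificate service. When a dependent service never starts, both can be checked by hand, assuming ARVBOX_CONTAINER is set to the container name as the arvbox script does:

    # (may need to run as the database owner inside the container)
    docker exec $ARVBOX_CONTAINER psql postgres -c '\du' >/dev/null && echo "postgres is up"
    docker exec $ARVBOX_CONTAINER sh -c 'ls -l /var/lib/arvados/server-cert-*.pem'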