Merge branch '18947-githttpd'
[arvados.git] / lib / boot / supervisor.go
index 3484a1444e786cc5f026f0d0a68ada822b79ffb1..94cd5d000023ce4fb3c5689073871f091cb22066 100644 (file)
@@ -14,12 +14,14 @@ import (
        "io"
        "io/ioutil"
        "net"
+       "net/url"
        "os"
        "os/exec"
        "os/signal"
        "os/user"
        "path/filepath"
        "reflect"
+       "strconv"
        "strings"
        "sync"
        "syscall"
@@ -35,95 +37,212 @@ import (
 )
 
 type Supervisor struct {
-       SourcePath           string // e.g., /home/username/src/arvados
-       SourceVersion        string // e.g., acbd1324...
-       ClusterType          string // e.g., production
-       ListenHost           string // e.g., localhost
-       ControllerAddr       string // e.g., 127.0.0.1:8000
+       // Config file location like "/etc/arvados/config.yml", or "-"
+       // to read from Stdin (see below).
+       ConfigPath string
+       // Literal config file (useful for test suites). If non-empty,
+       // this is used instead of ConfigPath.
+       ConfigYAML string
+       // Path to arvados source tree. Only used for dev/test
+       // clusters.
+       SourcePath string
+       // Version number to build into binaries. Only used for
+       // dev/test clusters.
+       SourceVersion string
+       // "production", "development", or "test".
+       ClusterType string
+       // Listening address for external services, and internal
+       // services whose InternalURLs are not explicitly configured.
+       // If blank, listen on the configured controller ExternalURL
+       // host; if that is also blank, listen on all addresses
+       // (0.0.0.0).
+       ListenHost string
+       // Default host:port for controller ExternalURL if not
+       // explicitly configured in config file. If blank, use a
+       // random port on ListenHost.
+       ControllerAddr string
+       // Path to arvados-workbench2 source tree checkout.
+       Workbench2Source     string
+       NoWorkbench1         bool
+       NoWorkbench2         bool
        OwnTemporaryDatabase bool
+       Stdin                io.Reader
        Stderr               io.Writer
 
-       logger  logrus.FieldLogger
-       cluster *arvados.Cluster
+       logger   logrus.FieldLogger
+       cluster  *arvados.Cluster       // nil if this is a multi-cluster supervisor
+       children map[string]*Supervisor // nil if this is a single-cluster supervisor
 
        ctx           context.Context
        cancel        context.CancelFunc
-       done          chan struct{} // closed when child procs/services have shut down
-       err           error         // error that caused shutdown (valid when done is closed)
-       healthChecker *health.Aggregator
+       done          chan struct{}      // closed when child procs/services have shut down
+       err           error              // error that caused shutdown (valid when done is closed)
+       healthChecker *health.Aggregator // nil if this is a multi-cluster supervisor, or still booting
        tasksReady    map[string]chan bool
        waitShutdown  sync.WaitGroup
 
-       tempdir    string
+       bindir     string
+       tempdir    string // in production mode, this is accessible only to root
+       wwwtempdir string // in production mode, this is accessible only to www-data
        configfile string
        environ    []string // for child processes
 }
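
For orientation, here is a minimal sketch of how a dev boot might populate the exported fields above and drive the new lifecycle. The import path follows the module path used in this file's -ldflags; the concrete paths, cluster type, and option values are hypothetical, not defaults.

```go
// Hypothetical dev-cluster boot; all values are examples only.
package main

import (
	"context"
	"log"
	"os"

	"git.arvados.org/arvados.git/lib/boot"
)

func main() {
	super := &boot.Supervisor{
		ConfigPath:           "/etc/arvados/config.yml",
		SourcePath:           "/home/username/src/arvados", // dev/test clusters only
		ClusterType:          "development",
		ListenHost:           "localhost",
		NoWorkbench2:         true, // skip workbench2 in this sketch
		OwnTemporaryDatabase: true,
		Stdin:                os.Stdin,
		Stderr:               os.Stderr,
	}
	// Start picks its logger up from the context (ctxlog) and loads the
	// config itself; there is no longer a cfg/cfgPath argument.
	super.Start(context.Background())
	defer super.Stop()
	if !super.WaitReady() {
		super.Stop()
		log.Fatal("boot failed: ", super.Wait())
	}
	log.Print("cluster is ready; waiting for shutdown")
	log.Print("shut down: ", super.Wait())
}
```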
 
-func (super *Supervisor) Start(ctx context.Context, cfg *arvados.Config, cfgPath string) {
+func (super *Supervisor) Clusters() map[string]*arvados.Cluster {
+       m := map[string]*arvados.Cluster{}
+       if super.cluster != nil {
+               m[super.cluster.ClusterID] = super.cluster
+       }
+       for id, super2 := range super.children {
+               m[id] = super2.Cluster("")
+       }
+       return m
+}
+
+func (super *Supervisor) Cluster(id string) *arvados.Cluster {
+       if super.children != nil {
+               return super.children[id].Cluster(id)
+       } else {
+               return super.cluster
+       }
+}
+
+func (super *Supervisor) Start(ctx context.Context) {
+       super.logger = ctxlog.FromContext(ctx)
        super.ctx, super.cancel = context.WithCancel(ctx)
        super.done = make(chan struct{})
 
+       sigch := make(chan os.Signal, 1)
+       signal.Notify(sigch, syscall.SIGINT, syscall.SIGTERM)
+       defer signal.Stop(sigch)
        go func() {
-               defer close(super.done)
+               for sig := range sigch {
+                       super.logger.WithField("signal", sig).Info("caught signal")
+                       if super.err == nil {
+                               super.err = fmt.Errorf("caught signal %s", sig)
+                       }
+                       super.cancel()
+               }
+       }()
 
-               sigch := make(chan os.Signal)
-               signal.Notify(sigch, syscall.SIGINT, syscall.SIGTERM)
-               defer signal.Stop(sigch)
-               go func() {
-                       for sig := range sigch {
-                               super.logger.WithField("signal", sig).Info("caught signal")
-                               if super.err == nil {
-                                       super.err = fmt.Errorf("caught signal %s", sig)
-                               }
-                               super.cancel()
+       hupch := make(chan os.Signal, 1)
+       signal.Notify(hupch, syscall.SIGHUP)
+       defer signal.Stop(hupch)
+       go func() {
+               for sig := range hupch {
+                       super.logger.WithField("signal", sig).Info("caught signal")
+                       if super.err == nil {
+                               super.err = errNeedConfigReload
                        }
-               }()
+                       super.cancel()
+               }
+       }()
+
+       loaderStdin := super.Stdin
+       if super.ConfigYAML != "" {
+               loaderStdin = bytes.NewBufferString(super.ConfigYAML)
+       }
+       loader := config.NewLoader(loaderStdin, super.logger)
+       loader.SkipLegacy = true
+       loader.SkipAPICalls = true
+       loader.Path = super.ConfigPath
+       if super.ConfigYAML != "" {
+               loader.Path = "-"
+       }
+       cfg, err := loader.Load()
+       if err != nil {
+               super.err = err
+               close(super.done)
+               super.cancel()
+               return
+       }
+
+       if super.ConfigPath != "" && super.ConfigPath != "-" && cfg.AutoReloadConfig {
+               go watchConfig(super.ctx, super.logger, super.ConfigPath, copyConfig(cfg), func() {
+                       if super.err == nil {
+                               super.err = errNeedConfigReload
+                       }
+                       super.cancel()
+               })
+       }
 
-               hupch := make(chan os.Signal)
-               signal.Notify(hupch, syscall.SIGHUP)
-               defer signal.Stop(hupch)
+       if len(cfg.Clusters) > 1 {
+               super.startFederation(cfg)
                go func() {
-                       for sig := range hupch {
-                               super.logger.WithField("signal", sig).Info("caught signal")
+                       defer super.cancel()
+                       defer close(super.done)
+                       for _, super2 := range super.children {
+                               err := super2.Wait()
                                if super.err == nil {
-                                       super.err = errNeedConfigReload
+                                       super.err = err
                                }
-                               super.cancel()
                        }
                }()
-
-               if cfgPath != "" && cfgPath != "-" && cfg.AutoReloadConfig {
-                       go watchConfig(super.ctx, super.logger, cfgPath, copyConfig(cfg), func() {
+       } else {
+               go func() {
+                       defer super.cancel()
+                       defer close(super.done)
+                       super.cluster, super.err = cfg.GetCluster("")
+                       if super.err != nil {
+                               return
+                       }
+                       err := super.runCluster()
+                       if err != nil {
+                               super.logger.WithError(err).Info("supervisor shut down")
                                if super.err == nil {
-                                       super.err = errNeedConfigReload
+                                       super.err = err
                                }
-                               super.cancel()
-                       })
-               }
-
-               err := super.run(cfg)
-               if err != nil {
-                       super.logger.WithError(err).Warn("supervisor shut down")
-                       if super.err == nil {
-                               super.err = err
                        }
-               }
-       }()
+               }()
+       }
 }
 
+// Wait returns when all child processes and goroutines have exited.
 func (super *Supervisor) Wait() error {
        <-super.done
        return super.err
 }
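
Wait returns errNeedConfigReload when a SIGHUP or a config-file change triggered the shutdown, which invites a restart loop in the caller. A hedged sketch of that pattern, written as package-internal code because errNeedConfigReload is unexported; the real boot command may be structured differently.

```go
// Sketch: reboot the supervisor whenever Wait reports a config reload.
func runLoop(ctx context.Context, newSupervisor func() *Supervisor) error {
	for {
		super := newSupervisor()
		super.Start(ctx)
		err := super.Wait()
		if err == errNeedConfigReload && ctx.Err() == nil {
			continue // SIGHUP or config-file change: boot again with fresh config
		}
		return err
	}
}
```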
 
-func (super *Supervisor) run(cfg *arvados.Config) error {
-       defer super.cancel()
+// startFederation starts a child Supervisor for each cluster in the
+// given config. Each is a copy of the original/parent with the
+// original config reduced to a single cluster.
+func (super *Supervisor) startFederation(cfg *arvados.Config) {
+       super.children = map[string]*Supervisor{}
+       for id, cc := range cfg.Clusters {
+               super2 := *super
+               yaml, err := json.Marshal(arvados.Config{Clusters: map[string]arvados.Cluster{id: cc}})
+               if err != nil {
+                       panic(fmt.Sprintf("json.Marshal partial config: %s", err))
+               }
+               super2.ConfigYAML = string(yaml)
+               super2.ConfigPath = "-"
+               super2.children = nil
+
+               if super2.ClusterType == "test" {
+                       super2.Stderr = &service.LogPrefixer{
+                               Writer: super.Stderr,
+                               Prefix: []byte("[" + id + "] "),
+                       }
+               }
+               super2.Start(super.ctx)
+               super.children[id] = &super2
+       }
+}
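
startFederation hands each child its slice of the config through ConfigYAML using json.Marshal; that works because JSON output is syntactically valid YAML, so the loader's "-" (stdin) path in Start parses it unchanged. The sketch below shows that carve-out/reload round trip as if it were added to this file (it only uses imports the file already has); error handling is kept minimal and the helper name is hypothetical.

```go
// Sketch of the per-cluster carve-out performed by startFederation,
// reloaded through the same loader Start uses.
func carveOutCluster(cfg *arvados.Config, id string, logger logrus.FieldLogger) (*arvados.Cluster, error) {
	cc, ok := cfg.Clusters[id]
	if !ok {
		return nil, fmt.Errorf("no such cluster: %q", id)
	}
	// json.Marshal output is valid YAML, so it can be fed back via "-"/stdin.
	buf, err := json.Marshal(arvados.Config{Clusters: map[string]arvados.Cluster{id: cc}})
	if err != nil {
		return nil, err
	}
	loader := config.NewLoader(bytes.NewBuffer(buf), logger)
	loader.Path = "-"
	loader.SkipLegacy = true
	loader.SkipAPICalls = true
	single, err := loader.Load()
	if err != nil {
		return nil, err
	}
	return single.GetCluster("")
}
```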
 
+func (super *Supervisor) runCluster() error {
        cwd, err := os.Getwd()
        if err != nil {
                return err
        }
-       if !strings.HasPrefix(super.SourcePath, "/") {
+       if super.ClusterType == "test" && super.SourcePath == "" {
+               // When invoked by test suite, default to current
+               // source tree
+               buf, err := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput()
+               if err != nil {
+                       return fmt.Errorf("git rev-parse: %w", err)
+               }
+               super.SourcePath = strings.TrimSuffix(string(buf), "\n")
+       } else if !strings.HasPrefix(super.SourcePath, "/") {
                super.SourcePath = filepath.Join(cwd, super.SourcePath)
        }
        super.SourcePath, err = filepath.EvalSymlinks(super.SourcePath)
@@ -131,27 +250,54 @@ func (super *Supervisor) run(cfg *arvados.Config) error {
                return err
        }
 
-       super.tempdir, err = ioutil.TempDir("", "arvados-server-boot-")
-       if err != nil {
-               return err
+       if super.ListenHost == "" {
+               if urlhost := super.cluster.Services.Controller.ExternalURL.Host; urlhost != "" {
+                       if h, _, _ := net.SplitHostPort(urlhost); h != "" {
+                               super.ListenHost = h
+                       } else {
+                               super.ListenHost = urlhost
+                       }
+               } else {
+                       super.ListenHost = "0.0.0.0"
+               }
        }
-       defer os.RemoveAll(super.tempdir)
-       if err := os.Mkdir(filepath.Join(super.tempdir, "bin"), 0755); err != nil {
-               return err
+
+       // Choose bin and temp dirs: /var/lib/arvados/... in
+       // production, transient tempdir otherwise.
+       if super.ClusterType == "production" {
+               // These dirs have already been created by
+               // "arvados-server install" (or by extracting a
+               // package).
+               super.tempdir = "/var/lib/arvados/tmp"
+               super.wwwtempdir = "/var/lib/arvados/wwwtmp"
+               super.bindir = "/var/lib/arvados/bin"
+       } else {
+               super.tempdir, err = ioutil.TempDir("", "arvados-server-boot-")
+               if err != nil {
+                       return err
+               }
+               defer os.RemoveAll(super.tempdir)
+               super.wwwtempdir = super.tempdir
+               super.bindir = filepath.Join(super.tempdir, "bin")
+               if err := os.Mkdir(super.bindir, 0755); err != nil {
+                       return err
+               }
        }
 
        // Fill in any missing config keys, and write the resulting
        // config in the temp dir for child services to use.
-       err = super.autofillConfig(cfg)
+       err = super.autofillConfig()
        if err != nil {
                return err
        }
-       conffile, err := os.OpenFile(filepath.Join(super.tempdir, "config.yml"), os.O_CREATE|os.O_WRONLY, 0644)
+       conffile, err := os.OpenFile(filepath.Join(super.wwwtempdir, "config.yml"), os.O_CREATE|os.O_WRONLY, 0644)
        if err != nil {
                return err
        }
        defer conffile.Close()
-       err = json.NewEncoder(conffile).Encode(cfg)
+       err = json.NewEncoder(conffile).Encode(arvados.Config{
+               Clusters: map[string]arvados.Cluster{
+                       super.cluster.ClusterID: *super.cluster}})
        if err != nil {
                return err
        }
@@ -166,12 +312,11 @@ func (super *Supervisor) run(cfg *arvados.Config) error {
        super.setEnv("ARVADOS_CONFIG", super.configfile)
        super.setEnv("RAILS_ENV", super.ClusterType)
        super.setEnv("TMPDIR", super.tempdir)
-       super.prependEnv("PATH", super.tempdir+"/bin:/var/lib/arvados/bin:")
-
-       super.cluster, err = cfg.GetCluster("")
-       if err != nil {
-               return err
+       super.prependEnv("PATH", "/var/lib/arvados/bin:")
+       if super.ClusterType != "production" {
+               super.prependEnv("PATH", super.tempdir+"/bin:")
        }
+
        // Now that we have the config, replace the bootstrap logger
        // with a new one according to the logging config.
        loglevel := super.cluster.SystemLogs.LogLevel
@@ -182,16 +327,18 @@ func (super *Supervisor) run(cfg *arvados.Config) error {
                "PID": os.Getpid(),
        })
 
-       if super.SourceVersion == "" {
+       if super.SourceVersion == "" && super.ClusterType == "production" {
+               // don't need SourceVersion
+       } else if super.SourceVersion == "" {
                // Find current source tree version.
                var buf bytes.Buffer
-               err = super.RunProgram(super.ctx, ".", &buf, nil, "git", "diff", "--shortstat")
+               err = super.RunProgram(super.ctx, ".", runOptions{output: &buf}, "git", "diff", "--shortstat")
                if err != nil {
                        return err
                }
                dirty := buf.Len() > 0
                buf.Reset()
-               err = super.RunProgram(super.ctx, ".", &buf, nil, "git", "log", "-n1", "--format=%H")
+               err = super.RunProgram(super.ctx, ".", runOptions{output: &buf}, "git", "log", "-n1", "--format=%H")
                if err != nil {
                        return err
                }
@@ -216,23 +363,32 @@ func (super *Supervisor) run(cfg *arvados.Config) error {
                createCertificates{},
                runPostgreSQL{},
                runNginx{},
-               runServiceCommand{name: "controller", svc: super.cluster.Services.Controller, depends: []supervisedTask{runPostgreSQL{}}},
-               runGoProgram{src: "services/arv-git-httpd", svc: super.cluster.Services.GitHTTP},
+               runServiceCommand{name: "controller", svc: super.cluster.Services.Controller, depends: []supervisedTask{seedDatabase{}}},
+               runServiceCommand{name: "git-httpd", svc: super.cluster.Services.GitHTTP},
                runGoProgram{src: "services/health", svc: super.cluster.Services.Health},
-               runGoProgram{src: "services/keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
-               runGoProgram{src: "services/keepstore", svc: super.cluster.Services.Keepstore},
+               runServiceCommand{name: "keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
+               runServiceCommand{name: "keepstore", svc: super.cluster.Services.Keepstore},
                runGoProgram{src: "services/keep-web", svc: super.cluster.Services.WebDAV},
-               runServiceCommand{name: "ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{runPostgreSQL{}}},
+               runServiceCommand{name: "ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{seedDatabase{}}},
                installPassenger{src: "services/api"},
-               runPassenger{src: "services/api", svc: super.cluster.Services.RailsAPI, depends: []supervisedTask{createCertificates{}, runPostgreSQL{}, installPassenger{src: "services/api"}}},
-               installPassenger{src: "apps/workbench", depends: []supervisedTask{installPassenger{src: "services/api"}}}, // dependency ensures workbench doesn't delay api startup
-               runPassenger{src: "apps/workbench", svc: super.cluster.Services.Workbench1, depends: []supervisedTask{installPassenger{src: "apps/workbench"}}},
+               runPassenger{src: "services/api", varlibdir: "railsapi", svc: super.cluster.Services.RailsAPI, depends: []supervisedTask{createCertificates{}, seedDatabase{}, installPassenger{src: "services/api"}}},
                seedDatabase{},
        }
+       if !super.NoWorkbench1 {
+               tasks = append(tasks,
+                       installPassenger{src: "apps/workbench", depends: []supervisedTask{seedDatabase{}}}, // dependency ensures workbench doesn't delay api install/startup
+                       runPassenger{src: "apps/workbench", varlibdir: "workbench1", svc: super.cluster.Services.Workbench1, depends: []supervisedTask{installPassenger{src: "apps/workbench"}}},
+               )
+       }
+       if !super.NoWorkbench2 {
+               tasks = append(tasks,
+                       runWorkbench2{svc: super.cluster.Services.Workbench2},
+               )
+       }
        if super.ClusterType != "test" {
                tasks = append(tasks,
-                       runServiceCommand{name: "dispatch-cloud", svc: super.cluster.Services.Controller},
-                       runGoProgram{src: "services/keep-balance"},
+                       runServiceCommand{name: "dispatch-cloud", svc: super.cluster.Services.DispatchCloud},
+                       runGoProgram{src: "services/keep-balance", svc: super.cluster.Services.Keepbalance},
                )
        }
        super.tasksReady = map[string]chan bool{}
@@ -271,36 +427,60 @@ func (super *Supervisor) run(cfg *arvados.Config) error {
 }
 
 func (super *Supervisor) wait(ctx context.Context, tasks ...supervisedTask) error {
+       ticker := time.NewTicker(15 * time.Second)
+       defer ticker.Stop()
        for _, task := range tasks {
                ch, ok := super.tasksReady[task.String()]
                if !ok {
                        return fmt.Errorf("no such task: %s", task)
                }
                super.logger.WithField("task", task.String()).Info("waiting")
-               select {
-               case <-ch:
-                       super.logger.WithField("task", task.String()).Info("ready")
-               case <-ctx.Done():
-                       super.logger.WithField("task", task.String()).Info("task was never ready")
-                       return ctx.Err()
+               for {
+                       select {
+                       case <-ch:
+                               super.logger.WithField("task", task.String()).Info("ready")
+                       case <-ctx.Done():
+                               super.logger.WithField("task", task.String()).Info("task was never ready")
+                               return ctx.Err()
+                       case <-ticker.C:
+                               super.logger.WithField("task", task.String()).Info("still waiting...")
+                               continue
+                       }
+                       break
                }
        }
        return nil
 }
 
+// Stop shuts down all child processes and goroutines, and returns
+// when all of them have exited.
 func (super *Supervisor) Stop() {
        super.cancel()
        <-super.done
 }
 
-func (super *Supervisor) WaitReady() (*arvados.URL, bool) {
+// WaitReady waits for the cluster(s) to be ready to handle requests,
+// then returns true. If startup fails, it returns false.
+func (super *Supervisor) WaitReady() bool {
+       if super.children != nil {
+               for id, super2 := range super.children {
+                       super.logger.Infof("waiting for %s to be ready", id)
+                       if !super2.WaitReady() {
+                               super.logger.Infof("%s startup failed", id)
+                               return false
+                       }
+                       super.logger.Infof("%s is ready", id)
+               }
+               super.logger.Info("all clusters are ready")
+               return true
+       }
        ticker := time.NewTicker(time.Second)
        defer ticker.Stop()
        for waiting := "all"; waiting != ""; {
                select {
                case <-ticker.C:
                case <-super.ctx.Done():
-                       return nil, false
+                       return false
                }
                if super.healthChecker == nil {
                        // not set up yet
@@ -322,8 +502,7 @@ func (super *Supervisor) WaitReady() (*arvados.URL, bool) {
                        super.logger.WithField("targets", waiting[1:]).Info("waiting")
                }
        }
-       u := super.cluster.Services.Controller.ExternalURL
-       return &u, true
+       return true
 }
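
WaitReady no longer hands back the controller URL; a caller that needs it (a test harness, for example) can presumably read it from the booted cluster config instead. A sketch along those lines for a single-cluster boot; with a federation, pass a real cluster ID to Cluster() instead of "". The helper name is hypothetical.

```go
// Sketch: recover the controller ExternalURL after a successful boot, now
// that WaitReady only reports success/failure.
func controllerURL(super *Supervisor) (*url.URL, error) {
	if !super.WaitReady() {
		return nil, errors.New("boot failed")
	}
	u := url.URL(super.Cluster("").Services.Controller.ExternalURL)
	return &u, nil
}
```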
 
 func (super *Supervisor) prependEnv(key, prepend string) {
@@ -382,9 +561,11 @@ func dedupEnv(in []string) []string {
 
 func (super *Supervisor) installGoProgram(ctx context.Context, srcpath string) (string, error) {
        _, basename := filepath.Split(srcpath)
-       bindir := filepath.Join(super.tempdir, "bin")
-       binfile := filepath.Join(bindir, basename)
-       err := super.RunProgram(ctx, filepath.Join(super.SourcePath, srcpath), nil, []string{"GOBIN=" + bindir}, "go", "install", "-ldflags", "-X git.arvados.org/arvados.git/lib/cmd.version="+super.SourceVersion+" -X main.version="+super.SourceVersion)
+       binfile := filepath.Join(super.bindir, basename)
+       if super.ClusterType == "production" {
+               return binfile, nil
+       }
+       err := super.RunProgram(ctx, filepath.Join(super.SourcePath, srcpath), runOptions{env: []string{"GOBIN=" + super.bindir}}, "go", "install", "-ldflags", "-X git.arvados.org/arvados.git/lib/cmd.version="+super.SourceVersion+" -X main.version="+super.SourceVersion)
        return binfile, err
 }
 
@@ -401,14 +582,23 @@ func (super *Supervisor) setupRubyEnv() error {
                        "GEM_PATH=",
                })
                gem := "gem"
-               if _, err := os.Stat("/var/lib/arvados/bin/gem"); err == nil {
+               if _, err := os.Stat("/var/lib/arvados/bin/gem"); err == nil || super.ClusterType == "production" {
                        gem = "/var/lib/arvados/bin/gem"
                }
                cmd := exec.Command(gem, "env", "gempath")
+               if super.ClusterType == "production" {
+                       cmd.Args = append([]string{"sudo", "-u", "www-data", "-E", "HOME=/var/www"}, cmd.Args...)
+                       path, err := exec.LookPath("sudo")
+                       if err != nil {
+                               return fmt.Errorf("LookPath(\"sudo\"): %w", err)
+                       }
+                       cmd.Path = path
+               }
+               cmd.Stderr = super.Stderr
                cmd.Env = super.environ
                buf, err := cmd.Output() // /var/lib/arvados/.gem/ruby/2.5.0/bin:...
                if err != nil || len(buf) == 0 {
-                       return fmt.Errorf("gem env gempath: %v", err)
+                       return fmt.Errorf("gem env gempath: %w", err)
                }
                gempath := string(bytes.Split(buf, []byte{':'})[0])
                super.prependEnv("PATH", gempath+"/bin:")
@@ -438,33 +628,55 @@ func (super *Supervisor) lookPath(prog string) string {
        return prog
 }
 
-// Run prog with args, using dir as working directory. If ctx is
-// cancelled while the child is running, RunProgram terminates the
-// child, waits for it to exit, then returns.
+type runOptions struct {
+       output io.Writer // attach stdout
+       env    []string  // add/replace environment variables
+       user   string    // run as specified user
+       stdin  io.Reader
+}
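
runOptions replaces RunProgram's former output/env parameters. A few hypothetical invocations, written as package-internal code since runOptions is unexported; the commands, directories, and user are illustrative only, and the user field can only take effect when the boot process is allowed to switch credentials (production boots running as root).

```go
// Hypothetical runOptions usage; commands and paths are examples only.
func exampleRunProgram(ctx context.Context, super *Supervisor) error {
	// Capture stdout instead of streaming it to the boot logs:
	var buf bytes.Buffer
	if err := super.RunProgram(ctx, ".", runOptions{output: &buf}, "git", "status", "--short"); err != nil {
		return err
	}

	// Add/override environment variables for the child process:
	if err := super.RunProgram(ctx, "services/api", runOptions{env: []string{"RAILS_ENV=test"}}, "bundle", "exec", "rake", "-T"); err != nil {
		return err
	}

	// Run as another user; requires root, as in production boots.
	return super.RunProgram(ctx, "/var/www", runOptions{user: "www-data"}, "id")
}
```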
+
+// RunProgram runs prog with args, using dir as working directory. If ctx is
+// cancelled while the child is running, RunProgram terminates the child, waits
+// for it to exit, then returns.
 //
 // Child's environment will have our env vars, plus any given in opts.env.
 //
 // Child's stdout will be written to opts.output if non-nil, otherwise the
 // boot command's stderr.
-func (super *Supervisor) RunProgram(ctx context.Context, dir string, output io.Writer, env []string, prog string, args ...string) error {
+func (super *Supervisor) RunProgram(ctx context.Context, dir string, opts runOptions, prog string, args ...string) error {
        cmdline := fmt.Sprintf("%s", append([]string{prog}, args...))
        super.logger.WithField("command", cmdline).WithField("dir", dir).Info("executing")
 
        logprefix := prog
-       if logprefix == "setuidgid" && len(args) >= 3 {
-               logprefix = args[2]
-       }
-       logprefix = strings.TrimPrefix(logprefix, super.tempdir+"/bin/")
-       if logprefix == "bundle" && len(args) > 2 && args[0] == "exec" {
-               logprefix = args[1]
-       } else if logprefix == "arvados-server" && len(args) > 1 {
-               logprefix = args[0]
-       }
-       if !strings.HasPrefix(dir, "/") {
-               logprefix = dir + ": " + logprefix
+       {
+               innerargs := args
+               if logprefix == "sudo" {
+                       for i := 0; i < len(args); i++ {
+                               if args[i] == "-u" {
+                                       i++
+                               } else if args[i] == "-E" || strings.Contains(args[i], "=") {
+                               } else {
+                                       logprefix = args[i]
+                                       innerargs = args[i+1:]
+                                       break
+                               }
+                       }
+               }
+               logprefix = strings.TrimPrefix(logprefix, "/var/lib/arvados/bin/")
+               logprefix = strings.TrimPrefix(logprefix, super.tempdir+"/bin/")
+               if logprefix == "bundle" && len(innerargs) > 2 && innerargs[0] == "exec" {
+                       _, dirbase := filepath.Split(dir)
+                       logprefix = innerargs[1] + "@" + dirbase
+               } else if logprefix == "arvados-server" && len(args) > 1 {
+                       logprefix = args[0]
+               }
+               if !strings.HasPrefix(dir, "/") {
+                       logprefix = dir + ": " + logprefix
+               }
        }
 
        cmd := exec.Command(super.lookPath(prog), args...)
+       cmd.Stdin = opts.stdin
        stdout, err := cmd.StdoutPipe()
        if err != nil {
                return err
@@ -482,10 +694,10 @@ func (super *Supervisor) RunProgram(ctx context.Context, dir string, output io.W
        }()
        copiers.Add(1)
        go func() {
-               if output == nil {
+               if opts.output == nil {
                        io.Copy(logwriter, stdout)
                } else {
-                       io.Copy(output, stdout)
+                       io.Copy(opts.output, stdout)
                }
                copiers.Done()
        }()
@@ -495,10 +707,34 @@ func (super *Supervisor) RunProgram(ctx context.Context, dir string, output io.W
        } else {
                cmd.Dir = filepath.Join(super.SourcePath, dir)
        }
-       env = append([]string(nil), env...)
+       env := append([]string(nil), opts.env...)
        env = append(env, super.environ...)
        cmd.Env = dedupEnv(env)
 
+       if opts.user != "" {
+               // Note: We use this approach instead of "sudo"
+               // because in certain circumstances (we are pid 1 in a
+               // docker container, and our passenger child process
+               // changes to pgid 1) the intermediate sudo process
+               // notices we have the same pgid as our child and
+               // refuses to propagate signals from us to our child,
+               // so we can't signal/shutdown our passenger/rails
+               // apps. "chpst" or "setuidgid" would work, but these
+               // few lines avoid depending on runit/daemontools.
+               u, err := user.Lookup(opts.user)
+               if err != nil {
+                       return fmt.Errorf("user.Lookup(%q): %w", opts.user, err)
+               }
+               uid, _ := strconv.Atoi(u.Uid)
+               gid, _ := strconv.Atoi(u.Gid)
+               cmd.SysProcAttr = &syscall.SysProcAttr{
+                       Credential: &syscall.Credential{
+                               Uid: uint32(uid),
+                               Gid: uint32(gid),
+                       },
+               }
+       }
+
        exited := false
        defer func() { exited = true }()
        go func() {
@@ -538,108 +774,134 @@ func (super *Supervisor) RunProgram(ctx context.Context, dir string, output io.W
        return nil
 }
 
-func (super *Supervisor) autofillConfig(cfg *arvados.Config) error {
-       cluster, err := cfg.GetCluster("")
-       if err != nil {
-               return err
-       }
+func (super *Supervisor) autofillConfig() error {
        usedPort := map[string]bool{}
-       nextPort := func(host string) string {
+       nextPort := func(host string) (string, error) {
                for {
                        port, err := availablePort(host)
                        if err != nil {
-                               panic(err)
+                               port, err = availablePort(super.ListenHost)
+                       }
+                       if err != nil {
+                               return "", err
                        }
                        if usedPort[port] {
                                continue
                        }
                        usedPort[port] = true
-                       return port
+                       return port, nil
                }
        }
-       if cluster.Services.Controller.ExternalURL.Host == "" {
+       if super.cluster.Services.Controller.ExternalURL.Host == "" {
                h, p, err := net.SplitHostPort(super.ControllerAddr)
-               if err != nil {
-                       return err
+               if err != nil && super.ControllerAddr != "" {
+                       return fmt.Errorf("SplitHostPort(ControllerAddr %q): %w", super.ControllerAddr, err)
                }
                if h == "" {
                        h = super.ListenHost
                }
-               if p == "0" {
-                       p = nextPort(h)
+               if p == "0" || p == "" {
+                       p, err = nextPort(h)
+                       if err != nil {
+                               return err
+                       }
                }
-               cluster.Services.Controller.ExternalURL = arvados.URL{Scheme: "https", Host: net.JoinHostPort(h, p), Path: "/"}
+               super.cluster.Services.Controller.ExternalURL = arvados.URL{Scheme: "https", Host: net.JoinHostPort(h, p), Path: "/"}
        }
+       u := url.URL(super.cluster.Services.Controller.ExternalURL)
+       defaultExtHost := u.Hostname()
        for _, svc := range []*arvados.Service{
-               &cluster.Services.Controller,
-               &cluster.Services.DispatchCloud,
-               &cluster.Services.GitHTTP,
-               &cluster.Services.Health,
-               &cluster.Services.Keepproxy,
-               &cluster.Services.Keepstore,
-               &cluster.Services.RailsAPI,
-               &cluster.Services.WebDAV,
-               &cluster.Services.WebDAVDownload,
-               &cluster.Services.Websocket,
-               &cluster.Services.Workbench1,
+               &super.cluster.Services.Controller,
+               &super.cluster.Services.DispatchCloud,
+               &super.cluster.Services.GitHTTP,
+               &super.cluster.Services.Health,
+               &super.cluster.Services.Keepproxy,
+               &super.cluster.Services.Keepstore,
+               &super.cluster.Services.RailsAPI,
+               &super.cluster.Services.WebDAV,
+               &super.cluster.Services.WebDAVDownload,
+               &super.cluster.Services.Websocket,
+               &super.cluster.Services.Workbench1,
+               &super.cluster.Services.Workbench2,
        } {
-               if svc == &cluster.Services.DispatchCloud && super.ClusterType == "test" {
+               if svc == &super.cluster.Services.DispatchCloud && super.ClusterType == "test" {
                        continue
                }
                if svc.ExternalURL.Host == "" {
-                       if svc == &cluster.Services.Controller ||
-                               svc == &cluster.Services.GitHTTP ||
-                               svc == &cluster.Services.Health ||
-                               svc == &cluster.Services.Keepproxy ||
-                               svc == &cluster.Services.WebDAV ||
-                               svc == &cluster.Services.WebDAVDownload ||
-                               svc == &cluster.Services.Workbench1 {
-                               svc.ExternalURL = arvados.URL{Scheme: "https", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort(super.ListenHost)), Path: "/"}
-                       } else if svc == &cluster.Services.Websocket {
-                               svc.ExternalURL = arvados.URL{Scheme: "wss", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort(super.ListenHost)), Path: "/websocket"}
+                       port, err := nextPort(defaultExtHost)
+                       if err != nil {
+                               return err
+                       }
+                       host := net.JoinHostPort(defaultExtHost, port)
+                       if svc == &super.cluster.Services.Controller ||
+                               svc == &super.cluster.Services.GitHTTP ||
+                               svc == &super.cluster.Services.Health ||
+                               svc == &super.cluster.Services.Keepproxy ||
+                               svc == &super.cluster.Services.WebDAV ||
+                               svc == &super.cluster.Services.WebDAVDownload ||
+                               svc == &super.cluster.Services.Workbench1 ||
+                               svc == &super.cluster.Services.Workbench2 {
+                               svc.ExternalURL = arvados.URL{Scheme: "https", Host: host, Path: "/"}
+                       } else if svc == &super.cluster.Services.Websocket {
+                               svc.ExternalURL = arvados.URL{Scheme: "wss", Host: host, Path: "/websocket"}
                        }
                }
+               if super.NoWorkbench1 && svc == &super.cluster.Services.Workbench1 ||
+                       super.NoWorkbench2 && svc == &super.cluster.Services.Workbench2 {
+                       // When workbench1 or workbench2 is disabled, it
+                       // still gets an ExternalURL (so we have a valid
+                       // listening port to write in our Nginx config)
+                       // but no InternalURLs (so the health checker
+                       // doesn't complain).
+                       continue
+               }
                if len(svc.InternalURLs) == 0 {
+                       port, err := nextPort(super.ListenHost)
+                       if err != nil {
+                               return err
+                       }
+                       host := net.JoinHostPort(super.ListenHost, port)
                        svc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{
-                               {Scheme: "http", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort(super.ListenHost)), Path: "/"}: {},
+                               {Scheme: "http", Host: host, Path: "/"}: {},
                        }
                }
        }
-       if cluster.SystemRootToken == "" {
-               cluster.SystemRootToken = randomHexString(64)
-       }
-       if cluster.ManagementToken == "" {
-               cluster.ManagementToken = randomHexString(64)
-       }
-       if cluster.API.RailsSessionSecretToken == "" {
-               cluster.API.RailsSessionSecretToken = randomHexString(64)
-       }
-       if cluster.Collections.BlobSigningKey == "" {
-               cluster.Collections.BlobSigningKey = randomHexString(64)
-       }
-       if cluster.Users.AnonymousUserToken == "" {
-               cluster.Users.AnonymousUserToken = randomHexString(64)
-       }
-
-       if super.ClusterType != "production" && cluster.Containers.DispatchPrivateKey == "" {
-               buf, err := ioutil.ReadFile(filepath.Join(super.SourcePath, "lib", "dispatchcloud", "test", "sshkey_dispatch"))
-               if err != nil {
-                       return err
-               }
-               cluster.Containers.DispatchPrivateKey = string(buf)
-       }
        if super.ClusterType != "production" {
-               cluster.TLS.Insecure = true
+               if super.cluster.SystemRootToken == "" {
+                       super.cluster.SystemRootToken = randomHexString(64)
+               }
+               if super.cluster.ManagementToken == "" {
+                       super.cluster.ManagementToken = randomHexString(64)
+               }
+               if super.cluster.Collections.BlobSigningKey == "" {
+                       super.cluster.Collections.BlobSigningKey = randomHexString(64)
+               }
+               if super.cluster.Users.AnonymousUserToken == "" {
+                       super.cluster.Users.AnonymousUserToken = randomHexString(64)
+               }
+               if super.cluster.Containers.DispatchPrivateKey == "" {
+                       buf, err := ioutil.ReadFile(filepath.Join(super.SourcePath, "lib", "dispatchcloud", "test", "sshkey_dispatch"))
+                       if err != nil {
+                               return err
+                       }
+                       super.cluster.Containers.DispatchPrivateKey = string(buf)
+               }
+               super.cluster.TLS.Insecure = true
        }
        if super.ClusterType == "test" {
                // Add a second keepstore process.
-               cluster.Services.Keepstore.InternalURLs[arvados.URL{Scheme: "http", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort(super.ListenHost)), Path: "/"}] = arvados.ServiceInstance{}
+               port, err := nextPort(super.ListenHost)
+               if err != nil {
+                       return err
+               }
+               host := net.JoinHostPort(super.ListenHost, port)
+               super.cluster.Services.Keepstore.InternalURLs[arvados.URL{Scheme: "http", Host: host, Path: "/"}] = arvados.ServiceInstance{}
 
                // Create a directory-backed volume for each keepstore
                // process.
-               cluster.Volumes = map[string]arvados.Volume{}
-               for url := range cluster.Services.Keepstore.InternalURLs {
-                       volnum := len(cluster.Volumes)
+               super.cluster.Volumes = map[string]arvados.Volume{}
+               for url := range super.cluster.Services.Keepstore.InternalURLs {
+                       volnum := len(super.cluster.Volumes)
                        datadir := fmt.Sprintf("%s/keep%d.data", super.tempdir, volnum)
                        if _, err = os.Stat(datadir + "/."); err == nil {
                        } else if !os.IsNotExist(err) {
@@ -647,32 +909,48 @@ func (super *Supervisor) autofillConfig(cfg *arvados.Config) error {
                        } else if err = os.Mkdir(datadir, 0755); err != nil {
                                return err
                        }
-                       cluster.Volumes[fmt.Sprintf(cluster.ClusterID+"-nyw5e-%015d", volnum)] = arvados.Volume{
+                       super.cluster.Volumes[fmt.Sprintf(super.cluster.ClusterID+"-nyw5e-%015d", volnum)] = arvados.Volume{
                                Driver:           "Directory",
                                DriverParameters: json.RawMessage(fmt.Sprintf(`{"Root":%q}`, datadir)),
                                AccessViaHosts: map[arvados.URL]arvados.VolumeAccess{
                                        url: {},
                                },
+                               StorageClasses: map[string]bool{
+                                       "default": true,
+                                       "foo":     true,
+                                       "bar":     true,
+                               },
                        }
                }
+               super.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+                       "default": {Default: true},
+                       "foo":     {},
+                       "bar":     {},
+               }
        }
        if super.OwnTemporaryDatabase {
-               cluster.PostgreSQL.Connection = arvados.PostgreSQLConnection{
+               port, err := nextPort("localhost")
+               if err != nil {
+                       return err
+               }
+               super.cluster.PostgreSQL.Connection = arvados.PostgreSQLConnection{
                        "client_encoding": "utf8",
                        "host":            "localhost",
-                       "port":            nextPort(super.ListenHost),
+                       "port":            port,
                        "dbname":          "arvados_test",
                        "user":            "arvados",
                        "password":        "insecure_arvados_test",
                }
        }
-
-       cfg.Clusters[cluster.ClusterID] = *cluster
        return nil
 }
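
nextPort wraps availablePort (defined further down in this file), which uses the standard trick of binding to port 0 and reading back the kernel-assigned port; the usedPort map then keeps the supervisor from handing out the same port twice. A standalone sketch of the underlying idea, not the file's exact implementation:

```go
package main

import (
	"fmt"
	"net"
)

// freePort asks the kernel for an unused TCP port on host by listening on
// port 0, reading back the assigned address, and releasing the listener.
func freePort(host string) (string, error) {
	ln, err := net.Listen("tcp", net.JoinHostPort(host, "0"))
	if err != nil {
		return "", err
	}
	defer ln.Close()
	_, port, err := net.SplitHostPort(ln.Addr().String())
	return port, err
}

func main() {
	port, err := freePort("localhost")
	if err != nil {
		panic(err)
	}
	fmt.Println("available port:", port)
}
```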
 
 func addrIsLocal(addr string) (bool, error) {
-       return true, nil
+       if h, _, err := net.SplitHostPort(addr); err != nil {
+               return false, err
+       } else {
+               addr = net.JoinHostPort(h, "0")
+       }
        listener, err := net.Listen("tcp", addr)
        if err == nil {
                listener.Close()
@@ -693,30 +971,30 @@ func randomHexString(chars int) string {
        return fmt.Sprintf("%x", b)
 }
 
-func internalPort(svc arvados.Service) (string, error) {
+func internalPort(svc arvados.Service) (host, port string, err error) {
        if len(svc.InternalURLs) > 1 {
-               return "", errors.New("internalPort() doesn't work with multiple InternalURLs")
+               return "", "", errors.New("internalPort() doesn't work with multiple InternalURLs")
        }
        for u := range svc.InternalURLs {
-               if _, p, err := net.SplitHostPort(u.Host); err != nil {
-                       return "", err
-               } else if p != "" {
-                       return p, nil
-               } else if u.Scheme == "https" {
-                       return "443", nil
-               } else {
-                       return "80", nil
+               u := url.URL(u)
+               host, port = u.Hostname(), u.Port()
+               switch {
+               case port != "":
+               case u.Scheme == "https", u.Scheme == "ws":
+                       port = "443"
+               default:
+                       port = "80"
                }
+               return
        }
-       return "", fmt.Errorf("service has no InternalURLs")
+       return "", "", fmt.Errorf("service has no InternalURLs")
 }
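
Both port helpers now convert arvados.URL to net/url.URL and use Hostname()/Port(), which tolerates a missing port (it returns "") instead of failing the way net.SplitHostPort did in the old code, and copes with IPv6 literals. A tiny standalone illustration of the conversion; the example hosts are hypothetical.

```go
package main

import (
	"fmt"
	"net/url"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// arvados.URL converts directly to net/url.URL, so Hostname/Port work.
	u := url.URL(arvados.URL{Scheme: "https", Host: "[::1]:8443", Path: "/"})
	fmt.Println(u.Hostname(), u.Port()) // "::1" "8443"

	u = url.URL(arvados.URL{Scheme: "wss", Host: "ws.example.com", Path: "/websocket"})
	fmt.Println(u.Hostname(), u.Port()) // empty port; callers apply scheme defaults
}
```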
 
 func externalPort(svc arvados.Service) (string, error) {
-       if _, p, err := net.SplitHostPort(svc.ExternalURL.Host); err != nil {
-               return "", err
-       } else if p != "" {
+       u := url.URL(svc.ExternalURL)
+       if p := u.Port(); p != "" {
                return p, nil
-       } else if svc.ExternalURL.Scheme == "https" {
+       } else if u.Scheme == "https" || u.Scheme == "wss" {
                return "443", nil
        } else {
                return "80", nil
@@ -739,6 +1017,7 @@ func availablePort(host string) (string, error) {
 // Try to connect to addr until it works, then return nil. Give up if
 // ctx cancels.
 func waitForConnect(ctx context.Context, addr string) error {
+       ctxlog.FromContext(ctx).WithField("addr", addr).Info("waitForConnect")
        dialer := net.Dialer{Timeout: time.Second}
        for ctx.Err() == nil {
                conn, err := dialer.DialContext(ctx, "tcp", addr)