Restore rendering HTML tags in registered workflow panel, refs #21944
[arvados.git] / lib / boot / supervisor.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 package boot
6
7 import (
8         "bytes"
9         "context"
10         "crypto/rand"
11         "encoding/json"
12         "errors"
13         "fmt"
14         "io"
15         "io/ioutil"
16         "net"
17         "net/url"
18         "os"
19         "os/exec"
20         "os/signal"
21         "os/user"
22         "path/filepath"
23         "reflect"
24         "strconv"
25         "strings"
26         "sync"
27         "syscall"
28         "time"
29
30         "git.arvados.org/arvados.git/lib/config"
31         "git.arvados.org/arvados.git/lib/service"
32         "git.arvados.org/arvados.git/sdk/go/arvados"
33         "git.arvados.org/arvados.git/sdk/go/ctxlog"
34         "git.arvados.org/arvados.git/sdk/go/health"
35         "github.com/fsnotify/fsnotify"
36         "github.com/sirupsen/logrus"
37 )
38
// Supervisor boots and supervises an Arvados cluster. If the loaded
// config contains multiple clusters, the Supervisor delegates to one
// child Supervisor per cluster (see startFederation); otherwise it
// runs the cluster's services itself (see runCluster).
type Supervisor struct {
        // Config file location like "/etc/arvados/config.yml", or "-"
        // to read from Stdin (see below).
        ConfigPath string
        // Literal config file (useful for test suites). If non-empty,
        // this is used instead of ConfigPath.
        ConfigYAML string
        // Path to arvados source tree. Only used for dev/test
        // clusters.
        SourcePath string
        // Version number to build into binaries. Only used for
        // dev/test clusters.
        SourceVersion string
        // "production", "development", or "test".
        ClusterType string
        // Listening address for external services, and internal
        // services whose InternalURLs are not explicitly configured.
        // If blank, listen on the configured controller ExternalURL
        // host; if that is also blank, listen on all addresses
        // (0.0.0.0).
        ListenHost string
        // Default host:port for controller ExternalURL if not
        // explicitly configured in config file. If blank, use a
        // random port on ListenHost.
        ControllerAddr string

        // NoWorkbench1 must be true: runCluster returns an error if a
        // workbench1 service is requested (no longer supported).
        // NoWorkbench2 skips starting the workbench2 service.
        NoWorkbench1         bool
        NoWorkbench2         bool
        // Presumably starts a dedicated temporary PostgreSQL server
        // instead of using a preexisting one -- confirm in
        // runPostgreSQL (not visible in this file section).
        OwnTemporaryDatabase bool
        Stdin                io.Reader
        Stderr               io.Writer

        logger   logrus.FieldLogger
        cluster  *arvados.Cluster       // nil if this is a multi-cluster supervisor
        children map[string]*Supervisor // nil if this is a single-cluster supervisor

        ctx           context.Context
        cancel        context.CancelFunc
        done          chan struct{}      // closed when child procs/services have shut down
        err           error              // error that caused shutdown (valid when done is closed)
        healthChecker *health.Aggregator // nil if this is a multi-cluster supervisor, or still booting
        tasksReady    map[string]chan bool
        waitShutdown  sync.WaitGroup

        bindir     string
        tempdir    string // in production mode, this is accessible only to root
        wwwtempdir string // in production mode, this is accessible only to www-data
        configfile string
        environ    []string // for child processes
}
89
90 func (super *Supervisor) Clusters() map[string]*arvados.Cluster {
91         m := map[string]*arvados.Cluster{}
92         if super.cluster != nil {
93                 m[super.cluster.ClusterID] = super.cluster
94         }
95         for id, super2 := range super.children {
96                 m[id] = super2.Cluster("")
97         }
98         return m
99 }
100
101 func (super *Supervisor) Cluster(id string) *arvados.Cluster {
102         if super.children != nil {
103                 return super.children[id].Cluster(id)
104         } else {
105                 return super.cluster
106         }
107 }
108
// Start loads the configuration and starts the cluster(s) in the
// background. It returns promptly; use Wait or WaitReady to learn
// the outcome. On config load failure, super.err is set and
// super.done is closed immediately.
func (super *Supervisor) Start(ctx context.Context) {
        super.logger = ctxlog.FromContext(ctx)
        super.ctx, super.cancel = context.WithCancel(ctx)
        super.done = make(chan struct{})

        // Trap signals for clean shutdown: SIGHUP requests a config
        // reload/restart (errNeedConfigReload); SIGINT/SIGTERM just
        // shut down with an error naming the signal.
        sigch := make(chan os.Signal, 1)
        signal.Notify(sigch, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
        go func() {
                defer signal.Stop(sigch)
                for {
                        select {
                        case <-ctx.Done():
                                return
                        case sig := <-sigch:
                                super.logger.WithField("signal", sig).Info("caught signal")
                                if super.err == nil {
                                        if sig == syscall.SIGHUP {
                                                super.err = errNeedConfigReload
                                        } else {
                                                super.err = fmt.Errorf("caught signal %s", sig)
                                        }
                                }
                                super.cancel()
                        }
                }
        }()

        // Load config from the ConfigYAML literal if given, otherwise
        // from ConfigPath (which may itself be "-" meaning Stdin).
        loaderStdin := super.Stdin
        if super.ConfigYAML != "" {
                loaderStdin = bytes.NewBufferString(super.ConfigYAML)
        }
        loader := config.NewLoader(loaderStdin, super.logger)
        loader.SkipLegacy = true
        loader.SkipAPICalls = true
        loader.Path = super.ConfigPath
        if super.ConfigYAML != "" {
                loader.Path = "-"
        }
        cfg, err := loader.Load()
        if err != nil {
                super.err = err
                close(super.done)
                super.cancel()
                return
        }

        // If configured, watch the config file and trigger a
        // reload/restart when it changes on disk.
        if super.ConfigPath != "" && super.ConfigPath != "-" && cfg.AutoReloadConfig {
                go watchConfig(super.ctx, super.logger, super.ConfigPath, copyConfig(cfg), func() {
                        if super.err == nil {
                                super.err = errNeedConfigReload
                        }
                        super.cancel()
                })
        }

        if len(cfg.Clusters) > 1 {
                // Multi-cluster config: run one child supervisor per
                // cluster, and adopt the first child error as our own.
                super.startFederation(cfg)
                go func() {
                        defer super.cancel()
                        defer close(super.done)
                        for _, super2 := range super.children {
                                err := super2.Wait()
                                if super.err == nil {
                                        super.err = err
                                }
                        }
                }()
        } else {
                // Single-cluster config: run the cluster ourselves in
                // a background goroutine.
                go func() {
                        defer super.cancel()
                        defer close(super.done)
                        super.cluster, super.err = cfg.GetCluster("")
                        if super.err != nil {
                                return
                        }
                        err := super.runCluster()
                        if err != nil {
                                super.logger.WithError(err).Info("supervisor shut down")
                                if super.err == nil {
                                        super.err = err
                                }
                        }
                }()
        }
}
194
// Wait returns when all child processes and goroutines have exited
// (i.e., after super.done is closed). It returns the error that
// caused shutdown, or nil if shutdown was clean.
func (super *Supervisor) Wait() error {
        <-super.done
        return super.err
}
200
201 // startFederation starts a child Supervisor for each cluster in the
202 // given config. Each is a copy of the original/parent with the
203 // original config reduced to a single cluster.
204 func (super *Supervisor) startFederation(cfg *arvados.Config) {
205         super.children = map[string]*Supervisor{}
206         for id, cc := range cfg.Clusters {
207                 yaml, err := json.Marshal(arvados.Config{Clusters: map[string]arvados.Cluster{id: cc}})
208                 if err != nil {
209                         panic(fmt.Sprintf("json.Marshal partial config: %s", err))
210                 }
211                 super2 := &Supervisor{
212                         ConfigPath:           "-",
213                         ConfigYAML:           string(yaml),
214                         SourcePath:           super.SourcePath,
215                         SourceVersion:        super.SourceVersion,
216                         ClusterType:          super.ClusterType,
217                         ListenHost:           super.ListenHost,
218                         ControllerAddr:       super.ControllerAddr,
219                         NoWorkbench1:         super.NoWorkbench1,
220                         NoWorkbench2:         super.NoWorkbench2,
221                         OwnTemporaryDatabase: super.OwnTemporaryDatabase,
222                         Stdin:                super.Stdin,
223                         Stderr:               super.Stderr,
224                 }
225                 if super2.ClusterType == "test" {
226                         super2.Stderr = &service.LogPrefixer{
227                                 Writer: super.Stderr,
228                                 Prefix: []byte("[" + id + "] "),
229                         }
230                 }
231                 super2.Start(super.ctx)
232                 super.children[id] = super2
233         }
234 }
235
// runCluster sets up the runtime environment for a single cluster
// (source tree, temp/bin dirs, generated config file, child-process
// environment, ruby env), builds arvados-server, then starts and
// supervises all configured service tasks until super.ctx is
// cancelled. It blocks until shutdown is complete.
func (super *Supervisor) runCluster() error {
        cwd, err := os.Getwd()
        if err != nil {
                return err
        }
        if super.ClusterType == "test" && super.SourcePath == "" {
                // When invoked by test suite, default to current
                // source tree
                buf, err := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput()
                if err != nil {
                        return fmt.Errorf("git rev-parse: %w", err)
                }
                super.SourcePath = strings.TrimSuffix(string(buf), "\n")
        } else if !strings.HasPrefix(super.SourcePath, "/") {
                super.SourcePath = filepath.Join(cwd, super.SourcePath)
        }
        super.SourcePath, err = filepath.EvalSymlinks(super.SourcePath)
        if err != nil {
                return err
        }

        // Default ListenHost to the controller ExternalURL host, or
        // to all interfaces if that is also blank.
        if super.ListenHost == "" {
                u := url.URL(super.cluster.Services.Controller.ExternalURL)
                super.ListenHost = u.Hostname()
                if super.ListenHost == "" {
                        super.ListenHost = "0.0.0.0"
                }
        }

        // Choose bin and temp dirs: /var/lib/arvados/... in
        // production, transient tempdir otherwise.
        if super.ClusterType == "production" {
                // These dirs have already been created by
                // "arvados-server install" (or by extracting a
                // package).
                super.tempdir = "/var/lib/arvados/tmp"
                super.wwwtempdir = "/var/lib/arvados/wwwtmp"
                super.bindir = "/var/lib/arvados/bin"
        } else {
                super.tempdir, err = ioutil.TempDir("", "arvados-server-boot-")
                if err != nil {
                        return err
                }
                defer os.RemoveAll(super.tempdir)
                super.wwwtempdir = super.tempdir
                super.bindir = filepath.Join(super.tempdir, "bin")
                if err := os.Mkdir(super.bindir, 0755); err != nil {
                        return err
                }
        }

        // Fill in any missing config keys, and write the resulting
        // config in the temp dir for child services to use.
        err = super.autofillConfig()
        if err != nil {
                return err
        }
        conffile, err := os.OpenFile(filepath.Join(super.wwwtempdir, "config.yml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
        if err != nil {
                return err
        }
        defer conffile.Close()
        // Written as JSON, which is valid YAML for the children.
        err = json.NewEncoder(conffile).Encode(arvados.Config{
                Clusters: map[string]arvados.Cluster{
                        super.cluster.ClusterID: *super.cluster}})
        if err != nil {
                return err
        }
        err = conffile.Close()
        if err != nil {
                return err
        }
        super.configfile = conffile.Name()

        // Build the child-process environment: drop inherited
        // ARVADOS_* vars, then set the ones the services need.
        super.environ = os.Environ()
        super.cleanEnv([]string{"ARVADOS_"})
        super.setEnv("ARVADOS_CONFIG", super.configfile)
        super.setEnv("RAILS_ENV", super.ClusterType)
        super.setEnv("TMPDIR", super.tempdir)
        super.prependEnv("PATH", "/var/lib/arvados/bin:")
        if super.ClusterType != "production" {
                super.prependEnv("PATH", super.tempdir+"/bin:")
        }
        super.setEnv("ARVADOS_SERVER_ADDRESS", super.ListenHost)
        if super.ClusterType == "test" {
                super.setEnv("ARVADOS_USE_KEEP_ACCESSIBLE_API", "true")
        }

        // Now that we have the config, replace the bootstrap logger
        // with a new one according to the logging config.
        loglevel := super.cluster.SystemLogs.LogLevel
        if s := os.Getenv("ARVADOS_DEBUG"); s != "" && s != "0" {
                loglevel = "debug"
        }
        super.logger = ctxlog.New(super.Stderr, super.cluster.SystemLogs.Format, loglevel).WithFields(logrus.Fields{
                "PID": os.Getpid(),
        })

        if super.SourceVersion == "" && super.ClusterType == "production" {
                // don't need SourceVersion
        } else if super.SourceVersion == "" {
                // Find current source tree version: HEAD commit hash,
                // plus "+uncommitted" if the working tree is dirty.
                var buf bytes.Buffer
                err = super.RunProgram(super.ctx, super.SourcePath, runOptions{output: &buf}, "git", "diff", "--shortstat")
                if err != nil {
                        return err
                }
                dirty := buf.Len() > 0
                buf.Reset()
                err = super.RunProgram(super.ctx, super.SourcePath, runOptions{output: &buf}, "git", "log", "-n1", "--format=%H")
                if err != nil {
                        return err
                }
                super.SourceVersion = strings.TrimSpace(buf.String())
                if dirty {
                        super.SourceVersion += "+uncommitted"
                }
        } else {
                return errors.New("specifying a version to run is not yet supported")
        }

        _, err = super.installGoProgram(super.ctx, "cmd/arvados-server")
        if err != nil {
                return err
        }
        err = super.setupRubyEnv()
        if err != nil {
                return err
        }

        // Tasks to run/supervise, with explicit dependency ordering
        // expressed via the "depends" fields.
        tasks := []supervisedTask{
                createCertificates{},
                runPostgreSQL{},
                runNginx{},
                railsDatabase{},
                runServiceCommand{name: "controller", svc: super.cluster.Services.Controller, depends: []supervisedTask{railsDatabase{}}},
                runServiceCommand{name: "health", svc: super.cluster.Services.Health},
                runServiceCommand{name: "keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
                runServiceCommand{name: "keepstore", svc: super.cluster.Services.Keepstore},
                runServiceCommand{name: "keep-web", svc: super.cluster.Services.WebDAV},
                runServiceCommand{name: "ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{railsDatabase{}}},
                installPassenger{src: "services/api", varlibdir: "railsapi"},
                runPassenger{src: "services/api", varlibdir: "railsapi", svc: super.cluster.Services.RailsAPI, depends: []supervisedTask{
                        createCertificates{},
                        installPassenger{src: "services/api", varlibdir: "railsapi"},
                        railsDatabase{},
                }},
        }
        if !super.NoWorkbench1 {
                return errors.New("workbench1 is no longer supported")
        }
        if !super.NoWorkbench2 {
                tasks = append(tasks,
                        runWorkbench2{svc: super.cluster.Services.Workbench2},
                )
        }
        if super.ClusterType != "test" {
                tasks = append(tasks,
                        runServiceCommand{name: "keep-balance", svc: super.cluster.Services.Keepbalance},
                )
        }
        if super.cluster.Containers.CloudVMs.Enable {
                tasks = append(tasks,
                        runServiceCommand{name: "dispatch-cloud", svc: super.cluster.Services.DispatchCloud},
                )
        }
        // Launch all tasks concurrently. Each task closes its
        // tasksReady channel when ready; any failure cancels
        // super.ctx, which stops everything.
        super.tasksReady = map[string]chan bool{}
        for _, task := range tasks {
                super.tasksReady[task.String()] = make(chan bool)
        }
        for _, task := range tasks {
                task := task
                fail := func(err error) {
                        if super.ctx.Err() != nil {
                                return
                        }
                        super.cancel()
                        super.logger.WithField("task", task.String()).WithError(err).Error("task failed")
                }
                go func() {
                        super.logger.WithField("task", task.String()).Info("starting")
                        err := task.Run(super.ctx, fail, super)
                        if err != nil {
                                fail(err)
                                return
                        }
                        close(super.tasksReady[task.String()])
                }()
        }
        // Wait for every task to become ready, then serve health
        // checks until shutdown is requested via context
        // cancellation.
        err = super.wait(super.ctx, tasks...)
        if err != nil {
                return err
        }
        super.logger.Info("all startup tasks are complete; starting health checks")
        super.healthChecker = &health.Aggregator{Cluster: super.cluster}
        <-super.ctx.Done()
        super.logger.Info("shutting down")
        super.waitShutdown.Wait()
        return super.ctx.Err()
}
436
437 func (super *Supervisor) wait(ctx context.Context, tasks ...supervisedTask) error {
438         ticker := time.NewTicker(15 * time.Second)
439         defer ticker.Stop()
440         for _, task := range tasks {
441                 ch, ok := super.tasksReady[task.String()]
442                 if !ok {
443                         return fmt.Errorf("no such task: %s", task)
444                 }
445                 super.logger.WithField("task", task.String()).Info("waiting")
446                 for {
447                         select {
448                         case <-ch:
449                                 super.logger.WithField("task", task.String()).Info("ready")
450                         case <-ctx.Done():
451                                 super.logger.WithField("task", task.String()).Info("task was never ready")
452                                 return ctx.Err()
453                         case <-ticker.C:
454                                 super.logger.WithField("task", task.String()).Info("still waiting...")
455                                 continue
456                         }
457                         break
458                 }
459         }
460         return nil
461 }
462
// Stop shuts down all child processes and goroutines, and returns
// when all of them have exited. Cancelling the context tells every
// task/service to stop; done is closed once they have.
func (super *Supervisor) Stop() {
        super.cancel()
        <-super.done
}
469
// WaitReady waits for the cluster(s) to be ready to handle requests,
// then returns true. If startup fails, it returns false.
func (super *Supervisor) WaitReady() bool {
        if super.children != nil {
                // Multi-cluster supervisor: ready when every child is
                // ready. Any child failure stops the whole federation.
                for id, super2 := range super.children {
                        super.logger.Infof("waiting for %s to be ready", id)
                        if !super2.WaitReady() {
                                super.logger.Infof("%s startup failed", id)
                                super.Stop()
                                return false
                        }
                        super.logger.Infof("%s is ready", id)
                }
                super.logger.Info("all clusters are ready")
                return true
        }
        // Single-cluster supervisor: poll health checks once per
        // second until every configured component reports OK, or the
        // context is cancelled (startup failure/shutdown).
        ticker := time.NewTicker(time.Second)
        defer ticker.Stop()
        for waiting := "all"; waiting != ""; {
                select {
                case <-ticker.C:
                case <-super.ctx.Done():
                        super.Stop()
                        return false
                }
                if super.healthChecker == nil {
                        // not set up yet
                        continue
                }
                resp := super.healthChecker.ClusterHealth()
                // The overall health check (resp.Health=="OK") might
                // never pass due to missing components (like
                // arvados-dispatch-cloud in a test cluster), so
                // instead we wait for all configured components to
                // pass.
                waiting = ""
                for target, check := range resp.Checks {
                        if check.Health != "OK" {
                                waiting += " " + target
                        }
                }
                if waiting != "" {
                        // waiting starts with " ", hence waiting[1:].
                        super.logger.WithField("targets", waiting[1:]).Info("waiting")
                }
        }
        return true
}
517
518 func (super *Supervisor) prependEnv(key, prepend string) {
519         for i, s := range super.environ {
520                 if strings.HasPrefix(s, key+"=") {
521                         super.environ[i] = key + "=" + prepend + s[len(key)+1:]
522                         return
523                 }
524         }
525         super.environ = append(super.environ, key+"="+prepend)
526 }
527
528 func (super *Supervisor) cleanEnv(prefixes []string) {
529         var cleaned []string
530         for _, s := range super.environ {
531                 drop := false
532                 for _, p := range prefixes {
533                         if strings.HasPrefix(s, p) {
534                                 drop = true
535                                 break
536                         }
537                 }
538                 if !drop {
539                         cleaned = append(cleaned, s)
540                 }
541         }
542         super.environ = cleaned
543 }
544
545 func (super *Supervisor) setEnv(key, val string) {
546         for i, s := range super.environ {
547                 if strings.HasPrefix(s, key+"=") {
548                         super.environ[i] = key + "=" + val
549                         return
550                 }
551         }
552         super.environ = append(super.environ, key+"="+val)
553 }
554
// dedupEnv returns a copy of in with all but the first occurrence of
// each environment variable removed. It panics on an entry with no
// "=" after the first character, since that cannot be a valid
// NAME=value pair.
func dedupEnv(in []string) []string {
        seen := make(map[string]bool, len(in))
        var out []string
        for _, kv := range in {
                eq := strings.Index(kv, "=")
                if eq < 1 {
                        panic("invalid environment var: " + kv)
                }
                name := kv[:eq]
                if seen[name] {
                        continue
                }
                seen[name] = true
                out = append(out, kv)
        }
        return out
}
571
572 func (super *Supervisor) installGoProgram(ctx context.Context, srcpath string) (string, error) {
573         _, basename := filepath.Split(srcpath)
574         binfile := filepath.Join(super.bindir, basename)
575         if super.ClusterType == "production" {
576                 return binfile, nil
577         }
578         err := super.RunProgram(ctx, filepath.Join(super.SourcePath, srcpath), runOptions{env: []string{"GOBIN=" + super.bindir}}, "go", "install", "-ldflags", "-X git.arvados.org/arvados.git/lib/cmd.version="+super.SourceVersion+" -X main.version="+super.SourceVersion)
579         return binfile, err
580 }
581
582 func (super *Supervisor) usingRVM() bool {
583         return os.Getenv("rvm_path") != ""
584 }
585
// setupRubyEnv prepares super.environ for running gem/bundler/rails
// commands: it points GEM_HOME/GEM_PATH (and PATH) at the first entry
// reported by "gem env gempath", and sets HOME to the current user's
// home directory. If rvm is in use, the gem environment is left
// untouched.
func (super *Supervisor) setupRubyEnv() error {
        if !super.usingRVM() {
                // (If rvm is in use, assume the caller has everything
                // set up as desired)
                super.cleanEnv([]string{
                        "GEM_HOME=",
                        "GEM_PATH=",
                })
                gem := "gem"
                if _, err := os.Stat("/var/lib/arvados/bin/gem"); err == nil || super.ClusterType == "production" {
                        gem = "/var/lib/arvados/bin/gem"
                }
                cmd := exec.Command(gem, "env", "gempath")
                if super.ClusterType == "production" {
                        // In production, run gem as www-data via sudo.
                        // cmd.Path must be re-pointed at sudo because
                        // exec.Command already resolved it to gem.
                        cmd.Args = append([]string{"sudo", "-u", "www-data", "-E", "HOME=/var/www"}, cmd.Args...)
                        path, err := exec.LookPath("sudo")
                        if err != nil {
                                return fmt.Errorf("LookPath(\"sudo\"): %w", err)
                        }
                        cmd.Path = path
                }
                cmd.Stderr = super.Stderr
                cmd.Env = super.environ
                buf, err := cmd.Output() // /var/lib/arvados/.gem/ruby/2.5.0/bin:...
                if err != nil || len(buf) == 0 {
                        return fmt.Errorf("gem env gempath: %w", err)
                }
                // Use only the first colon-separated gempath entry.
                gempath := string(bytes.Split(buf, []byte{':'})[0])
                super.prependEnv("PATH", gempath+"/bin:")
                super.setEnv("GEM_HOME", gempath)
                super.setEnv("GEM_PATH", gempath)
        }
        // Passenger install doesn't work unless $HOME is ~user
        u, err := user.Current()
        if err != nil {
                return err
        }
        super.setEnv("HOME", u.HomeDir)
        return nil
}
626
627 func (super *Supervisor) lookPath(prog string) string {
628         for _, val := range super.environ {
629                 if strings.HasPrefix(val, "PATH=") {
630                         for _, dir := range filepath.SplitList(val[5:]) {
631                                 path := filepath.Join(dir, prog)
632                                 if fi, err := os.Stat(path); err == nil && fi.Mode()&0111 != 0 {
633                                         return path
634                                 }
635                         }
636                 }
637         }
638         return prog
639 }
640
// runOptions adjusts how RunProgram runs a child process.
type runOptions struct {
        output io.Writer // attach stdout (if nil, stdout goes to the boot command's stderr)
        env    []string  // add/replace environment variables
        user   string    // run as specified user
        stdin  io.Reader // attach stdin (nil means the os/exec default, an empty stdin)
}
647
648 // RunProgram runs prog with args, using dir as working directory. If ctx is
649 // cancelled while the child is running, RunProgram terminates the child, waits
650 // for it to exit, then returns.
651 //
652 // Child's environment will have our env vars, plus any given in env.
653 //
654 // Child's stdout will be written to output if non-nil, otherwise the
655 // boot command's stderr.
656 func (super *Supervisor) RunProgram(ctx context.Context, dir string, opts runOptions, prog string, args ...string) error {
657         cmdline := fmt.Sprintf("%s", append([]string{prog}, args...))
658         super.logger.WithField("command", cmdline).WithField("dir", dir).Info("executing")
659
660         logprefix := prog
661         {
662                 innerargs := args
663                 if logprefix == "sudo" {
664                         for i := 0; i < len(args); i++ {
665                                 if args[i] == "-u" {
666                                         i++
667                                 } else if args[i] == "-E" || strings.Contains(args[i], "=") {
668                                 } else {
669                                         logprefix = args[i]
670                                         innerargs = args[i+1:]
671                                         break
672                                 }
673                         }
674                 }
675                 logprefix = strings.TrimPrefix(logprefix, "/var/lib/arvados/bin/")
676                 logprefix = strings.TrimPrefix(logprefix, super.tempdir+"/bin/")
677                 if logprefix == "bundle" && len(innerargs) > 2 && innerargs[0] == "exec" {
678                         _, dirbase := filepath.Split(dir)
679                         logprefix = innerargs[1] + "@" + dirbase
680                 } else if logprefix == "arvados-server" && len(args) > 1 {
681                         logprefix = args[0]
682                 }
683                 if !strings.HasPrefix(dir, "/") {
684                         logprefix = dir + ": " + logprefix
685                 }
686         }
687
688         cmd := exec.Command(super.lookPath(prog), args...)
689         cmd.Stdin = opts.stdin
690         stdout, err := cmd.StdoutPipe()
691         if err != nil {
692                 return err
693         }
694         stderr, err := cmd.StderrPipe()
695         if err != nil {
696                 return err
697         }
698         logwriter := &service.LogPrefixer{Writer: super.Stderr, Prefix: []byte("[" + logprefix + "] ")}
699         var copiers sync.WaitGroup
700         copiers.Add(1)
701         go func() {
702                 io.Copy(logwriter, stderr)
703                 copiers.Done()
704         }()
705         copiers.Add(1)
706         go func() {
707                 if opts.output == nil {
708                         io.Copy(logwriter, stdout)
709                 } else {
710                         io.Copy(opts.output, stdout)
711                 }
712                 copiers.Done()
713         }()
714
715         if strings.HasPrefix(dir, "/") {
716                 cmd.Dir = dir
717         } else {
718                 cmd.Dir = filepath.Join(super.SourcePath, dir)
719         }
720         env := append([]string(nil), opts.env...)
721         env = append(env, super.environ...)
722         cmd.Env = dedupEnv(env)
723
724         if opts.user != "" {
725                 // Note: We use this approach instead of "sudo"
726                 // because in certain circumstances (we are pid 1 in a
727                 // docker container, and our passenger child process
728                 // changes to pgid 1) the intermediate sudo process
729                 // notices we have the same pgid as our child and
730                 // refuses to propagate signals from us to our child,
731                 // so we can't signal/shutdown our passenger/rails
732                 // apps. "chpst" or "setuidgid" would work, but these
733                 // few lines avoid depending on runit/daemontools.
734                 u, err := user.Lookup(opts.user)
735                 if err != nil {
736                         return fmt.Errorf("user.Lookup(%q): %w", opts.user, err)
737                 }
738                 uid, _ := strconv.Atoi(u.Uid)
739                 gid, _ := strconv.Atoi(u.Gid)
740                 cmd.SysProcAttr = &syscall.SysProcAttr{
741                         Credential: &syscall.Credential{
742                                 Uid: uint32(uid),
743                                 Gid: uint32(gid),
744                         },
745                 }
746         }
747
748         exited := false
749         defer func() { exited = true }()
750         go func() {
751                 <-ctx.Done()
752                 log := ctxlog.FromContext(ctx).WithFields(logrus.Fields{"dir": dir, "cmdline": cmdline})
753                 for !exited {
754                         if cmd.Process == nil {
755                                 log.Debug("waiting for child process to start")
756                                 time.Sleep(time.Second / 2)
757                         } else {
758                                 log.WithField("PID", cmd.Process.Pid).Debug("sending SIGTERM")
759                                 cmd.Process.Signal(syscall.SIGTERM)
760                                 time.Sleep(5 * time.Second)
761                                 if !exited {
762                                         stdout.Close()
763                                         stderr.Close()
764                                         log.WithField("PID", cmd.Process.Pid).Warn("still waiting for child process to exit 5s after SIGTERM")
765                                 }
766                         }
767                 }
768         }()
769
770         err = cmd.Start()
771         if err != nil {
772                 return err
773         }
774         copiers.Wait()
775         err = cmd.Wait()
776         if ctx.Err() != nil {
777                 // Return "context canceled", instead of the "killed"
778                 // error that was probably caused by the context being
779                 // canceled.
780                 return ctx.Err()
781         } else if err != nil {
782                 return fmt.Errorf("%s: error: %v", cmdline, err)
783         }
784         return nil
785 }
786
// autofillConfig fills in cluster config values that were left blank
// but are needed to boot: service external/internal URLs (assigning
// listening ports dynamically), dev/test secrets and keys, extra
// keepstore processes and directory-backed volumes in test mode, and
// connection info for a temporary database if requested.
func (super *Supervisor) autofillConfig() error {
	// nextPort returns an unused port on the given host, never
	// handing out the same port twice during this autofill pass
	// (availablePort alone could, because its probe listener is
	// closed again immediately).
	usedPort := map[string]bool{}
	nextPort := func(host string) (string, error) {
		for {
			port, err := availablePort(host)
			if err != nil {
				// Fall back to the supervisor's default
				// listening host if the requested host
				// can't be listened on.
				port, err = availablePort(super.ListenHost)
			}
			if err != nil {
				return "", err
			}
			if usedPort[port] {
				continue
			}
			usedPort[port] = true
			return port, nil
		}
	}
	// Settle the controller's external URL first: its hostname is
	// used below as the default external host for the remaining
	// services.
	if super.cluster.Services.Controller.ExternalURL.Host == "" {
		h, p, err := net.SplitHostPort(super.ControllerAddr)
		if err != nil && super.ControllerAddr != "" {
			return fmt.Errorf("SplitHostPort(ControllerAddr %q): %w", super.ControllerAddr, err)
		}
		if h == "" {
			h = super.ListenHost
		}
		if p == "0" || p == "" {
			p, err = nextPort(h)
			if err != nil {
				return err
			}
		}
		super.cluster.Services.Controller.ExternalURL = arvados.URL{Scheme: "https", Host: net.JoinHostPort(h, p), Path: "/"}
	}
	u := url.URL(super.cluster.Services.Controller.ExternalURL)
	defaultExtHost := u.Hostname()
	for _, svc := range []*arvados.Service{
		&super.cluster.Services.Controller,
		&super.cluster.Services.DispatchCloud,
		&super.cluster.Services.Health,
		&super.cluster.Services.Keepproxy,
		&super.cluster.Services.Keepstore,
		&super.cluster.Services.RailsAPI,
		&super.cluster.Services.WebDAV,
		&super.cluster.Services.WebDAVDownload,
		&super.cluster.Services.Websocket,
		&super.cluster.Services.Workbench1,
		&super.cluster.Services.Workbench2,
	} {
		// Assign an ExternalURL only to the services listed
		// below as externally reachable; the others (e.g.
		// Keepstore, RailsAPI) get InternalURLs only.
		if svc.ExternalURL.Host == "" {
			port, err := nextPort(defaultExtHost)
			if err != nil {
				return err
			}
			host := net.JoinHostPort(defaultExtHost, port)
			if svc == &super.cluster.Services.Controller ||
				svc == &super.cluster.Services.Health ||
				svc == &super.cluster.Services.Keepproxy ||
				svc == &super.cluster.Services.WebDAV ||
				svc == &super.cluster.Services.WebDAVDownload ||
				svc == &super.cluster.Services.Workbench1 ||
				svc == &super.cluster.Services.Workbench2 {
				svc.ExternalURL = arvados.URL{Scheme: "https", Host: host, Path: "/"}
			} else if svc == &super.cluster.Services.Websocket {
				svc.ExternalURL = arvados.URL{Scheme: "wss", Host: host, Path: "/websocket"}
			}
		}
		if super.NoWorkbench1 && svc == &super.cluster.Services.Workbench1 ||
			super.NoWorkbench2 && svc == &super.cluster.Services.Workbench2 ||
			!super.cluster.Containers.CloudVMs.Enable && svc == &super.cluster.Services.DispatchCloud {
			// When Workbench is disabled, it gets an
			// ExternalURL (so we have a valid listening
			// port to write in our Nginx config) but no
			// InternalURLs (so health checker doesn't
			// complain).
			continue
		}
		if len(svc.InternalURLs) == 0 {
			port, err := nextPort(super.ListenHost)
			if err != nil {
				return err
			}
			host := net.JoinHostPort(super.ListenHost, port)
			svc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{
				{Scheme: "http", Host: host, Path: "/"}: {},
			}
		}
	}
	// Dev/test clusters get throwaway secrets and a test SSH key,
	// and skip TLS verification. Production config must supply
	// these explicitly.
	if super.ClusterType != "production" {
		if super.cluster.SystemRootToken == "" {
			super.cluster.SystemRootToken = randomHexString(64)
		}
		if super.cluster.ManagementToken == "" {
			super.cluster.ManagementToken = randomHexString(64)
		}
		if super.cluster.Collections.BlobSigningKey == "" {
			super.cluster.Collections.BlobSigningKey = randomHexString(64)
		}
		if super.cluster.Users.AnonymousUserToken == "" {
			super.cluster.Users.AnonymousUserToken = randomHexString(64)
		}
		if super.cluster.Containers.DispatchPrivateKey == "" {
			buf, err := ioutil.ReadFile(filepath.Join(super.SourcePath, "lib", "dispatchcloud", "test", "sshkey_dispatch"))
			if err != nil {
				return err
			}
			super.cluster.Containers.DispatchPrivateKey = string(buf)
		}
		super.cluster.TLS.Insecure = true
	}
	if super.ClusterType == "test" {
		// Add a second keepstore process.
		port, err := nextPort(super.ListenHost)
		if err != nil {
			return err
		}
		host := net.JoinHostPort(super.ListenHost, port)
		super.cluster.Services.Keepstore.InternalURLs[arvados.URL{Scheme: "http", Host: host, Path: "/"}] = arvados.ServiceInstance{}

		// Create a directory-backed volume for each keepstore
		// process.
		super.cluster.Volumes = map[string]arvados.Volume{}
		for url := range super.cluster.Services.Keepstore.InternalURLs {
			volnum := len(super.cluster.Volumes)
			datadir := fmt.Sprintf("%s/keep%d.data", super.tempdir, volnum)
			if _, err = os.Stat(datadir + "/."); err == nil {
				// Data dir already exists; reuse it.
			} else if !os.IsNotExist(err) {
				return err
			} else if err = os.Mkdir(datadir, 0755); err != nil {
				return err
			}
			super.cluster.Volumes[fmt.Sprintf(super.cluster.ClusterID+"-nyw5e-%015d", volnum)] = arvados.Volume{
				Driver:           "Directory",
				DriverParameters: json.RawMessage(fmt.Sprintf(`{"Root":%q}`, datadir)),
				AccessViaHosts: map[arvados.URL]arvados.VolumeAccess{
					url: {},
				},
				StorageClasses: map[string]bool{
					"default": true,
					"foo":     true,
					"bar":     true,
				},
			}
		}
		super.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
			"default": {Default: true},
			"foo":     {},
			"bar":     {},
		}
	}
	if super.OwnTemporaryDatabase {
		// Pick a fresh localhost port for the temporary
		// postgresql server (NOTE(review): presumably the code
		// that starts that server reads these values — confirm).
		port, err := nextPort("localhost")
		if err != nil {
			return err
		}
		super.cluster.PostgreSQL.Connection = arvados.PostgreSQLConnection{
			"client_encoding": "utf8",
			"host":            "localhost",
			"port":            port,
			"dbname":          "arvados_test",
			"user":            "arvados",
			"password":        "insecure_arvados_test",
		}
	}
	return nil
}
953
// addrIsLocal reports whether addr (a "host:port" string) refers to
// an address on this machine. It checks by attempting to listen on
// the address, substituting port 0 so a port already in use doesn't
// cause a false negative.
func addrIsLocal(addr string) (bool, error) {
	if h, _, err := net.SplitHostPort(addr); err != nil {
		return false, err
	} else {
		addr = net.JoinHostPort(h, "0")
	}
	listener, err := net.Listen("tcp", addr)
	if err == nil {
		listener.Close()
		return true, nil
	} else if errors.Is(err, syscall.EADDRNOTAVAIL) || strings.Contains(err.Error(), "cannot assign requested address") {
		// EADDRNOTAVAIL means the address is syntactically
		// valid but not assigned to any local interface, i.e.,
		// not local. errors.Is unwraps the *net.OpError chain;
		// the message match is kept as a fallback for
		// platforms whose errors don't wrap the errno.
		return false, nil
	} else {
		return false, err
	}
}
970
// randomHexString returns chars random lowercase hex digits using the
// system's cryptographic random source. chars is assumed to be even
// (an odd count yields chars-1 digits, because chars/2 truncates).
// It panics if the random source fails.
func randomHexString(chars int) string {
	raw := make([]byte, chars/2)
	if _, err := rand.Read(raw); err != nil {
		panic(err)
	}
	return fmt.Sprintf("%x", raw)
}
979
980 func internalPort(svc arvados.Service) (host, port string, err error) {
981         if len(svc.InternalURLs) > 1 {
982                 return "", "", errors.New("internalPort() doesn't work with multiple InternalURLs")
983         }
984         for u := range svc.InternalURLs {
985                 u := url.URL(u)
986                 host, port = u.Hostname(), u.Port()
987                 switch {
988                 case port != "":
989                 case u.Scheme == "https", u.Scheme == "ws":
990                         port = "443"
991                 default:
992                         port = "80"
993                 }
994                 return
995         }
996         return "", "", fmt.Errorf("service has no InternalURLs")
997 }
998
999 func externalPort(svc arvados.Service) (string, error) {
1000         u := url.URL(svc.ExternalURL)
1001         if p := u.Port(); p != "" {
1002                 return p, nil
1003         } else if u.Scheme == "https" || u.Scheme == "wss" {
1004                 return "443", nil
1005         } else {
1006                 return "80", nil
1007         }
1008 }
1009
// availablePort asks the kernel for a currently-unused TCP port on
// host (by listening on port 0) and returns the port number as a
// string. The probe listener is closed before returning, so the port
// is only *likely* still free when the caller uses it.
func availablePort(host string) (string, error) {
	probe, err := net.Listen("tcp", net.JoinHostPort(host, "0"))
	if err != nil {
		return "", err
	}
	chosen := probe.Addr().String()
	probe.Close()
	_, port, err := net.SplitHostPort(chosen)
	if err != nil {
		return "", err
	}
	return port, nil
}
1022
1023 // Try to connect to addr until it works, then close ch. Give up if
1024 // ctx cancels.
1025 func waitForConnect(ctx context.Context, addr string) error {
1026         ctxlog.FromContext(ctx).WithField("addr", addr).Info("waitForConnect")
1027         dialer := net.Dialer{Timeout: time.Second}
1028         for ctx.Err() == nil {
1029                 conn, err := dialer.DialContext(ctx, "tcp", addr)
1030                 if err != nil {
1031                         time.Sleep(time.Second / 10)
1032                         continue
1033                 }
1034                 conn.Close()
1035                 return nil
1036         }
1037         return ctx.Err()
1038 }
1039
1040 func copyConfig(cfg *arvados.Config) *arvados.Config {
1041         pr, pw := io.Pipe()
1042         go func() {
1043                 err := json.NewEncoder(pw).Encode(cfg)
1044                 if err != nil {
1045                         panic(err)
1046                 }
1047                 pw.Close()
1048         }()
1049         cfg2 := new(arvados.Config)
1050         err := json.NewDecoder(pr).Decode(cfg2)
1051         if err != nil {
1052                 panic(err)
1053         }
1054         return cfg2
1055 }
1056
// watchConfig monitors cfgPath with fsnotify and calls fn() whenever
// the file changes in a way that yields a config different from
// prevcfg. It returns when ctx is canceled or the watcher's channels
// close. Setup failures are logged and cause an immediate return.
func watchConfig(ctx context.Context, logger logrus.FieldLogger, cfgPath string, prevcfg *arvados.Config, fn func()) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		logger.WithError(err).Error("fsnotify setup failed")
		return
	}
	defer watcher.Close()

	err = watcher.Add(cfgPath)
	if err != nil {
		logger.WithError(err).Error("fsnotify watcher failed")
		return
	}

	for {
		select {
		case <-ctx.Done():
			return
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			logger.WithError(err).Warn("fsnotify watcher reported error")
		case _, ok := <-watcher.Events:
			if !ok {
				return
			}
			// Drain any queued events so a burst of writes
			// triggers only one reload.
			for len(watcher.Events) > 0 {
				<-watcher.Events
			}
			// Reload the file, discarding the loader's own
			// log output, and notify the supervisor only if
			// the effective config actually changed.
			loader := config.NewLoader(&bytes.Buffer{}, &logrus.Logger{Out: ioutil.Discard})
			loader.Path = cfgPath
			loader.SkipAPICalls = true
			cfg, err := loader.Load()
			if err != nil {
				logger.WithError(err).Warn("error reloading config file after change detected; ignoring new config for now")
			} else if reflect.DeepEqual(cfg, prevcfg) {
				logger.Debug("config file changed but is still DeepEqual to the existing config")
			} else {
				logger.Debug("config changed, notifying supervisor")
				fn()
				prevcfg = cfg
			}
		}
	}
}