lib/crunchrun/crunchrun.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 package crunchrun
6
7 import (
8         "bytes"
9         "context"
10         "encoding/json"
11         "errors"
12         "flag"
13         "fmt"
14         "io"
15         "io/ioutil"
16         "log"
17         "net"
18         "net/http"
19         "os"
20         "os/exec"
21         "os/signal"
22         "os/user"
23         "path"
24         "path/filepath"
25         "regexp"
26         "runtime"
27         "runtime/pprof"
28         "sort"
29         "strings"
30         "sync"
31         "syscall"
32         "time"
33
34         "git.arvados.org/arvados.git/lib/cmd"
35         "git.arvados.org/arvados.git/lib/config"
36         "git.arvados.org/arvados.git/lib/crunchstat"
37         "git.arvados.org/arvados.git/sdk/go/arvados"
38         "git.arvados.org/arvados.git/sdk/go/arvadosclient"
39         "git.arvados.org/arvados.git/sdk/go/ctxlog"
40         "git.arvados.org/arvados.git/sdk/go/keepclient"
41         "git.arvados.org/arvados.git/sdk/go/manifest"
42         "golang.org/x/sys/unix"
43 )
44
45 type command struct{}
46
47 var Command = command{}
48
49 // ConfigData contains environment variables and (when needed) cluster
50 // configuration, passed from dispatchcloud to crunch-run on stdin.
51 type ConfigData struct {
52         Env         map[string]string
53         KeepBuffers int
54         Cluster     *arvados.Cluster
55 }
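
// Illustrative sketch only (hypothetical helper, not used elsewhere in
// this file): decoding a ConfigData document arriving on stdin, as
// described in the comment above. The real decoding happens in this
// command's entry point.
func exampleLoadConfigData(stdin io.Reader) (ConfigData, error) {
        var conf ConfigData
        if err := json.NewDecoder(stdin).Decode(&conf); err != nil {
                return conf, fmt.Errorf("decoding ConfigData from stdin: %w", err)
        }
        return conf, nil
}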
56
57 // IArvadosClient defines the minimal set of Arvados API methods used by crunch-run.
58 type IArvadosClient interface {
59         Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
60         Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
61         Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
62         Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
63         CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
64         Discovery(key string) (interface{}, error)
65 }
66
67 // ErrCancelled is the error returned when the container is cancelled.
68 var ErrCancelled = errors.New("Cancelled")
69
70 // IKeepClient defines the minimal set of Keep API methods used by crunch-run.
71 type IKeepClient interface {
72         BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error)
73         ReadAt(locator string, p []byte, off int) (int, error)
74         ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
75         LocalLocator(locator string) (string, error)
76         ClearBlockCache()
77         SetStorageClasses(sc []string)
78 }
79
80 // NewLogWriter is a factory function to create a new log writer.
81 type NewLogWriter func(name string) (io.WriteCloser, error)
82
83 type RunArvMount func(cmdline []string, tok string) (*exec.Cmd, error)
84
85 type MkTempDir func(string, string) (string, error)
86
87 type PsProcess interface {
88         CmdlineSlice() ([]string, error)
89 }
90
91 // ContainerRunner is the main stateful struct used for a single execution of a
92 // container.
93 type ContainerRunner struct {
94         executor       containerExecutor
95         executorStdin  io.Closer
96         executorStdout io.Closer
97         executorStderr io.Closer
98
99         // Dispatcher client is initialized with the Dispatcher token.
100         // This is a privileged token used to manage container status
101         // and logs.
102         //
103         // We have both dispatcherClient and DispatcherArvClient
104         // because there are two different incompatible Arvados Go
105         // SDKs and we have to use both (hopefully this gets fixed in
106         // #14467)
107         dispatcherClient     *arvados.Client
108         DispatcherArvClient  IArvadosClient
109         DispatcherKeepClient IKeepClient
110
111         // Container client is initialized with the Container token.
112         // This token controls the permissions of the container, and
113         // must be used for operations such as reading collections.
114         //
115         // Same comment as above applies to
116         // containerClient/ContainerArvClient.
117         containerClient     *arvados.Client
118         ContainerArvClient  IArvadosClient
119         ContainerKeepClient IKeepClient
120
121         Container     arvados.Container
122         token         string
123         ExitCode      *int
124         NewLogWriter  NewLogWriter
125         CrunchLog     *ThrottledLogger
126         logUUID       string
127         logMtx        sync.Mutex
128         LogCollection arvados.CollectionFileSystem
129         LogsPDH       *string
130         RunArvMount   RunArvMount
131         MkTempDir     MkTempDir
132         ArvMount      *exec.Cmd
133         ArvMountPoint string
134         HostOutputDir string
135         Volumes       map[string]struct{}
136         OutputPDH     *string
137         SigChan       chan os.Signal
138         ArvMountExit  chan error
139         SecretMounts  map[string]arvados.Mount
140         MkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
141         finalState    string
142         parentTemp    string
143         costStartTime time.Time
144
145         keepstoreLogger  io.WriteCloser
146         keepstoreLogbuf  *bufThenWrite
147         statLogger       io.WriteCloser
148         statReporter     *crunchstat.Reporter
149         hoststatLogger   io.WriteCloser
150         hoststatReporter *crunchstat.Reporter
151         statInterval     time.Duration
152         cgroupRoot       string
153         // What we expect the container's cgroup parent to be.
154         expectCgroupParent string
155         // What we tell docker to use as the container's cgroup
156         // parent. Note: Ideally we would use the same field for both
157         // expectCgroupParent and setCgroupParent, and just make it
158         // default to "docker". However, when using docker < 1.10 with
159         // systemd, specifying a non-empty cgroup parent (even the
160         // default value "docker") hits a docker bug
161         // (https://github.com/docker/docker/issues/17126). Using two
162         // separate fields makes it possible to use the "expect cgroup
163         // parent to be X" feature even on sites where the "specify
164         // cgroup parent" feature breaks.
165         setCgroupParent string
166
167         cStateLock sync.Mutex
168         cCancelled bool // StopContainer() invoked
169
170         enableMemoryLimit bool
171         enableNetwork     string // one of "default" or "always"
172         networkMode       string // "none", "host", or "" -- passed through to executor
173         brokenNodeHook    string // script to run if node appears to be broken
174         arvMountLog       *ThrottledLogger
175
176         containerWatchdogInterval time.Duration
177
178         gateway Gateway
179 }
180
181 // setupSignals sets up signal handling to gracefully terminate the
182 // underlying container and update state when receiving a TERM, INT or
183 // QUIT signal.
184 func (runner *ContainerRunner) setupSignals() {
185         runner.SigChan = make(chan os.Signal, 1)
186         signal.Notify(runner.SigChan, syscall.SIGTERM)
187         signal.Notify(runner.SigChan, syscall.SIGINT)
188         signal.Notify(runner.SigChan, syscall.SIGQUIT)
189
190         go func(sig chan os.Signal) {
191                 for s := range sig {
192                         runner.stop(s)
193                 }
194         }(runner.SigChan)
195 }
196
197 // stop the underlying container.
198 func (runner *ContainerRunner) stop(sig os.Signal) {
199         runner.cStateLock.Lock()
200         defer runner.cStateLock.Unlock()
201         if sig != nil {
202                 runner.CrunchLog.Printf("caught signal: %v", sig)
203         }
204         runner.cCancelled = true
205         runner.CrunchLog.Printf("stopping container")
206         err := runner.executor.Stop()
207         if err != nil {
208                 runner.CrunchLog.Printf("error stopping container: %s", err)
209         }
210 }
211
212 var errorBlacklist = []string{
213         "(?ms).*[Cc]annot connect to the Docker daemon.*",
214         "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
215         "(?ms).*grpc: the connection is unavailable.*",
216 }
217
218 func (runner *ContainerRunner) runBrokenNodeHook() {
219         if runner.brokenNodeHook == "" {
220                 path := filepath.Join(lockdir, brokenfile)
221                 runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
222                 f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
223                 if err != nil {
224                         runner.CrunchLog.Printf("Error writing %s: %s", path, err)
225                         return
226                 }
227                 f.Close()
228         } else {
229                 runner.CrunchLog.Printf("Running broken node hook %q", runner.brokenNodeHook)
230                 // run killme script
231                 c := exec.Command(runner.brokenNodeHook)
232                 c.Stdout = runner.CrunchLog
233                 c.Stderr = runner.CrunchLog
234                 err := c.Run()
235                 if err != nil {
236                         runner.CrunchLog.Printf("Error running broken node hook: %v", err)
237                 }
238         }
239 }
240
241 func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
242         for _, d := range errorBlacklist {
243                 if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
244                         runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
245                         runner.runBrokenNodeHook()
246                         return true
247                 }
248         }
249         return false
250 }
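
// exampleIsBrokenNodeError is an illustrative sketch (hypothetical,
// not called elsewhere): the same errorBlacklist matching that
// checkBrokenNode performs above, minus the logging and hook side
// effects. For instance, errors.New("Cannot connect to the Docker
// daemon ...") matches the first pattern, while a plain "exit status 1"
// error matches none.
func exampleIsBrokenNodeError(err error) bool {
        for _, pattern := range errorBlacklist {
                if m, e := regexp.MatchString(pattern, err.Error()); m && e == nil {
                        return true
                }
        }
        return false
}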
251
252 // LoadImage determines the docker image id from the container record and
253 // checks if it is available in the local Docker image store.  If not, it loads
254 // the image from Keep.
255 func (runner *ContainerRunner) LoadImage() (string, error) {
256         runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
257
258         d, err := os.Open(runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage)
259         if err != nil {
260                 return "", err
261         }
262         defer d.Close()
263         allfiles, err := d.Readdirnames(-1)
264         if err != nil {
265                 return "", err
266         }
267         var tarfiles []string
268         for _, fnm := range allfiles {
269                 if strings.HasSuffix(fnm, ".tar") {
270                         tarfiles = append(tarfiles, fnm)
271                 }
272         }
273         if len(tarfiles) == 0 {
274                 return "", fmt.Errorf("image collection does not include a .tar image file")
275         }
276         if len(tarfiles) > 1 {
277                 return "", fmt.Errorf("cannot choose from multiple tar files in image collection: %v", tarfiles)
278         }
279         imageID := tarfiles[0][:len(tarfiles[0])-4]
280         imageTarballPath := runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage + "/" + imageID + ".tar"
281         runner.CrunchLog.Printf("Using Docker image id %q", imageID)
282
283         runner.CrunchLog.Print("Loading Docker image from keep")
284         err = runner.executor.LoadImage(imageID, imageTarballPath, runner.Container, runner.ArvMountPoint,
285                 runner.containerClient)
286         if err != nil {
287                 return "", err
288         }
289
290         return imageID, nil
291 }
292
293 func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *exec.Cmd, err error) {
294         c = exec.Command(cmdline[0], cmdline[1:]...)
295
296         // Copy our environment, but override ARVADOS_API_TOKEN with
297         // the container auth token.
298         c.Env = nil
299         for _, s := range os.Environ() {
300                 if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
301                         c.Env = append(c.Env, s)
302                 }
303         }
304         c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
305
306         w, err := runner.NewLogWriter("arv-mount")
307         if err != nil {
308                 return nil, err
309         }
310         runner.arvMountLog = NewThrottledLogger(w)
311         scanner := logScanner{
312                 Patterns: []string{
313                         "Keep write error",
314                         "Block not found error",
315                         "Unhandled exception during FUSE operation",
316                 },
317                 ReportFunc: runner.reportArvMountWarning,
318         }
319         c.Stdout = runner.arvMountLog
320         c.Stderr = io.MultiWriter(runner.arvMountLog, os.Stderr, &scanner)
321
322         runner.CrunchLog.Printf("Running %v", c.Args)
323
324         err = c.Start()
325         if err != nil {
326                 return nil, err
327         }
328
329         statReadme := make(chan bool)
330         runner.ArvMountExit = make(chan error)
331
332         keepStatting := true
333         go func() {
334                 for keepStatting {
335                         time.Sleep(100 * time.Millisecond)
336                         _, statErr := os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
337                         if statErr == nil {
338                                 keepStatting = false
339                                 statReadme <- true
340                         }
341                 }
342                 close(statReadme)
343         }()
344
345         go func() {
346                 mnterr := c.Wait()
347                 if mnterr != nil {
348                         runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
349                 }
350                 runner.ArvMountExit <- mnterr
351                 close(runner.ArvMountExit)
352         }()
353
354         select {
355         case <-statReadme:
356                 break
357         case err := <-runner.ArvMountExit:
358                 runner.ArvMount = nil
359                 keepStatting = false
360                 return nil, err
361         }
362
363         return c, nil
364 }
365
366 func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
367         if runner.ArvMountPoint == "" {
368                 runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
369         }
370         return
371 }
372
373 // copyfile copies the regular file src to dst, creating dst's parent
374 // directories as needed.
375 func copyfile(src string, dst string) (err error) {
376         srcfile, err := os.Open(src)
377         if err != nil {
378                 return
379         }
380         defer srcfile.Close()
381
382         err = os.MkdirAll(path.Dir(dst), 0777)
383         if err != nil {
384                 return
385         }
386
387         dstfile, err := os.Create(dst)
388         if err != nil {
389                 return
390         }
391         _, err = io.Copy(dstfile, srcfile)
392         if err != nil {
393                 dstfile.Close()
394                 return
395         }
396
397         return dstfile.Close()
398 }
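
// Hypothetical usage of copyfile, mirroring the copyFiles staging loop
// in SetupMounts below (the paths and the hostOutputDir variable are
// made up for illustration):
//
//	err := copyfile("/tmp/host-src/data.txt", hostOutputDir+"/sub/data.txt")
//	if err == nil {
//		err = os.Chmod(hostOutputDir+"/sub/data.txt", 0777)
//	}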
403
404 func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
405         bindmounts := map[string]bindmount{}
406         err := runner.SetupArvMountPoint("keep")
407         if err != nil {
408                 return nil, fmt.Errorf("While creating keep mount temp dir: %v", err)
409         }
410
411         token, err := runner.ContainerToken()
412         if err != nil {
413                 return nil, fmt.Errorf("could not get container token: %s", err)
414         }
416
417         pdhOnly := true
418         tmpcount := 0
419         arvMountCmd := []string{
420                 "arv-mount",
421                 "--foreground",
422                 "--read-write",
423                 "--storage-classes", strings.Join(runner.Container.OutputStorageClasses, ","),
424                 fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
425
426         if _, isdocker := runner.executor.(*dockerExecutor); isdocker {
427                 arvMountCmd = append(arvMountCmd, "--allow-other")
428         }
429
430         if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
431                 arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
432         }
433
434         collectionPaths := []string{}
435         needCertMount := true
436         type copyFile struct {
437                 src  string
438                 bind string
439         }
440         var copyFiles []copyFile
441
442         var binds []string
443         for bind := range runner.Container.Mounts {
444                 binds = append(binds, bind)
445         }
446         for bind := range runner.SecretMounts {
447                 if _, ok := runner.Container.Mounts[bind]; ok {
448                         return nil, fmt.Errorf("secret mount %q conflicts with regular mount", bind)
449                 }
450                 if runner.SecretMounts[bind].Kind != "json" &&
451                         runner.SecretMounts[bind].Kind != "text" {
452                         return nil, fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
453                                 bind, runner.SecretMounts[bind].Kind)
454                 }
455                 binds = append(binds, bind)
456         }
457         sort.Strings(binds)
458
459         for _, bind := range binds {
460                 mnt, notSecret := runner.Container.Mounts[bind]
461                 if !notSecret {
462                         mnt = runner.SecretMounts[bind]
463                 }
464                 if bind == "stdout" || bind == "stderr" {
465                         // Is it a "file" mount kind?
466                         if mnt.Kind != "file" {
467                                 return nil, fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
468                         }
469
470                         // Does path start with OutputPath?
471                         prefix := runner.Container.OutputPath
472                         if !strings.HasSuffix(prefix, "/") {
473                                 prefix += "/"
474                         }
475                         if !strings.HasPrefix(mnt.Path, prefix) {
476                                 return nil, fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
477                         }
478                 }
479
480                 if bind == "stdin" {
481                         // Is it a "collection" mount kind?
482                         if mnt.Kind != "collection" && mnt.Kind != "json" {
483                                 return nil, fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
484                         }
485                 }
486
487                 if bind == "/etc/arvados/ca-certificates.crt" {
488                         needCertMount = false
489                 }
490
491                 if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
492                         if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
493                                 return nil, fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
494                         }
495                 }
496
497                 switch {
498                 case mnt.Kind == "collection" && bind != "stdin":
499                         var src string
500                         if mnt.UUID != "" && mnt.PortableDataHash != "" {
501                                 return nil, fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
502                         }
503                         if mnt.UUID != "" {
504                                 if mnt.Writable {
505                                         return nil, fmt.Errorf("writing to existing collections currently not permitted")
506                                 }
507                                 pdhOnly = false
508                                 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
509                         } else if mnt.PortableDataHash != "" {
510                                 if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
511                                         return nil, fmt.Errorf("can never write to a collection specified by portable data hash")
512                                 }
513                                 idx := strings.Index(mnt.PortableDataHash, "/")
514                                 if idx > 0 {
515                                         mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
516                                         mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
517                                         runner.Container.Mounts[bind] = mnt
518                                 }
519                                 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
520                                 if mnt.Path != "" && mnt.Path != "." {
521                                         if strings.HasPrefix(mnt.Path, "./") {
522                                                 mnt.Path = mnt.Path[2:]
523                                         } else if strings.HasPrefix(mnt.Path, "/") {
524                                                 mnt.Path = mnt.Path[1:]
525                                         }
526                                         src += "/" + mnt.Path
527                                 }
528                         } else {
529                                 src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
530                                 arvMountCmd = append(arvMountCmd, "--mount-tmp", fmt.Sprintf("tmp%d", tmpcount))
531                                 tmpcount++
532                         }
533                         if mnt.Writable {
534                                 if bind == runner.Container.OutputPath {
535                                         runner.HostOutputDir = src
536                                         bindmounts[bind] = bindmount{HostPath: src}
537                                 } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
538                                         copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
539                                 } else {
540                                         bindmounts[bind] = bindmount{HostPath: src}
541                                 }
542                         } else {
543                                 bindmounts[bind] = bindmount{HostPath: src, ReadOnly: true}
544                         }
545                         collectionPaths = append(collectionPaths, src)
546
547                 case mnt.Kind == "tmp":
548                         var tmpdir string
549                         tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
550                         if err != nil {
551                                 return nil, fmt.Errorf("while creating mount temp dir: %v", err)
552                         }
553                         st, staterr := os.Stat(tmpdir)
554                         if staterr != nil {
555                                 return nil, fmt.Errorf("while Stat on temp dir: %v", staterr)
556                         }
557                         err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
558                         if err != nil {
559                                 return nil, fmt.Errorf("while Chmod temp dir: %v", err)
560                         }
561                         bindmounts[bind] = bindmount{HostPath: tmpdir}
562                         if bind == runner.Container.OutputPath {
563                                 runner.HostOutputDir = tmpdir
564                         }
565
566                 case mnt.Kind == "json" || mnt.Kind == "text":
567                         var filedata []byte
568                         if mnt.Kind == "json" {
569                                 filedata, err = json.Marshal(mnt.Content)
570                                 if err != nil {
571                                         return nil, fmt.Errorf("encoding json data: %v", err)
572                                 }
573                         } else {
574                                 text, ok := mnt.Content.(string)
575                                 if !ok {
576                                         return nil, fmt.Errorf("content for mount %q must be a string", bind)
577                                 }
578                                 filedata = []byte(text)
579                         }
580
581                         tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
582                         if err != nil {
583                                 return nil, fmt.Errorf("creating temp dir: %v", err)
584                         }
585                         tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
586                         err = ioutil.WriteFile(tmpfn, filedata, 0444)
587                         if err != nil {
588                                 return nil, fmt.Errorf("writing temp file: %v", err)
589                         }
590                         if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && (notSecret || runner.Container.Mounts[runner.Container.OutputPath].Kind != "collection") {
591                                 // In most cases, if the container
592                                 // specifies a literal file inside the
593                                 // output path, we copy it into the
594                                 // output directory (either a mounted
595                                 // collection or a staging area on the
596                                 // host fs). If it's a secret, it will
597                                 // be skipped when copying output from
598                                 // staging to Keep later.
599                                 copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
600                         } else {
601                                 // If a secret is outside OutputPath,
602                                 // we bind mount the secret file
603                                 // directly just like other mounts. We
604                                 // also use this strategy when a
605                                 // secret is inside OutputPath but
606                                 // OutputPath is a live collection, to
607                                 // avoid writing the secret to
608                                 // Keep. Attempting to remove a
609                                 // bind-mounted secret file from
610                                 // inside the container will return a
611                                 // "Device or resource busy" error
612                                 // that might not be handled well by
613                                 // the container, which is why we
614                                 // don't use this strategy when
615                                 // OutputPath is a staging directory.
616                                 bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
617                         }
618
619                 case mnt.Kind == "git_tree":
620                         tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
621                         if err != nil {
622                                 return nil, fmt.Errorf("creating temp dir: %v", err)
623                         }
624                         err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
625                         if err != nil {
626                                 return nil, err
627                         }
628                         bindmounts[bind] = bindmount{HostPath: tmpdir, ReadOnly: true}
629                 }
630         }
631
632         if runner.HostOutputDir == "" {
633                 return nil, fmt.Errorf("output path does not correspond to a writable mount point")
634         }
635
636         if needCertMount && runner.Container.RuntimeConstraints.API {
637                 for _, certfile := range arvadosclient.CertFiles {
638                         _, err := os.Stat(certfile)
639                         if err == nil {
640                                 bindmounts["/etc/arvados/ca-certificates.crt"] = bindmount{HostPath: certfile, ReadOnly: true}
641                                 break
642                         }
643                 }
644         }
645
646         if pdhOnly {
647                 // If we are only mounting collections by pdh, make
648                 // sure we don't subscribe to websocket events to
649                 // avoid putting undesired load on the API server
650                 arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id", "--disable-event-listening")
651         } else {
652                 arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
653         }
654         // the by_uuid mount point is used by singularity when writing
655         // out docker images converted to SIF
656         arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
657         arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
658
659         runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
660         if err != nil {
661                 return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
662         }
663
664         for _, p := range collectionPaths {
665                 _, err = os.Stat(p)
666                 if err != nil {
667                         return nil, fmt.Errorf("while checking that input files exist: %v", err)
668                 }
669         }
670
671         for _, cp := range copyFiles {
672                 st, err := os.Stat(cp.src)
673                 if err != nil {
674                         return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
675                 }
676                 if st.IsDir() {
677                         err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
678                                 if walkerr != nil {
679                                         return walkerr
680                                 }
681                                 target := path.Join(cp.bind, walkpath[len(cp.src):])
682                                 if walkinfo.Mode().IsRegular() {
683                                         copyerr := copyfile(walkpath, target)
684                                         if copyerr != nil {
685                                                 return copyerr
686                                         }
687                                         return os.Chmod(target, walkinfo.Mode()|0777)
688                                 } else if walkinfo.Mode().IsDir() {
689                                         mkerr := os.MkdirAll(target, 0777)
690                                         if mkerr != nil {
691                                                 return mkerr
692                                         }
693                                         return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
694                                 } else {
695                                         return fmt.Errorf("source %q is not a regular file or directory", cp.src)
696                                 }
697                         })
698                 } else if st.Mode().IsRegular() {
699                         err = copyfile(cp.src, cp.bind)
700                         if err == nil {
701                                 err = os.Chmod(cp.bind, st.Mode()|0777)
702                         }
703                 }
704                 if err != nil {
705                         return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
706                 }
707         }
708
709         return bindmounts, nil
710 }
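
// For illustration only (hypothetical values): with storage class
// "default", a 10-second stat interval, the Docker executor, one
// writable tmp-backed collection mount, and all other collections
// referenced by portable data hash, the code above assembles an
// arv-mount command line along these lines:
//
//	arv-mount --foreground --read-write \
//	        --storage-classes default --crunchstat-interval=10 \
//	        --allow-other --mount-tmp tmp0 \
//	        --mount-by-pdh by_id --disable-event-listening \
//	        --mount-by-id by_uuid /tmp/crunch-run.../keep...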
711
712 func (runner *ContainerRunner) stopHoststat() error {
713         if runner.hoststatReporter == nil {
714                 return nil
715         }
716         runner.hoststatReporter.Stop()
717         err := runner.hoststatLogger.Close()
718         if err != nil {
719                 return fmt.Errorf("error closing hoststat logs: %v", err)
720         }
721         return nil
722 }
723
724 func (runner *ContainerRunner) startHoststat() error {
725         w, err := runner.NewLogWriter("hoststat")
726         if err != nil {
727                 return err
728         }
729         runner.hoststatLogger = NewThrottledLogger(w)
730         runner.hoststatReporter = &crunchstat.Reporter{
731                 Logger:     log.New(runner.hoststatLogger, "", 0),
732                 CgroupRoot: runner.cgroupRoot,
733                 PollPeriod: runner.statInterval,
734         }
735         runner.hoststatReporter.Start()
736         return nil
737 }
738
739 func (runner *ContainerRunner) startCrunchstat() error {
740         w, err := runner.NewLogWriter("crunchstat")
741         if err != nil {
742                 return err
743         }
744         runner.statLogger = NewThrottledLogger(w)
745         runner.statReporter = &crunchstat.Reporter{
746                 CID:          runner.executor.CgroupID(),
747                 Logger:       log.New(runner.statLogger, "", 0),
748                 CgroupParent: runner.expectCgroupParent,
749                 CgroupRoot:   runner.cgroupRoot,
750                 PollPeriod:   runner.statInterval,
751                 TempDir:      runner.parentTemp,
752         }
753         runner.statReporter.Start()
754         return nil
755 }
756
757 type infoCommand struct {
758         label string
759         cmd   []string
760 }
761
762 // LogHostInfo logs info about the current host, for debugging and
763 // accounting purposes. Although it's logged as "node-info", this is
764 // about the environment where crunch-run is actually running, which
765 // might differ from what's described in the node record (see
766 // LogNodeRecord).
767 func (runner *ContainerRunner) LogHostInfo() (err error) {
768         w, err := runner.NewLogWriter("node-info")
769         if err != nil {
770                 return
771         }
772
773         commands := []infoCommand{
774                 {
775                         label: "Host Information",
776                         cmd:   []string{"uname", "-a"},
777                 },
778                 {
779                         label: "CPU Information",
780                         cmd:   []string{"cat", "/proc/cpuinfo"},
781                 },
782                 {
783                         label: "Memory Information",
784                         cmd:   []string{"cat", "/proc/meminfo"},
785                 },
786                 {
787                         label: "Disk Space",
788                         cmd:   []string{"df", "-m", "/", os.TempDir()},
789                 },
790                 {
791                         label: "Disk INodes",
792                         cmd:   []string{"df", "-i", "/", os.TempDir()},
793                 },
794         }
795
796         // Run commands with informational output to be logged.
797         for _, command := range commands {
798                 fmt.Fprintln(w, command.label)
799                 cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
800                 cmd.Stdout = w
801                 cmd.Stderr = w
802                 if err := cmd.Run(); err != nil {
803                         err = fmt.Errorf("While running command %q: %v", command.cmd, err)
804                         fmt.Fprintln(w, err)
805                         return err
806                 }
807                 fmt.Fprintln(w, "")
808         }
809
810         err = w.Close()
811         if err != nil {
812                 return fmt.Errorf("While closing node-info logs: %v", err)
813         }
814         return nil
815 }
816
817 // LogContainerRecord gets and saves the raw JSON container record from the API server
818 func (runner *ContainerRunner) LogContainerRecord() error {
819         logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
820         if !logged && err == nil {
821                 err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
822         }
823         return err
824 }
825
826 // LogNodeRecord logs the current host's InstanceType config entry (or
827 // the arvados#node record, if running via crunch-dispatch-slurm).
828 func (runner *ContainerRunner) LogNodeRecord() error {
829         if it := os.Getenv("InstanceType"); it != "" {
830                 // Dispatched via arvados-dispatch-cloud. Save
831                 // InstanceType config fragment received from
832                 // dispatcher on stdin.
833                 w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
834                 if err != nil {
835                         return err
836                 }
837                 defer w.Close()
838                 _, err = io.WriteString(w, it)
839                 if err != nil {
840                         return err
841                 }
842                 return w.Close()
843         }
844         // Dispatched via crunch-dispatch-slurm. Look up
845         // apiserver's node record corresponding to
846         // $SLURMD_NODENAME.
847         hostname := os.Getenv("SLURMD_NODENAME")
848         if hostname == "" {
849                 hostname, _ = os.Hostname()
850         }
851         _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
852                 // The "info" field has admin-only info when
853                 // obtained with a privileged token, and
854                 // should not be logged.
855                 node, ok := resp.(map[string]interface{})
856                 if ok {
857                         delete(node, "info")
858                 }
859         })
860         return err
861 }
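
// Hypothetical example of the InstanceType config fragment saved as
// node.json in the cloud-dispatch case (the exact fields depend on the
// cluster configuration; these values are made up):
//
//	{"Name": "m5.large", "VCPUs": 2, "RAM": 8589934592, "Price": 0.096}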
862
863 func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
864         writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
865         if err != nil {
866                 return false, err
867         }
868         w := &ArvLogWriter{
869                 ArvClient:     runner.DispatcherArvClient,
870                 UUID:          runner.Container.UUID,
871                 loggingStream: label,
872                 writeCloser:   writer,
873         }
874
875         reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
876         if err != nil {
877                 return false, fmt.Errorf("error getting %s record: %v", label, err)
878         }
879         defer reader.Close()
880
881         dec := json.NewDecoder(reader)
882         dec.UseNumber()
883         var resp map[string]interface{}
884         if err = dec.Decode(&resp); err != nil {
885                 return false, fmt.Errorf("error decoding %s list response: %v", label, err)
886         }
887         items, ok := resp["items"].([]interface{})
888         if !ok {
889                 return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
890         } else if len(items) < 1 {
891                 return false, nil
892         }
893         if munge != nil {
894                 munge(items[0])
895         }
896         // Re-encode it using indentation to improve readability
897         enc := json.NewEncoder(w)
898         enc.SetIndent("", "    ")
899         if err = enc.Encode(items[0]); err != nil {
900                 return false, fmt.Errorf("error logging %s record: %v", label, err)
901         }
902         err = w.Close()
903         if err != nil {
904                 return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
905         }
906         return true, nil
907 }
908
909 func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
910         stdoutPath := mntPath[len(runner.Container.OutputPath):]
911         index := strings.LastIndex(stdoutPath, "/")
912         if index > 0 {
913                 subdirs := stdoutPath[:index]
914                 if subdirs != "" {
915                         st, err := os.Stat(runner.HostOutputDir)
916                         if err != nil {
917                                 return nil, fmt.Errorf("While Stat on temp dir: %v", err)
918                         }
919                         stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
920                         err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
921                         if err != nil {
922                                 return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
923                         }
924                 }
925         }
926         stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
927         if err != nil {
928                 return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
929         }
930
931         return stdoutFile, nil
932 }
933
934 // CreateContainer creates the docker container.
935 func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {
936         var stdin io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
937         if mnt, ok := runner.Container.Mounts["stdin"]; ok {
938                 switch mnt.Kind {
939                 case "collection":
940                         var collID string
941                         if mnt.UUID != "" {
942                                 collID = mnt.UUID
943                         } else {
944                                 collID = mnt.PortableDataHash
945                         }
946                         path := runner.ArvMountPoint + "/by_id/" + collID + "/" + mnt.Path
947                         f, err := os.Open(path)
948                         if err != nil {
949                                 return err
950                         }
951                         stdin = f
952                 case "json":
953                         j, err := json.Marshal(mnt.Content)
954                         if err != nil {
955                                 return fmt.Errorf("error encoding stdin json data: %v", err)
956                         }
957                         stdin = ioutil.NopCloser(bytes.NewReader(j))
958                 default:
959                         return fmt.Errorf("stdin mount has unsupported kind %q", mnt.Kind)
960                 }
961         }
962
963         var stdout, stderr io.WriteCloser
964         if mnt, ok := runner.Container.Mounts["stdout"]; ok {
965                 f, err := runner.getStdoutFile(mnt.Path)
966                 if err != nil {
967                         return err
968                 }
969                 stdout = f
970         } else if w, err := runner.NewLogWriter("stdout"); err != nil {
971                 return err
972         } else {
973                 stdout = NewThrottledLogger(w)
974         }
975
976         if mnt, ok := runner.Container.Mounts["stderr"]; ok {
977                 f, err := runner.getStdoutFile(mnt.Path)
978                 if err != nil {
979                         return err
980                 }
981                 stderr = f
982         } else if w, err := runner.NewLogWriter("stderr"); err != nil {
983                 return err
984         } else {
985                 stderr = NewThrottledLogger(w)
986         }
987
988         env := runner.Container.Environment
989         enableNetwork := runner.enableNetwork == "always"
990         if runner.Container.RuntimeConstraints.API {
991                 enableNetwork = true
992                 tok, err := runner.ContainerToken()
993                 if err != nil {
994                         return err
995                 }
996                 env = map[string]string{}
997                 for k, v := range runner.Container.Environment {
998                         env[k] = v
999                 }
1000                 env["ARVADOS_API_TOKEN"] = tok
1001                 env["ARVADOS_API_HOST"] = os.Getenv("ARVADOS_API_HOST")
1002                 env["ARVADOS_API_HOST_INSECURE"] = os.Getenv("ARVADOS_API_HOST_INSECURE")
1003                 env["ARVADOS_KEEP_SERVICES"] = os.Getenv("ARVADOS_KEEP_SERVICES")
1004         }
1005         workdir := runner.Container.Cwd
1006         if workdir == "." {
1007                 // both "" and "." mean default
1008                 workdir = ""
1009         }
1010         ram := runner.Container.RuntimeConstraints.RAM
1011         if !runner.enableMemoryLimit {
1012                 ram = 0
1013         }
1014         runner.executorStdin = stdin
1015         runner.executorStdout = stdout
1016         runner.executorStderr = stderr
1017
1018         if runner.Container.RuntimeConstraints.CUDA.DeviceCount > 0 {
1019                 nvidiaModprobe(runner.CrunchLog)
1020         }
1021
1022         return runner.executor.Create(containerSpec{
1023                 Image:           imageID,
1024                 VCPUs:           runner.Container.RuntimeConstraints.VCPUs,
1025                 RAM:             ram,
1026                 WorkingDir:      workdir,
1027                 Env:             env,
1028                 BindMounts:      bindmounts,
1029                 Command:         runner.Container.Command,
1030                 EnableNetwork:   enableNetwork,
1031                 CUDADeviceCount: runner.Container.RuntimeConstraints.CUDA.DeviceCount,
1032                 NetworkMode:     runner.networkMode,
1033                 CgroupParent:    runner.setCgroupParent,
1034                 Stdin:           stdin,
1035                 Stdout:          stdout,
1036                 Stderr:          stderr,
1037         })
1038 }
1039
1040 // StartContainer starts the docker container created by CreateContainer.
1041 func (runner *ContainerRunner) StartContainer() error {
1042         runner.CrunchLog.Printf("Starting container")
1043         runner.cStateLock.Lock()
1044         defer runner.cStateLock.Unlock()
1045         if runner.cCancelled {
1046                 return ErrCancelled
1047         }
1048         err := runner.executor.Start()
1049         if err != nil {
1050                 var advice string
1051                 if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
1052                         advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
1053                 }
1054                 return fmt.Errorf("could not start container: %v%s", err, advice)
1055         }
1056         return nil
1057 }
1058
1059 // WaitFinish waits for the container to terminate, captures the exit code, and
1060 // closes the stdout/stderr logging.
1061 func (runner *ContainerRunner) WaitFinish() error {
1062         runner.CrunchLog.Print("Waiting for container to finish")
1063         var timeout <-chan time.Time
1064         if s := runner.Container.SchedulingParameters.MaxRunTime; s > 0 {
1065                 timeout = time.After(time.Duration(s) * time.Second)
1066         }
1067         ctx, cancel := context.WithCancel(context.Background())
1068         defer cancel()
1069         go func() {
1070                 select {
1071                 case <-timeout:
1072                         runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
1073                         runner.stop(nil)
1074                 case <-runner.ArvMountExit:
1075                         runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
1076                         runner.stop(nil)
1077                 case <-ctx.Done():
1078                 }
1079         }()
1080         exitcode, err := runner.executor.Wait(ctx)
1081         if err != nil {
1082                 runner.checkBrokenNode(err)
1083                 return err
1084         }
1085         runner.ExitCode = &exitcode
1086
1087         extra := ""
1088         if exitcode&0x80 != 0 {
1089                 // Convert the raw exit status (0x80 + signal number) to a
1090                 // suffix to log after the code, like " (signal 9, SIGKILL)"
1091                 // or, if the signal name is unknown, " (signal 42)"
1092                 sig := syscall.WaitStatus(exitcode).Signal()
1093                 if name := unix.SignalName(sig); name != "" {
1094                         extra = fmt.Sprintf(" (signal %d, %s)", sig, name)
1095                 } else {
1096                         extra = fmt.Sprintf(" (signal %d)", sig)
1097                 }
1098         }
1099         runner.CrunchLog.Printf("Container exited with status code %d%s", exitcode, extra)
1100         err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1101                 "container": arvadosclient.Dict{"exit_code": exitcode},
1102         }, nil)
1103         if err != nil {
1104                 runner.CrunchLog.Printf("ignoring error updating exit_code: %s", err)
1105         }
1106
1107         var returnErr error
1108         if err = runner.executorStdin.Close(); err != nil {
1109                 err = fmt.Errorf("error closing container stdin: %s", err)
1110                 runner.CrunchLog.Printf("%s", err)
1111                 returnErr = err
1112         }
1113         if err = runner.executorStdout.Close(); err != nil {
1114                 err = fmt.Errorf("error closing container stdout: %s", err)
1115                 runner.CrunchLog.Printf("%s", err)
1116                 if returnErr == nil {
1117                         returnErr = err
1118                 }
1119         }
1120         if err = runner.executorStderr.Close(); err != nil {
1121                 err = fmt.Errorf("error closing container stderr: %s", err)
1122                 runner.CrunchLog.Printf("%s", err)
1123                 if returnErr == nil {
1124                         returnErr = err
1125                 }
1126         }
1127
1128         if runner.statReporter != nil {
1129                 runner.statReporter.Stop()
1130                 err = runner.statLogger.Close()
1131                 if err != nil {
1132                         runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
1133                 }
1134         }
1135         return returnErr
1136 }
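
// Worked example of the status annotation above (hypothetical helper,
// not used elsewhere): a container killed by SIGKILL typically reports
// raw status 137 (0x80 + 9), so the log line ends with
// "status code 137 (signal 9, SIGKILL)".
func exampleExitStatusSuffix(exitcode int) string {
        if exitcode&0x80 == 0 {
                return ""
        }
        sig := syscall.WaitStatus(exitcode).Signal()
        if name := unix.SignalName(sig); name != "" {
                return fmt.Sprintf(" (signal %d, %s)", sig, name)
        }
        return fmt.Sprintf(" (signal %d)", sig)
}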
1137
1138 func (runner *ContainerRunner) updateLogs() {
1139         ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
1140         defer ticker.Stop()
1141
1142         sigusr1 := make(chan os.Signal, 1)
1143         signal.Notify(sigusr1, syscall.SIGUSR1)
1144         defer signal.Stop(sigusr1)
1145
1146         saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
1147         saveAtSize := crunchLogUpdateSize
1148         var savedSize int64
1149         for {
1150                 select {
1151                 case <-ticker.C:
1152                 case <-sigusr1:
1153                         saveAtTime = time.Now()
1154                 }
1155                 runner.logMtx.Lock()
1156                 done := runner.LogsPDH != nil
1157                 runner.logMtx.Unlock()
1158                 if done {
1159                         return
1160                 }
1161                 size := runner.LogCollection.Size()
1162                 if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
1163                         continue
1164                 }
1165                 saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
1166                 saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
1167                 saved, err := runner.saveLogCollection(false)
1168                 if err != nil {
1169                         runner.CrunchLog.Printf("error updating log collection: %s", err)
1170                         continue
1171                 }
1172
1173                 err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1174                         "container": arvadosclient.Dict{"log": saved.PortableDataHash},
1175                 }, nil)
1176                 if err != nil {
1177                         runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
1178                         continue
1179                 }
1180
1181                 savedSize = size
1182         }
1183 }
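
// Because of the sigusr1 case above, an immediate log collection save
// can be requested from outside crunch-run, e.g. (assuming $pid is the
// crunch-run process ID):
//
//	kill -USR1 $pid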
1184
1185 func (runner *ContainerRunner) reportArvMountWarning(pattern, text string) {
1186         var updated arvados.Container
1187         err := runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1188                 "container": arvadosclient.Dict{
1189                         "runtime_status": arvadosclient.Dict{
1190                                 "warning":       "arv-mount: " + pattern,
1191                                 "warningDetail": text,
1192                         },
1193                 },
1194         }, &updated)
1195         if err != nil {
1196                 runner.CrunchLog.Printf("error updating container runtime_status: %s", err)
1197         }
1198 }
1199
1200 // CaptureOutput saves data from the container's output directory if
1201 // needed, and updates the container output accordingly.
1202 func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) error {
1203         if runner.Container.RuntimeConstraints.API {
1204                 // Output may have been set directly by the container, so
1205                 // refresh the container record to check.
1206                 err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
1207                         nil, &runner.Container)
1208                 if err != nil {
1209                         return err
1210                 }
1211                 if runner.Container.Output != "" {
1212                         // Container output is already set.
1213                         runner.OutputPDH = &runner.Container.Output
1214                         return nil
1215                 }
1216         }
1217
1218         txt, err := (&copier{
1219                 client:        runner.containerClient,
1220                 arvClient:     runner.ContainerArvClient,
1221                 keepClient:    runner.ContainerKeepClient,
1222                 hostOutputDir: runner.HostOutputDir,
1223                 ctrOutputDir:  runner.Container.OutputPath,
1224                 bindmounts:    bindmounts,
1225                 mounts:        runner.Container.Mounts,
1226                 secretMounts:  runner.SecretMounts,
1227                 logger:        runner.CrunchLog,
1228         }).Copy()
1229         if err != nil {
1230                 return err
1231         }
1232         if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
1233                 runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
1234                 fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
1235                 if err != nil {
1236                         return err
1237                 }
1238                 txt, err = fs.MarshalManifest(".")
1239                 if err != nil {
1240                         return err
1241                 }
1242         }
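        // The output collection is deliberately created trashed: only its
        // portable data hash is needed for the container record, and a
        // durable output collection is expected to be created separately
        // when the container request is finalized.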
1243         var resp arvados.Collection
1244         err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
1245                 "ensure_unique_name": true,
1246                 "collection": arvadosclient.Dict{
1247                         "is_trashed":    true,
1248                         "name":          "output for " + runner.Container.UUID,
1249                         "manifest_text": txt,
1250                 },
1251         }, &resp)
1252         if err != nil {
1253                 return fmt.Errorf("error creating output collection: %v", err)
1254         }
1255         runner.OutputPDH = &resp.PortableDataHash
1256         return nil
1257 }
1258
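// CleanupDirs unmounts arv-mount (killing it if unmounting times out),
// then removes the mount point directory and the temporary directory
// tree.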
1259 func (runner *ContainerRunner) CleanupDirs() {
1260         if runner.ArvMount != nil {
1261                 var delay int64 = 8
1262                 umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
1263                 umount.Stdout = runner.CrunchLog
1264                 umount.Stderr = runner.CrunchLog
1265                 runner.CrunchLog.Printf("Running %v", umount.Args)
1266                 umnterr := umount.Start()
1267
1268                 if umnterr != nil {
1269                         runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
1270                         runner.ArvMount.Process.Kill()
1271                 } else {
1272                         // If arv-mount --unmount gets stuck for any reason, we
1273                         // don't want to wait for it forever.  Do Wait() in a goroutine
1274                         // so it doesn't block crunch-run.
1275                         umountExit := make(chan error)
1276                         go func() {
1277                                 mnterr := umount.Wait()
1278                                 if mnterr != nil {
1279                                         runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
1280                                 }
1281                                 umountExit <- mnterr
1282                         }()
1283
1284                         for again := true; again; {
1285                                 again = false
1286                                 select {
1287                                 case <-umountExit:
1288                                         umount = nil
1289                                         again = true
1290                                 case <-runner.ArvMountExit:
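                                        // Note: this break only exits the
                                        // select; the loop then ends
                                        // because again is still false.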
1291                                         break
                                case <-time.After(time.Duration(delay+1) * time.Second):
1293                                         runner.CrunchLog.Printf("Timed out waiting for unmount")
1294                                         if umount != nil {
1295                                                 umount.Process.Kill()
1296                                         }
1297                                         runner.ArvMount.Process.Kill()
1298                                 }
1299                         }
1300                 }
1301                 runner.ArvMount = nil
1302         }
1303
1304         if runner.ArvMountPoint != "" {
1305                 if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
1306                         runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
1307                 }
1308                 runner.ArvMountPoint = ""
1309         }
1310
1311         if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
1312                 runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
1313         }
1314 }
1315
1316 // CommitLogs posts the collection containing the final container logs.
1317 func (runner *ContainerRunner) CommitLogs() error {
1318         func() {
1319                 // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
1320                 runner.cStateLock.Lock()
1321                 defer runner.cStateLock.Unlock()
1322
1323                 runner.CrunchLog.Print(runner.finalState)
1324
1325                 if runner.arvMountLog != nil {
1326                         runner.arvMountLog.Close()
1327                 }
1328                 runner.CrunchLog.Close()
1329
                // Closing CrunchLog above allows the log files to be
                // committed to Keep at this point, but re-open the crunch
                // log with ArvClient in case any further errors occur (such
                // as failing to write the log to Keep!) while shutting down.
1334                 runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
1335                         ArvClient:     runner.DispatcherArvClient,
1336                         UUID:          runner.Container.UUID,
1337                         loggingStream: "crunch-run",
1338                         writeCloser:   nil,
1339                 })
1340                 runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
1341         }()
1342
1343         if runner.keepstoreLogger != nil {
1344                 // Flush any buffered logs from our local keepstore
1345                 // process.  Discard anything logged after this point
1346                 // -- it won't end up in the log collection, so
1347                 // there's no point writing it to the collectionfs.
1348                 runner.keepstoreLogbuf.SetWriter(io.Discard)
1349                 runner.keepstoreLogger.Close()
1350                 runner.keepstoreLogger = nil
1351         }
1352
1353         if runner.LogsPDH != nil {
1354                 // If we have already assigned something to LogsPDH,
1355                 // we must be closing the re-opened log, which won't
1356                 // end up getting attached to the container record and
1357                 // therefore doesn't need to be saved as a collection
1358                 // -- it exists only to send logs to other channels.
1359                 return nil
1360         }
1361
1362         saved, err := runner.saveLogCollection(true)
1363         if err != nil {
1364                 return fmt.Errorf("error saving log collection: %s", err)
1365         }
1366         runner.logMtx.Lock()
1367         defer runner.logMtx.Unlock()
1368         runner.LogsPDH = &saved.PortableDataHash
1369         return nil
1370 }
1371
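// saveLogCollection saves the current log collection on the API server:
// the first call creates it, later calls update it in place. Non-final
// saves push the trash_at/delete_at times into the future; the final save
// marks the collection trashed, since the container record retains its
// portable data hash.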
1372 func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
1373         runner.logMtx.Lock()
1374         defer runner.logMtx.Unlock()
1375         if runner.LogsPDH != nil {
1376                 // Already finalized.
1377                 return
1378         }
1379         updates := arvadosclient.Dict{
1380                 "name": "logs for " + runner.Container.UUID,
1381         }
1382         mt, err1 := runner.LogCollection.MarshalManifest(".")
1383         if err1 == nil {
1384                 // Only send updated manifest text if there was no
1385                 // error.
1386                 updates["manifest_text"] = mt
1387         }
1388
1389         // Even if flushing the manifest had an error, we still want
1390         // to update the log record, if possible, to push the trash_at
1391         // and delete_at times into the future.  Details on bug
1392         // #17293.
1393         if final {
1394                 updates["is_trashed"] = true
1395         } else {
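                // Intermediate saves keep the collection alive by pushing
                // expiry 24 update periods into the future; if updates stop
                // (e.g., crunch-run dies), the partial log collection is
                // cleaned up automatically.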
1396                 exp := time.Now().Add(crunchLogUpdatePeriod * 24)
1397                 updates["trash_at"] = exp
1398                 updates["delete_at"] = exp
1399         }
1400         reqBody := arvadosclient.Dict{"collection": updates}
1401         var err2 error
1402         if runner.logUUID == "" {
1403                 reqBody["ensure_unique_name"] = true
1404                 err2 = runner.DispatcherArvClient.Create("collections", reqBody, &response)
1405         } else {
1406                 err2 = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
1407         }
1408         if err2 == nil {
1409                 runner.logUUID = response.UUID
1410         }
1411
1412         if err1 != nil || err2 != nil {
1413                 err = fmt.Errorf("error recording logs: %q, %q", err1, err2)
1414         }
1415         return
1416 }
1417
1418 // UpdateContainerRunning updates the container state to "Running"
1419 func (runner *ContainerRunner) UpdateContainerRunning() error {
1420         runner.cStateLock.Lock()
1421         defer runner.cStateLock.Unlock()
1422         if runner.cCancelled {
1423                 return ErrCancelled
1424         }
1425         return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
1426                 arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running", "gateway_address": runner.gateway.Address}}, nil)
1427 }
1428
1429 // ContainerToken returns the api_token the container (and any
1430 // arv-mount processes) are allowed to use.
1431 func (runner *ContainerRunner) ContainerToken() (string, error) {
1432         if runner.token != "" {
1433                 return runner.token, nil
1434         }
1435
1436         var auth arvados.APIClientAuthorization
1437         err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
1438         if err != nil {
1439                 return "", err
1440         }
1441         runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
1442         return runner.token, nil
1443 }
1444
// UpdateContainerFinal updates the container record state on the API
// server to "Complete" or "Cancelled".
1447 func (runner *ContainerRunner) UpdateContainerFinal() error {
1448         update := arvadosclient.Dict{}
1449         update["state"] = runner.finalState
1450         if runner.LogsPDH != nil {
1451                 update["log"] = *runner.LogsPDH
1452         }
1453         if runner.ExitCode != nil {
1454                 update["exit_code"] = *runner.ExitCode
1455         } else {
1456                 update["exit_code"] = nil
1457         }
1458         if runner.finalState == "Complete" && runner.OutputPDH != nil {
1459                 update["output"] = *runner.OutputPDH
1460         }
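        // If the dispatcher supplied instance type details (as JSON in the
        // InstanceType environment variable), record the cost accrued so
        // far: Price is per hour of wall clock time.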
1461         var it arvados.InstanceType
1462         if j := os.Getenv("InstanceType"); j != "" && json.Unmarshal([]byte(j), &it) == nil && it.Price > 0 {
                update["cost"] = it.Price * time.Since(runner.costStartTime).Seconds() / time.Hour.Seconds()
1464         }
1465         return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
1466 }
1467
1468 // IsCancelled returns the value of Cancelled, with goroutine safety.
1469 func (runner *ContainerRunner) IsCancelled() bool {
1470         runner.cStateLock.Lock()
1471         defer runner.cStateLock.Unlock()
1472         return runner.cCancelled
1473 }
1474
// NewArvLogWriter creates an ArvLogWriter that writes both to a file in
// the log collection and to the Arvados logs API.
1476 func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
1477         writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
1478         if err != nil {
1479                 return nil, err
1480         }
1481         return &ArvLogWriter{
1482                 ArvClient:     runner.DispatcherArvClient,
1483                 UUID:          runner.Container.UUID,
1484                 loggingStream: name,
1485                 writeCloser:   writer,
1486         }, nil
1487 }
1488
// Run runs the full container lifecycle.
1490 func (runner *ContainerRunner) Run() (err error) {
1491         runner.CrunchLog.Printf("crunch-run %s started", cmd.Version.String())
1492         runner.CrunchLog.Printf("%s", currentUserAndGroups())
1493         v, _ := exec.Command("arv-mount", "--version").CombinedOutput()
1494         runner.CrunchLog.Printf("Using FUSE mount: %s", v)
1495         runner.CrunchLog.Printf("Using container runtime: %s", runner.executor.Runtime())
1496         runner.CrunchLog.Printf("Executing container: %s", runner.Container.UUID)
1497         runner.costStartTime = time.Now()
1498
1499         hostname, hosterr := os.Hostname()
1500         if hosterr != nil {
                runner.CrunchLog.Printf("Error getting hostname: %v", hosterr)
1502         } else {
1503                 runner.CrunchLog.Printf("Executing on host '%s'", hostname)
1504         }
1505
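        // finalState is what UpdateContainerFinal will eventually report.
        // It stays "Queued" through early setup failures (so the dispatcher
        // can requeue the container), and is switched to "Cancelled" or
        // "Complete" as the run progresses.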
1506         runner.finalState = "Queued"
1507
1508         defer func() {
1509                 runner.CleanupDirs()
1510
1511                 runner.CrunchLog.Printf("crunch-run finished")
1512                 runner.CrunchLog.Close()
1513         }()
1514
1515         err = runner.fetchContainerRecord()
1516         if err != nil {
1517                 return
1518         }
1519         if runner.Container.State != "Locked" {
1520                 return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
1521         }
1522
1523         var bindmounts map[string]bindmount
1524         defer func() {
1525                 // checkErr prints e (unless it's nil) and sets err to
1526                 // e (unless err is already non-nil). Thus, if err
1527                 // hasn't already been assigned when Run() returns,
1528                 // this cleanup func will cause Run() to return the
1529                 // first non-nil error that is passed to checkErr().
1530                 checkErr := func(errorIn string, e error) {
1531                         if e == nil {
1532                                 return
1533                         }
1534                         runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
1535                         if err == nil {
1536                                 err = e
1537                         }
1538                         if runner.finalState == "Complete" {
1539                                 // There was an error in the finalization.
1540                                 runner.finalState = "Cancelled"
1541                         }
1542                 }
1543
1544                 // Log the error encountered in Run(), if any
1545                 checkErr("Run", err)
1546
1547                 if runner.finalState == "Queued" {
1548                         runner.UpdateContainerFinal()
1549                         return
1550                 }
1551
1552                 if runner.IsCancelled() {
1553                         runner.finalState = "Cancelled"
1554                         // but don't return yet -- we still want to
1555                         // capture partial output and write logs
1556                 }
1557
1558                 if bindmounts != nil {
1559                         checkErr("CaptureOutput", runner.CaptureOutput(bindmounts))
1560                 }
1561                 checkErr("stopHoststat", runner.stopHoststat())
1562                 checkErr("CommitLogs", runner.CommitLogs())
1563                 runner.CleanupDirs()
1564                 checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
1565         }()
1566
1567         runner.setupSignals()
1568         err = runner.startHoststat()
1569         if err != nil {
1570                 return
1571         }
1572
1573         // set up FUSE mount and binds
1574         bindmounts, err = runner.SetupMounts()
1575         if err != nil {
1576                 runner.finalState = "Cancelled"
1577                 err = fmt.Errorf("While setting up mounts: %v", err)
1578                 return
1579         }
1580
1581         // check for and/or load image
1582         imageID, err := runner.LoadImage()
1583         if err != nil {
1584                 if !runner.checkBrokenNode(err) {
1585                         // Failed to load image but not due to a "broken node"
1586                         // condition, probably user error.
1587                         runner.finalState = "Cancelled"
1588                 }
1589                 err = fmt.Errorf("While loading container image: %v", err)
1590                 return
1591         }
1592
1593         err = runner.CreateContainer(imageID, bindmounts)
1594         if err != nil {
1595                 return
1596         }
1597         err = runner.LogHostInfo()
1598         if err != nil {
1599                 return
1600         }
1601         err = runner.LogNodeRecord()
1602         if err != nil {
1603                 return
1604         }
1605         err = runner.LogContainerRecord()
1606         if err != nil {
1607                 return
1608         }
1609
1610         if runner.IsCancelled() {
1611                 return
1612         }
1613
1614         err = runner.UpdateContainerRunning()
1615         if err != nil {
1616                 return
1617         }
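        // From this point on the container record says Running, so any
        // failure short of a successful WaitFinish must finalize the
        // container as "Cancelled"; set that default here and override it
        // on success below.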
1618         runner.finalState = "Cancelled"
1619
1620         err = runner.startCrunchstat()
1621         if err != nil {
1622                 return
1623         }
1624
1625         err = runner.StartContainer()
1626         if err != nil {
1627                 runner.checkBrokenNode(err)
1628                 return
1629         }
1630
1631         err = runner.WaitFinish()
1632         if err == nil && !runner.IsCancelled() {
1633                 runner.finalState = "Complete"
1634         }
1635         return
1636 }
1637
// fetchContainerRecord fetches the current container record (uuid =
// runner.Container.UUID) into runner.Container.
1640 func (runner *ContainerRunner) fetchContainerRecord() error {
1641         reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
1642         if err != nil {
1643                 return fmt.Errorf("error fetching container record: %v", err)
1644         }
1645         defer reader.Close()
1646
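        // UseNumber makes the decoder keep numeric values as json.Number
        // instead of float64, preserving the precision of large integer
        // fields such as RAM in runtime_constraints.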
1647         dec := json.NewDecoder(reader)
1648         dec.UseNumber()
1649         err = dec.Decode(&runner.Container)
1650         if err != nil {
1651                 return fmt.Errorf("error decoding container record: %v", err)
1652         }
1653
1654         var sm struct {
1655                 SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
1656         }
1657
1658         containerToken, err := runner.ContainerToken()
1659         if err != nil {
1660                 return fmt.Errorf("error getting container token: %v", err)
1661         }
1662
1663         runner.ContainerArvClient, runner.ContainerKeepClient,
1664                 runner.containerClient, err = runner.MkArvClient(containerToken)
1665         if err != nil {
1666                 return fmt.Errorf("error creating container API client: %v", err)
1667         }
1668
1669         runner.ContainerKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
1670         runner.DispatcherKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
1671
1672         err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
1673         if err != nil {
1674                 if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
1675                         return fmt.Errorf("error fetching secret_mounts: %v", err)
1676                 }
1677                 // ok && apierr.HttpStatusCode == 404, which means
1678                 // secret_mounts isn't supported by this API server.
1679         }
1680         runner.SecretMounts = sm.SecretMounts
1681
1682         return nil
1683 }
1684
1685 // NewContainerRunner creates a new container runner.
1686 func NewContainerRunner(dispatcherClient *arvados.Client,
1687         dispatcherArvClient IArvadosClient,
1688         dispatcherKeepClient IKeepClient,
1689         containerUUID string) (*ContainerRunner, error) {
1690
1691         cr := &ContainerRunner{
1692                 dispatcherClient:     dispatcherClient,
1693                 DispatcherArvClient:  dispatcherArvClient,
1694                 DispatcherKeepClient: dispatcherKeepClient,
1695         }
1696         cr.NewLogWriter = cr.NewArvLogWriter
1697         cr.RunArvMount = cr.ArvMountCmd
1698         cr.MkTempDir = ioutil.TempDir
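        // MkArvClient builds the three clients that act with a given token
        // (normally the container token, as opposed to the dispatcher's):
        // a legacy arvadosclient for API calls, a keepclient for block
        // I/O, and an SDK client used for collection filesystems.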
1699         cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
1700                 cl, err := arvadosclient.MakeArvadosClient()
1701                 if err != nil {
1702                         return nil, nil, nil, err
1703                 }
1704                 cl.ApiToken = token
1705                 kc, err := keepclient.MakeKeepClient(cl)
1706                 if err != nil {
1707                         return nil, nil, nil, err
1708                 }
1709                 c2 := arvados.NewClientFromEnv()
1710                 c2.AuthToken = token
1711                 return cl, kc, c2, nil
1712         }
1713         var err error
1714         cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
1715         if err != nil {
1716                 return nil, err
1717         }
1718         cr.Container.UUID = containerUUID
1719         w, err := cr.NewLogWriter("crunch-run")
1720         if err != nil {
1721                 return nil, err
1722         }
1723         cr.CrunchLog = NewThrottledLogger(w)
1724         cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
1725
1726         loadLogThrottleParams(dispatcherArvClient)
1727         go cr.updateLogs()
1728
1729         return cr, nil
1730 }
1731
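// RunCommand implements the crunch-run command line tool: it parses
// flags, optionally starts a local keepstore process, constructs a
// ContainerRunner, and runs the container to completion. The return
// value is the process exit code.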
1732 func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
1733         log := log.New(stderr, "", 0)
1734         flags := flag.NewFlagSet(prog, flag.ContinueOnError)
1735         statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
1736         cgroupRoot := flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
1737         cgroupParent := flags.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
1738         cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
1739         caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
1740         detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
1741         stdinConfig := flags.Bool("stdin-config", false, "Load config and environment variables from JSON message on stdin")
1742         configFile := flags.String("config", arvados.DefaultConfigFile, "filename of cluster config file to try loading if -stdin-config=false (default is $ARVADOS_CONFIG)")
1743         sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
1744         kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
1745         list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes")
1746         enableMemoryLimit := flags.Bool("enable-memory-limit", true, "tell container runtime to limit container's memory usage")
1747         enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
1748         networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
1749         memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
1750         runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
1751         brokenNodeHook := flags.String("broken-node-hook", "", "script to run if node is detected to be broken (for example, Docker daemon is not running)")
1752         flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
1753         version := flags.Bool("version", false, "Write version information to stdout and exit 0.")
1754
1755         ignoreDetachFlag := false
1756         if len(args) > 0 && args[0] == "-no-detach" {
1757                 // This process was invoked by a parent process, which
1758                 // has passed along its own arguments, including
1759                 // -detach, after the leading -no-detach flag.  Strip
1760                 // the leading -no-detach flag (it's not recognized by
1761                 // flags.Parse()) and ignore the -detach flag that
1762                 // comes later.
1763                 args = args[1:]
1764                 ignoreDetachFlag = true
1765         }
1766
1767         if ok, code := cmd.ParseFlags(flags, prog, args, "container-uuid", stderr); !ok {
1768                 return code
1769         } else if *version {
1770                 fmt.Fprintln(stdout, prog, cmd.Version.String())
1771                 return 0
1772         } else if !*list && flags.NArg() != 1 {
1773                 fmt.Fprintf(stderr, "missing required argument: container-uuid (try -help)\n")
1774                 return 2
1775         }
1776
1777         containerUUID := flags.Arg(0)
1778
1779         switch {
1780         case *detach && !ignoreDetachFlag:
1781                 return Detach(containerUUID, prog, args, os.Stdin, os.Stdout, os.Stderr)
1782         case *kill >= 0:
1783                 return KillProcess(containerUUID, syscall.Signal(*kill), os.Stdout, os.Stderr)
1784         case *list:
1785                 return ListProcesses(os.Stdout, os.Stderr)
1786         }
1787
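        // Arvados UUIDs are always 27 characters
        // (e.g., zzzzz-dz642-xxxxxxxxxxxxxxx).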
1788         if len(containerUUID) != 27 {
1789                 log.Printf("usage: %s [options] UUID", prog)
1790                 return 1
1791         }
1792
1793         var keepstoreLogbuf bufThenWrite
1794         var conf ConfigData
1795         if *stdinConfig {
1796                 err := json.NewDecoder(stdin).Decode(&conf)
1797                 if err != nil {
1798                         log.Printf("decode stdin: %s", err)
1799                         return 1
1800                 }
1801                 for k, v := range conf.Env {
1802                         err = os.Setenv(k, v)
1803                         if err != nil {
1804                                 log.Printf("setenv(%q): %s", k, err)
1805                                 return 1
1806                         }
1807                 }
1808                 if conf.Cluster != nil {
1809                         // ClusterID is missing from the JSON
1810                         // representation, but we need it to generate
1811                         // a valid config file for keepstore, so we
1812                         // fill it using the container UUID prefix.
1813                         conf.Cluster.ClusterID = containerUUID[:5]
1814                 }
1815         } else {
1816                 conf = hpcConfData(containerUUID, *configFile, io.MultiWriter(&keepstoreLogbuf, stderr))
1817         }
1818
1819         log.Printf("crunch-run %s started", cmd.Version.String())
1820         time.Sleep(*sleep)
1821
1822         if *caCertsPath != "" {
1823                 arvadosclient.CertFiles = []string{*caCertsPath}
1824         }
1825
1826         keepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))
1827         if err != nil {
1828                 log.Print(err)
1829                 return 1
1830         }
1831         if keepstore != nil {
1832                 defer keepstore.Process.Kill()
1833         }
1834
1835         api, err := arvadosclient.MakeArvadosClient()
1836         if err != nil {
1837                 log.Printf("%s: %v", containerUUID, err)
1838                 return 1
1839         }
1840         api.Retries = 8
1841
1842         kc, err := keepclient.MakeKeepClient(api)
1843         if err != nil {
1844                 log.Printf("%s: %v", containerUUID, err)
1845                 return 1
1846         }
1847         kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
1848         kc.Retries = 4
1849
1850         cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
1851         if err != nil {
1852                 log.Print(err)
1853                 return 1
1854         }
1855
1856         if keepstore == nil {
1857                 // Log explanation (if any) for why we're not running
1858                 // a local keepstore.
1859                 var buf bytes.Buffer
1860                 keepstoreLogbuf.SetWriter(&buf)
1861                 if buf.Len() > 0 {
1862                         cr.CrunchLog.Printf("%s", strings.TrimSpace(buf.String()))
1863                 }
1864         } else if logWhat := conf.Cluster.Containers.LocalKeepLogsToContainerLog; logWhat == "none" {
1865                 cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
1866                 keepstoreLogbuf.SetWriter(io.Discard)
1867         } else {
1868                 cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s, writing logs to keepstore.txt in log collection", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
1869                 logwriter, err := cr.NewLogWriter("keepstore")
1870                 if err != nil {
1871                         log.Print(err)
1872                         return 1
1873                 }
1874                 cr.keepstoreLogger = NewThrottledLogger(logwriter)
1875
1876                 var writer io.WriteCloser = cr.keepstoreLogger
1877                 if logWhat == "errors" {
1878                         writer = &filterKeepstoreErrorsOnly{WriteCloser: writer}
1879                 } else if logWhat != "all" {
1880                         // should have been caught earlier by
1881                         // dispatcher's config loader
1882                         log.Printf("invalid value for Containers.LocalKeepLogsToContainerLog: %q", logWhat)
1883                         return 1
1884                 }
1885                 err = keepstoreLogbuf.SetWriter(writer)
1886                 if err != nil {
1887                         log.Print(err)
1888                         return 1
1889                 }
1890                 cr.keepstoreLogbuf = &keepstoreLogbuf
1891         }
1892
1893         switch *runtimeEngine {
1894         case "docker":
1895                 cr.executor, err = newDockerExecutor(containerUUID, cr.CrunchLog.Printf, cr.containerWatchdogInterval)
1896         case "singularity":
1897                 cr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)
1898         default:
1899                 cr.CrunchLog.Printf("%s: unsupported RuntimeEngine %q", containerUUID, *runtimeEngine)
1900                 cr.CrunchLog.Close()
1901                 return 1
1902         }
1903         if err != nil {
1904                 cr.CrunchLog.Printf("%s: %v", containerUUID, err)
1905                 cr.checkBrokenNode(err)
1906                 cr.CrunchLog.Close()
1907                 return 1
1908         }
1909         defer cr.executor.Close()
1910
1911         cr.brokenNodeHook = *brokenNodeHook
1912
1913         gwAuthSecret := os.Getenv("GatewayAuthSecret")
1914         os.Unsetenv("GatewayAuthSecret")
1915         if gwAuthSecret == "" {
1916                 // not safe to run a gateway service without an auth
1917                 // secret
1918                 cr.CrunchLog.Printf("Not starting a gateway server (GatewayAuthSecret was not provided by dispatcher)")
1919         } else {
1920                 gwListen := os.Getenv("GatewayAddress")
1921                 cr.gateway = Gateway{
1922                         Address:       gwListen,
1923                         AuthSecret:    gwAuthSecret,
1924                         ContainerUUID: containerUUID,
1925                         Target:        cr.executor,
1926                         Log:           cr.CrunchLog,
1927                 }
1928                 if gwListen == "" {
1929                         // Direct connection won't work, so we use the
1930                         // gateway_address field to indicate the
1931                         // internalURL of the controller process that
1932                         // has the current tunnel connection.
1933                         cr.gateway.ArvadosClient = cr.dispatcherClient
1934                         cr.gateway.UpdateTunnelURL = func(url string) {
1935                                 cr.gateway.Address = "tunnel " + url
1936                                 cr.DispatcherArvClient.Update("containers", containerUUID,
1937                                         arvadosclient.Dict{"container": arvadosclient.Dict{"gateway_address": cr.gateway.Address}}, nil)
1938                         }
1939                 }
1940                 err = cr.gateway.Start()
1941                 if err != nil {
1942                         log.Printf("error starting gateway server: %s", err)
1943                         return 1
1944                 }
1945         }
1946
1947         parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerUUID+".")
1948         if tmperr != nil {
1949                 log.Printf("%s: %v", containerUUID, tmperr)
1950                 return 1
1951         }
1952
1953         cr.parentTemp = parentTemp
1954         cr.statInterval = *statInterval
1955         cr.cgroupRoot = *cgroupRoot
1956         cr.expectCgroupParent = *cgroupParent
1957         cr.enableMemoryLimit = *enableMemoryLimit
1958         cr.enableNetwork = *enableNetwork
1959         cr.networkMode = *networkMode
1960         if *cgroupParentSubsystem != "" {
1961                 p, err := findCgroup(*cgroupParentSubsystem)
1962                 if err != nil {
1963                         log.Printf("fatal: cgroup parent subsystem: %s", err)
1964                         return 1
1965                 }
1966                 cr.setCgroupParent = p
1967                 cr.expectCgroupParent = p
1968         }
1969
1970         runerr := cr.Run()
1971
        if *memprofile != "" {
                f, err := os.Create(*memprofile)
                if err != nil {
                        log.Printf("could not create memory profile: %s", err)
                } else {
                        runtime.GC() // get up-to-date statistics
                        if err := pprof.WriteHeapProfile(f); err != nil {
                                log.Printf("could not write memory profile: %s", err)
                        }
                        if closeerr := f.Close(); closeerr != nil {
                                log.Printf("closing memprofile file: %s", closeerr)
                        }
                }
        }
1986
1987         if runerr != nil {
1988                 log.Printf("%s: %v", containerUUID, runerr)
1989                 return 1
1990         }
1991         return 0
1992 }
1993
// hpcConfData tries to load ConfigData in an HPC (slurm/lsf)
// environment. This means loading the cluster config from the specified
// file and (if that works) getting the runtime_constraints field of the
// container record from controller to determine the number of VCPUs, so
// we can calculate KeepBuffers.
1998 func hpcConfData(uuid string, configFile string, stderr io.Writer) ConfigData {
1999         var conf ConfigData
2000         conf.Cluster = loadClusterConfigFile(configFile, stderr)
2001         if conf.Cluster == nil {
2002                 // skip loading the container record -- we won't be
2003                 // able to start local keepstore anyway.
2004                 return conf
2005         }
2006         arv, err := arvadosclient.MakeArvadosClient()
2007         if err != nil {
2008                 fmt.Fprintf(stderr, "error setting up arvadosclient: %s\n", err)
2009                 return conf
2010         }
2011         arv.Retries = 8
2012         var ctr arvados.Container
2013         err = arv.Call("GET", "containers", uuid, "", arvadosclient.Dict{"select": []string{"runtime_constraints"}}, &ctr)
2014         if err != nil {
2015                 fmt.Fprintf(stderr, "error getting container record: %s\n", err)
2016                 return conf
2017         }
2018         if ctr.RuntimeConstraints.VCPUs > 0 {
2019                 conf.KeepBuffers = ctr.RuntimeConstraints.VCPUs * conf.Cluster.Containers.LocalKeepBlobBuffersPerVCPU
2020         }
2021         return conf
2022 }
2023
// loadClusterConfigFile loads the cluster config file from the given
// path. If an error occurs, it logs the error to stderr and returns nil.
2026 func loadClusterConfigFile(path string, stderr io.Writer) *arvados.Cluster {
2027         ldr := config.NewLoader(&bytes.Buffer{}, ctxlog.New(stderr, "plain", "info"))
2028         ldr.Path = path
2029         cfg, err := ldr.Load()
2030         if err != nil {
2031                 fmt.Fprintf(stderr, "could not load config file %s: %s\n", path, err)
2032                 return nil
2033         }
2034         cluster, err := cfg.GetCluster("")
2035         if err != nil {
2036                 fmt.Fprintf(stderr, "could not use config file %s: %s\n", path, err)
2037                 return nil
2038         }
2039         fmt.Fprintf(stderr, "loaded config file %s\n", path)
2040         return cluster
2041 }
2042
2043 func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, error) {
2044         if configData.KeepBuffers < 1 {
2045                 fmt.Fprintf(logbuf, "not starting a local keepstore process because KeepBuffers=%v in config\n", configData.KeepBuffers)
2046                 return nil, nil
2047         }
2048         if configData.Cluster == nil {
2049                 fmt.Fprint(logbuf, "not starting a local keepstore process because cluster config file was not loaded\n")
2050                 return nil, nil
2051         }
2052         for uuid, vol := range configData.Cluster.Volumes {
2053                 if len(vol.AccessViaHosts) > 0 {
2054                         fmt.Fprintf(logbuf, "not starting a local keepstore process because a volume (%s) uses AccessViaHosts\n", uuid)
2055                         return nil, nil
2056                 }
2057                 if !vol.ReadOnly && vol.Replication < configData.Cluster.Collections.DefaultReplication {
2058                         fmt.Fprintf(logbuf, "not starting a local keepstore process because a writable volume (%s) has replication less than Collections.DefaultReplication (%d < %d)\n", uuid, vol.Replication, configData.Cluster.Collections.DefaultReplication)
2059                         return nil, nil
2060                 }
2061         }
2062
2063         // Rather than have an alternate way to tell keepstore how
2064         // many buffers to use when starting it this way, we just
2065         // modify the cluster configuration that we feed it on stdin.
2066         configData.Cluster.API.MaxKeepBlobBuffers = configData.KeepBuffers
2067
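        // Ask the kernel for a free port by briefly binding to port 0,
        // then close the listener and pass the chosen port to keepstore.
        // (There is a small window in which another process could grab the
        // port first.)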
2068         localaddr := localKeepstoreAddr()
2069         ln, err := net.Listen("tcp", net.JoinHostPort(localaddr, "0"))
2070         if err != nil {
2071                 return nil, err
2072         }
2073         _, port, err := net.SplitHostPort(ln.Addr().String())
2074         if err != nil {
2075                 ln.Close()
2076                 return nil, err
2077         }
2078         ln.Close()
2079         url := "http://" + net.JoinHostPort(localaddr, port)
2080
2081         fmt.Fprintf(logbuf, "starting keepstore on %s\n", url)
2082
2083         var confJSON bytes.Buffer
2084         err = json.NewEncoder(&confJSON).Encode(arvados.Config{
2085                 Clusters: map[string]arvados.Cluster{
2086                         configData.Cluster.ClusterID: *configData.Cluster,
2087                 },
2088         })
2089         if err != nil {
2090                 return nil, err
2091         }
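        // Re-execute the current binary with the "keepstore" subcommand to
        // run a private keepstore; this assumes the running executable
        // (normally the multi-call arvados-server binary) also implements
        // that command.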
2092         cmd := exec.Command("/proc/self/exe", "keepstore", "-config=-")
2093         if target, err := os.Readlink(cmd.Path); err == nil && strings.HasSuffix(target, ".test") {
2094                 // If we're a 'go test' process, running
2095                 // /proc/self/exe would start the test suite in a
2096                 // child process, which is not what we want.
2097                 cmd.Path, _ = exec.LookPath("go")
2098                 cmd.Args = append([]string{"go", "run", "../../cmd/arvados-server"}, cmd.Args[1:]...)
2099                 cmd.Env = os.Environ()
2100         }
2101         cmd.Stdin = &confJSON
2102         cmd.Stdout = logbuf
2103         cmd.Stderr = logbuf
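        // GOGC=10 makes the child collect garbage aggressively, trading
        // CPU for a smaller memory footprint alongside the container;
        // ARVADOS_SERVICE_INTERNAL_URL appears to be how the service
        // learns which address to listen on.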
2104         cmd.Env = append(cmd.Env,
2105                 "GOGC=10",
2106                 "ARVADOS_SERVICE_INTERNAL_URL="+url)
2107         err = cmd.Start()
2108         if err != nil {
2109                 return nil, fmt.Errorf("error starting keepstore process: %w", err)
2110         }
        // Use a channel (rather than a shared bool, which would be a data
        // race) to learn whether the child has exited.
        exited := make(chan struct{})
        go func() {
                cmd.Wait()
                close(exited)
        }()
2116         ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
2117         defer cancel()
2118         poll := time.NewTicker(time.Second / 10)
2119         defer poll.Stop()
2120         client := http.Client{}
2121         for range poll.C {
                testReq, err := http.NewRequestWithContext(ctx, "GET", url+"/_health/ping", nil)
                if err != nil {
                        return nil, err
                }
                testReq.Header.Set("Authorization", "Bearer "+configData.Cluster.ManagementToken)
2127                 resp, err := client.Do(testReq)
2128                 if err == nil {
2129                         resp.Body.Close()
2130                         if resp.StatusCode == http.StatusOK {
2131                                 break
2132                         }
2133                 }
                select {
                case <-exited:
                        return nil, fmt.Errorf("keepstore child process exited")
                default:
                }
2137                 if ctx.Err() != nil {
2138                         return nil, fmt.Errorf("timed out waiting for new keepstore process to report healthy")
2139                 }
2140         }
2141         os.Setenv("ARVADOS_KEEP_SERVICES", url)
2142         return cmd, nil
2143 }
2144
// currentUserAndGroups returns the current uid, gid, and groups in a
// format suitable for logging, e.g., "crunch-run process has
// uid=1234(arvados) gid=1234(arvados) groups=1234(arvados),114(fuse)".
2148 func currentUserAndGroups() string {
2149         u, err := user.Current()
2150         if err != nil {
2151                 return fmt.Sprintf("error getting current user ID: %s", err)
2152         }
2153         s := fmt.Sprintf("crunch-run process has uid=%s(%s) gid=%s", u.Uid, u.Username, u.Gid)
2154         if g, err := user.LookupGroupId(u.Gid); err == nil {
2155                 s += fmt.Sprintf("(%s)", g.Name)
2156         }
2157         s += " groups="
2158         if gids, err := u.GroupIds(); err == nil {
2159                 for i, gid := range gids {
2160                         if i > 0 {
2161                                 s += ","
2162                         }
2163                         s += gid
2164                         if g, err := user.LookupGroupId(gid); err == nil {
2165                                 s += fmt.Sprintf("(%s)", g.Name)
2166                         }
2167                 }
2168         }
2169         return s
2170 }
2171
// localKeepstoreAddr returns a suitable local interface address for a
// local keepstore service: currently the numerically lowest non-loopback
// IPv4 address assigned to a local interface, excluding the link-local
// (169.254/16), shared/carrier-grade NAT (100.64/10), and loopback
// (127/8) ranges.
2176 func localKeepstoreAddr() string {
2177         var ips []net.IP
2178         // Ignore error (proceed with zero IPs)
2179         addrs, _ := processIPs(os.Getpid())
2180         for addr := range addrs {
2181                 ip := net.ParseIP(addr)
2182                 if ip == nil {
2183                         // invalid
2184                         continue
2185                 }
2186                 if ip.Mask(net.CIDRMask(8, 32)).Equal(net.IPv4(127, 0, 0, 0)) ||
2187                         ip.Mask(net.CIDRMask(10, 32)).Equal(net.IPv4(100, 64, 0, 0)) ||
2188                         ip.Mask(net.CIDRMask(16, 32)).Equal(net.IPv4(169, 254, 0, 0)) {
2189                         // unsuitable
2190                         continue
2191                 }
2192                 ips = append(ips, ip)
2193         }
2194         if len(ips) == 0 {
2195                 return "0.0.0.0"
2196         }
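        // Pick the numerically lowest address: compare lengths first, then
        // bytes, so the choice is deterministic across calls.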
2197         sort.Slice(ips, func(ii, jj int) bool {
2198                 i, j := ips[ii], ips[jj]
2199                 if len(i) != len(j) {
2200                         return len(i) < len(j)
2201                 }
2202                 for x := range i {
2203                         if i[x] != j[x] {
2204                                 return i[x] < j[x]
2205                         }
2206                 }
2207                 return false
2208         })
2209         return ips[0].String()
2210 }