// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package crunchrun

import (
	"bytes"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"os/signal"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"runtime/pprof"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"git.arvados.org/arvados.git/lib/cmd"
	"git.arvados.org/arvados.git/lib/crunchstat"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadosclient"
	"git.arvados.org/arvados.git/sdk/go/keepclient"
	"git.arvados.org/arvados.git/sdk/go/manifest"
	"golang.org/x/net/context"
)

type command struct{}

var Command = command{}

// IArvadosClient defines the minimal set of Arvados API methods used by crunch-run.
type IArvadosClient interface {
	Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
	Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
	CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
	Discovery(key string) (interface{}, error)
}
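
// Example (added for illustration; not part of the original source): a
// typical call through this interface updates one field of a container
// record. The UUID and field values here are hypothetical.
//
//	var updated arvados.Container
//	err := runner.DispatcherArvClient.Update("containers", "zzzzz-dz642-xxxxxxxxxxxxxxx",
//		arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, &updated)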

// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")

// IKeepClient defines the minimal set of Keep API methods used by crunch-run.
type IKeepClient interface {
	BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error)
	ReadAt(locator string, p []byte, off int) (int, error)
	ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
	LocalLocator(locator string) (string, error)
	ClearBlockCache()
	SetStorageClasses(sc []string)
}

// NewLogWriter is a factory function to create a new log writer.
type NewLogWriter func(name string) (io.WriteCloser, error)

type RunArvMount func(cmdline []string, tok string) (*exec.Cmd, error)

type MkTempDir func(string, string) (string, error)

type PsProcess interface {
	CmdlineSlice() ([]string, error)
}

// ContainerRunner is the main stateful struct used for a single execution of a
// container.
type ContainerRunner struct {
	executor       containerExecutor
	executorStdin  io.Closer
	executorStdout io.Closer
	executorStderr io.Closer

	// Dispatcher client is initialized with the Dispatcher token.
	// This is a privileged token used to manage container status
	// and logs.
	//
	// We have both dispatcherClient and DispatcherArvClient
	// because there are two different incompatible Arvados Go
	// SDKs and we have to use both (hopefully this gets fixed in
	// #14467)
	dispatcherClient     *arvados.Client
	DispatcherArvClient  IArvadosClient
	DispatcherKeepClient IKeepClient

	// Container client is initialized with the Container token.
	// This token controls the permissions of the container, and
	// must be used for operations such as reading collections.
	//
	// Same comment as above applies to
	// containerClient/ContainerArvClient.
	containerClient     *arvados.Client
	ContainerArvClient  IArvadosClient
	ContainerKeepClient IKeepClient

	Container     arvados.Container
	token         string
	ExitCode      *int
	NewLogWriter  NewLogWriter
	CrunchLog     *ThrottledLogger
	logUUID       string
	logMtx        sync.Mutex
	LogCollection arvados.CollectionFileSystem
	LogsPDH       *string
	RunArvMount   RunArvMount
	MkTempDir     MkTempDir
	ArvMount      *exec.Cmd
	ArvMountPoint string
	HostOutputDir string
	Volumes       map[string]struct{}
	OutputPDH     *string
	SigChan       chan os.Signal
	ArvMountExit  chan error
	SecretMounts  map[string]arvados.Mount
	MkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
	finalState    string
	parentTemp    string

	statLogger       io.WriteCloser
	statReporter     *crunchstat.Reporter
	hoststatLogger   io.WriteCloser
	hoststatReporter *crunchstat.Reporter
	statInterval     time.Duration
	cgroupRoot       string
	// What we expect the container's cgroup parent to be.
	expectCgroupParent string
	// What we tell docker to use as the container's cgroup
	// parent. Note: Ideally we would use the same field for both
	// expectCgroupParent and setCgroupParent, and just make it
	// default to "docker". However, when using docker < 1.10 with
	// systemd, specifying a non-empty cgroup parent (even the
	// default value "docker") hits a docker bug
	// (https://github.com/docker/docker/issues/17126). Using two
	// separate fields makes it possible to use the "expect cgroup
	// parent to be X" feature even on sites where the "specify
	// cgroup parent" feature breaks.
	setCgroupParent string

	cStateLock sync.Mutex
	cCancelled bool // StopContainer() invoked

	enableMemoryLimit bool
	enableNetwork     string // one of "default" or "always"
	networkMode       string // "none", "host", or "" -- passed through to executor
	arvMountLog       *ThrottledLogger

	containerWatchdogInterval time.Duration

	gateway Gateway
}

// setupSignals sets up signal handling to gracefully terminate the
// underlying container and update state when receiving a TERM, INT or
// QUIT signal.
func (runner *ContainerRunner) setupSignals() {
	runner.SigChan = make(chan os.Signal, 1)
	signal.Notify(runner.SigChan, syscall.SIGTERM)
	signal.Notify(runner.SigChan, syscall.SIGINT)
	signal.Notify(runner.SigChan, syscall.SIGQUIT)

	go func(sig chan os.Signal) {
		for s := range sig {
			runner.stop(s)
		}
	}(runner.SigChan)
}
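
// Added note (not part of the original source): with the handler above
// installed, a dispatcher or operator can request a graceful shutdown
// from the shell; the container is stopped and its final state recorded
// instead of the process dying mid-run:
//
//	kill -TERM <crunch-run pid>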

// stop the underlying container.
func (runner *ContainerRunner) stop(sig os.Signal) {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if sig != nil {
		runner.CrunchLog.Printf("caught signal: %v", sig)
	}
	runner.cCancelled = true
	runner.CrunchLog.Printf("stopping container")
	err := runner.executor.Stop()
	if err != nil {
		runner.CrunchLog.Printf("error stopping container: %s", err)
	}
}

var errorBlacklist = []string{
	"(?ms).*[Cc]annot connect to the Docker daemon.*",
	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
	"(?ms).*grpc: the connection is unavailable.*",
}
var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")

func (runner *ContainerRunner) runBrokenNodeHook() {
	if *brokenNodeHook == "" {
		path := filepath.Join(lockdir, brokenfile)
		runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
		f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
		if err != nil {
			runner.CrunchLog.Printf("Error writing %s: %s", path, err)
			return
		}
		f.Close()
	} else {
		runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
		// run killme script
		c := exec.Command(*brokenNodeHook)
		c.Stdout = runner.CrunchLog
		c.Stderr = runner.CrunchLog
		err := c.Run()
		if err != nil {
			runner.CrunchLog.Printf("Error running broken node hook: %v", err)
		}
	}
}

func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
	for _, d := range errorBlacklist {
		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
			runner.runBrokenNodeHook()
			return true
		}
	}
	return false
}
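
// Example (added for illustration; not part of the original source): an
// error like the hypothetical one below matches the first errorBlacklist
// pattern, so checkBrokenNode runs the broken-node hook and returns
// true, telling the caller the failure is a node problem rather than a
// problem with the container itself.
//
//	err := errors.New("Cannot connect to the Docker daemon at unix:///var/run/docker.sock")
//	runner.checkBrokenNode(err) // => true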

// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store.  If not, it loads
// the image from Keep.
func (runner *ContainerRunner) LoadImage() (string, error) {
	runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

	d, err := os.Open(runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage)
	if err != nil {
		return "", err
	}
	defer d.Close()
	allfiles, err := d.Readdirnames(-1)
	if err != nil {
		return "", err
	}
	var tarfiles []string
	for _, fnm := range allfiles {
		if strings.HasSuffix(fnm, ".tar") {
			tarfiles = append(tarfiles, fnm)
		}
	}
	if len(tarfiles) == 0 {
		return "", fmt.Errorf("image collection does not include a .tar image file")
	}
	if len(tarfiles) > 1 {
		return "", fmt.Errorf("cannot choose from multiple tar files in image collection: %v", tarfiles)
	}
	imageID := tarfiles[0][:len(tarfiles[0])-4]
	imageTarballPath := runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage + "/" + imageID + ".tar"
	runner.CrunchLog.Printf("Using Docker image id %q", imageID)

	runner.CrunchLog.Print("Loading Docker image from keep")
	err = runner.executor.LoadImage(imageID, imageTarballPath, runner.Container, runner.ArvMountPoint,
		runner.containerClient)
	if err != nil {
		return "", err
	}

	return imageID, nil
}
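
// Added note (not part of the original source): the image ID is simply
// the tarball's filename minus the ".tar" suffix. For a hypothetical
// image collection containing the single file
//
//	e4c2...9f1a.tar
//
// LoadImage reports image ID "e4c2...9f1a" and passes
// <ArvMountPoint>/by_id/<collection>/e4c2...9f1a.tar to the executor.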

func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *exec.Cmd, err error) {
	c = exec.Command(cmdline[0], cmdline[1:]...)

	// Copy our environment, but override ARVADOS_API_TOKEN with
	// the container auth token.
	c.Env = nil
	for _, s := range os.Environ() {
		if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
			c.Env = append(c.Env, s)
		}
	}
	c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

	w, err := runner.NewLogWriter("arv-mount")
	if err != nil {
		return nil, err
	}
	runner.arvMountLog = NewThrottledLogger(w)
	scanner := logScanner{
		Patterns: []string{
			"Keep write error",
			"Block not found error",
			"Unhandled exception during FUSE operation",
		},
		ReportFunc: runner.reportArvMountWarning,
	}
	c.Stdout = runner.arvMountLog
	c.Stderr = io.MultiWriter(runner.arvMountLog, os.Stderr, &scanner)

	runner.CrunchLog.Printf("Running %v", c.Args)

	err = c.Start()
	if err != nil {
		return nil, err
	}

	statReadme := make(chan bool)
	runner.ArvMountExit = make(chan error)

	keepStatting := true
	go func() {
		for keepStatting {
			time.Sleep(100 * time.Millisecond)
			_, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
			if err == nil {
				keepStatting = false
				statReadme <- true
			}
		}
		close(statReadme)
	}()

	go func() {
		mnterr := c.Wait()
		if mnterr != nil {
			runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
		}
		runner.ArvMountExit <- mnterr
		close(runner.ArvMountExit)
	}()

	select {
	case <-statReadme:
		break
	case err := <-runner.ArvMountExit:
		runner.ArvMount = nil
		keepStatting = false
		return nil, err
	}

	return c, nil
}
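
// Design note (not part of the original source): ArvMountCmd treats the
// appearance of by_id/README under the mount point as its readiness
// probe, polling for it every 100ms, while a second goroutine waits on
// the arv-mount process itself. The select above therefore returns as
// soon as either the FUSE mount is usable or arv-mount has already
// exited, whichever happens first.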

func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
	if runner.ArvMountPoint == "" {
		runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
	}
	return
}

// copyfile copies the file at src to dst, creating dst's parent
// directories as needed.
func copyfile(src string, dst string) (err error) {
	srcfile, err := os.Open(src)
	if err != nil {
		return
	}
	defer srcfile.Close()

	err = os.MkdirAll(path.Dir(dst), 0777)
	if err != nil {
		return
	}

	dstfile, err := os.Create(dst)
	if err != nil {
		return
	}
	_, err = io.Copy(dstfile, srcfile)
	// Close dstfile even if the copy failed, but report the copy
	// error in preference to the close error.
	err2 := dstfile.Close()
	if err != nil {
		return err
	}
	return err2
}

func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
	bindmounts := map[string]bindmount{}
	err := runner.SetupArvMountPoint("keep")
	if err != nil {
		return nil, fmt.Errorf("While creating keep mount temp dir: %v", err)
	}

	token, err := runner.ContainerToken()
	if err != nil {
		return nil, fmt.Errorf("could not get container token: %s", err)
	}
	runner.CrunchLog.Printf("container token %q", token)

	pdhOnly := true
	tmpcount := 0
	arvMountCmd := []string{
		"arv-mount",
		"--foreground",
		"--read-write",
		"--storage-classes", strings.Join(runner.Container.OutputStorageClasses, ","),
		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}

	if runner.executor.Runtime() == "docker" {
		arvMountCmd = append(arvMountCmd, "--allow-other")
	}

	if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
		arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
	}

	collectionPaths := []string{}
	needCertMount := true
	type copyFile struct {
		src  string
		bind string
	}
	var copyFiles []copyFile

	var binds []string
	for bind := range runner.Container.Mounts {
		binds = append(binds, bind)
	}
	for bind := range runner.SecretMounts {
		if _, ok := runner.Container.Mounts[bind]; ok {
			return nil, fmt.Errorf("secret mount %q conflicts with regular mount", bind)
		}
		if runner.SecretMounts[bind].Kind != "json" &&
			runner.SecretMounts[bind].Kind != "text" {
			return nil, fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
				bind, runner.SecretMounts[bind].Kind)
		}
		binds = append(binds, bind)
	}
	sort.Strings(binds)
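
	// Added note (not part of the original source): sorting the bind
	// targets gives a deterministic processing order and, because a
	// parent path sorts before the paths nested beneath it, ensures
	// the output mount is handled before any mounts inside it. The
	// copyFiles logic below relies on this: HostOutputDir must be
	// set before staging targets under the output path are computed.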

	for _, bind := range binds {
		mnt, notSecret := runner.Container.Mounts[bind]
		if !notSecret {
			mnt = runner.SecretMounts[bind]
		}
		if bind == "stdout" || bind == "stderr" {
			// Is it a "file" mount kind?
			if mnt.Kind != "file" {
				return nil, fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
			}

			// Does path start with OutputPath?
			prefix := runner.Container.OutputPath
			if !strings.HasSuffix(prefix, "/") {
				prefix += "/"
			}
			if !strings.HasPrefix(mnt.Path, prefix) {
				return nil, fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
			}
		}

		if bind == "stdin" {
			// Is it a "collection" mount kind?
			if mnt.Kind != "collection" && mnt.Kind != "json" {
				return nil, fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
			}
		}

		if bind == "/etc/arvados/ca-certificates.crt" {
			needCertMount = false
		}

		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
			if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
				return nil, fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
			}
		}

		switch {
		case mnt.Kind == "collection" && bind != "stdin":
			var src string
			if mnt.UUID != "" && mnt.PortableDataHash != "" {
				return nil, fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
			}
			if mnt.UUID != "" {
				if mnt.Writable {
					return nil, fmt.Errorf("writing to existing collections currently not permitted")
				}
				pdhOnly = false
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
			} else if mnt.PortableDataHash != "" {
				if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					return nil, fmt.Errorf("can never write to a collection specified by portable data hash")
				}
				idx := strings.Index(mnt.PortableDataHash, "/")
				if idx > 0 {
					mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
					mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
					runner.Container.Mounts[bind] = mnt
				}
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
				if mnt.Path != "" && mnt.Path != "." {
					if strings.HasPrefix(mnt.Path, "./") {
						mnt.Path = mnt.Path[2:]
					} else if strings.HasPrefix(mnt.Path, "/") {
						mnt.Path = mnt.Path[1:]
					}
					src += "/" + mnt.Path
				}
			} else {
				src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
				arvMountCmd = append(arvMountCmd, "--mount-tmp", fmt.Sprintf("tmp%d", tmpcount))
				tmpcount++
			}
			if mnt.Writable {
				if bind == runner.Container.OutputPath {
					runner.HostOutputDir = src
					bindmounts[bind] = bindmount{HostPath: src}
				} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
				} else {
					bindmounts[bind] = bindmount{HostPath: src}
				}
			} else {
				bindmounts[bind] = bindmount{HostPath: src, ReadOnly: true}
			}
			collectionPaths = append(collectionPaths, src)

		case mnt.Kind == "tmp":
			var tmpdir string
			tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
			if err != nil {
				return nil, fmt.Errorf("while creating mount temp dir: %v", err)
			}
			st, staterr := os.Stat(tmpdir)
			if staterr != nil {
				return nil, fmt.Errorf("while Stat on temp dir: %v", staterr)
			}
			err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("while Chmod temp dir: %v", err)
			}
			bindmounts[bind] = bindmount{HostPath: tmpdir}
			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = tmpdir
			}

		case mnt.Kind == "json" || mnt.Kind == "text":
			var filedata []byte
			if mnt.Kind == "json" {
				filedata, err = json.Marshal(mnt.Content)
				if err != nil {
					return nil, fmt.Errorf("encoding json data: %v", err)
				}
			} else {
				text, ok := mnt.Content.(string)
				if !ok {
					return nil, fmt.Errorf("content for mount %q must be a string", bind)
				}
				filedata = []byte(text)
			}

			tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
			if err != nil {
				return nil, fmt.Errorf("creating temp dir: %v", err)
			}
			tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
			err = ioutil.WriteFile(tmpfn, filedata, 0444)
			if err != nil {
				return nil, fmt.Errorf("writing temp file: %v", err)
			}
			if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && (notSecret || runner.Container.Mounts[runner.Container.OutputPath].Kind != "collection") {
				// In most cases, if the container
				// specifies a literal file inside the
				// output path, we copy it into the
				// output directory (either a mounted
				// collection or a staging area on the
				// host fs). If it's a secret, it will
				// be skipped when copying output from
				// staging to Keep later.
				copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
			} else {
				// If a secret is outside OutputPath,
				// we bind mount the secret file
				// directly just like other mounts. We
				// also use this strategy when a
				// secret is inside OutputPath but
				// OutputPath is a live collection, to
				// avoid writing the secret to
				// Keep. Attempting to remove a
				// bind-mounted secret file from
				// inside the container will return a
				// "Device or resource busy" error
				// that might not be handled well by
				// the container, which is why we
				// don't use this strategy when
				// OutputPath is a staging directory.
				bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
			}

		case mnt.Kind == "git_tree":
			tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
			if err != nil {
				return nil, fmt.Errorf("creating temp dir: %v", err)
			}
			err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
			if err != nil {
				return nil, err
			}
			bindmounts[bind] = bindmount{HostPath: tmpdir, ReadOnly: true}
		}
	}

	if runner.HostOutputDir == "" {
		return nil, fmt.Errorf("output path does not correspond to a writable mount point")
	}

	if needCertMount && runner.Container.RuntimeConstraints.API {
		for _, certfile := range arvadosclient.CertFiles {
			_, err := os.Stat(certfile)
			if err == nil {
				bindmounts["/etc/arvados/ca-certificates.crt"] = bindmount{HostPath: certfile, ReadOnly: true}
				break
			}
		}
	}

	if pdhOnly {
		// If we are only mounting collections by pdh, make
		// sure we don't subscribe to websocket events to
		// avoid putting undesired load on the API server
		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id", "--disable-event-listening")
	} else {
		arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
	}
	// the by_uuid mount point is used by singularity when writing
	// out docker images converted to SIF
	arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)

	runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
	if err != nil {
		return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
	}

	for _, p := range collectionPaths {
		_, err = os.Stat(p)
		if err != nil {
			return nil, fmt.Errorf("while checking that input files exist: %v", err)
		}
	}

	for _, cp := range copyFiles {
		st, err := os.Stat(cp.src)
		if err != nil {
			return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
		if st.IsDir() {
			err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
				if walkerr != nil {
					return walkerr
				}
				target := path.Join(cp.bind, walkpath[len(cp.src):])
				if walkinfo.Mode().IsRegular() {
					copyerr := copyfile(walkpath, target)
					if copyerr != nil {
						return copyerr
					}
					return os.Chmod(target, walkinfo.Mode()|0777)
				} else if walkinfo.Mode().IsDir() {
					mkerr := os.MkdirAll(target, 0777)
					if mkerr != nil {
						return mkerr
					}
					return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
				} else {
					return fmt.Errorf("source %q is not a regular file or directory", cp.src)
				}
			})
		} else if st.Mode().IsRegular() {
			err = copyfile(cp.src, cp.bind)
			if err == nil {
				err = os.Chmod(cp.bind, st.Mode()|0777)
			}
		}
		if err != nil {
			return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
	}

	return bindmounts, nil
}
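
// Example (added for illustration; not part of the original source): for
// a container whose collection mounts are all given by portable data
// hash, the code above assembles an arv-mount command line along these
// lines (paths and values hypothetical):
//
//	arv-mount --foreground --read-write \
//		--storage-classes default --crunchstat-interval=10 \
//		--mount-by-pdh by_id --disable-event-listening \
//		--mount-by-id by_uuid /tmp/crunch-run123/keep456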

func (runner *ContainerRunner) stopHoststat() error {
	if runner.hoststatReporter == nil {
		return nil
	}
	runner.hoststatReporter.Stop()
	err := runner.hoststatLogger.Close()
	if err != nil {
		return fmt.Errorf("error closing hoststat logs: %v", err)
	}
	return nil
}

func (runner *ContainerRunner) startHoststat() error {
	w, err := runner.NewLogWriter("hoststat")
	if err != nil {
		return err
	}
	runner.hoststatLogger = NewThrottledLogger(w)
	runner.hoststatReporter = &crunchstat.Reporter{
		Logger:     log.New(runner.hoststatLogger, "", 0),
		CgroupRoot: runner.cgroupRoot,
		PollPeriod: runner.statInterval,
	}
	runner.hoststatReporter.Start()
	return nil
}

func (runner *ContainerRunner) startCrunchstat() error {
	w, err := runner.NewLogWriter("crunchstat")
	if err != nil {
		return err
	}
	runner.statLogger = NewThrottledLogger(w)
	runner.statReporter = &crunchstat.Reporter{
		CID:          runner.executor.CgroupID(),
		Logger:       log.New(runner.statLogger, "", 0),
		CgroupParent: runner.expectCgroupParent,
		CgroupRoot:   runner.cgroupRoot,
		PollPeriod:   runner.statInterval,
		TempDir:      runner.parentTemp,
	}
	runner.statReporter.Start()
	return nil
}
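
// Added note (not part of the original source): the two reporters differ
// in scope. startHoststat configures a crunchstat.Reporter with just the
// cgroup root, so the "hoststat" log samples the whole compute node,
// while startCrunchstat pins its reporter to the container's own cgroup
// (via the executor's CgroupID), so the "crunchstat" log covers only the
// container's resource usage.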

type infoCommand struct {
	label string
	cmd   []string
}

// LogHostInfo logs info about the current host, for debugging and
// accounting purposes. Although it's logged as "node-info", this is
// about the environment where crunch-run is actually running, which
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
	w, err := runner.NewLogWriter("node-info")
	if err != nil {
		return
	}

	commands := []infoCommand{
		{
			label: "Host Information",
			cmd:   []string{"uname", "-a"},
		},
		{
			label: "CPU Information",
			cmd:   []string{"cat", "/proc/cpuinfo"},
		},
		{
			label: "Memory Information",
			cmd:   []string{"cat", "/proc/meminfo"},
		},
		{
			label: "Disk Space",
			cmd:   []string{"df", "-m", "/", os.TempDir()},
		},
		{
			label: "Disk INodes",
			cmd:   []string{"df", "-i", "/", os.TempDir()},
		},
	}

	// Run commands with informational output to be logged.
	for _, command := range commands {
		fmt.Fprintln(w, command.label)
		cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
		cmd.Stdout = w
		cmd.Stderr = w
		if err := cmd.Run(); err != nil {
			err = fmt.Errorf("While running command %q: %v", command.cmd, err)
			fmt.Fprintln(w, err)
			return err
		}
		fmt.Fprintln(w, "")
	}

	err = w.Close()
	if err != nil {
		return fmt.Errorf("While closing node-info logs: %v", err)
	}
	return nil
}

// LogContainerRecord gets and saves the raw JSON container record from the API server
func (runner *ContainerRunner) LogContainerRecord() error {
	logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
	if !logged && err == nil {
		err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
	}
	return err
}

// LogNodeRecord logs the current host's InstanceType config entry (or
// the arvados#node record, if running via crunch-dispatch-slurm).
func (runner *ContainerRunner) LogNodeRecord() error {
	if it := os.Getenv("InstanceType"); it != "" {
		// Dispatched via arvados-dispatch-cloud. Save
		// InstanceType config fragment received from
		// dispatcher on stdin.
		w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
		if err != nil {
			return err
		}
		defer w.Close()
		_, err = io.WriteString(w, it)
		if err != nil {
			return err
		}
		return w.Close()
	}
	// Dispatched via crunch-dispatch-slurm. Look up
	// apiserver's node record corresponding to
	// $SLURMD_NODENAME.
	hostname := os.Getenv("SLURMD_NODENAME")
	if hostname == "" {
		hostname, _ = os.Hostname()
	}
	_, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
		// The "info" field has admin-only info when
		// obtained with a privileged token, and
		// should not be logged.
		node, ok := resp.(map[string]interface{})
		if ok {
			delete(node, "info")
		}
	})
	return err
}

func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
	writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return false, err
	}
	w := &ArvLogWriter{
		ArvClient:     runner.DispatcherArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: label,
		writeCloser:   writer,
	}

	reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
	if err != nil {
		return false, fmt.Errorf("error getting %s record: %v", label, err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	var resp map[string]interface{}
	if err = dec.Decode(&resp); err != nil {
		return false, fmt.Errorf("error decoding %s list response: %v", label, err)
	}
	items, ok := resp["items"].([]interface{})
	if !ok {
		return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
	} else if len(items) < 1 {
		return false, nil
	}
	if munge != nil {
		munge(items[0])
	}
	// Re-encode it using indentation to improve readability
	enc := json.NewEncoder(w)
	enc.SetIndent("", "    ")
	if err = enc.Encode(items[0]); err != nil {
		return false, fmt.Errorf("error logging %s record: %v", label, err)
	}
	err = w.Close()
	if err != nil {
		return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
	}
	return true, nil
}
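
// Example (added for illustration; not part of the original source):
// logAPIResponse expects the standard Arvados list-response envelope,
// roughly (fields abbreviated, values hypothetical):
//
//	{"items": [{"uuid": "zzzzz-dz642-...", "state": "Running"}], "items_available": 1}
//
// Only the first element of "items" is munged and re-encoded, with
// indentation, into <label>.json in the log collection.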

func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
	stdoutPath := mntPath[len(runner.Container.OutputPath):]
	index := strings.LastIndex(stdoutPath, "/")
	if index > 0 {
		subdirs := stdoutPath[:index]
		if subdirs != "" {
			st, err := os.Stat(runner.HostOutputDir)
			if err != nil {
				return nil, fmt.Errorf("While Stat on temp dir: %v", err)
			}
			stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
			err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
			}
		}
	}
	stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
	if err != nil {
		return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
	}

	return stdoutFile, nil
}

// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {
	var stdin io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
	if mnt, ok := runner.Container.Mounts["stdin"]; ok {
		switch mnt.Kind {
		case "collection":
			var collID string
			if mnt.UUID != "" {
				collID = mnt.UUID
			} else {
				collID = mnt.PortableDataHash
			}
			path := runner.ArvMountPoint + "/by_id/" + collID + "/" + mnt.Path
			f, err := os.Open(path)
			if err != nil {
				return err
			}
			stdin = f
		case "json":
			j, err := json.Marshal(mnt.Content)
			if err != nil {
				return fmt.Errorf("error encoding stdin json data: %v", err)
			}
			stdin = ioutil.NopCloser(bytes.NewReader(j))
		default:
			return fmt.Errorf("stdin mount has unsupported kind %q", mnt.Kind)
		}
	}

	var stdout, stderr io.WriteCloser
	if mnt, ok := runner.Container.Mounts["stdout"]; ok {
		f, err := runner.getStdoutFile(mnt.Path)
		if err != nil {
			return err
		}
		stdout = f
	} else if w, err := runner.NewLogWriter("stdout"); err != nil {
		return err
	} else {
		stdout = NewThrottledLogger(w)
	}

	if mnt, ok := runner.Container.Mounts["stderr"]; ok {
		f, err := runner.getStdoutFile(mnt.Path)
		if err != nil {
			return err
		}
		stderr = f
	} else if w, err := runner.NewLogWriter("stderr"); err != nil {
		return err
	} else {
		stderr = NewThrottledLogger(w)
	}

	env := runner.Container.Environment
	enableNetwork := runner.enableNetwork == "always"
	if runner.Container.RuntimeConstraints.API {
		enableNetwork = true
		tok, err := runner.ContainerToken()
		if err != nil {
			return err
		}
		env = map[string]string{}
		for k, v := range runner.Container.Environment {
			env[k] = v
		}
		env["ARVADOS_API_TOKEN"] = tok
		env["ARVADOS_API_HOST"] = os.Getenv("ARVADOS_API_HOST")
		env["ARVADOS_API_HOST_INSECURE"] = os.Getenv("ARVADOS_API_HOST_INSECURE")
	}
	workdir := runner.Container.Cwd
	if workdir == "." {
		// both "" and "." mean default
		workdir = ""
	}
	ram := runner.Container.RuntimeConstraints.RAM
	if !runner.enableMemoryLimit {
		ram = 0
	}
	runner.executorStdin = stdin
	runner.executorStdout = stdout
	runner.executorStderr = stderr
	return runner.executor.Create(containerSpec{
		Image:         imageID,
		VCPUs:         runner.Container.RuntimeConstraints.VCPUs,
		RAM:           ram,
		WorkingDir:    workdir,
		Env:           env,
		BindMounts:    bindmounts,
		Command:       runner.Container.Command,
		EnableNetwork: enableNetwork,
		NetworkMode:   runner.networkMode,
		CgroupParent:  runner.setCgroupParent,
		Stdin:         stdin,
		Stdout:        stdout,
		Stderr:        stderr,
	})
}

// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
	runner.CrunchLog.Printf("Starting container")
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	err := runner.executor.Start()
	if err != nil {
		var advice string
		if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
			advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
		}
		return fmt.Errorf("could not start container: %v%s", err, advice)
	}
	return nil
}

// WaitFinish waits for the container to terminate, captures the exit
// code, and closes the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() error {
	runner.CrunchLog.Print("Waiting for container to finish")
	var timeout <-chan time.Time
	if s := runner.Container.SchedulingParameters.MaxRunTime; s > 0 {
		timeout = time.After(time.Duration(s) * time.Second)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		select {
		case <-timeout:
			runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
			runner.stop(nil)
		case <-runner.ArvMountExit:
			runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
			runner.stop(nil)
		case <-ctx.Done():
		}
	}()
	exitcode, err := runner.executor.Wait(ctx)
	if err != nil {
		runner.checkBrokenNode(err)
		return err
	}
	runner.ExitCode = &exitcode

	var returnErr error
	if err = runner.executorStdin.Close(); err != nil {
		err = fmt.Errorf("error closing container stdin: %s", err)
		runner.CrunchLog.Printf("%s", err)
		returnErr = err
	}
	if err = runner.executorStdout.Close(); err != nil {
		err = fmt.Errorf("error closing container stdout: %s", err)
		runner.CrunchLog.Printf("%s", err)
		if returnErr == nil {
			returnErr = err
		}
	}
	if err = runner.executorStderr.Close(); err != nil {
		err = fmt.Errorf("error closing container stderr: %s", err)
		runner.CrunchLog.Printf("%s", err)
		if returnErr == nil {
			returnErr = err
		}
	}

	if runner.statReporter != nil {
		runner.statReporter.Stop()
		err = runner.statLogger.Close()
		if err != nil {
			runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
		}
	}
	return returnErr
}

func (runner *ContainerRunner) updateLogs() {
	ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
	defer ticker.Stop()

	sigusr1 := make(chan os.Signal, 1)
	signal.Notify(sigusr1, syscall.SIGUSR1)
	defer signal.Stop(sigusr1)

	saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
	saveAtSize := crunchLogUpdateSize
	var savedSize int64
	for {
		select {
		case <-ticker.C:
		case <-sigusr1:
			saveAtTime = time.Now()
		}
		runner.logMtx.Lock()
		done := runner.LogsPDH != nil
		runner.logMtx.Unlock()
		if done {
			return
		}
		size := runner.LogCollection.Size()
		if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
			continue
		}
		saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
		saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
		saved, err := runner.saveLogCollection(false)
		if err != nil {
			runner.CrunchLog.Printf("error updating log collection: %s", err)
			continue
		}

		var updated arvados.Container
		err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
			"container": arvadosclient.Dict{"log": saved.PortableDataHash},
		}, &updated)
		if err != nil {
			runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
			continue
		}

		savedSize = size
	}
}
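
// Added note (not part of the original source): logs are normally
// committed when crunchLogUpdatePeriod has elapsed or the log collection
// has grown by crunchLogUpdateSize since the last save. Because SIGUSR1
// zeroes the time threshold, an operator can force a prompt save of any
// new log data:
//
//	kill -USR1 <crunch-run pid>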

func (runner *ContainerRunner) reportArvMountWarning(pattern, text string) {
	var updated arvados.Container
	err := runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
		"container": arvadosclient.Dict{
			"runtime_status": arvadosclient.Dict{
				"warning":       "arv-mount: " + pattern,
				"warningDetail": text,
			},
		},
	}, &updated)
	if err != nil {
		runner.CrunchLog.Printf("error updating container runtime_status: %s", err)
	}
}

// CaptureOutput saves data from the container's output directory if
// needed, and updates the container output accordingly.
func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) error {
	if runner.Container.RuntimeConstraints.API {
		// Output may have been set directly by the container, so
		// refresh the container record to check.
		err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
			nil, &runner.Container)
		if err != nil {
			return err
		}
		if runner.Container.Output != "" {
			// Container output is already set.
			runner.OutputPDH = &runner.Container.Output
			return nil
		}
	}

	txt, err := (&copier{
		client:        runner.containerClient,
		arvClient:     runner.ContainerArvClient,
		keepClient:    runner.ContainerKeepClient,
		hostOutputDir: runner.HostOutputDir,
		ctrOutputDir:  runner.Container.OutputPath,
		bindmounts:    bindmounts,
		mounts:        runner.Container.Mounts,
		secretMounts:  runner.SecretMounts,
		logger:        runner.CrunchLog,
	}).Copy()
	if err != nil {
		return err
	}
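	// Added note (not part of the original source): locators carrying
	// a "+R" hint refer to blocks stored on a remote cluster.
	// Re-marshalling the manifest through a collection filesystem
	// (below) copies those blocks to the local cluster, so the saved
	// output collection only references locally stored data.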
1189         if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
1190                 runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
1191                 fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
1192                 if err != nil {
1193                         return err
1194                 }
1195                 txt, err = fs.MarshalManifest(".")
1196                 if err != nil {
1197                         return err
1198                 }
1199         }
1200         var resp arvados.Collection
1201         err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
1202                 "ensure_unique_name": true,
1203                 "collection": arvadosclient.Dict{
1204                         "is_trashed":    true,
1205                         "name":          "output for " + runner.Container.UUID,
1206                         "manifest_text": txt,
1207                 },
1208         }, &resp)
1209         if err != nil {
1210                 return fmt.Errorf("error creating output collection: %v", err)
1211         }
1212         runner.OutputPDH = &resp.PortableDataHash
1213         return nil
1214 }
1215
1216 func (runner *ContainerRunner) CleanupDirs() {
1217         if runner.ArvMount != nil {
1218                 var delay int64 = 8
1219                 umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
1220                 umount.Stdout = runner.CrunchLog
1221                 umount.Stderr = runner.CrunchLog
1222                 runner.CrunchLog.Printf("Running %v", umount.Args)
1223                 umnterr := umount.Start()
1224
1225                 if umnterr != nil {
1226                         runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
1227                         runner.ArvMount.Process.Kill()
1228                 } else {
1229                         // If arv-mount --unmount gets stuck for any reason, we
1230                         // don't want to wait for it forever.  Do Wait() in a goroutine
1231                         // so it doesn't block crunch-run.
1232                         umountExit := make(chan error)
1233                         go func() {
1234                                 mnterr := umount.Wait()
1235                                 if mnterr != nil {
1236                                         runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
1237                                 }
1238                                 umountExit <- mnterr
1239                         }()
1240
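                        // Wait for the unmount command and then for
                        // arv-mount itself to finish; if the timeout
                        // elapses first, kill whatever is still
                        // running and stop waiting.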
1241                         for again := true; again; {
1242                                 again = false
1243                                 select {
1244                                 case <-umountExit:
1245                                         umount = nil
1246                                         again = true
                                case <-runner.ArvMountExit:
                                        // arv-mount exited on its own;
                                        // `again` is still false, so
                                        // the loop ends. (A bare
                                        // "break" here would only
                                        // break out of the select.)
1249                                 case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
1250                                         runner.CrunchLog.Printf("Timed out waiting for unmount")
1251                                         if umount != nil {
1252                                                 umount.Process.Kill()
1253                                         }
1254                                         runner.ArvMount.Process.Kill()
1255                                 }
1256                         }
1257                 }
1258                 runner.ArvMount = nil
1259         }
1260
1261         if runner.ArvMountPoint != "" {
1262                 if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
1263                         runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
1264                 }
1265                 runner.ArvMountPoint = ""
1266         }
1267
1268         if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
1269                 runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
1270         }
1271 }
1272
1273 // CommitLogs posts the collection containing the final container logs.
1274 func (runner *ContainerRunner) CommitLogs() error {
1275         func() {
1276                 // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
1277                 runner.cStateLock.Lock()
1278                 defer runner.cStateLock.Unlock()
1279
1280                 runner.CrunchLog.Print(runner.finalState)
1281
1282                 if runner.arvMountLog != nil {
1283                         runner.arvMountLog.Close()
1284                 }
1285                 runner.CrunchLog.Close()
1286
                // Closing CrunchLog above allows the logs to be
                // committed to Keep at this point, but re-open the
                // crunch log with ArvClient in case any further errors
                // (such as failing to write the log to Keep!) occur
                // while shutting down.
1291                 runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
1292                         ArvClient:     runner.DispatcherArvClient,
1293                         UUID:          runner.Container.UUID,
1294                         loggingStream: "crunch-run",
1295                         writeCloser:   nil,
1296                 })
1297                 runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
1298         }()
1299
1300         if runner.LogsPDH != nil {
1301                 // If we have already assigned something to LogsPDH,
1302                 // we must be closing the re-opened log, which won't
1303                 // end up getting attached to the container record and
1304                 // therefore doesn't need to be saved as a collection
1305                 // -- it exists only to send logs to other channels.
1306                 return nil
1307         }
1308         saved, err := runner.saveLogCollection(true)
1309         if err != nil {
1310                 return fmt.Errorf("error saving log collection: %s", err)
1311         }
1312         runner.logMtx.Lock()
1313         defer runner.logMtx.Unlock()
1314         runner.LogsPDH = &saved.PortableDataHash
1315         return nil
1316 }
1317
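// saveLogCollection flushes the log collection to Keep and creates or
// updates its collection record. When final is true the record is
// marked trashed (only the portable data hash is needed beyond that
// point); otherwise trash_at/delete_at are pushed into the future so
// the partial log remains readable while the container runs.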
1318 func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
1319         runner.logMtx.Lock()
1320         defer runner.logMtx.Unlock()
1321         if runner.LogsPDH != nil {
1322                 // Already finalized.
1323                 return
1324         }
1325         updates := arvadosclient.Dict{
1326                 "name": "logs for " + runner.Container.UUID,
1327         }
1328         mt, err1 := runner.LogCollection.MarshalManifest(".")
1329         if err1 == nil {
1330                 // Only send updated manifest text if there was no
1331                 // error.
1332                 updates["manifest_text"] = mt
1333         }
1334
1335         // Even if flushing the manifest had an error, we still want
1336         // to update the log record, if possible, to push the trash_at
1337         // and delete_at times into the future.  Details on bug
1338         // #17293.
1339         if final {
1340                 updates["is_trashed"] = true
1341         } else {
1342                 exp := time.Now().Add(crunchLogUpdatePeriod * 24)
1343                 updates["trash_at"] = exp
1344                 updates["delete_at"] = exp
1345         }
1346         reqBody := arvadosclient.Dict{"collection": updates}
1347         var err2 error
1348         if runner.logUUID == "" {
1349                 reqBody["ensure_unique_name"] = true
1350                 err2 = runner.DispatcherArvClient.Create("collections", reqBody, &response)
1351         } else {
1352                 err2 = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
1353         }
1354         if err2 == nil {
1355                 runner.logUUID = response.UUID
1356         }
1357
        if err1 != nil || err2 != nil {
                // Either error may be nil here, so use %v (with %q, a
                // nil error would render as "%!q(<nil>)").
                err = fmt.Errorf("error recording logs: %v, %v", err1, err2)
        }
1361         return
1362 }
1363
// UpdateContainerRunning updates the container state to "Running" and
// reports the gateway address, if any.
1365 func (runner *ContainerRunner) UpdateContainerRunning() error {
1366         runner.cStateLock.Lock()
1367         defer runner.cStateLock.Unlock()
1368         if runner.cCancelled {
1369                 return ErrCancelled
1370         }
1371         return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
1372                 arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running", "gateway_address": runner.gateway.Address}}, nil)
1373 }
1374
1375 // ContainerToken returns the api_token the container (and any
1376 // arv-mount processes) are allowed to use.
1377 func (runner *ContainerRunner) ContainerToken() (string, error) {
1378         if runner.token != "" {
1379                 return runner.token, nil
1380         }
1381
1382         var auth arvados.APIClientAuthorization
1383         err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
1384         if err != nil {
1385                 return "", err
1386         }
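        // Assemble a "v2" format token from the auth record, with the
        // container UUID appended (which, presumably, lets the API
        // server verify the token is used by the intended container).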
1387         runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
1388         return runner.token, nil
1389 }
1390
// UpdateContainerFinal updates the container record state on the API
// server to "Complete" or "Cancelled", attaching the final log, exit
// code, and output when available.
1393 func (runner *ContainerRunner) UpdateContainerFinal() error {
1394         update := arvadosclient.Dict{}
1395         update["state"] = runner.finalState
1396         if runner.LogsPDH != nil {
1397                 update["log"] = *runner.LogsPDH
1398         }
1399         if runner.finalState == "Complete" {
1400                 if runner.ExitCode != nil {
1401                         update["exit_code"] = *runner.ExitCode
1402                 }
1403                 if runner.OutputPDH != nil {
1404                         update["output"] = *runner.OutputPDH
1405                 }
1406         }
1407         return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
1408 }
1409
1410 // IsCancelled returns the value of Cancelled, with goroutine safety.
1411 func (runner *ContainerRunner) IsCancelled() bool {
1412         runner.cStateLock.Lock()
1413         defer runner.cStateLock.Unlock()
1414         return runner.cCancelled
1415 }
1416
// NewArvLogWriter creates an ArvLogWriter that writes to the named
// stream in the log collection as well as to the API server's logs
// endpoint.
1418 func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
1419         writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
1420         if err != nil {
1421                 return nil, err
1422         }
1423         return &ArvLogWriter{
1424                 ArvClient:     runner.DispatcherArvClient,
1425                 UUID:          runner.Container.UUID,
1426                 loggingStream: name,
1427                 writeCloser:   writer,
1428         }, nil
1429 }
1430
1431 // Run the full container lifecycle.
1432 func (runner *ContainerRunner) Run() (err error) {
1433         runner.CrunchLog.Printf("crunch-run %s started", cmd.Version.String())
1434         runner.CrunchLog.Printf("Executing container '%s' using %s runtime", runner.Container.UUID, runner.executor.Runtime())
1435
1436         hostname, hosterr := os.Hostname()
1437         if hosterr != nil {
                runner.CrunchLog.Printf("Error getting hostname: %v", hosterr)
1439         } else {
1440                 runner.CrunchLog.Printf("Executing on host '%s'", hostname)
1441         }
1442
1443         runner.finalState = "Queued"
1444
1445         defer func() {
1446                 runner.CleanupDirs()
1447
1448                 runner.CrunchLog.Printf("crunch-run finished")
1449                 runner.CrunchLog.Close()
1450         }()
1451
1452         err = runner.fetchContainerRecord()
1453         if err != nil {
1454                 return
1455         }
1456         if runner.Container.State != "Locked" {
1457                 return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
1458         }
1459
1460         var bindmounts map[string]bindmount
1461         defer func() {
1462                 // checkErr prints e (unless it's nil) and sets err to
1463                 // e (unless err is already non-nil). Thus, if err
1464                 // hasn't already been assigned when Run() returns,
1465                 // this cleanup func will cause Run() to return the
1466                 // first non-nil error that is passed to checkErr().
1467                 checkErr := func(errorIn string, e error) {
1468                         if e == nil {
1469                                 return
1470                         }
1471                         runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
1472                         if err == nil {
1473                                 err = e
1474                         }
1475                         if runner.finalState == "Complete" {
1476                                 // There was an error in the finalization.
1477                                 runner.finalState = "Cancelled"
1478                         }
1479                 }
1480
1481                 // Log the error encountered in Run(), if any
1482                 checkErr("Run", err)
1483
1484                 if runner.finalState == "Queued" {
1485                         runner.UpdateContainerFinal()
1486                         return
1487                 }
1488
1489                 if runner.IsCancelled() {
1490                         runner.finalState = "Cancelled"
1491                         // but don't return yet -- we still want to
1492                         // capture partial output and write logs
1493                 }
1494
1495                 if bindmounts != nil {
1496                         checkErr("CaptureOutput", runner.CaptureOutput(bindmounts))
1497                 }
1498                 checkErr("stopHoststat", runner.stopHoststat())
1499                 checkErr("CommitLogs", runner.CommitLogs())
1500                 runner.CleanupDirs()
1501                 checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
1502         }()
1503
1504         runner.setupSignals()
1505         err = runner.startHoststat()
1506         if err != nil {
1507                 return
1508         }
1509
        // set up FUSE mount and bind mounts
1511         bindmounts, err = runner.SetupMounts()
1512         if err != nil {
1513                 runner.finalState = "Cancelled"
1514                 err = fmt.Errorf("While setting up mounts: %v", err)
1515                 return
1516         }
1517
1518         // check for and/or load image
1519         imageID, err := runner.LoadImage()
1520         if err != nil {
1521                 if !runner.checkBrokenNode(err) {
1522                         // Failed to load image but not due to a "broken node"
1523                         // condition, probably user error.
1524                         runner.finalState = "Cancelled"
1525                 }
1526                 err = fmt.Errorf("While loading container image: %v", err)
1527                 return
1528         }
1529
1530         err = runner.CreateContainer(imageID, bindmounts)
1531         if err != nil {
1532                 return
1533         }
1534         err = runner.LogHostInfo()
1535         if err != nil {
1536                 return
1537         }
1538         err = runner.LogNodeRecord()
1539         if err != nil {
1540                 return
1541         }
1542         err = runner.LogContainerRecord()
1543         if err != nil {
1544                 return
1545         }
1546
1547         if runner.IsCancelled() {
1548                 return
1549         }
1550
1551         err = runner.UpdateContainerRunning()
1552         if err != nil {
1553                 return
1554         }
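        // From this point on the container is running. If crunch-run
        // exits early for any reason, the appropriate final state is
        // "Cancelled" unless WaitFinish() below completes and it is
        // reset to "Complete".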
1555         runner.finalState = "Cancelled"
1556
1557         err = runner.startCrunchstat()
1558         if err != nil {
1559                 return
1560         }
1561
1562         err = runner.StartContainer()
1563         if err != nil {
1564                 runner.checkBrokenNode(err)
1565                 return
1566         }
1567
1568         err = runner.WaitFinish()
1569         if err == nil && !runner.IsCancelled() {
1570                 runner.finalState = "Complete"
1571         }
1572         return
1573 }
1574
// fetchContainerRecord fetches the current container record (uuid =
// runner.Container.UUID) into runner.Container.
1577 func (runner *ContainerRunner) fetchContainerRecord() error {
1578         reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
1579         if err != nil {
1580                 return fmt.Errorf("error fetching container record: %v", err)
1581         }
1582         defer reader.Close()
1583
1584         dec := json.NewDecoder(reader)
1585         dec.UseNumber()
1586         err = dec.Decode(&runner.Container)
1587         if err != nil {
1588                 return fmt.Errorf("error decoding container record: %v", err)
1589         }
1590
1591         var sm struct {
1592                 SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
1593         }
1594
1595         containerToken, err := runner.ContainerToken()
1596         if err != nil {
1597                 return fmt.Errorf("error getting container token: %v", err)
1598         }
1599
1600         runner.ContainerArvClient, runner.ContainerKeepClient,
1601                 runner.containerClient, err = runner.MkArvClient(containerToken)
1602         if err != nil {
1603                 return fmt.Errorf("error creating container API client: %v", err)
1604         }
1605
1606         runner.ContainerKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
1607         runner.DispatcherKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
1608
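        // secret_mounts are not included in the regular container
        // record (which also keeps them out of the copy logged by
        // LogContainerRecord), so fetch them with a separate API call
        // using the container token.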
1609         err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
1610         if err != nil {
1611                 if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
1612                         return fmt.Errorf("error fetching secret_mounts: %v", err)
1613                 }
1614                 // ok && apierr.HttpStatusCode == 404, which means
1615                 // secret_mounts isn't supported by this API server.
1616         }
1617         runner.SecretMounts = sm.SecretMounts
1618
1619         return nil
1620 }
1621
1622 // NewContainerRunner creates a new container runner.
1623 func NewContainerRunner(dispatcherClient *arvados.Client,
1624         dispatcherArvClient IArvadosClient,
1625         dispatcherKeepClient IKeepClient,
1626         containerUUID string) (*ContainerRunner, error) {
1627
1628         cr := &ContainerRunner{
1629                 dispatcherClient:     dispatcherClient,
1630                 DispatcherArvClient:  dispatcherArvClient,
1631                 DispatcherKeepClient: dispatcherKeepClient,
1632         }
1633         cr.NewLogWriter = cr.NewArvLogWriter
1634         cr.RunArvMount = cr.ArvMountCmd
1635         cr.MkTempDir = ioutil.TempDir
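        // MkArvClient builds the API clients used for container-scoped
        // requests: a legacy arvadosclient/keepclient pair and a
        // newer-style arvados.Client, all authenticated with the given
        // (container) token.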
1636         cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
1637                 cl, err := arvadosclient.MakeArvadosClient()
1638                 if err != nil {
1639                         return nil, nil, nil, err
1640                 }
1641                 cl.ApiToken = token
1642                 kc, err := keepclient.MakeKeepClient(cl)
1643                 if err != nil {
1644                         return nil, nil, nil, err
1645                 }
1646                 c2 := arvados.NewClientFromEnv()
1647                 c2.AuthToken = token
1648                 return cl, kc, c2, nil
1649         }
1650         var err error
1651         cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
1652         if err != nil {
1653                 return nil, err
1654         }
1655         cr.Container.UUID = containerUUID
1656         w, err := cr.NewLogWriter("crunch-run")
1657         if err != nil {
1658                 return nil, err
1659         }
1660         cr.CrunchLog = NewThrottledLogger(w)
1661         cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
1662
1663         loadLogThrottleParams(dispatcherArvClient)
1664         go cr.updateLogs()
1665
1666         return cr, nil
1667 }
1668
1669 func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
1670         flags := flag.NewFlagSet(prog, flag.ContinueOnError)
1671         statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
1672         cgroupRoot := flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
1673         cgroupParent := flags.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
1674         cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
1675         caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
1676         detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
1677         stdinEnv := flags.Bool("stdin-env", false, "Load environment variables from JSON message on stdin")
1678         sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
1679         kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
1680         list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes")
1681         enableMemoryLimit := flags.Bool("enable-memory-limit", true, "tell container runtime to limit container's memory usage")
1682         enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
1683         networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
1684         memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
1685         runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
1686         flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
1687
1688         ignoreDetachFlag := false
1689         if len(args) > 0 && args[0] == "-no-detach" {
1690                 // This process was invoked by a parent process, which
1691                 // has passed along its own arguments, including
1692                 // -detach, after the leading -no-detach flag.  Strip
1693                 // the leading -no-detach flag (it's not recognized by
1694                 // flags.Parse()) and ignore the -detach flag that
1695                 // comes later.
1696                 args = args[1:]
1697                 ignoreDetachFlag = true
1698         }
1699
1700         if err := flags.Parse(args); err == flag.ErrHelp {
1701                 return 0
1702         } else if err != nil {
1703                 log.Print(err)
1704                 return 1
1705         }
1706
1707         if *stdinEnv && !ignoreDetachFlag {
1708                 // Load env vars on stdin if asked (but not in a
1709                 // detached child process, in which case stdin is
1710                 // /dev/null).
1711                 err := loadEnv(os.Stdin)
1712                 if err != nil {
1713                         log.Print(err)
1714                         return 1
1715                 }
1716         }
1717
1718         containerUUID := flags.Arg(0)
1719
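        // Handle the process-management modes before doing anything
        // else: -detach re-executes crunch-run as a background
        // process, -kill signals an existing crunch-run process, and
        // -list reports the UUIDs of running crunch-run processes.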
1720         switch {
1721         case *detach && !ignoreDetachFlag:
1722                 return Detach(containerUUID, prog, args, os.Stdout, os.Stderr)
1723         case *kill >= 0:
1724                 return KillProcess(containerUUID, syscall.Signal(*kill), os.Stdout, os.Stderr)
1725         case *list:
1726                 return ListProcesses(os.Stdout, os.Stderr)
1727         }
1728
1729         if containerUUID == "" {
1730                 log.Printf("usage: %s [options] UUID", prog)
1731                 return 1
1732         }
1733
1734         log.Printf("crunch-run %s started", cmd.Version.String())
1735         time.Sleep(*sleep)
1736
1737         if *caCertsPath != "" {
1738                 arvadosclient.CertFiles = []string{*caCertsPath}
1739         }
1740
1741         api, err := arvadosclient.MakeArvadosClient()
1742         if err != nil {
1743                 log.Printf("%s: %v", containerUUID, err)
1744                 return 1
1745         }
1746         api.Retries = 8
1747
1748         kc, kcerr := keepclient.MakeKeepClient(api)
1749         if kcerr != nil {
1750                 log.Printf("%s: %v", containerUUID, kcerr)
1751                 return 1
1752         }
1753         kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
1754         kc.Retries = 4
1755
1756         cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
1757         if err != nil {
1758                 log.Print(err)
1759                 return 1
1760         }
1761
1762         switch *runtimeEngine {
1763         case "docker":
1764                 cr.executor, err = newDockerExecutor(containerUUID, cr.CrunchLog.Printf, cr.containerWatchdogInterval)
1765         case "singularity":
1766                 cr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)
1767         default:
1768                 cr.CrunchLog.Printf("%s: unsupported RuntimeEngine %q", containerUUID, *runtimeEngine)
1769                 cr.CrunchLog.Close()
1770                 return 1
1771         }
1772         if err != nil {
1773                 cr.CrunchLog.Printf("%s: %v", containerUUID, err)
1774                 cr.checkBrokenNode(err)
1775                 cr.CrunchLog.Close()
1776                 return 1
1777         }
1778         defer cr.executor.Close()
1779
1780         gwAuthSecret := os.Getenv("GatewayAuthSecret")
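        // Remove the gateway auth secret from the environment so it is
        // not inherited by child processes (arv-mount, the container,
        // etc.).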
1781         os.Unsetenv("GatewayAuthSecret")
1782         if gwAuthSecret == "" {
1783                 // not safe to run a gateway service without an auth
1784                 // secret
1785                 cr.CrunchLog.Printf("Not starting a gateway server (GatewayAuthSecret was not provided by dispatcher)")
1786         } else if gwListen := os.Getenv("GatewayAddress"); gwListen == "" {
1787                 // dispatcher did not tell us which external IP
1788                 // address to advertise --> no gateway service
1789                 cr.CrunchLog.Printf("Not starting a gateway server (GatewayAddress was not provided by dispatcher)")
1790         } else if de, ok := cr.executor.(*dockerExecutor); ok {
1791                 cr.gateway = Gateway{
1792                         Address:            gwListen,
1793                         AuthSecret:         gwAuthSecret,
1794                         ContainerUUID:      containerUUID,
1795                         DockerContainerID:  &de.containerID,
1796                         Log:                cr.CrunchLog,
1797                         ContainerIPAddress: dockerContainerIPAddress(&de.containerID),
1798                 }
1799                 err = cr.gateway.Start()
1800                 if err != nil {
1801                         log.Printf("error starting gateway server: %s", err)
1802                         return 1
1803                 }
1804         }
1805
1806         parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerUUID+".")
1807         if tmperr != nil {
1808                 log.Printf("%s: %v", containerUUID, tmperr)
1809                 return 1
1810         }
1811
1812         cr.parentTemp = parentTemp
1813         cr.statInterval = *statInterval
1814         cr.cgroupRoot = *cgroupRoot
1815         cr.expectCgroupParent = *cgroupParent
1816         cr.enableMemoryLimit = *enableMemoryLimit
1817         cr.enableNetwork = *enableNetwork
1818         cr.networkMode = *networkMode
1819         if *cgroupParentSubsystem != "" {
1820                 p := findCgroup(*cgroupParentSubsystem)
1821                 cr.setCgroupParent = p
1822                 cr.expectCgroupParent = p
1823         }
1824
1825         runerr := cr.Run()
1826
        if *memprofile != "" {
                f, err := os.Create(*memprofile)
                if err != nil {
                        log.Printf("could not create memory profile: %s", err)
                } else {
                        runtime.GC() // get up-to-date statistics
                        if err := pprof.WriteHeapProfile(f); err != nil {
                                log.Printf("could not write memory profile: %s", err)
                        }
                        if closeerr := f.Close(); closeerr != nil {
                                log.Printf("closing memprofile file: %s", closeerr)
                        }
                }
        }
1841
1842         if runerr != nil {
1843                 log.Printf("%s: %v", containerUUID, runerr)
1844                 return 1
1845         }
1846         return 0
1847 }
1848
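// loadEnv reads a JSON-encoded map of environment variables from rdr
// (stdin, when the -stdin-env flag is used) and applies them to the
// current process.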
1849 func loadEnv(rdr io.Reader) error {
1850         buf, err := ioutil.ReadAll(rdr)
1851         if err != nil {
1852                 return fmt.Errorf("read stdin: %s", err)
1853         }
1854         var env map[string]string
1855         err = json.Unmarshal(buf, &env)
1856         if err != nil {
1857                 return fmt.Errorf("decode stdin: %s", err)
1858         }
1859         for k, v := range env {
1860                 err = os.Setenv(k, v)
1861                 if err != nil {
1862                         return fmt.Errorf("setenv(%q): %s", k, err)
1863                 }
1864         }
1865         return nil
1866 }