Merge branch '12614-broken-docker' refs #12614
[arvados.git] / services / crunch-run / crunchrun.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 package main
6
7 import (
8         "bytes"
9         "context"
10         "encoding/json"
11         "errors"
12         "flag"
13         "fmt"
14         "io"
15         "io/ioutil"
16         "log"
17         "os"
18         "os/exec"
19         "os/signal"
20         "path"
21         "path/filepath"
22         "regexp"
23         "runtime"
24         "runtime/pprof"
25         "sort"
26         "strings"
27         "sync"
28         "syscall"
29         "time"
30
31         "git.curoverse.com/arvados.git/lib/crunchstat"
32         "git.curoverse.com/arvados.git/sdk/go/arvados"
33         "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
34         "git.curoverse.com/arvados.git/sdk/go/keepclient"
35         "git.curoverse.com/arvados.git/sdk/go/manifest"
36
37         dockertypes "github.com/docker/docker/api/types"
38         dockercontainer "github.com/docker/docker/api/types/container"
39         dockernetwork "github.com/docker/docker/api/types/network"
40         dockerclient "github.com/docker/docker/client"
41 )
42
43 // IArvadosClient is the minimal Arvados API methods used by crunch-run.
44 type IArvadosClient interface {
45         Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
46         Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
47         Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
48         Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
49         CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
50         Discovery(key string) (interface{}, error)
51 }
52
53 // ErrCancelled is the error returned when the container is cancelled.
54 var ErrCancelled = errors.New("Cancelled")
55
56 // IKeepClient is the minimal Keep API methods used by crunch-run.
57 type IKeepClient interface {
58         PutHB(hash string, buf []byte) (string, int, error)
59         ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
60         ClearBlockCache()
61 }
62
63 // NewLogWriter is a factory function to create a new log writer.
64 type NewLogWriter func(name string) io.WriteCloser
65
66 type RunArvMount func(args []string, tok string) (*exec.Cmd, error)
67
68 type MkTempDir func(string, string) (string, error)
69
70 // ThinDockerClient is the minimal Docker client interface used by crunch-run.
71 type ThinDockerClient interface {
72         ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error)
73         ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
74                 networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
75         ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
76         ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
77         ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
78         ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
79         ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
80         ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
81 }
82
83 // ThinDockerClientProxy is a proxy implementation of ThinDockerClient
84 // that forwards each request to the wrapped dockerclient.Client.
85 type ThinDockerClientProxy struct {
86         Docker *dockerclient.Client
87 }
88
89 // ContainerAttach invokes dockerclient.Client.ContainerAttach
90 func (proxy ThinDockerClientProxy) ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) {
91         return proxy.Docker.ContainerAttach(ctx, container, options)
92 }
93
94 // ContainerCreate invokes dockerclient.Client.ContainerCreate
95 func (proxy ThinDockerClientProxy) ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
96         networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) {
97         return proxy.Docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
98 }
99
100 // ContainerStart invokes dockerclient.Client.ContainerStart
101 func (proxy ThinDockerClientProxy) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
102         return proxy.Docker.ContainerStart(ctx, container, options)
103 }
104
105 // ContainerStop invokes dockerclient.Client.ContainerStop
106 func (proxy ThinDockerClientProxy) ContainerStop(ctx context.Context, container string, timeout *time.Duration) error {
107         return proxy.Docker.ContainerStop(ctx, container, timeout)
108 }
109
110 // ContainerWait invokes dockerclient.Client.ContainerWait
111 func (proxy ThinDockerClientProxy) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
112         return proxy.Docker.ContainerWait(ctx, container, condition)
113 }
114
115 // ImageInspectWithRaw invokes dockerclient.Client.ImageInspectWithRaw
116 func (proxy ThinDockerClientProxy) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
117         return proxy.Docker.ImageInspectWithRaw(ctx, image)
118 }
119
120 // ImageLoad invokes dockerclient.Client.ImageLoad
121 func (proxy ThinDockerClientProxy) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
122         return proxy.Docker.ImageLoad(ctx, input, quiet)
123 }
124
125 // ImageRemove invokes dockerclient.Client.ImageRemove
126 func (proxy ThinDockerClientProxy) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
127         return proxy.Docker.ImageRemove(ctx, image, options)
128 }
129
130 // ContainerRunner is the main stateful struct used for a single execution of a
131 // container.
132 type ContainerRunner struct {
133         Docker    ThinDockerClient
134         ArvClient IArvadosClient
135         Kc        IKeepClient
136         arvados.Container
137         ContainerConfig dockercontainer.Config
138         dockercontainer.HostConfig
139         token       string
140         ContainerID string
141         ExitCode    *int
142         NewLogWriter
143         loggingDone   chan bool
144         CrunchLog     *ThrottledLogger
145         Stdout        io.WriteCloser
146         Stderr        io.WriteCloser
147         LogCollection *CollectionWriter
148         LogsPDH       *string
149         RunArvMount
150         MkTempDir
151         ArvMount       *exec.Cmd
152         ArvMountPoint  string
153         HostOutputDir  string
154         CleanupTempDir []string
155         Binds          []string
156         Volumes        map[string]struct{}
157         OutputPDH      *string
158         SigChan        chan os.Signal
159         ArvMountExit   chan error
160         finalState     string
161
162         statLogger   io.WriteCloser
163         statReporter *crunchstat.Reporter
164         statInterval time.Duration
165         cgroupRoot   string
166         // What we expect the container's cgroup parent to be.
167         expectCgroupParent string
168         // What we tell docker to use as the container's cgroup
169         // parent. Note: Ideally we would use the same field for both
170         // expectCgroupParent and setCgroupParent, and just make it
171         // default to "docker". However, when using docker < 1.10 with
172         // systemd, specifying a non-empty cgroup parent (even the
173         // default value "docker") hits a docker bug
174         // (https://github.com/docker/docker/issues/17126). Using two
175         // separate fields makes it possible to use the "expect cgroup
176         // parent to be X" feature even on sites where the "specify
177         // cgroup parent" feature breaks.
178         setCgroupParent string
179
180         cStateLock sync.Mutex
181         cStarted   bool // StartContainer() succeeded
182         cCancelled bool // StopContainer() invoked
183
184         enableNetwork string // one of "default" or "always"
185         networkMode   string // passed through to HostConfig.NetworkMode
186         arvMountLog   *ThrottledLogger
187 }
188
189 // setupSignals sets up signal handling to gracefully terminate the underlying
190 // Docker container and update state when receiving a TERM, INT or QUIT signal.
191 func (runner *ContainerRunner) setupSignals() {
192         runner.SigChan = make(chan os.Signal, 1)
193         signal.Notify(runner.SigChan, syscall.SIGTERM)
194         signal.Notify(runner.SigChan, syscall.SIGINT)
195         signal.Notify(runner.SigChan, syscall.SIGQUIT)
196
197         go func(sig chan os.Signal) {
198                 s := <-sig
199                 if s != nil {
200                         runner.CrunchLog.Printf("Caught signal %v", s)
201                 }
202                 runner.stop()
203         }(runner.SigChan)
204 }
205
206 // stop the underlying Docker container.
207 func (runner *ContainerRunner) stop() {
208         runner.cStateLock.Lock()
209         defer runner.cStateLock.Unlock()
210         if runner.cCancelled {
211                 return
212         }
213         runner.cCancelled = true
214         if runner.cStarted {
215                 timeout := 10 * time.Second
216                 err := runner.Docker.ContainerStop(context.TODO(), runner.ContainerID, &timeout)
217                 if err != nil {
218                         runner.CrunchLog.Printf("StopContainer failed: %s", err)
219                 }
220                 // Suppress multiple calls to stop()
221                 runner.cStarted = false
222         }
223 }
224
225 func (runner *ContainerRunner) stopSignals() {
226         if runner.SigChan != nil {
227                 signal.Stop(runner.SigChan)
228                 close(runner.SigChan)
229         }
230 }
231
232 var errorBlacklist = []string{
233         "(?ms).*[Cc]annot connect to the Docker daemon.*",
234         "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
235 }
236 var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
237
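// Illustrative usage (script path and container UUID hypothetical): the hook is
// supplied on the crunch-run command line, e.g.
//
//	crunch-run -broken-node-hook=/usr/local/bin/mark-node-broken zzzzz-dz642-xxxxxxxxxxxxxxx
//
// When an error matching errorBlacklist is seen, checkBrokenNode (below) runs
// the hook with its stdout/stderr redirected to the crunch-run log.
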
238 func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
239         for _, d := range errorBlacklist {
240                 if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
241                         runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
242                         if *brokenNodeHook == "" {
243                                 runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
244                         } else {
245                                 runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
246                                 // run killme script
247                                 c := exec.Command(*brokenNodeHook)
248                                 c.Stdout = runner.CrunchLog
249                                 c.Stderr = runner.CrunchLog
250                                 err := c.Run()
251                                 if err != nil {
252                                         runner.CrunchLog.Printf("Error running broken node hook: %v", err)
253                                 }
254                         }
255                         return true
256                 }
257         }
258         return false
259 }
260
261 // LoadImage determines the docker image id from the container record and
262 // checks if it is available in the local Docker image store.  If not, it loads
263 // the image from Keep.
264 func (runner *ContainerRunner) LoadImage() (err error) {
265
266         runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
267
268         var collection arvados.Collection
269         err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
270         if err != nil {
271                 return fmt.Errorf("While getting container image collection: %v", err)
272         }
273         manifest := manifest.Manifest{Text: collection.ManifestText}
274         var img, imageID string
275         for ms := range manifest.StreamIter() {
276                 img = ms.FileStreamSegments[0].Name
277                 if !strings.HasSuffix(img, ".tar") {
278                         return fmt.Errorf("First file in the container image collection does not end in .tar")
279                 }
280                 imageID = img[:len(img)-4]
281         }
282
283         runner.CrunchLog.Printf("Using Docker image id '%s'", imageID)
284
285         _, _, err = runner.Docker.ImageInspectWithRaw(context.TODO(), imageID)
286         if err != nil {
287                 runner.CrunchLog.Print("Loading Docker image from keep")
288
289                 var readCloser io.ReadCloser
290                 readCloser, err = runner.Kc.ManifestFileReader(manifest, img)
291                 if err != nil {
292                         return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
293                 }
294
295                 response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
296                 if err != nil {
297                         return fmt.Errorf("While loading container image into Docker: %v", err)
298                 }
299
300                 defer response.Body.Close()
301                 rbody, err := ioutil.ReadAll(response.Body)
302                 if err != nil {
303                         return fmt.Errorf("Reading response to image load: %v", err)
304                 }
305                 runner.CrunchLog.Printf("Docker response: %s", rbody)
306         } else {
307                 runner.CrunchLog.Print("Docker image is available")
308         }
309
310         runner.ContainerConfig.Image = imageID
311
312         runner.Kc.ClearBlockCache()
313
314         return nil
315 }
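
// Illustrative sketch (block locator and image ID hypothetical): the Docker
// image collection read by LoadImage above is expected to contain a single tar
// file whose basename is the image ID, e.g. a manifest like
//
//	. 0123456789abcdef0123456789abcdef+67108864 0:67108864:e4c2aefc2930.tar
//
// LoadImage strips the ".tar" suffix to obtain the ID passed to
// ImageInspectWithRaw, and streams the tar through ImageLoad when the image is
// not already present in the local Docker image store.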
316
317 func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
318         c = exec.Command("arv-mount", arvMountCmd...)
319
320         // Copy our environment, but override ARVADOS_API_TOKEN with
321         // the container auth token.
322         c.Env = nil
323         for _, s := range os.Environ() {
324                 if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
325                         c.Env = append(c.Env, s)
326                 }
327         }
328         c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
329
330         runner.arvMountLog = NewThrottledLogger(runner.NewLogWriter("arv-mount"))
331         c.Stdout = runner.arvMountLog
332         c.Stderr = runner.arvMountLog
333
334         runner.CrunchLog.Printf("Running %v", c.Args)
335
336         err = c.Start()
337         if err != nil {
338                 return nil, err
339         }
340
341         statReadme := make(chan bool)
342         runner.ArvMountExit = make(chan error)
343
344         keepStatting := true
345         go func() {
346                 for keepStatting {
347                         time.Sleep(100 * time.Millisecond)
348                         _, statErr := os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
349                         if statErr == nil {
350                                 keepStatting = false
351                                 statReadme <- true
352                         }
353                 }
354                 close(statReadme)
355         }()
356
357         go func() {
358                 mnterr := c.Wait()
359                 if mnterr != nil {
360                         runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
361                 }
362                 runner.ArvMountExit <- mnterr
363                 close(runner.ArvMountExit)
364         }()
365
366         select {
367         case <-statReadme:
368                 break
369         case err := <-runner.ArvMountExit:
370                 runner.ArvMount = nil
371                 keepStatting = false
372                 return nil, err
373         }
374
375         return c, nil
376 }
377
378 func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
379         if runner.ArvMountPoint == "" {
380                 runner.ArvMountPoint, err = runner.MkTempDir("", prefix)
381         }
382         return
383 }
384
385 func (runner *ContainerRunner) SetupMounts() (err error) {
386         err = runner.SetupArvMountPoint("keep")
387         if err != nil {
388                 return fmt.Errorf("While creating keep mount temp dir: %v", err)
389         }
390
391         pdhOnly := true
392         tmpcount := 0
393         arvMountCmd := []string{
394                 "--foreground",
395                 "--allow-other",
396                 "--read-write",
397                 fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
398
399         if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
400                 arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
401         }
402
403         collectionPaths := []string{}
404         runner.Binds = nil
405         runner.Volumes = make(map[string]struct{})
406         needCertMount := true
407
408         var binds []string
409         for bind := range runner.Container.Mounts {
410                 binds = append(binds, bind)
411         }
412         sort.Strings(binds)
413
414         for _, bind := range binds {
415                 mnt := runner.Container.Mounts[bind]
416                 if bind == "stdout" || bind == "stderr" {
417                         // Is it a "file" mount kind?
418                         if mnt.Kind != "file" {
419                                 return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind)
420                         }
421
422                         // Does path start with OutputPath?
423                         prefix := runner.Container.OutputPath
424                         if !strings.HasSuffix(prefix, "/") {
425                                 prefix += "/"
426                         }
427                         if !strings.HasPrefix(mnt.Path, prefix) {
428                                 return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
429                         }
430                 }
431
432                 if bind == "stdin" {
433                         // Is it a "collection" mount kind?
434                         if mnt.Kind != "collection" && mnt.Kind != "json" {
435                                 return fmt.Errorf("Unsupported mount kind '%s' for stdin. Only 'collection' or 'json' are supported.", mnt.Kind)
436                         }
437                 }
438
439                 if bind == "/etc/arvados/ca-certificates.crt" {
440                         needCertMount = false
441                 }
442
443                 if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
444                         if mnt.Kind != "collection" {
445                                 return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind)
446                         }
447                 }
448
449                 switch {
450                 case mnt.Kind == "collection" && bind != "stdin":
451                         var src string
452                         if mnt.UUID != "" && mnt.PortableDataHash != "" {
453                                 return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
454                         }
455                         if mnt.UUID != "" {
456                                 if mnt.Writable {
457                                         return fmt.Errorf("Writing to existing collections is currently not permitted.")
458                                 }
459                                 pdhOnly = false
460                                 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
461                         } else if mnt.PortableDataHash != "" {
462                                 if mnt.Writable {
463                                         return fmt.Errorf("Can never write to a collection specified by portable data hash")
464                                 }
465                                 idx := strings.Index(mnt.PortableDataHash, "/")
466                                 if idx > 0 {
467                                         mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
468                                         mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
469                                         runner.Container.Mounts[bind] = mnt
470                                 }
471                                 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
472                                 if mnt.Path != "" && mnt.Path != "." {
473                                         if strings.HasPrefix(mnt.Path, "./") {
474                                                 mnt.Path = mnt.Path[2:]
475                                         } else if strings.HasPrefix(mnt.Path, "/") {
476                                                 mnt.Path = mnt.Path[1:]
477                                         }
478                                         src += "/" + mnt.Path
479                                 }
480                         } else {
481                                 src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
482                                 arvMountCmd = append(arvMountCmd, "--mount-tmp")
483                                 arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
484                                 tmpcount += 1
485                         }
486                         if mnt.Writable {
487                                 if bind == runner.Container.OutputPath {
488                                         runner.HostOutputDir = src
489                                 } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
490                                         return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
491                                 }
492                                 runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
493                         } else {
494                                 runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
495                         }
496                         collectionPaths = append(collectionPaths, src)
497
498                 case mnt.Kind == "tmp":
499                         var tmpdir string
500                         tmpdir, err = runner.MkTempDir("", "")
501                         if err != nil {
502                                 return fmt.Errorf("While creating mount temp dir: %v", err)
503                         }
504                         st, staterr := os.Stat(tmpdir)
505                         if staterr != nil {
506                                 return fmt.Errorf("While Stat on temp dir: %v", staterr)
507                         }
508                         err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
509                 if err != nil {
510                                 return fmt.Errorf("While Chmod temp dir: %v", err)
511                         }
512                         runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
513                         runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
514                         if bind == runner.Container.OutputPath {
515                                 runner.HostOutputDir = tmpdir
516                         }
517
518                 case mnt.Kind == "json":
519                         jsondata, err := json.Marshal(mnt.Content)
520                         if err != nil {
521                                 return fmt.Errorf("encoding json data: %v", err)
522                         }
523                         // Create a tempdir with a single file
524                         // (instead of just a tempfile): this way we
525                         // can ensure the file is world-readable
526                         // inside the container, without having to
527                         // make it world-readable on the docker host.
528                         tmpdir, err := runner.MkTempDir("", "")
529                         if err != nil {
530                                 return fmt.Errorf("creating temp dir: %v", err)
531                         }
532                         runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
533                         tmpfn := filepath.Join(tmpdir, "mountdata.json")
534                         err = ioutil.WriteFile(tmpfn, jsondata, 0644)
535                         if err != nil {
536                                 return fmt.Errorf("writing temp file: %v", err)
537                         }
538                         runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
539                 }
540         }
541
542         if runner.HostOutputDir == "" {
543                 return fmt.Errorf("Output path does not correspond to a writable mount point")
544         }
545
546         if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
547                 for _, certfile := range arvadosclient.CertFiles {
548                         _, err := os.Stat(certfile)
549                         if err == nil {
550                                 runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
551                                 break
552                         }
553                 }
554         }
555
556         if pdhOnly {
557                 arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
558         } else {
559                 arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
560         }
561         arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
562
563         token, err := runner.ContainerToken()
564         if err != nil {
565                 return fmt.Errorf("could not get container token: %s", err)
566         }
567
568         runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
569         if err != nil {
570                 return fmt.Errorf("While trying to start arv-mount: %v", err)
571         }
572
573         for _, p := range collectionPaths {
574                 _, err = os.Stat(p)
575                 if err != nil {
576                         return fmt.Errorf("While checking that input files exist: %v", err)
577                 }
578         }
579
580         return nil
581 }
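
// Illustrative sketch (interval and mount point hypothetical): for a container
// whose collection mounts are all specified by portable data hash, SetupMounts
// above ends up invoking something like
//
//	arv-mount --foreground --allow-other --read-write \
//	    --crunchstat-interval=10 --mount-by-pdh by_id /tmp/keep123456789
//
// Mounts referenced by UUID switch the last flag to "--mount-by-id by_id", and
// writable scratch collections add "--mount-tmp tmpN" arguments.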
582
583 func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
584         // Handle docker log protocol
585         // https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
586
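        // Sketch of the multiplexed frame format handled below (stream numbers
        // per the Docker attach protocol): each frame begins with an 8-byte
        // header,
        //
        //	header[0]   stream type (1 = stdout, otherwise treated as stderr here)
        //	header[1:4] padding
        //	header[4:8] payload length, big-endian uint32
        //
        // followed by that many bytes of payload, copied to runner.Stdout or
        // runner.Stderr.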
587         header := make([]byte, 8)
588         for {
589                 _, readerr := io.ReadAtLeast(containerReader, header, 8)
590
591                 if readerr == nil {
592                         readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
593                         if header[0] == 1 {
594                                 // stdout
595                                 _, readerr = io.CopyN(runner.Stdout, containerReader, readsize)
596                         } else {
597                                 // stderr
598                                 _, readerr = io.CopyN(runner.Stderr, containerReader, readsize)
599                         }
600                 }
601
602                 if readerr != nil {
603                         if readerr != io.EOF {
604                                 runner.CrunchLog.Printf("While reading docker logs: %v", readerr)
605                         }
606
607                         closeerr := runner.Stdout.Close()
608                         if closeerr != nil {
609                                 runner.CrunchLog.Printf("While closing stdout logs: %v", closeerr)
610                         }
611
612                         closeerr = runner.Stderr.Close()
613                         if closeerr != nil {
614                                 runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr)
615                         }
616
617                         if runner.statReporter != nil {
618                                 runner.statReporter.Stop()
619                                 closeerr = runner.statLogger.Close()
620                                 if closeerr != nil {
621                                         runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr)
622                                 }
623                         }
624
625                         runner.loggingDone <- true
626                         close(runner.loggingDone)
627                         return
628                 }
629         }
630 }
631
632 func (runner *ContainerRunner) StartCrunchstat() {
633         runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
634         runner.statReporter = &crunchstat.Reporter{
635                 CID:          runner.ContainerID,
636                 Logger:       log.New(runner.statLogger, "", 0),
637                 CgroupParent: runner.expectCgroupParent,
638                 CgroupRoot:   runner.cgroupRoot,
639                 PollPeriod:   runner.statInterval,
640         }
641         runner.statReporter.Start()
642 }
643
644 type infoCommand struct {
645         label string
646         cmd   []string
647 }
648
649 // Gather node information and store it on the log for debugging
650 // purposes.
651 func (runner *ContainerRunner) LogNodeInfo() (err error) {
652         w := runner.NewLogWriter("node-info")
653         logger := log.New(w, "node-info", 0)
654
655         commands := []infoCommand{
656                 {
657                         label: "Host Information",
658                         cmd:   []string{"uname", "-a"},
659                 },
660                 {
661                         label: "CPU Information",
662                         cmd:   []string{"cat", "/proc/cpuinfo"},
663                 },
664                 {
665                         label: "Memory Information",
666                         cmd:   []string{"cat", "/proc/meminfo"},
667                 },
668                 {
669                         label: "Disk Space",
670                         cmd:   []string{"df", "-m", "/", os.TempDir()},
671                 },
672                 {
673                         label: "Disk INodes",
674                         cmd:   []string{"df", "-i", "/", os.TempDir()},
675                 },
676         }
677
678         // Run commands with informational output to be logged.
679         var out []byte
680         for _, command := range commands {
681                 out, err = exec.Command(command.cmd[0], command.cmd[1:]...).CombinedOutput()
682                 if err != nil {
683                         return fmt.Errorf("While running command %q: %v",
684                                 command.cmd, err)
685                 }
686                 logger.Println(command.label)
687                 for _, line := range strings.Split(string(out), "\n") {
688                         logger.Println(" ", line)
689                 }
690         }
691
692         err = w.Close()
693         if err != nil {
694                 return fmt.Errorf("While closing node-info logs: %v", err)
695         }
696         return nil
697 }
698
699 // Get and save the raw JSON container record from the API server
700 func (runner *ContainerRunner) LogContainerRecord() (err error) {
701         w := &ArvLogWriter{
702                 ArvClient:     runner.ArvClient,
703                 UUID:          runner.Container.UUID,
704                 loggingStream: "container",
705                 writeCloser:   runner.LogCollection.Open("container.json"),
706         }
707
708         // Get Container record JSON from the API Server
709         reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
710         if err != nil {
711                 return fmt.Errorf("While retrieving container record from the API server: %v", err)
712         }
713         defer reader.Close()
714
715         dec := json.NewDecoder(reader)
716         dec.UseNumber()
717         var cr map[string]interface{}
718         if err = dec.Decode(&cr); err != nil {
719                 return fmt.Errorf("While decoding the container record JSON response: %v", err)
720         }
721         // Re-encode it using indentation to improve readability
722         enc := json.NewEncoder(w)
723         enc.SetIndent("", "    ")
724         if err = enc.Encode(cr); err != nil {
725                 return fmt.Errorf("While logging the JSON container record: %v", err)
726         }
727         err = w.Close()
728         if err != nil {
729                 return fmt.Errorf("While closing container.json log: %v", err)
730         }
731         return nil
732 }
733
734 // AttachStreams connects the docker container stdin, stdout and stderr logs
735 // to the Arvados logger which logs to Keep and the API server logs table.
736 func (runner *ContainerRunner) AttachStreams() (err error) {
737
738         runner.CrunchLog.Print("Attaching container streams")
739
740         // If stdin mount is provided, attach it to the docker container
741         var stdinRdr arvados.File
742         var stdinJson []byte
743         if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
744                 if stdinMnt.Kind == "collection" {
745                         var stdinColl arvados.Collection
746                         collId := stdinMnt.UUID
747                         if collId == "" {
748                                 collId = stdinMnt.PortableDataHash
749                         }
750                         err = runner.ArvClient.Get("collections", collId, nil, &stdinColl)
751                         if err != nil {
752                                 return fmt.Errorf("While getting stdin collection: %v", err)
753                         }
754
755                         stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path)
756                         if os.IsNotExist(err) {
757                                 return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
758                         } else if err != nil {
759                                 return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
760                         }
761                 } else if stdinMnt.Kind == "json" {
762                         stdinJson, err = json.Marshal(stdinMnt.Content)
763                         if err != nil {
764                                 return fmt.Errorf("While encoding stdin json data: %v", err)
765                         }
766                 }
767         }
768
769         stdinUsed := stdinRdr != nil || len(stdinJson) != 0
770         response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID,
771                 dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
772         if err != nil {
773                 return fmt.Errorf("While attaching container stdout/stderr streams: %v", err)
774         }
775
776         runner.loggingDone = make(chan bool)
777
778         if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
779                 stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path)
780                 if err != nil {
781                         return err
782                 }
783                 runner.Stdout = stdoutFile
784         } else {
785                 runner.Stdout = NewThrottledLogger(runner.NewLogWriter("stdout"))
786         }
787
788         if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
789                 stderrFile, err := runner.getStdoutFile(stderrMnt.Path)
790                 if err != nil {
791                         return err
792                 }
793                 runner.Stderr = stderrFile
794         } else {
795                 runner.Stderr = NewThrottledLogger(runner.NewLogWriter("stderr"))
796         }
797
798         if stdinRdr != nil {
799                 go func() {
800                         _, err := io.Copy(response.Conn, stdinRdr)
801                         if err != nil {
802                                 runner.CrunchLog.Printf("While writing stdin collection to docker container %q", err)
803                                 runner.stop()
804                         }
805                         stdinRdr.Close()
806                         response.CloseWrite()
807                 }()
808         } else if len(stdinJson) != 0 {
809                 go func() {
810                         _, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
811                         if err != nil {
812                                 runner.CrunchLog.Printf("While writing stdin json to docker container %q", err)
813                                 runner.stop()
814                         }
815                         response.CloseWrite()
816                 }()
817         }
818
819         go runner.ProcessDockerAttach(response.Reader)
820
821         return nil
822 }
823
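// getStdoutFile maps a stdout/stderr mount path inside the container (which
// must lie under the container's OutputPath) to a file created under
// HostOutputDir, creating any intermediate directories first. Illustrative
// example (paths hypothetical): with OutputPath "/out" and mount path
// "/out/sub/stdout.txt", the file is created at <HostOutputDir>/sub/stdout.txt.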
824 func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
825         stdoutPath := mntPath[len(runner.Container.OutputPath):]
826         index := strings.LastIndex(stdoutPath, "/")
827         if index > 0 {
828                 subdirs := stdoutPath[:index]
829                 if subdirs != "" {
830                         st, err := os.Stat(runner.HostOutputDir)
831                         if err != nil {
832                                 return nil, fmt.Errorf("While Stat on temp dir: %v", err)
833                         }
834                         stdoutDir := filepath.Join(runner.HostOutputDir, subdirs)
835                         err = os.MkdirAll(stdoutDir, st.Mode()|os.ModeSetgid|0777)
836                         if err != nil {
837                                 return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutDir, err)
838                         }
839                 }
840         }
841         stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
842         if err != nil {
843                 return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
844         }
845
846         return stdoutFile, nil
847 }
848
849 // CreateContainer creates the docker container.
850 func (runner *ContainerRunner) CreateContainer() error {
851         runner.CrunchLog.Print("Creating Docker container")
852
853         runner.ContainerConfig.Cmd = runner.Container.Command
854         if runner.Container.Cwd != "." {
855                 runner.ContainerConfig.WorkingDir = runner.Container.Cwd
856         }
857
858         for k, v := range runner.Container.Environment {
859                 runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v)
860         }
861
862         runner.ContainerConfig.Volumes = runner.Volumes
863
864         runner.HostConfig = dockercontainer.HostConfig{
865                 Binds: runner.Binds,
866                 LogConfig: dockercontainer.LogConfig{
867                         Type: "none",
868                 },
869                 Resources: dockercontainer.Resources{
870                         CgroupParent: runner.setCgroupParent,
871                 },
872         }
873
874         if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
875                 tok, err := runner.ContainerToken()
876                 if err != nil {
877                         return err
878                 }
879                 runner.ContainerConfig.Env = append(runner.ContainerConfig.Env,
880                         "ARVADOS_API_TOKEN="+tok,
881                         "ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"),
882                         "ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"),
883                 )
884                 runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
885         } else {
886                 if runner.enableNetwork == "always" {
887                         runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
888                 } else {
889                         runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
890                 }
891         }
892
893         _, stdinUsed := runner.Container.Mounts["stdin"]
894         runner.ContainerConfig.OpenStdin = stdinUsed
895         runner.ContainerConfig.StdinOnce = stdinUsed
896         runner.ContainerConfig.AttachStdin = stdinUsed
897         runner.ContainerConfig.AttachStdout = true
898         runner.ContainerConfig.AttachStderr = true
899
900         createdBody, err := runner.Docker.ContainerCreate(context.TODO(), &runner.ContainerConfig, &runner.HostConfig, nil, runner.Container.UUID)
901         if err != nil {
902                 return fmt.Errorf("While creating container: %v", err)
903         }
904
905         runner.ContainerID = createdBody.ID
906
907         return runner.AttachStreams()
908 }
909
910 // StartContainer starts the docker container created by CreateContainer.
911 func (runner *ContainerRunner) StartContainer() error {
912         runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID)
913         runner.cStateLock.Lock()
914         defer runner.cStateLock.Unlock()
915         if runner.cCancelled {
916                 return ErrCancelled
917         }
918         err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
919                 dockertypes.ContainerStartOptions{})
920         if err != nil {
921                 var advice string
922                 if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
923                         advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or the script has Windows line endings.", runner.Container.Command[0])
924                 }
925                 return fmt.Errorf("could not start container: %v%s", err, advice)
926         }
927         runner.cStarted = true
928         return nil
929 }
930
931 // WaitFinish waits for the container to terminate, capture the exit code, and
932 // close the stdout/stderr logging.
933 func (runner *ContainerRunner) WaitFinish() (err error) {
934         runner.CrunchLog.Print("Waiting for container to finish")
935
936         waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, "not-running")
937
938         go func() {
939                 <-runner.ArvMountExit
940                 if runner.cStarted {
941                         runner.CrunchLog.Printf("arv-mount exited while container is still running.  Stopping container.")
942                         runner.stop()
943                 }
944         }()
945
946         var waitBody dockercontainer.ContainerWaitOKBody
947         select {
948         case waitBody = <-waitOk:
949         case err = <-waitErr:
950         }
951
952         // Container isn't running any more
953         runner.cStarted = false
954
955         if err != nil {
956                 return fmt.Errorf("container wait: %v", err)
957         }
958
959         runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
960         code := int(waitBody.StatusCode)
961         runner.ExitCode = &code
962
963         // wait for stdout/stderr to complete
964         <-runner.loggingDone
965
966         return nil
967 }
968
969 var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")
970
971 func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
972         // Follow symlinks if necessary
973         info = startinfo
974         tgt = path
975         readlinktgt = ""
976         nextlink := path
977         for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
978                 if followed >= limitFollowSymlinks {
979                         // Got stuck in a loop or just a pathological number of links, give up.
980                         err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
981                         return
982                 }
983
984                 readlinktgt, err = os.Readlink(nextlink)
985                 if err != nil {
986                         return
987                 }
988
989                 tgt = readlinktgt
990                 if !strings.HasPrefix(tgt, "/") {
991                         // Relative symlink, resolve it to host path
992                         tgt = filepath.Join(filepath.Dir(path), tgt)
993                 }
994                 if strings.HasPrefix(tgt, runner.Container.OutputPath+"/") && !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
995                         // Absolute symlink to container output path, adjust it to host output path.
996                         tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
997                 }
998                 if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
999                         // After dereferencing, symlink target must either be
1000                         // within output directory, or must point to a
1001                         // collection mount.
1002                         err = ErrNotInOutputDir
1003                         return
1004                 }
1005
1006                 info, err = os.Lstat(tgt)
1007                 if err != nil {
1008                         // Lstat on the symlink target failed; report the offending target.
1009                         err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
1010                                 path[len(runner.HostOutputDir):], readlinktgt, err)
1011                         return
1012                 }
1013
1014                 nextlink = tgt
1015         }
1016
1017         return
1018 }
1019
1020 var limitFollowSymlinks = 10
1021
1022 // UploadOutputFile uploads a single file found within the output directory,
1023 // with special handling for symlinks. If a symlink leads to a keep mount, the
1024 // manifest text is copied from the keep mount into the output manifestText.
1025 // Whether symlinks are relative or absolute, every symlink target (even
1026 // targets that are themselves symlinks) must point to a path in either the
1027 // output directory or a collection mount.
1028 //
1029 // Assumes initial value of "path" is absolute, and located within runner.HostOutputDir.
1030 func (runner *ContainerRunner) UploadOutputFile(
1031         path string,
1032         info os.FileInfo,
1033         infoerr error,
1034         binds []string,
1035         walkUpload *WalkUpload,
1036         relocateFrom string,
1037         relocateTo string,
1038         followed int) (manifestText string, err error) {
1039
1040         if info.Mode().IsDir() {
1041                 return
1042         }
1043
1044         if infoerr != nil {
1045                 return "", infoerr
1046         }
1047
1048         if followed >= limitFollowSymlinks {
1049                 // Got stuck in a loop or just a pathological number of
1050                 // directory links, give up.
1051                 err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
1052                 return
1053         }
1054
1055         // When following symlinks, the source path may need to be logically
1056         // relocated to some other path within the output collection.  Remove
1057         // the relocateFrom prefix and replace it with relocateTo.
1058         relocated := relocateTo + path[len(relocateFrom):]
1059
1060         tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
1061         if derefErr != nil && derefErr != ErrNotInOutputDir {
1062                 return "", derefErr
1063         }
1064
1065         // go through mounts and try reverse map to collection reference
1066         for _, bind := range binds {
1067                 mnt := runner.Container.Mounts[bind]
1068                 if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
1069                         // get path relative to bind
1070                         targetSuffix := tgt[len(bind):]
1071
1072                         // Copy mount and adjust the path to add path relative to the bind
1073                         adjustedMount := mnt
1074                         adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
1075
1076                         // Terminates in this keep mount, so add the
1077                         // manifest text at appropriate location.
1078                         outputSuffix := path[len(runner.HostOutputDir):]
1079                         manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
1080                         return
1081                 }
1082         }
1083
1084         // If target is not a collection mount, it must be located within the
1085         // output directory, otherwise it is an error.
1086         if derefErr == ErrNotInOutputDir {
1087                 err = fmt.Errorf("Symlink in output %q points to invalid location %q, must point to path within the output directory.",
1088                         path[len(runner.HostOutputDir):], readlinktgt)
1089                 return
1090         }
1091
1092         if info.Mode().IsRegular() {
1093                 return "", walkUpload.UploadFile(relocated, tgt)
1094         }
1095
1096         if info.Mode().IsDir() {
1097                 // Symlink leads to directory.  Walk() doesn't follow
1098                 // directory symlinks, so we walk the target directory
1099                 // instead.  Within the walk, file paths are relocated
1100                 // so they appear under the original symlink path.
1101                 err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
1102                         var m string
1103                         m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
1104                                 binds, walkUpload, tgt, relocated, followed+1)
1105                         if walkerr == nil {
1106                                 manifestText = manifestText + m
1107                         }
1108                         return walkerr
1109                 })
1110                 return
1111         }
1112
1113         return
1114 }
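
// Illustrative walk-through of the relocation logic above (paths hypothetical):
// if <HostOutputDir>/data is a symlink to the directory <HostOutputDir>/real,
// the target directory is walked with relocateFrom="<HostOutputDir>/real" and
// relocateTo="<HostOutputDir>/data", so a file real/a.txt is uploaded to the
// output collection as data/a.txt, i.e. under the original symlink path.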
1115
1116 // CaptureOutput saves the container output as a new collection and records its portable data hash in runner.OutputPDH
1117 func (runner *ContainerRunner) CaptureOutput() error {
1118         if runner.finalState != "Complete" {
1119                 return nil
1120         }
1121
1122         if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
1123                 // Output may have been set directly by the container, so
1124                 // refresh the container record to check.
1125                 err := runner.ArvClient.Get("containers", runner.Container.UUID,
1126                         nil, &runner.Container)
1127                 if err != nil {
1128                         return err
1129                 }
1130                 if runner.Container.Output != "" {
1131                         // Container output is already set.
1132                         runner.OutputPDH = &runner.Container.Output
1133                         return nil
1134                 }
1135         }
1136
1137         if runner.HostOutputDir == "" {
1138                 return nil
1139         }
1140
1141         _, err := os.Stat(runner.HostOutputDir)
1142         if err != nil {
1143                 return fmt.Errorf("While checking host output path: %v", err)
1144         }
1145
1146         // Pre-populate output from the configured mount points
1147         var binds []string
1148         for bind, mnt := range runner.Container.Mounts {
1149                 if mnt.Kind == "collection" {
1150                         binds = append(binds, bind)
1151                 }
1152         }
1153         sort.Strings(binds)
1154
1155         var manifestText string
1156
1157         collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
1158         _, err = os.Stat(collectionMetafile)
1159         if err != nil {
1160                 // Regular directory
1161
1162                 cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
1163                 walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)
1164
1165                 var m string
1166                 err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
1167                         m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
1168                         if err == nil {
1169                                 manifestText = manifestText + m
1170                         }
1171                         return err
1172                 })
1173
1174                 cw.EndUpload(walkUpload)
1175
1176                 if err != nil {
1177                         return fmt.Errorf("While uploading output files: %v", err)
1178                 }
1179
1180                 m, err = cw.ManifestText()
1181                 manifestText = manifestText + m
1182                 if err != nil {
1183                         return fmt.Errorf("While uploading output files: %v", err)
1184                 }
1185         } else {
1186                 // FUSE mount directory
1187                 file, openerr := os.Open(collectionMetafile)
1188                 if openerr != nil {
1189                         return fmt.Errorf("While opening FUSE metafile: %v", openerr)
1190                 }
1191                 defer file.Close()
1192
1193                 var rec arvados.Collection
1194                 err = json.NewDecoder(file).Decode(&rec)
1195                 if err != nil {
1196                         return fmt.Errorf("While reading FUSE metafile: %v", err)
1197                 }
1198                 manifestText = rec.ManifestText
1199         }
1200
1201         for _, bind := range binds {
1202                 mnt := runner.Container.Mounts[bind]
1203
1204                 bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath)
1205
1206                 if bindSuffix == bind || len(bindSuffix) <= 0 {
1207                         // either does not start with OutputPath or is OutputPath itself
1208                         continue
1209                 }
1210
1211                 if mnt.ExcludeFromOutput {
1212                         continue
1213                 }
1214
1215                 // append to manifest_text
1216                 m, err := runner.getCollectionManifestForPath(mnt, bindSuffix)
1217                 if err != nil {
1218                         return err
1219                 }
1220
1221                 manifestText = manifestText + m
1222         }
1223
1224         // Save output
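        // Extract(".", ".") returns a normalized copy of the concatenated
        // manifest fragments before the collection is created.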
1225         var response arvados.Collection
1226         manifest := manifest.Manifest{Text: manifestText}
1227         manifestText = manifest.Extract(".", ".").Text
1228         err = runner.ArvClient.Create("collections",
1229                 arvadosclient.Dict{
1230                         "ensure_unique_name": true,
1231                         "collection": arvadosclient.Dict{
1232                                 "is_trashed":    true,
1233                                 "name":          "output for " + runner.Container.UUID,
1234                                 "manifest_text": manifestText}},
1235                 &response)
1236         if err != nil {
1237                 return fmt.Errorf("While creating output collection: %v", err)
1238         }
1239         runner.OutputPDH = &response.PortableDataHash
1240         return nil
1241 }
1242
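// outputCollections caches collections fetched by getCollectionManifestForPath,
// keyed by portable data hash, so each collection is retrieved at most once.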
1243 var outputCollections = make(map[string]arvados.Collection)
1244
1245 // getCollectionManifestForPath fetches the collection for mnt.PortableDataHash
1246 // and returns the manifest_text fragment corresponding to the specified
1247 //  mnt.Path, after making any required updates.
1248 //  Ex:
1249 //    If mnt.Path is not specified,
1250 //      return the entire manifest_text after replacing any "." with bindSuffix
1251 //    If mnt.Path corresponds to one stream,
1252 //      return the manifest_text for that stream after replacing that stream name with bindSuffix
1253 //    Otherwise, a single file within a stream is being sought. Return the manifest_text
1254 //      for that stream after replacing the stream name with bindSuffix minus its last word,
1255 //      and the file name with the last word of bindSuffix
1256 //  Allowed path examples:
1257 //    "path":"/"
1258 //    "path":"/subdir1"
1259 //    "path":"/subdir1/subdir2"
1260 //    "path":"/subdir/filename" etc.
1261 func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) {
1262         collection := outputCollections[mnt.PortableDataHash]
1263         if collection.PortableDataHash == "" {
1264                 err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection)
1265                 if err != nil {
1266                         return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err)
1267                 }
1268                 outputCollections[mnt.PortableDataHash] = collection
1269         }
1270
1271         if collection.ManifestText == "" {
1272                 runner.CrunchLog.Printf("No manifest text for collection %v", collection.PortableDataHash)
1273                 return "", nil
1274         }
1275
1276         mft := manifest.Manifest{Text: collection.ManifestText}
1277         extracted := mft.Extract(mnt.Path, bindSuffix)
1278         if extracted.Err != nil {
1279                 return "", fmt.Errorf("Error parsing manifest for %v: %v", mnt.PortableDataHash, extracted.Err.Error())
1280         }
1281         return extracted.Text, nil
1282 }
1283
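// CleanupDirs unmounts arv-mount (giving it a limited time to exit cleanly)
// and removes the temporary directories created for the container.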
1284 func (runner *ContainerRunner) CleanupDirs() {
1285         if runner.ArvMount != nil {
1286                 var delay int64 = 8
1287                 umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
1288                 umount.Stdout = runner.CrunchLog
1289                 umount.Stderr = runner.CrunchLog
1290                 runner.CrunchLog.Printf("Running %v", umount.Args)
1291                 umnterr := umount.Start()
1292
1293                 if umnterr != nil {
1294                         runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
1295                 } else {
1296                         // If arv-mount --unmount gets stuck for any reason, we
1297                         // don't want to wait for it forever.  Do Wait() in a goroutine
1298                         // so it doesn't block crunch-run.
1299                         umountExit := make(chan error)
1300                         go func() {
1301                                 mnterr := umount.Wait()
1302                                 if mnterr != nil {
1303                                         runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
1304                                 }
1305                                 umountExit <- mnterr
1306                         }()
1307
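                        // Wait for the unmount command and then for arv-mount
                        // itself to exit; if either is still running after the
                        // unmount timeout, kill both processes.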
1308                         for again := true; again; {
1309                                 again = false
1310                                 select {
1311                                 case <-umountExit:
1312                                         umount = nil
1313                                         again = true
1314                                 case <-runner.ArvMountExit:
1315                                         break
1316                                 case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
1317                                         runner.CrunchLog.Printf("Timed out waiting for unmount")
1318                                         if umount != nil {
1319                                                 umount.Process.Kill()
1320                                         }
1321                                         runner.ArvMount.Process.Kill()
1322                                 }
1323                         }
1324                 }
1325         }
1326
1327         if runner.ArvMountPoint != "" {
1328                 if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
1329                         runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
1330                 }
1331         }
1332
1333         for _, tmpdir := range runner.CleanupTempDir {
1334                 if rmerr := os.RemoveAll(tmpdir); rmerr != nil {
1335                         runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", tmpdir, rmerr)
1336                 }
1337         }
1338 }
1339
1340 // CommitLogs posts the collection containing the final container logs.
1341 func (runner *ContainerRunner) CommitLogs() error {
1342         runner.CrunchLog.Print(runner.finalState)
1343
1344         if runner.arvMountLog != nil {
1345                 runner.arvMountLog.Close()
1346         }
1347         runner.CrunchLog.Close()
1348
1349         // Closing CrunchLog above allows the logs to be committed to Keep at
1350         // this point, but re-open the crunch log with ArvClient in case there
1351         // are any further errors (such as failing to write the log to Keep!)
1352         // while shutting down.
1353         runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
1354                 UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
1355         runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
1356
1357         if runner.LogsPDH != nil {
1358                 // If we have already assigned something to LogsPDH,
1359                 // we must be closing the re-opened log, which won't
1360                 // end up getting attached to the container record and
1361                 // therefore doesn't need to be saved as a collection
1362                 // -- it exists only to send logs to other channels.
1363                 return nil
1364         }
1365
1366         mt, err := runner.LogCollection.ManifestText()
1367         if err != nil {
1368                 return fmt.Errorf("While creating log manifest: %v", err)
1369         }
1370
1371         var response arvados.Collection
1372         err = runner.ArvClient.Create("collections",
1373                 arvadosclient.Dict{
1374                         "ensure_unique_name": true,
1375                         "collection": arvadosclient.Dict{
1376                                 "is_trashed":    true,
1377                                 "name":          "logs for " + runner.Container.UUID,
1378                                 "manifest_text": mt}},
1379                 &response)
1380         if err != nil {
1381                 return fmt.Errorf("While creating log collection: %v", err)
1382         }
1383         runner.LogsPDH = &response.PortableDataHash
1384         return nil
1385 }
1386
1387 // UpdateContainerRunning updates the container state to "Running"
1388 func (runner *ContainerRunner) UpdateContainerRunning() error {
1389         runner.cStateLock.Lock()
1390         defer runner.cStateLock.Unlock()
1391         if runner.cCancelled {
1392                 return ErrCancelled
1393         }
1394         return runner.ArvClient.Update("containers", runner.Container.UUID,
1395                 arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
1396 }
1397
1398 // ContainerToken returns the api_token the container (and any
1399 // arv-mount processes) are allowed to use.
1400 func (runner *ContainerRunner) ContainerToken() (string, error) {
1401         if runner.token != "" {
1402                 return runner.token, nil
1403         }
1404
1405         var auth arvados.APIClientAuthorization
1406         err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
1407         if err != nil {
1408                 return "", err
1409         }
1410         runner.token = auth.APIToken
1411         return runner.token, nil
1412 }
1413
1414 // UpdateContainerFinal updates the container record state on the API
1415 // server to "Complete" or "Cancelled"
1416 func (runner *ContainerRunner) UpdateContainerFinal() error {
1417         update := arvadosclient.Dict{}
1418         update["state"] = runner.finalState
1419         if runner.LogsPDH != nil {
1420                 update["log"] = *runner.LogsPDH
1421         }
1422         if runner.finalState == "Complete" {
1423                 if runner.ExitCode != nil {
1424                         update["exit_code"] = *runner.ExitCode
1425                 }
1426                 if runner.OutputPDH != nil {
1427                         update["output"] = *runner.OutputPDH
1428                 }
1429         }
1430         return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
1431 }
1432
1433 // IsCancelled returns the value of Cancelled, with goroutine safety.
1434 func (runner *ContainerRunner) IsCancelled() bool {
1435         runner.cStateLock.Lock()
1436         defer runner.cStateLock.Unlock()
1437         return runner.cCancelled
1438 }
1439
1440 // NewArvLogWriter creates an ArvLogWriter
1441 func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
1442         return &ArvLogWriter{
1443                 ArvClient:     runner.ArvClient,
1444                 UUID:          runner.Container.UUID,
1445                 loggingStream: name,
1446                 writeCloser:   runner.LogCollection.Open(name + ".txt")}
1447 }
1448
1449 // Run the full container lifecycle.
1450 func (runner *ContainerRunner) Run() (err error) {
1451         runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
1452
1453         hostname, hosterr := os.Hostname()
1454         if hosterr != nil {
1455                 runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
1456         } else {
1457                 runner.CrunchLog.Printf("Executing on host '%s'", hostname)
1458         }
1459
1460         runner.finalState = "Queued"
1461
1462         defer func() {
1463                 runner.stopSignals()
1464                 runner.CleanupDirs()
1465
1466                 runner.CrunchLog.Printf("crunch-run finished")
1467                 runner.CrunchLog.Close()
1468         }()
1469
1470         defer func() {
1471                 // checkErr prints e (unless it's nil) and sets err to
1472                 // e (unless err is already non-nil). Thus, if err
1473                 // hasn't already been assigned when Run() returns,
1474                 // this cleanup func will cause Run() to return the
1475                 // first non-nil error that is passed to checkErr().
1476                 checkErr := func(e error) {
1477                         if e == nil {
1478                                 return
1479                         }
1480                         runner.CrunchLog.Print(e)
1481                         if err == nil {
1482                                 err = e
1483                         }
1484                         if runner.finalState == "Complete" {
1485                                 // There was an error in the finalization.
1486                                 runner.finalState = "Cancelled"
1487                         }
1488                 }
1489
1490                 // Log the error encountered in Run(), if any
1491                 checkErr(err)
1492
1493                 if runner.finalState == "Queued" {
1494                         runner.UpdateContainerFinal()
1495                         return
1496                 }
1497
1498                 if runner.IsCancelled() {
1499                         runner.finalState = "Cancelled"
1500                         // but don't return yet -- we still want to
1501                         // capture partial output and write logs
1502                 }
1503
1504                 checkErr(runner.CaptureOutput())
1505                 checkErr(runner.CommitLogs())
1506                 checkErr(runner.UpdateContainerFinal())
1507         }()
1508
1509         err = runner.fetchContainerRecord()
1510         if err != nil {
1511                 return
1512         }
1513
1514         // setup signal handling
1515         runner.setupSignals()
1516
1517         // check for and/or load image
1518         err = runner.LoadImage()
1519         if err != nil {
1520                 if !runner.checkBrokenNode(err) {
1521                         // Failed to load image but not due to a "broken node"
1522                         // condition, probably user error.
1523                         runner.finalState = "Cancelled"
1524                 }
1525                 err = fmt.Errorf("While loading container image: %v", err)
1526                 return
1527         }
1528
1529         // set up FUSE mount and binds
1530         err = runner.SetupMounts()
1531         if err != nil {
1532                 runner.finalState = "Cancelled"
1533                 err = fmt.Errorf("While setting up mounts: %v", err)
1534                 return
1535         }
1536
1537         err = runner.CreateContainer()
1538         if err != nil {
1539                 return
1540         }
1541
1542         // Gather and record node information
1543         err = runner.LogNodeInfo()
1544         if err != nil {
1545                 return
1546         }
1547         // Save container.json record on log collection
1548         err = runner.LogContainerRecord()
1549         if err != nil {
1550                 return
1551         }
1552
1553         if runner.IsCancelled() {
1554                 return
1555         }
1556
1557         err = runner.UpdateContainerRunning()
1558         if err != nil {
1559                 return
1560         }
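        // Once the container has been reported as Running, default the final
        // state to Cancelled; it is only changed to Complete below if
        // WaitFinish() succeeds.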
1561         runner.finalState = "Cancelled"
1562
1563         runner.StartCrunchstat()
1564
1565         err = runner.StartContainer()
1566         if err != nil {
1567                 runner.checkBrokenNode(err)
1568                 return
1569         }
1570
1571         err = runner.WaitFinish()
1572         if err == nil {
1573                 runner.finalState = "Complete"
1574         }
1575         return
1576 }
1577
1578 // Fetch the current container record (uuid = runner.Container.UUID)
1579 // into runner.Container.
1580 func (runner *ContainerRunner) fetchContainerRecord() error {
1581         reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
1582         if err != nil {
1583                 return fmt.Errorf("error fetching container record: %v", err)
1584         }
1585         defer reader.Close()
1586
1587         dec := json.NewDecoder(reader)
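        // UseNumber() decodes numeric fields as json.Number instead of
        // float64, preserving precision for large integer values in the
        // container record (e.g. runtime constraints).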
1588         dec.UseNumber()
1589         err = dec.Decode(&runner.Container)
1590         if err != nil {
1591                 return fmt.Errorf("error decoding container record: %v", err)
1592         }
1593         return nil
1594 }
1595
1596 // NewContainerRunner creates a new container runner.
1597 func NewContainerRunner(api IArvadosClient,
1598         kc IKeepClient,
1599         docker ThinDockerClient,
1600         containerUUID string) *ContainerRunner {
1601
1602         cr := &ContainerRunner{ArvClient: api, Kc: kc, Docker: docker}
1603         cr.NewLogWriter = cr.NewArvLogWriter
1604         cr.RunArvMount = cr.ArvMountCmd
1605         cr.MkTempDir = ioutil.TempDir
1606         cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
1607         cr.Container.UUID = containerUUID
1608         cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
1609         cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
1610
1611         loadLogThrottleParams(api)
1612
1613         return cr
1614 }
1615
1616 func main() {
1617         statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
1618         cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
1619         cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
1620         cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
1621         caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
1622         enableNetwork := flag.String("container-enable-networking", "default",
1623                 `Specify if networking should be enabled for container.  One of 'default', 'always':
1624         default: only enable networking if container requests it.
1625         always:  containers always have networking enabled
1626         `)
1627         networkMode := flag.String("container-network-mode", "default",
1628                 `Set networking mode for container.  Corresponds to Docker network mode (--net).
1629         `)
1630         memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
1631         flag.Parse()
1632
1633         containerId := flag.Arg(0)
1634
1635         if *caCertsPath != "" {
1636                 arvadosclient.CertFiles = []string{*caCertsPath}
1637         }
1638
1639         api, err := arvadosclient.MakeArvadosClient()
1640         if err != nil {
1641                 log.Fatalf("%s: %v", containerId, err)
1642         }
1643         api.Retries = 8
1644
1645         kc, kcerr := keepclient.MakeKeepClient(api)
1646         if kcerr != nil {
1647                 log.Fatalf("%s: %v", containerId, kcerr)
1648         }
1649         kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
1650         kc.Retries = 4
1651
1652         // API version 1.21 corresponds to Docker 1.9, which is currently the
1653         // minimum version we want to support.
1654         docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
1655         dockerClientProxy := ThinDockerClientProxy{Docker: docker}
1656
1657         cr := NewContainerRunner(api, kc, dockerClientProxy, containerId)
1658
1659         if dockererr != nil {
1660                 cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
1661                 cr.checkBrokenNode(dockererr)
1662                 cr.CrunchLog.Close()
1663                 os.Exit(1)
1664         }
1665
1666         cr.statInterval = *statInterval
1667         cr.cgroupRoot = *cgroupRoot
1668         cr.expectCgroupParent = *cgroupParent
1669         cr.enableNetwork = *enableNetwork
1670         cr.networkMode = *networkMode
1671         if *cgroupParentSubsystem != "" {
1672                 p := findCgroup(*cgroupParentSubsystem)
1673                 cr.setCgroupParent = p
1674                 cr.expectCgroupParent = p
1675         }
1676
1677         runerr := cr.Run()
1678
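        // If requested, write a heap profile now that the container has finished.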
1679         if *memprofile != "" {
1680                 f, err := os.Create(*memprofile)
1681                 if err != nil {
1682                         log.Printf("could not create memory profile: %v", err)
1683                 }
1684                 runtime.GC() // get up-to-date statistics
1685                 if err := pprof.WriteHeapProfile(f); err != nil {
1686                         log.Printf("could not write memory profile: %v", err)
1687                 }
1688                 closeerr := f.Close()
1689                 if closeerr != nil {
1690                         log.Printf("error closing memprofile file: %v", closeerr)
1691                 }
1692         }
1693
1694         if runerr != nil {
1695                 log.Fatalf("%s: %v", containerId, runerr)
1696         }
1697 }