// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

import (
        "bytes"
        "context"
        "encoding/json"
        "errors"
        "flag"
        "fmt"
        "io"
        "io/ioutil"
        "log"
        "os"
        "os/exec"
        "os/signal"
        "path"
        "path/filepath"
        "runtime"
        "runtime/pprof"
        "sort"
        "strings"
        "sync"
        "syscall"
        "time"

        "git.curoverse.com/arvados.git/lib/crunchstat"
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
        "git.curoverse.com/arvados.git/sdk/go/manifest"

        dockertypes "github.com/docker/docker/api/types"
        dockercontainer "github.com/docker/docker/api/types/container"
        dockernetwork "github.com/docker/docker/api/types/network"
        dockerclient "github.com/docker/docker/client"
)

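// version is reported in the crunch-run logs. "dev" is a placeholder;
// release builds are presumably expected to overwrite it at link time
// (e.g. go build -ldflags "-X main.version=...").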
var version = "dev"

// IArvadosClient defines the minimal set of Arvados API methods used by crunch-run.
type IArvadosClient interface {
        Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
        Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
        Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
        Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
        CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
        Discovery(key string) (interface{}, error)
}

// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")

// IKeepClient defines the minimal set of Keep API methods used by crunch-run.
type IKeepClient interface {
        PutHB(hash string, buf []byte) (string, int, error)
        ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
        ClearBlockCache()
}

// NewLogWriter is a factory function to create a new log writer.
type NewLogWriter func(name string) io.WriteCloser

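// RunArvMount is a factory function to start an arv-mount process
// with the given command line arguments and API token.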
type RunArvMount func(args []string, tok string) (*exec.Cmd, error)

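// MkTempDir is a factory function to create a temporary directory;
// its signature matches ioutil.TempDir.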
type MkTempDir func(string, string) (string, error)

// ThinDockerClient is the minimal Docker client interface used by crunch-run.
type ThinDockerClient interface {
        ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error)
        ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
                networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
        ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
        ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
        ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
        ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
        ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
        ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
}

// ThinDockerClientProxy is a proxy implementation of ThinDockerClient
// that forwards each request to a real dockerclient.Client.
type ThinDockerClientProxy struct {
        Docker *dockerclient.Client
}

// ContainerAttach invokes dockerclient.Client.ContainerAttach
func (proxy ThinDockerClientProxy) ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) {
        return proxy.Docker.ContainerAttach(ctx, container, options)
}

// ContainerCreate invokes dockerclient.Client.ContainerCreate
func (proxy ThinDockerClientProxy) ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
        networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) {
        return proxy.Docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
}

// ContainerStart invokes dockerclient.Client.ContainerStart
func (proxy ThinDockerClientProxy) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
        return proxy.Docker.ContainerStart(ctx, container, options)
}

// ContainerStop invokes dockerclient.Client.ContainerStop
func (proxy ThinDockerClientProxy) ContainerStop(ctx context.Context, container string, timeout *time.Duration) error {
        return proxy.Docker.ContainerStop(ctx, container, timeout)
}

// ContainerWait invokes dockerclient.Client.ContainerWait
func (proxy ThinDockerClientProxy) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
        return proxy.Docker.ContainerWait(ctx, container, condition)
}

// ImageInspectWithRaw invokes dockerclient.Client.ImageInspectWithRaw
func (proxy ThinDockerClientProxy) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
        return proxy.Docker.ImageInspectWithRaw(ctx, image)
}

// ImageLoad invokes dockerclient.Client.ImageLoad
func (proxy ThinDockerClientProxy) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
        return proxy.Docker.ImageLoad(ctx, input, quiet)
}

// ImageRemove invokes dockerclient.Client.ImageRemove
func (proxy ThinDockerClientProxy) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
        return proxy.Docker.ImageRemove(ctx, image, options)
}

// ContainerRunner is the main stateful struct used for a single execution of a
// container.
type ContainerRunner struct {
        Docker    ThinDockerClient
        ArvClient IArvadosClient
        Kc        IKeepClient
        arvados.Container
        ContainerConfig dockercontainer.Config
        dockercontainer.HostConfig
        token       string
        ContainerID string
        ExitCode    *int
        NewLogWriter
        loggingDone   chan bool
        CrunchLog     *ThrottledLogger
        Stdout        io.WriteCloser
        Stderr        io.WriteCloser
        LogCollection *CollectionWriter
        LogsPDH       *string
        RunArvMount
        MkTempDir
        ArvMount       *exec.Cmd
        ArvMountPoint  string
        HostOutputDir  string
        CleanupTempDir []string
        Binds          []string
        Volumes        map[string]struct{}
        OutputPDH      *string
        SigChan        chan os.Signal
        ArvMountExit   chan error
        finalState     string

        statLogger   io.WriteCloser
        statReporter *crunchstat.Reporter
        statInterval time.Duration
        cgroupRoot   string
        // What we expect the container's cgroup parent to be.
        expectCgroupParent string
        // What we tell docker to use as the container's cgroup
        // parent. Note: Ideally we would use the same field for both
        // expectCgroupParent and setCgroupParent, and just make it
        // default to "docker". However, when using docker < 1.10 with
        // systemd, specifying a non-empty cgroup parent (even the
        // default value "docker") hits a docker bug
        // (https://github.com/docker/docker/issues/17126). Using two
        // separate fields makes it possible to use the "expect cgroup
        // parent to be X" feature even on sites where the "specify
        // cgroup parent" feature breaks.
        setCgroupParent string

        cStateLock sync.Mutex
        cStarted   bool // StartContainer() succeeded
        cCancelled bool // StopContainer() invoked

        enableNetwork string // one of "default" or "always"
        networkMode   string // passed through to HostConfig.NetworkMode
        arvMountLog   *ThrottledLogger
}

// setupSignals sets up signal handling to gracefully terminate the underlying
// Docker container and update state when receiving a TERM, INT or QUIT signal.
func (runner *ContainerRunner) setupSignals() {
        runner.SigChan = make(chan os.Signal, 1)
        signal.Notify(runner.SigChan, syscall.SIGTERM)
        signal.Notify(runner.SigChan, syscall.SIGINT)
        signal.Notify(runner.SigChan, syscall.SIGQUIT)

        go func(sig chan os.Signal) {
                s := <-sig
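                // s is nil if SigChan was closed by stopSignals()
                // instead of delivering a signal.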
                if s != nil {
                        runner.CrunchLog.Printf("Caught signal %v", s)
                }
                runner.stop()
        }(runner.SigChan)
}

// stop the underlying Docker container.
func (runner *ContainerRunner) stop() {
        runner.cStateLock.Lock()
        defer runner.cStateLock.Unlock()
        if runner.cCancelled {
                return
        }
        runner.cCancelled = true
        if runner.cStarted {
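                // Give the container up to 10 seconds to stop before
                // Docker kills it (ContainerStop measures the timeout
                // in whole seconds).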
                timeout := 10 * time.Second
                err := runner.Docker.ContainerStop(context.TODO(), runner.ContainerID, &timeout)
                if err != nil {
                        runner.CrunchLog.Printf("StopContainer failed: %s", err)
                }
                // Suppress multiple calls to stop()
                runner.cStarted = false
        }
}

func (runner *ContainerRunner) stopSignals() {
        if runner.SigChan != nil {
                signal.Stop(runner.SigChan)
                close(runner.SigChan)
        }
}

var errorBlacklist = []string{"Cannot connect to the Docker daemon"}
var brokenNodeHook = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")

func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
        for _, d := range errorBlacklist {
                if strings.Contains(goterr.Error(), d) {
                        runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
                        if *brokenNodeHook == "" {
                                runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
                        } else {
                                runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
                                // Run the operator-provided hook script,
                                // e.g. to take the node out of service.
                                c := exec.Command(*brokenNodeHook)
                                c.Stdout = runner.CrunchLog
                                c.Stderr = runner.CrunchLog
                                err := c.Run()
                                if err != nil {
                                        runner.CrunchLog.Printf("Error running broken node hook: %v", err)
                                }
                        }
                        return true
                }
        }
        return false
}

// LoadImage determines the Docker image ID from the container record and
// checks if it is available in the local Docker image store.  If not, it
// loads the image from Keep.
func (runner *ContainerRunner) LoadImage() (err error) {

        runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

        var collection arvados.Collection
        err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
        if err != nil {
                return fmt.Errorf("While getting container image collection: %v", err)
        }
        mf := manifest.Manifest{Text: collection.ManifestText}
        var img, imageID string
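        // The image collection is expected to contain a single
        // "<image ID>.tar" file; the Docker image ID is the file's
        // base name.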
        for ms := range mf.StreamIter() {
                img = ms.FileStreamSegments[0].Name
                if !strings.HasSuffix(img, ".tar") {
                        return fmt.Errorf("First file in the container image collection does not end in .tar")
                }
                imageID = img[:len(img)-4]
        }

        runner.CrunchLog.Printf("Using Docker image id '%s'", imageID)

        _, _, err = runner.Docker.ImageInspectWithRaw(context.TODO(), imageID)
        if err != nil {
                runner.CrunchLog.Print("Loading Docker image from Keep")

                var readCloser io.ReadCloser
                readCloser, err = runner.Kc.ManifestFileReader(mf, img)
                if err != nil {
                        return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
                }

                response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
                if err != nil {
                        return fmt.Errorf("While loading container image into Docker: %v", err)
                }

                defer response.Body.Close()
                rbody, err := ioutil.ReadAll(response.Body)
                if err != nil {
                        return fmt.Errorf("Reading response to image load: %v", err)
                }
                runner.CrunchLog.Printf("Docker response: %s", rbody)
        } else {
                runner.CrunchLog.Print("Docker image is available")
        }

        runner.ContainerConfig.Image = imageID

        runner.Kc.ClearBlockCache()

        return nil
}

func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
        c = exec.Command("arv-mount", arvMountCmd...)

        // Copy our environment, but override ARVADOS_API_TOKEN with
        // the container auth token.
        c.Env = nil
        for _, s := range os.Environ() {
                if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
                        c.Env = append(c.Env, s)
                }
        }
        c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

        runner.arvMountLog = NewThrottledLogger(runner.NewLogWriter("arv-mount"))
        c.Stdout = runner.arvMountLog
        c.Stderr = runner.arvMountLog

        runner.CrunchLog.Printf("Running %v", c.Args)

        err = c.Start()
        if err != nil {
                return nil, err
        }

        statReadme := make(chan bool)
        runner.ArvMountExit = make(chan error)

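        // Poll until the mount comes up: arv-mount is ready once
        // by_id/README appears under the mount point.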
        keepStatting := true
        go func() {
                for keepStatting {
                        time.Sleep(100 * time.Millisecond)
                        _, statErr := os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
                        if statErr == nil {
                                keepStatting = false
                                statReadme <- true
                        }
                }
                close(statReadme)
        }()

        go func() {
                mnterr := c.Wait()
                if mnterr != nil {
                        runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
                }
                runner.ArvMountExit <- mnterr
                close(runner.ArvMountExit)
        }()

        select {
        case <-statReadme:
                break
        case err := <-runner.ArvMountExit:
                runner.ArvMount = nil
                keepStatting = false
                return nil, err
        }

        return c, nil
}

func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
        if runner.ArvMountPoint == "" {
                runner.ArvMountPoint, err = runner.MkTempDir("", prefix)
        }
        return
}

func (runner *ContainerRunner) SetupMounts() (err error) {
        err = runner.SetupArvMountPoint("keep")
        if err != nil {
                return fmt.Errorf("While creating keep mount temp dir: %v", err)
        }

        pdhOnly := true
        tmpcount := 0
        arvMountCmd := []string{
                "--foreground",
                "--allow-other",
                "--read-write",
                fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}

        if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
                arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
        }

        collectionPaths := []string{}
        runner.Binds = nil
        runner.Volumes = make(map[string]struct{})
        needCertMount := true

        var binds []string
        for bind := range runner.Container.Mounts {
                binds = append(binds, bind)
        }
        sort.Strings(binds)

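        // Process mounts in sorted order so the generated arv-mount
        // command line and Docker bind arguments are deterministic.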
        for _, bind := range binds {
                mnt := runner.Container.Mounts[bind]
                if bind == "stdout" || bind == "stderr" {
                        // Is it a "file" mount kind?
                        if mnt.Kind != "file" {
                                return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind)
                        }

                        // Does path start with OutputPath?
                        prefix := runner.Container.OutputPath
                        if !strings.HasSuffix(prefix, "/") {
                                prefix += "/"
                        }
                        if !strings.HasPrefix(mnt.Path, prefix) {
                                return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
                        }
                }

                if bind == "stdin" {
                        // Is it a "collection" mount kind?
                        if mnt.Kind != "collection" && mnt.Kind != "json" {
                                return fmt.Errorf("Unsupported mount kind '%s' for stdin. Only 'collection' or 'json' are supported.", mnt.Kind)
                        }
                }

                if bind == "/etc/arvados/ca-certificates.crt" {
                        needCertMount = false
                }

                if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
                        if mnt.Kind != "collection" {
                                return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind)
                        }
                }

                switch {
                case mnt.Kind == "collection" && bind != "stdin":
                        var src string
                        if mnt.UUID != "" && mnt.PortableDataHash != "" {
                                return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
                        }
                        if mnt.UUID != "" {
                                if mnt.Writable {
                                        return fmt.Errorf("Writing to existing collections currently not permitted.")
                                }
                                pdhOnly = false
                                src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
                        } else if mnt.PortableDataHash != "" {
                                if mnt.Writable {
                                        return fmt.Errorf("Can never write to a collection specified by portable data hash")
                                }
                                idx := strings.Index(mnt.PortableDataHash, "/")
                                if idx > 0 {
                                        mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
                                        mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
                                        runner.Container.Mounts[bind] = mnt
                                }
                                src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
                                if mnt.Path != "" && mnt.Path != "." {
                                        if strings.HasPrefix(mnt.Path, "./") {
                                                mnt.Path = mnt.Path[2:]
                                        } else if strings.HasPrefix(mnt.Path, "/") {
                                                mnt.Path = mnt.Path[1:]
                                        }
                                        src += "/" + mnt.Path
                                }
                        } else {
                                src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
                                arvMountCmd = append(arvMountCmd, "--mount-tmp")
                                arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
                                tmpcount++
                        }
                        if mnt.Writable {
                                if bind == runner.Container.OutputPath {
                                        runner.HostOutputDir = src
                                } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
                                        return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
                                }
                                runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
                        } else {
                                runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
                        }
                        collectionPaths = append(collectionPaths, src)

                case mnt.Kind == "tmp":
                        var tmpdir string
                        tmpdir, err = runner.MkTempDir("", "")
                        if err != nil {
                                return fmt.Errorf("While creating mount temp dir: %v", err)
                        }
                        st, staterr := os.Stat(tmpdir)
                        if staterr != nil {
                                return fmt.Errorf("While Stat on temp dir: %v", staterr)
                        }
                        err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
                        if err != nil {
                                return fmt.Errorf("While Chmod temp dir: %v", err)
                        }
                        runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
                        runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
                        if bind == runner.Container.OutputPath {
                                runner.HostOutputDir = tmpdir
                        }

                case mnt.Kind == "json":
                        jsondata, err := json.Marshal(mnt.Content)
                        if err != nil {
                                return fmt.Errorf("encoding json data: %v", err)
                        }
                        // Create a tempdir with a single file
                        // (instead of just a tempfile): this way we
                        // can ensure the file is world-readable
                        // inside the container, without having to
                        // make it world-readable on the docker host.
                        tmpdir, err := runner.MkTempDir("", "")
                        if err != nil {
                                return fmt.Errorf("creating temp dir: %v", err)
                        }
                        runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
                        tmpfn := filepath.Join(tmpdir, "mountdata.json")
                        err = ioutil.WriteFile(tmpfn, jsondata, 0644)
                        if err != nil {
                                return fmt.Errorf("writing temp file: %v", err)
                        }
                        runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
                }
        }

        if runner.HostOutputDir == "" {
                return fmt.Errorf("Output path does not correspond to a writable mount point")
        }

        if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
                for _, certfile := range arvadosclient.CertFiles {
                        _, err := os.Stat(certfile)
                        if err == nil {
                                runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
                                break
                        }
                }
        }

        if pdhOnly {
                arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
        } else {
                arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
        }
        arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)

        token, err := runner.ContainerToken()
        if err != nil {
                return fmt.Errorf("could not get container token: %s", err)
        }

        runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
        if err != nil {
                return fmt.Errorf("While trying to start arv-mount: %v", err)
        }

        for _, p := range collectionPaths {
                _, err = os.Stat(p)
                if err != nil {
                        return fmt.Errorf("While checking that input files exist: %v", err)
                }
        }

        return nil
}

func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
        // Handle docker log protocol
        // https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container

        header := make([]byte, 8)
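        // Each frame begins with an 8-byte header: byte 0 is the
        // stream type (1 = stdout, 2 = stderr) and bytes 4-7 are the
        // payload length as a big-endian uint32.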
        for {
                _, readerr := io.ReadAtLeast(containerReader, header, 8)

                if readerr == nil {
                        readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
                        if header[0] == 1 {
                                // stdout
                                _, readerr = io.CopyN(runner.Stdout, containerReader, readsize)
                        } else {
                                // stderr
                                _, readerr = io.CopyN(runner.Stderr, containerReader, readsize)
                        }
                }

                if readerr != nil {
                        if readerr != io.EOF {
                                runner.CrunchLog.Printf("While reading docker logs: %v", readerr)
                        }

                        closeerr := runner.Stdout.Close()
                        if closeerr != nil {
                                runner.CrunchLog.Printf("While closing stdout logs: %v", closeerr)
                        }

                        closeerr = runner.Stderr.Close()
                        if closeerr != nil {
                                runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr)
                        }

                        if runner.statReporter != nil {
                                runner.statReporter.Stop()
                                closeerr = runner.statLogger.Close()
                                if closeerr != nil {
                                        runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr)
                                }
                        }

                        runner.loggingDone <- true
                        close(runner.loggingDone)
                        return
                }
        }
}

func (runner *ContainerRunner) StartCrunchstat() {
        runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
        runner.statReporter = &crunchstat.Reporter{
                CID:          runner.ContainerID,
                Logger:       log.New(runner.statLogger, "", 0),
                CgroupParent: runner.expectCgroupParent,
                CgroupRoot:   runner.cgroupRoot,
                PollPeriod:   runner.statInterval,
        }
        runner.statReporter.Start()
}

type infoCommand struct {
        label string
        cmd   []string
}

// LogNodeInfo gathers node information and stores it in the log for
// debugging purposes.
func (runner *ContainerRunner) LogNodeInfo() (err error) {
        w := runner.NewLogWriter("node-info")
        logger := log.New(w, "node-info", 0)

        commands := []infoCommand{
                {
                        label: "Host Information",
                        cmd:   []string{"uname", "-a"},
                },
                {
                        label: "CPU Information",
                        cmd:   []string{"cat", "/proc/cpuinfo"},
                },
                {
                        label: "Memory Information",
                        cmd:   []string{"cat", "/proc/meminfo"},
                },
                {
                        label: "Disk Space",
                        cmd:   []string{"df", "-m", "/", os.TempDir()},
                },
                {
                        label: "Disk INodes",
                        cmd:   []string{"df", "-i", "/", os.TempDir()},
                },
        }

        // Run commands with informational output to be logged.
        var out []byte
        for _, command := range commands {
                out, err = exec.Command(command.cmd[0], command.cmd[1:]...).CombinedOutput()
                if err != nil {
                        return fmt.Errorf("While running command %q: %v",
                                command.cmd, err)
                }
                logger.Println(command.label)
                for _, line := range strings.Split(string(out), "\n") {
                        logger.Println(" ", line)
                }
        }

        err = w.Close()
        if err != nil {
                return fmt.Errorf("While closing node-info logs: %v", err)
        }
        return nil
}

// LogContainerRecord gets and saves the raw JSON container record from the API server
func (runner *ContainerRunner) LogContainerRecord() (err error) {
        w := &ArvLogWriter{
                ArvClient:     runner.ArvClient,
                UUID:          runner.Container.UUID,
                loggingStream: "container",
                writeCloser:   runner.LogCollection.Open("container.json"),
        }

        // Get Container record JSON from the API Server
        reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
        if err != nil {
                return fmt.Errorf("While retrieving container record from the API server: %v", err)
        }
        defer reader.Close()

        dec := json.NewDecoder(reader)
        dec.UseNumber()
        var cr map[string]interface{}
        if err = dec.Decode(&cr); err != nil {
                return fmt.Errorf("While decoding the container record JSON response: %v", err)
        }
        // Re-encode it using indentation to improve readability
        enc := json.NewEncoder(w)
        enc.SetIndent("", "    ")
        if err = enc.Encode(cr); err != nil {
                return fmt.Errorf("While logging the JSON container record: %v", err)
        }
        err = w.Close()
        if err != nil {
                return fmt.Errorf("While closing container.json log: %v", err)
        }
        return nil
}

// AttachStreams connects the docker container's stdin, stdout and stderr
// to the Arvados logger, which writes to Keep and the API server logs table.
func (runner *ContainerRunner) AttachStreams() (err error) {

        runner.CrunchLog.Print("Attaching container streams")

        // If stdin mount is provided, attach it to the docker container
        var stdinRdr arvados.File
        var stdinJson []byte
        if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
                if stdinMnt.Kind == "collection" {
                        var stdinColl arvados.Collection
                        collId := stdinMnt.UUID
                        if collId == "" {
                                collId = stdinMnt.PortableDataHash
                        }
                        err = runner.ArvClient.Get("collections", collId, nil, &stdinColl)
                        if err != nil {
                                return fmt.Errorf("While getting stdin collection: %v", err)
                        }

                        stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path)
                        if os.IsNotExist(err) {
                                return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
                        } else if err != nil {
                                return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
                        }
                } else if stdinMnt.Kind == "json" {
                        stdinJson, err = json.Marshal(stdinMnt.Content)
                        if err != nil {
                                return fmt.Errorf("While encoding stdin json data: %v", err)
                        }
                }
        }

        stdinUsed := stdinRdr != nil || len(stdinJson) != 0
        response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID,
                dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
        if err != nil {
                return fmt.Errorf("While attaching container stdout/stderr streams: %v", err)
        }

        runner.loggingDone = make(chan bool)

        if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
                stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path)
                if err != nil {
                        return err
                }
                runner.Stdout = stdoutFile
        } else {
                runner.Stdout = NewThrottledLogger(runner.NewLogWriter("stdout"))
        }

        if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
                stderrFile, err := runner.getStdoutFile(stderrMnt.Path)
                if err != nil {
                        return err
                }
                runner.Stderr = stderrFile
        } else {
                runner.Stderr = NewThrottledLogger(runner.NewLogWriter("stderr"))
        }

        if stdinRdr != nil {
                go func() {
                        _, err := io.Copy(response.Conn, stdinRdr)
                        if err != nil {
                                runner.CrunchLog.Printf("While writing stdin collection to docker container %q", err)
                                runner.stop()
                        }
                        stdinRdr.Close()
                        response.CloseWrite()
                }()
        } else if len(stdinJson) != 0 {
                go func() {
                        _, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
                        if err != nil {
                                runner.CrunchLog.Printf("While writing stdin json to docker container %q", err)
                                runner.stop()
                        }
                        response.CloseWrite()
                }()
        }

        go runner.ProcessDockerAttach(response.Reader)

        return nil
}

func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
        stdoutPath := mntPath[len(runner.Container.OutputPath):]
        index := strings.LastIndex(stdoutPath, "/")
        if index > 0 {
                subdirs := stdoutPath[:index]
                if subdirs != "" {
                        st, err := os.Stat(runner.HostOutputDir)
                        if err != nil {
                                return nil, fmt.Errorf("While Stat on temp dir: %v", err)
                        }
                        stdoutDir := filepath.Join(runner.HostOutputDir, subdirs)
                        err = os.MkdirAll(stdoutDir, st.Mode()|os.ModeSetgid|0777)
                        if err != nil {
                                return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutDir, err)
                        }
                }
        }
        stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
        if err != nil {
                return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
        }

        return stdoutFile, nil
}

// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer() error {
        runner.CrunchLog.Print("Creating Docker container")

        runner.ContainerConfig.Cmd = runner.Container.Command
        if runner.Container.Cwd != "." {
                runner.ContainerConfig.WorkingDir = runner.Container.Cwd
        }

        for k, v := range runner.Container.Environment {
                runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v)
        }

        runner.ContainerConfig.Volumes = runner.Volumes

        runner.HostConfig = dockercontainer.HostConfig{
                Binds: runner.Binds,
                LogConfig: dockercontainer.LogConfig{
                        Type: "none",
                },
                Resources: dockercontainer.Resources{
                        CgroupParent: runner.setCgroupParent,
                },
        }

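        // Decide on network access: containers that request API access
        // get the configured network mode; otherwise the container only
        // gets network access when enableNetwork is "always".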
        if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
                tok, err := runner.ContainerToken()
                if err != nil {
                        return err
                }
                runner.ContainerConfig.Env = append(runner.ContainerConfig.Env,
                        "ARVADOS_API_TOKEN="+tok,
                        "ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"),
                        "ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"),
                )
                runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
        } else {
                if runner.enableNetwork == "always" {
                        runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
                } else {
                        runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
                }
        }

        _, stdinUsed := runner.Container.Mounts["stdin"]
        runner.ContainerConfig.OpenStdin = stdinUsed
        runner.ContainerConfig.StdinOnce = stdinUsed
        runner.ContainerConfig.AttachStdin = stdinUsed
        runner.ContainerConfig.AttachStdout = true
        runner.ContainerConfig.AttachStderr = true

        createdBody, err := runner.Docker.ContainerCreate(context.TODO(), &runner.ContainerConfig, &runner.HostConfig, nil, runner.Container.UUID)
        if err != nil {
                return fmt.Errorf("While creating container: %v", err)
        }

        runner.ContainerID = createdBody.ID

        return runner.AttachStreams()
}

// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
        runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID)
        runner.cStateLock.Lock()
        defer runner.cStateLock.Unlock()
        if runner.cCancelled {
                return ErrCancelled
        }
        err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
                dockertypes.ContainerStartOptions{})
        if err != nil {
                var advice string
                if strings.Contains(err.Error(), "no such file or directory") {
                        advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
                }
                return fmt.Errorf("could not start container: %v%s", err, advice)
        }
        runner.cStarted = true
        return nil
}

// WaitFinish waits for the container to terminate, captures the exit code,
// and closes the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() (err error) {
        runner.CrunchLog.Print("Waiting for container to finish")

        waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, "not-running")

        go func() {
                <-runner.ArvMountExit
                if runner.cStarted {
                        runner.CrunchLog.Printf("arv-mount exited while container is still running.  Stopping container.")
                        runner.stop()
                }
        }()

        var waitBody dockercontainer.ContainerWaitOKBody
        select {
        case waitBody = <-waitOk:
        case err = <-waitErr:
        }

        // Container isn't running any more
        runner.cStarted = false

        if err != nil {
                return fmt.Errorf("container wait: %v", err)
        }

        runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
        code := int(waitBody.StatusCode)
        runner.ExitCode = &code

        // wait for stdout/stderr to complete
        <-runner.loggingDone

        return nil
}

var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")

func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
        // Follow symlinks if necessary
        info = startinfo
        tgt = path
        readlinktgt = ""
        nextlink := path
        for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
                if followed >= limitFollowSymlinks {
                        // Got stuck in a loop or just a pathological number of links, give up.
                        err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
                        return
                }

                readlinktgt, err = os.Readlink(nextlink)
                if err != nil {
                        return
                }

                tgt = readlinktgt
                if !strings.HasPrefix(tgt, "/") {
                        // Relative symlink, resolve it to host path
                        tgt = filepath.Join(filepath.Dir(path), tgt)
                }
                if strings.HasPrefix(tgt, runner.Container.OutputPath+"/") && !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
                        // Absolute symlink to container output path, adjust it to host output path.
                        tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
                }
                if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
                        // After dereferencing, symlink target must either be
                        // within output directory, or must point to a
                        // collection mount.
                        err = ErrNotInOutputDir
                        return
                }

                info, err = os.Lstat(tgt)
                if err != nil {
                        // tgt does not exist or cannot be stat'd
                        err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
                                path[len(runner.HostOutputDir):], readlinktgt, err)
                        return
                }

                nextlink = tgt
        }

        return
}

var limitFollowSymlinks = 10

// UploadOutputFile uploads files within the output directory, with special
// handling for symlinks. If a symlink leads to a keep mount, copy the
// manifest text from the keep mount into the output manifestText.  Whether
// symlinks are relative or absolute, every symlink target (even targets that
// are symlinks themselves) must point to a path in either the output
// directory or a collection mount.
//
// Assumes initial value of "path" is absolute, and located within runner.HostOutputDir.
func (runner *ContainerRunner) UploadOutputFile(
        path string,
        info os.FileInfo,
        infoerr error,
        binds []string,
        walkUpload *WalkUpload,
        relocateFrom string,
        relocateTo string,
        followed int) (manifestText string, err error) {

        if info.Mode().IsDir() {
                return
        }

        if infoerr != nil {
                return "", infoerr
        }

        if followed >= limitFollowSymlinks {
                // Got stuck in a loop or just a pathological number of
                // directory links, give up.
                err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
                return
        }

        // When following symlinks, the source path may need to be logically
        // relocated to some other path within the output collection.  Remove
        // the relocateFrom prefix and replace it with relocateTo.
        relocated := relocateTo + path[len(relocateFrom):]

        tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
        if derefErr != nil && derefErr != ErrNotInOutputDir {
                return "", derefErr
        }

        // go through mounts and try reverse map to collection reference
        for _, bind := range binds {
                mnt := runner.Container.Mounts[bind]
                if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
                        // get path relative to bind
                        targetSuffix := tgt[len(bind):]

                        // Copy mount and adjust the path to add path relative to the bind
                        adjustedMount := mnt
                        adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)

                        // Terminates in this keep mount, so add the
                        // manifest text at appropriate location.
                        outputSuffix := path[len(runner.HostOutputDir):]
                        manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
                        return
                }
        }

        // If target is not a collection mount, it must be located within the
        // output directory, otherwise it is an error.
        if derefErr == ErrNotInOutputDir {
                err = fmt.Errorf("Symlink in output %q points to invalid location %q, must point to path within the output directory.",
                        path[len(runner.HostOutputDir):], readlinktgt)
                return
        }

        if info.Mode().IsRegular() {
                return "", walkUpload.UploadFile(relocated, tgt)
        }

        if info.Mode().IsDir() {
                // Symlink leads to directory.  Walk() doesn't follow
                // directory symlinks, so we walk the target directory
                // instead.  Within the walk, file paths are relocated
                // so they appear under the original symlink path.
                err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
                        var m string
                        m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
                                binds, walkUpload, tgt, relocated, followed+1)
                        if walkerr == nil {
                                manifestText = manifestText + m
                        }
                        return walkerr
                })
                return
        }

        return
}

// CaptureOutput saves the container's output as a Keep collection and
// records the resulting portable data hash in runner.OutputPDH.
func (runner *ContainerRunner) CaptureOutput() error {
        if runner.finalState != "Complete" {
                return nil
        }

        if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
                // Output may have been set directly by the container, so
                // refresh the container record to check.
                err := runner.ArvClient.Get("containers", runner.Container.UUID,
                        nil, &runner.Container)
                if err != nil {
                        return err
                }
                if runner.Container.Output != "" {
                        // Container output is already set.
                        runner.OutputPDH = &runner.Container.Output
                        return nil
                }
        }

        if runner.HostOutputDir == "" {
                return nil
        }

        _, err := os.Stat(runner.HostOutputDir)
        if err != nil {
                return fmt.Errorf("While checking host output path: %v", err)
        }

        // Pre-populate output from the configured mount points
        var binds []string
        for bind, mnt := range runner.Container.Mounts {
                if mnt.Kind == "collection" {
                        binds = append(binds, bind)
                }
        }
        sort.Strings(binds)

        var manifestText string

        collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
        _, err = os.Stat(collectionMetafile)
        if err != nil {
                // Regular directory

                cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
                walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)

                var m string
                err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
                        m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
                        if err == nil {
                                manifestText = manifestText + m
                        }
                        return err
                })

                cw.EndUpload(walkUpload)

                if err != nil {
                        return fmt.Errorf("While uploading output files: %v", err)
                }

                m, err = cw.ManifestText()
                manifestText = manifestText + m
                if err != nil {
                        return fmt.Errorf("While uploading output files: %v", err)
                }
        } else {
                // FUSE mount directory
                file, openerr := os.Open(collectionMetafile)
                if openerr != nil {
                        return fmt.Errorf("While opening FUSE metafile: %v", openerr)
                }
                defer file.Close()

                var rec arvados.Collection
                err = json.NewDecoder(file).Decode(&rec)
                if err != nil {
                        return fmt.Errorf("While reading FUSE metafile: %v", err)
                }
                manifestText = rec.ManifestText
        }

        for _, bind := range binds {
                mnt := runner.Container.Mounts[bind]

                bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath)

                if bindSuffix == bind || len(bindSuffix) <= 0 {
                        // either does not start with OutputPath or is OutputPath itself
                        continue
                }

                if mnt.ExcludeFromOutput {
                        continue
                }

                // append to manifest_text
                m, err := runner.getCollectionManifestForPath(mnt, bindSuffix)
                if err != nil {
                        return err
                }

                manifestText = manifestText + m
        }

1222         // Save output
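	// Note: the collection is created pre-trashed (is_trashed: true
	// below). Presumably, once the output PDH is stored in the
	// container record, the container -- not this collection record --
	// is what keeps the output alive, so an output collection that is
	// never attached to a finalized container can simply be garbage
	// collected.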
	var response arvados.Collection
	mft := manifest.Manifest{Text: manifestText}
	manifestText = mft.Extract(".", ".").Text
	err = runner.ArvClient.Create("collections",
		arvadosclient.Dict{
			"ensure_unique_name": true,
			"collection": arvadosclient.Dict{
				"is_trashed":    true,
				"name":          "output for " + runner.Container.UUID,
				"manifest_text": manifestText}},
		&response)
	if err != nil {
		return fmt.Errorf("While creating output collection: %v", err)
	}
	runner.OutputPDH = &response.PortableDataHash
	return nil
}

var outputCollections = make(map[string]arvados.Collection)

// getCollectionManifestForPath fetches the collection identified by
// mnt.PortableDataHash and returns the manifest_text fragment
// corresponding to the specified mnt.Path, after making any required
// updates:
//
//   If mnt.Path is not specified, return the entire manifest_text
//   after replacing any "." with bindSuffix.
//
//   If mnt.Path corresponds to one stream, return the manifest_text
//   for that stream after replacing that stream name with bindSuffix.
//
//   Otherwise, check whether a filename in one of the streams is being
//   sought. Return the manifest_text for that stream after replacing
//   the stream name with bindSuffix minus the last word, and the file
//   name with the last word of bindSuffix.
//
// Allowed path examples:
//   "path":"/"
//   "path":"/subdir1"
//   "path":"/subdir1/subdir2"
//   "path":"/subdir/filename" etc.
func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) {
	collection := outputCollections[mnt.PortableDataHash]
	if collection.PortableDataHash == "" {
		err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection)
		if err != nil {
			return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err)
		}
		outputCollections[mnt.PortableDataHash] = collection
	}

	if collection.ManifestText == "" {
		runner.CrunchLog.Printf("No manifest text for collection %v", collection.PortableDataHash)
		return "", nil
	}

	mft := manifest.Manifest{Text: collection.ManifestText}
	extracted := mft.Extract(mnt.Path, bindSuffix)
	if extracted.Err != nil {
		return "", fmt.Errorf("Error parsing manifest for %v: %v", mnt.PortableDataHash, extracted.Err.Error())
	}
	return extracted.Text, nil
}
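
// For example (hypothetical manifest, for illustration only): given a
// collection whose manifest_text is
//
//   ./subdir1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt
//
// a mount of that collection's "/subdir1" at "<OutputPath>/data" would
// be extracted with bindSuffix "/data", yielding the renamed stream
//
//   ./data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt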

// CleanupDirs unmounts the keep mount and removes the temporary
// directories created for the container run.
func (runner *ContainerRunner) CleanupDirs() {
	if runner.ArvMount != nil {
		var delay int64 = 8
		umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
		umount.Stdout = runner.CrunchLog
		umount.Stderr = runner.CrunchLog
		runner.CrunchLog.Printf("Running %v", umount.Args)
		umnterr := umount.Start()

		if umnterr != nil {
			runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
		} else {
			// If arv-mount --unmount gets stuck for any reason, we
			// don't want to wait for it forever.  Do Wait() in a goroutine
			// so it doesn't block crunch-run.
			umountExit := make(chan error)
			go func() {
				mnterr := umount.Wait()
				if mnterr != nil {
					runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
				}
				umountExit <- mnterr
			}()

			for again := true; again; {
				again = false
				select {
				case <-umountExit:
					umount = nil
					again = true
				case <-runner.ArvMountExit:
					// arv-mount exited; stop waiting. (A bare
					// "break" here would only exit the select, so
					// we let the loop condition end the loop.)
				case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
					runner.CrunchLog.Printf("Timed out waiting for unmount")
					if umount != nil {
						umount.Process.Kill()
					}
					runner.ArvMount.Process.Kill()
				}
			}
		}
	}

	if runner.ArvMountPoint != "" {
		if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
			runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
		}
	}

	for _, tmpdir := range runner.CleanupTempDir {
		if rmerr := os.RemoveAll(tmpdir); rmerr != nil {
			runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", tmpdir, rmerr)
		}
	}
}
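
// For reference, the unmount command assembled above looks like this
// (with a hypothetical mount point):
//
//   arv-mount --unmount-timeout=8 --unmount /tmp/keep123456789
//
// arv-mount's own unmount timeout is set one second shorter than the
// delay crunch-run waits before killing the processes itself.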

// CommitLogs posts the collection containing the final container logs.
func (runner *ContainerRunner) CommitLogs() error {
	runner.CrunchLog.Print(runner.finalState)

	if runner.arvMountLog != nil {
		runner.arvMountLog.Close()
	}
	runner.CrunchLog.Close()

	// Closing CrunchLog above allows the log data to be committed to
	// Keep at this point, but re-open the crunch log with ArvClient in
	// case there are any further errors (such as failing to write the
	// log to Keep!) while shutting down.
	runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
		UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
	runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)

	if runner.LogsPDH != nil {
		// If we have already assigned something to LogsPDH,
		// we must be closing the re-opened log, which won't
		// end up getting attached to the container record and
		// therefore doesn't need to be saved as a collection
		// -- it exists only to send logs to other channels.
		return nil
	}

	mt, err := runner.LogCollection.ManifestText()
	if err != nil {
		return fmt.Errorf("While creating log manifest: %v", err)
	}

	var response arvados.Collection
	err = runner.ArvClient.Create("collections",
		arvadosclient.Dict{
			"ensure_unique_name": true,
			"collection": arvadosclient.Dict{
				"is_trashed":    true,
				"name":          "logs for " + runner.Container.UUID,
				"manifest_text": mt}},
		&response)
	if err != nil {
		return fmt.Errorf("While creating log collection: %v", err)
	}
	runner.LogsPDH = &response.PortableDataHash
	return nil
}

// UpdateContainerRunning updates the container state to "Running"
func (runner *ContainerRunner) UpdateContainerRunning() error {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	return runner.ArvClient.Update("containers", runner.Container.UUID,
		arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
}

// ContainerToken returns the api_token the container (and any
// arv-mount processes) are allowed to use.
func (runner *ContainerRunner) ContainerToken() (string, error) {
	if runner.token != "" {
		return runner.token, nil
	}

	var auth arvados.APIClientAuthorization
	err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
	if err != nil {
		return "", err
	}
	runner.token = auth.APIToken
	return runner.token, nil
}
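
// The Call above corresponds to the API endpoint
//
//   GET /arvados/v1/containers/{uuid}/auth
//
// which returns the API client authorization record whose api_token
// field is the token the container runs with.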

// UpdateContainerFinal updates the container record state on the API
// server to "Complete" or "Cancelled"
func (runner *ContainerRunner) UpdateContainerFinal() error {
	update := arvadosclient.Dict{}
	update["state"] = runner.finalState
	if runner.LogsPDH != nil {
		update["log"] = *runner.LogsPDH
	}
	if runner.finalState == "Complete" {
		if runner.ExitCode != nil {
			update["exit_code"] = *runner.ExitCode
		}
		if runner.OutputPDH != nil {
			update["output"] = *runner.OutputPDH
		}
	}
	return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
}
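
// For illustration (hypothetical values): a successful run results in
// an update along the lines of
//
//   {"container": {"state": "Complete", "log": "<logs PDH>",
//                  "exit_code": 0, "output": "<output PDH>"}}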

// IsCancelled returns the value of Cancelled, with goroutine safety.
func (runner *ContainerRunner) IsCancelled() bool {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	return runner.cCancelled
}

// NewArvLogWriter creates an ArvLogWriter
func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
	return &ArvLogWriter{
		ArvClient:     runner.ArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: name,
		writeCloser:   runner.LogCollection.Open(name + ".txt")}
}
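
// Each log stream is stored as "<name>.txt" in the log collection; the
// "crunch-run" stream used elsewhere in this file, for example, ends up
// as crunch-run.txt.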

// Run runs the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
	runner.CrunchLog.Printf("crunch-run %s started", version)
	runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)

	hostname, hosterr := os.Hostname()
	if hosterr != nil {
		runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
	} else {
		runner.CrunchLog.Printf("Executing on host '%s'", hostname)
	}

	runner.finalState = "Queued"

	defer func() {
		runner.stopSignals()
		runner.CleanupDirs()

		runner.CrunchLog.Printf("crunch-run finished")
		runner.CrunchLog.Close()
	}()

	defer func() {
		// checkErr prints e (unless it's nil) and sets err to
		// e (unless err is already non-nil). Thus, if err
		// hasn't already been assigned when Run() returns,
		// this cleanup func will cause Run() to return the
		// first non-nil error that is passed to checkErr().
		checkErr := func(e error) {
			if e == nil {
				return
			}
			runner.CrunchLog.Print(e)
			if err == nil {
				err = e
			}
			if runner.finalState == "Complete" {
				// There was an error in the finalization.
				runner.finalState = "Cancelled"
			}
		}

		// Log the error encountered in Run(), if any
		checkErr(err)

		if runner.finalState == "Queued" {
			runner.UpdateContainerFinal()
			return
		}

		if runner.IsCancelled() {
			runner.finalState = "Cancelled"
			// but don't return yet -- we still want to
			// capture partial output and write logs
		}

		checkErr(runner.CaptureOutput())
		checkErr(runner.CommitLogs())
		checkErr(runner.UpdateContainerFinal())
	}()

	err = runner.fetchContainerRecord()
	if err != nil {
		return
	}

	// setup signal handling
	runner.setupSignals()

	// check for and/or load image
	err = runner.LoadImage()
	if err != nil {
		if !runner.checkBrokenNode(err) {
			// Failed to load image but not due to a "broken node"
			// condition, probably user error.
			runner.finalState = "Cancelled"
		}
		err = fmt.Errorf("While loading container image: %v", err)
		return
	}

	// set up FUSE mount and binds
	err = runner.SetupMounts()
	if err != nil {
		runner.finalState = "Cancelled"
		err = fmt.Errorf("While setting up mounts: %v", err)
		return
	}

	err = runner.CreateContainer()
	if err != nil {
		return
	}

	// Gather and record node information
	err = runner.LogNodeInfo()
	if err != nil {
		return
	}
	// Save container.json record in the log collection
	err = runner.LogContainerRecord()
	if err != nil {
		return
	}

	if runner.IsCancelled() {
		return
	}

	err = runner.UpdateContainerRunning()
	if err != nil {
		return
	}
	// From here on, a failure that interrupts the run should leave the
	// container record in state "Cancelled" rather than "Queued".
	runner.finalState = "Cancelled"

	runner.StartCrunchstat()

	err = runner.StartContainer()
	if err != nil {
		runner.checkBrokenNode(err)
		return
	}

	err = runner.WaitFinish()
	if err == nil {
		runner.finalState = "Complete"
	}
	return
}
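
// To summarize the state handling above: at the deferred cleanup, the
// container record is finalized as
//
//   "Queued"    -- Run() failed before the record reached "Running"
//                  (and the failure was not attributed to the container),
//   "Cancelled" -- the user cancelled the container, setup failed in a
//                  way attributable to the container, or anything failed
//                  after the record reached "Running",
//   "Complete"  -- WaitFinish() returned without error.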

// fetchContainerRecord fetches the current container record (uuid =
// runner.Container.UUID) into runner.Container.
func (runner *ContainerRunner) fetchContainerRecord() error {
	reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
	if err != nil {
		return fmt.Errorf("error fetching container record: %v", err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	// UseNumber() decodes JSON numbers as json.Number rather than
	// float64, presumably to avoid losing integer precision in any
	// loosely-typed fields of the container record.
	dec.UseNumber()
	err = dec.Decode(&runner.Container)
	if err != nil {
		return fmt.Errorf("error decoding container record: %v", err)
	}
	return nil
}
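
// For a hypothetical UUID, the CallRaw above issues
//
//   GET /arvados/v1/containers/zzzzz-dz642-zzzzzzzzzzzzzzz
//
// and the raw JSON response body is decoded straight into
// runner.Container.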

// NewContainerRunner creates a new container runner.
func NewContainerRunner(api IArvadosClient,
	kc IKeepClient,
	docker ThinDockerClient,
	containerUUID string) *ContainerRunner {

	cr := &ContainerRunner{ArvClient: api, Kc: kc, Docker: docker}
	cr.NewLogWriter = cr.NewArvLogWriter
	cr.RunArvMount = cr.ArvMountCmd
	cr.MkTempDir = ioutil.TempDir
	cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
	cr.Container.UUID = containerUUID
	cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
	cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)

	loadLogThrottleParams(api)

	return cr
}
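
// A minimal usage sketch (hypothetical UUID; error handling elided),
// mirroring what main() does below:
//
//   api, _ := arvadosclient.MakeArvadosClient()
//   kc, _ := keepclient.MakeKeepClient(api)
//   docker, _ := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
//   cr := NewContainerRunner(api, kc, ThinDockerClientProxy{Docker: docker}, "zzzzz-dz642-zzzzzzzzzzzzzzz")
//   err := cr.Run()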

func main() {
	statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
	cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
	cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
	cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
	caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
	enableNetwork := flag.String("container-enable-networking", "default",
		`Specify if networking should be enabled for container.  One of 'default', 'always':
	default: only enable networking if container requests it.
	always:  containers always have networking enabled
	`)
	networkMode := flag.String("container-network-mode", "default",
		`Set networking mode for container.  Corresponds to Docker network mode (--net).
	`)
	memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
	getVersion := flag.Bool("version", false, "Print version information and exit.")
	flag.Parse()

	// Print version information if requested
	if *getVersion {
		fmt.Printf("crunch-run %s\n", version)
		return
	}

	log.Printf("crunch-run %s started", version)

	containerId := flag.Arg(0)

	if *caCertsPath != "" {
		arvadosclient.CertFiles = []string{*caCertsPath}
	}

	api, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Fatalf("%s: %v", containerId, err)
	}
	api.Retries = 8

	kc, kcerr := keepclient.MakeKeepClient(api)
	if kcerr != nil {
		log.Fatalf("%s: %v", containerId, kcerr)
	}
	kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
	kc.Retries = 4

	// API version 1.21 corresponds to Docker 1.9, which is currently the
	// minimum version we want to support.
	docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
	dockerClientProxy := ThinDockerClientProxy{Docker: docker}

	cr := NewContainerRunner(api, kc, dockerClientProxy, containerId)

	if dockererr != nil {
		cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
		cr.checkBrokenNode(dockererr)
		cr.CrunchLog.Close()
		os.Exit(1)
	}

	cr.statInterval = *statInterval
	cr.cgroupRoot = *cgroupRoot
	cr.expectCgroupParent = *cgroupParent
	cr.enableNetwork = *enableNetwork
	cr.networkMode = *networkMode
	if *cgroupParentSubsystem != "" {
		p := findCgroup(*cgroupParentSubsystem)
		cr.setCgroupParent = p
		cr.expectCgroupParent = p
	}

	runerr := cr.Run()

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Printf("could not create memory profile: %v", err)
		} else {
			runtime.GC() // get up-to-date statistics
			if err := pprof.WriteHeapProfile(f); err != nil {
				log.Printf("could not write memory profile: %v", err)
			}
			if closeerr := f.Close(); closeerr != nil {
				log.Printf("closing memprofile file: %v", closeerr)
			}
		}
	}

	if runerr != nil {
		log.Fatalf("%s: %v", containerId, runerr)
	}
}
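
// Example invocation (hypothetical container UUID and flag values):
//
//   crunch-run -container-enable-networking=always \
//           -memprofile=/tmp/crunch-run.prof \
//           zzzzz-dz642-zzzzzzzzzzzzzzz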