13134: Get secret_mounts from separate API endpoint. Update tests.
arvados.git: services/crunch-run/crunchrun.go
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"os/signal"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"runtime/pprof"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"git.curoverse.com/arvados.git/lib/crunchstat"
	"git.curoverse.com/arvados.git/sdk/go/arvados"
	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
	"git.curoverse.com/arvados.git/sdk/go/manifest"
	"golang.org/x/net/context"

	dockertypes "github.com/docker/docker/api/types"
	dockercontainer "github.com/docker/docker/api/types/container"
	dockernetwork "github.com/docker/docker/api/types/network"
	dockerclient "github.com/docker/docker/client"
)

var version = "dev"

// IArvadosClient is the minimal set of Arvados API methods used by crunch-run.
type IArvadosClient interface {
	Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
	Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
	CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
	Discovery(key string) (interface{}, error)
}

// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")

// IKeepClient is the minimal set of Keep API methods used by crunch-run.
type IKeepClient interface {
	PutHB(hash string, buf []byte) (string, int, error)
	ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
	ClearBlockCache()
}

// NewLogWriter is a factory function to create a new log writer.
type NewLogWriter func(name string) io.WriteCloser

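// RunArvMount is a factory function to start an arv-mount command and
// return the running exec.Cmd.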
type RunArvMount func(args []string, tok string) (*exec.Cmd, error)

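// MkTempDir is a factory function to create a new temporary directory.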
type MkTempDir func(string, string) (string, error)

// ThinDockerClient is the minimal Docker client interface used by crunch-run.
type ThinDockerClient interface {
	ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error)
	ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
		networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
	ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
	ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
	ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
	ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
	ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
	ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
}

// ContainerRunner is the main stateful struct used for a single execution of a
// container.
type ContainerRunner struct {
	Docker    ThinDockerClient
	ArvClient IArvadosClient
	Kc        IKeepClient
	arvados.Container
	ContainerConfig dockercontainer.Config
	dockercontainer.HostConfig
	token       string
	ContainerID string
	ExitCode    *int
	NewLogWriter
	loggingDone   chan bool
	CrunchLog     *ThrottledLogger
	Stdout        io.WriteCloser
	Stderr        io.WriteCloser
	LogCollection *CollectionWriter
	LogsPDH       *string
	RunArvMount
	MkTempDir
	ArvMount      *exec.Cmd
	ArvMountPoint string
	HostOutputDir string
	Binds         []string
	Volumes       map[string]struct{}
	OutputPDH     *string
	SigChan       chan os.Signal
	ArvMountExit  chan error
	SecretMounts  map[string]arvados.Mount
	finalState    string
	parentTemp    string

	statLogger       io.WriteCloser
	statReporter     *crunchstat.Reporter
	hoststatLogger   io.WriteCloser
	hoststatReporter *crunchstat.Reporter
	statInterval     time.Duration
	cgroupRoot       string
	// What we expect the container's cgroup parent to be.
	expectCgroupParent string
	// What we tell docker to use as the container's cgroup
	// parent. Note: Ideally we would use the same field for both
	// expectCgroupParent and setCgroupParent, and just make it
	// default to "docker". However, when using docker < 1.10 with
	// systemd, specifying a non-empty cgroup parent (even the
	// default value "docker") hits a docker bug
	// (https://github.com/docker/docker/issues/17126). Using two
	// separate fields makes it possible to use the "expect cgroup
	// parent to be X" feature even on sites where the "specify
	// cgroup parent" feature breaks.
	setCgroupParent string

	cStateLock sync.Mutex
	cCancelled bool // StopContainer() invoked

	enableNetwork string // one of "default" or "always"
	networkMode   string // passed through to HostConfig.NetworkMode
	arvMountLog   *ThrottledLogger
}

// setupSignals sets up signal handling to gracefully terminate the underlying
// Docker container and update state when receiving a TERM, INT or QUIT signal.
func (runner *ContainerRunner) setupSignals() {
	runner.SigChan = make(chan os.Signal, 1)
	signal.Notify(runner.SigChan, syscall.SIGTERM)
	signal.Notify(runner.SigChan, syscall.SIGINT)
	signal.Notify(runner.SigChan, syscall.SIGQUIT)

	go func(sig chan os.Signal) {
		for s := range sig {
			runner.stop(s)
		}
	}(runner.SigChan)
}

// stop the underlying Docker container.
func (runner *ContainerRunner) stop(sig os.Signal) {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if sig != nil {
		runner.CrunchLog.Printf("caught signal: %v", sig)
	}
	if runner.ContainerID == "" {
		return
	}
	runner.cCancelled = true
	runner.CrunchLog.Printf("removing container")
	err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
	if err != nil {
		runner.CrunchLog.Printf("error removing container: %s", err)
	}
}

var errorBlacklist = []string{
	"(?ms).*[Cc]annot connect to the Docker daemon.*",
	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
}
var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")

func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
	for _, d := range errorBlacklist {
		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
			if *brokenNodeHook == "" {
				runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
			} else {
				runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
				// Run the hook script that marks this node as broken.
				c := exec.Command(*brokenNodeHook)
				c.Stdout = runner.CrunchLog
				c.Stderr = runner.CrunchLog
				err := c.Run()
				if err != nil {
					runner.CrunchLog.Printf("Error running broken node hook: %v", err)
				}
			}
			return true
		}
	}
	return false
}

// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store.  If not, it loads
// the image from Keep.
func (runner *ContainerRunner) LoadImage() (err error) {

	runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

	var collection arvados.Collection
	err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
	if err != nil {
		return fmt.Errorf("While getting container image collection: %v", err)
	}
	manifest := manifest.Manifest{Text: collection.ManifestText}
	var img, imageID string
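	// The image collection is expected to contain a single file
	// named <image id>.tar; the image id is derived from the file
	// name.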
	for ms := range manifest.StreamIter() {
		img = ms.FileStreamSegments[0].Name
		if !strings.HasSuffix(img, ".tar") {
			return fmt.Errorf("First file in the container image collection does not end in .tar")
		}
		imageID = img[:len(img)-4]
	}

	runner.CrunchLog.Printf("Using Docker image id '%s'", imageID)

	_, _, err = runner.Docker.ImageInspectWithRaw(context.TODO(), imageID)
	if err != nil {
		runner.CrunchLog.Print("Loading Docker image from keep")

		var readCloser io.ReadCloser
		readCloser, err = runner.Kc.ManifestFileReader(manifest, img)
		if err != nil {
			return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
		}

		response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
		if err != nil {
			return fmt.Errorf("While loading container image into Docker: %v", err)
		}

		defer response.Body.Close()
		rbody, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return fmt.Errorf("Reading response to image load: %v", err)
		}
		runner.CrunchLog.Printf("Docker response: %s", rbody)
	} else {
		runner.CrunchLog.Print("Docker image is available")
	}

	runner.ContainerConfig.Image = imageID

	runner.Kc.ClearBlockCache()

	return nil
}

func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
	c = exec.Command("arv-mount", arvMountCmd...)

	// Copy our environment, but override ARVADOS_API_TOKEN with
	// the container auth token.
	c.Env = nil
	for _, s := range os.Environ() {
		if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
			c.Env = append(c.Env, s)
		}
	}
	c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

	runner.arvMountLog = NewThrottledLogger(runner.NewLogWriter("arv-mount"))
	c.Stdout = runner.arvMountLog
	c.Stderr = runner.arvMountLog

	runner.CrunchLog.Printf("Running %v", c.Args)

	err = c.Start()
	if err != nil {
		return nil, err
	}

	statReadme := make(chan bool)
	runner.ArvMountExit = make(chan error)

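	// Poll the mount point until the by_id/README file appears,
	// which indicates that arv-mount has finished initializing.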
	keepStatting := true
	go func() {
		for keepStatting {
			time.Sleep(100 * time.Millisecond)
			// Use a local error variable here: assigning to the
			// enclosing function's named return value from this
			// goroutine would be a data race.
			if _, statErr := os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint)); statErr == nil {
				keepStatting = false
				statReadme <- true
			}
		}
		close(statReadme)
	}()
303
304         go func() {
305                 mnterr := c.Wait()
306                 if mnterr != nil {
307                         runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
308                 }
309                 runner.ArvMountExit <- mnterr
310                 close(runner.ArvMountExit)
311         }()
312
313         select {
314         case <-statReadme:
315                 break
316         case err := <-runner.ArvMountExit:
317                 runner.ArvMount = nil
318                 keepStatting = false
319                 return nil, err
320         }
321
322         return c, nil
323 }
324
325 func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
326         if runner.ArvMountPoint == "" {
327                 runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
328         }
329         return
330 }
331
332 func copyfile(src string, dst string) (err error) {
333         srcfile, err := os.Open(src)
334         if err != nil {
335                 return
336         }
337
338         os.MkdirAll(path.Dir(dst), 0777)
339
340         dstfile, err := os.Create(dst)
341         if err != nil {
342                 return
343         }
344         _, err = io.Copy(dstfile, srcfile)
345         if err != nil {
346                 return
347         }
348
349         err = srcfile.Close()
350         err2 := dstfile.Close()
351
352         if err != nil {
353                 return
354         }
355
356         if err2 != nil {
357                 return err2
358         }
359
360         return nil
361 }
362
363 func (runner *ContainerRunner) SetupMounts() (err error) {
364         err = runner.SetupArvMountPoint("keep")
365         if err != nil {
366                 return fmt.Errorf("While creating keep mount temp dir: %v", err)
367         }
368
369         token, err := runner.ContainerToken()
370         if err != nil {
371                 return fmt.Errorf("could not get container token: %s", err)
372         }
373
374         pdhOnly := true
375         tmpcount := 0
376         arvMountCmd := []string{
377                 "--foreground",
378                 "--allow-other",
379                 "--read-write",
380                 fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
381
382         if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
383                 arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
384         }
385
386         collectionPaths := []string{}
387         runner.Binds = nil
388         runner.Volumes = make(map[string]struct{})
389         needCertMount := true
390         type copyFile struct {
391                 src  string
392                 bind string
393         }
394         var copyFiles []copyFile
395
396         var binds []string
397         for bind := range runner.Container.Mounts {
398                 binds = append(binds, bind)
399         }
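	// Secret mounts are kept out of the regular mount list and are
	// fetched from a separate API endpoint; only "json" and "text"
	// kinds are accepted. An entry might look like (illustrative):
	//
	//   "/secrets/api-token": {"kind": "text", "content": "xyzzy"}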
	for bind := range runner.SecretMounts {
		if runner.SecretMounts[bind].Kind != "json" &&
			runner.SecretMounts[bind].Kind != "text" {
			return fmt.Errorf("Secret mount %q type is %q but only 'json' and 'text' are permitted.",
				bind, runner.SecretMounts[bind].Kind)
		}
		binds = append(binds, bind)
	}
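	// Process mount points in lexical order, so a parent mount
	// (such as the output path) is always handled before any mount
	// nested beneath it.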
	sort.Strings(binds)

	for _, bind := range binds {
		mnt, ok := runner.Container.Mounts[bind]
		if !ok {
			mnt = runner.SecretMounts[bind]
		}
		if bind == "stdout" || bind == "stderr" {
			// Is it a "file" mount kind?
			if mnt.Kind != "file" {
				return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind)
			}

			// Does path start with OutputPath?
			prefix := runner.Container.OutputPath
			if !strings.HasSuffix(prefix, "/") {
				prefix += "/"
			}
			if !strings.HasPrefix(mnt.Path, prefix) {
				return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
			}
		}

		if bind == "stdin" {
			// Is it a "collection" mount kind?
			if mnt.Kind != "collection" && mnt.Kind != "json" {
				return fmt.Errorf("Unsupported mount kind '%s' for stdin. Only 'collection' or 'json' are supported.", mnt.Kind)
			}
		}

		if bind == "/etc/arvados/ca-certificates.crt" {
			needCertMount = false
		}

		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
			if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
				return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
			}
		}

		switch {
		case mnt.Kind == "collection" && bind != "stdin":
			var src string
			if mnt.UUID != "" && mnt.PortableDataHash != "" {
				return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
			}
			if mnt.UUID != "" {
				if mnt.Writable {
					return fmt.Errorf("Writing to existing collections currently not permitted.")
				}
				pdhOnly = false
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
			} else if mnt.PortableDataHash != "" {
				if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					return fmt.Errorf("Can never write to a collection specified by portable data hash")
				}
				idx := strings.Index(mnt.PortableDataHash, "/")
				if idx > 0 {
					mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
					mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
					runner.Container.Mounts[bind] = mnt
				}
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
				if mnt.Path != "" && mnt.Path != "." {
					if strings.HasPrefix(mnt.Path, "./") {
						mnt.Path = mnt.Path[2:]
					} else if strings.HasPrefix(mnt.Path, "/") {
						mnt.Path = mnt.Path[1:]
					}
					src += "/" + mnt.Path
				}
			} else {
				src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
				arvMountCmd = append(arvMountCmd, "--mount-tmp")
				arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
				tmpcount++
			}
			if mnt.Writable {
				if bind == runner.Container.OutputPath {
					runner.HostOutputDir = src
					runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
				} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
				} else {
					runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
				}
			} else {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
			}
			collectionPaths = append(collectionPaths, src)

		case mnt.Kind == "tmp":
			var tmpdir string
			tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
			if err != nil {
				return fmt.Errorf("While creating mount temp dir: %v", err)
			}
			st, staterr := os.Stat(tmpdir)
			if staterr != nil {
				return fmt.Errorf("While Stat on temp dir: %v", staterr)
			}
			err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return fmt.Errorf("While Chmod temp dir: %v", err)
			}
			runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = tmpdir
			}

		case mnt.Kind == "json" || mnt.Kind == "text":
			var filedata []byte
			if mnt.Kind == "json" {
				filedata, err = json.Marshal(mnt.Content)
				if err != nil {
					return fmt.Errorf("encoding json data: %v", err)
				}
			} else {
				text, ok := mnt.Content.(string)
				if !ok {
					return fmt.Errorf("content for mount %q must be a string", bind)
				}
				filedata = []byte(text)
			}

			tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
			if err != nil {
				return fmt.Errorf("creating temp dir: %v", err)
			}
			tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
			err = ioutil.WriteFile(tmpfn, filedata, 0444)
			if err != nil {
				return fmt.Errorf("writing temp file: %v", err)
			}
			if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
				copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
			} else {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
			}

		case mnt.Kind == "git_tree":
			tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
			if err != nil {
				return fmt.Errorf("creating temp dir: %v", err)
			}
			err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
			if err != nil {
				return err
			}
			runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
		}
	}

	if runner.HostOutputDir == "" {
		return fmt.Errorf("Output path does not correspond to a writable mount point")
	}

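	// If the container wants API access, bind the first CA
	// certificate bundle found on the host at the canonical path
	// inside the container, unless the container already mounts
	// its own copy.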
	if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
		for _, certfile := range arvadosclient.CertFiles {
			_, err := os.Stat(certfile)
			if err == nil {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
				break
			}
		}
	}

	if pdhOnly {
		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
	} else {
		arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
	}
	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)

	runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
	if err != nil {
		return fmt.Errorf("While trying to start arv-mount: %v", err)
	}

	for _, p := range collectionPaths {
		_, err = os.Stat(p)
		if err != nil {
			return fmt.Errorf("While checking that input files exist: %v", err)
		}
	}

	for _, cp := range copyFiles {
		st, err := os.Stat(cp.src)
		if err != nil {
			return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
		if st.IsDir() {
			err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
				if walkerr != nil {
					return walkerr
				}
				target := path.Join(cp.bind, walkpath[len(cp.src):])
				if walkinfo.Mode().IsRegular() {
					copyerr := copyfile(walkpath, target)
					if copyerr != nil {
						return copyerr
					}
					return os.Chmod(target, walkinfo.Mode()|0777)
				} else if walkinfo.Mode().IsDir() {
					mkerr := os.MkdirAll(target, 0777)
					if mkerr != nil {
						return mkerr
					}
					return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
				} else {
					return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
				}
			})
		} else if st.Mode().IsRegular() {
			err = copyfile(cp.src, cp.bind)
			if err == nil {
				err = os.Chmod(cp.bind, st.Mode()|0777)
			}
		}
		if err != nil {
			return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
	}

	return nil
}

func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
	// Handle docker log protocol
	// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
	defer close(runner.loggingDone)

	header := make([]byte, 8)
	var err error
	for err == nil {
		_, err = io.ReadAtLeast(containerReader, header, 8)
		if err != nil {
			if err == io.EOF {
				err = nil
			}
			break
		}
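		// Each frame begins with an 8-byte header: one
		// stream-type byte (1=stdout, 2=stderr), three bytes of
		// padding, and the payload length as a 4-byte big-endian
		// integer.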
		readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
		if header[0] == 1 {
			// stdout
			_, err = io.CopyN(runner.Stdout, containerReader, readsize)
		} else {
			// stderr
			_, err = io.CopyN(runner.Stderr, containerReader, readsize)
		}
	}

	if err != nil {
		runner.CrunchLog.Printf("error reading docker logs: %v", err)
	}

	err = runner.Stdout.Close()
	if err != nil {
		runner.CrunchLog.Printf("error closing stdout logs: %v", err)
	}

	err = runner.Stderr.Close()
	if err != nil {
		runner.CrunchLog.Printf("error closing stderr logs: %v", err)
	}

	if runner.statReporter != nil {
		runner.statReporter.Stop()
		err = runner.statLogger.Close()
		if err != nil {
			runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
		}
	}
}

func (runner *ContainerRunner) stopHoststat() error {
	if runner.hoststatReporter == nil {
		return nil
	}
	runner.hoststatReporter.Stop()
	err := runner.hoststatLogger.Close()
	if err != nil {
		return fmt.Errorf("error closing hoststat logs: %v", err)
	}
	return nil
}

func (runner *ContainerRunner) startHoststat() {
	runner.hoststatLogger = NewThrottledLogger(runner.NewLogWriter("hoststat"))
	runner.hoststatReporter = &crunchstat.Reporter{
		Logger:     log.New(runner.hoststatLogger, "", 0),
		CgroupRoot: runner.cgroupRoot,
		PollPeriod: runner.statInterval,
	}
	runner.hoststatReporter.Start()
}

func (runner *ContainerRunner) startCrunchstat() {
	runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
	runner.statReporter = &crunchstat.Reporter{
		CID:          runner.ContainerID,
		Logger:       log.New(runner.statLogger, "", 0),
		CgroupParent: runner.expectCgroupParent,
		CgroupRoot:   runner.cgroupRoot,
		PollPeriod:   runner.statInterval,
	}
	runner.statReporter.Start()
}

type infoCommand struct {
	label string
	cmd   []string
}

// LogHostInfo logs info about the current host, for debugging and
// accounting purposes. Although it's logged as "node-info", this is
// about the environment where crunch-run is actually running, which
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
	w := runner.NewLogWriter("node-info")

	commands := []infoCommand{
		{
			label: "Host Information",
			cmd:   []string{"uname", "-a"},
		},
		{
			label: "CPU Information",
			cmd:   []string{"cat", "/proc/cpuinfo"},
		},
		{
			label: "Memory Information",
			cmd:   []string{"cat", "/proc/meminfo"},
		},
		{
			label: "Disk Space",
			cmd:   []string{"df", "-m", "/", os.TempDir()},
		},
		{
			label: "Disk INodes",
			cmd:   []string{"df", "-i", "/", os.TempDir()},
		},
	}

	// Run commands with informational output to be logged.
	for _, command := range commands {
		fmt.Fprintln(w, command.label)
		cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
		cmd.Stdout = w
		cmd.Stderr = w
		if err := cmd.Run(); err != nil {
			err = fmt.Errorf("While running command %q: %v", command.cmd, err)
			fmt.Fprintln(w, err)
			return err
		}
		fmt.Fprintln(w, "")
	}

	err = w.Close()
	if err != nil {
		return fmt.Errorf("While closing node-info logs: %v", err)
	}
	return nil
}

// LogContainerRecord gets and saves the raw JSON container record from the API server
func (runner *ContainerRunner) LogContainerRecord() error {
	logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
	if !logged && err == nil {
		err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
	}
	return err
}

// LogNodeRecord logs arvados#node record corresponding to the current host.
func (runner *ContainerRunner) LogNodeRecord() error {
	hostname := os.Getenv("SLURMD_NODENAME")
	if hostname == "" {
		hostname, _ = os.Hostname()
	}
	_, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
		// The "info" field has admin-only info when obtained
		// with a privileged token, and should not be logged.
		node, ok := resp.(map[string]interface{})
		if ok {
			delete(node, "info")
		}
	})
	return err
}

func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
	w := &ArvLogWriter{
		ArvClient:     runner.ArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: label,
		writeCloser:   runner.LogCollection.Open(label + ".json"),
	}

	reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
	if err != nil {
		return false, fmt.Errorf("error getting %s record: %v", label, err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	var resp map[string]interface{}
	if err = dec.Decode(&resp); err != nil {
		return false, fmt.Errorf("error decoding %s list response: %v", label, err)
	}
	items, ok := resp["items"].([]interface{})
	if !ok {
		return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
	} else if len(items) < 1 {
		return false, nil
	}
	if munge != nil {
		munge(items[0])
	}
	// Re-encode it using indentation to improve readability
	enc := json.NewEncoder(w)
	enc.SetIndent("", "    ")
	if err = enc.Encode(items[0]); err != nil {
		return false, fmt.Errorf("error logging %s record: %v", label, err)
	}
	err = w.Close()
	if err != nil {
		return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
	}
	return true, nil
}

// AttachStreams connects the docker container stdin, stdout and stderr logs
// to the Arvados logger which logs to Keep and the API server logs table.
func (runner *ContainerRunner) AttachStreams() (err error) {

	runner.CrunchLog.Print("Attaching container streams")

	// If stdin mount is provided, attach it to the docker container
	var stdinRdr arvados.File
	var stdinJson []byte
	if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
		if stdinMnt.Kind == "collection" {
			var stdinColl arvados.Collection
			collId := stdinMnt.UUID
			if collId == "" {
				collId = stdinMnt.PortableDataHash
			}
			err = runner.ArvClient.Get("collections", collId, nil, &stdinColl)
			if err != nil {
				return fmt.Errorf("While getting stdin collection: %v", err)
			}

			stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path)
			if os.IsNotExist(err) {
				return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
			} else if err != nil {
				return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
			}
		} else if stdinMnt.Kind == "json" {
			stdinJson, err = json.Marshal(stdinMnt.Content)
			if err != nil {
				return fmt.Errorf("While encoding stdin json data: %v", err)
			}
		}
	}

	stdinUsed := stdinRdr != nil || len(stdinJson) != 0
	response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID,
		dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
	if err != nil {
		return fmt.Errorf("While attaching container stdout/stderr streams: %v", err)
	}

	runner.loggingDone = make(chan bool)

	if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
		stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path)
		if err != nil {
			return err
		}
		runner.Stdout = stdoutFile
	} else {
		runner.Stdout = NewThrottledLogger(runner.NewLogWriter("stdout"))
	}

	if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
		stderrFile, err := runner.getStdoutFile(stderrMnt.Path)
		if err != nil {
			return err
		}
		runner.Stderr = stderrFile
	} else {
		runner.Stderr = NewThrottledLogger(runner.NewLogWriter("stderr"))
	}

	if stdinRdr != nil {
		go func() {
			_, err := io.Copy(response.Conn, stdinRdr)
			if err != nil {
				runner.CrunchLog.Printf("While writing stdin collection to docker container %q", err)
				runner.stop(nil)
			}
			stdinRdr.Close()
			response.CloseWrite()
		}()
	} else if len(stdinJson) != 0 {
		go func() {
			_, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
			if err != nil {
				runner.CrunchLog.Printf("While writing stdin json to docker container %q", err)
				runner.stop(nil)
			}
			response.CloseWrite()
		}()
	}

	go runner.ProcessDockerAttach(response.Reader)

	return nil
}

func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
	stdoutPath := mntPath[len(runner.Container.OutputPath):]
	index := strings.LastIndex(stdoutPath, "/")
	if index > 0 {
		subdirs := stdoutPath[:index]
		if subdirs != "" {
			st, err := os.Stat(runner.HostOutputDir)
			if err != nil {
				return nil, fmt.Errorf("While Stat on temp dir: %v", err)
			}
			stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
			err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
			}
		}
	}
	stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
	if err != nil {
		return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
	}

	return stdoutFile, nil
}

// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer() error {
	runner.CrunchLog.Print("Creating Docker container")

	runner.ContainerConfig.Cmd = runner.Container.Command
	if runner.Container.Cwd != "." {
		runner.ContainerConfig.WorkingDir = runner.Container.Cwd
	}

	for k, v := range runner.Container.Environment {
		runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v)
	}

	runner.ContainerConfig.Volumes = runner.Volumes

	maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
	runner.HostConfig = dockercontainer.HostConfig{
		Binds: runner.Binds,
		LogConfig: dockercontainer.LogConfig{
			Type: "none",
		},
		Resources: dockercontainer.Resources{
			CgroupParent: runner.setCgroupParent,
			NanoCPUs:     int64(runner.Container.RuntimeConstraints.VCPUs) * 1000000000,
			Memory:       maxRAM, // RAM
			MemorySwap:   maxRAM, // RAM+swap
			KernelMemory: maxRAM, // kernel portion
		},
	}

	if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
		tok, err := runner.ContainerToken()
		if err != nil {
			return err
		}
		runner.ContainerConfig.Env = append(runner.ContainerConfig.Env,
			"ARVADOS_API_TOKEN="+tok,
			"ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"),
			"ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"),
		)
		runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
	} else {
		if runner.enableNetwork == "always" {
			runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
		} else {
			runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
		}
	}

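	// Attach stdin only if the container has a stdin mount.
	// StdinOnce tells Docker to close the container's stdin once
	// the attached stream disconnects.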
	_, stdinUsed := runner.Container.Mounts["stdin"]
	runner.ContainerConfig.OpenStdin = stdinUsed
	runner.ContainerConfig.StdinOnce = stdinUsed
	runner.ContainerConfig.AttachStdin = stdinUsed
	runner.ContainerConfig.AttachStdout = true
	runner.ContainerConfig.AttachStderr = true

	createdBody, err := runner.Docker.ContainerCreate(context.TODO(), &runner.ContainerConfig, &runner.HostConfig, nil, runner.Container.UUID)
	if err != nil {
		return fmt.Errorf("While creating container: %v", err)
	}

	runner.ContainerID = createdBody.ID

	return runner.AttachStreams()
}

// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
	runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID)
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
		dockertypes.ContainerStartOptions{})
	if err != nil {
		var advice string
		if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
			advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
		}
		return fmt.Errorf("could not start container: %v%s", err, advice)
	}
	return nil
}

// WaitFinish waits for the container to terminate, captures the exit code,
// and closes the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() error {
	runner.CrunchLog.Print("Waiting for container to finish")

	waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
	arvMountExit := runner.ArvMountExit
	for {
		select {
		case waitBody := <-waitOk:
			runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
			code := int(waitBody.StatusCode)
			runner.ExitCode = &code

			// wait for stdout/stderr to complete
			<-runner.loggingDone
			return nil

		case err := <-waitErr:
			return fmt.Errorf("container wait: %v", err)

		case <-arvMountExit:
			runner.CrunchLog.Printf("arv-mount exited while container is still running.  Stopping container.")
			runner.stop(nil)
			// arvMountExit will always be ready now that
			// it's closed, but that doesn't interest us.
			arvMountExit = nil
		}
	}
}

var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")

func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
	// Follow symlinks if necessary
	info = startinfo
	tgt = path
	readlinktgt = ""
	nextlink := path
	for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
		if followed >= limitFollowSymlinks {
			// Got stuck in a loop or just a pathological number of links, give up.
			err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
			return
		}

		readlinktgt, err = os.Readlink(nextlink)
		if err != nil {
			return
		}

		tgt = readlinktgt
		if !strings.HasPrefix(tgt, "/") {
			// Relative symlink, resolve it to host path
			tgt = filepath.Join(filepath.Dir(path), tgt)
		}
		if strings.HasPrefix(tgt, runner.Container.OutputPath+"/") && !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
			// Absolute symlink to container output path, adjust it to host output path.
			tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
		}
		if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
			// After dereferencing, symlink target must either be
			// within output directory, or must point to a
			// collection mount.
			err = ErrNotInOutputDir
			return
		}
		info, err = os.Lstat(tgt)
		if err != nil {
			err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
				path[len(runner.HostOutputDir):], readlinktgt, err)
			return
		}

		nextlink = tgt
	}

	return
}

var limitFollowSymlinks = 10

// UploadOutputFile uploads files within the output directory, with special
// handling for symlinks. If a symlink leads to a keep mount, copy the
// manifest text from the keep mount into the output manifestText. Whether
// symlinks are relative or absolute, every symlink target (even targets
// that are symlinks themselves) must point to a path in either the output
// directory or a collection mount.
//
// Assumes initial value of "path" is absolute, and located within runner.HostOutputDir.
func (runner *ContainerRunner) UploadOutputFile(
	path string,
	info os.FileInfo,
	infoerr error,
	binds []string,
	walkUpload *WalkUpload,
	relocateFrom string,
	relocateTo string,
	followed int) (manifestText string, err error) {

	if infoerr != nil {
		return "", infoerr
	}

	if info.Mode().IsDir() {
		// if empty, need to create a .keep file
		dir, direrr := os.Open(path)
		if direrr != nil {
			return "", direrr
		}
		defer dir.Close()
		names, eof := dir.Readdirnames(1)
		if len(names) == 0 && eof == io.EOF && path != runner.HostOutputDir {
			containerPath := runner.OutputPath + path[len(runner.HostOutputDir):]
			for _, bind := range binds {
				mnt := runner.Container.Mounts[bind]
				// Check if there is a bind for this
				// directory, in which case assume we don't need .keep
				if (containerPath == bind || strings.HasPrefix(containerPath, bind+"/")) && mnt.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
					return
				}
			}
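			// Represent the empty directory in the output
			// manifest as a zero-length .keep file
			// (d41d8cd98f00b204e9800998ecf8427e+0 is the
			// locator of the empty block).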
1167                         outputSuffix := path[len(runner.HostOutputDir)+1:]
1168                         return fmt.Sprintf("./%v d41d8cd98f00b204e9800998ecf8427e+0 0:0:.keep\n", outputSuffix), nil
1169                 }
1170                 return
1171         }
1172
1173         if followed >= limitFollowSymlinks {
1174                 // Got stuck in a loop or just a pathological number of
1175                 // directory links, give up.
1176                 err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
1177                 return
1178         }
1179
1180         // "path" is the actual path we are visiting
1181         // "tgt" is the target of "path" (a non-symlink) after following symlinks
1182         // "relocated" is the path in the output manifest where the file should be placed,
1183         // but has HostOutputDir as a prefix.
1184
1185         // The destination path in the output manifest may need to be
1186         // logically relocated to some other path in order to appear
1187         // in the correct location as a result of following a symlink.
1188         // Remove the relocateFrom prefix and replace it with
1189         // relocateTo.
1190         relocated := relocateTo + path[len(relocateFrom):]
1191
1192         tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
1193         if derefErr != nil && derefErr != ErrNotInOutputDir {
1194                 return "", derefErr
1195         }
1196
1197         // go through mounts and try reverse map to collection reference
1198         for _, bind := range binds {
1199                 mnt := runner.Container.Mounts[bind]
1200                 if (tgt == bind || strings.HasPrefix(tgt, bind+"/")) && !mnt.Writable {
1201                         // get path relative to bind
1202                         targetSuffix := tgt[len(bind):]
1203
1204                         // Copy the mount, extending its path with tgt's suffix relative to the bind
1205                         adjustedMount := mnt
1206                         adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
1207
1208                         // Terminates in this keep mount, so add the
1209                         // manifest text at appropriate location.
1210                         outputSuffix := relocated[len(runner.HostOutputDir):]
1211                         manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
1212                         return
1213                 }
1214         }
1215
1216         // If target is not a collection mount, it must be located within the
1217         // output directory, otherwise it is an error.
1218         if derefErr == ErrNotInOutputDir {
1219                 err = fmt.Errorf("Symlink in output %q points to invalid location %q, must point to a path within the output directory.",
1220                         path[len(runner.HostOutputDir):], readlinktgt)
1221                 return
1222         }
1223
1224         if info.Mode().IsRegular() {
1225                 return "", walkUpload.UploadFile(relocated, tgt)
1226         }
1227
1228         if info.Mode().IsDir() {
1229                 // Symlink leads to directory.  Walk() doesn't follow
1230                 // directory symlinks, so we walk the target directory
1231                 // instead.  Within the walk, file paths are relocated
1232                 // so they appear under the original symlink path.
1233                 err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
1234                         var m string
1235                         m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
1236                                 binds, walkUpload, tgt, relocated, followed+1)
1237                         if walkerr == nil {
1238                                 manifestText = manifestText + m
1239                         }
1240                         return walkerr
1241                 })
1242                 return
1243         }
1244
1245         return
1246 }
1247
1248 // CaptureOutput saves the container's output as a collection and records its portable data hash in runner.OutputPDH
1249 func (runner *ContainerRunner) CaptureOutput() error {
1250         if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
1251                 // Output may have been set directly by the container, so
1252                 // refresh the container record to check.
1253                 err := runner.ArvClient.Get("containers", runner.Container.UUID,
1254                         nil, &runner.Container)
1255                 if err != nil {
1256                         return err
1257                 }
1258                 if runner.Container.Output != "" {
1259                         // Container output is already set.
1260                         runner.OutputPDH = &runner.Container.Output
1261                         return nil
1262                 }
1263         }
1264
1265         if runner.HostOutputDir == "" {
1266                 return nil
1267         }
1268
1269         _, err := os.Stat(runner.HostOutputDir)
1270         if err != nil {
1271                 return fmt.Errorf("While checking host output path: %v", err)
1272         }
1273
1274         // Pre-populate output from the configured mount points
1275         var binds []string
1276         for bind, mnt := range runner.Container.Mounts {
1277                 if mnt.Kind == "collection" {
1278                         binds = append(binds, bind)
1279                 }
1280         }
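             // (Scanning binds in sorted order makes the first match in
             // UploadOutputFile deterministic when collection mounts
             // are nested.)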
1281         sort.Strings(binds)
1282
1283         // Delete secret mounts so they don't get saved to the output collection.
1284         for bind := range runner.SecretMounts {
1285                 if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
1286                         err = os.Remove(runner.HostOutputDir + bind[len(runner.Container.OutputPath):])
1287                         if err != nil {
1288                                 return fmt.Errorf("Unable to remove secret mount: %v", err)
1289                         }
1290                 }
1291         }
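             // (For example, with OutputPath "/out" and a secret mount
             // at "/out/secret.txt", the staged host-side copy at
             // HostOutputDir+"/secret.txt" is deleted so it is never
             // uploaded.)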
1292
1293         var manifestText string
1294
1295         collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
1296         _, err = os.Stat(collectionMetafile)
1297         if err != nil {
1298                 // Regular directory
1299
1300                 cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
1301                 walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)
1302
1303                 var m string
1304                 err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
1305                         m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
1306                         if err == nil {
1307                                 manifestText = manifestText + m
1308                         }
1309                         return err
1310                 })
1311
1312                 cw.EndUpload(walkUpload)
1313
1314                 if err != nil {
1315                         return fmt.Errorf("While uploading output files: %v", err)
1316                 }
1317
1318                 m, err = cw.ManifestText()
1319                 manifestText = manifestText + m
1320                 if err != nil {
1321                         return fmt.Errorf("While uploading output files: %v", err)
1322                 }
1323         } else {
1324                 // FUSE mount directory
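                     // When the output directory is an arv-mount FUSE
                     // mount of an existing collection, the special file
                     // ".arvados#collection" presents the collection
                     // record as JSON, so the manifest text can be taken
                     // from it directly instead of re-uploading the data.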
1325                 file, openerr := os.Open(collectionMetafile)
1326                 if openerr != nil {
1327                         return fmt.Errorf("While opening FUSE metafile: %v", openerr)
1328                 }
1329                 defer file.Close()
1330
1331                 var rec arvados.Collection
1332                 err = json.NewDecoder(file).Decode(&rec)
1333                 if err != nil {
1334                         return fmt.Errorf("While reading FUSE metafile: %v", err)
1335                 }
1336                 manifestText = rec.ManifestText
1337         }
1338
1339         for _, bind := range binds {
1340                 mnt := runner.Container.Mounts[bind]
1341
1342                 bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath)
1343
1344                 if bindSuffix == bind || len(bindSuffix) <= 0 {
1345                         // either does not start with OutputPath or is OutputPath itself
1346                         continue
1347                 }
1348
1349                 if mnt.ExcludeFromOutput || mnt.Writable {
1350                         continue
1351                 }
1352
1353                 // append to manifest_text
1354                 m, err := runner.getCollectionManifestForPath(mnt, bindSuffix)
1355                 if err != nil {
1356                         return err
1357                 }
1358
1359                 manifestText = manifestText + m
1360         }
1361
1362         // Save output
1363         var response arvados.Collection
1364         manifest := manifest.Manifest{Text: manifestText}
1365         manifestText = manifest.Extract(".", ".").Text
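             // Extract(".", ".") re-parses the accumulated manifest text,
             // merging the fragments collected above into a single
             // normalized manifest.
             // The collection record is created in the trashed state:
             // the container record references the output by portable
             // data hash, and (presumably) any user-visible output
             // collection is derived from that hash elsewhere.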
1366         err = runner.ArvClient.Create("collections",
1367                 arvadosclient.Dict{
1368                         "ensure_unique_name": true,
1369                         "collection": arvadosclient.Dict{
1370                                 "is_trashed":    true,
1371                                 "name":          "output for " + runner.Container.UUID,
1372                                 "manifest_text": manifestText}},
1373                 &response)
1374         if err != nil {
1375                 return fmt.Errorf("While creating output collection: %v", err)
1376         }
1377         runner.OutputPDH = &response.PortableDataHash
1378         return nil
1379 }
1380
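     // outputCollections memoizes collection records fetched by
     // getCollectionManifestForPath, so a collection that backs several
     // mounts is only fetched from the API server once.  It is accessed
     // only from the output-capture path, so it is not protected by a
     // lock.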
1381 var outputCollections = make(map[string]arvados.Collection)
1382
1383 // getCollectionManifestForPath fetches the collection identified by
1384 // mnt.PortableDataHash and returns the manifest_text fragment
1385 // corresponding to mnt.Path, rewritten to appear at bindSuffix:
1386 //   If mnt.Path is not specified, the entire manifest_text is
1387 //     returned, with each stream name "." replaced by bindSuffix.
1388 //   If mnt.Path names exactly one stream, the manifest_text for that
1389 //     stream is returned, with the stream name replaced by bindSuffix.
1390 //   Otherwise, mnt.Path is taken to name a file within a stream; the
1391 //     manifest_text for that stream is returned, with the stream name
1392 //     replaced by bindSuffix minus its last word, and the file name
1393 //     replaced by the last word of bindSuffix.
1394 //  Allowed path examples:
1395 //    "path":"/"
1396 //    "path":"/subdir1"
1397 //    "path":"/subdir1/subdir2"
1398 //    "path":"/subdir/filename" etc
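     //  Illustrative example: for a mount with "path":"/subdir1" bound at
     //  OutputPath+"/foo", a stream "./subdir1 <locator> 0:3:bar.txt" in
     //  the mounted collection is returned as "./foo <locator> 0:3:bar.txt".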
1399 func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) {
1400         collection := outputCollections[mnt.PortableDataHash]
1401         if collection.PortableDataHash == "" {
1402                 err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection)
1403                 if err != nil {
1404                         return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err)
1405                 }
1406                 outputCollections[mnt.PortableDataHash] = collection
1407         }
1408
1409         if collection.ManifestText == "" {
1410                 runner.CrunchLog.Printf("No manifest text for collection %v", collection.PortableDataHash)
1411                 return "", nil
1412         }
1413
1414         mft := manifest.Manifest{Text: collection.ManifestText}
1415         extracted := mft.Extract(mnt.Path, bindSuffix)
1416         if extracted.Err != nil {
1417                 return "", fmt.Errorf("Error parsing manifest for %v: %v", mnt.PortableDataHash, extracted.Err.Error())
1418         }
1419         return extracted.Text, nil
1420 }
1421
1422 func (runner *ContainerRunner) CleanupDirs() {
1423         if runner.ArvMount != nil {
1424                 var delay int64 = 8
1425                 umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
1426                 umount.Stdout = runner.CrunchLog
1427                 umount.Stderr = runner.CrunchLog
1428                 runner.CrunchLog.Printf("Running %v", umount.Args)
1429                 umnterr := umount.Start()
1430
1431                 if umnterr != nil {
1432                         runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
1433                 } else {
1434                         // If arv-mount --unmount gets stuck for any reason, we
1435                         // don't want to wait for it forever.  Do Wait() in a goroutine
1436                         // so it doesn't block crunch-run.
1437                         umountExit := make(chan error)
1438                         go func() {
1439                                 mnterr := umount.Wait()
1440                                 if mnterr != nil {
1441                                         runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
1442                                 }
1443                                 umountExit <- mnterr
1444                         }()
1445
1446                         for again := true; again; {
1447                                 again = false
1448                                 select {
1449                                 case <-umountExit:
1450                                         umount = nil
1451                                         again = true
1452                                 case <-runner.ArvMountExit:
1453                                         // arv-mount exited on its own; no need to wait further
1454                                 case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
1455                                         runner.CrunchLog.Printf("Timed out waiting for unmount")
1456                                         if umount != nil {
1457                                                 umount.Process.Kill()
1458                                         }
1459                                         runner.ArvMount.Process.Kill()
1460                                 }
1461                         }
1462                 }
1463         }
1464
1465         if runner.ArvMountPoint != "" {
1466                 if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
1467                         runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
1468                 }
1469         }
1470
1471         if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
1472                 runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
1473         }
1474 }
1475
1476 // CommitLogs posts the collection containing the final container logs.
1477 func (runner *ContainerRunner) CommitLogs() error {
1478         func() {
1479                 // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
1480                 runner.cStateLock.Lock()
1481                 defer runner.cStateLock.Unlock()
1482
1483                 runner.CrunchLog.Print(runner.finalState)
1484
1485                 if runner.arvMountLog != nil {
1486                         runner.arvMountLog.Close()
1487                 }
1488                 runner.CrunchLog.Close()
1489
1490                 // Closing CrunchLog above allows the logs to be committed to Keep
1491                 // at this point, but re-open the crunch log with ArvClient in case
1492                 // any further errors occur while shutting down (such as failing to
1493                 // write the log collection to Keep itself).
1494                 runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
1495                         UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
1496                 runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
1497         }()
1498
1499         if runner.LogsPDH != nil {
1500                 // If we have already assigned something to LogsPDH,
1501                 // we must be closing the re-opened log, which won't
1502                 // end up getting attached to the container record and
1503                 // therefore doesn't need to be saved as a collection
1504                 // -- it exists only to send logs to other channels.
1505                 return nil
1506         }
1507
1508         mt, err := runner.LogCollection.ManifestText()
1509         if err != nil {
1510                 return fmt.Errorf("While creating log manifest: %v", err)
1511         }
1512
1513         var response arvados.Collection
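             // As with the output collection in CaptureOutput, the log
             // collection record is created trashed: the log is attached
             // to the container record by portable data hash, and
             // (presumably) user-visible log collections are derived
             // from that hash elsewhere.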
1514         err = runner.ArvClient.Create("collections",
1515                 arvadosclient.Dict{
1516                         "ensure_unique_name": true,
1517                         "collection": arvadosclient.Dict{
1518                                 "is_trashed":    true,
1519                                 "name":          "logs for " + runner.Container.UUID,
1520                                 "manifest_text": mt}},
1521                 &response)
1522         if err != nil {
1523                 return fmt.Errorf("While creating log collection: %v", err)
1524         }
1525         runner.LogsPDH = &response.PortableDataHash
1526         return nil
1527 }
1528
1529 // UpdateContainerRunning updates the container state to "Running"
1530 func (runner *ContainerRunner) UpdateContainerRunning() error {
1531         runner.cStateLock.Lock()
1532         defer runner.cStateLock.Unlock()
1533         if runner.cCancelled {
1534                 return ErrCancelled
1535         }
1536         return runner.ArvClient.Update("containers", runner.Container.UUID,
1537                 arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
1538 }
1539
1540 // ContainerToken returns the api_token the container (and any
1541 // arv-mount processes) are allowed to use.
1542 func (runner *ContainerRunner) ContainerToken() (string, error) {
1543         if runner.token != "" {
1544                 return runner.token, nil
1545         }
1546
1547         var auth arvados.APIClientAuthorization
1548         err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
1549         if err != nil {
1550                 return "", err
1551         }
1552         runner.token = auth.APIToken
1553         return runner.token, nil
1554 }
1555
1556 // UpdateContainerFinal updates the container record state on the API
1557 // server to "Complete" or "Cancelled".
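     // The update sent is, with illustrative values:
     //   {"container": {"state": "Complete", "log": "<log PDH>",
     //    "exit_code": 0, "output": "<output PDH>"}}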
1558 func (runner *ContainerRunner) UpdateContainerFinal() error {
1559         update := arvadosclient.Dict{}
1560         update["state"] = runner.finalState
1561         if runner.LogsPDH != nil {
1562                 update["log"] = *runner.LogsPDH
1563         }
1564         if runner.finalState == "Complete" {
1565                 if runner.ExitCode != nil {
1566                         update["exit_code"] = *runner.ExitCode
1567                 }
1568                 if runner.OutputPDH != nil {
1569                         update["output"] = *runner.OutputPDH
1570                 }
1571         }
1572         return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
1573 }
1574
1575 // IsCancelled returns the value of Cancelled, with goroutine safety.
1576 func (runner *ContainerRunner) IsCancelled() bool {
1577         runner.cStateLock.Lock()
1578         defer runner.cStateLock.Unlock()
1579         return runner.cCancelled
1580 }
1581
1582 // NewArvLogWriter creates an ArvLogWriter
1583 func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
1584         return &ArvLogWriter{
1585                 ArvClient:     runner.ArvClient,
1586                 UUID:          runner.Container.UUID,
1587                 loggingStream: name,
1588                 writeCloser:   runner.LogCollection.Open(name + ".txt")}
1589 }
1590
1591 // Run the full container lifecycle.
1592 func (runner *ContainerRunner) Run() (err error) {
1593         runner.CrunchLog.Printf("crunch-run %s started", version)
1594         runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
1595
1596         hostname, hosterr := os.Hostname()
1597         if hosterr != nil {
1598                 runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
1599         } else {
1600                 runner.CrunchLog.Printf("Executing on host '%s'", hostname)
1601         }
1602
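             // finalState is the state the container record should be
             // left in: "Queued" if we give up before the record reaches
             // the Running state, "Cancelled" once it does, and
             // "Complete" only if the container runs to completion.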
1603         runner.finalState = "Queued"
1604
1605         defer func() {
1606                 runner.CleanupDirs()
1607
1608                 runner.CrunchLog.Printf("crunch-run finished")
1609                 runner.CrunchLog.Close()
1610         }()
1611
1612         defer func() {
1613                 // checkErr prints e (unless it's nil) and sets err to
1614                 // e (unless err is already non-nil). Thus, if err
1615                 // hasn't already been assigned when Run() returns,
1616                 // this cleanup func will cause Run() to return the
1617                 // first non-nil error that is passed to checkErr().
1618                 checkErr := func(e error) {
1619                         if e == nil {
1620                                 return
1621                         }
1622                         runner.CrunchLog.Print(e)
1623                         if err == nil {
1624                                 err = e
1625                         }
1626                         if runner.finalState == "Complete" {
1627                                 // There was an error in the finalization.
1628                                 runner.finalState = "Cancelled"
1629                         }
1630                 }
1631
1632                 // Log the error encountered in Run(), if any
1633                 checkErr(err)
1634
1635                 if runner.finalState == "Queued" {
1636                         runner.UpdateContainerFinal()
1637                         return
1638                 }
1639
1640                 if runner.IsCancelled() {
1641                         runner.finalState = "Cancelled"
1642                         // but don't return yet -- we still want to
1643                         // capture partial output and write logs
1644                 }
1645
1646                 checkErr(runner.CaptureOutput())
1647                 checkErr(runner.stopHoststat())
1648                 checkErr(runner.CommitLogs())
1649                 checkErr(runner.UpdateContainerFinal())
1650         }()
1651
1652         err = runner.fetchContainerRecord()
1653         if err != nil {
1654                 return
1655         }
1656         runner.setupSignals()
1657         runner.startHoststat()
1658
1659         // check for and/or load image
1660         err = runner.LoadImage()
1661         if err != nil {
1662                 if !runner.checkBrokenNode(err) {
1663                         // Failed to load image but not due to a "broken node"
1664                         // condition, probably user error.
1665                         runner.finalState = "Cancelled"
1666                 }
1667                 err = fmt.Errorf("While loading container image: %v", err)
1668                 return
1669         }
1670
1671         // set up FUSE mount and binds
1672         err = runner.SetupMounts()
1673         if err != nil {
1674                 runner.finalState = "Cancelled"
1675                 err = fmt.Errorf("While setting up mounts: %v", err)
1676                 return
1677         }
1678
1679         err = runner.CreateContainer()
1680         if err != nil {
1681                 return
1682         }
1683         err = runner.LogHostInfo()
1684         if err != nil {
1685                 return
1686         }
1687         err = runner.LogNodeRecord()
1688         if err != nil {
1689                 return
1690         }
1691         err = runner.LogContainerRecord()
1692         if err != nil {
1693                 return
1694         }
1695
1696         if runner.IsCancelled() {
1697                 return
1698         }
1699
1700         err = runner.UpdateContainerRunning()
1701         if err != nil {
1702                 return
1703         }
1704         runner.finalState = "Cancelled"
1705
1706         runner.startCrunchstat()
1707
1708         err = runner.StartContainer()
1709         if err != nil {
1710                 runner.checkBrokenNode(err)
1711                 return
1712         }
1713
1714         err = runner.WaitFinish()
1715         if err == nil && !runner.IsCancelled() {
1716                 runner.finalState = "Complete"
1717         }
1718         return
1719 }
1720
1721 // Fetch the current container record (uuid = runner.Container.UUID)
1722 // into runner.Container.
1723 func (runner *ContainerRunner) fetchContainerRecord() error {
1724         reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
1725         if err != nil {
1726                 return fmt.Errorf("error fetching container record: %v", err)
1727         }
1728         defer reader.Close()
1729
1730         dec := json.NewDecoder(reader)
1731         dec.UseNumber()
1732         err = dec.Decode(&runner.Container)
1733         if err != nil {
1734                 return fmt.Errorf("error decoding container record: %v", err)
1735         }
1736
1737         var sm struct {
1738                 SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
1739         }
1740
1741         err = runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
1742         if err != nil {
1743                 if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
1744                         return fmt.Errorf("error fetching secret_mounts: %v", err)
1745                 }
1746                 // ok && apierr.HttpStatusCode == 404, which means
1747                 // secret_mounts isn't supported by this API server.
1748         }
1749         runner.SecretMounts = sm.SecretMounts
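             // (On a 404 above, sm.SecretMounts is still nil, so
             // SecretMounts simply ends up empty when the API server is
             // too old to support secret mounts.)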
1750
1751         return nil
1752 }
1753
1754 // NewContainerRunner creates a new container runner.
1755 func NewContainerRunner(api IArvadosClient,
1756         kc IKeepClient,
1757         docker ThinDockerClient,
1758         containerUUID string) *ContainerRunner {
1759
1760         cr := &ContainerRunner{ArvClient: api, Kc: kc, Docker: docker}
1761         cr.NewLogWriter = cr.NewArvLogWriter
1762         cr.RunArvMount = cr.ArvMountCmd
1763         cr.MkTempDir = ioutil.TempDir
1764         cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
1765         cr.Container.UUID = containerUUID
1766         cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
1767         cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
1768
1769         loadLogThrottleParams(api)
1770
1771         return cr
1772 }
1773
1774 func main() {
1775         statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
1776         cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
1777         cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
1778         cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
1779         caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
1780         enableNetwork := flag.String("container-enable-networking", "default",
1781                 `Specify if networking should be enabled for container.  One of 'default', 'always':
1782         default: only enable networking if container requests it.
1783         always:  containers always have networking enabled
1784         `)
1785         networkMode := flag.String("container-network-mode", "default",
1786                 `Set networking mode for container.  Corresponds to Docker network mode (--net).
1787         `)
1788         memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
1789         getVersion := flag.Bool("version", false, "Print version information and exit.")
1790         flag.Parse()
1791
1792         // Print version information if requested
1793         if *getVersion {
1794                 fmt.Printf("crunch-run %s\n", version)
1795                 return
1796         }
1797
1798         log.Printf("crunch-run %s started", version)
1799
1800         containerId := flag.Arg(0)
1801
1802         if *caCertsPath != "" {
1803                 arvadosclient.CertFiles = []string{*caCertsPath}
1804         }
1805
1806         api, err := arvadosclient.MakeArvadosClient()
1807         if err != nil {
1808                 log.Fatalf("%s: %v", containerId, err)
1809         }
1810         api.Retries = 8
1811
1812         kc, kcerr := keepclient.MakeKeepClient(api)
1813         if kcerr != nil {
1814                 log.Fatalf("%s: %v", containerId, kcerr)
1815         }
1816         kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
1817         kc.Retries = 4
1818
1819         // API version 1.21 corresponds to Docker 1.9, which is currently the
1820         // minimum version we want to support.
1821         docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
1822
1823         cr := NewContainerRunner(api, kc, docker, containerId)
1824         if dockererr != nil {
1825                 cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
1826                 cr.checkBrokenNode(dockererr)
1827                 cr.CrunchLog.Close()
1828                 os.Exit(1)
1829         }
1830
1831         parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerId+".")
1832         if tmperr != nil {
1833                 log.Fatalf("%s: %v", containerId, tmperr)
1834         }
1835
1836         cr.parentTemp = parentTemp
1837         cr.statInterval = *statInterval
1838         cr.cgroupRoot = *cgroupRoot
1839         cr.expectCgroupParent = *cgroupParent
1840         cr.enableNetwork = *enableNetwork
1841         cr.networkMode = *networkMode
1842         if *cgroupParentSubsystem != "" {
1843                 p := findCgroup(*cgroupParentSubsystem)
1844                 cr.setCgroupParent = p
1845                 cr.expectCgroupParent = p
1846         }
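             // (Illustrative: with -cgroup-parent-subsystem=memory,
             // findCgroup returns the memory cgroup this crunch-run
             // process belongs to, and the container's cgroup is
             // expected to be created under it.)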
1847
1848         runerr := cr.Run()
1849
1850         if *memprofile != "" {
1851                 f, err := os.Create(*memprofile)
1852                 if err != nil {
1853                         log.Printf("could not create memory profile: %v", err)
1854                 } else {
1855                         runtime.GC() // get up-to-date statistics
1856                         if err := pprof.WriteHeapProfile(f); err != nil {
1857                                 log.Printf("could not write memory profile: %v", err)
1858                         }
1859                         if closeerr := f.Close(); closeerr != nil {
1860                                 log.Printf("closing memprofile file: %v", closeerr)
1861                         }
1862                 }
1863         }
1864
1865         if runerr != nil {
1866                 log.Fatalf("%s: %v", containerId, runerr)
1867         }
1868 }