14406: Copy remote blocks to local storage during MarshalManifest.
services/crunch-run/crunchrun.go (arvados.git)
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 package main
6
7 import (
8         "bytes"
9         "encoding/json"
10         "errors"
11         "flag"
12         "fmt"
13         "io"
14         "io/ioutil"
15         "log"
16         "os"
17         "os/exec"
18         "os/signal"
19         "path"
20         "path/filepath"
21         "regexp"
22         "runtime"
23         "runtime/pprof"
24         "sort"
25         "strings"
26         "sync"
27         "syscall"
28         "time"
29
30         "git.curoverse.com/arvados.git/lib/crunchstat"
31         "git.curoverse.com/arvados.git/sdk/go/arvados"
32         "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
33         "git.curoverse.com/arvados.git/sdk/go/keepclient"
34         "git.curoverse.com/arvados.git/sdk/go/manifest"
35         "github.com/shirou/gopsutil/process"
36         "golang.org/x/net/context"
37
38         dockertypes "github.com/docker/docker/api/types"
39         dockercontainer "github.com/docker/docker/api/types/container"
40         dockernetwork "github.com/docker/docker/api/types/network"
41         dockerclient "github.com/docker/docker/client"
42 )
43
44 var version = "dev"
45
46 // IArvadosClient is the minimal set of Arvados API methods used by crunch-run.
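// A *arvadosclient.ArvadosClient satisfies this interface; tests can
// substitute a lightweight fake.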
47 type IArvadosClient interface {
48         Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
49         Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
50         Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
51         Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
52         CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
53         Discovery(key string) (interface{}, error)
54 }
55
56 // ErrCancelled is the error returned when the container is cancelled.
57 var ErrCancelled = errors.New("Cancelled")
58
59 // IKeepClient is the minimal set of Keep API methods used by crunch-run.
60 type IKeepClient interface {
61         PutB(buf []byte) (string, int, error)
62         ReadAt(locator string, p []byte, off int) (int, error)
63         ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
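        // LocalLocator ensures the identified block is stored in the local
        // cluster's Keep store, copying it from a remote cluster if
        // necessary, and returns an equivalent local locator.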
64         LocalLocator(locator string) (string, error)
65         ClearBlockCache()
66 }
67
68 // NewLogWriter is a factory function to create a new log writer.
69 type NewLogWriter func(name string) (io.WriteCloser, error)
70
// RunArvMount is a factory function to start arv-mount with the given
// command line arguments, using the given token.
71 type RunArvMount func(args []string, tok string) (*exec.Cmd, error)
72 
// MkTempDir is a factory function to create a temporary directory
// (same signature as ioutil.TempDir).
73 type MkTempDir func(string, string) (string, error)
74
75 // ThinDockerClient is the minimal Docker client interface used by crunch-run.
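// The stock Docker SDK client (package dockerclient, imported above)
// provides all of these methods; declaring this narrow interface lets
// tests substitute a stub without talking to a real Docker daemon.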
76 type ThinDockerClient interface {
77         ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error)
78         ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
79                 networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
80         ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
81         ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
82         ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
83         ContainerInspect(ctx context.Context, id string) (dockertypes.ContainerJSON, error)
84         ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
85         ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
86         ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
87 }
88
// PsProcess is the minimal process-inspection interface used by
// CheckContainerd.
89 type PsProcess interface {
90         CmdlineSlice() ([]string, error)
91 }
92
93 // ContainerRunner is the main stateful struct used for a single execution of a
94 // container.
95 type ContainerRunner struct {
96         Docker          ThinDockerClient
97         client          *arvados.Client
98         ArvClient       IArvadosClient
99         Kc              IKeepClient
100         Container       arvados.Container
101         ContainerConfig dockercontainer.Config
102         HostConfig      dockercontainer.HostConfig
103         token           string
104         ContainerID     string
105         ExitCode        *int
106         NewLogWriter    NewLogWriter
107         loggingDone     chan bool
108         CrunchLog       *ThrottledLogger
109         Stdout          io.WriteCloser
110         Stderr          io.WriteCloser
111         logUUID         string
112         logMtx          sync.Mutex
113         LogCollection   arvados.CollectionFileSystem
114         LogsPDH         *string
115         RunArvMount     RunArvMount
116         MkTempDir       MkTempDir
117         ArvMount        *exec.Cmd
118         ArvMountPoint   string
119         HostOutputDir   string
120         Binds           []string
121         Volumes         map[string]struct{}
122         OutputPDH       *string
123         SigChan         chan os.Signal
124         ArvMountExit    chan error
125         SecretMounts    map[string]arvados.Mount
126         MkArvClient     func(token string) (IArvadosClient, error)
127         finalState      string
128         parentTemp      string
129
130         ListProcesses func() ([]PsProcess, error)
131
132         statLogger       io.WriteCloser
133         statReporter     *crunchstat.Reporter
134         hoststatLogger   io.WriteCloser
135         hoststatReporter *crunchstat.Reporter
136         statInterval     time.Duration
137         cgroupRoot       string
138         // What we expect the container's cgroup parent to be.
139         expectCgroupParent string
140         // What we tell docker to use as the container's cgroup
141         // parent. Note: Ideally we would use the same field for both
142         // expectCgroupParent and setCgroupParent, and just make it
143         // default to "docker". However, when using docker < 1.10 with
144         // systemd, specifying a non-empty cgroup parent (even the
145         // default value "docker") hits a docker bug
146         // (https://github.com/docker/docker/issues/17126). Using two
147         // separate fields makes it possible to use the "expect cgroup
148         // parent to be X" feature even on sites where the "specify
149         // cgroup parent" feature breaks.
150         setCgroupParent string
151
152         cStateLock sync.Mutex
153         cCancelled bool // StopContainer() invoked
154         cRemoved   bool // docker confirmed the container no longer exists
155
156         enableNetwork   string // one of "default" or "always"
157         networkMode     string // passed through to HostConfig.NetworkMode
158         arvMountLog     *ThrottledLogger
159         checkContainerd time.Duration
160
161         containerWatchdogInterval time.Duration
162 }
163
164 // setupSignals sets up signal handling to gracefully terminate the underlying
165 // Docker container and update state when receiving a TERM, INT or QUIT signal.
166 func (runner *ContainerRunner) setupSignals() {
167         runner.SigChan = make(chan os.Signal, 1)
168         signal.Notify(runner.SigChan, syscall.SIGTERM)
169         signal.Notify(runner.SigChan, syscall.SIGINT)
170         signal.Notify(runner.SigChan, syscall.SIGQUIT)
171
172         go func(sig chan os.Signal) {
173                 for s := range sig {
174                         runner.stop(s)
175                 }
176         }(runner.SigChan)
177 }
178
179 // stop the underlying Docker container.
180 func (runner *ContainerRunner) stop(sig os.Signal) {
181         runner.cStateLock.Lock()
182         defer runner.cStateLock.Unlock()
183         if sig != nil {
184                 runner.CrunchLog.Printf("caught signal: %v", sig)
185         }
186         if runner.ContainerID == "" {
187                 return
188         }
189         runner.cCancelled = true
190         runner.CrunchLog.Printf("removing container")
191         err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
192         if err != nil {
193                 runner.CrunchLog.Printf("error removing container: %s", err)
194         }
195         if err == nil || strings.Contains(err.Error(), "No such container: "+runner.ContainerID) {
196                 runner.cRemoved = true
197         }
198 }
199
200 var errorBlacklist = []string{
201         "(?ms).*[Cc]annot connect to the Docker daemon.*",
202         "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
203         "(?ms).*grpc: the connection is unavailable.*",
204 }
205 var brokenNodeHook = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
206
207 func (runner *ContainerRunner) runBrokenNodeHook() {
208         if *brokenNodeHook == "" {
209                 runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
210         } else {
211                 runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
212                 // run killme script
213                 c := exec.Command(*brokenNodeHook)
214                 c.Stdout = runner.CrunchLog
215                 c.Stderr = runner.CrunchLog
216                 err := c.Run()
217                 if err != nil {
218                         runner.CrunchLog.Printf("Error running broken node hook: %v", err)
219                 }
220         }
221 }
222
223 func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
224         for _, d := range errorBlacklist {
225                 if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
226                         runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
227                         runner.runBrokenNodeHook()
228                         return true
229                 }
230         }
231         return false
232 }
233
234 // LoadImage determines the docker image id from the container record and
235 // checks if it is available in the local Docker image store.  If not, it loads
236 // the image from Keep.
237 func (runner *ContainerRunner) LoadImage() (err error) {
238
239         runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
240
241         var collection arvados.Collection
242         err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
243         if err != nil {
244                 return fmt.Errorf("While getting container image collection: %v", err)
245         }
246         manifest := manifest.Manifest{Text: collection.ManifestText}
247         var img, imageID string
248         for ms := range manifest.StreamIter() {
249                 img = ms.FileStreamSegments[0].Name
250                 if !strings.HasSuffix(img, ".tar") {
251                         return fmt.Errorf("First file in the container image collection does not end in .tar")
252                 }
253                 imageID = img[:len(img)-4]
254         }
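        // For example, a collection whose manifest lists the single file
        // "9ae44d5792468c58bcf85ce7353c7027.tar" (hypothetical hash) yields
        // the Docker image ID "9ae44d5792468c58bcf85ce7353c7027".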
255
256         runner.CrunchLog.Printf("Using Docker image id '%s'", imageID)
257
258         _, _, err = runner.Docker.ImageInspectWithRaw(context.TODO(), imageID)
259         if err != nil {
260                 runner.CrunchLog.Print("Loading Docker image from keep")
261
262                 var readCloser io.ReadCloser
263                 readCloser, err = runner.Kc.ManifestFileReader(manifest, img)
264                 if err != nil {
265                         return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
266                 }
267
268                 response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
269                 if err != nil {
270                         return fmt.Errorf("While loading container image into Docker: %v", err)
271                 }
272
273                 defer response.Body.Close()
274                 rbody, err := ioutil.ReadAll(response.Body)
275                 if err != nil {
276                         return fmt.Errorf("Reading response to image load: %v", err)
277                 }
278                 runner.CrunchLog.Printf("Docker response: %s", rbody)
279         } else {
280                 runner.CrunchLog.Print("Docker image is available")
281         }
282
283         runner.ContainerConfig.Image = imageID
284
285         runner.Kc.ClearBlockCache()
286
287         return nil
288 }
289
290 func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
291         c = exec.Command("arv-mount", arvMountCmd...)
292
293         // Copy our environment, but override ARVADOS_API_TOKEN with
294         // the container auth token.
295         c.Env = nil
296         for _, s := range os.Environ() {
297                 if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
298                         c.Env = append(c.Env, s)
299                 }
300         }
301         c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
302
303         w, err := runner.NewLogWriter("arv-mount")
304         if err != nil {
305                 return nil, err
306         }
307         runner.arvMountLog = NewThrottledLogger(w)
308         c.Stdout = runner.arvMountLog
309         c.Stderr = runner.arvMountLog
310
311         runner.CrunchLog.Printf("Running %v", c.Args)
312
313         err = c.Start()
314         if err != nil {
315                 return nil, err
316         }
317
318         statReadme := make(chan bool)
319         runner.ArvMountExit = make(chan error)
320
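        // The first goroutine below treats the appearance of
        // <mountpoint>/by_id/README as the signal that the FUSE mount is
        // serving requests, i.e., as the readiness probe for arv-mount.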
321         keepStatting := true
322         go func() {
323                 for keepStatting {
324                         time.Sleep(100 * time.Millisecond)
                        // Use a local variable: this goroutine must not
                        // write the enclosing function's err.
325                         _, statErr := os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
326                         if statErr == nil {
327                                 keepStatting = false
328                                 statReadme <- true
329                         }
330                 }
331                 close(statReadme)
332         }()
333
334         go func() {
335                 mnterr := c.Wait()
336                 if mnterr != nil {
337                         runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
338                 }
339                 runner.ArvMountExit <- mnterr
340                 close(runner.ArvMountExit)
341         }()
342
343         select {
344         case <-statReadme:
345                 break
346         case err := <-runner.ArvMountExit:
347                 runner.ArvMount = nil
348                 keepStatting = false
349                 return nil, err
350         }
351
352         return c, nil
353 }
354
355 func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
356         if runner.ArvMountPoint == "" {
357                 runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
358         }
359         return
360 }
361
362 func copyfile(src string, dst string) (err error) {
363         srcfile, err := os.Open(src)
364         if err != nil {
365                 return
366         }
367         // Close the source file on all return paths.
368         defer srcfile.Close()
369 
370         err = os.MkdirAll(path.Dir(dst), 0777)
371         if err != nil {
372                 return
373         }
374 
375         dstfile, err := os.Create(dst)
376         if err != nil {
377                 return
378         }
379         _, err = io.Copy(dstfile, srcfile)
380 
381         // Close the destination even if the copy failed, but report
382         // the copy error first.
383         err2 := dstfile.Close()
384         if err != nil {
385                 return
386         }
387         if err2 != nil {
388                 return err2
389         }
390         return nil
391 }
392
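// SetupMounts starts arv-mount and prepares the docker bind mounts for
// every entry in the container's Mounts and SecretMounts maps. A
// hypothetical mount set illustrating the kinds handled below:
//
//      "/keep/in": {"kind": "collection", "portable_data_hash": "..."}
//      "/tmp":     {"kind": "tmp"}
//      "stdout":   {"kind": "file", "path": "/var/spool/cwl/out.txt"}
//
// Collection mounts are exposed through the arv-mount FUSE mount, tmp
// mounts are backed by host temp dirs, and json/text content is staged
// into local files.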
393 func (runner *ContainerRunner) SetupMounts() (err error) {
394         err = runner.SetupArvMountPoint("keep")
395         if err != nil {
396                 return fmt.Errorf("While creating keep mount temp dir: %v", err)
397         }
398
399         token, err := runner.ContainerToken()
400         if err != nil {
401                 return fmt.Errorf("could not get container token: %s", err)
402         }
403
404         pdhOnly := true
405         tmpcount := 0
406         arvMountCmd := []string{
407                 "--foreground",
408                 "--allow-other",
409                 "--read-write",
410                 fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
411
412         if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
413                 arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
414         }
415
416         collectionPaths := []string{}
417         runner.Binds = nil
418         runner.Volumes = make(map[string]struct{})
419         needCertMount := true
420         type copyFile struct {
421                 src  string
422                 bind string
423         }
424         var copyFiles []copyFile
425
426         var binds []string
427         for bind := range runner.Container.Mounts {
428                 binds = append(binds, bind)
429         }
430         for bind := range runner.SecretMounts {
431                 if _, ok := runner.Container.Mounts[bind]; ok {
432                         return fmt.Errorf("Secret mount %q conflicts with regular mount", bind)
433                 }
434                 if runner.SecretMounts[bind].Kind != "json" &&
435                         runner.SecretMounts[bind].Kind != "text" {
436                         return fmt.Errorf("Secret mount %q type is %q but only 'json' and 'text' are permitted.",
437                                 bind, runner.SecretMounts[bind].Kind)
438                 }
439                 binds = append(binds, bind)
440         }
441         sort.Strings(binds)
442
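        // Process binds in sorted order so a parent mount (such as the
        // output path) is handled before anything nested beneath it; in
        // particular, runner.HostOutputDir is set before any copyFiles
        // entries that refer to it are computed.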
443         for _, bind := range binds {
444                 mnt, ok := runner.Container.Mounts[bind]
445                 if !ok {
446                         mnt = runner.SecretMounts[bind]
447                 }
448                 if bind == "stdout" || bind == "stderr" {
449                         // Is it a "file" mount kind?
450                         if mnt.Kind != "file" {
451                                 return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind)
452                         }
453
454                         // Does path start with OutputPath?
455                         prefix := runner.Container.OutputPath
456                         if !strings.HasSuffix(prefix, "/") {
457                                 prefix += "/"
458                         }
459                         if !strings.HasPrefix(mnt.Path, prefix) {
460                                 return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
461                         }
462                 }
463
464                 if bind == "stdin" {
465                         // Is it a "collection" mount kind?
466                         if mnt.Kind != "collection" && mnt.Kind != "json" {
467                                 return fmt.Errorf("Unsupported mount kind '%s' for stdin. Only 'collection' or 'json' are supported.", mnt.Kind)
468                         }
469                 }
470
471                 if bind == "/etc/arvados/ca-certificates.crt" {
472                         needCertMount = false
473                 }
474
475                 if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
476                         if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
477                                 return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
478                         }
479                 }
480
481                 switch {
482                 case mnt.Kind == "collection" && bind != "stdin":
483                         var src string
484                         if mnt.UUID != "" && mnt.PortableDataHash != "" {
485                                 return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
486                         }
487                         if mnt.UUID != "" {
488                                 if mnt.Writable {
489                                         return fmt.Errorf("Writing to existing collections is not currently permitted.")
490                                 }
491                                 pdhOnly = false
492                                 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
493                         } else if mnt.PortableDataHash != "" {
494                                 if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
495                                         return fmt.Errorf("Can never write to a collection specified by portable data hash")
496                                 }
497                                 idx := strings.Index(mnt.PortableDataHash, "/")
498                                 if idx > 0 {
499                                         mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
500                                         mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
501                                         runner.Container.Mounts[bind] = mnt
502                                 }
503                                 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
504                                 if mnt.Path != "" && mnt.Path != "." {
505                                         if strings.HasPrefix(mnt.Path, "./") {
506                                                 mnt.Path = mnt.Path[2:]
507                                         } else if strings.HasPrefix(mnt.Path, "/") {
508                                                 mnt.Path = mnt.Path[1:]
509                                         }
510                                         src += "/" + mnt.Path
511                                 }
512                         } else {
513                                 src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
514                                 arvMountCmd = append(arvMountCmd, "--mount-tmp")
515                                 arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
516                                 tmpcount++
517                         }
518                         if mnt.Writable {
519                                 if bind == runner.Container.OutputPath {
520                                         runner.HostOutputDir = src
521                                         runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
522                                 } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
523                                         copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
524                                 } else {
525                                         runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
526                                 }
527                         } else {
528                                 runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
529                         }
530                         collectionPaths = append(collectionPaths, src)
531
532                 case mnt.Kind == "tmp":
533                         var tmpdir string
534                         tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
535                         if err != nil {
536                                 return fmt.Errorf("While creating mount temp dir: %v", err)
537                         }
538                         st, staterr := os.Stat(tmpdir)
539                         if staterr != nil {
540                                 return fmt.Errorf("While Stat on temp dir: %v", staterr)
541                         }
542                         err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
543                         if staterr != nil {
544                 if err != nil {
545                         }
546                         runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
547                         if bind == runner.Container.OutputPath {
548                                 runner.HostOutputDir = tmpdir
549                         }
550
551                 case mnt.Kind == "json" || mnt.Kind == "text":
552                         var filedata []byte
553                         if mnt.Kind == "json" {
554                                 filedata, err = json.Marshal(mnt.Content)
555                                 if err != nil {
556                                         return fmt.Errorf("encoding json data: %v", err)
557                                 }
558                         } else {
559                                 text, ok := mnt.Content.(string)
560                                 if !ok {
561                                         return fmt.Errorf("content for mount %q must be a string", bind)
562                                 }
563                                 filedata = []byte(text)
564                         }
565
566                         tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
567                         if err != nil {
568                                 return fmt.Errorf("creating temp dir: %v", err)
569                         }
570                         tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
571                         err = ioutil.WriteFile(tmpfn, filedata, 0444)
572                         if err != nil {
573                                 return fmt.Errorf("writing temp file: %v", err)
574                         }
575                         if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
576                                 copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
577                         } else {
578                                 runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
579                         }
580
581                 case mnt.Kind == "git_tree":
582                         tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
583                         if err != nil {
584                                 return fmt.Errorf("creating temp dir: %v", err)
585                         }
586                         err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
587                         if err != nil {
588                                 return err
589                         }
590                         runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
591                 }
592         }
593
594         if runner.HostOutputDir == "" {
595                 return fmt.Errorf("Output path does not correspond to a writable mount point")
596         }
597
598         if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
599                 for _, certfile := range arvadosclient.CertFiles {
600                         _, err := os.Stat(certfile)
601                         if err == nil {
602                                 runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
603                                 break
604                         }
605                 }
606         }
607
608         if pdhOnly {
609                 arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
610         } else {
611                 arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
612         }
613         arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
614
615         runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
616         if err != nil {
617                 return fmt.Errorf("While trying to start arv-mount: %v", err)
618         }
619
620         for _, p := range collectionPaths {
621                 _, err = os.Stat(p)
622                 if err != nil {
623                         return fmt.Errorf("While checking that input files exist: %v", err)
624                 }
625         }
626
627         for _, cp := range copyFiles {
628                 st, err := os.Stat(cp.src)
629                 if err != nil {
630                         return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
631                 }
632                 if st.IsDir() {
633                         err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
634                                 if walkerr != nil {
635                                         return walkerr
636                                 }
637                                 target := path.Join(cp.bind, walkpath[len(cp.src):])
638                                 if walkinfo.Mode().IsRegular() {
639                                         copyerr := copyfile(walkpath, target)
640                                         if copyerr != nil {
641                                                 return copyerr
642                                         }
643                                         return os.Chmod(target, walkinfo.Mode()|0777)
644                                 } else if walkinfo.Mode().IsDir() {
645                                         mkerr := os.MkdirAll(target, 0777)
646                                         if mkerr != nil {
647                                                 return mkerr
648                                         }
649                                         return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
650                                 } else {
651                                         return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
652                                 }
653                         })
654                 } else if st.Mode().IsRegular() {
655                         err = copyfile(cp.src, cp.bind)
656                         if err == nil {
657                                 err = os.Chmod(cp.bind, st.Mode()|0777)
658                         }
659                 }
660                 if err != nil {
661                         return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
662                 }
663         }
664
665         return nil
666 }
667
668 func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
669         // Handle docker log protocol
670         // https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
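        //
        // Each frame begins with an 8-byte header:
        //   byte 0     stream type (1=stdout, 2=stderr)
        //   bytes 1-3  padding (zero)
        //   bytes 4-7  payload size, big-endian uint32
        // followed by that many payload bytes, copied below to the
        // corresponding log stream.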
671         defer close(runner.loggingDone)
672
673         header := make([]byte, 8)
674         var err error
675         for err == nil {
676                 _, err = io.ReadAtLeast(containerReader, header, 8)
677                 if err != nil {
678                         if err == io.EOF {
679                                 err = nil
680                         }
681                         break
682                 }
683                 readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
684                 if header[0] == 1 {
685                         // stdout
686                         _, err = io.CopyN(runner.Stdout, containerReader, readsize)
687                 } else {
688                         // stderr
689                         _, err = io.CopyN(runner.Stderr, containerReader, readsize)
690                 }
691         }
692
693         if err != nil {
694                 runner.CrunchLog.Printf("error reading docker logs: %v", err)
695         }
696
697         err = runner.Stdout.Close()
698         if err != nil {
699                 runner.CrunchLog.Printf("error closing stdout logs: %v", err)
700         }
701
702         err = runner.Stderr.Close()
703         if err != nil {
704                 runner.CrunchLog.Printf("error closing stderr logs: %v", err)
705         }
706
707         if runner.statReporter != nil {
708                 runner.statReporter.Stop()
709                 err = runner.statLogger.Close()
710                 if err != nil {
711                         runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
712                 }
713         }
714 }
715
716 func (runner *ContainerRunner) stopHoststat() error {
717         if runner.hoststatReporter == nil {
718                 return nil
719         }
720         runner.hoststatReporter.Stop()
721         err := runner.hoststatLogger.Close()
722         if err != nil {
723                 return fmt.Errorf("error closing hoststat logs: %v", err)
724         }
725         return nil
726 }
727
728 func (runner *ContainerRunner) startHoststat() error {
729         w, err := runner.NewLogWriter("hoststat")
730         if err != nil {
731                 return err
732         }
733         runner.hoststatLogger = NewThrottledLogger(w)
734         runner.hoststatReporter = &crunchstat.Reporter{
735                 Logger:     log.New(runner.hoststatLogger, "", 0),
736                 CgroupRoot: runner.cgroupRoot,
737                 PollPeriod: runner.statInterval,
738         }
739         runner.hoststatReporter.Start()
740         return nil
741 }
742
743 func (runner *ContainerRunner) startCrunchstat() error {
744         w, err := runner.NewLogWriter("crunchstat")
745         if err != nil {
746                 return err
747         }
748         runner.statLogger = NewThrottledLogger(w)
749         runner.statReporter = &crunchstat.Reporter{
750                 CID:          runner.ContainerID,
751                 Logger:       log.New(runner.statLogger, "", 0),
752                 CgroupParent: runner.expectCgroupParent,
753                 CgroupRoot:   runner.cgroupRoot,
754                 PollPeriod:   runner.statInterval,
755                 TempDir:      runner.parentTemp,
756         }
757         runner.statReporter.Start()
758         return nil
759 }
760
761 type infoCommand struct {
762         label string
763         cmd   []string
764 }
765
766 // LogHostInfo logs info about the current host, for debugging and
767 // accounting purposes. Although it's logged as "node-info", this is
768 // about the environment where crunch-run is actually running, which
769 // might differ from what's described in the node record (see
770 // LogNodeRecord).
771 func (runner *ContainerRunner) LogHostInfo() (err error) {
772         w, err := runner.NewLogWriter("node-info")
773         if err != nil {
774                 return
775         }
776
777         commands := []infoCommand{
778                 {
779                         label: "Host Information",
780                         cmd:   []string{"uname", "-a"},
781                 },
782                 {
783                         label: "CPU Information",
784                         cmd:   []string{"cat", "/proc/cpuinfo"},
785                 },
786                 {
787                         label: "Memory Information",
788                         cmd:   []string{"cat", "/proc/meminfo"},
789                 },
790                 {
791                         label: "Disk Space",
792                         cmd:   []string{"df", "-m", "/", os.TempDir()},
793                 },
794                 {
795                         label: "Disk INodes",
796                         cmd:   []string{"df", "-i", "/", os.TempDir()},
797                 },
798         }
799
800         // Run commands with informational output to be logged.
801         for _, command := range commands {
802                 fmt.Fprintln(w, command.label)
803                 cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
804                 cmd.Stdout = w
805                 cmd.Stderr = w
806                 if err := cmd.Run(); err != nil {
807                         err = fmt.Errorf("While running command %q: %v", command.cmd, err)
808                         fmt.Fprintln(w, err)
809                         return err
810                 }
811                 fmt.Fprintln(w, "")
812         }
813
814         err = w.Close()
815         if err != nil {
816                 return fmt.Errorf("While closing node-info logs: %v", err)
817         }
818         return nil
819 }
820
821 // LogContainerRecord gets and saves the raw JSON container record from the API server
822 func (runner *ContainerRunner) LogContainerRecord() error {
823         logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
824         if !logged && err == nil {
825                 err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
826         }
827         return err
828 }
829
830 // LogNodeRecord logs arvados#node record corresponding to the current host.
831 func (runner *ContainerRunner) LogNodeRecord() error {
832         hostname := os.Getenv("SLURMD_NODENAME")
833         if hostname == "" {
834                 hostname, _ = os.Hostname()
835         }
836         _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
837                 // The "info" field has admin-only info when obtained
838                 // with a privileged token, and should not be logged.
839                 node, ok := resp.(map[string]interface{})
840                 if ok {
841                         delete(node, "info")
842                 }
843         })
844         return err
845 }
846
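// logAPIResponse fetches the first item matching params from the given API
// path, optionally passes it through munge (e.g. to redact fields), and
// writes it pretty-printed to <label>.json in the log collection.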
847 func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
848         writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
849         if err != nil {
850                 return false, err
851         }
852         w := &ArvLogWriter{
853                 ArvClient:     runner.ArvClient,
854                 UUID:          runner.Container.UUID,
855                 loggingStream: label,
856                 writeCloser:   writer,
857         }
858
859         reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
860         if err != nil {
861                 return false, fmt.Errorf("error getting %s record: %v", label, err)
862         }
863         defer reader.Close()
864
865         dec := json.NewDecoder(reader)
866         dec.UseNumber()
867         var resp map[string]interface{}
868         if err = dec.Decode(&resp); err != nil {
869                 return false, fmt.Errorf("error decoding %s list response: %v", label, err)
870         }
871         items, ok := resp["items"].([]interface{})
872         if !ok {
873                 return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
874         } else if len(items) < 1 {
875                 return false, nil
876         }
877         if munge != nil {
878                 munge(items[0])
879         }
880         // Re-encode it using indentation to improve readability
881         enc := json.NewEncoder(w)
882         enc.SetIndent("", "    ")
883         if err = enc.Encode(items[0]); err != nil {
884                 return false, fmt.Errorf("error logging %s record: %v", label, err)
885         }
886         err = w.Close()
887         if err != nil {
888                 return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
889         }
890         return true, nil
891 }
892
893 // AttachStreams connects the docker container stdin, stdout and stderr logs
894 // to the Arvados logger which logs to Keep and the API server logs table.
895 func (runner *ContainerRunner) AttachStreams() (err error) {
896
897         runner.CrunchLog.Print("Attaching container streams")
898
899         // If stdin mount is provided, attach it to the docker container
900         var stdinRdr arvados.File
901         var stdinJson []byte
902         if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
903                 if stdinMnt.Kind == "collection" {
904                         var stdinColl arvados.Collection
905                         collId := stdinMnt.UUID
906                         if collId == "" {
907                                 collId = stdinMnt.PortableDataHash
908                         }
909                         err = runner.ArvClient.Get("collections", collId, nil, &stdinColl)
910                         if err != nil {
911                                 return fmt.Errorf("While getting stdin collection: %v", err)
912                         }
913
914                         stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path)
915                         if os.IsNotExist(err) {
916                                 return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
917                         } else if err != nil {
918                                 return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
919                         }
920                 } else if stdinMnt.Kind == "json" {
921                         stdinJson, err = json.Marshal(stdinMnt.Content)
922                         if err != nil {
923                                 return fmt.Errorf("While encoding stdin json data: %v", err)
924                         }
925                 }
926         }
927
928         stdinUsed := stdinRdr != nil || len(stdinJson) != 0
929         response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID,
930                 dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
931         if err != nil {
932                 return fmt.Errorf("While attaching container stdout/stderr streams: %v", err)
933         }
934
935         runner.loggingDone = make(chan bool)
936
937         if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
938                 stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path)
939                 if err != nil {
940                         return err
941                 }
942                 runner.Stdout = stdoutFile
943         } else if w, err := runner.NewLogWriter("stdout"); err != nil {
944                 return err
945         } else {
946                 runner.Stdout = NewThrottledLogger(w)
947         }
948
949         if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
950                 stderrFile, err := runner.getStdoutFile(stderrMnt.Path)
951                 if err != nil {
952                         return err
953                 }
954                 runner.Stderr = stderrFile
955         } else if w, err := runner.NewLogWriter("stderr"); err != nil {
956                 return err
957         } else {
958                 runner.Stderr = NewThrottledLogger(w)
959         }
960
961         if stdinRdr != nil {
962                 go func() {
963                         _, err := io.Copy(response.Conn, stdinRdr)
964                         if err != nil {
965                                 runner.CrunchLog.Printf("While writing stdin collection to docker container: %v", err)
966                                 runner.stop(nil)
967                         }
968                         stdinRdr.Close()
969                         response.CloseWrite()
970                 }()
971         } else if len(stdinJson) != 0 {
972                 go func() {
973                         _, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
974                         if err != nil {
975                                 runner.CrunchLog.Printf("While writing stdin json to docker container: %v", err)
976                                 runner.stop(nil)
977                         }
978                         response.CloseWrite()
979                 }()
980         }
981
982         go runner.ProcessDockerAttach(response.Reader)
983
984         return nil
985 }
986
987 func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
988         stdoutPath := mntPath[len(runner.Container.OutputPath):]
989         index := strings.LastIndex(stdoutPath, "/")
990         if index > 0 {
991                 subdirs := stdoutPath[:index]
992                 if subdirs != "" {
993                         st, err := os.Stat(runner.HostOutputDir)
994                         if err != nil {
995                                 return nil, fmt.Errorf("While Stat on temp dir: %v", err)
996                         }
997                         stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
998                         err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
999                         if err != nil {
1000                                 return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
1001                         }
1002                 }
1003         }
1004         stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
1005         if err != nil {
1006                 return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
1007         }
1008
1009         return stdoutFile, nil
1010 }
1011
1012 // CreateContainer creates the docker container.
1013 func (runner *ContainerRunner) CreateContainer() error {
1014         runner.CrunchLog.Print("Creating Docker container")
1015
1016         runner.ContainerConfig.Cmd = runner.Container.Command
1017         if runner.Container.Cwd != "." {
1018                 runner.ContainerConfig.WorkingDir = runner.Container.Cwd
1019         }
1020
1021         for k, v := range runner.Container.Environment {
1022                 runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v)
1023         }
1024
1025         runner.ContainerConfig.Volumes = runner.Volumes
1026
1027         maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
1028         if maxRAM < 4*1024*1024 {
1029                 // Docker daemon won't let you set a limit less than 4 MiB
1030                 maxRAM = 4 * 1024 * 1024
1031         }
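        // Both Memory and MemorySwap (the memory+swap limit) are set to the
        // RAM constraint below, so the container cannot use swap; NanoCPUs
        // expresses the VCPU constraint in units of 1e-9 CPUs.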
1032         runner.HostConfig = dockercontainer.HostConfig{
1033                 Binds: runner.Binds,
1034                 LogConfig: dockercontainer.LogConfig{
1035                         Type: "none",
1036                 },
1037                 Resources: dockercontainer.Resources{
1038                         CgroupParent: runner.setCgroupParent,
1039                         NanoCPUs:     int64(runner.Container.RuntimeConstraints.VCPUs) * 1000000000,
1040                         Memory:       maxRAM, // RAM
1041                         MemorySwap:   maxRAM, // RAM+swap
1042                         KernelMemory: maxRAM, // kernel portion
1043                 },
1044         }
1045
1046         if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
1047                 tok, err := runner.ContainerToken()
1048                 if err != nil {
1049                         return err
1050                 }
1051                 runner.ContainerConfig.Env = append(runner.ContainerConfig.Env,
1052                         "ARVADOS_API_TOKEN="+tok,
1053                         "ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"),
1054                         "ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"),
1055                 )
1056                 runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
1057         } else {
1058                 if runner.enableNetwork == "always" {
1059                         runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
1060                 } else {
1061                         runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
1062                 }
1063         }
1064
1065         _, stdinUsed := runner.Container.Mounts["stdin"]
1066         runner.ContainerConfig.OpenStdin = stdinUsed
1067         runner.ContainerConfig.StdinOnce = stdinUsed
1068         runner.ContainerConfig.AttachStdin = stdinUsed
1069         runner.ContainerConfig.AttachStdout = true
1070         runner.ContainerConfig.AttachStderr = true
1071
1072         createdBody, err := runner.Docker.ContainerCreate(context.TODO(), &runner.ContainerConfig, &runner.HostConfig, nil, runner.Container.UUID)
1073         if err != nil {
1074                 return fmt.Errorf("While creating container: %v", err)
1075         }
1076
1077         runner.ContainerID = createdBody.ID
1078
1079         return runner.AttachStreams()
1080 }
1081
1082 // StartContainer starts the docker container created by CreateContainer.
1083 func (runner *ContainerRunner) StartContainer() error {
1084         runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID)
1085         runner.cStateLock.Lock()
1086         defer runner.cStateLock.Unlock()
1087         if runner.cCancelled {
1088                 return ErrCancelled
1089         }
1090         err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
1091                 dockertypes.ContainerStartOptions{})
1092         if err != nil {
1093                 var advice string
1094                 if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
1095                         advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
1096                 }
1097                 return fmt.Errorf("could not start container: %v%s", err, advice)
1098         }
1099         return nil
1100 }
1101
1102 // CheckContainerd checks if "containerd" is present in the process list.
1103 func (runner *ContainerRunner) CheckContainerd() error {
1104         if runner.checkContainerd == 0 {
1105                 return nil
1106         }
1107         p, _ := runner.ListProcesses()
1108         for _, i := range p {
1109                 e, _ := i.CmdlineSlice()
1110                 if len(e) > 0 {
1111                         if strings.Contains(e[0], "containerd") {
1112                                 return nil
1113                         }
1114                 }
1115         }
1116
1117         // Not found
1118         runner.runBrokenNodeHook()
1119         runner.stop(nil)
1120         return fmt.Errorf("'containerd' not found in process list.")
1121 }
1122
1123 // WaitFinish waits for the container to terminate, captures the exit
1124 // code, and closes the stdout/stderr logging.
1125 func (runner *ContainerRunner) WaitFinish() error {
1126         var runTimeExceeded <-chan time.Time
1127         runner.CrunchLog.Print("Waiting for container to finish")
1128
1129         waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
1130         arvMountExit := runner.ArvMountExit
1131         if timeout := runner.Container.SchedulingParameters.MaxRunTime; timeout > 0 {
1132                 runTimeExceeded = time.After(time.Duration(timeout) * time.Second)
1133         }
1134
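        // Watchdog: if dockerd crashes, the wait channels above may never
        // fire. Poll ContainerInspect periodically and give up if the
        // container has vanished or docker stops answering.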
1135         containerGone := make(chan struct{})
1136         go func() {
1137                 defer close(containerGone)
1138                 if runner.containerWatchdogInterval < 1 {
1139                         runner.containerWatchdogInterval = time.Minute
1140                 }
1141                 for range time.NewTicker(runner.containerWatchdogInterval).C {
1142                         ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(runner.containerWatchdogInterval))
1143                         ctr, err := runner.Docker.ContainerInspect(ctx, runner.ContainerID)
1144                         cancel()
1145                         runner.cStateLock.Lock()
1146                         done := runner.cRemoved || runner.ExitCode != nil
1147                         runner.cStateLock.Unlock()
1148                         if done {
1149                                 return
1150                         } else if err != nil {
1151                                 runner.CrunchLog.Printf("Error inspecting container: %s", err)
1152                                 runner.checkBrokenNode(err)
1153                                 return
1154                         } else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == "created") {
1155                                 runner.CrunchLog.Printf("Container is not running: State=%v", ctr.State)
1156                                 return
1157                         }
1158                 }
1159         }()
1160
1161         containerdGone := make(chan error)
1162         defer close(containerdGone)
1163         if runner.checkContainerd > 0 {
1164                 go func() {
1165                         ticker := time.NewTicker(time.Duration(runner.checkContainerd))
1166                         defer ticker.Stop()
1167                         for {
1168                                 select {
1169                                 case <-ticker.C:
1170                                         if ck := runner.CheckContainerd(); ck != nil {
1171                                                 containerdGone <- ck
1172                                                 return
1173                                         }
1174                                 case <-containerdGone:
1175                                         // Channel closed, quit goroutine
1176                                         return
1177                                 }
1178                         }
1179                 }()
1180         }
1181
1182         for {
1183                 select {
1184                 case waitBody := <-waitOk:
1185                         runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
1186                         code := int(waitBody.StatusCode)
1187                         runner.ExitCode = &code
1188
1189                         // wait for stdout/stderr to complete
1190                         <-runner.loggingDone
1191                         return nil
1192
1193                 case err := <-waitErr:
1194                         return fmt.Errorf("container wait: %v", err)
1195
1196                 case <-arvMountExit:
1197                         runner.CrunchLog.Printf("arv-mount exited while container is still running.  Stopping container.")
1198                         runner.stop(nil)
1199                         // arvMountExit will always be ready now that
1200                         // it's closed, but that doesn't interest us.
1201                         arvMountExit = nil
1202
1203                 case <-runTimeExceeded:
1204                         runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
1205                         runner.stop(nil)
1206                         runTimeExceeded = nil
1207
1208                 case <-containerGone:
1209                         return errors.New("docker client never returned status")
1210
1211                 case err := <-containerdGone:
1212                         return err
1213                 }
1214         }
1215 }
1216
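// updateLogs periodically saves the accumulated log collection and updates
// the container record's log field with the new portable data hash. Sending
// the process SIGUSR1 forces an immediate save.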
1217 func (runner *ContainerRunner) updateLogs() {
1218         ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
1219         defer ticker.Stop()
1220
1221         sigusr1 := make(chan os.Signal, 1)
1222         signal.Notify(sigusr1, syscall.SIGUSR1)
1223         defer signal.Stop(sigusr1)
1224
1225         saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
1226         saveAtSize := crunchLogUpdateSize
1227         var savedSize int64
1228         for {
1229                 select {
1230                 case <-ticker.C:
1231                 case <-sigusr1:
1232                         saveAtTime = time.Now()
1233                 }
1234                 runner.logMtx.Lock()
1235                 done := runner.LogsPDH != nil
1236                 runner.logMtx.Unlock()
1237                 if done {
1238                         return
1239                 }
1240                 size := runner.LogCollection.Size()
1241                 if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
1242                         continue
1243                 }
1244                 saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
1245                 saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
1246                 saved, err := runner.saveLogCollection(false)
1247                 if err != nil {
1248                         runner.CrunchLog.Printf("error updating log collection: %s", err)
1249                         continue
1250                 }
1251
1252                 var updated arvados.Container
1253                 err = runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1254                         "container": arvadosclient.Dict{"log": saved.PortableDataHash},
1255                 }, &updated)
1256                 if err != nil {
1257                         runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
1258                         continue
1259                 }
1260
1261                 savedSize = size
1262         }
1263 }
1264
1265 // CaptureOutput saves data from the container's output directory if
1266 // needed, and updates the container output accordingly.
1267 func (runner *ContainerRunner) CaptureOutput() error {
1268         if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
1269                 // Output may have been set directly by the container, so
1270                 // refresh the container record to check.
1271                 err := runner.ArvClient.Get("containers", runner.Container.UUID,
1272                         nil, &runner.Container)
1273                 if err != nil {
1274                         return err
1275                 }
1276                 if runner.Container.Output != "" {
1277                         // Container output is already set.
1278                         runner.OutputPDH = &runner.Container.Output
1279                         return nil
1280                 }
1281         }
1282
1283         txt, err := (&copier{
1284                 client:        runner.client,
1285                 arvClient:     runner.ArvClient,
1286                 keepClient:    runner.Kc,
1287                 hostOutputDir: runner.HostOutputDir,
1288                 ctrOutputDir:  runner.Container.OutputPath,
1289                 binds:         runner.Binds,
1290                 mounts:        runner.Container.Mounts,
1291                 secretMounts:  runner.SecretMounts,
1292                 logger:        runner.CrunchLog,
1293         }).Copy()
1294         if err != nil {
1295                 return err
1296         }
1297         if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
1298                 runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
1299                 fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.client, runner.Kc)
1300                 if err != nil {
1301                         return err
1302                 }
1303                 txt, err = fs.MarshalManifest(".")
1304                 if err != nil {
1305                         return err
1306                 }
1307         }
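        // Note: the collection created below is deliberately flagged
        // is_trashed (an inference from how the output is consumed): the
        // container output is referenced by portable data hash, so this
        // record only needs to persist the manifest text, not appear as a
        // browsable collection.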
1308         var resp arvados.Collection
1309         err = runner.ArvClient.Create("collections", arvadosclient.Dict{
1310                 "ensure_unique_name": true,
1311                 "collection": arvadosclient.Dict{
1312                         "is_trashed":    true,
1313                         "name":          "output for " + runner.Container.UUID,
1314                         "manifest_text": txt,
1315                 },
1316         }, &resp)
1317         if err != nil {
1318                 return fmt.Errorf("error creating output collection: %v", err)
1319         }
1320         runner.OutputPDH = &resp.PortableDataHash
1321         return nil
1322 }
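// Illustrative sketch of the remote-block handling in CaptureOutput
// above (all values are made up; not part of the build):
//
//	m := ". acbd18db4cc2f85cedef654fccc4a4d8+3+Rzzzzz-abcdef 0:3:foo\n"
//	re := regexp.MustCompile(` [0-9a-f]+\+\S*\+R`)
//	fmt.Println(len(re.FindAllStringIndex(m, -1))) // prints 1
//
// When such "+R" (remote cluster) hints are present, re-marshalling the
// collection with MarshalManifest copies the referenced blocks to local
// Keep storage so the saved manifest contains only local locators.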
1323
1324 func (runner *ContainerRunner) CleanupDirs() {
1325         if runner.ArvMount != nil {
1326                 var delay int64 = 8
1327                 umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
1328                 umount.Stdout = runner.CrunchLog
1329                 umount.Stderr = runner.CrunchLog
1330                 runner.CrunchLog.Printf("Running %v", umount.Args)
1331                 umnterr := umount.Start()
1332
1333                 if umnterr != nil {
1334                         runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
1335                 } else {
1336                         // If arv-mount --unmount gets stuck for any reason, we
1337                         // don't want to wait for it forever.  Do Wait() in a goroutine
1338                         // so it doesn't block crunch-run.
1339                         umountExit := make(chan error)
1340                         go func() {
1341                                 mnterr := umount.Wait()
1342                                 if mnterr != nil {
1343                                         runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
1344                                 }
1345                                 umountExit <- mnterr
1346                         }()
1347
1348                         for again := true; again; {
1349                                 again = false
1350                                 select {
1351                                 case <-umountExit:
1352                                         umount = nil
1353                                         again = true
1354                                 case <-runner.ArvMountExit:
1355                                         // arv-mount exited; leave again false so the loop ends.
1356                                 case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
1357                                         runner.CrunchLog.Printf("Timed out waiting for unmount")
1358                                         if umount != nil {
1359                                                 umount.Process.Kill()
1360                                         }
1361                                         runner.ArvMount.Process.Kill()
1362                                 }
1363                         }
1364                 }
1365         }
1366
1367         if runner.ArvMountPoint != "" {
1368                 if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
1369                         runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
1370                 }
1371         }
1372
1373         if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
1374                 runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
1375         }
1376 }
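// The unmount handling above runs a blocking Wait() in a goroutine and
// races it against a timeout. A minimal standalone sketch of the same
// pattern (hypothetical helper, not part of the build):
//
//	func waitWithTimeout(cmd *exec.Cmd, d time.Duration) error {
//		done := make(chan error, 1)
//		go func() { done <- cmd.Wait() }()
//		select {
//		case err := <-done:
//			return err
//		case <-time.After(d):
//			cmd.Process.Kill()
//			return <-done // Wait returns once the process has been killed
//		}
//	}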
1377
1378 // CommitLogs posts the collection containing the final container logs.
1379 func (runner *ContainerRunner) CommitLogs() error {
1380         func() {
1381                 // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
1382                 runner.cStateLock.Lock()
1383                 defer runner.cStateLock.Unlock()
1384
1385                 runner.CrunchLog.Print(runner.finalState)
1386
1387                 if runner.arvMountLog != nil {
1388                         runner.arvMountLog.Close()
1389                 }
1390                 runner.CrunchLog.Close()
1391
1392                 // Closing CrunchLog above allows the buffered log data to be
1393                 // committed to Keep. Re-open the crunch log with ArvClient so any
1394                 // further errors encountered while shutting down (such as failing
1395                 // to write the log collection to Keep!) can still be reported.
1396                 runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
1397                         ArvClient:     runner.ArvClient,
1398                         UUID:          runner.Container.UUID,
1399                         loggingStream: "crunch-run",
1400                         writeCloser:   nil,
1401                 })
1402                 runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
1403         }()
1404
1405         if runner.LogsPDH != nil {
1406                 // If we have already assigned something to LogsPDH,
1407                 // we must be closing the re-opened log, which won't
1408                 // end up getting attached to the container record and
1409                 // therefore doesn't need to be saved as a collection
1410                 // -- it exists only to send logs to other channels.
1411                 return nil
1412         }
1413         saved, err := runner.saveLogCollection(true)
1414         if err != nil {
1415                 return fmt.Errorf("error saving log collection: %s", err)
1416         }
1417         runner.logMtx.Lock()
1418         defer runner.logMtx.Unlock()
1419         runner.LogsPDH = &saved.PortableDataHash
1420         return nil
1421 }
1422
1423 func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
1424         runner.logMtx.Lock()
1425         defer runner.logMtx.Unlock()
1426         if runner.LogsPDH != nil {
1427                 // Already finalized.
1428                 return
1429         }
1430         mt, err := runner.LogCollection.MarshalManifest(".")
1431         if err != nil {
1432                 err = fmt.Errorf("error creating log manifest: %v", err)
1433                 return
1434         }
1435         updates := arvadosclient.Dict{
1436                 "name":          "logs for " + runner.Container.UUID,
1437                 "manifest_text": mt,
1438         }
1439         if final {
1440                 updates["is_trashed"] = true
1441         } else {
1442                 exp := time.Now().Add(crunchLogUpdatePeriod * 24)
1443                 updates["trash_at"] = exp
1444                 updates["delete_at"] = exp
1445         }
1446         reqBody := arvadosclient.Dict{"collection": updates}
1447         if runner.logUUID == "" {
1448                 reqBody["ensure_unique_name"] = true
1449                 err = runner.ArvClient.Create("collections", reqBody, &response)
1450         } else {
1451                 err = runner.ArvClient.Update("collections", runner.logUUID, reqBody, &response)
1452         }
1453         if err != nil {
1454                 return
1455         }
1456         runner.logUUID = response.UUID
1457         return
1458 }
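// Illustrative request body produced by saveLogCollection for a non-final
// save (all field values are made up):
//
//	{
//	  "ensure_unique_name": true,
//	  "collection": {
//	    "name": "logs for zzzzz-dz642-0123456789abcde",
//	    "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:crunch-run.txt\n",
//	    "trash_at": "2019-05-01T12:00:00Z",
//	    "delete_at": "2019-05-01T12:00:00Z"
//	  }
//	}
//
// A final save replaces trash_at/delete_at with "is_trashed": true.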
1459
1460 // UpdateContainerRunning updates the container state to "Running"
1461 func (runner *ContainerRunner) UpdateContainerRunning() error {
1462         runner.cStateLock.Lock()
1463         defer runner.cStateLock.Unlock()
1464         if runner.cCancelled {
1465                 return ErrCancelled
1466         }
1467         return runner.ArvClient.Update("containers", runner.Container.UUID,
1468                 arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
1469 }
1470
1471 // ContainerToken returns the api_token the container (and any
1472 // arv-mount processes) are allowed to use.
1473 func (runner *ContainerRunner) ContainerToken() (string, error) {
1474         if runner.token != "" {
1475                 return runner.token, nil
1476         }
1477
1478         var auth arvados.APIClientAuthorization
1479         err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
1480         if err != nil {
1481                 return "", err
1482         }
1483         runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
1484         return runner.token, nil
1485 }
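// The returned token uses the Arvados "v2" format: "v2/" + the API client
// authorization UUID + "/" + the token secret + "/" + the container UUID,
// the last component tying the token to this container. Made-up example:
//
//	v2/zzzzz-gj3su-0123456789abcde/abcdefghijklmnopqrstuvwxyz012345/zzzzz-dz642-0123456789abcde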
1486
1487 // UpdateContainerFinal updates the container record state on the API
1488 // server to "Complete" or "Cancelled".
1489 func (runner *ContainerRunner) UpdateContainerFinal() error {
1490         update := arvadosclient.Dict{}
1491         update["state"] = runner.finalState
1492         if runner.LogsPDH != nil {
1493                 update["log"] = *runner.LogsPDH
1494         }
1495         if runner.finalState == "Complete" {
1496                 if runner.ExitCode != nil {
1497                         update["exit_code"] = *runner.ExitCode
1498                 }
1499                 if runner.OutputPDH != nil {
1500                         update["output"] = *runner.OutputPDH
1501                 }
1502         }
1503         return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
1504 }
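// Illustrative update body for a successful run (hash values made up):
//
//	{"container": {"state": "Complete", "log": "f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0+123", "exit_code": 0, "output": "e5e5e5e5e5e5e5e5e5e5e5e5e5e5e5e5+456"}}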
1505
1506 // IsCancelled returns the value of Cancelled, with goroutine safety.
1507 func (runner *ContainerRunner) IsCancelled() bool {
1508         runner.cStateLock.Lock()
1509         defer runner.cStateLock.Unlock()
1510         return runner.cCancelled
1511 }
1512
1513 // NewArvLogWriter creates an ArvLogWriter
1514 func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
1515         writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
1516         if err != nil {
1517                 return nil, err
1518         }
1519         return &ArvLogWriter{
1520                 ArvClient:     runner.ArvClient,
1521                 UUID:          runner.Container.UUID,
1522                 loggingStream: name,
1523                 writeCloser:   writer,
1524         }, nil
1525 }
1526
1527 // Run the full container lifecycle.
1528 func (runner *ContainerRunner) Run() (err error) {
1529         runner.CrunchLog.Printf("crunch-run %s started", version)
1530         runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
1531
1532         hostname, hosterr := os.Hostname()
1533         if hosterr != nil {
1534                 runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
1535         } else {
1536                 runner.CrunchLog.Printf("Executing on host '%s'", hostname)
1537         }
1538
1539         runner.finalState = "Queued"
1540
1541         defer func() {
1542                 runner.CleanupDirs()
1543
1544                 runner.CrunchLog.Printf("crunch-run finished")
1545                 runner.CrunchLog.Close()
1546         }()
1547
1548         defer func() {
1549                 // checkErr prints e (unless it's nil) and sets err to
1550                 // e (unless err is already non-nil). Thus, if err
1551                 // hasn't already been assigned when Run() returns,
1552                 // this cleanup func will cause Run() to return the
1553                 // first non-nil error that is passed to checkErr().
1554                 checkErr := func(errorIn string, e error) {
1555                         if e == nil {
1556                                 return
1557                         }
1558                         runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
1559                         if err == nil {
1560                                 err = e
1561                         }
1562                         if runner.finalState == "Complete" {
1563                                 // There was an error in the finalization.
1564                                 runner.finalState = "Cancelled"
1565                         }
1566                 }
1567
1568                 // Log the error encountered in Run(), if any
1569                 checkErr("Run", err)
1570
1571                 if runner.finalState == "Queued" {
1572                         runner.UpdateContainerFinal()
1573                         return
1574                 }
1575
1576                 if runner.IsCancelled() {
1577                         runner.finalState = "Cancelled"
1578                         // but don't return yet -- we still want to
1579                         // capture partial output and write logs
1580                 }
1581
1582                 checkErr("CaptureOutput", runner.CaptureOutput())
1583                 checkErr("stopHoststat", runner.stopHoststat())
1584                 checkErr("CommitLogs", runner.CommitLogs())
1585                 checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
1586         }()
1587
1588         err = runner.fetchContainerRecord()
1589         if err != nil {
1590                 return
1591         }
1592         runner.setupSignals()
1593         err = runner.startHoststat()
1594         if err != nil {
1595                 return
1596         }
1597
1598         // Sanity check that containerd is running.
1599         err = runner.CheckContainerd()
1600         if err != nil {
1601                 return
1602         }
1603
1604         // check for and/or load image
1605         err = runner.LoadImage()
1606         if err != nil {
1607                 if !runner.checkBrokenNode(err) {
1608                         // Failed to load image but not due to a "broken node"
1609                         // condition, probably user error.
1610                         runner.finalState = "Cancelled"
1611                 }
1612                 err = fmt.Errorf("While loading container image: %v", err)
1613                 return
1614         }
1615
1616         // set up FUSE mount and binds
1617         err = runner.SetupMounts()
1618         if err != nil {
1619                 runner.finalState = "Cancelled"
1620                 err = fmt.Errorf("While setting up mounts: %v", err)
1621                 return
1622         }
1623
1624         err = runner.CreateContainer()
1625         if err != nil {
1626                 return
1627         }
1628         err = runner.LogHostInfo()
1629         if err != nil {
1630                 return
1631         }
1632         err = runner.LogNodeRecord()
1633         if err != nil {
1634                 return
1635         }
1636         err = runner.LogContainerRecord()
1637         if err != nil {
1638                 return
1639         }
1640
1641         if runner.IsCancelled() {
1642                 return
1643         }
1644
1645         err = runner.UpdateContainerRunning()
1646         if err != nil {
1647                 return
1648         }
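        // The container record now says Running, so if crunch-run stops
        // before WaitFinish succeeds, the correct final state is
        // Cancelled rather than Queued.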
1649         runner.finalState = "Cancelled"
1650
1651         err = runner.startCrunchstat()
1652         if err != nil {
1653                 return
1654         }
1655
1656         err = runner.StartContainer()
1657         if err != nil {
1658                 runner.checkBrokenNode(err)
1659                 return
1660         }
1661
1662         err = runner.WaitFinish()
1663         if err == nil && !runner.IsCancelled() {
1664                 runner.finalState = "Complete"
1665         }
1666         return
1667 }
1668
1669 // Fetch the current container record (uuid = runner.Container.UUID)
1670 // into runner.Container.
1671 func (runner *ContainerRunner) fetchContainerRecord() error {
1672         reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
1673         if err != nil {
1674                 return fmt.Errorf("error fetching container record: %v", err)
1675         }
1676         defer reader.Close()
1677
1678         dec := json.NewDecoder(reader)
1679         dec.UseNumber()
1680         err = dec.Decode(&runner.Container)
1681         if err != nil {
1682                 return fmt.Errorf("error decoding container record: %v", err)
1683         }
1684
1685         var sm struct {
1686                 SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
1687         }
1688
1689         containerToken, err := runner.ContainerToken()
1690         if err != nil {
1691                 return fmt.Errorf("error getting container token: %v", err)
1692         }
1693
1694         containerClient, err := runner.MkArvClient(containerToken)
1695         if err != nil {
1696                 return fmt.Errorf("error creating container API client: %v", err)
1697         }
1698
1699         err = containerClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
1700         if err != nil {
1701                 if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
1702                         return fmt.Errorf("error fetching secret_mounts: %v", err)
1703                 }
1704                 // ok && apierr.HttpStatusCode == 404, which means
1705                 // secret_mounts isn't supported by this API server.
1706         }
1707         runner.SecretMounts = sm.SecretMounts
1708
1709         return nil
1710 }
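// Illustrative secret_mounts payload returned by the API call above
// (path and content are hypothetical):
//
//	{"secret_mounts": {"/secrets/api_key": {"kind": "text", "content": "s3cr3t"}}}
//
// Secret mounts are fetched with the container token and kept separate
// from regular mounts, which keeps their contents out of the container
// record logged by LogContainerRecord.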
1711
1712 // NewContainerRunner creates a new container runner.
1713 func NewContainerRunner(client *arvados.Client, api IArvadosClient, kc IKeepClient, docker ThinDockerClient, containerUUID string) (*ContainerRunner, error) {
1714         cr := &ContainerRunner{
1715                 client:    client,
1716                 ArvClient: api,
1717                 Kc:        kc,
1718                 Docker:    docker,
1719         }
1720         cr.NewLogWriter = cr.NewArvLogWriter
1721         cr.RunArvMount = cr.ArvMountCmd
1722         cr.MkTempDir = ioutil.TempDir
1723         cr.ListProcesses = func() ([]PsProcess, error) {
1724                 pr, err := process.Processes()
1725                 if err != nil {
1726                         return nil, err
1727                 }
1728                 ps := make([]PsProcess, len(pr))
1729                 for i, j := range pr {
1730                         ps[i] = j
1731                 }
1732                 return ps, nil
1733         }
1734         cr.MkArvClient = func(token string) (IArvadosClient, error) {
1735                 cl, err := arvadosclient.MakeArvadosClient()
1736                 if err != nil {
1737                         return nil, err
1738                 }
1739                 cl.ApiToken = token
1740                 return cl, nil
1741         }
1742         var err error
1743         cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.client, cr.Kc)
1744         if err != nil {
1745                 return nil, err
1746         }
1747         cr.Container.UUID = containerUUID
1748         w, err := cr.NewLogWriter("crunch-run")
1749         if err != nil {
1750                 return nil, err
1751         }
1752         cr.CrunchLog = NewThrottledLogger(w)
1753         cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
1754
1755         loadLogThrottleParams(api)
1756         go cr.updateLogs()
1757
1758         return cr, nil
1759 }
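// Minimal usage sketch mirroring main() below (error handling elided;
// assumes working API, Keep, and Docker clients):
//
//	cr, _ := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, docker, containerId)
//	cr.parentTemp, _ = cr.MkTempDir("", "crunch-run."+containerId+".")
//	err := cr.Run()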
1760
1761 func main() {
1762         statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
1763         cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
1764         cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
1765         cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
1766         caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
1767         enableNetwork := flag.String("container-enable-networking", "default",
1768                 `Specify if networking should be enabled for container.  One of 'default', 'always':
1769         default: only enable networking if container requests it.
1770         always:  containers always have networking enabled
1771         `)
1772         networkMode := flag.String("container-network-mode", "default",
1773                 `Set networking mode for container.  Corresponds to Docker network mode (--net).
1774         `)
1775         memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
1776         getVersion := flag.Bool("version", false, "Print version information and exit.")
1777         checkContainerd := flag.Duration("check-containerd", 60*time.Second, "Periodic check if (docker-)containerd is running (use 0s to disable).")
1778         flag.Parse()
1779
1780         // Print version information if requested
1781         if *getVersion {
1782                 fmt.Printf("crunch-run %s\n", version)
1783                 return
1784         }
1785
1786         log.Printf("crunch-run %s started", version)
1787
1788         containerId := flag.Arg(0)
1789
1790         if *caCertsPath != "" {
1791                 arvadosclient.CertFiles = []string{*caCertsPath}
1792         }
1793
1794         api, err := arvadosclient.MakeArvadosClient()
1795         if err != nil {
1796                 log.Fatalf("%s: %v", containerId, err)
1797         }
1798         api.Retries = 8
1799
1800         kc, kcerr := keepclient.MakeKeepClient(api)
1801         if kcerr != nil {
1802                 log.Fatalf("%s: %v", containerId, kcerr)
1803         }
1804         kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
1805         kc.Retries = 4
1806
1807         // API version 1.21 corresponds to Docker 1.9, which is currently the
1808         // minimum version we want to support.
1809         docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
1810
1811         cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, docker, containerId)
1812         if err != nil {
1813                 log.Fatal(err)
1814         }
1815         if dockererr != nil {
1816                 cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
1817                 cr.checkBrokenNode(dockererr)
1818                 cr.CrunchLog.Close()
1819                 os.Exit(1)
1820         }
1821
1822         parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerId+".")
1823         if tmperr != nil {
1824                 log.Fatalf("%s: %v", containerId, tmperr)
1825         }
1826
1827         cr.parentTemp = parentTemp
1828         cr.statInterval = *statInterval
1829         cr.cgroupRoot = *cgroupRoot
1830         cr.expectCgroupParent = *cgroupParent
1831         cr.enableNetwork = *enableNetwork
1832         cr.networkMode = *networkMode
1833         cr.checkContainerd = *checkContainerd
1834         if *cgroupParentSubsystem != "" {
1835                 p := findCgroup(*cgroupParentSubsystem)
1836                 cr.setCgroupParent = p
1837                 cr.expectCgroupParent = p
1838         }
1839
1840         runerr := cr.Run()
1841
1842         if *memprofile != "" {
1843                 f, err := os.Create(*memprofile)
1844                 if err != nil {
1845                         log.Printf("could not create memory profile: %s", err)
1846                 } else {
1847                         runtime.GC() // get up-to-date statistics
1848                         if err := pprof.WriteHeapProfile(f); err != nil {
1849                                 log.Printf("could not write memory profile: %s", err)
1850                         }
1851                         if closeerr := f.Close(); closeerr != nil {
1852                                 log.Printf("closing memprofile file: %s", closeerr)
1853                         }
1854                 }
1855         }
1856
1857         if runerr != nil {
1858                 log.Fatalf("%s: %v", containerId, runerr)
1859         }
1860 }