services/crunch-run/crunchrun.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 package main
6
7 import (
8         "bytes"
9         "encoding/json"
10         "errors"
11         "flag"
12         "fmt"
13         "io"
14         "io/ioutil"
15         "log"
16         "os"
17         "os/exec"
18         "os/signal"
19         "path"
20         "path/filepath"
21         "regexp"
22         "runtime"
23         "runtime/pprof"
24         "sort"
25         "strings"
26         "sync"
27         "syscall"
28         "time"
29
30         "git.curoverse.com/arvados.git/lib/crunchstat"
31         "git.curoverse.com/arvados.git/sdk/go/arvados"
32         "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
33         "git.curoverse.com/arvados.git/sdk/go/keepclient"
34         "git.curoverse.com/arvados.git/sdk/go/manifest"
35         "github.com/shirou/gopsutil/process"
36         "golang.org/x/net/context"
37
38         dockertypes "github.com/docker/docker/api/types"
39         dockercontainer "github.com/docker/docker/api/types/container"
40         dockernetwork "github.com/docker/docker/api/types/network"
41         dockerclient "github.com/docker/docker/client"
42 )
43
44 var version = "dev"
45
46 // IArvadosClient is the minimal set of Arvados API methods used by crunch-run.
47 type IArvadosClient interface {
48         Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
49         Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
50         Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
51         Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
52         CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
53         Discovery(key string) (interface{}, error)
54 }
55
56 // ErrCancelled is the error returned when the container is cancelled.
57 var ErrCancelled = errors.New("Cancelled")
58
59 // IKeepClient is the minimal set of Keep API methods used by crunch-run.
60 type IKeepClient interface {
61         PutB(buf []byte) (string, int, error)
62         ReadAt(locator string, p []byte, off int) (int, error)
63         ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
64         ClearBlockCache()
65 }
66
67 // NewLogWriter is a factory function to create a new log writer.
68 type NewLogWriter func(name string) (io.WriteCloser, error)
69
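// RunArvMount is the signature of the function used to start arv-mount with
// a given set of arguments and a container token (see ArvMountCmd).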
70 type RunArvMount func(args []string, tok string) (*exec.Cmd, error)
71
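// MkTempDir is the signature of the function used to create temporary
// directories; it matches ioutil.TempDir.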
72 type MkTempDir func(string, string) (string, error)
73
74 // ThinDockerClient is the minimal Docker client interface used by crunch-run.
75 type ThinDockerClient interface {
76         ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error)
77         ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
78                 networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
79         ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
80         ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
81         ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
82         ContainerInspect(ctx context.Context, id string) (dockertypes.ContainerJSON, error)
83         ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
84         ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
85         ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
86 }
87
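// PsProcess is the minimal view of a process-list entry used when checking
// for a running containerd process (see CheckContainerd).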
88 type PsProcess interface {
89         CmdlineSlice() ([]string, error)
90 }
91
92 // ContainerRunner is the main stateful struct used for a single execution of a
93 // container.
94 type ContainerRunner struct {
95         Docker          ThinDockerClient
96         client          *arvados.Client
97         ArvClient       IArvadosClient
98         Kc              IKeepClient
99         Container       arvados.Container
100         ContainerConfig dockercontainer.Config
101         HostConfig      dockercontainer.HostConfig
102         token           string
103         ContainerID     string
104         ExitCode        *int
105         NewLogWriter    NewLogWriter
106         loggingDone     chan bool
107         CrunchLog       *ThrottledLogger
108         Stdout          io.WriteCloser
109         Stderr          io.WriteCloser
110         logUUID         string
111         logMtx          sync.Mutex
112         LogCollection   arvados.CollectionFileSystem
113         LogsPDH         *string
114         RunArvMount     RunArvMount
115         MkTempDir       MkTempDir
116         ArvMount        *exec.Cmd
117         ArvMountPoint   string
118         HostOutputDir   string
119         Binds           []string
120         Volumes         map[string]struct{}
121         OutputPDH       *string
122         SigChan         chan os.Signal
123         ArvMountExit    chan error
124         SecretMounts    map[string]arvados.Mount
125         MkArvClient     func(token string) (IArvadosClient, error)
126         finalState      string
127         parentTemp      string
128
129         ListProcesses func() ([]PsProcess, error)
130
131         statLogger       io.WriteCloser
132         statReporter     *crunchstat.Reporter
133         hoststatLogger   io.WriteCloser
134         hoststatReporter *crunchstat.Reporter
135         statInterval     time.Duration
136         cgroupRoot       string
137         // What we expect the container's cgroup parent to be.
138         expectCgroupParent string
139         // What we tell docker to use as the container's cgroup
140         // parent. Note: Ideally we would use the same field for both
141         // expectCgroupParent and setCgroupParent, and just make it
142         // default to "docker". However, when using docker < 1.10 with
143         // systemd, specifying a non-empty cgroup parent (even the
144         // default value "docker") hits a docker bug
145         // (https://github.com/docker/docker/issues/17126). Using two
146         // separate fields makes it possible to use the "expect cgroup
147         // parent to be X" feature even on sites where the "specify
148         // cgroup parent" feature breaks.
149         setCgroupParent string
150
151         cStateLock sync.Mutex
152         cCancelled bool // StopContainer() invoked
153         cRemoved   bool // docker confirmed the container no longer exists
154
155         enableNetwork   string // one of "default" or "always"
156         networkMode     string // passed through to HostConfig.NetworkMode
157         arvMountLog     *ThrottledLogger
158         checkContainerd time.Duration
159
160         containerWatchdogInterval time.Duration
161 }
162
163 // setupSignals sets up signal handling to gracefully terminate the underlying
164 // Docker container and update state when receiving a TERM, INT or QUIT signal.
165 func (runner *ContainerRunner) setupSignals() {
166         runner.SigChan = make(chan os.Signal, 1)
167         signal.Notify(runner.SigChan, syscall.SIGTERM)
168         signal.Notify(runner.SigChan, syscall.SIGINT)
169         signal.Notify(runner.SigChan, syscall.SIGQUIT)
170
171         go func(sig chan os.Signal) {
172                 for s := range sig {
173                         runner.stop(s)
174                 }
175         }(runner.SigChan)
176 }
177
178 // stop the underlying Docker container.
179 func (runner *ContainerRunner) stop(sig os.Signal) {
180         runner.cStateLock.Lock()
181         defer runner.cStateLock.Unlock()
182         if sig != nil {
183                 runner.CrunchLog.Printf("caught signal: %v", sig)
184         }
185         if runner.ContainerID == "" {
186                 return
187         }
188         runner.cCancelled = true
189         runner.CrunchLog.Printf("removing container")
190         err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
191         if err != nil {
192                 runner.CrunchLog.Printf("error removing container: %s", err)
193         }
194         if err == nil || strings.Contains(err.Error(), "No such container: "+runner.ContainerID) {
195                 runner.cRemoved = true
196         }
197 }
198
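// errorBlacklist matches error messages that indicate the node itself is
// unable to run containers (e.g., the Docker daemon is unreachable), as
// opposed to a problem with the container being run.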
199 var errorBlacklist = []string{
200         "(?ms).*[Cc]annot connect to the Docker daemon.*",
201         "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
202         "(?ms).*grpc: the connection is unavailable.*",
203 }
204 var brokenNodeHook = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
205
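// runBrokenNodeHook runs the script given by the -broken-node-hook flag, if
// any, sending its output to the crunch-run log.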
206 func (runner *ContainerRunner) runBrokenNodeHook() {
207         if *brokenNodeHook == "" {
208                 runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
209         } else {
210                 runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
211                 // run killme script
212                 c := exec.Command(*brokenNodeHook)
213                 c.Stdout = runner.CrunchLog
214                 c.Stderr = runner.CrunchLog
215                 err := c.Run()
216                 if err != nil {
217                         runner.CrunchLog.Printf("Error running broken node hook: %v", err)
218                 }
219         }
220 }
221
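// checkBrokenNode reports whether the given error matches one of the
// errorBlacklist patterns; if so, it also runs the broken node hook.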
222 func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
223         for _, d := range errorBlacklist {
224                 if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
225                         runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
226                         runner.runBrokenNodeHook()
227                         return true
228                 }
229         }
230         return false
231 }
232
233 // LoadImage determines the docker image id from the container record and
234 // checks if it is available in the local Docker image store.  If not, it loads
235 // the image from Keep.
236 func (runner *ContainerRunner) LoadImage() (err error) {
237
238         runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
239
240         var collection arvados.Collection
241         err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
242         if err != nil {
243                 return fmt.Errorf("While getting container image collection: %v", err)
244         }
245         manifest := manifest.Manifest{Text: collection.ManifestText}
246         var img, imageID string
247         for ms := range manifest.StreamIter() {
248                 img = ms.FileStreamSegments[0].Name
249                 if !strings.HasSuffix(img, ".tar") {
250                         return fmt.Errorf("First file in the container image collection does not end in .tar")
251                 }
252                 imageID = img[:len(img)-4]
253         }
254
255         runner.CrunchLog.Printf("Using Docker image id '%s'", imageID)
256
257         _, _, err = runner.Docker.ImageInspectWithRaw(context.TODO(), imageID)
258         if err != nil {
259                 runner.CrunchLog.Print("Loading Docker image from Keep")
260
261                 var readCloser io.ReadCloser
262                 readCloser, err = runner.Kc.ManifestFileReader(manifest, img)
263                 if err != nil {
264                         return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
265                 }
266
267                 response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
268                 if err != nil {
269                         return fmt.Errorf("While loading container image into Docker: %v", err)
270                 }
271
272                 defer response.Body.Close()
273                 rbody, err := ioutil.ReadAll(response.Body)
274                 if err != nil {
275                         return fmt.Errorf("Reading response to image load: %v", err)
276                 }
277                 runner.CrunchLog.Printf("Docker response: %s", rbody)
278         } else {
279                 runner.CrunchLog.Print("Docker image is available")
280         }
281
282         runner.ContainerConfig.Image = imageID
283
284         runner.Kc.ClearBlockCache()
285
286         return nil
287 }
288
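// ArvMountCmd starts arv-mount with the given arguments, using the container
// token for API access, and waits until the mount is usable (by_id/README
// appears) or arv-mount exits with an error.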
289 func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
290         c = exec.Command("arv-mount", arvMountCmd...)
291
292         // Copy our environment, but override ARVADOS_API_TOKEN with
293         // the container auth token.
294         c.Env = nil
295         for _, s := range os.Environ() {
296                 if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
297                         c.Env = append(c.Env, s)
298                 }
299         }
300         c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
301
302         w, err := runner.NewLogWriter("arv-mount")
303         if err != nil {
304                 return nil, err
305         }
306         runner.arvMountLog = NewThrottledLogger(w)
307         c.Stdout = runner.arvMountLog
308         c.Stderr = runner.arvMountLog
309
310         runner.CrunchLog.Printf("Running %v", c.Args)
311
312         err = c.Start()
313         if err != nil {
314                 return nil, err
315         }
316
317         statReadme := make(chan bool)
318         runner.ArvMountExit = make(chan error)
319
320         keepStatting := true
321         go func() {
322                 for keepStatting {
323                         time.Sleep(100 * time.Millisecond)
324                         _, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
325                         if err == nil {
326                                 keepStatting = false
327                                 statReadme <- true
328                         }
329                 }
330                 close(statReadme)
331         }()
332
333         go func() {
334                 mnterr := c.Wait()
335                 if mnterr != nil {
336                         runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
337                 }
338                 runner.ArvMountExit <- mnterr
339                 close(runner.ArvMountExit)
340         }()
341
342         select {
343         case <-statReadme:
344                 break
345         case err := <-runner.ArvMountExit:
346                 runner.ArvMount = nil
347                 keepStatting = false
348                 return nil, err
349         }
350
351         return c, nil
352 }
353
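// SetupArvMountPoint creates a temporary directory under parentTemp to use as
// the arv-mount mount point, unless one has already been set.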
354 func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
355         if runner.ArvMountPoint == "" {
356                 runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
357         }
358         return
359 }
360
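// copyfile copies the file at src to dst, creating dst's parent directories
// as needed.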
361 func copyfile(src string, dst string) (err error) {
362         srcfile, err := os.Open(src)
363         if err != nil {
364                 return
365         }
366
367         os.MkdirAll(path.Dir(dst), 0777)
368
369         dstfile, err := os.Create(dst)
370         if err != nil {
371                 return
372         }
373         _, err = io.Copy(dstfile, srcfile)
374         if err != nil {
375                 return
376         }
377
378         err = srcfile.Close()
379         err2 := dstfile.Close()
380
381         if err != nil {
382                 return
383         }
384
385         if err2 != nil {
386                 return err2
387         }
388
389         return nil
390 }
391
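// SetupMounts sets up the mounts described by the container record: it builds
// the arv-mount command line, creates temporary directories and docker bind
// mounts for collection, tmp, json/text and git_tree mounts, starts
// arv-mount, and stages writable files under the output path.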
392 func (runner *ContainerRunner) SetupMounts() (err error) {
393         err = runner.SetupArvMountPoint("keep")
394         if err != nil {
395                 return fmt.Errorf("While creating keep mount temp dir: %v", err)
396         }
397
398         token, err := runner.ContainerToken()
399         if err != nil {
400                 return fmt.Errorf("could not get container token: %s", err)
401         }
402
403         pdhOnly := true
404         tmpcount := 0
405         arvMountCmd := []string{
406                 "--foreground",
407                 "--allow-other",
408                 "--read-write",
409                 fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
410
411         if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
412                 arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
413         }
414
415         collectionPaths := []string{}
416         runner.Binds = nil
417         runner.Volumes = make(map[string]struct{})
418         needCertMount := true
419         type copyFile struct {
420                 src  string
421                 bind string
422         }
423         var copyFiles []copyFile
424
425         var binds []string
426         for bind := range runner.Container.Mounts {
427                 binds = append(binds, bind)
428         }
429         for bind := range runner.SecretMounts {
430                 if _, ok := runner.Container.Mounts[bind]; ok {
431                         return fmt.Errorf("Secret mount %q conflicts with regular mount", bind)
432                 }
433                 if runner.SecretMounts[bind].Kind != "json" &&
434                         runner.SecretMounts[bind].Kind != "text" {
435                         return fmt.Errorf("Secret mount %q type is %q but only 'json' and 'text' are permitted.",
436                                 bind, runner.SecretMounts[bind].Kind)
437                 }
438                 binds = append(binds, bind)
439         }
440         sort.Strings(binds)
441
442         for _, bind := range binds {
443                 mnt, ok := runner.Container.Mounts[bind]
444                 if !ok {
445                         mnt = runner.SecretMounts[bind]
446                 }
447                 if bind == "stdout" || bind == "stderr" {
448                         // Is it a "file" mount kind?
449                         if mnt.Kind != "file" {
450                                 return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind)
451                         }
452
453                         // Does path start with OutputPath?
454                         prefix := runner.Container.OutputPath
455                         if !strings.HasSuffix(prefix, "/") {
456                                 prefix += "/"
457                         }
458                         if !strings.HasPrefix(mnt.Path, prefix) {
459                                 return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
460                         }
461                 }
462
463                 if bind == "stdin" {
464                         // Is it a "collection" mount kind?
465                         if mnt.Kind != "collection" && mnt.Kind != "json" {
466                                 return fmt.Errorf("Unsupported mount kind '%s' for stdin. Only 'collection' or 'json' are supported.", mnt.Kind)
467                         }
468                 }
469
470                 if bind == "/etc/arvados/ca-certificates.crt" {
471                         needCertMount = false
472                 }
473
474                 if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
475                         if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
476                                 return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
477                         }
478                 }
479
480                 switch {
481                 case mnt.Kind == "collection" && bind != "stdin":
482                         var src string
483                         if mnt.UUID != "" && mnt.PortableDataHash != "" {
484                                 return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
485                         }
486                         if mnt.UUID != "" {
487                                 if mnt.Writable {
488                                         return fmt.Errorf("Writing to existing collections is not currently permitted.")
489                                 }
490                                 pdhOnly = false
491                                 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
492                         } else if mnt.PortableDataHash != "" {
493                                 if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
494                                         return fmt.Errorf("Can never write to a collection specified by portable data hash")
495                                 }
496                                 idx := strings.Index(mnt.PortableDataHash, "/")
497                                 if idx > 0 {
498                                         mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
499                                         mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
500                                         runner.Container.Mounts[bind] = mnt
501                                 }
502                                 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
503                                 if mnt.Path != "" && mnt.Path != "." {
504                                         if strings.HasPrefix(mnt.Path, "./") {
505                                                 mnt.Path = mnt.Path[2:]
506                                         } else if strings.HasPrefix(mnt.Path, "/") {
507                                                 mnt.Path = mnt.Path[1:]
508                                         }
509                                         src += "/" + mnt.Path
510                                 }
511                         } else {
512                                 src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
513                                 arvMountCmd = append(arvMountCmd, "--mount-tmp")
514                                 arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
515                                 tmpcount++
516                         }
517                         if mnt.Writable {
518                                 if bind == runner.Container.OutputPath {
519                                         runner.HostOutputDir = src
520                                         runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
521                                 } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
522                                         copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
523                                 } else {
524                                         runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
525                                 }
526                         } else {
527                                 runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
528                         }
529                         collectionPaths = append(collectionPaths, src)
530
531                 case mnt.Kind == "tmp":
532                         var tmpdir string
533                         tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
534                         if err != nil {
535                                 return fmt.Errorf("While creating mount temp dir: %v", err)
536                         }
537                         st, staterr := os.Stat(tmpdir)
538                         if staterr != nil {
539                                 return fmt.Errorf("While Stat on temp dir: %v", staterr)
540                         }
541                         err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
542                         if err != nil {
543                                 return fmt.Errorf("While Chmod temp dir: %v", err)
544                         }
545                         runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
546                         if bind == runner.Container.OutputPath {
547                                 runner.HostOutputDir = tmpdir
548                         }
549
550                 case mnt.Kind == "json" || mnt.Kind == "text":
551                         var filedata []byte
552                         if mnt.Kind == "json" {
553                                 filedata, err = json.Marshal(mnt.Content)
554                                 if err != nil {
555                                         return fmt.Errorf("encoding json data: %v", err)
556                                 }
557                         } else {
558                                 text, ok := mnt.Content.(string)
559                                 if !ok {
560                                         return fmt.Errorf("content for mount %q must be a string", bind)
561                                 }
562                                 filedata = []byte(text)
563                         }
564
565                         tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
566                         if err != nil {
567                                 return fmt.Errorf("creating temp dir: %v", err)
568                         }
569                         tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
570                         err = ioutil.WriteFile(tmpfn, filedata, 0444)
571                         if err != nil {
572                                 return fmt.Errorf("writing temp file: %v", err)
573                         }
574                         if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
575                                 copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
576                         } else {
577                                 runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
578                         }
579
580                 case mnt.Kind == "git_tree":
581                         tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
582                         if err != nil {
583                                 return fmt.Errorf("creating temp dir: %v", err)
584                         }
585                         err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
586                         if err != nil {
587                                 return err
588                         }
589                         runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
590                 }
591         }
592
593         if runner.HostOutputDir == "" {
594                 return fmt.Errorf("Output path does not correspond to a writable mount point")
595         }
596
597         if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
598                 for _, certfile := range arvadosclient.CertFiles {
599                         _, err := os.Stat(certfile)
600                         if err == nil {
601                                 runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
602                                 break
603                         }
604                 }
605         }
606
607         if pdhOnly {
608                 arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
609         } else {
610                 arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
611         }
612         arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
613
614         runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
615         if err != nil {
616                 return fmt.Errorf("While trying to start arv-mount: %v", err)
617         }
618
619         for _, p := range collectionPaths {
620                 _, err = os.Stat(p)
621                 if err != nil {
622                         return fmt.Errorf("While checking that input files exist: %v", err)
623                 }
624         }
625
626         for _, cp := range copyFiles {
627                 st, err := os.Stat(cp.src)
628                 if err != nil {
629                         return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
630                 }
631                 if st.IsDir() {
632                         err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
633                                 if walkerr != nil {
634                                         return walkerr
635                                 }
636                                 target := path.Join(cp.bind, walkpath[len(cp.src):])
637                                 if walkinfo.Mode().IsRegular() {
638                                         copyerr := copyfile(walkpath, target)
639                                         if copyerr != nil {
640                                                 return copyerr
641                                         }
642                                         return os.Chmod(target, walkinfo.Mode()|0777)
643                                 } else if walkinfo.Mode().IsDir() {
644                                         mkerr := os.MkdirAll(target, 0777)
645                                         if mkerr != nil {
646                                                 return mkerr
647                                         }
648                                         return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
649                                 } else {
650                                         return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
651                                 }
652                         })
653                 } else if st.Mode().IsRegular() {
654                         err = copyfile(cp.src, cp.bind)
655                         if err == nil {
656                                 err = os.Chmod(cp.bind, st.Mode()|0777)
657                         }
658                 }
659                 if err != nil {
660                         return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
661                 }
662         }
663
664         return nil
665 }
666
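// ProcessDockerAttach demultiplexes the docker attach stream, copying stdout
// and stderr frames to runner.Stdout and runner.Stderr, then closes both (and
// the crunchstat logger, if any) when the stream ends.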
667 func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
668         // Handle docker log protocol
669         // https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
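        // Each frame begins with an 8-byte header: byte 0 is the stream type
        // (1=stdout, 2=stderr) and bytes 4-7 are the payload size as a
        // big-endian uint32; the payload follows immediately.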
670         defer close(runner.loggingDone)
671
672         header := make([]byte, 8)
673         var err error
674         for err == nil {
675                 _, err = io.ReadAtLeast(containerReader, header, 8)
676                 if err != nil {
677                         if err == io.EOF {
678                                 err = nil
679                         }
680                         break
681                 }
682                 readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
683                 if header[0] == 1 {
684                         // stdout
685                         _, err = io.CopyN(runner.Stdout, containerReader, readsize)
686                 } else {
687                         // stderr
688                         _, err = io.CopyN(runner.Stderr, containerReader, readsize)
689                 }
690         }
691
692         if err != nil {
693                 runner.CrunchLog.Printf("error reading docker logs: %v", err)
694         }
695
696         err = runner.Stdout.Close()
697         if err != nil {
698                 runner.CrunchLog.Printf("error closing stdout logs: %v", err)
699         }
700
701         err = runner.Stderr.Close()
702         if err != nil {
703                 runner.CrunchLog.Printf("error closing stderr logs: %v", err)
704         }
705
706         if runner.statReporter != nil {
707                 runner.statReporter.Stop()
708                 err = runner.statLogger.Close()
709                 if err != nil {
710                         runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
711                 }
712         }
713 }
714
715 func (runner *ContainerRunner) stopHoststat() error {
716         if runner.hoststatReporter == nil {
717                 return nil
718         }
719         runner.hoststatReporter.Stop()
720         err := runner.hoststatLogger.Close()
721         if err != nil {
722                 return fmt.Errorf("error closing hoststat logs: %v", err)
723         }
724         return nil
725 }
726
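// startHoststat starts a crunchstat reporter that logs resource usage for the
// whole compute node to the hoststat log.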
727 func (runner *ContainerRunner) startHoststat() error {
728         w, err := runner.NewLogWriter("hoststat")
729         if err != nil {
730                 return err
731         }
732         runner.hoststatLogger = NewThrottledLogger(w)
733         runner.hoststatReporter = &crunchstat.Reporter{
734                 Logger:     log.New(runner.hoststatLogger, "", 0),
735                 CgroupRoot: runner.cgroupRoot,
736                 PollPeriod: runner.statInterval,
737         }
738         runner.hoststatReporter.Start()
739         return nil
740 }
741
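// startCrunchstat starts a crunchstat reporter that logs the container's
// resource usage to the crunchstat log.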
742 func (runner *ContainerRunner) startCrunchstat() error {
743         w, err := runner.NewLogWriter("crunchstat")
744         if err != nil {
745                 return err
746         }
747         runner.statLogger = NewThrottledLogger(w)
748         runner.statReporter = &crunchstat.Reporter{
749                 CID:          runner.ContainerID,
750                 Logger:       log.New(runner.statLogger, "", 0),
751                 CgroupParent: runner.expectCgroupParent,
752                 CgroupRoot:   runner.cgroupRoot,
753                 PollPeriod:   runner.statInterval,
754                 TempDir:      runner.parentTemp,
755         }
756         runner.statReporter.Start()
757         return nil
758 }
759
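// infoCommand is a labeled command whose output is included in the node-info
// log (see LogHostInfo).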
760 type infoCommand struct {
761         label string
762         cmd   []string
763 }
764
765 // LogHostInfo logs info about the current host, for debugging and
766 // accounting purposes. Although it's logged as "node-info", this is
767 // about the environment where crunch-run is actually running, which
768 // might differ from what's described in the node record (see
769 // LogNodeRecord).
770 func (runner *ContainerRunner) LogHostInfo() (err error) {
771         w, err := runner.NewLogWriter("node-info")
772         if err != nil {
773                 return
774         }
775
776         commands := []infoCommand{
777                 {
778                         label: "Host Information",
779                         cmd:   []string{"uname", "-a"},
780                 },
781                 {
782                         label: "CPU Information",
783                         cmd:   []string{"cat", "/proc/cpuinfo"},
784                 },
785                 {
786                         label: "Memory Information",
787                         cmd:   []string{"cat", "/proc/meminfo"},
788                 },
789                 {
790                         label: "Disk Space",
791                         cmd:   []string{"df", "-m", "/", os.TempDir()},
792                 },
793                 {
794                         label: "Disk INodes",
795                         cmd:   []string{"df", "-i", "/", os.TempDir()},
796                 },
797         }
798
799         // Run commands with informational output to be logged.
800         for _, command := range commands {
801                 fmt.Fprintln(w, command.label)
802                 cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
803                 cmd.Stdout = w
804                 cmd.Stderr = w
805                 if err := cmd.Run(); err != nil {
806                         err = fmt.Errorf("While running command %q: %v", command.cmd, err)
807                         fmt.Fprintln(w, err)
808                         return err
809                 }
810                 fmt.Fprintln(w, "")
811         }
812
813         err = w.Close()
814         if err != nil {
815                 return fmt.Errorf("While closing node-info logs: %v", err)
816         }
817         return nil
818 }
819
820 // LogContainerRecord gets and saves the raw JSON container record from the API server
821 func (runner *ContainerRunner) LogContainerRecord() error {
822         logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
823         if !logged && err == nil {
824                 err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
825         }
826         return err
827 }
828
829 // LogNodeRecord logs arvados#node record corresponding to the current host.
830 func (runner *ContainerRunner) LogNodeRecord() error {
831         hostname := os.Getenv("SLURMD_NODENAME")
832         if hostname == "" {
833                 hostname, _ = os.Hostname()
834         }
835         _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
836                 // The "info" field has admin-only info when obtained
837                 // with a privileged token, and should not be logged.
838                 node, ok := resp.(map[string]interface{})
839                 if ok {
840                         delete(node, "info")
841                 }
842         })
843         return err
844 }
845
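// logAPIResponse fetches the first record matching params from the given API
// path and writes it, pretty-printed, to label.json in the log collection.
// It returns logged=false if no matching record was found.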
846 func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
847         writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
848         if err != nil {
849                 return false, err
850         }
851         w := &ArvLogWriter{
852                 ArvClient:     runner.ArvClient,
853                 UUID:          runner.Container.UUID,
854                 loggingStream: label,
855                 writeCloser:   writer,
856         }
857
858         reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
859         if err != nil {
860                 return false, fmt.Errorf("error getting %s record: %v", label, err)
861         }
862         defer reader.Close()
863
864         dec := json.NewDecoder(reader)
865         dec.UseNumber()
866         var resp map[string]interface{}
867         if err = dec.Decode(&resp); err != nil {
868                 return false, fmt.Errorf("error decoding %s list response: %v", label, err)
869         }
870         items, ok := resp["items"].([]interface{})
871         if !ok {
872                 return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
873         } else if len(items) < 1 {
874                 return false, nil
875         }
876         if munge != nil {
877                 munge(items[0])
878         }
879         // Re-encode it using indentation to improve readability
880         enc := json.NewEncoder(w)
881         enc.SetIndent("", "    ")
882         if err = enc.Encode(items[0]); err != nil {
883                 return false, fmt.Errorf("error logging %s record: %v", label, err)
884         }
885         err = w.Close()
886         if err != nil {
887                 return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
888         }
889         return true, nil
890 }
891
892 // AttachStreams connects the docker container stdin, stdout and stderr logs
893 // to the Arvados logger which logs to Keep and the API server logs table.
894 func (runner *ContainerRunner) AttachStreams() (err error) {
895
896         runner.CrunchLog.Print("Attaching container streams")
897
898         // If stdin mount is provided, attach it to the docker container
899         var stdinRdr arvados.File
900         var stdinJson []byte
901         if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
902                 if stdinMnt.Kind == "collection" {
903                         var stdinColl arvados.Collection
904                         collId := stdinMnt.UUID
905                         if collId == "" {
906                                 collId = stdinMnt.PortableDataHash
907                         }
908                         err = runner.ArvClient.Get("collections", collId, nil, &stdinColl)
909                         if err != nil {
910                                 return fmt.Errorf("While getting stdin collection: %v", err)
911                         }
912
913                         stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path)
914                         if os.IsNotExist(err) {
915                                 return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
916                         } else if err != nil {
917                                 return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
918                         }
919                 } else if stdinMnt.Kind == "json" {
920                         stdinJson, err = json.Marshal(stdinMnt.Content)
921                         if err != nil {
922                                 return fmt.Errorf("While encoding stdin json data: %v", err)
923                         }
924                 }
925         }
926
927         stdinUsed := stdinRdr != nil || len(stdinJson) != 0
928         response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID,
929                 dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
930         if err != nil {
931                 return fmt.Errorf("While attaching container stdout/stderr streams: %v", err)
932         }
933
934         runner.loggingDone = make(chan bool)
935
936         if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
937                 stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path)
938                 if err != nil {
939                         return err
940                 }
941                 runner.Stdout = stdoutFile
942         } else if w, err := runner.NewLogWriter("stdout"); err != nil {
943                 return err
944         } else {
945                 runner.Stdout = NewThrottledLogger(w)
946         }
947
948         if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
949                 stderrFile, err := runner.getStdoutFile(stderrMnt.Path)
950                 if err != nil {
951                         return err
952                 }
953                 runner.Stderr = stderrFile
954         } else if w, err := runner.NewLogWriter("stderr"); err != nil {
955                 return err
956         } else {
957                 runner.Stderr = NewThrottledLogger(w)
958         }
959
960         if stdinRdr != nil {
961                 go func() {
962                         _, err := io.Copy(response.Conn, stdinRdr)
963                         if err != nil {
964                                 runner.CrunchLog.Printf("While writing stdin collection to docker container: %v", err)
965                                 runner.stop(nil)
966                         }
967                         stdinRdr.Close()
968                         response.CloseWrite()
969                 }()
970         } else if len(stdinJson) != 0 {
971                 go func() {
972                         _, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
973                         if err != nil {
974                                 runner.CrunchLog.Print("While writing stdin json to docker container %q", err)
975                                 runner.stop(nil)
976                         }
977                         response.CloseWrite()
978                 }()
979         }
980
981         go runner.ProcessDockerAttach(response.Reader)
982
983         return nil
984 }
985
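// getStdoutFile creates the file under HostOutputDir corresponding to the
// given stdout/stderr mount path (which must be inside the container's output
// path), creating intermediate directories as needed.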
986 func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
987         stdoutPath := mntPath[len(runner.Container.OutputPath):]
988         index := strings.LastIndex(stdoutPath, "/")
989         if index > 0 {
990                 subdirs := stdoutPath[:index]
991                 if subdirs != "" {
992                         st, err := os.Stat(runner.HostOutputDir)
993                         if err != nil {
994                                 return nil, fmt.Errorf("While Stat on temp dir: %v", err)
995                         }
996                         stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
997                         err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
998                         if err != nil {
999                                 return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
1000                         }
1001                 }
1002         }
1003         stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
1004         if err != nil {
1005                 return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
1006         }
1007
1008         return stdoutFile, nil
1009 }
1010
1011 // CreateContainer creates the docker container.
1012 func (runner *ContainerRunner) CreateContainer() error {
1013         runner.CrunchLog.Print("Creating Docker container")
1014
1015         runner.ContainerConfig.Cmd = runner.Container.Command
1016         if runner.Container.Cwd != "." {
1017                 runner.ContainerConfig.WorkingDir = runner.Container.Cwd
1018         }
1019
1020         for k, v := range runner.Container.Environment {
1021                 runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v)
1022         }
1023
1024         runner.ContainerConfig.Volumes = runner.Volumes
1025
1026         maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
1027         if maxRAM < 4*1024*1024 {
1028                 // Docker daemon won't let you set a limit less than 4 MiB
1029                 maxRAM = 4 * 1024 * 1024
1030         }
1031         runner.HostConfig = dockercontainer.HostConfig{
1032                 Binds: runner.Binds,
1033                 LogConfig: dockercontainer.LogConfig{
1034                         Type: "none",
1035                 },
1036                 Resources: dockercontainer.Resources{
1037                         CgroupParent: runner.setCgroupParent,
1038                         NanoCPUs:     int64(runner.Container.RuntimeConstraints.VCPUs) * 1000000000,
1039                         Memory:       maxRAM, // RAM
1040                         MemorySwap:   maxRAM, // RAM+swap
1041                         KernelMemory: maxRAM, // kernel portion
1042                 },
1043         }
1044
1045         if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
1046                 tok, err := runner.ContainerToken()
1047                 if err != nil {
1048                         return err
1049                 }
1050                 runner.ContainerConfig.Env = append(runner.ContainerConfig.Env,
1051                         "ARVADOS_API_TOKEN="+tok,
1052                         "ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"),
1053                         "ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"),
1054                 )
1055                 runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
1056         } else {
1057                 if runner.enableNetwork == "always" {
1058                         runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
1059                 } else {
1060                         runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
1061                 }
1062         }
1063
1064         _, stdinUsed := runner.Container.Mounts["stdin"]
1065         runner.ContainerConfig.OpenStdin = stdinUsed
1066         runner.ContainerConfig.StdinOnce = stdinUsed
1067         runner.ContainerConfig.AttachStdin = stdinUsed
1068         runner.ContainerConfig.AttachStdout = true
1069         runner.ContainerConfig.AttachStderr = true
1070
1071         createdBody, err := runner.Docker.ContainerCreate(context.TODO(), &runner.ContainerConfig, &runner.HostConfig, nil, runner.Container.UUID)
1072         if err != nil {
1073                 return fmt.Errorf("While creating container: %v", err)
1074         }
1075
1076         runner.ContainerID = createdBody.ID
1077
1078         return runner.AttachStreams()
1079 }
1080
1081 // StartContainer starts the docker container created by CreateContainer.
1082 func (runner *ContainerRunner) StartContainer() error {
1083         runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID)
1084         runner.cStateLock.Lock()
1085         defer runner.cStateLock.Unlock()
1086         if runner.cCancelled {
1087                 return ErrCancelled
1088         }
1089         err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
1090                 dockertypes.ContainerStartOptions{})
1091         if err != nil {
1092                 var advice string
1093                 if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
1094                         advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
1095                 }
1096                 return fmt.Errorf("could not start container: %v%s", err, advice)
1097         }
1098         return nil
1099 }
1100
1101 // CheckContainerd checks that a "containerd" process is present in the process list; if it is not, the broken node hook is run and the container is stopped.
1102 func (runner *ContainerRunner) CheckContainerd() error {
1103         if runner.checkContainerd == 0 {
1104                 return nil
1105         }
1106         p, _ := runner.ListProcesses()
1107         for _, i := range p {
1108                 e, _ := i.CmdlineSlice()
1109                 if len(e) > 0 {
1110                         if strings.Contains(e[0], "containerd") {
1111                                 return nil
1112                         }
1113                 }
1114         }
1115
1116         // Not found
1117         runner.runBrokenNodeHook()
1118         runner.stop(nil)
1119         return fmt.Errorf("'containerd' not found in process list.")
1120 }
1121
1122 // WaitFinish waits for the container to terminate, capture the exit code, and
1123 // close the stdout/stderr logging.
1124 func (runner *ContainerRunner) WaitFinish() error {
1125         var runTimeExceeded <-chan time.Time
1126         runner.CrunchLog.Print("Waiting for container to finish")
1127
1128         waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
1129         arvMountExit := runner.ArvMountExit
1130         if timeout := runner.Container.SchedulingParameters.MaxRunTime; timeout > 0 {
1131                 runTimeExceeded = time.After(time.Duration(timeout) * time.Second)
1132         }
1133
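        // Watchdog: periodically inspect the container; if docker reports it
        // is gone or no longer running, close containerGone so we stop
        // waiting for a status that will never arrive.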
1134         containerGone := make(chan struct{})
1135         go func() {
1136                 defer close(containerGone)
1137                 if runner.containerWatchdogInterval < 1 {
1138                         runner.containerWatchdogInterval = time.Minute
1139                 }
1140                 for range time.NewTicker(runner.containerWatchdogInterval).C {
1141                         ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(runner.containerWatchdogInterval))
1142                         ctr, err := runner.Docker.ContainerInspect(ctx, runner.ContainerID)
1143                         cancel()
1144                         runner.cStateLock.Lock()
1145                         done := runner.cRemoved || runner.ExitCode != nil
1146                         runner.cStateLock.Unlock()
1147                         if done {
1148                                 return
1149                         } else if err != nil {
1150                                 runner.CrunchLog.Printf("Error inspecting container: %s", err)
1151                                 runner.checkBrokenNode(err)
1152                                 return
1153                         } else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == "created") {
1154                                 runner.CrunchLog.Printf("Container is not running: State=%v", ctr.State)
1155                                 return
1156                         }
1157                 }
1158         }()
1159
1160         containerdGone := make(chan error)
1161         defer close(containerdGone)
1162         if runner.checkContainerd > 0 {
1163                 go func() {
1164                         ticker := time.NewTicker(time.Duration(runner.checkContainerd))
1165                         defer ticker.Stop()
1166                         for {
1167                                 select {
1168                                 case <-ticker.C:
1169                                         if ck := runner.CheckContainerd(); ck != nil {
1170                                                 containerdGone <- ck
1171                                                 return
1172                                         }
1173                                 case <-containerdGone:
1174                                         // Channel closed, quit goroutine
1175                                         return
1176                                 }
1177                         }
1178                 }()
1179         }
1180
1181         for {
1182                 select {
1183                 case waitBody := <-waitOk:
1184                         runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
1185                         code := int(waitBody.StatusCode)
1186                         runner.ExitCode = &code
1187
1188                         // wait for stdout/stderr to complete
1189                         <-runner.loggingDone
1190                         return nil
1191
1192                 case err := <-waitErr:
1193                         return fmt.Errorf("container wait: %v", err)
1194
1195                 case <-arvMountExit:
1196                         runner.CrunchLog.Printf("arv-mount exited while the container is still running. Stopping container.")
1197                         runner.stop(nil)
1198                         // arvMountExit will always be ready now that
1199                         // it's closed, but that doesn't interest us.
1200                         arvMountExit = nil
1201
1202                 case <-runTimeExceeded:
1203                         runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
1204                         runner.stop(nil)
1205                         runTimeExceeded = nil
1206
1207                 case <-containerGone:
1208                         return errors.New("docker client never returned status")
1209
1210                 case err := <-containerdGone:
1211                         return err
1212                 }
1213         }
1214 }
1215
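// updateLogs periodically saves the partial log collection and updates the
// container record's log field; SIGUSR1 prompts an early save. It returns
// once the final log collection has been saved (runner.LogsPDH is set).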
1216 func (runner *ContainerRunner) updateLogs() {
1217         ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
1218         defer ticker.Stop()
1219
1220         sigusr1 := make(chan os.Signal, 1)
1221         signal.Notify(sigusr1, syscall.SIGUSR1)
1222         defer signal.Stop(sigusr1)
1223
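             // Save the log collection when the update period elapses or the
             // logs grow by crunchLogUpdateSize, whichever comes first; SIGUSR1
             // forces a save on the next pass by moving the deadline to now.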
1224         saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
1225         saveAtSize := crunchLogUpdateSize
1226         var savedSize int64
1227         for {
1228                 select {
1229                 case <-ticker.C:
1230                 case <-sigusr1:
1231                         saveAtTime = time.Now()
1232                 }
1233                 runner.logMtx.Lock()
1234                 done := runner.LogsPDH != nil
1235                 runner.logMtx.Unlock()
1236                 if done {
1237                         return
1238                 }
1239                 size := runner.LogCollection.Size()
1240                 if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
1241                         continue
1242                 }
1243                 saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
1244                 saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
1245                 saved, err := runner.saveLogCollection(false)
1246                 if err != nil {
1247                         runner.CrunchLog.Printf("error updating log collection: %s", err)
1248                         continue
1249                 }
1250
1251                 var updated arvados.Container
1252                 err = runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1253                         "container": arvadosclient.Dict{"log": saved.PortableDataHash},
1254                 }, &updated)
1255                 if err != nil {
1256                         runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
1257                         continue
1258                 }
1259
1260                 savedSize = size
1261         }
1262 }
1263
1264 // CaptureOutput saves data from the container's output directory if
1265 // needed, and updates the container output accordingly.
1266 func (runner *ContainerRunner) CaptureOutput() error {
1267         if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
1268                 // Output may have been set directly by the container, so
1269                 // refresh the container record to check.
1270                 err := runner.ArvClient.Get("containers", runner.Container.UUID,
1271                         nil, &runner.Container)
1272                 if err != nil {
1273                         return err
1274                 }
1275                 if runner.Container.Output != "" {
1276                         // Container output is already set.
1277                         runner.OutputPDH = &runner.Container.Output
1278                         return nil
1279                 }
1280         }
1281
1282         txt, err := (&copier{
1283                 client:        runner.client,
1284                 arvClient:     runner.ArvClient,
1285                 keepClient:    runner.Kc,
1286                 hostOutputDir: runner.HostOutputDir,
1287                 ctrOutputDir:  runner.Container.OutputPath,
1288                 binds:         runner.Binds,
1289                 mounts:        runner.Container.Mounts,
1290                 secretMounts:  runner.SecretMounts,
1291                 logger:        runner.CrunchLog,
1292         }).Copy()
1293         if err != nil {
1294                 return err
1295         }
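             // The output collection is created pre-trashed: only its portable
             // data hash ends up on the container record, so presumably there
             // is no need to keep a separate named collection around.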
1296         var resp arvados.Collection
1297         err = runner.ArvClient.Create("collections", arvadosclient.Dict{
1298                 "ensure_unique_name": true,
1299                 "collection": arvadosclient.Dict{
1300                         "is_trashed":    true,
1301                         "name":          "output for " + runner.Container.UUID,
1302                         "manifest_text": txt,
1303                 },
1304         }, &resp)
1305         if err != nil {
1306                 return fmt.Errorf("error creating output collection: %v", err)
1307         }
1308         runner.OutputPDH = &resp.PortableDataHash
1309         return nil
1310 }
1311
1312 func (runner *ContainerRunner) CleanupDirs() {
1313         if runner.ArvMount != nil {
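                     // Ask arv-mount to unmount itself, allowing `delay` seconds
                     // before escalating; if that stalls, the processes are
                     // killed below.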
1314                 var delay int64 = 8
1315                 umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
1316                 umount.Stdout = runner.CrunchLog
1317                 umount.Stderr = runner.CrunchLog
1318                 runner.CrunchLog.Printf("Running %v", umount.Args)
1319                 umnterr := umount.Start()
1320
1321                 if umnterr != nil {
1322                         runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
1323                 } else {
1324                         // If arv-mount --unmount gets stuck for any reason, we
1325                         // don't want to wait for it forever.  Do Wait() in a goroutine
1326                         // so it doesn't block crunch-run.
1327                         umountExit := make(chan error)
1328                         go func() {
1329                                 mnterr := umount.Wait()
1330                                 if mnterr != nil {
1331                                         runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
1332                                 }
1333                                 umountExit <- mnterr
1334                         }()
1335
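                             // Loop until arv-mount is gone: when the unmount
                             // command exits, go around again to wait for
                             // arv-mount itself; after the timeout, kill both.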
1336                         for again := true; again; {
1337                                 again = false
1338                                 select {
1339                                 case <-umountExit:
1340                                         umount = nil
1341                                         again = true
1342                                 case <-runner.ArvMountExit:
1343                                         break
1344                                 case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
1345                                         runner.CrunchLog.Printf("Timed out waiting for unmount")
1346                                         if umount != nil {
1347                                                 umount.Process.Kill()
1348                                         }
1349                                         runner.ArvMount.Process.Kill()
1350                                 }
1351                         }
1352                 }
1353         }
1354
1355         if runner.ArvMountPoint != "" {
1356                 if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
1357                         runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
1358                 }
1359         }
1360
1361         if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
1362                 runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
1363         }
1364 }
1365
1366 // CommitLogs posts the collection containing the final container logs.
1367 func (runner *ContainerRunner) CommitLogs() error {
1368         func() {
1369                 // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
1370                 runner.cStateLock.Lock()
1371                 defer runner.cStateLock.Unlock()
1372
1373                 runner.CrunchLog.Print(runner.finalState)
1374
1375                 if runner.arvMountLog != nil {
1376                         runner.arvMountLog.Close()
1377                 }
1378                 runner.CrunchLog.Close()
1379
1380                 // Closing CrunchLog above allows the buffered logs to be committed to
1381                 // Keep at this point, but re-open the crunch log with ArvClient so that
1382                 // any further errors while shutting down (such as failing to write the
1383                 // log collection to Keep) are still reported.
1384                 runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
1385                         ArvClient:     runner.ArvClient,
1386                         UUID:          runner.Container.UUID,
1387                         loggingStream: "crunch-run",
1388                         writeCloser:   nil,
1389                 })
1390                 runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
1391         }()
1392
1393         if runner.LogsPDH != nil {
1394                 // If we have already assigned something to LogsPDH,
1395                 // we must be closing the re-opened log, which won't
1396                 // end up getting attached to the container record and
1397                 // therefore doesn't need to be saved as a collection
1398                 // -- it exists only to send logs to other channels.
1399                 return nil
1400         }
1401         saved, err := runner.saveLogCollection(true)
1402         if err != nil {
1403                 return fmt.Errorf("error saving log collection: %s", err)
1404         }
1405         runner.logMtx.Lock()
1406         defer runner.logMtx.Unlock()
1407         runner.LogsPDH = &saved.PortableDataHash
1408         return nil
1409 }
1410
1411 func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
1412         runner.logMtx.Lock()
1413         defer runner.logMtx.Unlock()
1414         if runner.LogsPDH != nil {
1415                 // Already finalized.
1416                 return
1417         }
1418         mt, err := runner.LogCollection.MarshalManifest(".")
1419         if err != nil {
1420                 err = fmt.Errorf("error creating log manifest: %v", err)
1421                 return
1422         }
1423         updates := arvadosclient.Dict{
1424                 "name":          "logs for " + runner.Container.UUID,
1425                 "manifest_text": mt,
1426         }
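             // Periodic (non-final) saves get a short expiry so stale
             // intermediate log collections clean themselves up if crunch-run
             // dies; the final save is marked trashed, presumably because only
             // its portable data hash is referenced from the container record.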
1427         if final {
1428                 updates["is_trashed"] = true
1429         } else {
1430                 exp := time.Now().Add(crunchLogUpdatePeriod * 24)
1431                 updates["trash_at"] = exp
1432                 updates["delete_at"] = exp
1433         }
1434         reqBody := arvadosclient.Dict{"collection": updates}
1435         if runner.logUUID == "" {
1436                 reqBody["ensure_unique_name"] = true
1437                 err = runner.ArvClient.Create("collections", reqBody, &response)
1438         } else {
1439                 err = runner.ArvClient.Update("collections", runner.logUUID, reqBody, &response)
1440         }
1441         if err != nil {
1442                 return
1443         }
1444         runner.logUUID = response.UUID
1445         return
1446 }
1447
1448 // UpdateContainerRunning updates the container state to "Running"
1449 func (runner *ContainerRunner) UpdateContainerRunning() error {
1450         runner.cStateLock.Lock()
1451         defer runner.cStateLock.Unlock()
1452         if runner.cCancelled {
1453                 return ErrCancelled
1454         }
1455         return runner.ArvClient.Update("containers", runner.Container.UUID,
1456                 arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
1457 }
1458
1459 // ContainerToken returns the api_token the container (and any
1460 // arv-mount processes) are allowed to use.
1461 func (runner *ContainerRunner) ContainerToken() (string, error) {
1462         if runner.token != "" {
1463                 return runner.token, nil
1464         }
1465
1466         var auth arvados.APIClientAuthorization
1467         err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
1468         if err != nil {
1469                 return "", err
1470         }
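             // Assemble a "v2" format token; the trailing container UUID
             // presumably scopes the token to this container.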
1471         runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
1472         return runner.token, nil
1473 }
1474
1475 // UpdateContainerFinal updates the container record state on the API
1476 // server to "Complete" or "Cancelled".
1477 func (runner *ContainerRunner) UpdateContainerFinal() error {
1478         update := arvadosclient.Dict{}
1479         update["state"] = runner.finalState
1480         if runner.LogsPDH != nil {
1481                 update["log"] = *runner.LogsPDH
1482         }
1483         if runner.finalState == "Complete" {
1484                 if runner.ExitCode != nil {
1485                         update["exit_code"] = *runner.ExitCode
1486                 }
1487                 if runner.OutputPDH != nil {
1488                         update["output"] = *runner.OutputPDH
1489                 }
1490         }
1491         return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
1492 }
1493
1494 // IsCancelled returns the value of cCancelled, with goroutine safety.
1495 func (runner *ContainerRunner) IsCancelled() bool {
1496         runner.cStateLock.Lock()
1497         defer runner.cStateLock.Unlock()
1498         return runner.cCancelled
1499 }
1500
1501 // NewArvLogWriter creates an ArvLogWriter for the named log stream, backed by a file in the log collection.
1502 func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
1503         writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
1504         if err != nil {
1505                 return nil, err
1506         }
1507         return &ArvLogWriter{
1508                 ArvClient:     runner.ArvClient,
1509                 UUID:          runner.Container.UUID,
1510                 loggingStream: name,
1511                 writeCloser:   writer,
1512         }, nil
1513 }
1514
1515 // Run runs the full container lifecycle.
1516 func (runner *ContainerRunner) Run() (err error) {
1517         runner.CrunchLog.Printf("crunch-run %s started", version)
1518         runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
1519
1520         hostname, hosterr := os.Hostname()
1521         if hosterr != nil {
1522                 runner.CrunchLog.Printf("Error getting hostname: %v", hosterr)
1523         } else {
1524                 runner.CrunchLog.Printf("Executing on host '%s'", hostname)
1525         }
1526
1527         runner.finalState = "Queued"
1528
1529         defer func() {
1530                 runner.CleanupDirs()
1531
1532                 runner.CrunchLog.Printf("crunch-run finished")
1533                 runner.CrunchLog.Close()
1534         }()
1535
1536         defer func() {
1537                 // checkErr prints e (unless it's nil) and sets err to
1538                 // e (unless err is already non-nil). Thus, if err
1539                 // hasn't already been assigned when Run() returns,
1540                 // this cleanup func will cause Run() to return the
1541                 // first non-nil error that is passed to checkErr().
1542                 checkErr := func(errorIn string, e error) {
1543                         if e == nil {
1544                                 return
1545                         }
1546                         runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
1547                         if err == nil {
1548                                 err = e
1549                         }
1550                         if runner.finalState == "Complete" {
1551                                 // There was an error in the finalization.
1552                                 runner.finalState = "Cancelled"
1553                         }
1554                 }
1555
1556                 // Log the error encountered in Run(), if any
1557                 checkErr("Run", err)
1558
1559                 if runner.finalState == "Queued" {
1560                         runner.UpdateContainerFinal()
1561                         return
1562                 }
1563
1564                 if runner.IsCancelled() {
1565                         runner.finalState = "Cancelled"
1566                         // but don't return yet -- we still want to
1567                         // capture partial output and write logs
1568                 }
1569
1570                 checkErr("CaptureOutput", runner.CaptureOutput())
1571                 checkErr("stopHoststat", runner.stopHoststat())
1572                 checkErr("CommitLogs", runner.CommitLogs())
1573                 checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
1574         }()
1575
1576         err = runner.fetchContainerRecord()
1577         if err != nil {
1578                 return
1579         }
1580         runner.setupSignals()
1581         err = runner.startHoststat()
1582         if err != nil {
1583                 return
1584         }
1585
1586         // Sanity check that containerd is running.
1587         err = runner.CheckContainerd()
1588         if err != nil {
1589                 return
1590         }
1591
1592         // Check for and/or load the container image.
1593         err = runner.LoadImage()
1594         if err != nil {
1595                 if !runner.checkBrokenNode(err) {
1596                         // Failed to load image but not due to a "broken node"
1597                         // condition, probably user error.
1598                         runner.finalState = "Cancelled"
1599                 }
1600                 err = fmt.Errorf("While loading container image: %v", err)
1601                 return
1602         }
1603
1604         // Set up the FUSE mount and binds.
1605         err = runner.SetupMounts()
1606         if err != nil {
1607                 runner.finalState = "Cancelled"
1608                 err = fmt.Errorf("While setting up mounts: %v", err)
1609                 return
1610         }
1611
1612         err = runner.CreateContainer()
1613         if err != nil {
1614                 return
1615         }
1616         err = runner.LogHostInfo()
1617         if err != nil {
1618                 return
1619         }
1620         err = runner.LogNodeRecord()
1621         if err != nil {
1622                 return
1623         }
1624         err = runner.LogContainerRecord()
1625         if err != nil {
1626                 return
1627         }
1628
1629         if runner.IsCancelled() {
1630                 return
1631         }
1632
1633         err = runner.UpdateContainerRunning()
1634         if err != nil {
1635                 return
1636         }
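             // The container is running now. Default the final state to
             // Cancelled; it is switched to Complete only after WaitFinish
             // returns without error and the container was not cancelled.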
1637         runner.finalState = "Cancelled"
1638
1639         err = runner.startCrunchstat()
1640         if err != nil {
1641                 return
1642         }
1643
1644         err = runner.StartContainer()
1645         if err != nil {
1646                 runner.checkBrokenNode(err)
1647                 return
1648         }
1649
1650         err = runner.WaitFinish()
1651         if err == nil && !runner.IsCancelled() {
1652                 runner.finalState = "Complete"
1653         }
1654         return
1655 }
1656
1657 // Fetch the current container record (uuid = runner.Container.UUID)
1658 // into runner.Container.
1659 func (runner *ContainerRunner) fetchContainerRecord() error {
1660         reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
1661         if err != nil {
1662                 return fmt.Errorf("error fetching container record: %v", err)
1663         }
1664         defer reader.Close()
1665
1666         dec := json.NewDecoder(reader)
1667         dec.UseNumber()
1668         err = dec.Decode(&runner.Container)
1669         if err != nil {
1670                 return fmt.Errorf("error decoding container record: %v", err)
1671         }
1672
1673         var sm struct {
1674                 SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
1675         }
1676
1677         containerToken, err := runner.ContainerToken()
1678         if err != nil {
1679                 return fmt.Errorf("error getting container token: %v", err)
1680         }
1681
1682         containerClient, err := runner.MkArvClient(containerToken)
1683         if err != nil {
1684                 return fmt.Errorf("error creating container API client: %v", err)
1685         }
1686
1687         err = containerClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
1688         if err != nil {
1689                 if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
1690                         return fmt.Errorf("error fetching secret_mounts: %v", err)
1691                 }
1692                 // ok && apierr.HttpStatusCode == 404, which means
1693                 // secret_mounts isn't supported by this API server.
1694         }
1695         runner.SecretMounts = sm.SecretMounts
1696
1697         return nil
1698 }
1699
1700 // NewContainerRunner creates a new container runner.
1701 func NewContainerRunner(client *arvados.Client, api IArvadosClient, kc IKeepClient, docker ThinDockerClient, containerUUID string) (*ContainerRunner, error) {
1702         cr := &ContainerRunner{
1703                 client:    client,
1704                 ArvClient: api,
1705                 Kc:        kc,
1706                 Docker:    docker,
1707         }
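             // Wire up default implementations for the pluggable hooks; tests
             // presumably substitute their own versions.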
1708         cr.NewLogWriter = cr.NewArvLogWriter
1709         cr.RunArvMount = cr.ArvMountCmd
1710         cr.MkTempDir = ioutil.TempDir
1711         cr.ListProcesses = func() ([]PsProcess, error) {
1712                 pr, err := process.Processes()
1713                 if err != nil {
1714                         return nil, err
1715                 }
1716                 ps := make([]PsProcess, len(pr))
1717                 for i, j := range pr {
1718                         ps[i] = j
1719                 }
1720                 return ps, nil
1721         }
1722         cr.MkArvClient = func(token string) (IArvadosClient, error) {
1723                 cl, err := arvadosclient.MakeArvadosClient()
1724                 if err != nil {
1725                         return nil, err
1726                 }
1727                 cl.ApiToken = token
1728                 return cl, nil
1729         }
1730         var err error
1731         cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.client, cr.Kc)
1732         if err != nil {
1733                 return nil, err
1734         }
1735         cr.Container.UUID = containerUUID
1736         w, err := cr.NewLogWriter("crunch-run")
1737         if err != nil {
1738                 return nil, err
1739         }
1740         cr.CrunchLog = NewThrottledLogger(w)
1741         cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
1742
1743         loadLogThrottleParams(api)
1744         go cr.updateLogs()
1745
1746         return cr, nil
1747 }
1748
1749 func main() {
1750         statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
1751         cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
1752         cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
1753         cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
1754         caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
1755         enableNetwork := flag.String("container-enable-networking", "default",
1756                 `Specify whether networking should be enabled for the container. One of 'default' or 'always':
1757         default: only enable networking if the container requests it.
1758         always:  containers always have networking enabled.
1759         `)
1760         networkMode := flag.String("container-network-mode", "default",
1761                 `Set the networking mode for the container. Corresponds to the Docker network mode (--net).
1762         `)
1763         memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
1764         getVersion := flag.Bool("version", false, "Print version information and exit.")
1765         checkContainerd := flag.Duration("check-containerd", 60*time.Second, "Periodically check whether (docker-)containerd is running (use 0s to disable).")
1766         flag.Parse()
1767
1768         // Print version information if requested
1769         if *getVersion {
1770                 fmt.Printf("crunch-run %s\n", version)
1771                 return
1772         }
1773
1774         log.Printf("crunch-run %s started", version)
1775
1776         containerId := flag.Arg(0)
1777
1778         if *caCertsPath != "" {
1779                 arvadosclient.CertFiles = []string{*caCertsPath}
1780         }
1781
1782         api, err := arvadosclient.MakeArvadosClient()
1783         if err != nil {
1784                 log.Fatalf("%s: %v", containerId, err)
1785         }
1786         api.Retries = 8
1787
1788         kc, kcerr := keepclient.MakeKeepClient(api)
1789         if kcerr != nil {
1790                 log.Fatalf("%s: %v", containerId, kcerr)
1791         }
1792         kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
1793         kc.Retries = 4
1794
1795         // API version 1.21 corresponds to Docker 1.9, which is currently the
1796         // minimum version we want to support.
1797         docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
1798
1799         cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, docker, containerId)
1800         if err != nil {
1801                 log.Fatal(err)
1802         }
1803         if dockererr != nil {
1804                 cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
1805                 cr.checkBrokenNode(dockererr)
1806                 cr.CrunchLog.Close()
1807                 os.Exit(1)
1808         }
1809
1810         parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerId+".")
1811         if tmperr != nil {
1812                 log.Fatalf("%s: %v", containerId, tmperr)
1813         }
1814
1815         cr.parentTemp = parentTemp
1816         cr.statInterval = *statInterval
1817         cr.cgroupRoot = *cgroupRoot
1818         cr.expectCgroupParent = *cgroupParent
1819         cr.enableNetwork = *enableNetwork
1820         cr.networkMode = *networkMode
1821         cr.checkContainerd = *checkContainerd
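             // If requested, use the current process's cgroup for the given
             // subsystem as the container's parent cgroup.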
1822         if *cgroupParentSubsystem != "" {
1823                 p := findCgroup(*cgroupParentSubsystem)
1824                 cr.setCgroupParent = p
1825                 cr.expectCgroupParent = p
1826         }
1827
1828         runerr := cr.Run()
1829
1830         if *memprofile != "" {
1831                 f, err := os.Create(*memprofile)
1832                 if err != nil {
1833                         log.Printf("could not create memory profile: %s", err)
1834                 } else {
1835                         runtime.GC() // get up-to-date statistics
1836                         if err := pprof.WriteHeapProfile(f); err != nil {
1837                                 log.Printf("could not write memory profile: %s", err)
1838                         }
1839                         if closeerr := f.Close(); closeerr != nil {
1840                                 log.Printf("closing memprofile file: %s", closeerr)
1841                         }
1842                 }
1843         }
1844
1845         if runerr != nil {
1846                 log.Fatalf("%s: %v", containerId, runerr)
1847         }
1848 }