lib/crunchrun/crunchrun.go (arvados.git)
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 package crunchrun
6
7 import (
8         "bytes"
9         "context"
10         "encoding/json"
11         "errors"
12         "flag"
13         "fmt"
14         "io"
15         "io/fs"
16         "io/ioutil"
17         "log"
18         "net"
19         "net/http"
20         "os"
21         "os/exec"
22         "os/signal"
23         "os/user"
24         "path"
25         "path/filepath"
26         "regexp"
27         "runtime"
28         "runtime/pprof"
29         "sort"
30         "strings"
31         "sync"
32         "syscall"
33         "time"
34
35         "git.arvados.org/arvados.git/lib/cloud"
36         "git.arvados.org/arvados.git/lib/cmd"
37         "git.arvados.org/arvados.git/lib/config"
38         "git.arvados.org/arvados.git/lib/crunchstat"
39         "git.arvados.org/arvados.git/sdk/go/arvados"
40         "git.arvados.org/arvados.git/sdk/go/arvadosclient"
41         "git.arvados.org/arvados.git/sdk/go/ctxlog"
42         "git.arvados.org/arvados.git/sdk/go/keepclient"
43         "git.arvados.org/arvados.git/sdk/go/manifest"
44         "golang.org/x/sys/unix"
45 )
46
47 type command struct{}
48
49 var arvadosCertPath = "/etc/arvados/ca-certificates.crt"
50
51 var Command = command{}
52
53 // ConfigData contains environment variables and (when needed) cluster
54 // configuration, passed from dispatchcloud to crunch-run on stdin.
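// An illustrative (hypothetical) example of the JSON crunch-run might
// receive on stdin:
//
//	{
//	  "Env": {"ARVADOS_API_HOST": "zzzzz.arvadosapi.com"},
//	  "KeepBuffers": 2,
//	  "EC2SpotCheck": true,
//	  "Cluster": {"ClusterID": "zzzzz"}
//	}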
55 type ConfigData struct {
56         Env          map[string]string
57         KeepBuffers  int
58         EC2SpotCheck bool
59         Cluster      *arvados.Cluster
60 }
61
62 // IArvadosClient is the minimal Arvados API methods used by crunch-run.
63 type IArvadosClient interface {
64         Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
65         Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
66         Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
67         Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
68         CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
69         Discovery(key string) (interface{}, error)
70 }
71
72 // ErrCancelled is the error returned when the container is cancelled.
73 var ErrCancelled = errors.New("Cancelled")
74
75 // IKeepClient is the minimal Keep API methods used by crunch-run.
76 type IKeepClient interface {
77         BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error)
78         ReadAt(locator string, p []byte, off int) (int, error)
79         ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
80         LocalLocator(locator string) (string, error)
81         ClearBlockCache()
82         SetStorageClasses(sc []string)
83 }
84
85 // NewLogWriter is a factory function to create a new log writer.
86 type NewLogWriter func(name string) (io.WriteCloser, error)
87
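// RunArvMount is the type of function used to start arv-mount with a
// given command line and container token (cf. ArvMountCmd, which has
// this signature).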
88 type RunArvMount func(cmdline []string, tok string) (*exec.Cmd, error)
89
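// MkTempDir creates a temporary directory inside the given parent
// directory; its signature matches ioutil.TempDir.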
90 type MkTempDir func(string, string) (string, error)
91
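// PsProcess is the minimal interface used to inspect a running
// process's command line.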
92 type PsProcess interface {
93         CmdlineSlice() ([]string, error)
94 }
95
96 // ContainerRunner is the main stateful struct used for a single execution of a
97 // container.
98 type ContainerRunner struct {
99         executor       containerExecutor
100         executorStdin  io.Closer
101         executorStdout io.Closer
102         executorStderr io.Closer
103
104         // Dispatcher client is initialized with the Dispatcher token.
105         // This is a privileged token used to manage container status
106         // and logs.
107         //
108         // We have both dispatcherClient and DispatcherArvClient
109         // because there are two different incompatible Arvados Go
110         // SDKs and we have to use both (hopefully this gets fixed in
111         // #14467)
112         dispatcherClient     *arvados.Client
113         DispatcherArvClient  IArvadosClient
114         DispatcherKeepClient IKeepClient
115
116         // Container client is initialized with the Container token.
117         // This token controls the permissions of the container, and
118         // must be used for operations such as reading collections.
119         //
120         // Same comment as above applies to
121         // containerClient/ContainerArvClient.
122         containerClient     *arvados.Client
123         ContainerArvClient  IArvadosClient
124         ContainerKeepClient IKeepClient
125
126         Container     arvados.Container
127         token         string
128         ExitCode      *int
129         NewLogWriter  NewLogWriter
130         CrunchLog     *ThrottledLogger
131         logUUID       string
132         logMtx        sync.Mutex
133         LogCollection arvados.CollectionFileSystem
134         LogsPDH       *string
135         RunArvMount   RunArvMount
136         MkTempDir     MkTempDir
137         ArvMount      *exec.Cmd
138         ArvMountPoint string
139         HostOutputDir string
140         Volumes       map[string]struct{}
141         OutputPDH     *string
142         SigChan       chan os.Signal
143         ArvMountExit  chan error
144         SecretMounts  map[string]arvados.Mount
145         MkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
146         finalState    string
147         parentTemp    string
148         costStartTime time.Time
149
150         keepstore        *exec.Cmd
151         keepstoreLogger  io.WriteCloser
152         keepstoreLogbuf  *bufThenWrite
153         statLogger       io.WriteCloser
154         statReporter     *crunchstat.Reporter
155         hoststatLogger   io.WriteCloser
156         hoststatReporter *crunchstat.Reporter
157         statInterval     time.Duration
158         // What we tell docker to use as the container's cgroup
159         // parent.
160         setCgroupParent string
161         // Fake root dir where crunchstat.Reporter should read OS
162         // files, for testing.
163         crunchstatFakeFS fs.FS
164
165         cStateLock sync.Mutex
166         cCancelled bool // StopContainer() invoked
167
168         enableMemoryLimit bool
169         enableNetwork     string // one of "default" or "always"
170         networkMode       string // "none", "host", or "" -- passed through to executor
171         brokenNodeHook    string // script to run if node appears to be broken
172         arvMountLog       *ThrottledLogger
173
174         containerWatchdogInterval time.Duration
175
176         gateway Gateway
177
178         prices     []cloud.InstancePrice
179         pricesLock sync.Mutex
180 }
181
182 // setupSignals sets up signal handling to gracefully terminate the
183 // underlying container and update state when receiving a TERM, INT or
184 // QUIT signal.
185 func (runner *ContainerRunner) setupSignals() {
186         runner.SigChan = make(chan os.Signal, 1)
187         signal.Notify(runner.SigChan, syscall.SIGTERM)
188         signal.Notify(runner.SigChan, syscall.SIGINT)
189         signal.Notify(runner.SigChan, syscall.SIGQUIT)
190
191         go func(sig chan os.Signal) {
192                 for s := range sig {
193                         runner.stop(s)
194                 }
195         }(runner.SigChan)
196 }
197
198 // stop the underlying container.
199 func (runner *ContainerRunner) stop(sig os.Signal) {
200         runner.cStateLock.Lock()
201         defer runner.cStateLock.Unlock()
202         if sig != nil {
203                 runner.CrunchLog.Printf("caught signal: %v", sig)
204         }
205         runner.cCancelled = true
206         runner.CrunchLog.Printf("stopping container")
207         err := runner.executor.Stop()
208         if err != nil {
209                 runner.CrunchLog.Printf("error stopping container: %s", err)
210         }
211 }
212
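// errorBlacklist matches error messages that indicate the worker node
// itself is unable to run containers (see checkBrokenNode).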
213 var errorBlacklist = []string{
214         "(?ms).*[Cc]annot connect to the Docker daemon.*",
215         "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
216         "(?ms).*grpc: the connection is unavailable.*",
217 }
218
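// runBrokenNodeHook runs the configured broken-node hook script, or,
// if no hook is configured, writes a marker file to indicate that this
// node is broken.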
219 func (runner *ContainerRunner) runBrokenNodeHook() {
220         if runner.brokenNodeHook == "" {
221                 path := filepath.Join(lockdir, brokenfile)
222                 runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
223                 f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
224                 if err != nil {
225                         runner.CrunchLog.Printf("Error writing %s: %s", path, err)
226                         return
227                 }
228                 f.Close()
229         } else {
230                 runner.CrunchLog.Printf("Running broken node hook %q", runner.brokenNodeHook)
231                 // run killme script
232                 c := exec.Command(runner.brokenNodeHook)
233                 c.Stdout = runner.CrunchLog
234                 c.Stderr = runner.CrunchLog
235                 err := c.Run()
236                 if err != nil {
237                         runner.CrunchLog.Printf("Error running broken node hook: %v", err)
238                 }
239         }
240 }
241
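// checkBrokenNode runs the broken-node hook and returns true if the
// given error matches one of the errorBlacklist patterns, i.e., it
// suggests the node is unable to run containers.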
242 func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
243         for _, d := range errorBlacklist {
244                 if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
245                         runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
246                         runner.runBrokenNodeHook()
247                         return true
248                 }
249         }
250         return false
251 }
252
253 // LoadImage determines the docker image id from the container record and
254 // checks if it is available in the local Docker image store.  If not, it loads
255 // the image from Keep.
256 func (runner *ContainerRunner) LoadImage() (string, error) {
257         runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
258
259         d, err := os.Open(runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage)
260         if err != nil {
261                 return "", err
262         }
263         defer d.Close()
264         allfiles, err := d.Readdirnames(-1)
265         if err != nil {
266                 return "", err
267         }
268         var tarfiles []string
269         for _, fnm := range allfiles {
270                 if strings.HasSuffix(fnm, ".tar") {
271                         tarfiles = append(tarfiles, fnm)
272                 }
273         }
274         if len(tarfiles) == 0 {
275                 return "", fmt.Errorf("image collection does not include a .tar image file")
276         }
277         if len(tarfiles) > 1 {
278                 return "", fmt.Errorf("cannot choose from multiple tar files in image collection: %v", tarfiles)
279         }
280         imageID := tarfiles[0][:len(tarfiles[0])-4]
281         imageTarballPath := runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage + "/" + imageID + ".tar"
282         runner.CrunchLog.Printf("Using Docker image id %q", imageID)
283
284         runner.CrunchLog.Print("Loading Docker image from keep")
285         err = runner.executor.LoadImage(imageID, imageTarballPath, runner.Container, runner.ArvMountPoint,
286                 runner.containerClient)
287         if err != nil {
288                 return "", err
289         }
290
291         return imageID, nil
292 }
293
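// ArvMountCmd starts arv-mount with the given command line, passing
// the container token in ARVADOS_API_TOKEN. It logs arv-mount output,
// reports selected warning patterns via updateRuntimeStatus, and waits
// for the mount to become ready (by polling for by_id/README) before
// returning the running command.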
294 func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *exec.Cmd, err error) {
295         c = exec.Command(cmdline[0], cmdline[1:]...)
296
297         // Copy our environment, but override ARVADOS_API_TOKEN with
298         // the container auth token.
299         c.Env = nil
300         for _, s := range os.Environ() {
301                 if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
302                         c.Env = append(c.Env, s)
303                 }
304         }
305         c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
306
307         w, err := runner.NewLogWriter("arv-mount")
308         if err != nil {
309                 return nil, err
310         }
311         runner.arvMountLog = NewThrottledLogger(w)
312         scanner := logScanner{
313                 Patterns: []string{
314                         "Keep write error",
315                         "Block not found error",
316                         "Unhandled exception during FUSE operation",
317                 },
318                 ReportFunc: func(pattern, text string) {
319                         runner.updateRuntimeStatus(arvadosclient.Dict{
320                                 "warning":       "arv-mount: " + pattern,
321                                 "warningDetail": text,
322                         })
323                 },
324         }
325         c.Stdout = runner.arvMountLog
326         c.Stderr = io.MultiWriter(runner.arvMountLog, os.Stderr, &scanner)
327
328         runner.CrunchLog.Printf("Running %v", c.Args)
329
330         err = c.Start()
331         if err != nil {
332                 return nil, err
333         }
334
335         statReadme := make(chan bool)
336         runner.ArvMountExit = make(chan error)
337
338         keepStatting := true
339         go func() {
340                 for keepStatting {
341                         time.Sleep(100 * time.Millisecond)
342                         _, err := os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
343                         if err == nil {
344                                 keepStatting = false
345                                 statReadme <- true
346                         }
347                 }
348                 close(statReadme)
349         }()
350
351         go func() {
352                 mnterr := c.Wait()
353                 if mnterr != nil {
354                         runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
355                 }
356                 runner.ArvMountExit <- mnterr
357                 close(runner.ArvMountExit)
358         }()
359
360         select {
361         case <-statReadme:
362                 break
363         case err := <-runner.ArvMountExit:
364                 runner.ArvMount = nil
365                 keepStatting = false
366                 return nil, err
367         }
368
369         return c, nil
370 }
371
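// SetupArvMountPoint creates the temporary directory used as the
// arv-mount mount point, unless one has already been set.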
372 func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
373         if runner.ArvMountPoint == "" {
374                 runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
375         }
376         return
377 }
378
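// copyfile copies the file at src to dst, creating dst's parent
// directories if needed.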
379 func copyfile(src string, dst string) (err error) {
380         srcfile, err := os.Open(src)
381         if err != nil {
382                 return
383         }
384
385         os.MkdirAll(path.Dir(dst), 0777)
386
387         dstfile, err := os.Create(dst)
388         if err != nil {
389                 return
390         }
391         _, err = io.Copy(dstfile, srcfile)
392         if err != nil {
393                 return
394         }
395
396         err = srcfile.Close()
397         err2 := dstfile.Close()
398
399         if err != nil {
400                 return
401         }
402
403         if err2 != nil {
404                 return err2
405         }
406
407         return nil
408 }
409
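// SetupMounts starts arv-mount and prepares all of the mounts
// specified in the container record (collections, tmp dirs, json/text
// literals, and git trees), returning the set of bind mounts to pass
// to the container executor.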
410 func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
411         bindmounts := map[string]bindmount{}
412         err := runner.SetupArvMountPoint("keep")
413         if err != nil {
414                 return nil, fmt.Errorf("While creating keep mount temp dir: %v", err)
415         }
416
417         token, err := runner.ContainerToken()
418         if err != nil {
419                 return nil, fmt.Errorf("could not get container token: %s", err)
420         }
421         runner.CrunchLog.Printf("container token %q", token)
422
423         pdhOnly := true
424         tmpcount := 0
425         arvMountCmd := []string{
426                 "arv-mount",
427                 "--foreground",
428                 "--read-write",
429                 "--storage-classes", strings.Join(runner.Container.OutputStorageClasses, ","),
430                 fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
431
432         if _, isdocker := runner.executor.(*dockerExecutor); isdocker {
433                 arvMountCmd = append(arvMountCmd, "--allow-other")
434         }
435
436         if runner.Container.RuntimeConstraints.KeepCacheDisk > 0 {
437                 keepcachedir, err := runner.MkTempDir(runner.parentTemp, "keepcache")
438                 if err != nil {
439                         return nil, fmt.Errorf("while creating keep cache temp dir: %v", err)
440                 }
441                 arvMountCmd = append(arvMountCmd, "--disk-cache", "--disk-cache-dir", keepcachedir, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheDisk))
442         } else if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
443                 arvMountCmd = append(arvMountCmd, "--ram-cache", "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
444         }
445
446         collectionPaths := []string{}
447         needCertMount := true
448         type copyFile struct {
449                 src  string
450                 bind string
451         }
452         var copyFiles []copyFile
453
454         var binds []string
455         for bind := range runner.Container.Mounts {
456                 binds = append(binds, bind)
457         }
458         for bind := range runner.SecretMounts {
459                 if _, ok := runner.Container.Mounts[bind]; ok {
460                         return nil, fmt.Errorf("secret mount %q conflicts with regular mount", bind)
461                 }
462                 if runner.SecretMounts[bind].Kind != "json" &&
463                         runner.SecretMounts[bind].Kind != "text" {
464                         return nil, fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
465                                 bind, runner.SecretMounts[bind].Kind)
466                 }
467                 binds = append(binds, bind)
468         }
469         sort.Strings(binds)
470
471         for _, bind := range binds {
472                 mnt, notSecret := runner.Container.Mounts[bind]
473                 if !notSecret {
474                         mnt = runner.SecretMounts[bind]
475                 }
476                 if bind == "stdout" || bind == "stderr" {
477                         // Is it a "file" mount kind?
478                         if mnt.Kind != "file" {
479                                 return nil, fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
480                         }
481
482                         // Does path start with OutputPath?
483                         prefix := runner.Container.OutputPath
484                         if !strings.HasSuffix(prefix, "/") {
485                                 prefix += "/"
486                         }
487                         if !strings.HasPrefix(mnt.Path, prefix) {
488                                 return nil, fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
489                         }
490                 }
491
492                 if bind == "stdin" {
493                         // Is it a "collection" mount kind?
494                         if mnt.Kind != "collection" && mnt.Kind != "json" {
495                                 return nil, fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
496                         }
497                 }
498
499                 if bind == arvadosCertPath {
500                         needCertMount = false
501                 }
502
503                 if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
504                         if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
505                                 return nil, fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
506                         }
507                 }
508
509                 switch {
510                 case mnt.Kind == "collection" && bind != "stdin":
511                         var src string
512                         if mnt.UUID != "" && mnt.PortableDataHash != "" {
513                                 return nil, fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
514                         }
515                         if mnt.UUID != "" {
516                                 if mnt.Writable {
517                                         return nil, fmt.Errorf("writing to existing collections currently not permitted")
518                                 }
519                                 pdhOnly = false
520                                 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
521                         } else if mnt.PortableDataHash != "" {
522                                 if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
523                                         return nil, fmt.Errorf("can never write to a collection specified by portable data hash")
524                                 }
525                                 idx := strings.Index(mnt.PortableDataHash, "/")
526                                 if idx > 0 {
527                                         mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
528                                         mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
529                                         runner.Container.Mounts[bind] = mnt
530                                 }
531                                 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
532                                 if mnt.Path != "" && mnt.Path != "." {
533                                         if strings.HasPrefix(mnt.Path, "./") {
534                                                 mnt.Path = mnt.Path[2:]
535                                         } else if strings.HasPrefix(mnt.Path, "/") {
536                                                 mnt.Path = mnt.Path[1:]
537                                         }
538                                         src += "/" + mnt.Path
539                                 }
540                         } else {
541                                 src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
542                                 arvMountCmd = append(arvMountCmd, "--mount-tmp", fmt.Sprintf("tmp%d", tmpcount))
543                                 tmpcount++
544                         }
545                         if mnt.Writable {
546                                 if bind == runner.Container.OutputPath {
547                                         runner.HostOutputDir = src
548                                         bindmounts[bind] = bindmount{HostPath: src}
549                                 } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
550                                         copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
551                                 } else {
552                                         bindmounts[bind] = bindmount{HostPath: src}
553                                 }
554                         } else {
555                                 bindmounts[bind] = bindmount{HostPath: src, ReadOnly: true}
556                         }
557                         collectionPaths = append(collectionPaths, src)
558
559                 case mnt.Kind == "tmp":
560                         var tmpdir string
561                         tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
562                         if err != nil {
563                                 return nil, fmt.Errorf("while creating mount temp dir: %v", err)
564                         }
565                         st, staterr := os.Stat(tmpdir)
566                         if staterr != nil {
567                                 return nil, fmt.Errorf("while Stat on temp dir: %v", staterr)
568                         }
569                         err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
570                         if err != nil {
571                                 return nil, fmt.Errorf("while Chmod temp dir: %v", err)
572                         }
573                         bindmounts[bind] = bindmount{HostPath: tmpdir}
574                         if bind == runner.Container.OutputPath {
575                                 runner.HostOutputDir = tmpdir
576                         }
577
578                 case mnt.Kind == "json" || mnt.Kind == "text":
579                         var filedata []byte
580                         if mnt.Kind == "json" {
581                                 filedata, err = json.Marshal(mnt.Content)
582                                 if err != nil {
583                                         return nil, fmt.Errorf("encoding json data: %v", err)
584                                 }
585                         } else {
586                                 text, ok := mnt.Content.(string)
587                                 if !ok {
588                                         return nil, fmt.Errorf("content for mount %q must be a string", bind)
589                                 }
590                                 filedata = []byte(text)
591                         }
592
593                         tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
594                         if err != nil {
595                                 return nil, fmt.Errorf("creating temp dir: %v", err)
596                         }
597                         tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
598                         err = ioutil.WriteFile(tmpfn, filedata, 0444)
599                         if err != nil {
600                                 return nil, fmt.Errorf("writing temp file: %v", err)
601                         }
602                         if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && (notSecret || runner.Container.Mounts[runner.Container.OutputPath].Kind != "collection") {
603                                 // In most cases, if the container
604                                 // specifies a literal file inside the
605                                 // output path, we copy it into the
606                                 // output directory (either a mounted
607                                 // collection or a staging area on the
608                                 // host fs). If it's a secret, it will
609                                 // be skipped when copying output from
610                                 // staging to Keep later.
611                                 copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
612                         } else {
613                                 // If a secret is outside OutputPath,
614                                 // we bind mount the secret file
615                                 // directly just like other mounts. We
616                                 // also use this strategy when a
617                                 // secret is inside OutputPath but
618                                 // OutputPath is a live collection, to
619                                 // avoid writing the secret to
620                                 // Keep. Attempting to remove a
621                                 // bind-mounted secret file from
622                                 // inside the container will return a
623                                 // "Device or resource busy" error
624                                 // that might not be handled well by
625                                 // the container, which is why we
626                                 // don't use this strategy when
627                                 // OutputPath is a staging directory.
628                                 bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
629                         }
630
631                 case mnt.Kind == "git_tree":
632                         tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
633                         if err != nil {
634                                 return nil, fmt.Errorf("creating temp dir: %v", err)
635                         }
636                         err = gitMount(mnt).extractTree(runner.containerClient, tmpdir, token)
637                         if err != nil {
638                                 return nil, err
639                         }
640                         bindmounts[bind] = bindmount{HostPath: tmpdir, ReadOnly: true}
641                 }
642         }
643
644         if runner.HostOutputDir == "" {
645                 return nil, fmt.Errorf("output path does not correspond to a writable mount point")
646         }
647
648         if needCertMount && runner.Container.RuntimeConstraints.API {
649                 for _, certfile := range []string{
650                         // Populated by caller, or sdk/go/arvados init(), or test suite:
651                         os.Getenv("SSL_CERT_FILE"),
652                         // Copied from Go 1.21 stdlib (src/crypto/x509/root_linux.go):
653                         "/etc/ssl/certs/ca-certificates.crt",                // Debian/Ubuntu/Gentoo etc.
654                         "/etc/pki/tls/certs/ca-bundle.crt",                  // Fedora/RHEL 6
655                         "/etc/ssl/ca-bundle.pem",                            // OpenSUSE
656                         "/etc/pki/tls/cacert.pem",                           // OpenELEC
657                         "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
658                         "/etc/ssl/cert.pem",                                 // Alpine Linux
659                 } {
660                         if _, err := os.Stat(certfile); err == nil {
661                                 bindmounts[arvadosCertPath] = bindmount{HostPath: certfile, ReadOnly: true}
662                                 break
663                         }
664                 }
665         }
666
667         if pdhOnly {
668                 // If we are only mounting collections by pdh, make
669                 // sure we don't subscribe to websocket events to
670                 // avoid putting undesired load on the API server
671                 arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id", "--disable-event-listening")
672         } else {
673                 arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
674         }
675         // the by_uuid mount point is used by singularity when writing
676         // out docker images converted to SIF
677         arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
678         arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
679
680         runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
681         if err != nil {
682                 return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
683         }
684         if runner.hoststatReporter != nil && runner.ArvMount != nil {
685                 runner.hoststatReporter.ReportPID("arv-mount", runner.ArvMount.Process.Pid)
686         }
687
688         for _, p := range collectionPaths {
689                 _, err = os.Stat(p)
690                 if err != nil {
691                         return nil, fmt.Errorf("while checking that input files exist: %v", err)
692                 }
693         }
694
695         for _, cp := range copyFiles {
696                 st, err := os.Stat(cp.src)
697                 if err != nil {
698                         return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
699                 }
700                 if st.IsDir() {
701                         err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
702                                 if walkerr != nil {
703                                         return walkerr
704                                 }
705                                 target := path.Join(cp.bind, walkpath[len(cp.src):])
706                                 if walkinfo.Mode().IsRegular() {
707                                         copyerr := copyfile(walkpath, target)
708                                         if copyerr != nil {
709                                                 return copyerr
710                                         }
711                                         return os.Chmod(target, walkinfo.Mode()|0777)
712                                 } else if walkinfo.Mode().IsDir() {
713                                         mkerr := os.MkdirAll(target, 0777)
714                                         if mkerr != nil {
715                                                 return mkerr
716                                         }
717                                         return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
718                                 } else {
719                                         return fmt.Errorf("source %q is not a regular file or directory", cp.src)
720                                 }
721                         })
722                 } else if st.Mode().IsRegular() {
723                         err = copyfile(cp.src, cp.bind)
724                         if err == nil {
725                                 err = os.Chmod(cp.bind, st.Mode()|0777)
726                         }
727                 }
728                 if err != nil {
729                         return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
730                 }
731         }
732
733         return bindmounts, nil
734 }
735
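// stopHoststat stops the hoststat reporter, logs its process memory
// maxima, and closes the hoststat log.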
736 func (runner *ContainerRunner) stopHoststat() error {
737         if runner.hoststatReporter == nil {
738                 return nil
739         }
740         runner.hoststatReporter.Stop()
741         runner.hoststatReporter.LogProcessMemMax(runner.CrunchLog)
742         err := runner.hoststatLogger.Close()
743         if err != nil {
744                 return fmt.Errorf("error closing hoststat logs: %v", err)
745         }
746         return nil
747 }
748
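// startHoststat starts a crunchstat reporter that tracks resource
// usage of the crunch-run process itself, i.e., usage outside the
// container.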
749 func (runner *ContainerRunner) startHoststat() error {
750         w, err := runner.NewLogWriter("hoststat")
751         if err != nil {
752                 return err
753         }
754         runner.hoststatLogger = NewThrottledLogger(w)
755         runner.hoststatReporter = &crunchstat.Reporter{
756                 Logger: log.New(runner.hoststatLogger, "", 0),
757                 // Our own cgroup is the "host" cgroup, in the sense
758                 // that it accounts for resource usage outside the
759                 // container. It doesn't count _all_ resource usage on
760                 // the system.
761                 //
762                 // TODO?: Use the furthest ancestor of our own cgroup
763                 // that has stats available. (Currently crunchstat
764                 // does not have that capability.)
765                 Pid:        os.Getpid,
766                 PollPeriod: runner.statInterval,
767         }
768         runner.hoststatReporter.Start()
769         runner.hoststatReporter.ReportPID("crunch-run", os.Getpid())
770         return nil
771 }
772
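// startCrunchstat starts a crunchstat reporter that tracks the
// container's resource usage and logs warnings when memory (rss) use
// crosses the 90/95/99% thresholds of the requested RAM.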
773 func (runner *ContainerRunner) startCrunchstat() error {
774         w, err := runner.NewLogWriter("crunchstat")
775         if err != nil {
776                 return err
777         }
778         runner.statLogger = NewThrottledLogger(w)
779         runner.statReporter = &crunchstat.Reporter{
780                 Pid:    runner.executor.Pid,
781                 FS:     runner.crunchstatFakeFS,
782                 Logger: log.New(runner.statLogger, "", 0),
783                 MemThresholds: map[string][]crunchstat.Threshold{
784                         "rss": crunchstat.NewThresholdsFromPercentages(runner.Container.RuntimeConstraints.RAM, []int64{90, 95, 99}),
785                 },
786                 PollPeriod:      runner.statInterval,
787                 TempDir:         runner.parentTemp,
788                 ThresholdLogger: runner.CrunchLog,
789         }
790         runner.statReporter.Start()
791         return nil
792 }
793
794 type infoCommand struct {
795         label string
796         cmd   []string
797 }
798
799 // LogHostInfo logs info about the current host, for debugging and
800 // accounting purposes. Although it's logged as "node-info", this is
801 // about the environment where crunch-run is actually running, which
802 // might differ from what's described in the node record (see
803 // LogNodeRecord).
804 func (runner *ContainerRunner) LogHostInfo() (err error) {
805         w, err := runner.NewLogWriter("node-info")
806         if err != nil {
807                 return
808         }
809
810         commands := []infoCommand{
811                 {
812                         label: "Host Information",
813                         cmd:   []string{"uname", "-a"},
814                 },
815                 {
816                         label: "CPU Information",
817                         cmd:   []string{"cat", "/proc/cpuinfo"},
818                 },
819                 {
820                         label: "Memory Information",
821                         cmd:   []string{"cat", "/proc/meminfo"},
822                 },
823                 {
824                         label: "Disk Space",
825                         cmd:   []string{"df", "-m", "/", os.TempDir()},
826                 },
827                 {
828                         label: "Disk INodes",
829                         cmd:   []string{"df", "-i", "/", os.TempDir()},
830                 },
831         }
832
833         // Run commands with informational output to be logged.
834         for _, command := range commands {
835                 fmt.Fprintln(w, command.label)
836                 cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
837                 cmd.Stdout = w
838                 cmd.Stderr = w
839                 if err := cmd.Run(); err != nil {
840                         err = fmt.Errorf("While running command %q: %v", command.cmd, err)
841                         fmt.Fprintln(w, err)
842                         return err
843                 }
844                 fmt.Fprintln(w, "")
845         }
846
847         err = w.Close()
848         if err != nil {
849                 return fmt.Errorf("While closing node-info logs: %v", err)
850         }
851         return nil
852 }
853
854 // LogContainerRecord gets and saves the raw JSON container record from the API server
855 func (runner *ContainerRunner) LogContainerRecord() error {
856         logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
857         if !logged && err == nil {
858                 err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
859         }
860         return err
861 }
862
863 // LogNodeRecord logs the current host's InstanceType config entry (or
864 // the arvados#node record, if running via crunch-dispatch-slurm).
865 func (runner *ContainerRunner) LogNodeRecord() error {
866         if it := os.Getenv("InstanceType"); it != "" {
867                 // Dispatched via arvados-dispatch-cloud. Save
868                 // InstanceType config fragment received from
869                 // dispatcher on stdin.
870                 w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
871                 if err != nil {
872                         return err
873                 }
874                 defer w.Close()
875                 _, err = io.WriteString(w, it)
876                 if err != nil {
877                         return err
878                 }
879                 return w.Close()
880         }
881         // Dispatched via crunch-dispatch-slurm. Look up
882         // apiserver's node record corresponding to
883         // $SLURMD_NODENAME.
884         hostname := os.Getenv("SLURMD_NODENAME")
885         if hostname == "" {
886                 hostname, _ = os.Hostname()
887         }
888         _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
889                 // The "info" field has admin-only info when
890                 // obtained with a privileged token, and
891                 // should not be logged.
892                 node, ok := resp.(map[string]interface{})
893                 if ok {
894                         delete(node, "info")
895                 }
896         })
897         return err
898 }
899
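// logAPIResponse fetches the first record matching params from the
// given API path and writes it, pretty-printed, to <label>.json in the
// log collection. It returns logged=false if no matching record was
// found.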
900 func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
901         writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
902         if err != nil {
903                 return false, err
904         }
905         w := &ArvLogWriter{
906                 ArvClient:     runner.DispatcherArvClient,
907                 UUID:          runner.Container.UUID,
908                 loggingStream: label,
909                 writeCloser:   writer,
910         }
911
912         reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
913         if err != nil {
914                 return false, fmt.Errorf("error getting %s record: %v", label, err)
915         }
916         defer reader.Close()
917
918         dec := json.NewDecoder(reader)
919         dec.UseNumber()
920         var resp map[string]interface{}
921         if err = dec.Decode(&resp); err != nil {
922                 return false, fmt.Errorf("error decoding %s list response: %v", label, err)
923         }
924         items, ok := resp["items"].([]interface{})
925         if !ok {
926                 return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
927         } else if len(items) < 1 {
928                 return false, nil
929         }
930         if munge != nil {
931                 munge(items[0])
932         }
933         // Re-encode it using indentation to improve readability
934         enc := json.NewEncoder(w)
935         enc.SetIndent("", "    ")
936         if err = enc.Encode(items[0]); err != nil {
937                 return false, fmt.Errorf("error logging %s record: %v", label, err)
938         }
939         err = w.Close()
940         if err != nil {
941                 return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
942         }
943         return true, nil
944 }
945
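// getStdoutFile creates and returns the file in the host output
// directory corresponding to the given stdout/stderr mount path,
// creating intermediate directories as needed.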
946 func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
947         stdoutPath := mntPath[len(runner.Container.OutputPath):]
948         index := strings.LastIndex(stdoutPath, "/")
949         if index > 0 {
950                 subdirs := stdoutPath[:index]
951                 if subdirs != "" {
952                         st, err := os.Stat(runner.HostOutputDir)
953                         if err != nil {
954                                 return nil, fmt.Errorf("While Stat on temp dir: %v", err)
955                         }
956                         stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
957                         err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
958                         if err != nil {
959                                 return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
960                         }
961                 }
962         }
963         stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
964         if err != nil {
965                 return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
966         }
967
968         return stdoutFile, nil
969 }
970
971 // CreateContainer creates the docker container.
972 func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {
973         var stdin io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
974         if mnt, ok := runner.Container.Mounts["stdin"]; ok {
975                 switch mnt.Kind {
976                 case "collection":
977                         var collID string
978                         if mnt.UUID != "" {
979                                 collID = mnt.UUID
980                         } else {
981                                 collID = mnt.PortableDataHash
982                         }
983                         path := runner.ArvMountPoint + "/by_id/" + collID + "/" + mnt.Path
984                         f, err := os.Open(path)
985                         if err != nil {
986                                 return err
987                         }
988                         stdin = f
989                 case "json":
990                         j, err := json.Marshal(mnt.Content)
991                         if err != nil {
992                                 return fmt.Errorf("error encoding stdin json data: %v", err)
993                         }
994                         stdin = ioutil.NopCloser(bytes.NewReader(j))
995                 default:
996                         return fmt.Errorf("stdin mount has unsupported kind %q", mnt.Kind)
997                 }
998         }
999
1000         var stdout, stderr io.WriteCloser
1001         if mnt, ok := runner.Container.Mounts["stdout"]; ok {
1002                 f, err := runner.getStdoutFile(mnt.Path)
1003                 if err != nil {
1004                         return err
1005                 }
1006                 stdout = f
1007         } else if w, err := runner.NewLogWriter("stdout"); err != nil {
1008                 return err
1009         } else {
1010                 stdout = NewThrottledLogger(w)
1011         }
1012
1013         if mnt, ok := runner.Container.Mounts["stderr"]; ok {
1014                 f, err := runner.getStdoutFile(mnt.Path)
1015                 if err != nil {
1016                         return err
1017                 }
1018                 stderr = f
1019         } else if w, err := runner.NewLogWriter("stderr"); err != nil {
1020                 return err
1021         } else {
1022                 stderr = NewThrottledLogger(w)
1023         }
1024
1025         env := runner.Container.Environment
1026         enableNetwork := runner.enableNetwork == "always"
1027         if runner.Container.RuntimeConstraints.API {
1028                 enableNetwork = true
1029                 tok, err := runner.ContainerToken()
1030                 if err != nil {
1031                         return err
1032                 }
1033                 env = map[string]string{}
1034                 for k, v := range runner.Container.Environment {
1035                         env[k] = v
1036                 }
1037                 env["ARVADOS_API_TOKEN"] = tok
1038                 env["ARVADOS_API_HOST"] = os.Getenv("ARVADOS_API_HOST")
1039                 env["ARVADOS_API_HOST_INSECURE"] = os.Getenv("ARVADOS_API_HOST_INSECURE")
1040                 env["ARVADOS_KEEP_SERVICES"] = os.Getenv("ARVADOS_KEEP_SERVICES")
1041         }
1042         workdir := runner.Container.Cwd
1043         if workdir == "." {
1044                 // both "" and "." mean default
1045                 workdir = ""
1046         }
1047         ram := runner.Container.RuntimeConstraints.RAM
1048         if !runner.enableMemoryLimit {
1049                 ram = 0
1050         }
1051         runner.executorStdin = stdin
1052         runner.executorStdout = stdout
1053         runner.executorStderr = stderr
1054
1055         if runner.Container.RuntimeConstraints.CUDA.DeviceCount > 0 {
1056                 nvidiaModprobe(runner.CrunchLog)
1057         }
1058
1059         return runner.executor.Create(containerSpec{
1060                 Image:           imageID,
1061                 VCPUs:           runner.Container.RuntimeConstraints.VCPUs,
1062                 RAM:             ram,
1063                 WorkingDir:      workdir,
1064                 Env:             env,
1065                 BindMounts:      bindmounts,
1066                 Command:         runner.Container.Command,
1067                 EnableNetwork:   enableNetwork,
1068                 CUDADeviceCount: runner.Container.RuntimeConstraints.CUDA.DeviceCount,
1069                 NetworkMode:     runner.networkMode,
1070                 CgroupParent:    runner.setCgroupParent,
1071                 Stdin:           stdin,
1072                 Stdout:          stdout,
1073                 Stderr:          stderr,
1074         })
1075 }
1076
1077 // StartContainer starts the docker container created by CreateContainer.
1078 func (runner *ContainerRunner) StartContainer() error {
1079         runner.CrunchLog.Printf("Starting container")
1080         runner.cStateLock.Lock()
1081         defer runner.cStateLock.Unlock()
1082         if runner.cCancelled {
1083                 return ErrCancelled
1084         }
1085         err := runner.executor.Start()
1086         if err != nil {
1087                 var advice string
1088                 if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
1089                         advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
1090                 }
1091                 return fmt.Errorf("could not start container: %v%s", err, advice)
1092         }
1093         return nil
1094 }
1095
1096 // WaitFinish waits for the container to terminate, captures the exit code, and
1097 // closes the stdout/stderr logging.
1098 func (runner *ContainerRunner) WaitFinish() error {
1099         runner.CrunchLog.Print("Waiting for container to finish")
1100         var timeout <-chan time.Time
1101         if s := runner.Container.SchedulingParameters.MaxRunTime; s > 0 {
1102                 timeout = time.After(time.Duration(s) * time.Second)
1103         }
1104         ctx, cancel := context.WithCancel(context.Background())
1105         defer cancel()
1106         go func() {
1107                 select {
1108                 case <-timeout:
1109                         runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
1110                         runner.stop(nil)
1111                 case <-runner.ArvMountExit:
1112                         runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
1113                         runner.stop(nil)
1114                 case <-ctx.Done():
1115                 }
1116         }()
1117         exitcode, err := runner.executor.Wait(ctx)
1118         if err != nil {
1119                 runner.checkBrokenNode(err)
1120                 return err
1121         }
1122         runner.ExitCode = &exitcode
1123
1124         extra := ""
1125         if exitcode&0x80 != 0 {
1126                 // Convert raw exit status (0x80 + signal number) to a
1127                 // string to log after the code, like " (signal 101)"
1128                 // or " (signal 9, SIGKILL)"
1129                 sig := syscall.WaitStatus(exitcode).Signal()
1130                 if name := unix.SignalName(sig); name != "" {
1131                         extra = fmt.Sprintf(" (signal %d, %s)", sig, name)
1132                 } else {
1133                         extra = fmt.Sprintf(" (signal %d)", sig)
1134                 }
1135         }
1136         runner.CrunchLog.Printf("Container exited with status code %d%s", exitcode, extra)
1137         err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1138                 "select":    []string{"uuid"},
1139                 "container": arvadosclient.Dict{"exit_code": exitcode},
1140         }, nil)
1141         if err != nil {
1142                 runner.CrunchLog.Printf("ignoring error updating exit_code: %s", err)
1143         }
1144
1145         var returnErr error
1146         if err = runner.executorStdin.Close(); err != nil {
1147                 err = fmt.Errorf("error closing container stdin: %s", err)
1148                 runner.CrunchLog.Printf("%s", err)
1149                 returnErr = err
1150         }
1151         if err = runner.executorStdout.Close(); err != nil {
1152                 err = fmt.Errorf("error closing container stdout: %s", err)
1153                 runner.CrunchLog.Printf("%s", err)
1154                 if returnErr == nil {
1155                         returnErr = err
1156                 }
1157         }
1158         if err = runner.executorStderr.Close(); err != nil {
1159                 err = fmt.Errorf("error closing container stderr: %s", err)
1160                 runner.CrunchLog.Printf("%s", err)
1161                 if returnErr == nil {
1162                         returnErr = err
1163                 }
1164         }
1165
1166         if runner.statReporter != nil {
1167                 runner.statReporter.Stop()
1168                 runner.statReporter.LogMaxima(runner.CrunchLog, map[string]int64{
1169                         "rss": runner.Container.RuntimeConstraints.RAM,
1170                 })
1171                 err = runner.statLogger.Close()
1172                 if err != nil {
1173                         runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
1174                 }
1175         }
1176         return returnErr
1177 }
1178
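// updateLogs periodically saves the log collection (when it has grown
// or enough time has passed) and updates the container record's log
// field; receiving SIGUSR1 triggers an immediate save.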
1179 func (runner *ContainerRunner) updateLogs() {
1180         ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
1181         defer ticker.Stop()
1182
1183         sigusr1 := make(chan os.Signal, 1)
1184         signal.Notify(sigusr1, syscall.SIGUSR1)
1185         defer signal.Stop(sigusr1)
1186
1187         saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
1188         saveAtSize := crunchLogUpdateSize
1189         var savedSize int64
1190         for {
1191                 select {
1192                 case <-ticker.C:
1193                 case <-sigusr1:
1194                         saveAtTime = time.Now()
1195                 }
1196                 runner.logMtx.Lock()
1197                 done := runner.LogsPDH != nil
1198                 runner.logMtx.Unlock()
1199                 if done {
1200                         return
1201                 }
1202                 size := runner.LogCollection.Size()
1203                 if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
1204                         continue
1205                 }
1206                 saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
1207                 saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
1208                 saved, err := runner.saveLogCollection(false)
1209                 if err != nil {
1210                         runner.CrunchLog.Printf("error updating log collection: %s", err)
1211                         continue
1212                 }
1213
1214                 err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1215                         "select": []string{"uuid"},
1216                         "container": arvadosclient.Dict{
1217                                 "log": saved.PortableDataHash,
1218                         },
1219                 }, nil)
1220                 if err != nil {
1221                         runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
1222                         continue
1223                 }
1224
1225                 savedSize = size
1226         }
1227 }
1228
1229 var spotInterruptionCheckInterval = 5 * time.Second
1230 var ec2MetadataBaseURL = "http://169.254.169.254"
1231
1232 const ec2TokenTTL = time.Second * 21600
1233
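// checkSpotInterruptionNotices polls the EC2 instance metadata service
// (using an IMDSv2 session token) for a spot instance-action notice.
// When a new notice appears, it is logged and reported in the
// container's runtime_status.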
1234 func (runner *ContainerRunner) checkSpotInterruptionNotices() {
1235         type ec2metadata struct {
1236                 Action string    `json:"action"`
1237                 Time   time.Time `json:"time"`
1238         }
1239         runner.CrunchLog.Printf("Checking for spot interruptions every %v using instance metadata at %s", spotInterruptionCheckInterval, ec2MetadataBaseURL)
1240         var metadata ec2metadata
1241         var token string
1242         var tokenExp time.Time
1243         check := func() error {
1244                 ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
1245                 defer cancel()
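                // Get a fresh IMDSv2 session token if we have none or
                // ours expires within the next minute.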
1246                 if token == "" || time.Until(tokenExp) < time.Minute {
1247                         req, err := http.NewRequestWithContext(ctx, http.MethodPut, ec2MetadataBaseURL+"/latest/api/token", nil)
1248                         if err != nil {
1249                                 return err
1250                         }
1251                         req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", fmt.Sprintf("%d", int(ec2TokenTTL/time.Second)))
1252                         resp, err := http.DefaultClient.Do(req)
1253                         if err != nil {
1254                                 return err
1255                         }
1256                         defer resp.Body.Close()
1257                         if resp.StatusCode != http.StatusOK {
1258                                 return fmt.Errorf("%s", resp.Status)
1259                         }
1260                         newtoken, err := ioutil.ReadAll(resp.Body)
1261                         if err != nil {
1262                                 return err
1263                         }
1264                         token = strings.TrimSpace(string(newtoken))
1265                         tokenExp = time.Now().Add(ec2TokenTTL)
1266                 }
1267                 req, err := http.NewRequestWithContext(ctx, http.MethodGet, ec2MetadataBaseURL+"/latest/meta-data/spot/instance-action", nil)
1268                 if err != nil {
1269                         return err
1270                 }
1271                 req.Header.Set("X-aws-ec2-metadata-token", token)
1272                 resp, err := http.DefaultClient.Do(req)
1273                 if err != nil {
1274                         return err
1275                 }
1276                 defer resp.Body.Close()
1277                 metadata = ec2metadata{}
1278                 switch resp.StatusCode {
1279                 case http.StatusOK:
1280                         break
1281                 case http.StatusNotFound:
1282                         // "If Amazon EC2 is not preparing to stop or
1283                         // terminate the instance, or if you
1284                         // terminated the instance yourself,
1285                         // instance-action is not present in the
1286                         // instance metadata and you receive an HTTP
1287                         // 404 error when you try to retrieve it."
1288                         return nil
1289                 case http.StatusUnauthorized:
1290                         token = ""
1291                         return fmt.Errorf("%s", resp.Status)
1292                 default:
1293                         return fmt.Errorf("%s", resp.Status)
1294                 }
1295                 err = json.NewDecoder(resp.Body).Decode(&metadata)
1296                 if err != nil {
1297                         return err
1298                 }
1299                 return nil
1300         }
1301         failures := 0
1302         var lastmetadata ec2metadata
1303         for range time.NewTicker(spotInterruptionCheckInterval).C {
1304                 err := check()
1305                 if err != nil {
1306                         runner.CrunchLog.Printf("Error checking spot interruptions: %s", err)
1307                         failures++
1308                         if failures > 5 {
1309                                 runner.CrunchLog.Printf("Giving up on checking spot interruptions after too many consecutive failures")
1310                                 return
1311                         }
1312                         continue
1313                 }
1314                 failures = 0
1315                 if metadata != lastmetadata {
1316                         lastmetadata = metadata
1317                         text := fmt.Sprintf("Cloud provider scheduled instance %s at %s", metadata.Action, metadata.Time.UTC().Format(time.RFC3339))
1318                         runner.CrunchLog.Printf("%s", text)
1319                         runner.updateRuntimeStatus(arvadosclient.Dict{
1320                                 "warning":          "preemption notice",
1321                                 "warningDetail":    text,
1322                                 "preemptionNotice": text,
1323                         })
1324                         if proc, err := os.FindProcess(os.Getpid()); err == nil {
1325                                 // trigger updateLogs
1326                                 proc.Signal(syscall.SIGUSR1)
1327                         }
1328                 }
1329         }
1330 }
1331
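// updateRuntimeStatus updates the container's runtime_status field
// with the given entries, logging (but not returning) any error.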
1332 func (runner *ContainerRunner) updateRuntimeStatus(status arvadosclient.Dict) {
1333         err := runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1334                 "select": []string{"uuid"},
1335                 "container": arvadosclient.Dict{
1336                         "runtime_status": status,
1337                 },
1338         }, nil)
1339         if err != nil {
1340                 runner.CrunchLog.Printf("error updating container runtime_status: %s", err)
1341         }
1342 }
1343
1344 // CaptureOutput saves data from the container's output directory if
1345 // needed, and updates the container output accordingly.
1346 func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) error {
1347         if runner.Container.RuntimeConstraints.API {
1348                 // Output may have been set directly by the container, so
1349                 // refresh the container record to check.
1350                 err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
1351                         arvadosclient.Dict{
1352                                 "select": []string{"output"},
1353                         }, &runner.Container)
1354                 if err != nil {
1355                         return err
1356                 }
1357                 if runner.Container.Output != "" {
1358                         // Container output is already set.
1359                         runner.OutputPDH = &runner.Container.Output
1360                         return nil
1361                 }
1362         }
1363
1364         txt, err := (&copier{
1365                 client:        runner.containerClient,
1366                 keepClient:    runner.ContainerKeepClient,
1367                 hostOutputDir: runner.HostOutputDir,
1368                 ctrOutputDir:  runner.Container.OutputPath,
1369                 bindmounts:    bindmounts,
1370                 mounts:        runner.Container.Mounts,
1371                 secretMounts:  runner.SecretMounts,
1372                 logger:        runner.CrunchLog,
1373         }).Copy()
1374         if err != nil {
1375                 return err
1376         }
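        // Locators with "+R" hints refer to blocks stored on a remote
        // cluster (i.e., data from remote input collections). Rewrite
        // the manifest through a collection filesystem so those blocks
        // are copied to, and readable from, the local cluster.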
1377         if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
1378                 runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
1379                 fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
1380                 if err != nil {
1381                         return err
1382                 }
1383                 txt, err = fs.MarshalManifest(".")
1384                 if err != nil {
1385                         return err
1386                 }
1387         }
1388         var resp arvados.Collection
1389         err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
1390                 "ensure_unique_name": true,
1391                 "select":             []string{"portable_data_hash"},
1392                 "collection": arvadosclient.Dict{
1393                         "is_trashed":    true,
1394                         "name":          "output for " + runner.Container.UUID,
1395                         "manifest_text": txt,
1396                 },
1397         }, &resp)
1398         if err != nil {
1399                 return fmt.Errorf("error creating output collection: %v", err)
1400         }
1401         runner.OutputPDH = &resp.PortableDataHash
1402         return nil
1403 }
1404
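// CleanupDirs unmounts arv-mount (killing the processes if the unmount
// fails to start or times out) and removes the mount point directory
// and the per-container temporary directory tree.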
1405 func (runner *ContainerRunner) CleanupDirs() {
1406         if runner.ArvMount != nil {
1407                 var delay int64 = 8
1408                 umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
1409                 umount.Stdout = runner.CrunchLog
1410                 umount.Stderr = runner.CrunchLog
1411                 runner.CrunchLog.Printf("Running %v", umount.Args)
1412                 umnterr := umount.Start()
1413
1414                 if umnterr != nil {
1415                         runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
1416                         runner.ArvMount.Process.Kill()
1417                 } else {
1418                         // If arv-mount --unmount gets stuck for any reason, we
1419                         // don't want to wait for it forever.  Do Wait() in a goroutine
1420                         // so it doesn't block crunch-run.
1421                         umountExit := make(chan error)
1422                         go func() {
1423                                 mnterr := umount.Wait()
1424                                 if mnterr != nil {
1425                                         runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
1426                                 }
1427                                 umountExit <- mnterr
1428                         }()
1429
1430                         for again := true; again; {
1431                                 again = false
1432                                 select {
1433                                 case <-umountExit:
1434                                         umount = nil
1435                                         again = true
1436                                 case <-runner.ArvMountExit:
1437                                         break
1438                                 case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
1439                                         runner.CrunchLog.Printf("Timed out waiting for unmount")
1440                                         if umount != nil {
1441                                                 umount.Process.Kill()
1442                                         }
1443                                         runner.ArvMount.Process.Kill()
1444                                 }
1445                         }
1446                 }
1447                 runner.ArvMount = nil
1448         }
1449
1450         if runner.ArvMountPoint != "" {
1451                 if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
1452                         runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
1453                 }
1454                 runner.ArvMountPoint = ""
1455         }
1456
1457         if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
1458                 runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
1459         }
1460 }
1461
1462 // CommitLogs posts the collection containing the final container logs.
1463 func (runner *ContainerRunner) CommitLogs() error {
1464         func() {
1465                 // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
1466                 runner.cStateLock.Lock()
1467                 defer runner.cStateLock.Unlock()
1468
1469                 runner.CrunchLog.Print(runner.finalState)
1470
1471                 if runner.arvMountLog != nil {
1472                         runner.arvMountLog.Close()
1473                 }
1474                 runner.CrunchLog.Close()
1475
1476                 // Closing CrunchLog above lets its buffered logs be committed to
1477                 // Keep now. Re-open CrunchLog, backed only by ArvClient, so any
1478                 // further errors while shutting down (such as failing to write
1479                 // the log collection to Keep) can still be reported.
1480                 runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
1481                         ArvClient:     runner.DispatcherArvClient,
1482                         UUID:          runner.Container.UUID,
1483                         loggingStream: "crunch-run",
1484                         writeCloser:   nil,
1485                 })
1486                 runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
1487         }()
1488
1489         if runner.keepstoreLogger != nil {
1490                 // Flush any buffered logs from our local keepstore
1491                 // process.  Discard anything logged after this point
1492                 // -- it won't end up in the log collection, so
1493                 // there's no point writing it to the collectionfs.
1494                 runner.keepstoreLogbuf.SetWriter(io.Discard)
1495                 runner.keepstoreLogger.Close()
1496                 runner.keepstoreLogger = nil
1497         }
1498
1499         if runner.LogsPDH != nil {
1500                 // If we have already assigned something to LogsPDH,
1501                 // we must be closing the re-opened log, which won't
1502                 // end up getting attached to the container record and
1503                 // therefore doesn't need to be saved as a collection
1504                 // -- it exists only to send logs to other channels.
1505                 return nil
1506         }
1507
1508         saved, err := runner.saveLogCollection(true)
1509         if err != nil {
1510                 return fmt.Errorf("error saving log collection: %s", err)
1511         }
1512         runner.logMtx.Lock()
1513         defer runner.logMtx.Unlock()
1514         runner.LogsPDH = &saved.PortableDataHash
1515         return nil
1516 }
1517
1518 // saveLogCollection creates or updates the log collection. The return
1519 // value has UUID and PortableDataHash populated; other fields may be blank.
1520 func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
1521         runner.logMtx.Lock()
1522         defer runner.logMtx.Unlock()
1523         if runner.LogsPDH != nil {
1524                 // Already finalized.
1525                 return
1526         }
1527         updates := arvadosclient.Dict{
1528                 "name": "logs for " + runner.Container.UUID,
1529         }
1530         mt, err1 := runner.LogCollection.MarshalManifest(".")
1531         if err1 == nil {
1532                 // Only send updated manifest text if there was no
1533                 // error.
1534                 updates["manifest_text"] = mt
1535         }
1536
1537         // Even if flushing the manifest had an error, we still want
1538         // to update the log record, if possible, to push the trash_at
1539         // and delete_at times into the future.  Details on bug
1540         // #17293.
1541         if final {
1542                 updates["is_trashed"] = true
1543         } else {
1544                 // Set trash_at so this collection is eventually
1545                 // cleaned up automatically. The delay used to be 12
1546                 // hours, but after an incident where the API server
1547                 // was down over a weekend while containers kept
1548                 // running and their log collections were trashed
1549                 // prematurely, it is now 2 weeks.  refs #20378
1550                 exp := time.Now().Add(time.Duration(24*14) * time.Hour)
1551                 updates["trash_at"] = exp
1552                 updates["delete_at"] = exp
1553         }
1554         reqBody := arvadosclient.Dict{
1555                 "select":     []string{"uuid", "portable_data_hash"},
1556                 "collection": updates,
1557         }
1558         var err2 error
1559         if runner.logUUID == "" {
1560                 reqBody["ensure_unique_name"] = true
1561                 err2 = runner.DispatcherArvClient.Create("collections", reqBody, &response)
1562         } else {
1563                 err2 = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
1564         }
1565         if err2 == nil {
1566                 runner.logUUID = response.UUID
1567         }
1568
1569         if err1 != nil || err2 != nil {
1570                 err = fmt.Errorf("error recording logs: %q, %q", err1, err2)
1571         }
1572         return
1573 }
1574
1575 // UpdateContainerRunning updates the container state to "Running"
1576 func (runner *ContainerRunner) UpdateContainerRunning(logId string) error {
1577         runner.cStateLock.Lock()
1578         defer runner.cStateLock.Unlock()
1579         if runner.cCancelled {
1580                 return ErrCancelled
1581         }
1582         updates := arvadosclient.Dict{
1583                 "gateway_address": runner.gateway.Address,
1584                 "state":           "Running",
1585         }
1586         if logId != "" {
1587                 updates["log"] = logId
1588         }
1589         return runner.DispatcherArvClient.Update(
1590                 "containers",
1591                 runner.Container.UUID,
1592                 arvadosclient.Dict{
1593                         "select":    []string{"uuid"},
1594                         "container": updates,
1595                 },
1596                 nil,
1597         )
1598 }
1599
1600 // ContainerToken returns the api_token the container (and any
1601 // arv-mount processes) are allowed to use.
1602 func (runner *ContainerRunner) ContainerToken() (string, error) {
1603         if runner.token != "" {
1604                 return runner.token, nil
1605         }
1606
1607         var auth arvados.APIClientAuthorization
1608         err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
1609         if err != nil {
1610                 return "", err
1611         }
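        // Assemble a "v2" format token string:
        // v2/<authorization UUID>/<secret>/<container UUID>.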
1612         runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
1613         return runner.token, nil
1614 }
1615
1616 // UpdateContainerFinal updates the container record state on the API
1617 // server to "Complete" or "Cancelled".
1618 func (runner *ContainerRunner) UpdateContainerFinal() error {
1619         update := arvadosclient.Dict{}
1620         update["state"] = runner.finalState
1621         if runner.LogsPDH != nil {
1622                 update["log"] = *runner.LogsPDH
1623         }
1624         if runner.ExitCode != nil {
1625                 update["exit_code"] = *runner.ExitCode
1626         } else {
1627                 update["exit_code"] = nil
1628         }
1629         if runner.finalState == "Complete" && runner.OutputPDH != nil {
1630                 update["output"] = *runner.OutputPDH
1631         }
1632         update["cost"] = runner.calculateCost(time.Now())
1633         return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1634                 "select":    []string{"uuid"},
1635                 "container": update,
1636         }, nil)
1637 }
1638
1639 // IsCancelled returns the value of cCancelled, with goroutine safety.
1640 func (runner *ContainerRunner) IsCancelled() bool {
1641         runner.cStateLock.Lock()
1642         defer runner.cStateLock.Unlock()
1643         return runner.cCancelled
1644 }
1645
1646 // NewArvLogWriter creates an ArvLogWriter
1647 func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
1648         writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
1649         if err != nil {
1650                 return nil, err
1651         }
1652         return &ArvLogWriter{
1653                 ArvClient:     runner.DispatcherArvClient,
1654                 UUID:          runner.Container.UUID,
1655                 loggingStream: name,
1656                 writeCloser:   writer,
1657         }, nil
1658 }
1659
1660 // Run runs the full container lifecycle.
1661 func (runner *ContainerRunner) Run() (err error) {
1662         runner.CrunchLog.Printf("crunch-run %s started", cmd.Version.String())
1663         runner.CrunchLog.Printf("%s", currentUserAndGroups())
1664         v, _ := exec.Command("arv-mount", "--version").CombinedOutput()
1665         runner.CrunchLog.Printf("Using FUSE mount: %s", v)
1666         runner.CrunchLog.Printf("Using container runtime: %s", runner.executor.Runtime())
1667         runner.CrunchLog.Printf("Executing container: %s", runner.Container.UUID)
1668         runner.costStartTime = time.Now()
1669
1670         hostname, hosterr := os.Hostname()
1671         if hosterr != nil {
1672                 runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
1673         } else {
1674                 runner.CrunchLog.Printf("Executing on host '%s'", hostname)
1675         }
1676
1677         sigusr2 := make(chan os.Signal, 1)
1678         signal.Notify(sigusr2, syscall.SIGUSR2)
1679         defer signal.Stop(sigusr2)
1680         runner.loadPrices()
1681         go runner.handleSIGUSR2(sigusr2)
1682
1683         runner.finalState = "Queued"
1684
1685         defer func() {
1686                 runner.CleanupDirs()
1687
1688                 runner.CrunchLog.Printf("crunch-run finished")
1689                 runner.CrunchLog.Close()
1690         }()
1691
1692         err = runner.fetchContainerRecord()
1693         if err != nil {
1694                 return
1695         }
1696         if runner.Container.State != "Locked" {
1697                 return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
1698         }
1699
1700         var bindmounts map[string]bindmount
1701         defer func() {
1702                 // checkErr prints e (unless it's nil) and sets err to
1703                 // e (unless err is already non-nil). Thus, if err
1704                 // hasn't already been assigned when Run() returns,
1705                 // this cleanup func will cause Run() to return the
1706                 // first non-nil error that is passed to checkErr().
1707                 checkErr := func(errorIn string, e error) {
1708                         if e == nil {
1709                                 return
1710                         }
1711                         runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
1712                         if err == nil {
1713                                 err = e
1714                         }
1715                         if runner.finalState == "Complete" {
1716                                 // There was an error in the finalization.
1717                                 runner.finalState = "Cancelled"
1718                         }
1719                 }
1720
1721                 // Log the error encountered in Run(), if any
1722                 checkErr("Run", err)
1723
1724                 if runner.finalState == "Queued" {
1725                         runner.UpdateContainerFinal()
1726                         return
1727                 }
1728
1729                 if runner.IsCancelled() {
1730                         runner.finalState = "Cancelled"
1731                         // but don't return yet -- we still want to
1732                         // capture partial output and write logs
1733                 }
1734
1735                 if bindmounts != nil {
1736                         checkErr("CaptureOutput", runner.CaptureOutput(bindmounts))
1737                 }
1738                 checkErr("stopHoststat", runner.stopHoststat())
1739                 checkErr("CommitLogs", runner.CommitLogs())
1740                 runner.CleanupDirs()
1741                 checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
1742         }()
1743
1744         runner.setupSignals()
1745         err = runner.startHoststat()
1746         if err != nil {
1747                 return
1748         }
1749         if runner.keepstore != nil {
1750                 runner.hoststatReporter.ReportPID("keepstore", runner.keepstore.Process.Pid)
1751         }
1752
1753         // set up FUSE mount and binds
1754         bindmounts, err = runner.SetupMounts()
1755         if err != nil {
1756                 runner.finalState = "Cancelled"
1757                 err = fmt.Errorf("While setting up mounts: %v", err)
1758                 return
1759         }
1760
1761         // check for and/or load image
1762         imageID, err := runner.LoadImage()
1763         if err != nil {
1764                 if !runner.checkBrokenNode(err) {
1765                         // Failed to load image but not due to a "broken node"
1766                         // condition, probably user error.
1767                         runner.finalState = "Cancelled"
1768                 }
1769                 err = fmt.Errorf("While loading container image: %v", err)
1770                 return
1771         }
1772
1773         err = runner.CreateContainer(imageID, bindmounts)
1774         if err != nil {
1775                 return
1776         }
1777         err = runner.LogHostInfo()
1778         if err != nil {
1779                 return
1780         }
1781         err = runner.LogNodeRecord()
1782         if err != nil {
1783                 return
1784         }
1785         err = runner.LogContainerRecord()
1786         if err != nil {
1787                 return
1788         }
1789
1790         if runner.IsCancelled() {
1791                 return
1792         }
1793
1794         logCollection, err := runner.saveLogCollection(false)
1795         var logId string
1796         if err == nil {
1797                 logId = logCollection.PortableDataHash
1798         } else {
1799                 runner.CrunchLog.Printf("Error committing initial log collection: %v", err)
1800         }
1801         err = runner.UpdateContainerRunning(logId)
1802         if err != nil {
1803                 return
1804         }
1805         runner.finalState = "Cancelled"
1806
1807         err = runner.startCrunchstat()
1808         if err != nil {
1809                 return
1810         }
1811
1812         err = runner.StartContainer()
1813         if err != nil {
1814                 runner.checkBrokenNode(err)
1815                 return
1816         }
1817
1818         err = runner.WaitFinish()
1819         if err == nil && !runner.IsCancelled() {
1820                 runner.finalState = "Complete"
1821         }
1822         return
1823 }
1824
1825 // fetchContainerRecord fetches the current container record (uuid =
1826 // runner.Container.UUID) into runner.Container.
1827 func (runner *ContainerRunner) fetchContainerRecord() error {
1828         reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
1829         if err != nil {
1830                 return fmt.Errorf("error fetching container record: %v", err)
1831         }
1832         defer reader.Close()
1833
1834         dec := json.NewDecoder(reader)
1835         dec.UseNumber()
1836         err = dec.Decode(&runner.Container)
1837         if err != nil {
1838                 return fmt.Errorf("error decoding container record: %v", err)
1839         }
1840
1841         var sm struct {
1842                 SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
1843         }
1844
1845         containerToken, err := runner.ContainerToken()
1846         if err != nil {
1847                 return fmt.Errorf("error getting container token: %v", err)
1848         }
1849
1850         runner.ContainerArvClient, runner.ContainerKeepClient,
1851                 runner.containerClient, err = runner.MkArvClient(containerToken)
1852         if err != nil {
1853                 return fmt.Errorf("error creating container API client: %v", err)
1854         }
1855
1856         runner.ContainerKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
1857         runner.DispatcherKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
1858
1859         err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
1860         if err != nil {
1861                 if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
1862                         return fmt.Errorf("error fetching secret_mounts: %v", err)
1863                 }
1864                 // ok && apierr.HttpStatusCode == 404, which means
1865                 // secret_mounts isn't supported by this API server.
1866         }
1867         runner.SecretMounts = sm.SecretMounts
1868
1869         return nil
1870 }
1871
1872 // NewContainerRunner creates a new container runner.
1873 func NewContainerRunner(dispatcherClient *arvados.Client,
1874         dispatcherArvClient IArvadosClient,
1875         dispatcherKeepClient IKeepClient,
1876         containerUUID string) (*ContainerRunner, error) {
1877
1878         cr := &ContainerRunner{
1879                 dispatcherClient:     dispatcherClient,
1880                 DispatcherArvClient:  dispatcherArvClient,
1881                 DispatcherKeepClient: dispatcherKeepClient,
1882         }
1883         cr.NewLogWriter = cr.NewArvLogWriter
1884         cr.RunArvMount = cr.ArvMountCmd
1885         cr.MkTempDir = ioutil.TempDir
1886         cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
1887                 cl, err := arvadosclient.MakeArvadosClient()
1888                 if err != nil {
1889                         return nil, nil, nil, err
1890                 }
1891                 cl.ApiToken = token
1892                 kc, err := keepclient.MakeKeepClient(cl)
1893                 if err != nil {
1894                         return nil, nil, nil, err
1895                 }
1896                 c2 := arvados.NewClientFromEnv()
1897                 c2.AuthToken = token
1898                 return cl, kc, c2, nil
1899         }
1900         var err error
1901         cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
1902         if err != nil {
1903                 return nil, err
1904         }
1905         cr.Container.UUID = containerUUID
1906         w, err := cr.NewLogWriter("crunch-run")
1907         if err != nil {
1908                 return nil, err
1909         }
1910         cr.CrunchLog = NewThrottledLogger(w)
1911         cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
1912
1913         loadLogThrottleParams(dispatcherArvClient)
1914         go cr.updateLogs()
1915
1916         return cr, nil
1917 }
1918
1919 func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
1920         log := log.New(stderr, "", 0)
1921         flags := flag.NewFlagSet(prog, flag.ContinueOnError)
1922         statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
1923         flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree (obsolete, ignored)")
1924         flags.String("cgroup-parent", "docker", "name of container's parent cgroup (obsolete, ignored)")
1925         cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given `subsystem` as parent cgroup for container (subsystem argument is only relevant for cgroups v1; in cgroups v2 / unified mode, any non-empty value means use current cgroup); if empty, use the docker daemon's default cgroup parent. See https://doc.arvados.org/install/crunch2-slurm/install-dispatch.html#CrunchRunCommand-cgroups")
1926         caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
1927         detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
1928         stdinConfig := flags.Bool("stdin-config", false, "Load config and environment variables from JSON message on stdin")
1929         configFile := flags.String("config", arvados.DefaultConfigFile, "filename of cluster config file to try loading if -stdin-config=false (default is $ARVADOS_CONFIG)")
1930         sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
1931         kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
1932         list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes (and notify them to use price data passed on stdin)")
1933         enableMemoryLimit := flags.Bool("enable-memory-limit", true, "tell container runtime to limit container's memory usage")
1934         enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
1935         networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
1936         memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
1937         runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
1938         brokenNodeHook := flags.String("broken-node-hook", "", "script to run if node is detected to be broken (for example, Docker daemon is not running)")
1939         flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
1940         version := flags.Bool("version", false, "Write version information to stdout and exit 0.")
1941
1942         ignoreDetachFlag := false
1943         if len(args) > 0 && args[0] == "-no-detach" {
1944                 // This process was invoked by a parent process, which
1945                 // has passed along its own arguments, including
1946                 // -detach, after the leading -no-detach flag.  Strip
1947                 // the leading -no-detach flag (it's not recognized by
1948                 // flags.Parse()) and ignore the -detach flag that
1949                 // comes later.
1950                 args = args[1:]
1951                 ignoreDetachFlag = true
1952         }
1953
1954         if ok, code := cmd.ParseFlags(flags, prog, args, "container-uuid", stderr); !ok {
1955                 return code
1956         } else if *version {
1957                 fmt.Fprintln(stdout, prog, cmd.Version.String())
1958                 return 0
1959         } else if !*list && flags.NArg() != 1 {
1960                 fmt.Fprintf(stderr, "missing required argument: container-uuid (try -help)\n")
1961                 return 2
1962         }
1963
1964         containerUUID := flags.Arg(0)
1965
1966         switch {
1967         case *detach && !ignoreDetachFlag:
1968                 return Detach(containerUUID, prog, args, stdin, stdout, stderr)
1969         case *kill >= 0:
1970                 return KillProcess(containerUUID, syscall.Signal(*kill), stdout, stderr)
1971         case *list:
1972                 return ListProcesses(stdin, stdout, stderr)
1973         }
1974
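        // An Arvados container UUID is always 27 characters, e.g.
        // "zzzzz-dz642-0123456789abcde" (a made-up example).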
1975         if len(containerUUID) != 27 {
1976                 log.Printf("usage: %s [options] UUID", prog)
1977                 return 1
1978         }
1979
1980         var keepstoreLogbuf bufThenWrite
1981         var conf ConfigData
1982         if *stdinConfig {
1983                 err := json.NewDecoder(stdin).Decode(&conf)
1984                 if err != nil {
1985                         log.Printf("decode stdin: %s", err)
1986                         return 1
1987                 }
1988                 for k, v := range conf.Env {
1989                         err = os.Setenv(k, v)
1990                         if err != nil {
1991                                 log.Printf("setenv(%q): %s", k, err)
1992                                 return 1
1993                         }
1994                 }
1995                 if conf.Cluster != nil {
1996                         // ClusterID is missing from the JSON
1997                         // representation, but we need it to generate
1998                         // a valid config file for keepstore, so we
1999                         // fill it using the container UUID prefix.
2000                         conf.Cluster.ClusterID = containerUUID[:5]
2001                 }
2002         } else {
2003                 conf = hpcConfData(containerUUID, *configFile, io.MultiWriter(&keepstoreLogbuf, stderr))
2004         }
2005
2006         log.Printf("crunch-run %s started", cmd.Version.String())
2007         time.Sleep(*sleep)
2008
2009         if *caCertsPath != "" {
2010                 os.Setenv("SSL_CERT_FILE", *caCertsPath)
2011         }
2012
2013         keepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))
2014         if err != nil {
2015                 log.Print(err)
2016                 return 1
2017         }
2018         if keepstore != nil {
2019                 defer keepstore.Process.Kill()
2020         }
2021
2022         api, err := arvadosclient.MakeArvadosClient()
2023         if err != nil {
2024                 log.Printf("%s: %v", containerUUID, err)
2025                 return 1
2026         }
2027         // arvadosclient now interprets Retries=10 to mean
2028         // Timeout=10m, retrying with exponential backoff + jitter.
2029         api.Retries = 10
2030
2031         kc, err := keepclient.MakeKeepClient(api)
2032         if err != nil {
2033                 log.Printf("%s: %v", containerUUID, err)
2034                 return 1
2035         }
2036         kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
2037         kc.Retries = 4
2038
2039         cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
2040         if err != nil {
2041                 log.Print(err)
2042                 return 1
2043         }
2044
2045         cr.keepstore = keepstore
2046         if keepstore == nil {
2047                 // Log explanation (if any) for why we're not running
2048                 // a local keepstore.
2049                 var buf bytes.Buffer
2050                 keepstoreLogbuf.SetWriter(&buf)
2051                 if buf.Len() > 0 {
2052                         cr.CrunchLog.Printf("%s", strings.TrimSpace(buf.String()))
2053                 }
2054         } else if logWhat := conf.Cluster.Containers.LocalKeepLogsToContainerLog; logWhat == "none" {
2055                 cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
2056                 keepstoreLogbuf.SetWriter(io.Discard)
2057         } else {
2058                 cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s, writing logs to keepstore.txt in log collection", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
2059                 logwriter, err := cr.NewLogWriter("keepstore")
2060                 if err != nil {
2061                         log.Print(err)
2062                         return 1
2063                 }
2064                 cr.keepstoreLogger = NewThrottledLogger(logwriter)
2065
2066                 var writer io.WriteCloser = cr.keepstoreLogger
2067                 if logWhat == "errors" {
2068                         writer = &filterKeepstoreErrorsOnly{WriteCloser: writer}
2069                 } else if logWhat != "all" {
2070                         // should have been caught earlier by
2071                         // dispatcher's config loader
2072                         log.Printf("invalid value for Containers.LocalKeepLogsToContainerLog: %q", logWhat)
2073                         return 1
2074                 }
2075                 err = keepstoreLogbuf.SetWriter(writer)
2076                 if err != nil {
2077                         log.Print(err)
2078                         return 1
2079                 }
2080                 cr.keepstoreLogbuf = &keepstoreLogbuf
2081         }
2082
2083         switch *runtimeEngine {
2084         case "docker":
2085                 cr.executor, err = newDockerExecutor(containerUUID, cr.CrunchLog.Printf, cr.containerWatchdogInterval)
2086         case "singularity":
2087                 cr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)
2088         default:
2089                 cr.CrunchLog.Printf("%s: unsupported RuntimeEngine %q", containerUUID, *runtimeEngine)
2090                 cr.CrunchLog.Close()
2091                 return 1
2092         }
2093         if err != nil {
2094                 cr.CrunchLog.Printf("%s: %v", containerUUID, err)
2095                 cr.checkBrokenNode(err)
2096                 cr.CrunchLog.Close()
2097                 return 1
2098         }
2099         defer cr.executor.Close()
2100
2101         cr.brokenNodeHook = *brokenNodeHook
2102
2103         gwAuthSecret := os.Getenv("GatewayAuthSecret")
2104         os.Unsetenv("GatewayAuthSecret")
2105         if gwAuthSecret == "" {
2106                 // not safe to run a gateway service without an auth
2107                 // secret
2108                 cr.CrunchLog.Printf("Not starting a gateway server (GatewayAuthSecret was not provided by dispatcher)")
2109         } else {
2110                 gwListen := os.Getenv("GatewayAddress")
2111                 cr.gateway = Gateway{
2112                         Address:       gwListen,
2113                         AuthSecret:    gwAuthSecret,
2114                         ContainerUUID: containerUUID,
2115                         Target:        cr.executor,
2116                         Log:           cr.CrunchLog,
2117                         LogCollection: cr.LogCollection,
2118                 }
2119                 if gwListen == "" {
2120                         // Direct connection won't work, so we use the
2121                         // gateway_address field to indicate the
2122                         // internalURL of the controller process that
2123                         // has the current tunnel connection.
2124                         cr.gateway.ArvadosClient = cr.dispatcherClient
2125                         cr.gateway.UpdateTunnelURL = func(url string) {
2126                                 cr.gateway.Address = "tunnel " + url
2127                                 cr.DispatcherArvClient.Update("containers", containerUUID,
2128                                         arvadosclient.Dict{
2129                                                 "select":    []string{"uuid"},
2130                                                 "container": arvadosclient.Dict{"gateway_address": cr.gateway.Address},
2131                                         }, nil)
2132                         }
2133                 }
2134                 err = cr.gateway.Start()
2135                 if err != nil {
2136                         log.Printf("error starting gateway server: %s", err)
2137                         return 1
2138                 }
2139         }
2140
2141         parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerUUID+".")
2142         if tmperr != nil {
2143                 log.Printf("%s: %v", containerUUID, tmperr)
2144                 return 1
2145         }
2146
2147         cr.parentTemp = parentTemp
2148         cr.statInterval = *statInterval
2149         cr.enableMemoryLimit = *enableMemoryLimit
2150         cr.enableNetwork = *enableNetwork
2151         cr.networkMode = *networkMode
2152         if *cgroupParentSubsystem != "" {
2153                 p, err := findCgroup(os.DirFS("/"), *cgroupParentSubsystem)
2154                 if err != nil {
2155                         log.Printf("fatal: cgroup parent subsystem: %s", err)
2156                         return 1
2157                 }
2158                 cr.setCgroupParent = p
2159         }
2160
2161         if conf.EC2SpotCheck {
2162                 go cr.checkSpotInterruptionNotices()
2163         }
2164
2165         runerr := cr.Run()
2166
2167         if *memprofile != "" {
2168                 f, err := os.Create(*memprofile)
2169                 if err != nil {
2170                         log.Printf("could not create memory profile: %s", err)
2171                 } else {
2172                         runtime.GC() // get up-to-date statistics
2173                         if err := pprof.WriteHeapProfile(f); err != nil {
2174                                 log.Printf("could not write memory profile: %s", err)
2175                         }
2176                         if closeerr := f.Close(); closeerr != nil {
2177                                 log.Printf("closing memprofile file: %s", closeerr)
2178                         }
2179                 }
2180         }
2181
2182         if runerr != nil {
2183                 log.Printf("%s: %v", containerUUID, runerr)
2184                 return 1
2185         }
2186         return 0
2187 }
2188
2189 // hpcConfData tries to load ConfigData in an HPC (slurm/lsf)
2190 // environment: load the cluster config from the specified file and,
2191 // if that works, fetch the container's runtime_constraints from the
2192 // controller to determine the VCPU count used to calculate KeepBuffers.
2193 func hpcConfData(uuid string, configFile string, stderr io.Writer) ConfigData {
2194         var conf ConfigData
2195         conf.Cluster = loadClusterConfigFile(configFile, stderr)
2196         if conf.Cluster == nil {
2197                 // skip loading the container record -- we won't be
2198                 // able to start local keepstore anyway.
2199                 return conf
2200         }
2201         arv, err := arvadosclient.MakeArvadosClient()
2202         if err != nil {
2203                 fmt.Fprintf(stderr, "error setting up arvadosclient: %s\n", err)
2204                 return conf
2205         }
2206         // arvadosclient now interprets Retries=10 to mean
2207         // Timeout=10m, retrying with exponential backoff + jitter.
2208         arv.Retries = 10
2209         var ctr arvados.Container
2210         err = arv.Call("GET", "containers", uuid, "", arvadosclient.Dict{"select": []string{"runtime_constraints"}}, &ctr)
2211         if err != nil {
2212                 fmt.Fprintf(stderr, "error getting container record: %s\n", err)
2213                 return conf
2214         }
2215         if ctr.RuntimeConstraints.VCPUs > 0 {
2216                 conf.KeepBuffers = ctr.RuntimeConstraints.VCPUs * conf.Cluster.Containers.LocalKeepBlobBuffersPerVCPU
2217         }
2218         return conf
2219 }
2220
2221 // Load cluster config file from given path. If an error occurs, log
2222 // the error to stderr and return nil.
2223 func loadClusterConfigFile(path string, stderr io.Writer) *arvados.Cluster {
2224         ldr := config.NewLoader(&bytes.Buffer{}, ctxlog.New(stderr, "plain", "info"))
2225         ldr.Path = path
2226         cfg, err := ldr.Load()
2227         if err != nil {
2228                 fmt.Fprintf(stderr, "could not load config file %s: %s\n", path, err)
2229                 return nil
2230         }
2231         cluster, err := cfg.GetCluster("")
2232         if err != nil {
2233                 fmt.Fprintf(stderr, "could not use config file %s: %s\n", path, err)
2234                 return nil
2235         }
2236         fmt.Fprintf(stderr, "loaded config file %s\n", path)
2237         return cluster
2238 }
2239
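// startLocalKeepstore starts a keepstore process dedicated to this
// crunch-run invocation, listening on an ephemeral port on a local
// interface, and waits for it to report healthy before returning. It
// returns (nil, nil) when the configuration indicates a local keepstore
// should not be used.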
2240 func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, error) {
2241         if configData.KeepBuffers < 1 {
2242                 fmt.Fprintf(logbuf, "not starting a local keepstore process because KeepBuffers=%v in config\n", configData.KeepBuffers)
2243                 return nil, nil
2244         }
2245         if configData.Cluster == nil {
2246                 fmt.Fprint(logbuf, "not starting a local keepstore process because cluster config file was not loaded\n")
2247                 return nil, nil
2248         }
2249         for uuid, vol := range configData.Cluster.Volumes {
2250                 if len(vol.AccessViaHosts) > 0 {
2251                         fmt.Fprintf(logbuf, "not starting a local keepstore process because a volume (%s) uses AccessViaHosts\n", uuid)
2252                         return nil, nil
2253                 }
2254                 if !vol.ReadOnly && vol.Replication < configData.Cluster.Collections.DefaultReplication {
2255                         fmt.Fprintf(logbuf, "not starting a local keepstore process because a writable volume (%s) has replication less than Collections.DefaultReplication (%d < %d)\n", uuid, vol.Replication, configData.Cluster.Collections.DefaultReplication)
2256                         return nil, nil
2257                 }
2258         }
2259
2260         // Rather than have an alternate way to tell keepstore how
2261         // many buffers to use when starting it this way, we just
2262         // modify the cluster configuration that we feed it on stdin.
2263         configData.Cluster.API.MaxKeepBlobBuffers = configData.KeepBuffers
2264
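        // Bind to an ephemeral port to pick a free one, then close the
        // listener so the keepstore child process can bind the same
        // address itself.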
2265         localaddr := localKeepstoreAddr()
2266         ln, err := net.Listen("tcp", net.JoinHostPort(localaddr, "0"))
2267         if err != nil {
2268                 return nil, err
2269         }
2270         _, port, err := net.SplitHostPort(ln.Addr().String())
2271         if err != nil {
2272                 ln.Close()
2273                 return nil, err
2274         }
2275         ln.Close()
2276         url := "http://" + net.JoinHostPort(localaddr, port)
2277
2278         fmt.Fprintf(logbuf, "starting keepstore on %s\n", url)
2279
2280         var confJSON bytes.Buffer
2281         err = json.NewEncoder(&confJSON).Encode(arvados.Config{
2282                 Clusters: map[string]arvados.Cluster{
2283                         configData.Cluster.ClusterID: *configData.Cluster,
2284                 },
2285         })
2286         if err != nil {
2287                 return nil, err
2288         }
2289         cmd := exec.Command("/proc/self/exe", "keepstore", "-config=-")
2290         if target, err := os.Readlink(cmd.Path); err == nil && strings.HasSuffix(target, ".test") {
2291                 // If we're a 'go test' process, running
2292                 // /proc/self/exe would start the test suite in a
2293                 // child process, which is not what we want.
2294                 cmd.Path, _ = exec.LookPath("go")
2295                 cmd.Args = append([]string{"go", "run", "../../cmd/arvados-server"}, cmd.Args[1:]...)
2296                 cmd.Env = os.Environ()
2297         }
2298         cmd.Stdin = &confJSON
2299         cmd.Stdout = logbuf
2300         cmd.Stderr = logbuf
2301         cmd.Env = append(cmd.Env,
2302                 "GOGC=10",
2303                 "ARVADOS_SERVICE_INTERNAL_URL="+url)
2304         err = cmd.Start()
2305         if err != nil {
2306                 return nil, fmt.Errorf("error starting keepstore process: %w", err)
2307         }
2308         cmdExited := false
2309         go func() {
2310                 cmd.Wait()
2311                 cmdExited = true
2312         }()
2313         ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
2314         defer cancel()
2315         poll := time.NewTicker(time.Second / 10)
2316         defer poll.Stop()
2317         client := http.Client{}
2318         for range poll.C {
2319                 testReq, err := http.NewRequestWithContext(ctx, "GET", url+"/_health/ping", nil)
2320                 if err != nil {
2321                         return nil, err
2322                 }
2323                 testReq.Header.Set("Authorization", "Bearer "+configData.Cluster.ManagementToken)
2324                 resp, err := client.Do(testReq)
2325                 if err == nil {
2326                         resp.Body.Close()
2327                         if resp.StatusCode == http.StatusOK {
2328                                 break
2329                         }
2330                 }
2331                 if cmdExited {
2332                         return nil, fmt.Errorf("keepstore child process exited")
2333                 }
2334                 if ctx.Err() != nil {
2335                         return nil, fmt.Errorf("timed out waiting for new keepstore process to report healthy")
2336                 }
2337         }
2338         os.Setenv("ARVADOS_KEEP_SERVICES", url)
2339         return cmd, nil
2340 }
2341
2342 // currentUserAndGroups returns the current uid, gid, and groups in a
2343 // format suitable for logging, e.g. "crunch-run process has
2344 // uid=1234(arvados) gid=1234(arvados) groups=1234(arvados),114(fuse)"
2345 func currentUserAndGroups() string {
2346         u, err := user.Current()
2347         if err != nil {
2348                 return fmt.Sprintf("error getting current user ID: %s", err)
2349         }
2350         s := fmt.Sprintf("crunch-run process has uid=%s(%s) gid=%s", u.Uid, u.Username, u.Gid)
2351         if g, err := user.LookupGroupId(u.Gid); err == nil {
2352                 s += fmt.Sprintf("(%s)", g.Name)
2353         }
2354         s += " groups="
2355         if gids, err := u.GroupIds(); err == nil {
2356                 for i, gid := range gids {
2357                         if i > 0 {
2358                                 s += ","
2359                         }
2360                         s += gid
2361                         if g, err := user.LookupGroupId(gid); err == nil {
2362                                 s += fmt.Sprintf("(%s)", g.Name)
2363                         }
2364                 }
2365         }
2366         return s
2367 }
2368
2369 // localKeepstoreAddr returns a suitable address for a local keepstore
2370 // service: currently the numerically lowest non-loopback IPv4 address
2371 // assigned to a local interface that is not in any of the link-local/
2372 // VPN/loopback ranges 169.254/16, 100.64/10, or 127/8.
2373 func localKeepstoreAddr() string {
2374         var ips []net.IP
2375         // Ignore error (proceed with zero IPs)
2376         addrs, _ := processIPs(os.Getpid())
2377         for addr := range addrs {
2378                 ip := net.ParseIP(addr)
2379                 if ip == nil {
2380                         // invalid
2381                         continue
2382                 }
2383                 if ip.Mask(net.CIDRMask(8, 32)).Equal(net.IPv4(127, 0, 0, 0)) ||
2384                         ip.Mask(net.CIDRMask(10, 32)).Equal(net.IPv4(100, 64, 0, 0)) ||
2385                         ip.Mask(net.CIDRMask(16, 32)).Equal(net.IPv4(169, 254, 0, 0)) {
2386                         // unsuitable
2387                         continue
2388                 }
2389                 ips = append(ips, ip)
2390         }
2391         if len(ips) == 0 {
2392                 return "0.0.0.0"
2393         }
2394         sort.Slice(ips, func(ii, jj int) bool {
2395                 i, j := ips[ii], ips[jj]
2396                 if len(i) != len(j) {
2397                         return len(i) < len(j)
2398                 }
2399                 for x := range i {
2400                         if i[x] != j[x] {
2401                                 return i[x] < j[x]
2402                         }
2403                 }
2404                 return false
2405         })
2406         return ips[0].String()
2407 }
2408
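// loadPrices reads instance price history from the prices file in the
// lock directory (if present), merges it into cr.prices, and logs any
// newly observed price changes.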
2409 func (cr *ContainerRunner) loadPrices() {
2410         buf, err := os.ReadFile(filepath.Join(lockdir, pricesfile))
2411         if err != nil {
2412                 if !os.IsNotExist(err) {
2413                         cr.CrunchLog.Printf("loadPrices: read: %s", err)
2414                 }
2415                 return
2416         }
2417         var prices []cloud.InstancePrice
2418         err = json.Unmarshal(buf, &prices)
2419         if err != nil {
2420                 cr.CrunchLog.Printf("loadPrices: decode: %s", err)
2421                 return
2422         }
2423         cr.pricesLock.Lock()
2424         defer cr.pricesLock.Unlock()
2425         var lastKnown time.Time
2426         if len(cr.prices) > 0 {
2427                 lastKnown = cr.prices[0].StartTime
2428         }
2429         cr.prices = cloud.NormalizePriceHistory(append(prices, cr.prices...))
2430         for i := len(cr.prices) - 1; i >= 0; i-- {
2431                 price := cr.prices[i]
2432                 if price.StartTime.After(lastKnown) {
2433                         cr.CrunchLog.Printf("Instance price changed to %#.3g at %s", price.Price, price.StartTime.UTC())
2434                 }
2435         }
2436 }
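
// examplePricesJSON is an illustrative sketch (made-up timestamps and
// prices) of the kind of document loadPrices expects to find in the prices
// file under lockdir, assuming cloud.InstancePrice marshals with its Go
// field names (StartTime, Price), as the json.Unmarshal call above implies.
// Entries newer than the previously known newest price are logged as price
// changes after the merge through cloud.NormalizePriceHistory.
const examplePricesJSON = `[
	{"StartTime": "2023-05-01T13:00:00Z", "Price": 0.312},
	{"StartTime": "2023-05-01T12:00:00Z", "Price": 0.476}
]`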
2437
2438 func (cr *ContainerRunner) calculateCost(now time.Time) float64 {
2439         cr.pricesLock.Lock()
2440         defer cr.pricesLock.Unlock()
2441
2442         // First, make a "prices" slice with the real data as far back
2443         // as it goes, and (if needed) a "since the beginning of time"
2444         // placeholder containing a reasonable guess about what the
2445         // price was between cr.costStartTime and the earliest real
2446         // data point.
2447         prices := cr.prices
2448         if len(prices) == 0 {
2449                 // no price history yet: fall back to the price in the
2450                 // InstanceType record initially provided by the cloud dispatcher
2451                 var p float64
2452                 var it arvados.InstanceType
2453                 if j := os.Getenv("InstanceType"); j != "" && json.Unmarshal([]byte(j), &it) == nil && it.Price > 0 {
2454                         p = it.Price
2455                 }
2456                 prices = []cloud.InstancePrice{{Price: p}}
2457         } else if prices[len(prices)-1].StartTime.After(cr.costStartTime) {
2458                 // guess earlier pricing was the same as the earliest
2459                 // price we know about
2460                 filler := prices[len(prices)-1]
2461                 filler.StartTime = time.Time{}
2462                 prices = append(prices, filler)
2463         }
2464
2465         // Now that our history of price changes goes back at least as
2466         // far as cr.costStartTime, add up the costs for each
2467         // interval.
2468         cost := 0.0
2469         spanEnd := now
2470         for _, ip := range prices {
2471                 spanStart := ip.StartTime
2472                 if spanStart.After(now) {
2473                         // pricing information from the future -- not
2474                         // expected from AWS, but possible in
2475                         // principle, and exercised by tests.
2476                         continue
2477                 }
2478                 last := false
2479                 if spanStart.Before(cr.costStartTime) {
2480                         spanStart = cr.costStartTime
2481                         last = true
2482                 }
2483                 cost += ip.Price * spanEnd.Sub(spanStart).Seconds() / 3600
2484                 if last {
2485                         break
2486                 }
2487                 spanEnd = spanStart
2488         }
2489
2490         return cost
2491 }
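
// exampleCalculateCost is an illustrative sketch (made-up prices and times,
// not from any real cluster) of how the interval accounting above adds up.
// With costStartTime at 10:00, a price of $0.50/h known since 09:00, a
// change to $0.20/h at 12:00, and "now" at 13:00, the charge is
// 1h*$0.20 + 2h*$0.50 = $1.20; the oldest span is clamped at costStartTime.
func exampleCalculateCost() float64 {
	day := time.Date(2023, 5, 1, 0, 0, 0, 0, time.UTC)
	cr := &ContainerRunner{costStartTime: day.Add(10 * time.Hour)}
	// newest-first, as cloud.NormalizePriceHistory would return it
	cr.prices = []cloud.InstancePrice{
		{StartTime: day.Add(12 * time.Hour), Price: 0.20},
		{StartTime: day.Add(9 * time.Hour), Price: 0.50},
	}
	return cr.calculateCost(day.Add(13 * time.Hour)) // 1.20
}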
2492
2493 func (runner *ContainerRunner) handleSIGUSR2(sigchan chan os.Signal) {
2494         for range sigchan {
2495                 runner.loadPrices()
2496                 update := arvadosclient.Dict{
2497                         "select": []string{"uuid"},
2498                         "container": arvadosclient.Dict{
2499                                 "cost": runner.calculateCost(time.Now()),
2500                         },
2501                 }
2502                 runner.DispatcherArvClient.Update("containers", runner.Container.UUID, update, nil)
2503         }
2504 }
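
// exampleSIGUSR2Wiring is an illustrative sketch (not necessarily how the
// handler is registered elsewhere in this file): it shows the kind of
// signal.Notify plumbing that lets an external "kill -USR2 <pid>" make a
// running crunch-run reload price data and push an updated cost estimate
// to the API server via handleSIGUSR2.
func exampleSIGUSR2Wiring(runner *ContainerRunner) {
	sigusr2 := make(chan os.Signal, 1)
	signal.Notify(sigusr2, syscall.SIGUSR2)
	go runner.handleSIGUSR2(sigusr2)
}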