// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package crunchrun

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/fs"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"os"
	"os/exec"
	"os/signal"
	"os/user"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"runtime/pprof"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"git.arvados.org/arvados.git/lib/cloud"
	"git.arvados.org/arvados.git/lib/cmd"
	"git.arvados.org/arvados.git/lib/config"
	"git.arvados.org/arvados.git/lib/crunchstat"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadosclient"
	"git.arvados.org/arvados.git/sdk/go/ctxlog"
	"git.arvados.org/arvados.git/sdk/go/keepclient"
	"golang.org/x/sys/unix"
)

type command struct{}

var arvadosCertPath = "/etc/arvados/ca-certificates.crt"

var Command = command{}

// ConfigData contains environment variables and (when needed) cluster
// configuration, passed from dispatchcloud to crunch-run on stdin.
type ConfigData struct {
	Env          map[string]string
	KeepBuffers  int
	EC2SpotCheck bool
	Cluster      *arvados.Cluster
}
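
// Illustrative sketch (added commentary, not from the original
// source): assuming dispatchcloud JSON-encodes ConfigData onto
// crunch-run's stdin, a minimal payload might look like
//
//	{
//	  "Env": {"ARVADOS_API_HOST": "zzzzz.example.com"},
//	  "KeepBuffers": 2,
//	  "EC2SpotCheck": true,
//	  "Cluster": null
//	}
//
// Field names match the struct fields because no json tags are
// declared.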

// IArvadosClient defines the minimal set of Arvados API methods used
// by crunch-run.
type IArvadosClient interface {
	Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
	Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
	CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
	Discovery(key string) (interface{}, error)
}

// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")

// IKeepClient defines the minimal set of Keep API methods used by
// crunch-run.
type IKeepClient interface {
	BlockRead(context.Context, arvados.BlockReadOptions) (int, error)
	BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error)
	ReadAt(locator string, p []byte, off int) (int, error)
	LocalLocator(locator string) (string, error)
	SetStorageClasses(sc []string)
}

type RunArvMount func(cmdline []string, tok string) (*exec.Cmd, error)

type MkTempDir func(string, string) (string, error)

type PsProcess interface {
	CmdlineSlice() ([]string, error)
}

// ContainerRunner is the main stateful struct used for a single
// execution of a container.
type ContainerRunner struct {
	executor       containerExecutor
	executorStdin  io.Closer
	executorStdout io.Closer
	executorStderr io.Closer

	// Dispatcher client is initialized with the Dispatcher token.
	// This is a privileged token used to manage container status
	// and logs.
	//
	// We have both dispatcherClient and DispatcherArvClient
	// because there are two different incompatible Arvados Go
	// SDKs and we have to use both (hopefully this gets fixed in
	// #14467).
	dispatcherClient     *arvados.Client
	DispatcherArvClient  IArvadosClient
	DispatcherKeepClient IKeepClient

	// Container client is initialized with the Container token.
	// This token controls the permissions of the container, and
	// must be used for operations such as reading collections.
	//
	// Same comment as above applies to
	// containerClient/ContainerArvClient.
	containerClient     *arvados.Client
	ContainerArvClient  IArvadosClient
	ContainerKeepClient IKeepClient

	Container     arvados.Container
	token         string
	ExitCode      *int
	CrunchLog     *logWriter
	logUUID       string
	logPDH        string
	logMtx        sync.Mutex
	LogCollection arvados.CollectionFileSystem
	logPDHFinal   *string
	RunArvMount   RunArvMount
	MkTempDir     MkTempDir
	ArvMount      *exec.Cmd
	ArvMountPoint string
	HostOutputDir string
	Volumes       map[string]struct{}
	OutputPDH     *string
	SigChan       chan os.Signal
	ArvMountExit  chan error
	SecretMounts  map[string]arvados.Mount
	MkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
	finalState    string
	parentTemp    string
	costStartTime time.Time

	keepstore        *exec.Cmd
	keepstoreLogger  io.WriteCloser
	keepstoreLogbuf  *bufThenWrite
	statLogger       io.WriteCloser
	statReporter     *crunchstat.Reporter
	hoststatLogger   io.WriteCloser
	hoststatReporter *crunchstat.Reporter
	statInterval     time.Duration
	// What we tell docker to use as the container's cgroup
	// parent.
	setCgroupParent string
	// Fake root dir where crunchstat.Reporter should read OS
	// files, for testing.
	crunchstatFakeFS fs.FS

	cStateLock sync.Mutex
	cCancelled bool // StopContainer() invoked

	enableMemoryLimit bool
	enableNetwork     string // one of "default" or "always"
	networkMode       string // "none", "host", or "" -- passed through to executor
	brokenNodeHook    string // script to run if node appears to be broken
	arvMountLog       io.WriteCloser

	containerWatchdogInterval time.Duration

	gateway Gateway

	prices     []cloud.InstancePrice
	pricesLock sync.Mutex
}

// setupSignals sets up signal handling to gracefully terminate the
// underlying container and update state when receiving a TERM, INT or
// QUIT signal.
func (runner *ContainerRunner) setupSignals() {
	runner.SigChan = make(chan os.Signal, 1)
	signal.Notify(runner.SigChan, syscall.SIGTERM)
	signal.Notify(runner.SigChan, syscall.SIGINT)
	signal.Notify(runner.SigChan, syscall.SIGQUIT)

	go func(sig chan os.Signal) {
		for s := range sig {
			runner.stop(s)
		}
	}(runner.SigChan)
}

// stop the underlying container.
func (runner *ContainerRunner) stop(sig os.Signal) {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if sig != nil {
		runner.CrunchLog.Printf("caught signal: %v", sig)
	}
	runner.cCancelled = true
	runner.CrunchLog.Printf("stopping container")
	err := runner.executor.Stop()
	if err != nil {
		runner.CrunchLog.Printf("error stopping container: %s", err)
	}
}

var errorBlacklist = []string{
	"(?ms).*[Cc]annot connect to the Docker daemon.*",
	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
	"(?ms).*grpc: the connection is unavailable.*",
	"(?ms).*no space left on device.*",
}

func (runner *ContainerRunner) runBrokenNodeHook() {
	if runner.brokenNodeHook == "" {
		path := filepath.Join(lockdir, brokenfile)
		runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
		f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
		if err != nil {
			runner.CrunchLog.Printf("Error writing %s: %s", path, err)
			return
		}
		f.Close()
	} else {
		runner.CrunchLog.Printf("Running broken node hook %q", runner.brokenNodeHook)
		// run killme script
		c := exec.Command(runner.brokenNodeHook)
		c.Stdout = runner.CrunchLog
		c.Stderr = runner.CrunchLog
		err := c.Run()
		if err != nil {
			runner.CrunchLog.Printf("Error running broken node hook: %v", err)
		}
	}
}

func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
	for _, d := range errorBlacklist {
		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
			runner.runBrokenNodeHook()
			return true
		}
	}
	return false
}
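
// Example (added commentary): an error whose text contains
//
//	Cannot connect to the Docker daemon at unix:///var/run/docker.sock
//
// matches the first errorBlacklist pattern above, so checkBrokenNode
// logs it, runs the broken-node hook, and returns true.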

// LoadImage determines the docker image id from the container record
// and checks if it is available in the local Docker image store.  If
// not, it loads the image from Keep.
func (runner *ContainerRunner) LoadImage() (string, error) {
	runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

	d, err := os.Open(runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage)
	if err != nil {
		return "", err
	}
	defer d.Close()
	allfiles, err := d.Readdirnames(-1)
	if err != nil {
		return "", err
	}
	var tarfiles []string
	for _, fnm := range allfiles {
		if strings.HasSuffix(fnm, ".tar") {
			tarfiles = append(tarfiles, fnm)
		}
	}
	if len(tarfiles) == 0 {
		return "", fmt.Errorf("image collection does not include a .tar image file")
	}
	if len(tarfiles) > 1 {
		return "", fmt.Errorf("cannot choose from multiple tar files in image collection: %v", tarfiles)
	}
	imageID := tarfiles[0][:len(tarfiles[0])-4]
	imageTarballPath := runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage + "/" + imageID + ".tar"
	runner.CrunchLog.Printf("Using Docker image id %q", imageID)

	runner.CrunchLog.Print("Loading Docker image from keep")
	err = runner.executor.LoadImage(imageID, imageTarballPath, runner.Container, runner.ArvMountPoint,
		runner.containerClient)
	if err != nil {
		return "", err
	}

	return imageID, nil
}
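
// Example (added commentary, hypothetical names): if the image
// collection contains a single file "1234abcd....tar", LoadImage uses
// imageID "1234abcd..." (the filename minus the ".tar" suffix) and
// hands the executor the tarball path under the arv-mount filesystem,
// e.g. <ArvMountPoint>/by_id/<collection>/1234abcd....tar.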

func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *exec.Cmd, err error) {
	c = exec.Command(cmdline[0], cmdline[1:]...)

	// Copy our environment, but override ARVADOS_API_TOKEN with
	// the container auth token.
	c.Env = nil
	for _, s := range os.Environ() {
		if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
			c.Env = append(c.Env, s)
		}
	}
	c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

	runner.arvMountLog, err = runner.openLogFile("arv-mount")
	if err != nil {
		return nil, err
	}
	scanner := logScanner{
		Patterns: []string{
			"Keep write error",
			"Block not found error",
			"Unhandled exception during FUSE operation",
		},
		ReportFunc: func(pattern, text string) {
			runner.updateRuntimeStatus(arvadosclient.Dict{
				"warning":       "arv-mount: " + pattern,
				"warningDetail": text,
			})
		},
	}
	c.Stdout = newTimestamper(io.MultiWriter(runner.arvMountLog, os.Stderr))
	c.Stderr = io.MultiWriter(&scanner, newTimestamper(io.MultiWriter(runner.arvMountLog, os.Stderr)))

	runner.CrunchLog.Printf("Running %v", c.Args)

	err = c.Start()
	if err != nil {
		return nil, err
	}

	statReadme := make(chan bool)
	runner.ArvMountExit = make(chan error)

	keepStatting := true
	go func() {
		for keepStatting {
			time.Sleep(100 * time.Millisecond)
			_, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
			if err == nil {
				keepStatting = false
				statReadme <- true
			}
		}
		close(statReadme)
	}()

	go func() {
		mnterr := c.Wait()
		if mnterr != nil {
			runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
		}
		runner.ArvMountExit <- mnterr
		close(runner.ArvMountExit)
	}()

	select {
	case <-statReadme:
		break
	case err := <-runner.ArvMountExit:
		runner.ArvMount = nil
		keepStatting = false
		return nil, err
	}

	return c, nil
}
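
// Design note (added commentary): ArvMountCmd treats the appearance
// of <ArvMountPoint>/by_id/README as the signal that the FUSE mount
// is ready, polling for it every 100ms, while a second goroutine
// waits for the arv-mount process itself. Whichever happens first
// wins the select above: a successful stat returns the running
// command, and an early arv-mount exit returns its error instead.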

func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
	if runner.ArvMountPoint == "" {
		runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
	}
	return
}

func copyfile(src string, dst string) (err error) {
	srcfile, err := os.Open(src)
	if err != nil {
		return
	}

	os.MkdirAll(path.Dir(dst), 0777)

	dstfile, err := os.Create(dst)
	if err != nil {
		return
	}
	_, err = io.Copy(dstfile, srcfile)
	if err != nil {
		return
	}

	err = srcfile.Close()
	err2 := dstfile.Close()

	if err != nil {
		return
	}

	if err2 != nil {
		return err2
	}

	return nil
}

func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
	bindmounts := map[string]bindmount{}
	err := runner.SetupArvMountPoint("keep")
	if err != nil {
		return nil, fmt.Errorf("While creating keep mount temp dir: %v", err)
	}

	token, err := runner.ContainerToken()
	if err != nil {
		return nil, fmt.Errorf("could not get container token: %s", err)
	}
	runner.CrunchLog.Printf("container token %q", token)

	pdhOnly := true
	tmpcount := 0
	arvMountCmd := []string{
		"arv-mount",
		"--foreground",
		"--read-write",
		"--storage-classes", strings.Join(runner.Container.OutputStorageClasses, ","),
		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}

	if _, isdocker := runner.executor.(*dockerExecutor); isdocker {
		arvMountCmd = append(arvMountCmd, "--allow-other")
	}

	if runner.Container.RuntimeConstraints.KeepCacheDisk > 0 {
		keepcachedir, err := runner.MkTempDir(runner.parentTemp, "keepcache")
		if err != nil {
			return nil, fmt.Errorf("while creating keep cache temp dir: %v", err)
		}
		arvMountCmd = append(arvMountCmd, "--disk-cache", "--disk-cache-dir", keepcachedir, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheDisk))
	} else if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
		arvMountCmd = append(arvMountCmd, "--ram-cache", "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
	}

	collectionPaths := []string{}
	needCertMount := true
	type copyFile struct {
		src  string
		bind string
	}
	var copyFiles []copyFile

	var binds []string
	for bind := range runner.Container.Mounts {
		binds = append(binds, bind)
	}
	for bind := range runner.SecretMounts {
		if _, ok := runner.Container.Mounts[bind]; ok {
			return nil, fmt.Errorf("secret mount %q conflicts with regular mount", bind)
		}
		if runner.SecretMounts[bind].Kind != "json" &&
			runner.SecretMounts[bind].Kind != "text" {
			return nil, fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
				bind, runner.SecretMounts[bind].Kind)
		}
		binds = append(binds, bind)
	}
	sort.Strings(binds)

	for _, bind := range binds {
		mnt, notSecret := runner.Container.Mounts[bind]
		if !notSecret {
			mnt = runner.SecretMounts[bind]
		}
		if bind == "stdout" || bind == "stderr" {
			// Is it a "file" mount kind?
			if mnt.Kind != "file" {
				return nil, fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
			}

			// Does path start with OutputPath?
			prefix := runner.Container.OutputPath
			if !strings.HasSuffix(prefix, "/") {
				prefix += "/"
			}
			if !strings.HasPrefix(mnt.Path, prefix) {
				return nil, fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
			}
		}

		if bind == "stdin" {
			// Is it a "collection" mount kind?
			if mnt.Kind != "collection" && mnt.Kind != "json" {
				return nil, fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
			}
		}

		if bind == arvadosCertPath {
			needCertMount = false
		}

		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
			if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
				return nil, fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
			}
		}

		switch {
		case mnt.Kind == "collection" && bind != "stdin":
			var src string
			if mnt.UUID != "" && mnt.PortableDataHash != "" {
				return nil, fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
			}
			if mnt.UUID != "" {
				if mnt.Writable {
					return nil, fmt.Errorf("writing to existing collections currently not permitted")
				}
				pdhOnly = false
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
			} else if mnt.PortableDataHash != "" {
				if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					return nil, fmt.Errorf("can never write to a collection specified by portable data hash")
				}
				idx := strings.Index(mnt.PortableDataHash, "/")
				if idx > 0 {
					mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
					mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
					runner.Container.Mounts[bind] = mnt
				}
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
				if mnt.Path != "" && mnt.Path != "." {
					if strings.HasPrefix(mnt.Path, "./") {
						mnt.Path = mnt.Path[2:]
					} else if strings.HasPrefix(mnt.Path, "/") {
						mnt.Path = mnt.Path[1:]
					}
					src += "/" + mnt.Path
				}
			} else {
				src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
				arvMountCmd = append(arvMountCmd, "--mount-tmp", fmt.Sprintf("tmp%d", tmpcount))
				tmpcount++
			}
			if mnt.Writable {
				if bind == runner.Container.OutputPath {
					runner.HostOutputDir = src
					bindmounts[bind] = bindmount{HostPath: src}
				} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
				} else {
					bindmounts[bind] = bindmount{HostPath: src}
				}
			} else {
				bindmounts[bind] = bindmount{HostPath: src, ReadOnly: true}
			}
			collectionPaths = append(collectionPaths, src)

		case mnt.Kind == "tmp":
			var tmpdir string
			tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
			if err != nil {
				return nil, fmt.Errorf("while creating mount temp dir: %v", err)
			}
			st, staterr := os.Stat(tmpdir)
			if staterr != nil {
				return nil, fmt.Errorf("while Stat on temp dir: %v", staterr)
			}
			err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("while Chmod temp dir: %v", err)
			}
			bindmounts[bind] = bindmount{HostPath: tmpdir}
			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = tmpdir
			}

		case mnt.Kind == "json" || mnt.Kind == "text":
			var filedata []byte
			if mnt.Kind == "json" {
				filedata, err = json.Marshal(mnt.Content)
				if err != nil {
					return nil, fmt.Errorf("encoding json data: %v", err)
				}
			} else {
				text, ok := mnt.Content.(string)
				if !ok {
					return nil, fmt.Errorf("content for mount %q must be a string", bind)
				}
				filedata = []byte(text)
			}

			tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
			if err != nil {
				return nil, fmt.Errorf("creating temp dir: %v", err)
			}
			tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
			err = ioutil.WriteFile(tmpfn, filedata, 0444)
			if err != nil {
				return nil, fmt.Errorf("writing temp file: %v", err)
			}
			if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && (notSecret || runner.Container.Mounts[runner.Container.OutputPath].Kind != "collection") {
				// In most cases, if the container
				// specifies a literal file inside the
				// output path, we copy it into the
				// output directory (either a mounted
				// collection or a staging area on the
				// host fs). If it's a secret, it will
				// be skipped when copying output from
				// staging to Keep later.
				copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
			} else {
				// If a secret is outside OutputPath,
				// we bind mount the secret file
				// directly just like other mounts. We
				// also use this strategy when a
				// secret is inside OutputPath but
				// OutputPath is a live collection, to
				// avoid writing the secret to
				// Keep. Attempting to remove a
				// bind-mounted secret file from
				// inside the container will return a
				// "Device or resource busy" error
				// that might not be handled well by
				// the container, which is why we
				// don't use this strategy when
				// OutputPath is a staging directory.
				bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
			}
		}
	}

	if runner.HostOutputDir == "" {
		return nil, fmt.Errorf("output path does not correspond to a writable mount point")
	}

	if needCertMount && runner.Container.RuntimeConstraints.API {
		for _, certfile := range []string{
			// Populated by caller, or sdk/go/arvados init(), or test suite:
			os.Getenv("SSL_CERT_FILE"),
			// Copied from Go 1.21 stdlib (src/crypto/x509/root_linux.go):
			"/etc/ssl/certs/ca-certificates.crt",                // Debian/Ubuntu/Gentoo etc.
			"/etc/pki/tls/certs/ca-bundle.crt",                  // Fedora/RHEL 6
			"/etc/ssl/ca-bundle.pem",                            // OpenSUSE
			"/etc/pki/tls/cacert.pem",                           // OpenELEC
			"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
			"/etc/ssl/cert.pem",                                 // Alpine Linux
		} {
			if _, err := os.Stat(certfile); err == nil {
				bindmounts[arvadosCertPath] = bindmount{HostPath: certfile, ReadOnly: true}
				break
			}
		}
	}

	if pdhOnly {
		// If we are only mounting collections by pdh, make
		// sure we don't subscribe to websocket events to
		// avoid putting undesired load on the API server
		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id", "--disable-event-listening")
	} else {
		arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
	}
	// the by_uuid mount point is used by singularity when writing
	// out docker images converted to SIF
	arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)

	runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
	if err != nil {
		return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
	}
	if runner.hoststatReporter != nil && runner.ArvMount != nil {
		runner.hoststatReporter.ReportPID("arv-mount", runner.ArvMount.Process.Pid)
	}

	for _, p := range collectionPaths {
		_, err = os.Stat(p)
		if err != nil {
			return nil, fmt.Errorf("while checking that input files exist: %v", err)
		}
	}

	for _, cp := range copyFiles {
		st, err := os.Stat(cp.src)
		if err != nil {
			return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
		if st.IsDir() {
			err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
				if walkerr != nil {
					return walkerr
				}
				target := path.Join(cp.bind, walkpath[len(cp.src):])
				if walkinfo.Mode().IsRegular() {
					copyerr := copyfile(walkpath, target)
					if copyerr != nil {
						return copyerr
					}
					return os.Chmod(target, walkinfo.Mode()|0777)
				} else if walkinfo.Mode().IsDir() {
					mkerr := os.MkdirAll(target, 0777)
					if mkerr != nil {
						return mkerr
					}
					return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
				} else {
					return fmt.Errorf("source %q is not a regular file or directory", cp.src)
				}
			})
		} else if st.Mode().IsRegular() {
			err = copyfile(cp.src, cp.bind)
			if err == nil {
				err = os.Chmod(cp.bind, st.Mode()|0777)
			}
		}
		if err != nil {
			return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
	}

	return bindmounts, nil
}
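
// Illustrative sketch (added commentary, hypothetical values): for a
// docker container with a "tmp" output mount, inputs mounted by
// portable data hash, and an 8 GiB disk cache, the assembled command
// might look like
//
//	arv-mount --foreground --read-write \
//	    --storage-classes default --crunchstat-interval=10 \
//	    --allow-other \
//	    --disk-cache --disk-cache-dir <parentTemp>/keepcache... \
//	    --file-cache 8589934592 \
//	    --mount-by-pdh by_id --disable-event-listening \
//	    --mount-by-id by_uuid <parentTemp>/keep...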

func (runner *ContainerRunner) stopHoststat() error {
	if runner.hoststatReporter == nil {
		return nil
	}
	runner.hoststatReporter.Stop()
	runner.hoststatReporter.LogProcessMemMax(runner.CrunchLog)
	err := runner.hoststatLogger.Close()
	if err != nil {
		return fmt.Errorf("error closing hoststat logs: %v", err)
	}
	return nil
}

func (runner *ContainerRunner) startHoststat() error {
	var err error
	runner.hoststatLogger, err = runner.openLogFile("hoststat")
	if err != nil {
		return err
	}
	runner.hoststatReporter = &crunchstat.Reporter{
		Logger: newLogWriter(newTimestamper(runner.hoststatLogger)),
		// Our own cgroup is the "host" cgroup, in the sense
		// that it accounts for resource usage outside the
		// container. It doesn't count _all_ resource usage on
		// the system.
		//
		// TODO?: Use the furthest ancestor of our own cgroup
		// that has stats available. (Currently crunchstat
		// does not have that capability.)
		Pid:        os.Getpid,
		PollPeriod: runner.statInterval,
	}
	runner.hoststatReporter.Start()
	runner.hoststatReporter.ReportPID("crunch-run", os.Getpid())
	return nil
}

func (runner *ContainerRunner) startCrunchstat() error {
	var err error
	runner.statLogger, err = runner.openLogFile("crunchstat")
	if err != nil {
		return err
	}
	runner.statReporter = &crunchstat.Reporter{
		Pid:    runner.executor.Pid,
		FS:     runner.crunchstatFakeFS,
		Logger: newLogWriter(newTimestamper(runner.statLogger)),
		MemThresholds: map[string][]crunchstat.Threshold{
			"rss": crunchstat.NewThresholdsFromPercentages(runner.Container.RuntimeConstraints.RAM, []int64{90, 95, 99}),
		},
		PollPeriod:      runner.statInterval,
		TempDir:         runner.parentTemp,
		ThresholdLogger: runner.CrunchLog,
	}
	runner.statReporter.Start()
	return nil
}
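
// Worked example (added commentary): with RuntimeConstraints.RAM set
// to 8 GiB, NewThresholdsFromPercentages(RAM, []int64{90, 95, 99})
// arms "rss" warnings at roughly 7.2, 7.6, and 7.9 GiB, which the
// reporter writes to ThresholdLogger (assuming crunchstat reports
// each threshold as it is crossed).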

type infoCommand struct {
	label string
	cmd   []string
}

// LogHostInfo logs info about the current host, for debugging and
// accounting purposes. Although it's logged as "node-info", this is
// about the environment where crunch-run is actually running, which
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
	w, err := runner.openLogFile("node-info")
	if err != nil {
		return
	}

	commands := []infoCommand{
		{
			label: "Host Information",
			cmd:   []string{"uname", "-a"},
		},
		{
			label: "CPU Information",
			cmd:   []string{"cat", "/proc/cpuinfo"},
		},
		{
			label: "Memory Information",
			cmd:   []string{"cat", "/proc/meminfo"},
		},
		{
			label: "Disk Space",
			cmd:   []string{"df", "-m", "/", os.TempDir()},
		},
		{
			label: "Disk INodes",
			cmd:   []string{"df", "-i", "/", os.TempDir()},
		},
	}

	// Run commands with informational output to be logged.
	for _, command := range commands {
		fmt.Fprintln(w, command.label)
		cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
		cmd.Stdout = w
		cmd.Stderr = w
		if err := cmd.Run(); err != nil {
			err = fmt.Errorf("While running command %q: %v", command.cmd, err)
			fmt.Fprintln(w, err)
			return err
		}
		fmt.Fprintln(w, "")
	}

	err = w.Close()
	if err != nil {
		return fmt.Errorf("While closing node-info logs: %v", err)
	}
	return nil
}

// LogContainerRecord gets and saves the raw JSON container record from
// the API server.
func (runner *ContainerRunner) LogContainerRecord() error {
	logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}})
	if !logged && err == nil {
		err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
	}
	return err
}

// LogNodeRecord logs the current host's InstanceType config entry, if
// running via arvados-dispatch-cloud.
func (runner *ContainerRunner) LogNodeRecord() error {
	it := os.Getenv("InstanceType")
	if it == "" {
		// Not dispatched by arvados-dispatch-cloud.
		return nil
	}
	// Save InstanceType config fragment received from dispatcher
	// on stdin.
	w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return err
	}
	defer w.Close()
	_, err = io.WriteString(w, it)
	if err != nil {
		return err
	}
	return w.Close()
}

func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}) (logged bool, err error) {
	writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return false, err
	}
	reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
	if err != nil {
		return false, fmt.Errorf("error getting %s record: %v", label, err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	var resp map[string]interface{}
	if err = dec.Decode(&resp); err != nil {
		return false, fmt.Errorf("error decoding %s list response: %v", label, err)
	}
	items, ok := resp["items"].([]interface{})
	if !ok {
		return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
	} else if len(items) < 1 {
		return false, nil
	}
	// Re-encode it using indentation to improve readability
	enc := json.NewEncoder(writer)
	enc.SetIndent("", "    ")
	if err = enc.Encode(items[0]); err != nil {
		return false, fmt.Errorf("error logging %s record: %v", label, err)
	}
	err = writer.Close()
	if err != nil {
		return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
	}
	return true, nil
}

func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
	stdoutPath := mntPath[len(runner.Container.OutputPath):]
	index := strings.LastIndex(stdoutPath, "/")
	if index > 0 {
		subdirs := stdoutPath[:index]
		if subdirs != "" {
			st, err := os.Stat(runner.HostOutputDir)
			if err != nil {
				return nil, fmt.Errorf("While Stat on temp dir: %v", err)
			}
			stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
			err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
			}
		}
	}
	stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
	if err != nil {
		return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
	}

	return stdoutFile, nil
}
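
// Example (added commentary, hypothetical paths): with OutputPath
// "/out" and a stdout mount at "/out/sub/stdout.txt", stdoutPath is
// "/sub/stdout.txt"; getStdoutFile creates <HostOutputDir>/sub (mode
// copied from HostOutputDir plus setgid) and returns the created
// file <HostOutputDir>/sub/stdout.txt.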

// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {
	var stdin io.Reader
	if mnt, ok := runner.Container.Mounts["stdin"]; ok {
		switch mnt.Kind {
		case "collection":
			var collID string
			if mnt.UUID != "" {
				collID = mnt.UUID
			} else {
				collID = mnt.PortableDataHash
			}
			path := runner.ArvMountPoint + "/by_id/" + collID + "/" + mnt.Path
			f, err := os.Open(path)
			if err != nil {
				return err
			}
			stdin = f
			runner.executorStdin = f
		case "json":
			j, err := json.Marshal(mnt.Content)
			if err != nil {
				return fmt.Errorf("error encoding stdin json data: %v", err)
			}
			stdin = bytes.NewReader(j)
			runner.executorStdin = io.NopCloser(nil)
		default:
			return fmt.Errorf("stdin mount has unsupported kind %q", mnt.Kind)
		}
	} else {
		stdin = bytes.NewReader(nil)
		runner.executorStdin = io.NopCloser(nil)
	}

	var stdout, stderr io.Writer
	if mnt, ok := runner.Container.Mounts["stdout"]; ok {
		f, err := runner.getStdoutFile(mnt.Path)
		if err != nil {
			return err
		}
		stdout = f
		runner.executorStdout = f
	} else if w, err := runner.openLogFile("stdout"); err != nil {
		return err
	} else {
		stdout = newTimestamper(w)
		runner.executorStdout = w
	}

	if mnt, ok := runner.Container.Mounts["stderr"]; ok {
		f, err := runner.getStdoutFile(mnt.Path)
		if err != nil {
			return err
		}
		stderr = f
		runner.executorStderr = f
	} else if w, err := runner.openLogFile("stderr"); err != nil {
		return err
	} else {
		stderr = newTimestamper(w)
		runner.executorStderr = w
	}

	env := runner.Container.Environment
	enableNetwork := runner.enableNetwork == "always"
	if runner.Container.RuntimeConstraints.API {
		enableNetwork = true
		tok, err := runner.ContainerToken()
		if err != nil {
			return err
		}
		env = map[string]string{}
		for k, v := range runner.Container.Environment {
			env[k] = v
		}
		env["ARVADOS_API_TOKEN"] = tok
		env["ARVADOS_API_HOST"] = os.Getenv("ARVADOS_API_HOST")
		env["ARVADOS_API_HOST_INSECURE"] = os.Getenv("ARVADOS_API_HOST_INSECURE")
		env["ARVADOS_KEEP_SERVICES"] = os.Getenv("ARVADOS_KEEP_SERVICES")
	}
	workdir := runner.Container.Cwd
	if workdir == "." {
		// both "" and "." mean default
		workdir = ""
	}
	ram := runner.Container.RuntimeConstraints.RAM
	if !runner.enableMemoryLimit {
		ram = 0
	}

	if runner.Container.RuntimeConstraints.GPU.Stack == "cuda" {
		nvidiaModprobe(runner.CrunchLog)
	}

	return runner.executor.Create(containerSpec{
		Image:          imageID,
		VCPUs:          runner.Container.RuntimeConstraints.VCPUs,
		RAM:            ram,
		WorkingDir:     workdir,
		Env:            env,
		BindMounts:     bindmounts,
		Command:        runner.Container.Command,
		EnableNetwork:  enableNetwork,
		GPUStack:       runner.Container.RuntimeConstraints.GPU.Stack,
		GPUDeviceCount: runner.Container.RuntimeConstraints.GPU.DeviceCount,
		NetworkMode:    runner.networkMode,
		CgroupParent:   runner.setCgroupParent,
		Stdin:          stdin,
		Stdout:         stdout,
		Stderr:         stderr,
	})
}

// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
	runner.CrunchLog.Printf("Starting container")
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	err := runner.executor.Start()
	if err != nil {
		var advice string
		if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
			advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
		}
		return fmt.Errorf("could not start container: %v%s", err, advice)
	}
	return nil
}

// WaitFinish waits for the container to terminate, captures the exit
// code, and closes the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() error {
	runner.CrunchLog.Print("Waiting for container to finish")
	var timeout <-chan time.Time
	if s := runner.Container.SchedulingParameters.MaxRunTime; s > 0 {
		timeout = time.After(time.Duration(s) * time.Second)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		select {
		case <-timeout:
			runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
			runner.stop(nil)
		case <-runner.ArvMountExit:
			runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
			runner.stop(nil)
		case <-ctx.Done():
		}
	}()
	exitcode, err := runner.executor.Wait(ctx)
	if err != nil {
		runner.checkBrokenNode(err)
		return err
	}
	runner.ExitCode = &exitcode

	extra := ""
	if exitcode&0x80 != 0 {
		// Convert raw exit status (0x80 + signal number) to a
		// string to log after the code, like " (signal 101)"
		// or " (signal 9, SIGKILL)"
		sig := syscall.WaitStatus(exitcode).Signal()
		if name := unix.SignalName(sig); name != "" {
			extra = fmt.Sprintf(" (signal %d, %s)", sig, name)
		} else {
			extra = fmt.Sprintf(" (signal %d)", sig)
		}
	}
	runner.CrunchLog.Printf("Container exited with status code %d%s", exitcode, extra)
	err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
		"select":    []string{"uuid"},
		"container": arvadosclient.Dict{"exit_code": exitcode},
	}, nil)
	if err != nil {
		runner.CrunchLog.Printf("ignoring error updating exit_code: %s", err)
	}

	var returnErr error
	if err = runner.executorStdin.Close(); err != nil {
		err = fmt.Errorf("error closing container stdin: %s", err)
		runner.CrunchLog.Printf("%s", err)
		returnErr = err
	}
	if err = runner.executorStdout.Close(); err != nil {
		err = fmt.Errorf("error closing container stdout: %s", err)
		runner.CrunchLog.Printf("%s", err)
		if returnErr == nil {
			returnErr = err
		}
	}
	if err = runner.executorStderr.Close(); err != nil {
		err = fmt.Errorf("error closing container stderr: %s", err)
		runner.CrunchLog.Printf("%s", err)
		if returnErr == nil {
			returnErr = err
		}
	}

	if runner.statReporter != nil {
		runner.statReporter.Stop()
		runner.statReporter.LogMaxima(runner.CrunchLog, map[string]int64{
			"rss": runner.Container.RuntimeConstraints.RAM,
		})
		err = runner.statLogger.Close()
		if err != nil {
			runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
		}
	}
	return returnErr
}
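
// Example (added commentary): a raw exit status of 137 (0x80 + 9)
// decodes to signal 9; unix.SignalName reports "SIGKILL", so the log
// line reads
//
//	Container exited with status code 137 (signal 9, SIGKILL)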

func (runner *ContainerRunner) updateLogs() {
	ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
	defer ticker.Stop()

	sigusr1 := make(chan os.Signal, 1)
	signal.Notify(sigusr1, syscall.SIGUSR1)
	defer signal.Stop(sigusr1)

	saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
	saveAtSize := crunchLogUpdateSize
	var savedSize int64
	for {
		select {
		case <-ticker.C:
		case <-sigusr1:
			saveAtTime = time.Now()
		}
		runner.logMtx.Lock()
		done := runner.logPDHFinal != nil
		runner.logMtx.Unlock()
		if done {
			return
		}
		size := runner.LogCollection.Size()
		if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
			continue
		}
		saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
		saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
		err := runner.saveLogCollection(false)
		if err != nil {
			runner.CrunchLog.Printf("error updating log collection: %s", err)
			continue
		}
		savedSize = size
	}
}
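
// Design note (added commentary): updateLogs checkpoints the log
// collection whenever crunchLogUpdatePeriod has elapsed or the
// collection has grown by crunchLogUpdateSize since the last save,
// polling on a ticker at 1/360 of the period. SIGUSR1 makes the next
// pass save immediately (provided the log has grown), which
// checkSpotInterruptionNotices uses below to flush a preemption
// notice promptly.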

var spotInterruptionCheckInterval = 5 * time.Second
var ec2MetadataBaseURL = "http://169.254.169.254"

const ec2TokenTTL = time.Second * 21600

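// checkSpotInterruptionNotices polls the EC2 instance metadata
// service for a spot interruption notice, using the IMDSv2 two-step
// flow visible in the code below: first PUT /latest/api/token with an
// X-aws-ec2-metadata-token-ttl-seconds header to obtain a session
// token, then GET /latest/meta-data/spot/instance-action with the
// X-aws-ec2-metadata-token header; a 404 means no interruption is
// scheduled. (Added doc comment summarizing the code below.)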
1188 func (runner *ContainerRunner) checkSpotInterruptionNotices() {
1189         type ec2metadata struct {
1190                 Action string    `json:"action"`
1191                 Time   time.Time `json:"time"`
1192         }
1193         runner.CrunchLog.Printf("Checking for spot instance interruptions every %v using instance metadata at %s", spotInterruptionCheckInterval, ec2MetadataBaseURL)
1194         var metadata ec2metadata
1195         var token string
1196         var tokenExp time.Time
1197         check := func() error {
1198                 ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
1199                 defer cancel()
1200                 if token == "" || tokenExp.Sub(time.Now()) < time.Minute {
1201                         req, err := http.NewRequestWithContext(ctx, http.MethodPut, ec2MetadataBaseURL+"/latest/api/token", nil)
1202                         if err != nil {
1203                                 return err
1204                         }
1205                         req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", fmt.Sprintf("%d", int(ec2TokenTTL/time.Second)))
1206                         resp, err := http.DefaultClient.Do(req)
1207                         if err != nil {
1208                                 return err
1209                         }
1210                         defer resp.Body.Close()
1211                         if resp.StatusCode != http.StatusOK {
1212                                 return fmt.Errorf("%s", resp.Status)
1213                         }
1214                         newtoken, err := ioutil.ReadAll(resp.Body)
1215                         if err != nil {
1216                                 return err
1217                         }
1218                         token = strings.TrimSpace(string(newtoken))
1219                         tokenExp = time.Now().Add(ec2TokenTTL)
1220                 }
1221                 req, err := http.NewRequestWithContext(ctx, http.MethodGet, ec2MetadataBaseURL+"/latest/meta-data/spot/instance-action", nil)
1222                 if err != nil {
1223                         return err
1224                 }
1225                 req.Header.Set("X-aws-ec2-metadata-token", token)
1226                 resp, err := http.DefaultClient.Do(req)
1227                 if err != nil {
1228                         return err
1229                 }
1230                 defer resp.Body.Close()
1231                 switch resp.StatusCode {
1232                 case http.StatusOK:
1233                         // proceed to decode the response body below
1234                 case http.StatusNotFound:
1235                         // "If Amazon EC2 is not preparing to stop or
1236                         // terminate the instance, or if you
1237                         // terminated the instance yourself,
1238                         // instance-action is not present in the
1239                         // instance metadata and you receive an HTTP
1240                         // 404 error when you try to retrieve it."
1241                         metadata = ec2metadata{}
1242                         return nil
1243                 case http.StatusUnauthorized:
1244                         token = ""
1245                         return fmt.Errorf("%s", resp.Status)
1246                 default:
1247                         return fmt.Errorf("%s", resp.Status)
1248                 }
1249                 nextmetadata := ec2metadata{}
1250                 err = json.NewDecoder(resp.Body).Decode(&nextmetadata)
1251                 if err != nil {
1252                         return err
1253                 }
1254                 metadata = nextmetadata
1255                 return nil
1256         }
1257         failures := 0
1258         var lastmetadata ec2metadata
1259         for range time.NewTicker(spotInterruptionCheckInterval).C {
1260                 err := check()
1261                 if err != nil {
1262                         message := fmt.Sprintf("Spot instance interruption check was inconclusive: %s", err)
1263                         if failures++; failures > 5 {
1264                                 runner.CrunchLog.Printf("%s -- now giving up after too many consecutive errors", message)
1265                                 return
1266                         }
1267                         // Transient failure: log it and retry on the next tick.
1268                         runner.CrunchLog.Printf("%s -- will retry in %v", message, spotInterruptionCheckInterval)
1269                         continue
1270                 }
1271                 failures = 0
1272                 if metadata.Action != "" && metadata != lastmetadata {
1273                         lastmetadata = metadata
1274                         text := fmt.Sprintf("Cloud provider scheduled instance %s at %s", metadata.Action, metadata.Time.UTC().Format(time.RFC3339))
1275                         runner.CrunchLog.Printf("%s", text)
1276                         runner.updateRuntimeStatus(arvadosclient.Dict{
1277                                 "warning":          "preemption notice",
1278                                 "warningDetail":    text,
1279                                 "preemptionNotice": text,
1280                         })
1281                         if proc, err := os.FindProcess(os.Getpid()); err == nil {
1282                                 // trigger updateLogs
1283                                 proc.Signal(syscall.SIGUSR1)
1284                         }
1285                 }
1286         }
1287 }
1288
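     // updateRuntimeStatus sends the given runtime_status entries to the
     // container record via the dispatcher client. Errors are logged but
     // otherwise ignored: a failed status update is not worth interrupting
     // the container for.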
1289 func (runner *ContainerRunner) updateRuntimeStatus(status arvadosclient.Dict) {
1290         err := runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1291                 "select": []string{"uuid"},
1292                 "container": arvadosclient.Dict{
1293                         "runtime_status": status,
1294                 },
1295         }, nil)
1296         if err != nil {
1297                 runner.CrunchLog.Printf("error updating container runtime_status: %s", err)
1298         }
1299 }
1300
1301 // CaptureOutput saves data from the container's output directory if
1302 // needed, and updates the container output accordingly.
1303 func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) error {
1304         if runner.Container.RuntimeConstraints.API {
1305                 // Output may have been set directly by the container, so
1306                 // refresh the container record to check.
1307                 err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
1308                         arvadosclient.Dict{
1309                                 "select": []string{"output"},
1310                         }, &runner.Container)
1311                 if err != nil {
1312                         return err
1313                 }
1314                 if runner.Container.Output != "" {
1315                         // Container output is already set.
1316                         runner.OutputPDH = &runner.Container.Output
1317                         return nil
1318                 }
1319         }
1320
1321         txt, err := (&copier{
1322                 client:        runner.containerClient,
1323                 keepClient:    runner.ContainerKeepClient,
1324                 hostOutputDir: runner.HostOutputDir,
1325                 ctrOutputDir:  runner.Container.OutputPath,
1326                 globs:         runner.Container.OutputGlob,
1327                 bindmounts:    bindmounts,
1328                 mounts:        runner.Container.Mounts,
1329                 secretMounts:  runner.SecretMounts,
1330                 logger:        runner.CrunchLog,
1331         }).Copy()
1332         if err != nil {
1333                 return err
1334         }
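             // A locator with a +R hint in the copied manifest (a
             // hypothetical example: "d41d8cd...+0+Rxxxxx-...") refers to a
             // block stored on a remote cluster. Round-tripping the
             // manifest through a collection FileSystem rewrites it with
             // local locators, copying the underlying block data as needed.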
1335         if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
1336                 runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
1337                 fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
1338                 if err != nil {
1339                         return err
1340                 }
1341                 txt, err = fs.MarshalManifest(".")
1342                 if err != nil {
1343                         return err
1344                 }
1345         }
1346         var resp arvados.Collection
1347         err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
1348                 "ensure_unique_name": true,
1349                 "select":             []string{"portable_data_hash"},
1350                 "collection": arvadosclient.Dict{
1351                         "is_trashed":    true,
1352                         "name":          "output for " + runner.Container.UUID,
1353                         "manifest_text": txt,
1354                 },
1355         }, &resp)
1356         if err != nil {
1357                 return fmt.Errorf("error creating output collection: %v", err)
1358         }
1359         runner.OutputPDH = &resp.PortableDataHash
1360         return nil
1361 }
1362
1363 func (runner *ContainerRunner) CleanupDirs() {
1364         if runner.ArvMount != nil {
1365                 var delay int64 = 8
1366                 umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
1367                 umount.Stdout = runner.CrunchLog
1368                 umount.Stderr = runner.CrunchLog
1369                 runner.CrunchLog.Printf("Running %v", umount.Args)
1370                 umnterr := umount.Start()
1371
1372                 if umnterr != nil {
1373                         runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
1374                         runner.ArvMount.Process.Kill()
1375                 } else {
1376                         // If arv-mount --unmount gets stuck for any reason, we
1377                         // don't want to wait for it forever.  Do Wait() in a goroutine
1378                         // so it doesn't block crunch-run.
1379                         umountExit := make(chan error)
1380                         go func() {
1381                                 mnterr := umount.Wait()
1382                                 if mnterr != nil {
1383                                         runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
1384                                 }
1385                                 umountExit <- mnterr
1386                         }()
1387
1388                         for again := true; again; {
1389                                 again = false
1390                                 select {
1391                                 case <-umountExit:
1392                                         umount = nil
1393                                         again = true
1394                                 case <-runner.ArvMountExit:
1395                                         // arv-mount exited on its own; done waiting
1396                                 case <-time.After(time.Duration(delay+1) * time.Second):
1397                                         runner.CrunchLog.Printf("Timed out waiting for unmount")
1398                                         if umount != nil {
1399                                                 umount.Process.Kill()
1400                                         }
1401                                         runner.ArvMount.Process.Kill()
1402                                 }
1403                         }
1404                 }
1405                 runner.ArvMount = nil
1406         }
1407
1408         if runner.ArvMountPoint != "" {
1409                 if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
1410                         runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
1411                 }
1412                 runner.ArvMountPoint = ""
1413         }
1414
1415         if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
1416                 runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
1417         }
1418 }
1419
1420 // CommitLogs posts the collection containing the final container logs.
1421 func (runner *ContainerRunner) CommitLogs() error {
1422         func() {
1423                 // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
1424                 runner.cStateLock.Lock()
1425                 defer runner.cStateLock.Unlock()
1426
1427                 runner.CrunchLog.Print(runner.finalState)
1428
1429                 if runner.arvMountLog != nil {
1430                         runner.arvMountLog.Close()
1431                 }
1432
1433                 // From now on, log only to stderr, in case any further
1434                 // errors occur while shutting down (such as failing to
1435                 // write the log to Keep).
1436                 runner.CrunchLog = newLogWriter(newTimestamper(newStringPrefixer(os.Stderr, runner.Container.UUID+" ")))
1437         }()
1438
1439         if runner.keepstoreLogger != nil {
1440                 // Flush any buffered logs from our local keepstore
1441                 // process.  Discard anything logged after this point
1442                 // -- it won't end up in the log collection, so
1443                 // there's no point writing it to the collectionfs.
1444                 runner.keepstoreLogbuf.SetWriter(io.Discard)
1445                 runner.keepstoreLogger.Close()
1446                 runner.keepstoreLogger = nil
1447         }
1448
1449         if runner.logPDHFinal != nil {
1450                 // If we have already assigned something to logPDHFinal,
1451                 // we must be closing the re-opened log, which won't
1452                 // end up getting attached to the container record and
1453                 // therefore doesn't need to be saved as a collection
1454                 // -- it exists only to send logs to other channels.
1455                 return nil
1456         }
1457
1458         return runner.saveLogCollection(true)
1459 }
1460
1461 // saveLogCollection flushes buffered logs to Keep and creates or updates the log collection.
1462 //
1463 // Also update the container record with the updated log PDH -- except
1464 // this part is skipped if (a) the container hasn't entered Running
1465 // state yet, meaning we can't assign a log value, or (b) final==true,
1466 // meaning the caller will immediately update the container record to
1467 // "Complete" state and update the log PDH in the same API call.
1468 func (runner *ContainerRunner) saveLogCollection(final bool) error {
1469         runner.logMtx.Lock()
1470         defer runner.logMtx.Unlock()
1471         if runner.logPDHFinal != nil {
1472                 // Already finalized.
1473                 return nil
1474         }
1475         updates := arvadosclient.Dict{
1476                 "name": "logs for " + runner.Container.UUID,
1477         }
1478         mt, errFlush := runner.LogCollection.MarshalManifest(".")
1479         if errFlush == nil {
1480                 // Only send updated manifest text if there was no
1481                 // error.
1482                 updates["manifest_text"] = mt
1483         }
1484
1485         // Even if flushing the manifest had an error, we still want
1486         // to update the log record, if possible, to push the trash_at
1487         // and delete_at times into the future.  Details on bug
1488         // #17293.
1489         if final {
1490                 updates["is_trashed"] = true
1491         } else {
1492                 // We set trash_at so this collection gets
1493                 // automatically cleaned up eventually.  It used to be
1494                 // 12 hours but we had a situation where the API
1495                 // server was down over a weekend but the containers
1496                 // kept running such that the log collection got
1497                 // trashed, so now we make it 2 weeks.  refs #20378
1498                 exp := time.Now().Add(time.Duration(24*14) * time.Hour)
1499                 updates["trash_at"] = exp
1500                 updates["delete_at"] = exp
1501         }
1502         reqBody := arvadosclient.Dict{
1503                 "select":     []string{"uuid", "portable_data_hash"},
1504                 "collection": updates,
1505         }
1506         var saved arvados.Collection
1507         var errUpdate error
1508         if runner.logUUID == "" {
1509                 reqBody["ensure_unique_name"] = true
1510                 errUpdate = runner.DispatcherArvClient.Create("collections", reqBody, &saved)
1511         } else {
1512                 errUpdate = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &saved)
1513         }
1514         if errUpdate == nil {
1515                 runner.logUUID = saved.UUID
1516                 runner.logPDH = saved.PortableDataHash
1517         }
1518
1519         if errFlush != nil || errUpdate != nil {
1520                 return fmt.Errorf("error recording logs: %q, %q", errFlush, errUpdate)
1521         }
1522         if final {
1523                 runner.logPDHFinal = &saved.PortableDataHash
1524         }
1525         if final || runner.finalState == "Queued" {
1526                 // If final, the caller (Run -> CommitLogs) will
1527                 // immediately update the log attribute to logPDHFinal
1528                 // while setting state to Complete, so it would be
1529                 // redundant to do it here.
1530                 //
1531                 // If runner.finalState=="Queued", the container state
1532                 // has not changed to "Running", so updating the log
1533                 // attribute is not allowed.
1534                 return nil
1535         }
1536         return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1537                 "select": []string{"uuid"},
1538                 "container": arvadosclient.Dict{
1539                         "log": saved.PortableDataHash,
1540                 },
1541         }, nil)
1542 }
1543
1544 // UpdateContainerRunning sets the container record's state to "Running" and reports the gateway address and current log PDH.
1545 func (runner *ContainerRunner) UpdateContainerRunning() error {
1546         runner.logMtx.Lock()
1547         logPDH := runner.logPDH
1548         runner.logMtx.Unlock()
1549
1550         runner.cStateLock.Lock()
1551         defer runner.cStateLock.Unlock()
1552         if runner.cCancelled {
1553                 return ErrCancelled
1554         }
1555         return runner.DispatcherArvClient.Update(
1556                 "containers",
1557                 runner.Container.UUID,
1558                 arvadosclient.Dict{
1559                         "select": []string{"uuid"},
1560                         "container": arvadosclient.Dict{
1561                                 "gateway_address": runner.gateway.Address,
1562                                 "state":           "Running",
1563                                 "log":             logPDH,
1564                         },
1565                 },
1566                 nil,
1567         )
1568 }
1569
1570 // ContainerToken returns the api_token the container (and any
1571 // arv-mount processes) are allowed to use.
1572 func (runner *ContainerRunner) ContainerToken() (string, error) {
1573         if runner.token != "" {
1574                 return runner.token, nil
1575         }
1576
1577         var auth arvados.APIClientAuthorization
1578         err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
1579         if err != nil {
1580                 return "", err
1581         }
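             // The resulting token has the form
             // "v2/<auth uuid>/<secret>/<container uuid>"; the trailing
             // container UUID ties the token to this particular container.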
1582         runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
1583         return runner.token, nil
1584 }
1585
1586 // UpdateContainerFinal updates the container record's state on the API
1587 // server to "Complete" or "Cancelled", with final log, exit code, output, and cost.
1588 func (runner *ContainerRunner) UpdateContainerFinal() error {
1589         update := arvadosclient.Dict{}
1590         update["state"] = runner.finalState
1591         if runner.logPDHFinal != nil {
1592                 update["log"] = *runner.logPDHFinal
1593         }
1594         if runner.ExitCode != nil {
1595                 update["exit_code"] = *runner.ExitCode
1596         } else {
1597                 update["exit_code"] = nil
1598         }
1599         if runner.finalState == "Complete" && runner.OutputPDH != nil {
1600                 update["output"] = *runner.OutputPDH
1601         }
1602         update["cost"] = runner.calculateCost(time.Now())
1603         return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1604                 "select":    []string{"uuid"},
1605                 "container": update,
1606         }, nil)
1607 }
1608
1609 // IsCancelled returns the value of Cancelled, with goroutine safety.
1610 func (runner *ContainerRunner) IsCancelled() bool {
1611         runner.cStateLock.Lock()
1612         defer runner.cStateLock.Unlock()
1613         return runner.cCancelled
1614 }
1615
1616 func (runner *ContainerRunner) openLogFile(name string) (io.WriteCloser, error) {
1617         return runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
1618 }
1619
1620 // Run the full container lifecycle.
1621 func (runner *ContainerRunner) Run() (err error) {
1622         runner.CrunchLog.Printf("crunch-run %s started", cmd.Version.String())
1623         runner.CrunchLog.Printf("%s", currentUserAndGroups())
1624         v, _ := exec.Command("arv-mount", "--version").CombinedOutput()
1625         runner.CrunchLog.Printf("Using FUSE mount: %s", v)
1626         runner.CrunchLog.Printf("Using container runtime: %s", runner.executor.Runtime())
1627         runner.CrunchLog.Printf("Executing container: %s", runner.Container.UUID)
1628         runner.costStartTime = time.Now()
1629
1630         hostname, hosterr := os.Hostname()
1631         if hosterr != nil {
1632                 runner.CrunchLog.Printf("Error getting hostname: %v", hosterr)
1633         } else {
1634                 runner.CrunchLog.Printf("Executing on host '%s'", hostname)
1635         }
1636
1637         sigusr2 := make(chan os.Signal, 1)
1638         signal.Notify(sigusr2, syscall.SIGUSR2)
1639         defer signal.Stop(sigusr2)
1640         runner.loadPrices()
1641         go runner.handleSIGUSR2(sigusr2)
1642
1643         runner.finalState = "Queued"
1644
1645         defer func() {
1646                 runner.CleanupDirs()
1647                 runner.CrunchLog.Printf("crunch-run finished")
1648         }()
1649
1650         err = runner.fetchContainerRecord()
1651         if err != nil {
1652                 return
1653         }
1654         if runner.Container.State != "Locked" {
1655                 return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
1656         }
1657
1658         var bindmounts map[string]bindmount
1659         defer func() {
1660                 // checkErr prints e (unless it's nil) and sets err to
1661                 // e (unless err is already non-nil). Thus, if err
1662                 // hasn't already been assigned when Run() returns,
1663                 // this cleanup func will cause Run() to return the
1664                 // first non-nil error that is passed to checkErr().
1665                 checkErr := func(errorIn string, e error) {
1666                         if e == nil {
1667                                 return
1668                         }
1669                         runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
1670                         if err == nil {
1671                                 err = e
1672                         }
1673                         if runner.finalState == "Complete" {
1674                                 // There was an error in the finalization.
1675                                 runner.finalState = "Cancelled"
1676                         }
1677                 }
1678
1679                 // Log the error encountered in Run(), if any
1680                 checkErr("Run", err)
1681
1682                 if runner.finalState == "Queued" {
1683                         runner.UpdateContainerFinal()
1684                         return
1685                 }
1686
1687                 if runner.IsCancelled() {
1688                         runner.finalState = "Cancelled"
1689                         // but don't return yet -- we still want to
1690                         // capture partial output and write logs
1691                 }
1692
1693                 if bindmounts != nil {
1694                         if errSave := runner.saveLogCollection(false); errSave != nil {
1695                                 // This doesn't merit failing the
1696                                 // container, but should be logged.
1697                                 runner.CrunchLog.Printf("error saving log collection: %v", errSave)
1698                         }
1699                         checkErr("CaptureOutput", runner.CaptureOutput(bindmounts))
1700                 }
1701                 checkErr("stopHoststat", runner.stopHoststat())
1702                 checkErr("CommitLogs", runner.CommitLogs())
1703                 runner.CleanupDirs()
1704                 checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
1705         }()
1706
1707         runner.setupSignals()
1708         err = runner.startHoststat()
1709         if err != nil {
1710                 return
1711         }
1712         if runner.keepstore != nil {
1713                 runner.hoststatReporter.ReportPID("keepstore", runner.keepstore.Process.Pid)
1714         }
1715
1716         // set up FUSE mount and binds
1717         bindmounts, err = runner.SetupMounts()
1718         if err != nil {
1719                 runner.finalState = "Cancelled"
1720                 err = fmt.Errorf("while setting up mounts: %v", err)
1721                 return
1722         }
1723
1724         // check for and/or load image
1725         imageID, err := runner.LoadImage()
1726         if err != nil {
1727                 if !runner.checkBrokenNode(err) {
1728                         // Failed to load image but not due to a "broken node"
1729                         // condition, probably user error.
1730                         runner.finalState = "Cancelled"
1731                 }
1732                 err = fmt.Errorf("failed to load container image: %v", err)
1733                 return
1734         }
1735
1736         err = runner.CreateContainer(imageID, bindmounts)
1737         if err != nil {
1738                 return
1739         }
1740         err = runner.LogHostInfo()
1741         if err != nil {
1742                 return
1743         }
1744         err = runner.LogNodeRecord()
1745         if err != nil {
1746                 return
1747         }
1748         err = runner.LogContainerRecord()
1749         if err != nil {
1750                 return
1751         }
1752
1753         if runner.IsCancelled() {
1754                 return
1755         }
1756
1757         err = runner.saveLogCollection(false)
1758         if err != nil {
1759                 return
1760         }
1761         err = runner.UpdateContainerRunning()
1762         if err != nil {
1763                 return
1764         }
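             // From this point on the container is (or may be) running, so
             // if crunch-run fails or is interrupted before WaitFinish
             // completes, the correct terminal state is "Cancelled".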
1765         runner.finalState = "Cancelled"
1766
1767         err = runner.startCrunchstat()
1768         if err != nil {
1769                 return
1770         }
1771
1772         err = runner.StartContainer()
1773         if err != nil {
1774                 runner.checkBrokenNode(err)
1775                 return
1776         }
1777
1778         err = runner.WaitFinish()
1779         if err == nil && !runner.IsCancelled() {
1780                 runner.finalState = "Complete"
1781         }
1782         return
1783 }
1784
1785 // Fetch the current container record (uuid = runner.Container.UUID)
1786 // into runner.Container.
1787 func (runner *ContainerRunner) fetchContainerRecord() error {
1788         reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
1789         if err != nil {
1790                 return fmt.Errorf("error fetching container record: %v", err)
1791         }
1792         defer reader.Close()
1793
1794         dec := json.NewDecoder(reader)
1795         dec.UseNumber()
1796         err = dec.Decode(&runner.Container)
1797         if err != nil {
1798                 return fmt.Errorf("error decoding container record: %v", err)
1799         }
1800
1801         var sm struct {
1802                 SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
1803         }
1804
1805         containerToken, err := runner.ContainerToken()
1806         if err != nil {
1807                 return fmt.Errorf("error getting container token: %v", err)
1808         }
1809
1810         runner.ContainerArvClient, runner.ContainerKeepClient,
1811                 runner.containerClient, err = runner.MkArvClient(containerToken)
1812         if err != nil {
1813                 return fmt.Errorf("error creating container API client: %v", err)
1814         }
1815
1816         runner.ContainerKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
1817         runner.DispatcherKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
1818
1819         err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
1820         if err != nil {
1821                 if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
1822                         return fmt.Errorf("error fetching secret_mounts: %v", err)
1823                 }
1824                 // ok && apierr.HttpStatusCode == 404, which means
1825                 // secret_mounts isn't supported by this API server.
1826         }
1827         runner.SecretMounts = sm.SecretMounts
1828
1829         return nil
1830 }
1831
1832 // NewContainerRunner creates a new container runner.
1833 func NewContainerRunner(dispatcherClient *arvados.Client,
1834         dispatcherArvClient IArvadosClient,
1835         dispatcherKeepClient IKeepClient,
1836         containerUUID string) (*ContainerRunner, error) {
1837
1838         cr := &ContainerRunner{
1839                 dispatcherClient:     dispatcherClient,
1840                 DispatcherArvClient:  dispatcherArvClient,
1841                 DispatcherKeepClient: dispatcherKeepClient,
1842         }
1843         cr.RunArvMount = cr.ArvMountCmd
1844         cr.MkTempDir = ioutil.TempDir
1845         cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
1846                 cl, err := arvadosclient.MakeArvadosClient()
1847                 if err != nil {
1848                         return nil, nil, nil, err
1849                 }
1850                 cl.Retries = 10
1851                 cl.ApiToken = token
1852                 kc, err := keepclient.MakeKeepClient(cl)
1853                 if err != nil {
1854                         return nil, nil, nil, err
1855                 }
1856                 kc.Retries = 10
1857                 c2 := arvados.NewClientFromEnv()
1858                 c2.AuthToken = token
1859                 return cl, kc, c2, nil
1860         }
1861         var err error
1862         cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
1863         if err != nil {
1864                 return nil, err
1865         }
1866         cr.Container.UUID = containerUUID
1867         f, err := cr.openLogFile("crunch-run")
1868         if err != nil {
1869                 return nil, err
1870         }
1871         cr.CrunchLog = newLogWriter(newTimestamper(io.MultiWriter(f, newStringPrefixer(os.Stderr, cr.Container.UUID+" "))))
1872
1873         go cr.updateLogs()
1874
1875         return cr, nil
1876 }
1877
1878 func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
1879         log := log.New(stderr, "", 0)
1880         flags := flag.NewFlagSet(prog, flag.ContinueOnError)
1881         statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
1882         flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree (obsolete, ignored)")
1883         flags.String("cgroup-parent", "docker", "name of container's parent cgroup (obsolete, ignored)")
1884         cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given `subsystem` as parent cgroup for container (subsystem argument is only relevant for cgroups v1; in cgroups v2 / unified mode, any non-empty value means use current cgroup); if empty, use the docker daemon's default cgroup parent. See https://doc.arvados.org/install/crunch2-slurm/install-dispatch.html#CrunchRunCommand-cgroups")
1885         caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
1886         detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
1887         stdinConfig := flags.Bool("stdin-config", false, "Load config and environment variables from JSON message on stdin")
1888         configFile := flags.String("config", arvados.DefaultConfigFile, "filename of cluster config file to try loading if -stdin-config=false (default is $ARVADOS_CONFIG)")
1889         sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
1890         kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
1891         list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes (and notify them to use price data passed on stdin)")
1892         enableMemoryLimit := flags.Bool("enable-memory-limit", true, "tell container runtime to limit container's memory usage")
1893         enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
1894         networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
1895         memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
1896         runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
1897         brokenNodeHook := flags.String("broken-node-hook", "", "script to run if node is detected to be broken (for example, Docker daemon is not running)")
1898         flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
1899         version := flags.Bool("version", false, "Write version information to stdout and exit 0.")
1900
1901         ignoreDetachFlag := false
1902         if len(args) > 0 && args[0] == "-no-detach" {
1903                 // This process was invoked by a parent process, which
1904                 // has passed along its own arguments, including
1905                 // -detach, after the leading -no-detach flag.  Strip
1906                 // the leading -no-detach flag (it's not recognized by
1907                 // flags.Parse()) and ignore the -detach flag that
1908                 // comes later.
1909                 args = args[1:]
1910                 ignoreDetachFlag = true
1911         }
1912
1913         if ok, code := cmd.ParseFlags(flags, prog, args, "container-uuid", stderr); !ok {
1914                 return code
1915         } else if *version {
1916                 fmt.Fprintln(stdout, prog, cmd.Version.String())
1917                 return 0
1918         } else if !*list && flags.NArg() != 1 {
1919                 fmt.Fprintf(stderr, "missing required argument: container-uuid (try -help)\n")
1920                 return 2
1921         }
1922
1923         containerUUID := flags.Arg(0)
1924
1925         switch {
1926         case *detach && !ignoreDetachFlag:
1927                 return Detach(containerUUID, prog, args, stdin, stdout, stderr)
1928         case *kill >= 0:
1929                 return KillProcess(containerUUID, syscall.Signal(*kill), stdout, stderr)
1930         case *list:
1931                 return ListProcesses(stdin, stdout, stderr)
1932         }
1933
1934         if len(containerUUID) != 27 {
1935                 log.Printf("usage: %s [options] UUID", prog)
1936                 return 1
1937         }
1938
1939         var keepstoreLogbuf bufThenWrite
1940         var conf ConfigData
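             // With -stdin-config, the dispatcher sends a JSON-encoded
             // ConfigData object on stdin, e.g. (hypothetical values):
             //
             //      {"Env": {"ARVADOS_API_HOST": "zzzzz.example.com"},
             //       "KeepBuffers": 4, "EC2SpotCheck": true, "Cluster": {...}}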
1941         if *stdinConfig {
1942                 err := json.NewDecoder(stdin).Decode(&conf)
1943                 if err != nil {
1944                         log.Printf("decode stdin: %s", err)
1945                         return 1
1946                 }
1947                 for k, v := range conf.Env {
1948                         err = os.Setenv(k, v)
1949                         if err != nil {
1950                                 log.Printf("setenv(%q): %s", k, err)
1951                                 return 1
1952                         }
1953                 }
1954                 if conf.Cluster != nil {
1955                         // ClusterID is missing from the JSON
1956                         // representation, but we need it to generate
1957                         // a valid config file for keepstore, so we
1958                         // fill it using the container UUID prefix.
1959                         conf.Cluster.ClusterID = containerUUID[:5]
1960                 }
1961         } else {
1962                 conf = hpcConfData(containerUUID, *configFile, io.MultiWriter(&keepstoreLogbuf, stderr))
1963         }
1964
1965         log.Printf("crunch-run %s started", cmd.Version.String())
1966         time.Sleep(*sleep)
1967
1968         if *caCertsPath != "" {
1969                 os.Setenv("SSL_CERT_FILE", *caCertsPath)
1970         }
1971
1972         keepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))
1973         if err != nil {
1974                 log.Print(err)
1975                 return 1
1976         }
1977         if keepstore != nil {
1978                 defer keepstore.Process.Kill()
1979         }
1980
1981         api, err := arvadosclient.MakeArvadosClient()
1982         if err != nil {
1983                 log.Printf("%s: %v", containerUUID, err)
1984                 return 1
1985         }
1986         // arvadosclient now interprets Retries=10 to mean
1987         // Timeout=10m, retrying with exponential backoff + jitter.
1988         api.Retries = 10
1989
1990         kc, err := keepclient.MakeKeepClient(api)
1991         if err != nil {
1992                 log.Printf("%s: %v", containerUUID, err)
1993                 return 1
1994         }
1995         kc.Retries = 10
1996
1997         cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
1998         if err != nil {
1999                 log.Print(err)
2000                 return 1
2001         }
2002
2003         cr.keepstore = keepstore
2004         if keepstore == nil {
2005                 // Log explanation (if any) for why we're not running
2006                 // a local keepstore.
2007                 var buf bytes.Buffer
2008                 keepstoreLogbuf.SetWriter(&buf)
2009                 if buf.Len() > 0 {
2010                         cr.CrunchLog.Printf("%s", strings.TrimSpace(buf.String()))
2011                 }
2012         } else if logWhat := conf.Cluster.Containers.LocalKeepLogsToContainerLog; logWhat == "none" {
2013                 cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
2014                 keepstoreLogbuf.SetWriter(io.Discard)
2015         } else {
2016                 cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s, writing logs to keepstore.txt in log collection", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
2017                 cr.keepstoreLogger, err = cr.openLogFile("keepstore")
2018                 if err != nil {
2019                         log.Print(err)
2020                         return 1
2021                 }
2022
2023                 var writer io.WriteCloser = cr.keepstoreLogger
2024                 if logWhat == "errors" {
2025                         writer = &filterKeepstoreErrorsOnly{WriteCloser: writer}
2026                 } else if logWhat != "all" {
2027                         // should have been caught earlier by
2028                         // dispatcher's config loader
2029                         log.Printf("invalid value for Containers.LocalKeepLogsToContainerLog: %q", logWhat)
2030                         return 1
2031                 }
2032                 err = keepstoreLogbuf.SetWriter(writer)
2033                 if err != nil {
2034                         log.Print(err)
2035                         return 1
2036                 }
2037                 cr.keepstoreLogbuf = &keepstoreLogbuf
2038         }
2039
2040         switch *runtimeEngine {
2041         case "docker":
2042                 cr.executor, err = newDockerExecutor(containerUUID, cr.CrunchLog.Printf, cr.containerWatchdogInterval)
2043         case "singularity":
2044                 cr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)
2045         default:
2046                 cr.CrunchLog.Printf("%s: unsupported RuntimeEngine %q", containerUUID, *runtimeEngine)
2047                 return 1
2048         }
2049         if err != nil {
2050                 cr.CrunchLog.Printf("%s: %v", containerUUID, err)
2051                 cr.checkBrokenNode(err)
2052                 return 1
2053         }
2054         defer cr.executor.Close()
2055
2056         cr.brokenNodeHook = *brokenNodeHook
2057
2058         gwAuthSecret := os.Getenv("GatewayAuthSecret")
2059         os.Unsetenv("GatewayAuthSecret")
2060         if gwAuthSecret == "" {
2061                 // not safe to run a gateway service without an auth
2062                 // secret
2063                 cr.CrunchLog.Printf("Not starting a gateway server (GatewayAuthSecret was not provided by dispatcher)")
2064         } else {
2065                 gwListen := os.Getenv("GatewayAddress")
2066                 cr.gateway = Gateway{
2067                         Address:       gwListen,
2068                         AuthSecret:    gwAuthSecret,
2069                         ContainerUUID: containerUUID,
2070                         Target:        cr.executor,
2071                         Log:           cr.CrunchLog,
2072                         LogCollection: cr.LogCollection,
2073                 }
2074                 if gwListen == "" {
2075                         // Direct connection won't work, so we use the
2076                         // gateway_address field to indicate the
2077                         // internalURL of the controller process that
2078                         // has the current tunnel connection.
2079                         cr.gateway.ArvadosClient = cr.dispatcherClient
2080                         cr.gateway.UpdateTunnelURL = func(url string) {
2081                                 cr.gateway.Address = "tunnel " + url
2082                                 cr.DispatcherArvClient.Update("containers", containerUUID,
2083                                         arvadosclient.Dict{
2084                                                 "select":    []string{"uuid"},
2085                                                 "container": arvadosclient.Dict{"gateway_address": cr.gateway.Address},
2086                                         }, nil)
2087                         }
2088                 }
2089                 err = cr.gateway.Start()
2090                 if err != nil {
2091                         log.Printf("error starting gateway server: %s", err)
2092                         return 1
2093                 }
2094         }
2095
2096         parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerUUID+".")
2097         if tmperr != nil {
2098                 log.Printf("%s: %v", containerUUID, tmperr)
2099                 return 1
2100         }
2101
2102         cr.parentTemp = parentTemp
2103         cr.statInterval = *statInterval
2104         cr.enableMemoryLimit = *enableMemoryLimit
2105         cr.enableNetwork = *enableNetwork
2106         cr.networkMode = *networkMode
2107         if *cgroupParentSubsystem != "" {
2108                 p, err := findCgroup(os.DirFS("/"), *cgroupParentSubsystem)
2109                 if err != nil {
2110                         log.Printf("fatal: cgroup parent subsystem: %s", err)
2111                         return 1
2112                 }
2113                 cr.setCgroupParent = p
2114         }
2115
2116         if conf.EC2SpotCheck {
2117                 go cr.checkSpotInterruptionNotices()
2118         }
2119
2120         runerr := cr.Run()
2121
2122         if *memprofile != "" {
2123                 f, err := os.Create(*memprofile)
2124                 if err != nil {
2125                         log.Printf("could not create memory profile: %s", err)
2126                 } else {
2127                         runtime.GC() // get up-to-date statistics
2128                         if err := pprof.WriteHeapProfile(f); err != nil {
2129                                 log.Printf("could not write memory profile: %s", err)
2130                         }
2131                         if closeerr := f.Close(); closeerr != nil {
2132                                 log.Printf("closing memprofile file: %s", closeerr)
2133                         }
2134                 }
2135         }
2136
2137         if runerr != nil {
2138                 log.Printf("%s: %v", containerUUID, runerr)
2139                 return 1
2140         }
2141         return 0
2142 }
2143
2144 // Try to load ConfigData in hpc (slurm/lsf) environment. This means
2145 // loading the cluster config from the specified file and (if that
2146 // works) getting the runtime_constraints container field from
2147 // controller to determine # VCPUs so we can calculate KeepBuffers.
2148 func hpcConfData(uuid string, configFile string, stderr io.Writer) ConfigData {
2149         var conf ConfigData
2150         conf.Cluster = loadClusterConfigFile(configFile, stderr)
2151         if conf.Cluster == nil {
2152                 // skip loading the container record -- we won't be
2153                 // able to start local keepstore anyway.
2154                 return conf
2155         }
2156         arv, err := arvadosclient.MakeArvadosClient()
2157         if err != nil {
2158                 fmt.Fprintf(stderr, "error setting up arvadosclient: %s\n", err)
2159                 return conf
2160         }
2161         // arvadosclient now interprets Retries=10 to mean
2162         // Timeout=10m, retrying with exponential backoff + jitter.
2163         arv.Retries = 10
2164         var ctr arvados.Container
2165         err = arv.Call("GET", "containers", uuid, "", arvadosclient.Dict{"select": []string{"runtime_constraints"}}, &ctr)
2166         if err != nil {
2167                 fmt.Fprintf(stderr, "error getting container record: %s\n", err)
2168                 return conf
2169         }
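             // Scale buffer count with the container's CPU allocation:
             // e.g., 4 VCPUs with LocalKeepBlobBuffersPerVCPU=1 yields
             // KeepBuffers=4.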
2170         if ctr.RuntimeConstraints.VCPUs > 0 {
2171                 conf.KeepBuffers = ctr.RuntimeConstraints.VCPUs * conf.Cluster.Containers.LocalKeepBlobBuffersPerVCPU
2172         }
2173         return conf
2174 }
2175
2176 // Load cluster config file from given path. If an error occurs, log
2177 // the error to stderr and return nil.
2178 func loadClusterConfigFile(path string, stderr io.Writer) *arvados.Cluster {
2179         ldr := config.NewLoader(&bytes.Buffer{}, ctxlog.New(stderr, "plain", "info"))
2180         ldr.Path = path
2181         cfg, err := ldr.Load()
2182         if err != nil {
2183                 fmt.Fprintf(stderr, "could not load config file %s: %s\n", path, err)
2184                 return nil
2185         }
2186         cluster, err := cfg.GetCluster("")
2187         if err != nil {
2188                 fmt.Fprintf(stderr, "could not use config file %s: %s\n", path, err)
2189                 return nil
2190         }
2191         fmt.Fprintf(stderr, "loaded config file %s\n", path)
2192         return cluster
2193 }
2194
2195 func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, error) {
2196         if configData.KeepBuffers < 1 {
2197                 fmt.Fprintf(logbuf, "not starting a local keepstore process because KeepBuffers=%v in config\n", configData.KeepBuffers)
2198                 return nil, nil
2199         }
2200         if configData.Cluster == nil {
2201                 fmt.Fprint(logbuf, "not starting a local keepstore process because cluster config file was not loaded\n")
2202                 return nil, nil
2203         }
2204         for uuid, vol := range configData.Cluster.Volumes {
2205                 if len(vol.AccessViaHosts) > 0 {
2206                         fmt.Fprintf(logbuf, "not starting a local keepstore process because a volume (%s) uses AccessViaHosts\n", uuid)
2207                         return nil, nil
2208                 }
2209                 if !vol.ReadOnly && vol.Replication < configData.Cluster.Collections.DefaultReplication {
2210                         fmt.Fprintf(logbuf, "not starting a local keepstore process because a writable volume (%s) has replication less than Collections.DefaultReplication (%d < %d)\n", uuid, vol.Replication, configData.Cluster.Collections.DefaultReplication)
2211                         return nil, nil
2212                 }
2213         }
2214
2215         // Rather than have an alternate way to tell keepstore how
2216         // many buffers to use, etc., when starting it this way, we
2217         // just modify the cluster configuration that we feed it on
2218         // stdin.
2219         ccfg := *configData.Cluster
2220         ccfg.API.MaxKeepBlobBuffers = configData.KeepBuffers
2221         ccfg.Collections.BlobTrash = false
2222         ccfg.Collections.BlobTrashConcurrency = 0
2223         ccfg.Collections.BlobDeleteConcurrency = 0
2224
2225         localaddr := localKeepstoreAddr()
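             // Bind to port 0 so the kernel picks a free port, then close
             // the listener and hand the chosen address to keepstore. There
             // is a small window in which another process could claim the
             // port; that risk is tolerated here for simplicity.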
2226         ln, err := net.Listen("tcp", net.JoinHostPort(localaddr, "0"))
2227         if err != nil {
2228                 return nil, err
2229         }
2230         _, port, err := net.SplitHostPort(ln.Addr().String())
2231         if err != nil {
2232                 ln.Close()
2233                 return nil, err
2234         }
2235         ln.Close()
2236         url := "http://" + net.JoinHostPort(localaddr, port)
2237
2238         fmt.Fprintf(logbuf, "starting keepstore on %s\n", url)
2239
2240         var confJSON bytes.Buffer
2241         err = json.NewEncoder(&confJSON).Encode(arvados.Config{
2242                 Clusters: map[string]arvados.Cluster{
2243                         ccfg.ClusterID: ccfg,
2244                 },
2245         })
2246         if err != nil {
2247                 return nil, err
2248         }
2249         cmd := exec.Command("/proc/self/exe", "keepstore", "-config=-")
2250         if target, err := os.Readlink(cmd.Path); err == nil && strings.HasSuffix(target, ".test") {
2251                 // If we're a 'go test' process, running
2252                 // /proc/self/exe would start the test suite in a
2253                 // child process, which is not what we want.
2254                 cmd.Path, _ = exec.LookPath("go")
2255                 cmd.Args = append([]string{"go", "run", "../../cmd/arvados-server"}, cmd.Args[1:]...)
2256                 cmd.Env = os.Environ()
2257         }
2258         cmd.Stdin = &confJSON
2259         cmd.Stdout = logbuf
2260         cmd.Stderr = logbuf
2261         cmd.Env = append(cmd.Env,
2262                 "GOGC=10",
2263                 "ARVADOS_SERVICE_INTERNAL_URL="+url)
2264         err = cmd.Start()
2265         if err != nil {
2266                 return nil, fmt.Errorf("error starting keepstore process: %w", err)
2267         }
2268         cmdExited := make(chan struct{})
2269         go func() {
2270                 cmd.Wait()
2271                 close(cmdExited)
2272         }()
2273         ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
2274         defer cancel()
2275         poll := time.NewTicker(time.Second / 10)
2276         defer poll.Stop()
2277         client := http.Client{}
2278         for range poll.C {
2279                 testReq, err := http.NewRequestWithContext(ctx, "GET", url+"/_health/ping", nil)
2280                 if err != nil {
2281                         return nil, err
2282                 }
2283                 testReq.Header.Set("Authorization", "Bearer "+configData.Cluster.ManagementToken)
2284                 resp, err := client.Do(testReq)
2285                 if err == nil {
2286                         resp.Body.Close()
2287                         if resp.StatusCode == http.StatusOK {
2288                                 break
2289                         }
2290                 }
2291                 select {
2292                 case <-cmdExited:
2293                         return nil, fmt.Errorf("keepstore child process exited")
2294                 default:
2295                 }
2294                 if ctx.Err() != nil {
2295                         return nil, fmt.Errorf("timed out waiting for new keepstore process to report healthy")
2296                 }
2297         }
2298         os.Setenv("ARVADOS_KEEP_SERVICES", url)
2299         return cmd, nil
2300 }
2301
2302 // return current uid, gid, groups in a format suitable for logging:
2303 // "crunch-run process has uid=1234(arvados) gid=1234(arvados)
2304 // groups=1234(arvados),114(fuse)"
2305 func currentUserAndGroups() string {
2306         u, err := user.Current()
2307         if err != nil {
2308                 return fmt.Sprintf("error getting current user ID: %s", err)
2309         }
2310         s := fmt.Sprintf("crunch-run process has uid=%s(%s) gid=%s", u.Uid, u.Username, u.Gid)
2311         if g, err := user.LookupGroupId(u.Gid); err == nil {
2312                 s += fmt.Sprintf("(%s)", g.Name)
2313         }
2314         s += " groups="
2315         if gids, err := u.GroupIds(); err == nil {
2316                 for i, gid := range gids {
2317                         if i > 0 {
2318                                 s += ","
2319                         }
2320                         s += gid
2321                         if g, err := user.LookupGroupId(gid); err == nil {
2322                                 s += fmt.Sprintf("(%s)", g.Name)
2323                         }
2324                 }
2325         }
2326         return s
2327 }
2328
2329 // Return a suitable local interface address for a local keepstore
2330 // service. Currently this is the numerically lowest ipv4 address
2331 // assigned to a local interface, excluding the loopback (127/8),
2332 // carrier-grade NAT (100.64/10), and link-local (169.254/16) ranges.
2333 func localKeepstoreAddr() string {
2334         var ips []net.IP
2335         // Ignore error (proceed with zero IPs)
2336         addrs, _ := processIPs(os.Getpid())
2337         for addr := range addrs {
2338                 ip := net.ParseIP(addr)
2339                 if ip == nil {
2340                         // invalid
2341                         continue
2342                 }
2343                 if ip.Mask(net.CIDRMask(8, 32)).Equal(net.IPv4(127, 0, 0, 0)) ||
2344                         ip.Mask(net.CIDRMask(10, 32)).Equal(net.IPv4(100, 64, 0, 0)) ||
2345                         ip.Mask(net.CIDRMask(16, 32)).Equal(net.IPv4(169, 254, 0, 0)) {
2346                         // unsuitable
2347                         continue
2348                 }
2349                 ips = append(ips, ip)
2350         }
2351         if len(ips) == 0 {
2352                 return "0.0.0.0"
2353         }
2354         sort.Slice(ips, func(ii, jj int) bool {
2355                 i, j := ips[ii], ips[jj]
2356                 if len(i) != len(j) {
2357                         return len(i) < len(j)
2358                 }
2359                 for x := range i {
2360                         if i[x] != j[x] {
2361                                 return i[x] < j[x]
2362                         }
2363                 }
2364                 return false
2365         })
2366         return ips[0].String()
2367 }
2368
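     // loadPrices merges the instance price history file written by the
     // dispatcher (lockdir/pricesfile), if present, into cr.prices, and
     // logs any price changes newer than the previously known history.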
2369 func (cr *ContainerRunner) loadPrices() {
2370         buf, err := os.ReadFile(filepath.Join(lockdir, pricesfile))
2371         if err != nil {
2372                 if !os.IsNotExist(err) {
2373                         cr.CrunchLog.Printf("loadPrices: read: %s", err)
2374                 }
2375                 return
2376         }
2377         var prices []cloud.InstancePrice
2378         err = json.Unmarshal(buf, &prices)
2379         if err != nil {
2380                 cr.CrunchLog.Printf("loadPrices: decode: %s", err)
2381                 return
2382         }
2383         cr.pricesLock.Lock()
2384         defer cr.pricesLock.Unlock()
2385         var lastKnown time.Time
2386         if len(cr.prices) > 0 {
2387                 lastKnown = cr.prices[0].StartTime
2388         }
2389         cr.prices = cloud.NormalizePriceHistory(append(prices, cr.prices...))
2390         for i := len(cr.prices) - 1; i >= 0; i-- {
2391                 price := cr.prices[i]
2392                 if price.StartTime.After(lastKnown) {
2393                         cr.CrunchLog.Printf("Instance price changed to %#.3g at %s", price.Price, price.StartTime.UTC())
2394                 }
2395         }
2396 }
2397
2398 func (cr *ContainerRunner) calculateCost(now time.Time) float64 {
2399         cr.pricesLock.Lock()
2400         defer cr.pricesLock.Unlock()
2401
2402         // First, make a "prices" slice with the real data as far back
2403         // as it goes, and (if needed) a "since the beginning of time"
2404         // placeholder containing a reasonable guess about what the
2405         // price was between cr.costStartTime and the earliest real
2406         // data point.
2407         prices := cr.prices
2408         if len(prices) == 0 {
2409                 // use price info in InstanceType record initially
2410                 // provided by cloud dispatcher
2411                 var p float64
2412                 var it arvados.InstanceType
2413                 if j := os.Getenv("InstanceType"); j != "" && json.Unmarshal([]byte(j), &it) == nil && it.Price > 0 {
2414                         p = it.Price
2415                 }
2416                 prices = []cloud.InstancePrice{{Price: p}}
2417         } else if prices[len(prices)-1].StartTime.After(cr.costStartTime) {
2418                 // guess earlier pricing was the same as the earliest
2419                 // price we know about
2420                 filler := prices[len(prices)-1]
2421                 filler.StartTime = time.Time{}
2422                 prices = append(prices, filler)
2423         }
2424
2425         // Now that our history of price changes goes back at least as
2426         // far as cr.costStartTime, add up the costs for each
2427         // interval.
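             // Worked example with hypothetical numbers: costStartTime is
             // 10:00, now is 11:30, and prices (newest first) are $0.20/h
             // from 11:00 and $0.10/h from 09:00. The 11:00-11:30 span
             // costs 0.5 h * $0.20 = $0.10; the span clamped to
             // 10:00-11:00 costs 1 h * $0.10 = $0.10; total $0.20.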
2428         cost := 0.0
2429         spanEnd := now
2430         for _, ip := range prices {
2431                 spanStart := ip.StartTime
2432                 if spanStart.After(now) {
2433                         // pricing information from the future -- not
2434                         // expected from AWS, but possible in
2435                         // principle, and exercised by tests.
2436                         continue
2437                 }
2438                 last := false
2439                 if spanStart.Before(cr.costStartTime) {
2440                         spanStart = cr.costStartTime
2441                         last = true
2442                 }
2443                 cost += ip.Price * spanEnd.Sub(spanStart).Seconds() / 3600
2444                 if last {
2445                         break
2446                 }
2447                 spanEnd = spanStart
2448         }
2449
2450         return cost
2451 }
2452
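     // handleSIGUSR2 reloads price data and writes an updated cost
     // estimate to the container record each time SIGUSR2 is received.
     // (The -list flag described above notifies running crunch-run
     // processes of new price data, evidently via this signal.)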
2453 func (runner *ContainerRunner) handleSIGUSR2(sigchan chan os.Signal) {
2454         for range sigchan {
2455                 runner.loadPrices()
2456                 update := arvadosclient.Dict{
2457                         "select": []string{"uuid"},
2458                         "container": arvadosclient.Dict{
2459                                 "cost": runner.calculateCost(time.Now()),
2460                         },
2461                 }
2462                 runner.DispatcherArvClient.Update("containers", runner.Container.UUID, update, nil)
2463         }
2464 }