// Copyright (C) The Arvados Authors. All rights reserved.
// SPDX-License-Identifier: AGPL-3.0
	"git.arvados.org/arvados.git/lib/cloud"
	"git.arvados.org/arvados.git/lib/cmd"
	"git.arvados.org/arvados.git/lib/config"
	"git.arvados.org/arvados.git/lib/crunchstat"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadosclient"
	"git.arvados.org/arvados.git/sdk/go/ctxlog"
	"git.arvados.org/arvados.git/sdk/go/keepclient"
	"golang.org/x/sys/unix"
var arvadosCertPath = "/etc/arvados/ca-certificates.crt"

var Command = command{}

// ConfigData contains environment variables and (when needed) cluster
// configuration, passed from dispatchcloud to crunch-run on stdin.
type ConfigData struct {
	Env          map[string]string
	KeepBuffers  int
	EC2SpotCheck bool
	Cluster      *arvados.Cluster
}
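
// Illustrative sketch (values are placeholders): a dispatcher that
// starts crunch-run with the -stdin-config flag could hand over
// ConfigData like this, matching the json.NewDecoder(stdin) decoding
// in RunCommand below.
//
//	conf := ConfigData{
//		Env:         map[string]string{"ARVADOS_API_HOST": "zzzzz.example.org"},
//		KeepBuffers: 4,
//		Cluster:     cluster, // *arvados.Cluster loaded elsewhere
//	}
//	buf, _ := json.Marshal(conf)
//	cmd := exec.Command("crunch-run", "-stdin-config", containerUUID)
//	cmd.Stdin = bytes.NewReader(buf)
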
// IArvadosClient is the minimal Arvados API methods used by crunch-run.
type IArvadosClient interface {
	Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
	Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
	CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
	Discovery(key string) (interface{}, error)
}

// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")

// IKeepClient is the minimal Keep API methods used by crunch-run.
type IKeepClient interface {
	BlockRead(context.Context, arvados.BlockReadOptions) (int, error)
	BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error)
	ReadAt(locator string, p []byte, off int) (int, error)
	LocalLocator(locator string) (string, error)
	SetStorageClasses(sc []string)
}

// RunArvMount is the type of a function that starts an arv-mount
// command (see ContainerRunner.RunArvMount).
type RunArvMount func(cmdline []string, tok string) (*exec.Cmd, error)

// MkTempDir is the type of a function that creates a temporary
// directory, e.g. ioutil.TempDir.
type MkTempDir func(string, string) (string, error)

type PsProcess interface {
	CmdlineSlice() ([]string, error)
}

// ContainerRunner is the main stateful struct used for a single
// execution of a container.
type ContainerRunner struct {
	executor       containerExecutor
	executorStdin  io.Closer
	executorStdout io.Closer
	executorStderr io.Closer
	// Dispatcher client is initialized with the Dispatcher token.
	// This is a privileged token used to manage container status
	// and logs.
	//
	// We have both dispatcherClient and DispatcherArvClient
	// because there are two different incompatible Arvados Go
	// SDKs and we have to use both (hopefully this gets fixed in
	// the future).
	dispatcherClient     *arvados.Client
	DispatcherArvClient  IArvadosClient
	DispatcherKeepClient IKeepClient

	// Container client is initialized with the Container token.
	// This token controls the permissions of the container, and
	// must be used for operations such as reading collections.
	//
	// Same comment as above applies to
	// containerClient/ContainerArvClient.
	containerClient     *arvados.Client
	ContainerArvClient  IArvadosClient
	ContainerKeepClient IKeepClient

	Container     arvados.Container
	LogCollection arvados.CollectionFileSystem
	RunArvMount   RunArvMount
	Volumes       map[string]struct{}
	SigChan       chan os.Signal
	ArvMountExit  chan error
	SecretMounts  map[string]arvados.Mount
	MkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)

	costStartTime time.Time

	keepstoreLogger  io.WriteCloser
	keepstoreLogbuf  *bufThenWrite
	statLogger       io.WriteCloser
	statReporter     *crunchstat.Reporter
	hoststatLogger   io.WriteCloser
	hoststatReporter *crunchstat.Reporter
	statInterval     time.Duration
	// What we tell docker to use as the container's cgroup
	// parent.
	setCgroupParent string
	// Fake root dir where crunchstat.Reporter should read OS
	// files, for testing.
	crunchstatFakeFS fs.FS

	cStateLock sync.Mutex
	cCancelled bool // StopContainer() invoked

	enableMemoryLimit bool
	enableNetwork     string // one of "default" or "always"
	networkMode       string // "none", "host", or "" -- passed through to executor
	brokenNodeHook    string // script to run if node appears to be broken
	arvMountLog       io.WriteCloser

	containerWatchdogInterval time.Duration

	prices     []cloud.InstancePrice
	pricesLock sync.Mutex
}

// setupSignals sets up signal handling to gracefully terminate the
// underlying container and update state when receiving a TERM, INT or
// QUIT signal.
func (runner *ContainerRunner) setupSignals() {
	runner.SigChan = make(chan os.Signal, 1)
	signal.Notify(runner.SigChan, syscall.SIGTERM)
	signal.Notify(runner.SigChan, syscall.SIGINT)
	signal.Notify(runner.SigChan, syscall.SIGQUIT)

	go func(sig chan os.Signal) {
		for s := range sig {
			runner.stop(s)
		}
	}(runner.SigChan)
}

// stop the underlying container.
func (runner *ContainerRunner) stop(sig os.Signal) {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	runner.CrunchLog.Printf("caught signal: %v", sig)
	runner.cCancelled = true
	runner.CrunchLog.Printf("stopping container")
	err := runner.executor.Stop()
	if err != nil {
		runner.CrunchLog.Printf("error stopping container: %s", err)
	}
}

var errorBlacklist = []string{
	"(?ms).*[Cc]annot connect to the Docker daemon.*",
	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
	"(?ms).*grpc: the connection is unavailable.*",
	"(?ms).*no space left on device.*",
}

func (runner *ContainerRunner) runBrokenNodeHook() {
	if runner.brokenNodeHook == "" {
		path := filepath.Join(lockdir, brokenfile)
		runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
		f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
		if err != nil {
			runner.CrunchLog.Printf("Error writing %s: %s", path, err)
			return
		}
		f.Close()
	} else {
		runner.CrunchLog.Printf("Running broken node hook %q", runner.brokenNodeHook)
		c := exec.Command(runner.brokenNodeHook)
		c.Stdout = runner.CrunchLog
		c.Stderr = runner.CrunchLog
		err := c.Run()
		if err != nil {
			runner.CrunchLog.Printf("Error running broken node hook: %v", err)
		}
	}
}

func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
	for _, d := range errorBlacklist {
		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
			runner.runBrokenNodeHook()
			return true
		}
	}
	return false
}

// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store. If not, it loads
// the image from Keep.
func (runner *ContainerRunner) LoadImage() (string, error) {
	runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

	d, err := os.Open(runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage)
	if err != nil {
		return "", err
	}
	defer d.Close()
	allfiles, err := d.Readdirnames(-1)
	if err != nil {
		return "", err
	}
	var tarfiles []string
	for _, fnm := range allfiles {
		if strings.HasSuffix(fnm, ".tar") {
			tarfiles = append(tarfiles, fnm)
		}
	}
	if len(tarfiles) == 0 {
		return "", fmt.Errorf("image collection does not include a .tar image file")
	}
	if len(tarfiles) > 1 {
		return "", fmt.Errorf("cannot choose from multiple tar files in image collection: %v", tarfiles)
	}
	imageID := tarfiles[0][:len(tarfiles[0])-4]
	imageTarballPath := runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage + "/" + imageID + ".tar"
	runner.CrunchLog.Printf("Using Docker image id %q", imageID)

	runner.CrunchLog.Print("Loading Docker image from keep")
	err = runner.executor.LoadImage(imageID, imageTarballPath, runner.Container, runner.ArvMountPoint,
		runner.containerClient)
	if err != nil {
		return "", err
	}
	return imageID, nil
}

func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *exec.Cmd, err error) {
	c = exec.Command(cmdline[0], cmdline[1:]...)

	// Copy our environment, but override ARVADOS_API_TOKEN with
	// the container auth token.
	for _, s := range os.Environ() {
		if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
			c.Env = append(c.Env, s)
		}
	}
	c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

	runner.arvMountLog, err = runner.openLogFile("arv-mount")
	if err != nil {
		return nil, err
	}
	scanner := logScanner{
		Patterns: []string{
			"Block not found error",
			"Unhandled exception during FUSE operation",
		},
		ReportFunc: func(pattern, text string) {
			runner.updateRuntimeStatus(arvadosclient.Dict{
				"warning":       "arv-mount: " + pattern,
				"warningDetail": text,
			})
		},
	}
	c.Stdout = newTimestamper(io.MultiWriter(runner.arvMountLog, os.Stderr))
	c.Stderr = io.MultiWriter(&scanner, newTimestamper(io.MultiWriter(runner.arvMountLog, os.Stderr)))

	runner.CrunchLog.Printf("Running %v", c.Args)

	err = c.Start()
	if err != nil {
		return nil, err
	}

	statReadme := make(chan bool)
	runner.ArvMountExit = make(chan error)

	go func() {
		for {
			time.Sleep(100 * time.Millisecond)
			_, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
			if err == nil {
				statReadme <- true
				return
			}
		}
	}()

	go func() {
		mnterr := c.Wait()
		if mnterr != nil {
			runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
		}
		runner.ArvMountExit <- mnterr
		close(runner.ArvMountExit)
	}()

	select {
	case <-statReadme:
	case err := <-runner.ArvMountExit:
		runner.ArvMount = nil
		return nil, err
	}

	return c, nil
}

func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
	if runner.ArvMountPoint == "" {
		runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
	}
	return
}

func copyfile(src string, dst string) (err error) {
	srcfile, err := os.Open(src)
	if err != nil {
		return
	}

	os.MkdirAll(path.Dir(dst), 0777)

	dstfile, err := os.Create(dst)
	if err != nil {
		return
	}
	_, err = io.Copy(dstfile, srcfile)
	if err != nil {
		return
	}

	err = srcfile.Close()
	err2 := dstfile.Close()
	if err == nil {
		err = err2
	}
	return
}

func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
	bindmounts := map[string]bindmount{}
	err := runner.SetupArvMountPoint("keep")
	if err != nil {
		return nil, fmt.Errorf("While creating keep mount temp dir: %v", err)
	}

	token, err := runner.ContainerToken()
	if err != nil {
		return nil, fmt.Errorf("could not get container token: %s", err)
	}
	runner.CrunchLog.Printf("container token %q", token)

	pdhOnly := true
	tmpcount := 0
	arvMountCmd := []string{
		"--storage-classes", strings.Join(runner.Container.OutputStorageClasses, ","),
		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}

	if _, isdocker := runner.executor.(*dockerExecutor); isdocker {
		arvMountCmd = append(arvMountCmd, "--allow-other")
	}

	if runner.Container.RuntimeConstraints.KeepCacheDisk > 0 {
		keepcachedir, err := runner.MkTempDir(runner.parentTemp, "keepcache")
		if err != nil {
			return nil, fmt.Errorf("while creating keep cache temp dir: %v", err)
		}
		arvMountCmd = append(arvMountCmd, "--disk-cache", "--disk-cache-dir", keepcachedir, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheDisk))
	} else if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
		arvMountCmd = append(arvMountCmd, "--ram-cache", "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
	}

	collectionPaths := []string{}
	needCertMount := true
	type copyFile struct {
		src  string
		bind string
	}
	var copyFiles []copyFile

	var binds []string
	for bind := range runner.Container.Mounts {
		binds = append(binds, bind)
	}
	for bind := range runner.SecretMounts {
		if _, ok := runner.Container.Mounts[bind]; ok {
			return nil, fmt.Errorf("secret mount %q conflicts with regular mount", bind)
		}
		if runner.SecretMounts[bind].Kind != "json" &&
			runner.SecretMounts[bind].Kind != "text" {
			return nil, fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
				bind, runner.SecretMounts[bind].Kind)
		}
		binds = append(binds, bind)
	}

	for _, bind := range binds {
		mnt, notSecret := runner.Container.Mounts[bind]
		if !notSecret {
			mnt = runner.SecretMounts[bind]
		}
		if bind == "stdout" || bind == "stderr" {
			// Is it a "file" mount kind?
			if mnt.Kind != "file" {
				return nil, fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
			}

			// Does path start with OutputPath?
			prefix := runner.Container.OutputPath
			if !strings.HasSuffix(prefix, "/") {
				prefix += "/"
			}
			if !strings.HasPrefix(mnt.Path, prefix) {
				return nil, fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
			}
		}

		if bind == "stdin" {
			// Is it a "collection" mount kind?
			if mnt.Kind != "collection" && mnt.Kind != "json" {
				return nil, fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
			}
		}

		if bind == arvadosCertPath {
			needCertMount = false
		}

		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
			if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
				return nil, fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
			}
		}

		switch {
		case mnt.Kind == "collection" && bind != "stdin":
			var src string
			if mnt.UUID != "" && mnt.PortableDataHash != "" {
				return nil, fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
			}
			if mnt.UUID != "" {
				if mnt.Writable {
					return nil, fmt.Errorf("writing to existing collections currently not permitted")
				}
				pdhOnly = false
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
			} else if mnt.PortableDataHash != "" {
				if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					return nil, fmt.Errorf("can never write to a collection specified by portable data hash")
				}
				idx := strings.Index(mnt.PortableDataHash, "/")
				if idx > 0 {
					mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
					mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
					runner.Container.Mounts[bind] = mnt
				}
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
				if mnt.Path != "" && mnt.Path != "." {
					if strings.HasPrefix(mnt.Path, "./") {
						mnt.Path = mnt.Path[2:]
					} else if strings.HasPrefix(mnt.Path, "/") {
						mnt.Path = mnt.Path[1:]
					}
					src += "/" + mnt.Path
				}
			} else {
				src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
				arvMountCmd = append(arvMountCmd, "--mount-tmp", fmt.Sprintf("tmp%d", tmpcount))
				tmpcount += 1
			}

			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = src
				bindmounts[bind] = bindmount{HostPath: src}
			} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
				copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
			} else if mnt.Writable {
				bindmounts[bind] = bindmount{HostPath: src}
			} else {
				bindmounts[bind] = bindmount{HostPath: src, ReadOnly: true}
			}
			collectionPaths = append(collectionPaths, src)

		case mnt.Kind == "tmp":
			var tmpdir string
			tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
			if err != nil {
				return nil, fmt.Errorf("while creating mount temp dir: %v", err)
			}
			st, staterr := os.Stat(tmpdir)
			if staterr != nil {
				return nil, fmt.Errorf("while Stat on temp dir: %v", staterr)
			}
			err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("while Chmod temp dir: %v", err)
			}
			bindmounts[bind] = bindmount{HostPath: tmpdir}
			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = tmpdir
			}

		case mnt.Kind == "json" || mnt.Kind == "text":
			var filedata []byte
			if mnt.Kind == "json" {
				filedata, err = json.Marshal(mnt.Content)
				if err != nil {
					return nil, fmt.Errorf("encoding json data: %v", err)
				}
			} else {
				text, ok := mnt.Content.(string)
				if !ok {
					return nil, fmt.Errorf("content for mount %q must be a string", bind)
				}
				filedata = []byte(text)
			}

			tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
			if err != nil {
				return nil, fmt.Errorf("creating temp dir: %v", err)
			}
			tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
			err = ioutil.WriteFile(tmpfn, filedata, 0444)
			if err != nil {
				return nil, fmt.Errorf("writing temp file: %v", err)
			}
			if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && (notSecret || runner.Container.Mounts[runner.Container.OutputPath].Kind != "collection") {
				// In most cases, if the container
				// specifies a literal file inside the
				// output path, we copy it into the
				// output directory (either a mounted
				// collection or a staging area on the
				// host fs). If it's a secret, it will
				// be skipped when copying output from
				// staging to Keep later.
				copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
			} else {
				// If a secret is outside OutputPath,
				// we bind mount the secret file
				// directly just like other mounts. We
				// also use this strategy when a
				// secret is inside OutputPath but
				// OutputPath is a live collection, to
				// avoid writing the secret to
				// Keep. Attempting to remove a
				// bind-mounted secret file from
				// inside the container will return a
				// "Device or resource busy" error
				// that might not be handled well by
				// the container, which is why we
				// don't use this strategy when
				// OutputPath is a staging directory.
				bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
			}
		}
	}

	if runner.HostOutputDir == "" {
		return nil, fmt.Errorf("output path does not correspond to a writable mount point")
	}

	if needCertMount && runner.Container.RuntimeConstraints.API {
		for _, certfile := range []string{
			// Populated by caller, or sdk/go/arvados init(), or test suite:
			os.Getenv("SSL_CERT_FILE"),
			// Copied from Go 1.21 stdlib (src/crypto/x509/root_linux.go):
			"/etc/ssl/certs/ca-certificates.crt",                // Debian/Ubuntu/Gentoo etc.
			"/etc/pki/tls/certs/ca-bundle.crt",                  // Fedora/RHEL 6
			"/etc/ssl/ca-bundle.pem",                            // OpenSUSE
			"/etc/pki/tls/cacert.pem",                           // OpenELEC
			"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
			"/etc/ssl/cert.pem",                                 // Alpine Linux
		} {
			if _, err := os.Stat(certfile); err == nil {
				bindmounts[arvadosCertPath] = bindmount{HostPath: certfile, ReadOnly: true}
				break
			}
		}
	}

	if pdhOnly {
		// If we are only mounting collections by pdh, make
		// sure we don't subscribe to websocket events to
		// avoid putting undesired load on the API server
		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id", "--disable-event-listening")
	} else {
		arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
	}
	// the by_uuid mount point is used by singularity when writing
	// out docker images converted to SIF
	arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)

	runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
	if err != nil {
		return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
	}
	if runner.hoststatReporter != nil && runner.ArvMount != nil {
		runner.hoststatReporter.ReportPID("arv-mount", runner.ArvMount.Process.Pid)
	}

	for _, p := range collectionPaths {
		if _, err = os.Stat(p); err != nil {
			return nil, fmt.Errorf("while checking that input files exist: %v", err)
		}
	}

	for _, cp := range copyFiles {
		st, err := os.Stat(cp.src)
		if err != nil {
			return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
		if st.IsDir() {
			err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
				if walkerr != nil {
					return walkerr
				}
				target := path.Join(cp.bind, walkpath[len(cp.src):])
				if walkinfo.Mode().IsRegular() {
					copyerr := copyfile(walkpath, target)
					if copyerr != nil {
						return copyerr
					}
					return os.Chmod(target, walkinfo.Mode()|0777)
				} else if walkinfo.Mode().IsDir() {
					mkerr := os.MkdirAll(target, 0777)
					if mkerr != nil {
						return mkerr
					}
					return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
				} else {
					return fmt.Errorf("source %q is not a regular file or directory", cp.src)
				}
			})
		} else if st.Mode().IsRegular() {
			err = copyfile(cp.src, cp.bind)
			if err == nil {
				err = os.Chmod(cp.bind, st.Mode()|0777)
			}
		}
		if err != nil {
			return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
	}

	return bindmounts, nil
}
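
// Illustrative mounts fragment handled by SetupMounts above (field
// shapes per the Arvados container mounts API; paths and the hash are
// placeholders, and the container's output_path is assumed to be
// "/out"):
//
//	"mounts": {
//	  "/tmp":     {"kind": "tmp", "capacity": 1073741824},
//	  "/keep/in": {"kind": "collection", "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0"},
//	  "/out":     {"kind": "tmp"},
//	  "stdout":   {"kind": "file", "path": "/out/output.txt"}
//	}
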
func (runner *ContainerRunner) stopHoststat() error {
	if runner.hoststatReporter == nil {
		return nil
	}
	runner.hoststatReporter.Stop()
	runner.hoststatReporter.LogProcessMemMax(runner.CrunchLog)
	err := runner.hoststatLogger.Close()
	if err != nil {
		return fmt.Errorf("error closing hoststat logs: %v", err)
	}
	return nil
}

func (runner *ContainerRunner) startHoststat() error {
	var err error
	runner.hoststatLogger, err = runner.openLogFile("hoststat")
	if err != nil {
		return err
	}
	runner.hoststatReporter = &crunchstat.Reporter{
		Logger: newLogWriter(newTimestamper(runner.hoststatLogger)),
		// Our own cgroup is the "host" cgroup, in the sense
		// that it accounts for resource usage outside the
		// container. It doesn't count _all_ resource usage on
		// the node.
		//
		// TODO?: Use the furthest ancestor of our own cgroup
		// that has stats available. (Currently crunchstat
		// does not have that capability.)
		Pid:        os.Getpid,
		PollPeriod: runner.statInterval,
	}
	runner.hoststatReporter.Start()
	runner.hoststatReporter.ReportPID("crunch-run", os.Getpid())
	return nil
}

func (runner *ContainerRunner) startCrunchstat() error {
	var err error
	runner.statLogger, err = runner.openLogFile("crunchstat")
	if err != nil {
		return err
	}
	runner.statReporter = &crunchstat.Reporter{
		Pid:    runner.executor.Pid,
		FS:     runner.crunchstatFakeFS,
		Logger: newLogWriter(newTimestamper(runner.statLogger)),
		MemThresholds: map[string][]crunchstat.Threshold{
			"rss": crunchstat.NewThresholdsFromPercentages(runner.Container.RuntimeConstraints.RAM, []int64{90, 95, 99}),
		},
		PollPeriod:      runner.statInterval,
		TempDir:         runner.parentTemp,
		ThresholdLogger: runner.CrunchLog,
	}
	runner.statReporter.Start()
	return nil
}
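
// Worked example for the MemThresholds above: with
// RuntimeConstraints.RAM = 8 GiB, crunchstat logs a warning via
// ThresholdLogger when the container's rss crosses roughly 7.2, 7.6,
// and 7.9 GiB (90%, 95%, and 99% of the requested RAM).
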
type infoCommand struct {
	label string
	cmd   []string
}

// LogHostInfo logs info about the current host, for debugging and
// accounting purposes. Although it's logged as "node-info", this is
// about the environment where crunch-run is actually running, which
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
	w, err := runner.openLogFile("node-info")
	if err != nil {
		return
	}

	commands := []infoCommand{
		{
			label: "Host Information",
			cmd:   []string{"uname", "-a"},
		},
		{
			label: "CPU Information",
			cmd:   []string{"cat", "/proc/cpuinfo"},
		},
		{
			label: "Memory Information",
			cmd:   []string{"cat", "/proc/meminfo"},
		},
		{
			label: "Disk Space",
			cmd:   []string{"df", "-m", "/", os.TempDir()},
		},
		{
			label: "Disk INodes",
			cmd:   []string{"df", "-i", "/", os.TempDir()},
		},
	}

	// Run commands with informational output to be logged.
	for _, command := range commands {
		fmt.Fprintln(w, command.label)
		cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
		cmd.Stdout = w
		cmd.Stderr = w
		if err := cmd.Run(); err != nil {
			err = fmt.Errorf("While running command %q: %v", command.cmd, err)
			fmt.Fprintln(w, err)
			return err
		}
		fmt.Fprintln(w, "")
	}

	err = w.Close()
	if err != nil {
		return fmt.Errorf("While closing node-info logs: %v", err)
	}
	return nil
}

// LogContainerRecord gets and saves the raw JSON container record from the API server
func (runner *ContainerRunner) LogContainerRecord() error {
	logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}})
	if !logged && err == nil {
		err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
	}
	return err
}

// LogNodeRecord logs the current host's InstanceType config entry, if
// running via arvados-dispatch-cloud.
func (runner *ContainerRunner) LogNodeRecord() error {
	it := os.Getenv("InstanceType")
	if it == "" {
		// Not dispatched by arvados-dispatch-cloud.
		return nil
	}
	// Save InstanceType config fragment received from dispatcher
	// on stdin.
	w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return err
	}
	defer w.Close()
	_, err = io.WriteString(w, it)
	if err != nil {
		return err
	}
	return w.Close()
}

func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}) (logged bool, err error) {
	writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return false, err
	}
	reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
	if err != nil {
		return false, fmt.Errorf("error getting %s record: %v", label, err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	var resp map[string]interface{}
	if err = dec.Decode(&resp); err != nil {
		return false, fmt.Errorf("error decoding %s list response: %v", label, err)
	}
	items, ok := resp["items"].([]interface{})
	if !ok {
		return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
	} else if len(items) < 1 {
		return false, nil
	}

	// Re-encode it using indentation to improve readability
	enc := json.NewEncoder(writer)
	enc.SetIndent("", " ")
	if err = enc.Encode(items[0]); err != nil {
		return false, fmt.Errorf("error logging %s record: %v", label, err)
	}
	err = writer.Close()
	if err != nil {
		return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
	}
	return true, nil
}

func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
	stdoutPath := mntPath[len(runner.Container.OutputPath):]
	index := strings.LastIndex(stdoutPath, "/")
	if index > 0 {
		subdirs := stdoutPath[:index]
		if subdirs != "" {
			st, err := os.Stat(runner.HostOutputDir)
			if err != nil {
				return nil, fmt.Errorf("While Stat on temp dir: %v", err)
			}
			stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
			err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
			}
		}
	}
	stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
	if err != nil {
		return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
	}

	return stdoutFile, nil
}

// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {
	var stdin io.Reader
	if mnt, ok := runner.Container.Mounts["stdin"]; ok {
		switch mnt.Kind {
		case "collection":
			var collID string
			if mnt.UUID != "" {
				collID = mnt.UUID
			} else {
				collID = mnt.PortableDataHash
			}
			path := runner.ArvMountPoint + "/by_id/" + collID + "/" + mnt.Path
			f, err := os.Open(path)
			if err != nil {
				return err
			}
			stdin = f
			runner.executorStdin = f
		case "json":
			j, err := json.Marshal(mnt.Content)
			if err != nil {
				return fmt.Errorf("error encoding stdin json data: %v", err)
			}
			stdin = bytes.NewReader(j)
			runner.executorStdin = io.NopCloser(nil)
		default:
			return fmt.Errorf("stdin mount has unsupported kind %q", mnt.Kind)
		}
	} else {
		stdin = bytes.NewReader(nil)
		runner.executorStdin = io.NopCloser(nil)
	}

	var stdout, stderr io.Writer
	if mnt, ok := runner.Container.Mounts["stdout"]; ok {
		f, err := runner.getStdoutFile(mnt.Path)
		if err != nil {
			return err
		}
		stdout = f
		runner.executorStdout = f
	} else if w, err := runner.openLogFile("stdout"); err != nil {
		return err
	} else {
		stdout = newTimestamper(w)
		runner.executorStdout = w
	}

	if mnt, ok := runner.Container.Mounts["stderr"]; ok {
		f, err := runner.getStdoutFile(mnt.Path)
		if err != nil {
			return err
		}
		stderr = f
		runner.executorStderr = f
	} else if w, err := runner.openLogFile("stderr"); err != nil {
		return err
	} else {
		stderr = newTimestamper(w)
		runner.executorStderr = w
	}

	env := runner.Container.Environment
	enableNetwork := runner.enableNetwork == "always"
	if runner.Container.RuntimeConstraints.API {
		enableNetwork = true
		tok, err := runner.ContainerToken()
		if err != nil {
			return err
		}
		env = map[string]string{}
		for k, v := range runner.Container.Environment {
			env[k] = v
		}
		env["ARVADOS_API_TOKEN"] = tok
		env["ARVADOS_API_HOST"] = os.Getenv("ARVADOS_API_HOST")
		env["ARVADOS_API_HOST_INSECURE"] = os.Getenv("ARVADOS_API_HOST_INSECURE")
		env["ARVADOS_KEEP_SERVICES"] = os.Getenv("ARVADOS_KEEP_SERVICES")
	}
	workdir := runner.Container.Cwd
	if workdir == "." {
		// both "" and "." mean default
		workdir = ""
	}
	ram := runner.Container.RuntimeConstraints.RAM
	if !runner.enableMemoryLimit {
		ram = 0
	}

	if runner.Container.RuntimeConstraints.GPU.Stack == "cuda" {
		nvidiaModprobe(runner.CrunchLog)
	}

	return runner.executor.Create(containerSpec{
		Image:          imageID,
		VCPUs:          runner.Container.RuntimeConstraints.VCPUs,
		RAM:            ram,
		WorkingDir:     workdir,
		Env:            env,
		BindMounts:     bindmounts,
		Command:        runner.Container.Command,
		EnableNetwork:  enableNetwork,
		GPUStack:       runner.Container.RuntimeConstraints.GPU.Stack,
		GPUDeviceCount: runner.Container.RuntimeConstraints.GPU.DeviceCount,
		NetworkMode:    runner.networkMode,
		CgroupParent:   runner.setCgroupParent,
		Stdin:          stdin,
		Stdout:         stdout,
		Stderr:         stderr,
	})
}

// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
	runner.CrunchLog.Printf("Starting container")
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	err := runner.executor.Start()
	if err != nil {
		var advice string
		if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
			advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
		}
		return fmt.Errorf("could not start container: %v%s", err, advice)
	}
	return nil
}

// WaitFinish waits for the container to terminate, capture the exit code, and
// close the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() error {
	runner.CrunchLog.Print("Waiting for container to finish")
	var timeout <-chan time.Time
	if s := runner.Container.SchedulingParameters.MaxRunTime; s > 0 {
		timeout = time.After(time.Duration(s) * time.Second)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		select {
		case <-timeout:
			runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
			runner.stop(nil)
		case <-runner.ArvMountExit:
			runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
			runner.stop(nil)
		case <-ctx.Done():
		}
	}()
	exitcode, err := runner.executor.Wait(ctx)
	if err != nil {
		runner.checkBrokenNode(err)
		return err
	}
	runner.ExitCode = &exitcode

	var extra string
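	// Illustrative example: a container killed by the OOM killer
	// typically exits with raw status 137 (0x80|9), which the code
	// below logs as "Container exited with status code 137
	// (signal 9, SIGKILL)".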
	if exitcode&0x80 != 0 {
		// Convert raw exit status (0x80 + signal number) to a
		// string to log after the code, like " (signal 101)"
		// or " (signal 9, killed)"
		sig := syscall.WaitStatus(exitcode).Signal()
		if name := unix.SignalName(sig); name != "" {
			extra = fmt.Sprintf(" (signal %d, %s)", sig, name)
		} else {
			extra = fmt.Sprintf(" (signal %d)", sig)
		}
	}
	runner.CrunchLog.Printf("Container exited with status code %d%s", exitcode, extra)
	err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
		"select":    []string{"uuid"},
		"container": arvadosclient.Dict{"exit_code": exitcode},
	})
	if err != nil {
		runner.CrunchLog.Printf("ignoring error updating exit_code: %s", err)
	}

	var returnErr error
	if err = runner.executorStdin.Close(); err != nil {
		err = fmt.Errorf("error closing container stdin: %s", err)
		runner.CrunchLog.Printf("%s", err)
		returnErr = err
	}
	if err = runner.executorStdout.Close(); err != nil {
		err = fmt.Errorf("error closing container stdout: %s", err)
		runner.CrunchLog.Printf("%s", err)
		if returnErr == nil {
			returnErr = err
		}
	}
	if err = runner.executorStderr.Close(); err != nil {
		err = fmt.Errorf("error closing container stderr: %s", err)
		runner.CrunchLog.Printf("%s", err)
		if returnErr == nil {
			returnErr = err
		}
	}

	if runner.statReporter != nil {
		runner.statReporter.Stop()
		runner.statReporter.LogMaxima(runner.CrunchLog, map[string]int64{
			"rss": runner.Container.RuntimeConstraints.RAM,
		})
		err = runner.statLogger.Close()
		if err != nil {
			runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
		}
	}
	return returnErr
}

func (runner *ContainerRunner) updateLogs() {
	ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
	defer ticker.Stop()

	sigusr1 := make(chan os.Signal, 1)
	signal.Notify(sigusr1, syscall.SIGUSR1)
	defer signal.Stop(sigusr1)

	saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
	saveAtSize := crunchLogUpdateSize
	var savedSize int64
	for {
		select {
		case <-ticker.C:
		case <-sigusr1:
			saveAtTime = time.Now()
		}
		runner.logMtx.Lock()
		done := runner.LogsPDH != nil
		runner.logMtx.Unlock()
		if done {
			return
		}
		size := runner.LogCollection.Size()
		if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
			continue
		}
		saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
		saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
		saved, err := runner.saveLogCollection(false)
		if err != nil {
			runner.CrunchLog.Printf("error updating log collection: %s", err)
			continue
		}
		savedSize = size

		err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
			"select": []string{"uuid"},
			"container": arvadosclient.Dict{
				"log": saved.PortableDataHash,
			},
		})
		if err != nil {
			runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
			continue
		}
	}
}

var spotInterruptionCheckInterval = 5 * time.Second
var ec2MetadataBaseURL = "http://169.254.169.254"

const ec2TokenTTL = time.Second * 21600
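
// When a spot interruption is scheduled, the instance-action metadata
// value decoded below is a small JSON document (shape per AWS's spot
// interruption notice documentation; values here are placeholders):
//
//	{"action": "terminate", "time": "2024-01-02T15:04:05Z"}
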
func (runner *ContainerRunner) checkSpotInterruptionNotices() {
	type ec2metadata struct {
		Action string    `json:"action"`
		Time   time.Time `json:"time"`
	}
	runner.CrunchLog.Printf("Checking for spot instance interruptions every %v using instance metadata at %s", spotInterruptionCheckInterval, ec2MetadataBaseURL)
	var metadata ec2metadata
	var token string
	var tokenExp time.Time
	check := func() error {
		ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
		defer cancel()
		if token == "" || tokenExp.Sub(time.Now()) < time.Minute {
			req, err := http.NewRequestWithContext(ctx, http.MethodPut, ec2MetadataBaseURL+"/latest/api/token", nil)
			if err != nil {
				return err
			}
			req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", fmt.Sprintf("%d", int(ec2TokenTTL/time.Second)))
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				return fmt.Errorf("%s", resp.Status)
			}
			newtoken, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return err
			}
			token = strings.TrimSpace(string(newtoken))
			tokenExp = time.Now().Add(ec2TokenTTL)
		}
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, ec2MetadataBaseURL+"/latest/meta-data/spot/instance-action", nil)
		if err != nil {
			return err
		}
		req.Header.Set("X-aws-ec2-metadata-token", token)
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		switch resp.StatusCode {
		case http.StatusOK:
		case http.StatusNotFound:
			// "If Amazon EC2 is not preparing to stop or
			// terminate the instance, or if you
			// terminated the instance yourself,
			// instance-action is not present in the
			// instance metadata and you receive an HTTP
			// 404 error when you try to retrieve it."
			metadata = ec2metadata{}
			return nil
		case http.StatusUnauthorized:
			token = ""
			return fmt.Errorf("%s", resp.Status)
		default:
			return fmt.Errorf("%s", resp.Status)
		}
		nextmetadata := ec2metadata{}
		err = json.NewDecoder(resp.Body).Decode(&nextmetadata)
		if err != nil {
			return err
		}
		metadata = nextmetadata
		return nil
	}
	failures := 0
	var lastmetadata ec2metadata
	for range time.NewTicker(spotInterruptionCheckInterval).C {
		err := check()
		if err != nil {
			message := fmt.Sprintf("Spot instance interruption check was inconclusive: %s", err)
			if failures++; failures > 5 {
				runner.CrunchLog.Printf("%s -- now giving up after too many consecutive errors", message)
				return
			}
			runner.CrunchLog.Printf("%s -- will retry in %v", message, spotInterruptionCheckInterval)
			continue
		}
		failures = 0
		if metadata.Action != "" && metadata != lastmetadata {
			lastmetadata = metadata
			text := fmt.Sprintf("Cloud provider scheduled instance %s at %s", metadata.Action, metadata.Time.UTC().Format(time.RFC3339))
			runner.CrunchLog.Printf("%s", text)
			runner.updateRuntimeStatus(arvadosclient.Dict{
				"warning":          "preemption notice",
				"warningDetail":    text,
				"preemptionNotice": text,
			})
			if proc, err := os.FindProcess(os.Getpid()); err == nil {
				// trigger updateLogs
				proc.Signal(syscall.SIGUSR1)
			}
		}
	}
}

func (runner *ContainerRunner) updateRuntimeStatus(status arvadosclient.Dict) {
	err := runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
		"select": []string{"uuid"},
		"container": arvadosclient.Dict{
			"runtime_status": status,
		},
	})
	if err != nil {
		runner.CrunchLog.Printf("error updating container runtime_status: %s", err)
	}
}

// CaptureOutput saves data from the container's output directory if
// needed, and updates the container output accordingly.
func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) error {
	if runner.Container.RuntimeConstraints.API {
		// Output may have been set directly by the container, so
		// refresh the container record to check.
		err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
			arvadosclient.Dict{
				"select": []string{"output"},
			}, &runner.Container)
		if err != nil {
			return err
		}
		if runner.Container.Output != "" {
			// Container output is already set.
			runner.OutputPDH = &runner.Container.Output
			return nil
		}
	}

	txt, err := (&copier{
		client:        runner.containerClient,
		keepClient:    runner.ContainerKeepClient,
		hostOutputDir: runner.HostOutputDir,
		ctrOutputDir:  runner.Container.OutputPath,
		globs:         runner.Container.OutputGlob,
		bindmounts:    bindmounts,
		mounts:        runner.Container.Mounts,
		secretMounts:  runner.SecretMounts,
		logger:        runner.CrunchLog,
	}).Copy()
	if err != nil {
		return err
	}
	if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
		runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
		fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
		if err != nil {
			return err
		}
		txt, err = fs.MarshalManifest(".")
		if err != nil {
			return err
		}
	}
	var resp arvados.Collection
	err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
		"ensure_unique_name": true,
		"select":             []string{"portable_data_hash"},
		"collection": arvadosclient.Dict{
			"is_trashed":    true,
			"name":          "output for " + runner.Container.UUID,
			"manifest_text": txt,
		},
	}, &resp)
	if err != nil {
		return fmt.Errorf("error creating output collection: %v", err)
	}
	runner.OutputPDH = &resp.PortableDataHash
	return nil
}

func (runner *ContainerRunner) CleanupDirs() {
	if runner.ArvMount != nil {
		var delay int64 = 8
		umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
		umount.Stdout = runner.CrunchLog
		umount.Stderr = runner.CrunchLog
		runner.CrunchLog.Printf("Running %v", umount.Args)
		umnterr := umount.Start()
		if umnterr != nil {
			runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
			runner.ArvMount.Process.Kill()
		} else {
			// If arv-mount --unmount gets stuck for any reason, we
			// don't want to wait for it forever. Do Wait() in a goroutine
			// so it doesn't block crunch-run.
			umountExit := make(chan error)
			go func() {
				mnterr := umount.Wait()
				if mnterr != nil {
					runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
				}
				umountExit <- mnterr
			}()

			for again := true; again; {
				again = false
				select {
				case <-umountExit:
					umount = nil
					again = true
				case <-runner.ArvMountExit:
				case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
					runner.CrunchLog.Printf("Timed out waiting for unmount")
					if umount != nil {
						umount.Process.Kill()
					}
					runner.ArvMount.Process.Kill()
				}
			}
		}
		runner.ArvMount = nil
	}

	if runner.ArvMountPoint != "" {
		if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
			runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
		}
		runner.ArvMountPoint = ""
	}

	if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
		runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
	}
}

// CommitLogs posts the collection containing the final container logs.
func (runner *ContainerRunner) CommitLogs() error {
	func() {
		// Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
		runner.cStateLock.Lock()
		defer runner.cStateLock.Unlock()

		runner.CrunchLog.Print(runner.finalState)

		if runner.arvMountLog != nil {
			runner.arvMountLog.Close()
		}

		// From now on just log to stderr, in case there are
		// any other further errors (such as failing to write
		// the log to Keep!) while shutting down
		runner.CrunchLog = newLogWriter(newTimestamper(newStringPrefixer(os.Stderr, runner.Container.UUID+" ")))
	}()

	if runner.keepstoreLogger != nil {
		// Flush any buffered logs from our local keepstore
		// process. Discard anything logged after this point
		// -- it won't end up in the log collection, so
		// there's no point writing it to the collectionfs.
		runner.keepstoreLogbuf.SetWriter(io.Discard)
		runner.keepstoreLogger.Close()
		runner.keepstoreLogger = nil
	}

	if runner.LogsPDH != nil {
		// If we have already assigned something to LogsPDH,
		// we must be closing the re-opened log, which won't
		// end up getting attached to the container record and
		// therefore doesn't need to be saved as a collection
		// -- it exists only to send logs to other channels.
		return nil
	}

	saved, err := runner.saveLogCollection(true)
	if err != nil {
		return fmt.Errorf("error saving log collection: %s", err)
	}
	runner.logMtx.Lock()
	defer runner.logMtx.Unlock()
	runner.LogsPDH = &saved.PortableDataHash
	return nil
}

// Create/update the log collection. Return value has UUID and
// PortableDataHash fields populated, but others may be blank.
func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
	runner.logMtx.Lock()
	defer runner.logMtx.Unlock()
	if runner.LogsPDH != nil {
		// Already finalized.
		return
	}
	updates := arvadosclient.Dict{
		"name": "logs for " + runner.Container.UUID,
	}
	mt, err1 := runner.LogCollection.MarshalManifest(".")
	if err1 == nil {
		// Only send updated manifest text if there was no
		// error.
		updates["manifest_text"] = mt
	}

	// Even if flushing the manifest had an error, we still want
	// to update the log record, if possible, to push the trash_at
	// and delete_at times into the future. Details on bug
	if final {
		updates["is_trashed"] = true
	} else {
		// We set trash_at so this collection gets
		// automatically cleaned up eventually. It used to be
		// 12 hours but we had a situation where the API
		// server was down over a weekend but the containers
		// kept running such that the log collection got
		// trashed, so now we make it 2 weeks. refs #20378
		exp := time.Now().Add(time.Duration(24*14) * time.Hour)
		updates["trash_at"] = exp
		updates["delete_at"] = exp
	}
	reqBody := arvadosclient.Dict{
		"select":     []string{"uuid", "portable_data_hash"},
		"collection": updates,
	}
	var err2 error
	if runner.logUUID == "" {
		reqBody["ensure_unique_name"] = true
		err2 = runner.DispatcherArvClient.Create("collections", reqBody, &response)
	} else {
		err2 = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
	}
	if err2 == nil {
		runner.logUUID = response.UUID
	}
	if err1 != nil || err2 != nil {
		err = fmt.Errorf("error recording logs: %q, %q", err1, err2)
	}
	return
}

// UpdateContainerRunning updates the container state to "Running"
func (runner *ContainerRunner) UpdateContainerRunning(logId string) error {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	updates := arvadosclient.Dict{
		"gateway_address": runner.gateway.Address,
		"state":           "Running",
	}
	if logId != "" {
		updates["log"] = logId
	}
	return runner.DispatcherArvClient.Update(
		"containers",
		runner.Container.UUID,
		arvadosclient.Dict{
			"select":    []string{"uuid"},
			"container": updates,
		},
		nil,
	)
}

// ContainerToken returns the api_token the container (and any
// arv-mount processes) are allowed to use.
func (runner *ContainerRunner) ContainerToken() (string, error) {
	if runner.token != "" {
		return runner.token, nil
	}

	var auth arvados.APIClientAuthorization
	err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
	if err != nil {
		return "", err
	}
	runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
	return runner.token, nil
}

// UpdateContainerFinal updates the container record state on API
// server to "Complete" or "Cancelled"
func (runner *ContainerRunner) UpdateContainerFinal() error {
	update := arvadosclient.Dict{}
	update["state"] = runner.finalState
	if runner.LogsPDH != nil {
		update["log"] = *runner.LogsPDH
	}
	if runner.ExitCode != nil {
		update["exit_code"] = *runner.ExitCode
	} else {
		update["exit_code"] = nil
	}
	if runner.finalState == "Complete" && runner.OutputPDH != nil {
		update["output"] = *runner.OutputPDH
	}
	update["cost"] = runner.calculateCost(time.Now())
	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
		"select":    []string{"uuid"},
		"container": update,
	}, nil)
}

// IsCancelled returns the value of Cancelled, with goroutine safety.
func (runner *ContainerRunner) IsCancelled() bool {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	return runner.cCancelled
}

func (runner *ContainerRunner) openLogFile(name string) (io.WriteCloser, error) {
	return runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
}

// Run the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
	runner.CrunchLog.Printf("crunch-run %s started", cmd.Version.String())
	runner.CrunchLog.Printf("%s", currentUserAndGroups())
	v, _ := exec.Command("arv-mount", "--version").CombinedOutput()
	runner.CrunchLog.Printf("Using FUSE mount: %s", v)
	runner.CrunchLog.Printf("Using container runtime: %s", runner.executor.Runtime())
	runner.CrunchLog.Printf("Executing container: %s", runner.Container.UUID)
	runner.costStartTime = time.Now()

	hostname, hosterr := os.Hostname()
	if hosterr != nil {
		runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
	} else {
		runner.CrunchLog.Printf("Executing on host '%s'", hostname)
	}

	sigusr2 := make(chan os.Signal, 1)
	signal.Notify(sigusr2, syscall.SIGUSR2)
	defer signal.Stop(sigusr2)

	go runner.handleSIGUSR2(sigusr2)

	runner.finalState = "Queued"

	defer func() {
		runner.CleanupDirs()
		runner.CrunchLog.Printf("crunch-run finished")
	}()

	err = runner.fetchContainerRecord()
	if err != nil {
		return
	}
	if runner.Container.State != "Locked" {
		return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
	}

	var bindmounts map[string]bindmount
	defer func() {
		// checkErr prints e (unless it's nil) and sets err to
		// e (unless err is already non-nil). Thus, if err
		// hasn't already been assigned when Run() returns,
		// this cleanup func will cause Run() to return the
		// first non-nil error that is passed to checkErr().
		checkErr := func(errorIn string, e error) {
			if e == nil {
				return
			}
			runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
			if err == nil {
				err = e
			}
			if runner.finalState == "Complete" {
				// There was an error in the finalization.
				runner.finalState = "Cancelled"
			}
		}

		// Log the error encountered in Run(), if any
		checkErr("Run", err)

		if runner.finalState == "Queued" {
			runner.UpdateContainerFinal()
			return
		}

		if runner.IsCancelled() {
			runner.finalState = "Cancelled"
			// but don't return yet -- we still want to
			// capture partial output and write logs
		}

		if bindmounts != nil {
			checkErr("CaptureOutput", runner.CaptureOutput(bindmounts))
		}
		checkErr("stopHoststat", runner.stopHoststat())
		checkErr("CommitLogs", runner.CommitLogs())
		runner.CleanupDirs()
		checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
	}()

	runner.setupSignals()
	err = runner.startHoststat()
	if err != nil {
		return
	}
	if runner.keepstore != nil {
		runner.hoststatReporter.ReportPID("keepstore", runner.keepstore.Process.Pid)
	}

	// set up FUSE mount and binds
	bindmounts, err = runner.SetupMounts()
	if err != nil {
		runner.finalState = "Cancelled"
		err = fmt.Errorf("While setting up mounts: %v", err)
		return
	}

	// check for and/or load image
	imageID, err := runner.LoadImage()
	if err != nil {
		if !runner.checkBrokenNode(err) {
			// Failed to load image but not due to a "broken node"
			// condition, probably user error.
			runner.finalState = "Cancelled"
		}
		err = fmt.Errorf("failed to load container image: %v", err)
		return
	}

	err = runner.CreateContainer(imageID, bindmounts)
	if err != nil {
		return
	}
	err = runner.LogHostInfo()
	if err != nil {
		return
	}
	err = runner.LogNodeRecord()
	if err != nil {
		return
	}
	err = runner.LogContainerRecord()
	if err != nil {
		return
	}

	if runner.IsCancelled() {
		return
	}

	logCollection, err := runner.saveLogCollection(false)
	var logId string
	if err == nil {
		logId = logCollection.PortableDataHash
	} else {
		runner.CrunchLog.Printf("Error committing initial log collection: %v", err)
	}
	err = runner.UpdateContainerRunning(logId)
	if err != nil {
		return
	}
	runner.finalState = "Cancelled"

	err = runner.startCrunchstat()
	if err != nil {
		return
	}

	err = runner.StartContainer()
	if err != nil {
		runner.checkBrokenNode(err)
		return
	}

	err = runner.WaitFinish()
	if err == nil && !runner.IsCancelled() {
		runner.finalState = "Complete"
	}
	return
}

// Fetch the current container record (uuid = runner.Container.UUID)
// into runner.Container.
func (runner *ContainerRunner) fetchContainerRecord() error {
	reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
	if err != nil {
		return fmt.Errorf("error fetching container record: %v", err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	err = dec.Decode(&runner.Container)
	if err != nil {
		return fmt.Errorf("error decoding container record: %v", err)
	}

	var sm struct {
		SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
	}

	containerToken, err := runner.ContainerToken()
	if err != nil {
		return fmt.Errorf("error getting container token: %v", err)
	}

	runner.ContainerArvClient, runner.ContainerKeepClient,
		runner.containerClient, err = runner.MkArvClient(containerToken)
	if err != nil {
		return fmt.Errorf("error creating container API client: %v", err)
	}

	runner.ContainerKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
	runner.DispatcherKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)

	err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
	if err != nil {
		if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
			return fmt.Errorf("error fetching secret_mounts: %v", err)
		}
		// ok && apierr.HttpStatusCode == 404, which means
		// secret_mounts isn't supported by this API server.
	}
	runner.SecretMounts = sm.SecretMounts

	return nil
}

// NewContainerRunner creates a new container runner.
func NewContainerRunner(dispatcherClient *arvados.Client,
	dispatcherArvClient IArvadosClient,
	dispatcherKeepClient IKeepClient,
	containerUUID string) (*ContainerRunner, error) {

	cr := &ContainerRunner{
		dispatcherClient:     dispatcherClient,
		DispatcherArvClient:  dispatcherArvClient,
		DispatcherKeepClient: dispatcherKeepClient,
	}
	cr.RunArvMount = cr.ArvMountCmd
	cr.MkTempDir = ioutil.TempDir
	cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
		cl, err := arvadosclient.MakeArvadosClient()
		if err != nil {
			return nil, nil, nil, err
		}
		cl.ApiToken = token
		kc, err := keepclient.MakeKeepClient(cl)
		if err != nil {
			return nil, nil, nil, err
		}
		c2 := arvados.NewClientFromEnv()
		c2.AuthToken = token
		return cl, kc, c2, nil
	}
	var err error
	cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
	if err != nil {
		return nil, err
	}
	cr.Container.UUID = containerUUID
	f, err := cr.openLogFile("crunch-run")
	if err != nil {
		return nil, err
	}
	cr.CrunchLog = newLogWriter(newTimestamper(io.MultiWriter(f, newStringPrefixer(os.Stderr, cr.Container.UUID+" "))))

	go cr.updateLogs()

	return cr, nil
}

func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
	log := log.New(stderr, "", 0)
	flags := flag.NewFlagSet(prog, flag.ContinueOnError)
	statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
	flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree (obsolete, ignored)")
	flags.String("cgroup-parent", "docker", "name of container's parent cgroup (obsolete, ignored)")
	cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given `subsystem` as parent cgroup for container (subsystem argument is only relevant for cgroups v1; in cgroups v2 / unified mode, any non-empty value means use current cgroup); if empty, use the docker daemon's default cgroup parent. See https://doc.arvados.org/install/crunch2-slurm/install-dispatch.html#CrunchRunCommand-cgroups")
	caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
	detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
	stdinConfig := flags.Bool("stdin-config", false, "Load config and environment variables from JSON message on stdin")
	configFile := flags.String("config", arvados.DefaultConfigFile, "filename of cluster config file to try loading if -stdin-config=false (default is $ARVADOS_CONFIG)")
	sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
	kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
	list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes (and notify them to use price data passed on stdin)")
	enableMemoryLimit := flags.Bool("enable-memory-limit", true, "tell container runtime to limit container's memory usage")
	enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
	networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
	memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
	runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
	brokenNodeHook := flags.String("broken-node-hook", "", "script to run if node is detected to be broken (for example, Docker daemon is not running)")
	flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
	version := flags.Bool("version", false, "Write version information to stdout and exit 0.")

	ignoreDetachFlag := false
	if len(args) > 0 && args[0] == "-no-detach" {
		// This process was invoked by a parent process, which
		// has passed along its own arguments, including
		// -detach, after the leading -no-detach flag. Strip
		// the leading -no-detach flag (it's not recognized by
		// flags.Parse()) and ignore the -detach flag that
		// comes later.
		args = args[1:]
		ignoreDetachFlag = true
	}

	if ok, code := cmd.ParseFlags(flags, prog, args, "container-uuid", stderr); !ok {
		return code
	} else if *version {
		fmt.Fprintln(stdout, prog, cmd.Version.String())
		return 0
	} else if !*list && flags.NArg() != 1 {
		fmt.Fprintf(stderr, "missing required argument: container-uuid (try -help)\n")
		return 2
	}

	containerUUID := flags.Arg(0)

	switch {
	case *detach && !ignoreDetachFlag:
		return Detach(containerUUID, prog, args, stdin, stdout, stderr)
	case *kill >= 0:
		return KillProcess(containerUUID, syscall.Signal(*kill), stdout, stderr)
	case *list:
		return ListProcesses(stdin, stdout, stderr)
	}

	if len(containerUUID) != 27 {
		log.Printf("usage: %s [options] UUID", prog)
		return 1
	}

	var keepstoreLogbuf bufThenWrite
	var conf ConfigData
	if *stdinConfig {
		err := json.NewDecoder(stdin).Decode(&conf)
		if err != nil {
			log.Printf("decode stdin: %s", err)
			return 1
		}
		for k, v := range conf.Env {
			err = os.Setenv(k, v)
			if err != nil {
				log.Printf("setenv(%q): %s", k, err)
				return 1
			}
		}
		if conf.Cluster != nil {
			// ClusterID is missing from the JSON
			// representation, but we need it to generate
			// a valid config file for keepstore, so we
			// fill it using the container UUID prefix.
			conf.Cluster.ClusterID = containerUUID[:5]
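			// For example, a containerUUID of
			// "zzzzz-dz642-queuedcontainer" (placeholder)
			// yields ClusterID "zzzzz".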
		}
	} else {
		conf = hpcConfData(containerUUID, *configFile, io.MultiWriter(&keepstoreLogbuf, stderr))
	}

	log.Printf("crunch-run %s started", cmd.Version.String())
	time.Sleep(*sleep)

	if *caCertsPath != "" {
		os.Setenv("SSL_CERT_FILE", *caCertsPath)
	}

	keepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))
	if err != nil {
		log.Print(err)
		return 1
	}
	if keepstore != nil {
		defer keepstore.Process.Kill()
	}

	api, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Printf("%s: %v", containerUUID, err)
		return 1
	}
	// arvadosclient now interprets Retries=10 to mean
	// Timeout=10m, retrying with exponential backoff + jitter.
	api.Retries = 10

	kc, err := keepclient.MakeKeepClient(api)
	if err != nil {
		log.Printf("%s: %v", containerUUID, err)
		return 1
	}

	cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
	if err != nil {
		log.Print(err)
		return 1
	}

	cr.keepstore = keepstore
	if keepstore == nil {
		// Log explanation (if any) for why we're not running
		// a local keepstore.
		var buf bytes.Buffer
		keepstoreLogbuf.SetWriter(&buf)
		if buf.Len() > 0 {
			cr.CrunchLog.Printf("%s", strings.TrimSpace(buf.String()))
		}
	} else if logWhat := conf.Cluster.Containers.LocalKeepLogsToContainerLog; logWhat == "none" {
		cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
		keepstoreLogbuf.SetWriter(io.Discard)
	} else {
		cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s, writing logs to keepstore.txt in log collection", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
		cr.keepstoreLogger, err = cr.openLogFile("keepstore")
		if err != nil {
			log.Print(err)
			return 1
		}

		var writer io.WriteCloser = cr.keepstoreLogger
		if logWhat == "errors" {
			writer = &filterKeepstoreErrorsOnly{WriteCloser: writer}
		} else if logWhat != "all" {
			// should have been caught earlier by
			// dispatcher's config loader
			log.Printf("invalid value for Containers.LocalKeepLogsToContainerLog: %q", logWhat)
			return 1
		}
		err = keepstoreLogbuf.SetWriter(writer)
		if err != nil {
			log.Print(err)
			return 1
		}
		cr.keepstoreLogbuf = &keepstoreLogbuf
	}

	switch *runtimeEngine {
	case "docker":
		cr.executor, err = newDockerExecutor(containerUUID, cr.CrunchLog.Printf, cr.containerWatchdogInterval)
	case "singularity":
		cr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)
	default:
		cr.CrunchLog.Printf("%s: unsupported RuntimeEngine %q", containerUUID, *runtimeEngine)
		return 1
	}
	if err != nil {
		cr.CrunchLog.Printf("%s: %v", containerUUID, err)
		cr.checkBrokenNode(err)
		return 1
	}
	defer cr.executor.Close()

2045 cr.brokenNodeHook = *brokenNodeHook
2047 gwAuthSecret := os.Getenv("GatewayAuthSecret")
2048 os.Unsetenv("GatewayAuthSecret")
2049 if gwAuthSecret == "" {
2050 // not safe to run a gateway service without an auth secret
2052 cr.CrunchLog.Printf("Not starting a gateway server (GatewayAuthSecret was not provided by dispatcher)")
2054 gwListen := os.Getenv("GatewayAddress")
2055 cr.gateway = Gateway{
Address: gwListen,
2057 AuthSecret: gwAuthSecret,
2058 ContainerUUID: containerUUID,
2059 Target: cr.executor,
2061 LogCollection: cr.LogCollection,
if gwListen == "" {
2064 // Direct connection won't work, so we use the
2065 // gateway_address field to indicate the
2066 // internalURL of the controller process that
2067 // has the current tunnel connection.
2068 cr.gateway.ArvadosClient = cr.dispatcherClient
2069 cr.gateway.UpdateTunnelURL = func(url string) {
2070 cr.gateway.Address = "tunnel " + url
2071 cr.DispatcherArvClient.Update("containers", containerUUID,
2073 "select": []string{"uuid"},
2074 "container": arvadosclient.Dict{"gateway_address": cr.gateway.Address},
2078 err = cr.gateway.Start()
2080 log.Printf("error starting gateway server: %s", err)
2085 parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerUUID+".")
2087 log.Printf("%s: %v", containerUUID, tmperr)
2091 cr.parentTemp = parentTemp
2092 cr.statInterval = *statInterval
2093 cr.enableMemoryLimit = *enableMemoryLimit
2094 cr.enableNetwork = *enableNetwork
2095 cr.networkMode = *networkMode
2096 if *cgroupParentSubsystem != "" {
2097 p, err := findCgroup(os.DirFS("/"), *cgroupParentSubsystem)
2099 log.Printf("fatal: cgroup parent subsystem: %s", err)
2102 cr.setCgroupParent = p
2105 if conf.EC2SpotCheck {
2106 go cr.checkSpotInterruptionNotices()
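// checkSpotInterruptionNotices (defined elsewhere in this file) polls
// the EC2 instance metadata service; AWS publishes interruption
// notices at http://169.254.169.254/latest/meta-data/spot/instance-action.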
}

runerr := cr.Run()

2111 if *memprofile != "" {
2112 f, err := os.Create(*memprofile)
2114 log.Printf("could not create memory profile: %s", err)
2116 runtime.GC() // get up-to-date statistics
2117 if err := pprof.WriteHeapProfile(f); err != nil {
2118 log.Printf("could not write memory profile: %s", err)
2120 closeerr := f.Close()
2121 if closeerr != nil {
2122 log.Printf("closing memprofile file: %s", closeerr)
2127 log.Printf("%s: %v", containerUUID, runerr)
2133 // hpcConfData tries to load ConfigData in an HPC (slurm/lsf)
2134 // environment: it loads the cluster config from the specified file
2135 // and (if that works) gets the runtime_constraints container field
2136 // from controller to determine the number of VCPUs, so KeepBuffers can be calculated.
2137 func hpcConfData(uuid string, configFile string, stderr io.Writer) ConfigData {
2139 conf.Cluster = loadClusterConfigFile(configFile, stderr)
2140 if conf.Cluster == nil {
2141 // skip loading the container record -- we won't be
2142 // able to start local keepstore anyway.
2145 arv, err := arvadosclient.MakeArvadosClient()
2147 fmt.Fprintf(stderr, "error setting up arvadosclient: %s\n", err)
2150 // arvadosclient now interprets Retries=10 to mean
2151 // Timeout=10m, retrying with exponential backoff + jitter.
arv.Retries = 10
2153 var ctr arvados.Container
2154 err = arv.Call("GET", "containers", uuid, "", arvadosclient.Dict{"select": []string{"runtime_constraints"}}, &ctr)
2156 fmt.Fprintf(stderr, "error getting container record: %s\n", err)
2159 if ctr.RuntimeConstraints.VCPUs > 0 {
2160 conf.KeepBuffers = ctr.RuntimeConstraints.VCPUs * conf.Cluster.Containers.LocalKeepBlobBuffersPerVCPU
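// For example (hypothetical values): with VCPUs=8 and
// LocalKeepBlobBuffersPerVCPU=1, the local keepstore is configured
// with 8 block buffers, i.e. up to 8 concurrent 64 MiB Keep blocks in
// memory.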
2165 // loadClusterConfigFile loads the cluster config file from the given
2166 // path. If an error occurs, it logs the error to stderr and returns nil.
2167 func loadClusterConfigFile(path string, stderr io.Writer) *arvados.Cluster {
2168 ldr := config.NewLoader(&bytes.Buffer{}, ctxlog.New(stderr, "plain", "info"))
2170 cfg, err := ldr.Load()
2172 fmt.Fprintf(stderr, "could not load config file %s: %s\n", path, err)
2175 cluster, err := cfg.GetCluster("")
2177 fmt.Fprintf(stderr, "could not use config file %s: %s\n", path, err)
2180 fmt.Fprintf(stderr, "loaded config file %s\n", path)
2184 func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, error) {
2185 if configData.KeepBuffers < 1 {
2186 fmt.Fprintf(logbuf, "not starting a local keepstore process because KeepBuffers=%v in config\n", configData.KeepBuffers)
2189 if configData.Cluster == nil {
2190 fmt.Fprint(logbuf, "not starting a local keepstore process because cluster config file was not loaded\n")
2193 for uuid, vol := range configData.Cluster.Volumes {
2194 if len(vol.AccessViaHosts) > 0 {
2195 fmt.Fprintf(logbuf, "not starting a local keepstore process because a volume (%s) uses AccessViaHosts\n", uuid)
2198 if !vol.ReadOnly && vol.Replication < configData.Cluster.Collections.DefaultReplication {
2199 fmt.Fprintf(logbuf, "not starting a local keepstore process because a writable volume (%s) has replication less than Collections.DefaultReplication (%d < %d)\n", uuid, vol.Replication, configData.Cluster.Collections.DefaultReplication)
2204 // Rather than have an alternate way to tell keepstore how
2205 // many buffers to use, etc., when starting it this way, we
2206 // just modify the cluster configuration that we feed it on stdin.
2208 ccfg := *configData.Cluster
2209 ccfg.API.MaxKeepBlobBuffers = configData.KeepBuffers
2210 ccfg.Collections.BlobTrash = false
2211 ccfg.Collections.BlobTrashConcurrency = 0
2212 ccfg.Collections.BlobDeleteConcurrency = 0
2214 localaddr := localKeepstoreAddr()
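// Listening on port 0 lets the kernel choose a free port, which is
// read back from ln.Addr() below.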
2215 ln, err := net.Listen("tcp", net.JoinHostPort(localaddr, "0"))
2219 _, port, err := net.SplitHostPort(ln.Addr().String())
2225 url := "http://" + net.JoinHostPort(localaddr, port)
2227 fmt.Fprintf(logbuf, "starting keepstore on %s\n", url)
2229 var confJSON bytes.Buffer
2230 err = json.NewEncoder(&confJSON).Encode(arvados.Config{
2231 Clusters: map[string]arvados.Cluster{
2232 ccfg.ClusterID: ccfg,
2238 cmd := exec.Command("/proc/self/exe", "keepstore", "-config=-")
2239 if target, err := os.Readlink(cmd.Path); err == nil && strings.HasSuffix(target, ".test") {
2240 // If we're a 'go test' process, running
2241 // /proc/self/exe would start the test suite in a
2242 // child process, which is not what we want.
2243 cmd.Path, _ = exec.LookPath("go")
2244 cmd.Args = append([]string{"go", "run", "../../cmd/arvados-server"}, cmd.Args[1:]...)
2245 cmd.Env = os.Environ()
2247 cmd.Stdin = &confJSON
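// The config document fed to the child on stdin is a complete
// single-cluster arvados.Config, roughly (hypothetical ClusterID):
//
//	{"Clusters":{"zzzzz":{...modified cluster config...}}}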
2250 cmd.Env = append(cmd.Env,
2252 "ARVADOS_SERVICE_INTERNAL_URL="+url)
2255 return nil, fmt.Errorf("error starting keepstore process: %w", err)
2262 ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
2264 poll := time.NewTicker(time.Second / 10)
2266 client := http.Client{}
2268 testReq, err := http.NewRequestWithContext(ctx, "GET", url+"/_health/ping", nil)
2269 testReq.Header.Set("Authorization", "Bearer "+configData.Cluster.ManagementToken)
2273 resp, err := client.Do(testReq)
2276 if resp.StatusCode == http.StatusOK {
2281 return nil, fmt.Errorf("keepstore child process exited")
2283 if ctx.Err() != nil {
2284 return nil, fmt.Errorf("timed out waiting for new keepstore process to report healthy")
2287 os.Setenv("ARVADOS_KEEP_SERVICES", url)
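// With ARVADOS_KEEP_SERVICES set, keep clients created later in this
// process (and in child processes, which inherit the environment)
// send block reads/writes to the local keepstore instead of
// discovering Keep services via the API server.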
2291 // currentUserAndGroups returns the current uid, gid, and groups in a
2292 // format suitable for logging: "crunch-run process has uid=1234(arvados)
2293 // gid=1234(arvados) groups=1234(arvados),114(fuse)"
2294 func currentUserAndGroups() string {
2295 u, err := user.Current()
2297 return fmt.Sprintf("error getting current user ID: %s", err)
2299 s := fmt.Sprintf("crunch-run process has uid=%s(%s) gid=%s", u.Uid, u.Username, u.Gid)
2300 if g, err := user.LookupGroupId(u.Gid); err == nil {
2301 s += fmt.Sprintf("(%s)", g.Name)
2304 if gids, err := u.GroupIds(); err == nil {
2305 for i, gid := range gids {
2310 if g, err := user.LookupGroupId(gid); err == nil {
2311 s += fmt.Sprintf("(%s)", g.Name)
2318 // localKeepstoreAddr returns a suitable local interface address for a
2319 // local keepstore service: currently, the numerically lowest IPv4
2320 // address assigned to a local interface that is not in any of the
2321 // loopback (127/8), CGNAT/VPN (100.64/10), or link-local (169.254/16) ranges.
2322 func localKeepstoreAddr() string {
2324 // Ignore error (proceed with zero IPs)
2325 addrs, _ := processIPs(os.Getpid())
2326 for addr := range addrs {
2327 ip := net.ParseIP(addr)
2332 if ip.Mask(net.CIDRMask(8, 32)).Equal(net.IPv4(127, 0, 0, 0)) ||
2333 ip.Mask(net.CIDRMask(10, 32)).Equal(net.IPv4(100, 64, 0, 0)) ||
2334 ip.Mask(net.CIDRMask(16, 32)).Equal(net.IPv4(169, 254, 0, 0)) {
2338 ips = append(ips, ip)
2343 sort.Slice(ips, func(ii, jj int) bool {
2344 i, j := ips[ii], ips[jj]
2345 if len(i) != len(j) {
2346 return len(i) < len(j)
}
// Tie-break equal-length addresses bytewise, lowest first.
for x := range i {
	if i[x] != j[x] {
		return i[x] < j[x]
	}
}
return false
})
if len(ips) == 0 {
	return "0.0.0.0" // no suitable interface found
}
2355 return ips[0].String()
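// To illustrate the CIDR-mask tests above (a standalone sketch;
// addresses are hypothetical):
//
//	ip := net.ParseIP("100.64.1.2")
//	ip.Mask(net.CIDRMask(10, 32)).Equal(net.IPv4(100, 64, 0, 0)) // true -> excluded (CGNAT/VPN)
//
//	ip = net.ParseIP("10.0.0.5")
//	ip.Mask(net.CIDRMask(10, 32)).Equal(net.IPv4(100, 64, 0, 0)) // false -> eligible

// loadPrices reads lockdir/pricesfile (if present), merges its
// contents into cr.prices via cloud.NormalizePriceHistory, and logs
// any price changes newer than the latest already-known entry.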
2358 func (cr *ContainerRunner) loadPrices() {
2359 buf, err := os.ReadFile(filepath.Join(lockdir, pricesfile))
2361 if !os.IsNotExist(err) {
2362 cr.CrunchLog.Printf("loadPrices: read: %s", err)
2366 var prices []cloud.InstancePrice
2367 err = json.Unmarshal(buf, &prices)
2369 cr.CrunchLog.Printf("loadPrices: decode: %s", err)
2372 cr.pricesLock.Lock()
2373 defer cr.pricesLock.Unlock()
2374 var lastKnown time.Time
2375 if len(cr.prices) > 0 {
2376 lastKnown = cr.prices[0].StartTime
2378 cr.prices = cloud.NormalizePriceHistory(append(prices, cr.prices...))
2379 for i := len(cr.prices) - 1; i >= 0; i-- {
2380 price := cr.prices[i]
2381 if price.StartTime.After(lastKnown) {
2382 cr.CrunchLog.Printf("Instance price changed to %#.3g at %s", price.Price, price.StartTime.UTC())
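// loadPrices expects the file to hold a JSON array of
// cloud.InstancePrice values, e.g. (hypothetical figures, assuming the
// struct's default JSON field names):
//
//	[{"StartTime":"2024-05-01T12:00:00Z","Price":0.0624},
//	 {"StartTime":"2024-05-01T09:00:00Z","Price":0.0655}]

// calculateCost returns the container's estimated cost so far: the
// instance price history integrated over cr.costStartTime..now.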
2387 func (cr *ContainerRunner) calculateCost(now time.Time) float64 {
2388 cr.pricesLock.Lock()
2389 defer cr.pricesLock.Unlock()
2391 // First, make a "prices" slice with the real data as far back
2392 // as it goes, and (if needed) a "since the beginning of time"
2393 // placeholder containing a reasonable guess about what the
2394 // price was between cr.costStartTime and the earliest real
2397 if len(prices) == 0 {
2398 // use price info in InstanceType record initially
2399 // provided by cloud dispatcher
2401 var it arvados.InstanceType
2402 if j := os.Getenv("InstanceType"); j != "" && json.Unmarshal([]byte(j), &it) == nil && it.Price > 0 {
2405 prices = []cloud.InstancePrice{{Price: p}}
2406 } else if prices[len(prices)-1].StartTime.After(cr.costStartTime) {
2407 // guess earlier pricing was the same as the earliest
2408 // price we know about
2409 filler := prices[len(prices)-1]
2410 filler.StartTime = time.Time{}
2411 prices = append(prices, filler)
2414 // Now that our history of price changes goes back at least as
2415 // far as cr.costStartTime, add up the costs for each
2419 for _, ip := range prices {
2420 spanStart := ip.StartTime
2421 if spanStart.After(now) {
2422 // pricing information from the future -- not
2423 // expected from AWS, but possible in
2424 // principle, and exercised by tests.
2428 if spanStart.Before(cr.costStartTime) {
2429 spanStart = cr.costStartTime
2432 cost += ip.Price * spanEnd.Sub(spanStart).Seconds() / 3600
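// Worked example (hypothetical prices): a container that started at
// 00:00 paying $0.12/h, with a price drop to $0.06/h at 00:30, has
// accrued
//
//	0.12*0.5h + 0.06*0.5h = $0.09
//
// when calculateCost runs at 01:00.

// handleSIGUSR2 recalculates the container's cost and pushes it to the
// API server each time SIGUSR2 arrives on sigchan.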
2442 func (runner *ContainerRunner) handleSIGUSR2(sigchan chan os.Signal) {
2445 update := arvadosclient.Dict{
2446 "select": []string{"uuid"},
2447 "container": arvadosclient.Dict{
2448 "cost": runner.calculateCost(time.Now()),
2451 runner.DispatcherArvClient.Update("containers", runner.Container.UUID, update, nil)
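// For example, an operator (or the dispatcher) can request an
// immediate cost update from outside with:
//
//	kill -USR2 <crunch-run pid>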