1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
34 "git.arvados.org/arvados.git/lib/cloud"
35 "git.arvados.org/arvados.git/lib/cmd"
36 "git.arvados.org/arvados.git/lib/config"
37 "git.arvados.org/arvados.git/lib/crunchstat"
38 "git.arvados.org/arvados.git/sdk/go/arvados"
39 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
40 "git.arvados.org/arvados.git/sdk/go/ctxlog"
41 "git.arvados.org/arvados.git/sdk/go/keepclient"
42 "git.arvados.org/arvados.git/sdk/go/manifest"
43 "golang.org/x/sys/unix"
48 var Command = command{}
50 // ConfigData contains environment variables and (when needed) cluster
51 // configuration, passed from dispatchcloud to crunch-run on stdin.
52 type ConfigData struct {
55 Cluster *arvados.Cluster
58 // IArvadosClient defines the minimal set of Arvados API methods used by crunch-run.
59 type IArvadosClient interface {
60 Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
61 Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
62 Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
63 Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
64 CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
65 Discovery(key string) (interface{}, error)
68 // ErrCancelled is the error returned when the container is cancelled.
69 var ErrCancelled = errors.New("Cancelled")
71 // IKeepClient defines the minimal set of Keep API methods used by crunch-run.
72 type IKeepClient interface {
73 BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error)
74 ReadAt(locator string, p []byte, off int) (int, error)
75 ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
76 LocalLocator(locator string) (string, error)
78 SetStorageClasses(sc []string)
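// Illustrative sketch (not from the original source): because
// crunch-run talks to Keep only through this narrow interface, tests
// can substitute an in-memory double. Two methods are sketched below;
// a real double must implement every method of IKeepClient.
//
//	type stubKeep struct{ storageClasses []string }
//
//	func (s *stubKeep) LocalLocator(loc string) (string, error) { return loc, nil }
//	func (s *stubKeep) SetStorageClasses(sc []string)           { s.storageClasses = sc }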
81 // NewLogWriter is a factory function to create a new log writer.
82 type NewLogWriter func(name string) (io.WriteCloser, error)
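// Illustrative sketch (hypothetical helper, not part of this file):
// the factory type above makes it easy to capture log streams in
// memory during tests instead of writing them to the log collection.
//
//	type nopCloser struct{ io.Writer }
//
//	func (nopCloser) Close() error { return nil }
//
//	func memLogWriter(buf *bytes.Buffer) NewLogWriter {
//		return func(name string) (io.WriteCloser, error) {
//			fmt.Fprintf(buf, "=== %s ===\n", name)
//			return nopCloser{buf}, nil
//		}
//	}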
84 type RunArvMount func(cmdline []string, tok string) (*exec.Cmd, error)
86 type MkTempDir func(string, string) (string, error)
88 type PsProcess interface {
89 CmdlineSlice() ([]string, error)
92 // ContainerRunner is the main stateful struct used for a single execution of a container.
94 type ContainerRunner struct {
95 executor containerExecutor
96 executorStdin io.Closer
97 executorStdout io.Closer
98 executorStderr io.Closer
100 // Dispatcher client is initialized with the Dispatcher token.
101 // This is a privileged token used to manage container status and logs.
104 // We have both dispatcherClient and DispatcherArvClient
105 // because there are two different incompatible Arvados Go
106 // SDKs and we have to use both (hopefully this gets fixed in time).
108 dispatcherClient *arvados.Client
109 DispatcherArvClient IArvadosClient
110 DispatcherKeepClient IKeepClient
112 // Container client is initialized with the Container token
113 // This token controls the permissions of the container, and
114 // must be used for operations such as reading collections.
116 // Same comment as above applies to
117 // containerClient/ContainerArvClient.
118 containerClient *arvados.Client
119 ContainerArvClient IArvadosClient
120 ContainerKeepClient IKeepClient
122 Container arvados.Container
125 NewLogWriter NewLogWriter
126 CrunchLog *ThrottledLogger
129 LogCollection arvados.CollectionFileSystem
131 RunArvMount RunArvMount
136 Volumes map[string]struct{}
138 SigChan chan os.Signal
139 ArvMountExit chan error
140 SecretMounts map[string]arvados.Mount
141 MkArvClient func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
144 costStartTime time.Time
147 keepstoreLogger io.WriteCloser
148 keepstoreLogbuf *bufThenWrite
149 statLogger io.WriteCloser
150 statReporter *crunchstat.Reporter
151 hoststatLogger io.WriteCloser
152 hoststatReporter *crunchstat.Reporter
153 statInterval time.Duration
155 // What we expect the container's cgroup parent to be.
156 expectCgroupParent string
157 // What we tell docker to use as the container's cgroup
158 // parent. Note: Ideally we would use the same field for both
159 // expectCgroupParent and setCgroupParent, and just make it
160 // default to "docker". However, when using docker < 1.10 with
161 // systemd, specifying a non-empty cgroup parent (even the
162 // default value "docker") hits a docker bug
163 // (https://github.com/docker/docker/issues/17126). Using two
164 // separate fields makes it possible to use the "expect cgroup
165 // parent to be X" feature even on sites where the "specify
166 // cgroup parent" feature breaks.
167 setCgroupParent string
169 cStateLock sync.Mutex
170 cCancelled bool // StopContainer() invoked
172 enableMemoryLimit bool
173 enableNetwork string // one of "default" or "always"
174 networkMode string // "none", "host", or "" -- passed through to executor
175 brokenNodeHook string // script to run if node appears to be broken
176 arvMountLog *ThrottledLogger
178 containerWatchdogInterval time.Duration
182 prices []cloud.InstancePrice
183 pricesLock sync.Mutex
186 // setupSignals sets up signal handling to gracefully terminate the
187 // underlying container and update state when receiving a TERM, INT or QUIT signal.
189 func (runner *ContainerRunner) setupSignals() {
190 runner.SigChan = make(chan os.Signal, 1)
191 signal.Notify(runner.SigChan, syscall.SIGTERM)
192 signal.Notify(runner.SigChan, syscall.SIGINT)
193 signal.Notify(runner.SigChan, syscall.SIGQUIT)
195 go func(sig chan os.Signal) {
202 // stop the underlying container.
203 func (runner *ContainerRunner) stop(sig os.Signal) {
204 runner.cStateLock.Lock()
205 defer runner.cStateLock.Unlock()
207 runner.CrunchLog.Printf("caught signal: %v", sig)
209 runner.cCancelled = true
210 runner.CrunchLog.Printf("stopping container")
211 err := runner.executor.Stop()
213 runner.CrunchLog.Printf("error stopping container: %s", err)
217 var errorBlacklist = []string{
218 "(?ms).*[Cc]annot connect to the Docker daemon.*",
219 "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
220 "(?ms).*grpc: the connection is unavailable.*",
223 func (runner *ContainerRunner) runBrokenNodeHook() {
224 if runner.brokenNodeHook == "" {
225 path := filepath.Join(lockdir, brokenfile)
226 runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
227 f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
229 runner.CrunchLog.Printf("Error writing %s: %s", path, err)
234 runner.CrunchLog.Printf("Running broken node hook %q", runner.brokenNodeHook)
236 c := exec.Command(runner.brokenNodeHook)
237 c.Stdout = runner.CrunchLog
238 c.Stderr = runner.CrunchLog
241 runner.CrunchLog.Printf("Error running broken node hook: %v", err)
246 func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
247 for _, d := range errorBlacklist {
248 if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
249 runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
250 runner.runBrokenNodeHook()
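// Illustrative sketch (hypothetical helper): the matching above
// amounts to testing the error text against each errorBlacklist
// pattern; the "(?ms)" prefix lets patterns span multiline messages.
//
//	func isBlacklistedError(msg string) bool {
//		for _, pattern := range errorBlacklist {
//			if m, err := regexp.MatchString(pattern, msg); err == nil && m {
//				return true
//			}
//		}
//		return false
//	}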
257 // LoadImage determines the docker image id from the container record and
258 // checks if it is available in the local Docker image store. If not, it loads
259 // the image from Keep.
260 func (runner *ContainerRunner) LoadImage() (string, error) {
261 runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
263 d, err := os.Open(runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage)
268 allfiles, err := d.Readdirnames(-1)
272 var tarfiles []string
273 for _, fnm := range allfiles {
274 if strings.HasSuffix(fnm, ".tar") {
275 tarfiles = append(tarfiles, fnm)
278 if len(tarfiles) == 0 {
279 return "", fmt.Errorf("image collection does not include a .tar image file")
281 if len(tarfiles) > 1 {
282 return "", fmt.Errorf("cannot choose from multiple tar files in image collection: %v", tarfiles)
284 imageID := tarfiles[0][:len(tarfiles[0])-4]
285 imageTarballPath := runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage + "/" + imageID + ".tar"
286 runner.CrunchLog.Printf("Using Docker image id %q", imageID)
288 runner.CrunchLog.Print("Loading Docker image from keep")
289 err = runner.executor.LoadImage(imageID, imageTarballPath, runner.Container, runner.ArvMountPoint,
290 runner.containerClient)
298 func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *exec.Cmd, err error) {
299 c = exec.Command(cmdline[0], cmdline[1:]...)
301 // Copy our environment, but override ARVADOS_API_TOKEN with
302 // the container auth token.
304 for _, s := range os.Environ() {
305 if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
306 c.Env = append(c.Env, s)
309 c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
311 w, err := runner.NewLogWriter("arv-mount")
315 runner.arvMountLog = NewThrottledLogger(w)
316 scanner := logScanner{
319 "Block not found error",
320 "Unhandled exception during FUSE operation",
322 ReportFunc: runner.reportArvMountWarning,
324 c.Stdout = runner.arvMountLog
325 c.Stderr = io.MultiWriter(runner.arvMountLog, os.Stderr, &scanner)
327 runner.CrunchLog.Printf("Running %v", c.Args)
334 statReadme := make(chan bool)
335 runner.ArvMountExit = make(chan error)
340 time.Sleep(100 * time.Millisecond)
341 _, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
353 runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
355 runner.ArvMountExit <- mnterr
356 close(runner.ArvMountExit)
362 case err := <-runner.ArvMountExit:
363 runner.ArvMount = nil
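// Illustrative sketch (hypothetical helper) of the environment
// handling at the top of ArvMountCmd: copy the parent environment,
// drop any inherited ARVADOS_API_TOKEN, and append the container
// token, so arv-mount runs with the container's credentials.
//
//	func envWithToken(environ []string, token string) []string {
//		env := make([]string, 0, len(environ)+1)
//		for _, kv := range environ {
//			if !strings.HasPrefix(kv, "ARVADOS_API_TOKEN=") {
//				env = append(env, kv)
//			}
//		}
//		return append(env, "ARVADOS_API_TOKEN="+token)
//	}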
371 func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
372 if runner.ArvMountPoint == "" {
373 runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
378 func copyfile(src string, dst string) (err error) {
379 srcfile, err := os.Open(src)
384 os.MkdirAll(path.Dir(dst), 0777)
386 dstfile, err := os.Create(dst)
390 _, err = io.Copy(dstfile, srcfile)
395 err = srcfile.Close()
396 err2 := dstfile.Close()
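// Usage sketch (illustrative paths): SetupMounts below uses copyfile
// to stage writable files into the output directory.
//
//	if err := copyfile("/tmp/src/data.txt", "/tmp/out/data.txt"); err != nil {
//		log.Printf("staging failed: %v", err)
//	}
//
// Both Close errors matter: a failed close of the destination can
// mean the data was never flushed to disk.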
409 func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
410 bindmounts := map[string]bindmount{}
411 err := runner.SetupArvMountPoint("keep")
413 return nil, fmt.Errorf("While creating keep mount temp dir: %v", err)
416 token, err := runner.ContainerToken()
418 return nil, fmt.Errorf("could not get container token: %s", err)
420 runner.CrunchLog.Printf("container token %q", token)
424 arvMountCmd := []string{
428 "--storage-classes", strings.Join(runner.Container.OutputStorageClasses, ","),
429 fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
431 if _, isdocker := runner.executor.(*dockerExecutor); isdocker {
432 arvMountCmd = append(arvMountCmd, "--allow-other")
435 if runner.Container.RuntimeConstraints.KeepCacheDisk > 0 {
436 keepcachedir, err := runner.MkTempDir(runner.parentTemp, "keepcache")
438 return nil, fmt.Errorf("while creating keep cache temp dir: %v", err)
440 arvMountCmd = append(arvMountCmd, "--disk-cache", "--disk-cache-dir", keepcachedir, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheDisk))
441 } else if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
442 arvMountCmd = append(arvMountCmd, "--ram-cache", "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
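// Illustrative example of the resulting flags: KeepCacheDisk =
// 10737418240 (10 GiB) produces
//
//	--disk-cache --disk-cache-dir <parentTemp>/keepcacheXXX --file-cache 10737418240
//
// while KeepCacheRAM = 268435456 (256 MiB) produces
//
//	--ram-cache --file-cache 268435456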
445 collectionPaths := []string{}
446 needCertMount := true
447 type copyFile struct {
451 var copyFiles []copyFile
454 for bind := range runner.Container.Mounts {
455 binds = append(binds, bind)
457 for bind := range runner.SecretMounts {
458 if _, ok := runner.Container.Mounts[bind]; ok {
459 return nil, fmt.Errorf("secret mount %q conflicts with regular mount", bind)
461 if runner.SecretMounts[bind].Kind != "json" &&
462 runner.SecretMounts[bind].Kind != "text" {
463 return nil, fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
464 bind, runner.SecretMounts[bind].Kind)
466 binds = append(binds, bind)
470 for _, bind := range binds {
471 mnt, notSecret := runner.Container.Mounts[bind]
473 mnt = runner.SecretMounts[bind]
475 if bind == "stdout" || bind == "stderr" {
476 // Is it a "file" mount kind?
477 if mnt.Kind != "file" {
478 return nil, fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
481 // Does path start with OutputPath?
482 prefix := runner.Container.OutputPath
483 if !strings.HasSuffix(prefix, "/") {
486 if !strings.HasPrefix(mnt.Path, prefix) {
487 return nil, fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
492 // Is it a "collection" mount kind?
493 if mnt.Kind != "collection" && mnt.Kind != "json" {
494 return nil, fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
498 if bind == "/etc/arvados/ca-certificates.crt" {
499 needCertMount = false
502 if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
503 if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
504 return nil, fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
509 case mnt.Kind == "collection" && bind != "stdin":
511 if mnt.UUID != "" && mnt.PortableDataHash != "" {
512 return nil, fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
516 return nil, fmt.Errorf("writing to existing collections currently not permitted")
519 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
520 } else if mnt.PortableDataHash != "" {
521 if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
522 return nil, fmt.Errorf("can never write to a collection specified by portable data hash")
524 idx := strings.Index(mnt.PortableDataHash, "/")
526 mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
527 mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
528 runner.Container.Mounts[bind] = mnt
530 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
531 if mnt.Path != "" && mnt.Path != "." {
532 if strings.HasPrefix(mnt.Path, "./") {
533 mnt.Path = mnt.Path[2:]
534 } else if strings.HasPrefix(mnt.Path, "/") {
535 mnt.Path = mnt.Path[1:]
537 src += "/" + mnt.Path
540 src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
541 arvMountCmd = append(arvMountCmd, "--mount-tmp", fmt.Sprintf("tmp%d", tmpcount))
545 if bind == runner.Container.OutputPath {
546 runner.HostOutputDir = src
547 bindmounts[bind] = bindmount{HostPath: src}
548 } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
549 copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
551 bindmounts[bind] = bindmount{HostPath: src}
554 bindmounts[bind] = bindmount{HostPath: src, ReadOnly: true}
556 collectionPaths = append(collectionPaths, src)
558 case mnt.Kind == "tmp":
560 tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
562 return nil, fmt.Errorf("while creating mount temp dir: %v", err)
564 st, staterr := os.Stat(tmpdir)
566 return nil, fmt.Errorf("while Stat on temp dir: %v", staterr)
568 err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
570 return nil, fmt.Errorf("while Chmod temp dir: %v", err)
572 bindmounts[bind] = bindmount{HostPath: tmpdir}
573 if bind == runner.Container.OutputPath {
574 runner.HostOutputDir = tmpdir
577 case mnt.Kind == "json" || mnt.Kind == "text":
579 if mnt.Kind == "json" {
580 filedata, err = json.Marshal(mnt.Content)
582 return nil, fmt.Errorf("encoding json data: %v", err)
585 text, ok := mnt.Content.(string)
587 return nil, fmt.Errorf("content for mount %q must be a string", bind)
589 filedata = []byte(text)
592 tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
594 return nil, fmt.Errorf("creating temp dir: %v", err)
596 tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
597 err = ioutil.WriteFile(tmpfn, filedata, 0444)
599 return nil, fmt.Errorf("writing temp file: %v", err)
601 if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && (notSecret || runner.Container.Mounts[runner.Container.OutputPath].Kind != "collection") {
602 // In most cases, if the container
603 // specifies a literal file inside the
604 // output path, we copy it into the
605 // output directory (either a mounted
606 // collection or a staging area on the
607 // host fs). If it's a secret, it will
608 // be skipped when copying output from
609 // staging to Keep later.
610 copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
612 // If a secret is outside OutputPath,
613 // we bind mount the secret file
614 // directly just like other mounts. We
615 // also use this strategy when a
616 // secret is inside OutputPath but
617 // OutputPath is a live collection, to
618 // avoid writing the secret to
619 // Keep. Attempting to remove a
620 // bind-mounted secret file from
621 // inside the container will return a
622 // "Device or resource busy" error
623 // that might not be handled well by
624 // the container, which is why we
625 // don't use this strategy when
626 // OutputPath is a staging directory.
627 bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
630 case mnt.Kind == "git_tree":
631 tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
633 return nil, fmt.Errorf("creating temp dir: %v", err)
635 err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
639 bindmounts[bind] = bindmount{HostPath: tmpdir, ReadOnly: true}
643 if runner.HostOutputDir == "" {
644 return nil, fmt.Errorf("output path does not correspond to a writable mount point")
647 if needCertMount && runner.Container.RuntimeConstraints.API {
648 for _, certfile := range arvadosclient.CertFiles {
649 _, err := os.Stat(certfile)
651 bindmounts["/etc/arvados/ca-certificates.crt"] = bindmount{HostPath: certfile, ReadOnly: true}
658 // If we are only mounting collections by pdh, make
659 // sure we don't subscribe to websocket events to
660 // avoid putting undesired load on the API server
661 arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id", "--disable-event-listening")
663 arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
665 // the by_uuid mount point is used by singularity when writing
666 // out docker images converted to SIF
667 arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
668 arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
670 runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
672 return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
674 if runner.hoststatReporter != nil && runner.ArvMount != nil {
675 runner.hoststatReporter.ReportPID("arv-mount", runner.ArvMount.Process.Pid)
678 for _, p := range collectionPaths {
681 return nil, fmt.Errorf("while checking that input files exist: %v", err)
685 for _, cp := range copyFiles {
686 st, err := os.Stat(cp.src)
688 return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
691 err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
695 target := path.Join(cp.bind, walkpath[len(cp.src):])
696 if walkinfo.Mode().IsRegular() {
697 copyerr := copyfile(walkpath, target)
701 return os.Chmod(target, walkinfo.Mode()|0777)
702 } else if walkinfo.Mode().IsDir() {
703 mkerr := os.MkdirAll(target, 0777)
707 return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
709 return fmt.Errorf("source %q is not a regular file or directory", cp.src)
712 } else if st.Mode().IsRegular() {
713 err = copyfile(cp.src, cp.bind)
715 err = os.Chmod(cp.bind, st.Mode()|0777)
719 return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
723 return bindmounts, nil
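// Illustrative sketch of a typical return value (paths are made up):
//
//	map[string]bindmount{
//		"/keep": {HostPath: "/tmp/crunch-run.../keep123/by_id", ReadOnly: true},
//		"/tmp":  {HostPath: "/tmp/crunch-run.../tmp456"},
//		"/out":  {HostPath: "/tmp/crunch-run.../tmp789"},
//	}
//
// Keys are paths inside the container; values are the host paths the
// runtime engine bind-mounts there.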
726 func (runner *ContainerRunner) stopHoststat() error {
727 if runner.hoststatReporter == nil {
730 runner.hoststatReporter.Stop()
731 err := runner.hoststatLogger.Close()
733 return fmt.Errorf("error closing hoststat logs: %v", err)
738 func (runner *ContainerRunner) startHoststat() error {
739 w, err := runner.NewLogWriter("hoststat")
743 runner.hoststatLogger = NewThrottledLogger(w)
744 runner.hoststatReporter = &crunchstat.Reporter{
745 Logger: log.New(runner.hoststatLogger, "", 0),
746 CgroupRoot: runner.cgroupRoot,
747 PollPeriod: runner.statInterval,
749 runner.hoststatReporter.Start()
750 runner.hoststatReporter.ReportPID("crunch-run", os.Getpid())
754 func (runner *ContainerRunner) startCrunchstat() error {
755 w, err := runner.NewLogWriter("crunchstat")
759 runner.statLogger = NewThrottledLogger(w)
760 runner.statReporter = &crunchstat.Reporter{
761 CID: runner.executor.CgroupID(),
762 Logger: log.New(runner.statLogger, "", 0),
763 CgroupParent: runner.expectCgroupParent,
764 CgroupRoot: runner.cgroupRoot,
765 PollPeriod: runner.statInterval,
766 TempDir: runner.parentTemp,
768 runner.statReporter.Start()
772 type infoCommand struct {
777 // LogHostInfo logs info about the current host, for debugging and
778 // accounting purposes. Although it's logged as "node-info", this is
779 // about the environment where crunch-run is actually running, which
780 // might differ from what's described in the node record (see LogNodeRecord).
782 func (runner *ContainerRunner) LogHostInfo() (err error) {
783 w, err := runner.NewLogWriter("node-info")
788 commands := []infoCommand{
790 label: "Host Information",
791 cmd: []string{"uname", "-a"},
794 label: "CPU Information",
795 cmd: []string{"cat", "/proc/cpuinfo"},
798 label: "Memory Information",
799 cmd: []string{"cat", "/proc/meminfo"},
803 cmd: []string{"df", "-m", "/", os.TempDir()},
806 label: "Disk INodes",
807 cmd: []string{"df", "-i", "/", os.TempDir()},
811 // Run commands with informational output to be logged.
812 for _, command := range commands {
813 fmt.Fprintln(w, command.label)
814 cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
817 if err := cmd.Run(); err != nil {
818 err = fmt.Errorf("While running command %q: %v", command.cmd, err)
827 return fmt.Errorf("While closing node-info logs: %v", err)
832 // LogContainerRecord gets and saves the raw JSON container record from the API server
833 func (runner *ContainerRunner) LogContainerRecord() error {
834 logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
835 if !logged && err == nil {
836 err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
841 // LogNodeRecord logs the current host's InstanceType config entry (or
842 // the arvados#node record, if running via crunch-dispatch-slurm).
843 func (runner *ContainerRunner) LogNodeRecord() error {
844 if it := os.Getenv("InstanceType"); it != "" {
845 // Dispatched via arvados-dispatch-cloud. Save
846 // InstanceType config fragment received from
847 // dispatcher on stdin.
848 w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
853 _, err = io.WriteString(w, it)
859 // Dispatched via crunch-dispatch-slurm. Look up
860 // apiserver's node record corresponding to the current hostname.
862 hostname := os.Getenv("SLURMD_NODENAME")
864 hostname, _ = os.Hostname()
866 _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
867 // The "info" field has admin-only info when
868 // obtained with a privileged token, and
869 // should not be logged.
870 node, ok := resp.(map[string]interface{})
878 func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
879 writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
884 ArvClient: runner.DispatcherArvClient,
885 UUID: runner.Container.UUID,
886 loggingStream: label,
890 reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
892 return false, fmt.Errorf("error getting %s record: %v", label, err)
896 dec := json.NewDecoder(reader)
898 var resp map[string]interface{}
899 if err = dec.Decode(&resp); err != nil {
900 return false, fmt.Errorf("error decoding %s list response: %v", label, err)
902 items, ok := resp["items"].([]interface{})
904 return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
905 } else if len(items) < 1 {
911 // Re-encode it using indentation to improve readability
912 enc := json.NewEncoder(w)
913 enc.SetIndent("", " ")
914 if err = enc.Encode(items[0]); err != nil {
915 return false, fmt.Errorf("error logging %s record: %v", label, err)
919 return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
924 func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
925 stdoutPath := mntPath[len(runner.Container.OutputPath):]
926 index := strings.LastIndex(stdoutPath, "/")
928 subdirs := stdoutPath[:index]
930 st, err := os.Stat(runner.HostOutputDir)
932 return nil, fmt.Errorf("While Stat on temp dir: %v", err)
934 stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
935 err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
937 return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
941 stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
943 return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
946 return stdoutFile, nil
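// Worked example (illustrative values): with OutputPath "/out" and
// HostOutputDir "/tmp/out123", a stdout mount path "/out/logs/out.txt"
// yields stdoutPath "/logs/out.txt" and subdirs "/logs", so the
// directory "/tmp/out123/logs" is created and the returned file is
// "/tmp/out123/logs/out.txt".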
949 // CreateContainer creates the docker container.
950 func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {
951 var stdin io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
952 if mnt, ok := runner.Container.Mounts["stdin"]; ok {
959 collID = mnt.PortableDataHash
961 path := runner.ArvMountPoint + "/by_id/" + collID + "/" + mnt.Path
962 f, err := os.Open(path)
968 j, err := json.Marshal(mnt.Content)
970 return fmt.Errorf("error encoding stdin json data: %v", err)
972 stdin = ioutil.NopCloser(bytes.NewReader(j))
974 return fmt.Errorf("stdin mount has unsupported kind %q", mnt.Kind)
978 var stdout, stderr io.WriteCloser
979 if mnt, ok := runner.Container.Mounts["stdout"]; ok {
980 f, err := runner.getStdoutFile(mnt.Path)
985 } else if w, err := runner.NewLogWriter("stdout"); err != nil {
988 stdout = NewThrottledLogger(w)
991 if mnt, ok := runner.Container.Mounts["stderr"]; ok {
992 f, err := runner.getStdoutFile(mnt.Path)
997 } else if w, err := runner.NewLogWriter("stderr"); err != nil {
1000 stderr = NewThrottledLogger(w)
1003 env := runner.Container.Environment
1004 enableNetwork := runner.enableNetwork == "always"
1005 if runner.Container.RuntimeConstraints.API {
1006 enableNetwork = true
1007 tok, err := runner.ContainerToken()
1011 env = map[string]string{}
1012 for k, v := range runner.Container.Environment {
1015 env["ARVADOS_API_TOKEN"] = tok
1016 env["ARVADOS_API_HOST"] = os.Getenv("ARVADOS_API_HOST")
1017 env["ARVADOS_API_HOST_INSECURE"] = os.Getenv("ARVADOS_API_HOST_INSECURE")
1018 env["ARVADOS_KEEP_SERVICES"] = os.Getenv("ARVADOS_KEEP_SERVICES")
1020 workdir := runner.Container.Cwd
1022 // both "" and "." mean default
1025 ram := runner.Container.RuntimeConstraints.RAM
1026 if !runner.enableMemoryLimit {
1029 runner.executorStdin = stdin
1030 runner.executorStdout = stdout
1031 runner.executorStderr = stderr
1033 if runner.Container.RuntimeConstraints.CUDA.DeviceCount > 0 {
1034 nvidiaModprobe(runner.CrunchLog)
1037 return runner.executor.Create(containerSpec{
1039 VCPUs: runner.Container.RuntimeConstraints.VCPUs,
1041 WorkingDir: workdir,
1043 BindMounts: bindmounts,
1044 Command: runner.Container.Command,
1045 EnableNetwork: enableNetwork,
1046 CUDADeviceCount: runner.Container.RuntimeConstraints.CUDA.DeviceCount,
1047 NetworkMode: runner.networkMode,
1048 CgroupParent: runner.setCgroupParent,
1055 // StartContainer starts the docker container created by CreateContainer.
1056 func (runner *ContainerRunner) StartContainer() error {
1057 runner.CrunchLog.Printf("Starting container")
1058 runner.cStateLock.Lock()
1059 defer runner.cStateLock.Unlock()
1060 if runner.cCancelled {
1063 err := runner.executor.Start()
1066 if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
1067 advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
1069 return fmt.Errorf("could not start container: %v%s", err, advice)
1074 // WaitFinish waits for the container to terminate, captures the exit code, and
1075 // closes the stdout/stderr logging.
1076 func (runner *ContainerRunner) WaitFinish() error {
1077 runner.CrunchLog.Print("Waiting for container to finish")
1078 var timeout <-chan time.Time
1079 if s := runner.Container.SchedulingParameters.MaxRunTime; s > 0 {
1080 timeout = time.After(time.Duration(s) * time.Second)
1082 ctx, cancel := context.WithCancel(context.Background())
1087 runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
1089 case <-runner.ArvMountExit:
1090 runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
1095 exitcode, err := runner.executor.Wait(ctx)
1097 runner.checkBrokenNode(err)
1100 runner.ExitCode = &exitcode
1103 if exitcode&0x80 != 0 {
1104 // Convert raw exit status (0x80 + signal number) to a
1105 // string to log after the code, like " (signal 11)"
1106 // or " (signal 9, killed)"
1107 sig := syscall.WaitStatus(exitcode).Signal()
1108 if name := unix.SignalName(sig); name != "" {
1109 extra = fmt.Sprintf(" (signal %d, %s)", sig, name)
1111 extra = fmt.Sprintf(" (signal %d)", sig)
1114 runner.CrunchLog.Printf("Container exited with status code %d%s", exitcode, extra)
1115 err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1116 "container": arvadosclient.Dict{"exit_code": exitcode},
1119 runner.CrunchLog.Printf("ignoring error updating exit_code: %s", err)
1123 if err = runner.executorStdin.Close(); err != nil {
1124 err = fmt.Errorf("error closing container stdin: %s", err)
1125 runner.CrunchLog.Printf("%s", err)
1128 if err = runner.executorStdout.Close(); err != nil {
1129 err = fmt.Errorf("error closing container stdout: %s", err)
1130 runner.CrunchLog.Printf("%s", err)
1131 if returnErr == nil {
1135 if err = runner.executorStderr.Close(); err != nil {
1136 err = fmt.Errorf("error closing container stderr: %s", err)
1137 runner.CrunchLog.Printf("%s", err)
1138 if returnErr == nil {
1143 if runner.statReporter != nil {
1144 runner.statReporter.Stop()
1145 err = runner.statLogger.Close()
1147 runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
1153 func (runner *ContainerRunner) updateLogs() {
1154 ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
1157 sigusr1 := make(chan os.Signal, 1)
1158 signal.Notify(sigusr1, syscall.SIGUSR1)
1159 defer signal.Stop(sigusr1)
1161 saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
1162 saveAtSize := crunchLogUpdateSize
1168 saveAtTime = time.Now()
1170 runner.logMtx.Lock()
1171 done := runner.LogsPDH != nil
1172 runner.logMtx.Unlock()
1176 size := runner.LogCollection.Size()
1177 if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
1180 saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
1181 saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
1182 saved, err := runner.saveLogCollection(false)
1184 runner.CrunchLog.Printf("error updating log collection: %s", err)
1188 err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1189 "container": arvadosclient.Dict{"log": saved.PortableDataHash},
1192 runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
1200 func (runner *ContainerRunner) reportArvMountWarning(pattern, text string) {
1201 var updated arvados.Container
1202 err := runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1203 "container": arvadosclient.Dict{
1204 "runtime_status": arvadosclient.Dict{
1205 "warning": "arv-mount: " + pattern,
1206 "warningDetail": text,
1211 runner.CrunchLog.Printf("error updating container runtime_status: %s", err)
1215 // CaptureOutput saves data from the container's output directory if
1216 // needed, and updates the container output accordingly.
1217 func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) error {
1218 if runner.Container.RuntimeConstraints.API {
1219 // Output may have been set directly by the container, so
1220 // refresh the container record to check.
1221 err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
1222 nil, &runner.Container)
1226 if runner.Container.Output != "" {
1227 // Container output is already set.
1228 runner.OutputPDH = &runner.Container.Output
1233 txt, err := (&copier{
1234 client: runner.containerClient,
1235 arvClient: runner.ContainerArvClient,
1236 keepClient: runner.ContainerKeepClient,
1237 hostOutputDir: runner.HostOutputDir,
1238 ctrOutputDir: runner.Container.OutputPath,
1239 bindmounts: bindmounts,
1240 mounts: runner.Container.Mounts,
1241 secretMounts: runner.SecretMounts,
1242 logger: runner.CrunchLog,
1247 if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
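// A "+R" hint in a block locator (made-up example:
// "d41d8cd98f00b204e9800998ecf8427e+0+Rzzzzz-...") marks a block
// stored on a remote cluster; rebuilding the manifest through the
// collection filesystem below replaces them with local copies.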
1248 runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
1249 fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
1253 txt, err = fs.MarshalManifest(".")
1258 var resp arvados.Collection
1259 err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
1260 "ensure_unique_name": true,
1261 "collection": arvadosclient.Dict{
1263 "name": "output for " + runner.Container.UUID,
1264 "manifest_text": txt,
1268 return fmt.Errorf("error creating output collection: %v", err)
1270 runner.OutputPDH = &resp.PortableDataHash
1274 func (runner *ContainerRunner) CleanupDirs() {
1275 if runner.ArvMount != nil {
1277 umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
1278 umount.Stdout = runner.CrunchLog
1279 umount.Stderr = runner.CrunchLog
1280 runner.CrunchLog.Printf("Running %v", umount.Args)
1281 umnterr := umount.Start()
1284 runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
1285 runner.ArvMount.Process.Kill()
1287 // If arv-mount --unmount gets stuck for any reason, we
1288 // don't want to wait for it forever. Do Wait() in a goroutine
1289 // so it doesn't block crunch-run.
1290 umountExit := make(chan error)
1292 mnterr := umount.Wait()
1294 runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
1296 umountExit <- mnterr
1299 for again := true; again; {
1305 case <-runner.ArvMountExit:
1307 case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
1308 runner.CrunchLog.Printf("Timed out waiting for unmount")
1310 umount.Process.Kill()
1312 runner.ArvMount.Process.Kill()
1316 runner.ArvMount = nil
1319 if runner.ArvMountPoint != "" {
1320 if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
1321 runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
1323 runner.ArvMountPoint = ""
1326 if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
1327 runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
1331 // CommitLogs posts the collection containing the final container logs.
1332 func (runner *ContainerRunner) CommitLogs() error {
1334 // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
1335 runner.cStateLock.Lock()
1336 defer runner.cStateLock.Unlock()
1338 runner.CrunchLog.Print(runner.finalState)
1340 if runner.arvMountLog != nil {
1341 runner.arvMountLog.Close()
1343 runner.CrunchLog.Close()
1345 // Closing CrunchLog above allows the buffered logs to be committed
1346 // to Keep at this point, but we re-open the crunch log with
1347 // ArvClient in case any further errors occur (such as failing to
1348 // write the log to Keep!) while shutting down.
1349 runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
1350 ArvClient: runner.DispatcherArvClient,
1351 UUID: runner.Container.UUID,
1352 loggingStream: "crunch-run",
1355 runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
1358 if runner.keepstoreLogger != nil {
1359 // Flush any buffered logs from our local keepstore
1360 // process. Discard anything logged after this point
1361 // -- it won't end up in the log collection, so
1362 // there's no point writing it to the collectionfs.
1363 runner.keepstoreLogbuf.SetWriter(io.Discard)
1364 runner.keepstoreLogger.Close()
1365 runner.keepstoreLogger = nil
1368 if runner.LogsPDH != nil {
1369 // If we have already assigned something to LogsPDH,
1370 // we must be closing the re-opened log, which won't
1371 // end up getting attached to the container record and
1372 // therefore doesn't need to be saved as a collection
1373 // -- it exists only to send logs to other channels.
1377 saved, err := runner.saveLogCollection(true)
1379 return fmt.Errorf("error saving log collection: %s", err)
1381 runner.logMtx.Lock()
1382 defer runner.logMtx.Unlock()
1383 runner.LogsPDH = &saved.PortableDataHash
1387 func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
1388 runner.logMtx.Lock()
1389 defer runner.logMtx.Unlock()
1390 if runner.LogsPDH != nil {
1391 // Already finalized.
1394 updates := arvadosclient.Dict{
1395 "name": "logs for " + runner.Container.UUID,
1397 mt, err1 := runner.LogCollection.MarshalManifest(".")
1399 // Only send updated manifest text if there was no error.
1401 updates["manifest_text"] = mt
1404 // Even if flushing the manifest had an error, we still want
1405 // to update the log record, if possible, to push the trash_at
1406 // and delete_at times into the future. Details on bug
1409 updates["is_trashed"] = true
1411 exp := time.Now().Add(crunchLogUpdatePeriod * 24)
1412 updates["trash_at"] = exp
1413 updates["delete_at"] = exp
1415 reqBody := arvadosclient.Dict{"collection": updates}
1417 if runner.logUUID == "" {
1418 reqBody["ensure_unique_name"] = true
1419 err2 = runner.DispatcherArvClient.Create("collections", reqBody, &response)
1421 err2 = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
1424 runner.logUUID = response.UUID
1427 if err1 != nil || err2 != nil {
1428 err = fmt.Errorf("error recording logs: %q, %q", err1, err2)
1433 // UpdateContainerRunning updates the container state to "Running"
1434 func (runner *ContainerRunner) UpdateContainerRunning(logId string) error {
1435 runner.cStateLock.Lock()
1436 defer runner.cStateLock.Unlock()
1437 if runner.cCancelled {
1440 updates := arvadosclient.Dict{
1441 "gateway_address": runner.gateway.Address,
1445 updates["log"] = logId
1447 return runner.DispatcherArvClient.Update(
1449 runner.Container.UUID,
1450 arvadosclient.Dict{"container": updates},
1455 // ContainerToken returns the api_token the container (and any
1456 // arv-mount processes) are allowed to use.
1457 func (runner *ContainerRunner) ContainerToken() (string, error) {
1458 if runner.token != "" {
1459 return runner.token, nil
1462 var auth arvados.APIClientAuthorization
1463 err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
1467 runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
1468 return runner.token, nil
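// The assembled credential is a "v2" format token:
// "v2/<api client authorization uuid>/<secret>/<container uuid>",
// where the trailing container UUID identifies the container this
// token was issued for.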
1471 // UpdateContainerFinal updates the container record state on API
1472 // server to "Complete" or "Cancelled"
1473 func (runner *ContainerRunner) UpdateContainerFinal() error {
1474 update := arvadosclient.Dict{}
1475 update["state"] = runner.finalState
1476 if runner.LogsPDH != nil {
1477 update["log"] = *runner.LogsPDH
1479 if runner.ExitCode != nil {
1480 update["exit_code"] = *runner.ExitCode
1482 update["exit_code"] = nil
1484 if runner.finalState == "Complete" && runner.OutputPDH != nil {
1485 update["output"] = *runner.OutputPDH
1487 update["cost"] = runner.calculateCost(time.Now())
1488 return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
1491 // IsCancelled returns the value of Cancelled, with goroutine safety.
1492 func (runner *ContainerRunner) IsCancelled() bool {
1493 runner.cStateLock.Lock()
1494 defer runner.cStateLock.Unlock()
1495 return runner.cCancelled
1498 // NewArvLogWriter creates an ArvLogWriter
1499 func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
1500 writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
1504 return &ArvLogWriter{
1505 ArvClient: runner.DispatcherArvClient,
1506 UUID: runner.Container.UUID,
1507 loggingStream: name,
1508 writeCloser: writer,
1512 // Run the full container lifecycle.
1513 func (runner *ContainerRunner) Run() (err error) {
1514 runner.CrunchLog.Printf("crunch-run %s started", cmd.Version.String())
1515 runner.CrunchLog.Printf("%s", currentUserAndGroups())
1516 v, _ := exec.Command("arv-mount", "--version").CombinedOutput()
1517 runner.CrunchLog.Printf("Using FUSE mount: %s", v)
1518 runner.CrunchLog.Printf("Using container runtime: %s", runner.executor.Runtime())
1519 runner.CrunchLog.Printf("Executing container: %s", runner.Container.UUID)
1520 runner.costStartTime = time.Now()
1522 hostname, hosterr := os.Hostname()
1524 runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
1526 runner.CrunchLog.Printf("Executing on host '%s'", hostname)
1529 sigusr2 := make(chan os.Signal, 1)
1530 signal.Notify(sigusr2, syscall.SIGUSR2)
1531 defer signal.Stop(sigusr2)
1539 runner.finalState = "Queued"
1542 runner.CleanupDirs()
1544 runner.CrunchLog.Printf("crunch-run finished")
1545 runner.CrunchLog.Close()
1548 err = runner.fetchContainerRecord()
1552 if runner.Container.State != "Locked" {
1553 return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
1556 var bindmounts map[string]bindmount
1558 // checkErr prints e (unless it's nil) and sets err to
1559 // e (unless err is already non-nil). Thus, if err
1560 // hasn't already been assigned when Run() returns,
1561 // this cleanup func will cause Run() to return the
1562 // first non-nil error that is passed to checkErr().
1563 checkErr := func(errorIn string, e error) {
1567 runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
1571 if runner.finalState == "Complete" {
1572 // There was an error in the finalization.
1573 runner.finalState = "Cancelled"
1577 // Log the error encountered in Run(), if any
1578 checkErr("Run", err)
1580 if runner.finalState == "Queued" {
1581 runner.UpdateContainerFinal()
1585 if runner.IsCancelled() {
1586 runner.finalState = "Cancelled"
1587 // but don't return yet -- we still want to
1588 // capture partial output and write logs
1591 if bindmounts != nil {
1592 checkErr("CaptureOutput", runner.CaptureOutput(bindmounts))
1594 checkErr("stopHoststat", runner.stopHoststat())
1595 checkErr("CommitLogs", runner.CommitLogs())
1596 runner.CleanupDirs()
1597 checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
1600 runner.setupSignals()
1601 err = runner.startHoststat()
1605 if runner.keepstore != nil {
1606 runner.hoststatReporter.ReportPID("keepstore", runner.keepstore.Process.Pid)
1609 // set up FUSE mount and binds
1610 bindmounts, err = runner.SetupMounts()
1612 runner.finalState = "Cancelled"
1613 err = fmt.Errorf("While setting up mounts: %v", err)
1617 // check for and/or load image
1618 imageID, err := runner.LoadImage()
1620 if !runner.checkBrokenNode(err) {
1621 // Failed to load image but not due to a "broken node"
1622 // condition, probably user error.
1623 runner.finalState = "Cancelled"
1625 err = fmt.Errorf("While loading container image: %v", err)
1629 err = runner.CreateContainer(imageID, bindmounts)
1633 err = runner.LogHostInfo()
1637 err = runner.LogNodeRecord()
1641 err = runner.LogContainerRecord()
1646 if runner.IsCancelled() {
1650 logCollection, err := runner.saveLogCollection(false)
1653 logId = logCollection.PortableDataHash
1655 runner.CrunchLog.Printf("Error committing initial log collection: %v", err)
1657 err = runner.UpdateContainerRunning(logId)
1661 runner.finalState = "Cancelled"
1663 err = runner.startCrunchstat()
1668 err = runner.StartContainer()
1670 runner.checkBrokenNode(err)
1674 err = runner.WaitFinish()
1675 if err == nil && !runner.IsCancelled() {
1676 runner.finalState = "Complete"
1681 // Fetch the current container record (uuid = runner.Container.UUID)
1682 // into runner.Container.
1683 func (runner *ContainerRunner) fetchContainerRecord() error {
1684 reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
1686 return fmt.Errorf("error fetching container record: %v", err)
1688 defer reader.Close()
1690 dec := json.NewDecoder(reader)
1692 err = dec.Decode(&runner.Container)
1694 return fmt.Errorf("error decoding container record: %v", err)
1698 SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
1701 containerToken, err := runner.ContainerToken()
1703 return fmt.Errorf("error getting container token: %v", err)
1706 runner.ContainerArvClient, runner.ContainerKeepClient,
1707 runner.containerClient, err = runner.MkArvClient(containerToken)
1709 return fmt.Errorf("error creating container API client: %v", err)
1712 runner.ContainerKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
1713 runner.DispatcherKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
1715 err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
1717 if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
1718 return fmt.Errorf("error fetching secret_mounts: %v", err)
1720 // ok && apierr.HttpStatusCode == 404, which means
1721 // secret_mounts isn't supported by this API server.
1723 runner.SecretMounts = sm.SecretMounts
1728 // NewContainerRunner creates a new container runner.
1729 func NewContainerRunner(dispatcherClient *arvados.Client,
1730 dispatcherArvClient IArvadosClient,
1731 dispatcherKeepClient IKeepClient,
1732 containerUUID string) (*ContainerRunner, error) {
1734 cr := &ContainerRunner{
1735 dispatcherClient: dispatcherClient,
1736 DispatcherArvClient: dispatcherArvClient,
1737 DispatcherKeepClient: dispatcherKeepClient,
1739 cr.NewLogWriter = cr.NewArvLogWriter
1740 cr.RunArvMount = cr.ArvMountCmd
1741 cr.MkTempDir = ioutil.TempDir
1742 cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
1743 cl, err := arvadosclient.MakeArvadosClient()
1745 return nil, nil, nil, err
1748 kc, err := keepclient.MakeKeepClient(cl)
1750 return nil, nil, nil, err
1752 c2 := arvados.NewClientFromEnv()
1753 c2.AuthToken = token
1754 return cl, kc, c2, nil
1757 cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
1761 cr.Container.UUID = containerUUID
1762 w, err := cr.NewLogWriter("crunch-run")
1766 cr.CrunchLog = NewThrottledLogger(w)
1767 cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
1769 loadLogThrottleParams(dispatcherArvClient)
1775 func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
1776 log := log.New(stderr, "", 0)
1777 flags := flag.NewFlagSet(prog, flag.ContinueOnError)
1778 statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
1779 cgroupRoot := flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
1780 cgroupParent := flags.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
1781 cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
1782 caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
1783 detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
1784 stdinConfig := flags.Bool("stdin-config", false, "Load config and environment variables from JSON message on stdin")
1785 configFile := flags.String("config", arvados.DefaultConfigFile, "filename of cluster config file to try loading if -stdin-config=false (default is $ARVADOS_CONFIG)")
1786 sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
1787 kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
1788 list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes (and notify them to use price data passed on stdin)")
1789 enableMemoryLimit := flags.Bool("enable-memory-limit", true, "tell container runtime to limit container's memory usage")
1790 enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
1791 networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
1792 memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
1793 runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
1794 brokenNodeHook := flags.String("broken-node-hook", "", "script to run if node is detected to be broken (for example, Docker daemon is not running)")
1795 flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
1796 version := flags.Bool("version", false, "Write version information to stdout and exit 0.")
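// Typical invocations (illustrative):
//
//	crunch-run -runtime-engine=docker zzzzz-dz642-0123456789abcde
//	crunch-run -detach -stdin-config zzzzz-dz642-0123456789abcde
//
// The single positional argument is the 27-character container UUID
// (validated below).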
1798 ignoreDetachFlag := false
1799 if len(args) > 0 && args[0] == "-no-detach" {
1800 // This process was invoked by a parent process, which
1801 // has passed along its own arguments, including
1802 // -detach, after the leading -no-detach flag. Strip
1803 // the leading -no-detach flag (it's not recognized by
1804 // flags.Parse()) and ignore the -detach flag that comes later.
1807 ignoreDetachFlag = true
1810 if ok, code := cmd.ParseFlags(flags, prog, args, "container-uuid", stderr); !ok {
1812 } else if *version {
1813 fmt.Fprintln(stdout, prog, cmd.Version.String())
1815 } else if !*list && flags.NArg() != 1 {
1816 fmt.Fprintf(stderr, "missing required argument: container-uuid (try -help)\n")
1820 containerUUID := flags.Arg(0)
1823 case *detach && !ignoreDetachFlag:
1824 return Detach(containerUUID, prog, args, stdin, stdout, stderr)
1826 return KillProcess(containerUUID, syscall.Signal(*kill), stdout, stderr)
1828 return ListProcesses(stdin, stdout, stderr)
1831 if len(containerUUID) != 27 {
1832 log.Printf("usage: %s [options] UUID", prog)
1836 var keepstoreLogbuf bufThenWrite
1839 err := json.NewDecoder(stdin).Decode(&conf)
1841 log.Printf("decode stdin: %s", err)
1844 for k, v := range conf.Env {
1845 err = os.Setenv(k, v)
1847 log.Printf("setenv(%q): %s", k, err)
1851 if conf.Cluster != nil {
1852 // ClusterID is missing from the JSON
1853 // representation, but we need it to generate
1854 // a valid config file for keepstore, so we
1855 // fill it using the container UUID prefix.
1856 conf.Cluster.ClusterID = containerUUID[:5]
1859 conf = hpcConfData(containerUUID, *configFile, io.MultiWriter(&keepstoreLogbuf, stderr))
1862 log.Printf("crunch-run %s started", cmd.Version.String())
1865 if *caCertsPath != "" {
1866 arvadosclient.CertFiles = []string{*caCertsPath}
1869 keepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))
1874 if keepstore != nil {
1875 defer keepstore.Process.Kill()
1878 api, err := arvadosclient.MakeArvadosClient()
1880 log.Printf("%s: %v", containerUUID, err)
1885 kc, err := keepclient.MakeKeepClient(api)
1887 log.Printf("%s: %v", containerUUID, err)
1890 kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
1893 cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
1899 cr.keepstore = keepstore
1900 if keepstore == nil {
1901 // Log explanation (if any) for why we're not running
1902 // a local keepstore.
1903 var buf bytes.Buffer
1904 keepstoreLogbuf.SetWriter(&buf)
1906 cr.CrunchLog.Printf("%s", strings.TrimSpace(buf.String()))
1908 } else if logWhat := conf.Cluster.Containers.LocalKeepLogsToContainerLog; logWhat == "none" {
1909 cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
1910 keepstoreLogbuf.SetWriter(io.Discard)
1912 cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s, writing logs to keepstore.txt in log collection", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
1913 logwriter, err := cr.NewLogWriter("keepstore")
1918 cr.keepstoreLogger = NewThrottledLogger(logwriter)
1920 var writer io.WriteCloser = cr.keepstoreLogger
1921 if logWhat == "errors" {
1922 writer = &filterKeepstoreErrorsOnly{WriteCloser: writer}
1923 } else if logWhat != "all" {
1924 // should have been caught earlier by
1925 // dispatcher's config loader
1926 log.Printf("invalid value for Containers.LocalKeepLogsToContainerLog: %q", logWhat)
1929 err = keepstoreLogbuf.SetWriter(writer)
1934 cr.keepstoreLogbuf = &keepstoreLogbuf
1937 switch *runtimeEngine {
1939 cr.executor, err = newDockerExecutor(containerUUID, cr.CrunchLog.Printf, cr.containerWatchdogInterval)
1941 cr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)
1943 cr.CrunchLog.Printf("%s: unsupported RuntimeEngine %q", containerUUID, *runtimeEngine)
1944 cr.CrunchLog.Close()
1948 cr.CrunchLog.Printf("%s: %v", containerUUID, err)
1949 cr.checkBrokenNode(err)
1950 cr.CrunchLog.Close()
1953 defer cr.executor.Close()
1955 cr.brokenNodeHook = *brokenNodeHook
1957 gwAuthSecret := os.Getenv("GatewayAuthSecret")
1958 os.Unsetenv("GatewayAuthSecret")
1959 if gwAuthSecret == "" {
1960 // not safe to run a gateway service without an auth secret.
1962 cr.CrunchLog.Printf("Not starting a gateway server (GatewayAuthSecret was not provided by dispatcher)")
1964 gwListen := os.Getenv("GatewayAddress")
1965 cr.gateway = Gateway{
1967 AuthSecret: gwAuthSecret,
1968 ContainerUUID: containerUUID,
1969 Target: cr.executor,
1973 // Direct connection won't work, so we use the
1974 // gateway_address field to indicate the
1975 // internalURL of the controller process that
1976 // has the current tunnel connection.
1977 cr.gateway.ArvadosClient = cr.dispatcherClient
1978 cr.gateway.UpdateTunnelURL = func(url string) {
1979 cr.gateway.Address = "tunnel " + url
1980 cr.DispatcherArvClient.Update("containers", containerUUID,
1981 arvadosclient.Dict{"container": arvadosclient.Dict{"gateway_address": cr.gateway.Address}}, nil)
1984 err = cr.gateway.Start()
1986 log.Printf("error starting gateway server: %s", err)
1991 parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerUUID+".")
1993 log.Printf("%s: %v", containerUUID, tmperr)
1997 cr.parentTemp = parentTemp
1998 cr.statInterval = *statInterval
1999 cr.cgroupRoot = *cgroupRoot
2000 cr.expectCgroupParent = *cgroupParent
2001 cr.enableMemoryLimit = *enableMemoryLimit
2002 cr.enableNetwork = *enableNetwork
2003 cr.networkMode = *networkMode
2004 if *cgroupParentSubsystem != "" {
2005 p, err := findCgroup(*cgroupParentSubsystem)
2007 log.Printf("fatal: cgroup parent subsystem: %s", err)
2010 cr.setCgroupParent = p
2011 cr.expectCgroupParent = p
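// Illustrative example: with -cgroup-parent-subsystem=memory on a
// cgroups-v1 host, findCgroup might return something like
// "/system.slice/crunch.service" (made-up value), which then serves
// as both the requested and the expected cgroup parent.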
2016 if *memprofile != "" {
2017 f, err := os.Create(*memprofile)
2019 log.Printf("could not create memory profile: %s", err)
2021 runtime.GC() // get up-to-date statistics
2022 if err := pprof.WriteHeapProfile(f); err != nil {
2023 log.Printf("could not write memory profile: %s", err)
2025 closeerr := f.Close()
2026 if closeerr != nil {
2027 log.Printf("closing memprofile file: %s", err)
2032 log.Printf("%s: %v", containerUUID, runerr)
// Try to load ConfigData in hpc (slurm/lsf) environment. This means
// loading the cluster config from the specified file and (if that
// works) getting the runtime_constraints container field from
// controller to determine # VCPUs so we can calculate KeepBuffers.
func hpcConfData(uuid string, configFile string, stderr io.Writer) ConfigData {
	var conf ConfigData
	conf.Cluster = loadClusterConfigFile(configFile, stderr)
	if conf.Cluster == nil {
		// skip loading the container record -- we won't be
		// able to start local keepstore anyway.
		return conf
	}
	arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		fmt.Fprintf(stderr, "error setting up arvadosclient: %s\n", err)
		return conf
	}
	var ctr arvados.Container
	err = arv.Call("GET", "containers", uuid, "", arvadosclient.Dict{"select": []string{"runtime_constraints"}}, &ctr)
	if err != nil {
		fmt.Fprintf(stderr, "error getting container record: %s\n", err)
		return conf
	}
	if ctr.RuntimeConstraints.VCPUs > 0 {
		conf.KeepBuffers = ctr.RuntimeConstraints.VCPUs * conf.Cluster.Containers.LocalKeepBlobBuffersPerVCPU
	}
	return conf
}
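// Example (illustrative numbers, not from the source): if the
// container record reports runtime_constraints.vcpus=4 and the
// cluster config sets Containers.LocalKeepBlobBuffersPerVCPU=1,
// hpcConfData returns KeepBuffers=4, which startLocalKeepstore
// passes to keepstore as API.MaxKeepBlobBuffers.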
// Load cluster config file from given path. If an error occurs, log
// the error to stderr and return nil.
func loadClusterConfigFile(path string, stderr io.Writer) *arvados.Cluster {
	ldr := config.NewLoader(&bytes.Buffer{}, ctxlog.New(stderr, "plain", "info"))
	ldr.Path = path
	cfg, err := ldr.Load()
	if err != nil {
		fmt.Fprintf(stderr, "could not load config file %s: %s\n", path, err)
		return nil
	}
	cluster, err := cfg.GetCluster("")
	if err != nil {
		fmt.Fprintf(stderr, "could not use config file %s: %s\n", path, err)
		return nil
	}
	fmt.Fprintf(stderr, "loaded config file %s\n", path)
	return cluster
}
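// Note (explanatory): passing "" to cfg.GetCluster selects the
// single cluster defined in the file, so crunch-run expects a
// single-cluster config here.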
func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, error) {
	if configData.KeepBuffers < 1 {
		fmt.Fprintf(logbuf, "not starting a local keepstore process because KeepBuffers=%v in config\n", configData.KeepBuffers)
		return nil, nil
	}
	if configData.Cluster == nil {
		fmt.Fprint(logbuf, "not starting a local keepstore process because cluster config file was not loaded\n")
		return nil, nil
	}
	for uuid, vol := range configData.Cluster.Volumes {
		if len(vol.AccessViaHosts) > 0 {
			fmt.Fprintf(logbuf, "not starting a local keepstore process because a volume (%s) uses AccessViaHosts\n", uuid)
			return nil, nil
		}
		if !vol.ReadOnly && vol.Replication < configData.Cluster.Collections.DefaultReplication {
			fmt.Fprintf(logbuf, "not starting a local keepstore process because a writable volume (%s) has replication less than Collections.DefaultReplication (%d < %d)\n", uuid, vol.Replication, configData.Cluster.Collections.DefaultReplication)
			return nil, nil
		}
	}
	// Rather than have an alternate way to tell keepstore how
	// many buffers to use when starting it this way, we just
	// modify the cluster configuration that we feed it on stdin.
	configData.Cluster.API.MaxKeepBlobBuffers = configData.KeepBuffers

	localaddr := localKeepstoreAddr()
	ln, err := net.Listen("tcp", net.JoinHostPort(localaddr, "0"))
	if err != nil {
		return nil, err
	}
	_, port, err := net.SplitHostPort(ln.Addr().String())
	if err != nil {
		ln.Close()
		return nil, err
	}
	ln.Close()
	url := "http://" + net.JoinHostPort(localaddr, port)
	fmt.Fprintf(logbuf, "starting keepstore on %s\n", url)
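	// Note (explanatory): listening on port 0 asks the kernel for a
	// free port, and the listener is closed right away so the
	// keepstore child can bind the same port. This leaves a brief
	// window in which another process could claim the port first.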
	var confJSON bytes.Buffer
	err = json.NewEncoder(&confJSON).Encode(arvados.Config{
		Clusters: map[string]arvados.Cluster{
			configData.Cluster.ClusterID: *configData.Cluster,
		},
	})
	if err != nil {
		return nil, err
	}
	cmd := exec.Command("/proc/self/exe", "keepstore", "-config=-")
	if target, err := os.Readlink(cmd.Path); err == nil && strings.HasSuffix(target, ".test") {
		// If we're a 'go test' process, running
		// /proc/self/exe would start the test suite in a
		// child process, which is not what we want.
		cmd.Path, _ = exec.LookPath("go")
		cmd.Args = append([]string{"go", "run", "../../cmd/arvados-server"}, cmd.Args[1:]...)
		cmd.Env = os.Environ()
	}
	cmd.Stdin = &confJSON
	cmd.Stdout = logbuf
	cmd.Stderr = logbuf
	cmd.Env = append(cmd.Env,
		"GOGC=10",
		"ARVADOS_SERVICE_INTERNAL_URL="+url)
	err = cmd.Start()
	if err != nil {
		return nil, fmt.Errorf("error starting keepstore process: %w", err)
	}
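	// Note (explanatory, an assumption about service startup):
	// ARVADOS_SERVICE_INTERNAL_URL tells the keepstore child which
	// internal URL -- and therefore which listen port -- to claim
	// as its own, since the ad-hoc URL chosen above is not listed
	// in the cluster config.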
	cmdExited := false
	go func() {
		cmd.Wait()
		cmdExited = true
	}()
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
	defer cancel()
	poll := time.NewTicker(time.Second / 10)
	defer poll.Stop()
	client := http.Client{}
	for range poll.C {
		testReq, err := http.NewRequestWithContext(ctx, "GET", url+"/_health/ping", nil)
		if err != nil {
			return nil, err
		}
		testReq.Header.Set("Authorization", "Bearer "+configData.Cluster.ManagementToken)
		resp, err := client.Do(testReq)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				break
			}
		}
		if cmdExited {
			return nil, fmt.Errorf("keepstore child process exited")
		}
		if ctx.Err() != nil {
			return nil, fmt.Errorf("timed out waiting for new keepstore process to report healthy")
		}
	}
	os.Setenv("ARVADOS_KEEP_SERVICES", url)
	return cmd, nil
}
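// Note (explanatory): ARVADOS_KEEP_SERVICES, set above, points this
// process's Keep clients (and child processes such as arv-mount) at
// the new local keepstore, bypassing Keep service discovery via the
// API server.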
// return current uid, gid, groups in a format suitable for logging:
// "crunch-run process has uid=1234(arvados) gid=1234(arvados)
// groups=1234(arvados),114(fuse)"
func currentUserAndGroups() string {
	u, err := user.Current()
	if err != nil {
		return fmt.Sprintf("error getting current user ID: %s", err)
	}
	s := fmt.Sprintf("crunch-run process has uid=%s(%s) gid=%s", u.Uid, u.Username, u.Gid)
	if g, err := user.LookupGroupId(u.Gid); err == nil {
		s += fmt.Sprintf("(%s)", g.Name)
	}
	s += " groups="
	if gids, err := u.GroupIds(); err == nil {
		for i, gid := range gids {
			if i > 0 {
				s += ","
			}
			s += gid
			if g, err := user.LookupGroupId(gid); err == nil {
				s += fmt.Sprintf("(%s)", g.Name)
			}
		}
	}
	return s
}
// Return a suitable local interface address for a local keepstore
// service. Currently this is the numerically lowest non-loopback ipv4
// address assigned to a local interface that is not in any of the
// link-local/vpn/loopback ranges 169.254/16, 100.64/10, or 127/8.
func localKeepstoreAddr() string {
	var ips []net.IP
	// Ignore error (proceed with zero IPs)
	addrs, _ := processIPs(os.Getpid())
	for addr := range addrs {
		ip := net.ParseIP(addr)
		if ip == nil {
			continue // invalid
		}
		if ip.Mask(net.CIDRMask(8, 32)).Equal(net.IPv4(127, 0, 0, 0)) ||
			ip.Mask(net.CIDRMask(10, 32)).Equal(net.IPv4(100, 64, 0, 0)) ||
			ip.Mask(net.CIDRMask(16, 32)).Equal(net.IPv4(169, 254, 0, 0)) {
			continue // unsuitable
		}
		ips = append(ips, ip)
	}
	if len(ips) == 0 {
		return "0.0.0.0"
	}
	sort.Slice(ips, func(ii, jj int) bool {
		i, j := ips[ii], ips[jj]
		if len(i) != len(j) {
			return len(i) < len(j)
		}
		for x := range i {
			if i[x] != j[x] {
				return i[x] < j[x]
			}
		}
		return false
	})
	return ips[0].String()
}
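// loadPrices reads instance price history from lockdir/pricesfile
// (kept up to date by the dispatcher), merges it into cr.prices, and
// logs any price changes that are new since the previous load.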
func (cr *ContainerRunner) loadPrices() {
	buf, err := os.ReadFile(filepath.Join(lockdir, pricesfile))
	if err != nil {
		if !os.IsNotExist(err) {
			cr.CrunchLog.Printf("loadPrices: read: %s", err)
		}
		return
	}
	var prices []cloud.InstancePrice
	err = json.Unmarshal(buf, &prices)
	if err != nil {
		cr.CrunchLog.Printf("loadPrices: decode: %s", err)
		return
	}
	cr.pricesLock.Lock()
	defer cr.pricesLock.Unlock()
	var lastKnown time.Time
	if len(cr.prices) > 0 {
		lastKnown = cr.prices[0].StartTime
	}
	cr.prices = cloud.NormalizePriceHistory(append(prices, cr.prices...))
	for i := len(cr.prices) - 1; i >= 0; i-- {
		price := cr.prices[i]
		if price.StartTime.After(lastKnown) {
			cr.CrunchLog.Printf("Instance price changed to %#.3g at %s", price.Price, price.StartTime.UTC())
		}
	}
}
func (cr *ContainerRunner) calculateCost(now time.Time) float64 {
	cr.pricesLock.Lock()
	defer cr.pricesLock.Unlock()

	// First, make a "prices" slice with the real data as far back
	// as it goes, and (if needed) a "since the beginning of time"
	// placeholder containing a reasonable guess about what the
	// price was between cr.costStartTime and the earliest real
	// data point.
	prices := cr.prices
	if len(prices) == 0 {
		// use price info in InstanceType record initially
		// provided by cloud dispatcher
		var p float64
		var it arvados.InstanceType
		if j := os.Getenv("InstanceType"); j != "" && json.Unmarshal([]byte(j), &it) == nil && it.Price > 0 {
			p = it.Price
		}
		prices = []cloud.InstancePrice{{Price: p}}
	} else if prices[len(prices)-1].StartTime.After(cr.costStartTime) {
		// guess earlier pricing was the same as the earliest
		// price we know about
		filler := prices[len(prices)-1]
		filler.StartTime = time.Time{}
		prices = append(prices, filler)
	}

	// Now that our history of price changes goes back at least as
	// far as cr.costStartTime, add up the costs for each
	// interval.
	cost := 0.0
	spanEnd := now
	for _, ip := range prices {
		spanStart := ip.StartTime
		if spanStart.After(now) {
			// pricing information from the future -- not
			// expected from AWS, but possible in
			// principle, and exercised by tests.
			continue
		}
		last := false
		if spanStart.Before(cr.costStartTime) {
			spanStart = cr.costStartTime
			last = true
		}
		cost += ip.Price * spanEnd.Sub(spanStart).Seconds() / 3600
		if last {
			break
		}
		spanEnd = spanStart
	}

	return cost
}
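// Worked example (illustrative numbers, not from the source): with
// cr.costStartTime = 10:00, now = 13:00, and a normalized price
// history of [{StartTime: 12:00, Price: 0.50}, {StartTime: zero,
// Price: 1.00}] (newest first), the loop charges 1 hour at $0.50
// (12:00-13:00), then 2 hours at $1.00 (10:00-12:00, clamped to
// costStartTime), for a total of $2.50.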