// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0
33 "git.arvados.org/arvados.git/lib/cmd"
34 "git.arvados.org/arvados.git/lib/crunchstat"
35 "git.arvados.org/arvados.git/sdk/go/arvados"
36 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
37 "git.arvados.org/arvados.git/sdk/go/keepclient"
38 "git.arvados.org/arvados.git/sdk/go/manifest"
var Command = command{}
// ConfigData contains environment variables and (when needed) cluster
// configuration, passed from dispatchcloud to crunch-run on stdin.
type ConfigData struct {
    Env         map[string]string
    KeepBuffers int
    Cluster     *arvados.Cluster
}
// IArvadosClient is the minimal set of Arvados API methods used by crunch-run.
type IArvadosClient interface {
    Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
    Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
    Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
    Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
    CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
    Discovery(key string) (interface{}, error)
}
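
// Narrowing the API client to this interface keeps ContainerRunner
// testable: a test can substitute a stub. A minimal sketch (hypothetical,
// not part of the real test suite):
//
//	type stubArvClient struct{ IArvadosClient }
//
//	func (stubArvClient) Discovery(key string) (interface{}, error) {
//		return nil, errors.New("stub: no discovery document")
//	}
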
// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")
// IKeepClient is the minimal set of Keep API methods used by crunch-run.
type IKeepClient interface {
    BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error)
    ReadAt(locator string, p []byte, off int) (int, error)
    ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
    LocalLocator(locator string) (string, error)
    SetStorageClasses(sc []string)
}
// NewLogWriter is a factory function to create a new log writer.
type NewLogWriter func(name string) (io.WriteCloser, error)

type RunArvMount func(cmdline []string, tok string) (*exec.Cmd, error)

type MkTempDir func(string, string) (string, error)

type PsProcess interface {
    CmdlineSlice() ([]string, error)
}
// ContainerRunner is the main stateful struct used for a single
// execution of a container.
type ContainerRunner struct {
    executor       containerExecutor
    executorStdin  io.Closer
    executorStdout io.Closer
    executorStderr io.Closer
    // Dispatcher client is initialized with the Dispatcher token.
    // This is a privileged token used to manage container status
    // and logs.
    //
    // We have both dispatcherClient and DispatcherArvClient
    // because there are two different incompatible Arvados Go
    // SDKs and we have to use both (hopefully this gets fixed in
    // a future version).
    dispatcherClient     *arvados.Client
    DispatcherArvClient  IArvadosClient
    DispatcherKeepClient IKeepClient
    // Container client is initialized with the Container token.
    // This token controls the permissions of the container, and
    // must be used for operations such as reading collections.
    //
    // Same comment as above applies to
    // containerClient/ContainerArvClient.
    containerClient     *arvados.Client
    ContainerArvClient  IArvadosClient
    ContainerKeepClient IKeepClient
    Container     arvados.Container
    NewLogWriter  NewLogWriter
    CrunchLog     *ThrottledLogger
    LogCollection arvados.CollectionFileSystem
    RunArvMount   RunArvMount
    Volumes       map[string]struct{}
    SigChan       chan os.Signal
    ArvMountExit  chan error
    SecretMounts  map[string]arvados.Mount
    MkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
    keepstoreLogger  io.WriteCloser
    keepstoreLogbuf  *bufThenWrite
    statLogger       io.WriteCloser
    statReporter     *crunchstat.Reporter
    hoststatLogger   io.WriteCloser
    hoststatReporter *crunchstat.Reporter
    statInterval     time.Duration
    // What we expect the container's cgroup parent to be.
    expectCgroupParent string
    // What we tell docker to use as the container's cgroup
    // parent. Note: Ideally we would use the same field for both
    // expectCgroupParent and setCgroupParent, and just make it
    // default to "docker". However, when using docker < 1.10 with
    // systemd, specifying a non-empty cgroup parent (even the
    // default value "docker") hits a docker bug
    // (https://github.com/docker/docker/issues/17126). Using two
    // separate fields makes it possible to use the "expect cgroup
    // parent to be X" feature even on sites where the "specify
    // cgroup parent" feature breaks.
    setCgroupParent string
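    // For illustration (values are hypothetical): a site where the
    // "specify cgroup parent" feature breaks could leave
    // setCgroupParent empty while still verifying accounting with
    // expectCgroupParent = "docker".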
    cStateLock sync.Mutex
    cCancelled bool // StopContainer() invoked

    enableMemoryLimit bool
    enableNetwork     string // one of "default" or "always"
    networkMode       string // "none", "host", or "" -- passed through to executor
    arvMountLog       *ThrottledLogger

    containerWatchdogInterval time.Duration
}
// setupSignals sets up signal handling to gracefully terminate the
// underlying container and update state when receiving a TERM, INT or
// QUIT signal.
func (runner *ContainerRunner) setupSignals() {
    runner.SigChan = make(chan os.Signal, 1)
    signal.Notify(runner.SigChan, syscall.SIGTERM)
    signal.Notify(runner.SigChan, syscall.SIGINT)
    signal.Notify(runner.SigChan, syscall.SIGQUIT)

    go func(sig chan os.Signal) {
        for s := range sig {
            runner.stop(s)
        }
    }(runner.SigChan)
}
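
// Usage note: sending any of the handled signals to a crunch-run
// process, e.g. "kill -TERM <pid>", cancels the container via stop()
// below rather than killing crunch-run outright.
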
// stop the underlying container.
func (runner *ContainerRunner) stop(sig os.Signal) {
    runner.cStateLock.Lock()
    defer runner.cStateLock.Unlock()
    if sig != nil {
        runner.CrunchLog.Printf("caught signal: %v", sig)
    }
    runner.cCancelled = true
    runner.CrunchLog.Printf("stopping container")
    err := runner.executor.Stop()
    if err != nil {
        runner.CrunchLog.Printf("error stopping container: %s", err)
    }
}
var errorBlacklist = []string{
    "(?ms).*[Cc]annot connect to the Docker daemon.*",
    "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
    "(?ms).*grpc: the connection is unavailable.*",
}

var brokenNodeHook = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
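
// Example invocation (hook path is hypothetical):
//
//	crunch-run -broken-node-hook=/usr/local/sbin/mark-node-broken zzzzz-dz642-0123456789abcde
//
// If no hook is given, runBrokenNodeHook falls back to writing a marker
// file under lockdir, as shown below.
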
func (runner *ContainerRunner) runBrokenNodeHook() {
    if *brokenNodeHook == "" {
        path := filepath.Join(lockdir, brokenfile)
        runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
        f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
        if err != nil {
            runner.CrunchLog.Printf("Error writing %s: %s", path, err)
            return
        }
        f.Close()
    } else {
        runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
        c := exec.Command(*brokenNodeHook)
        c.Stdout = runner.CrunchLog
        c.Stderr = runner.CrunchLog
        err := c.Run()
        if err != nil {
            runner.CrunchLog.Printf("Error running broken node hook: %v", err)
        }
    }
}
func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
    for _, d := range errorBlacklist {
        if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
            runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
            runner.runBrokenNodeHook()
            return true
        }
    }
    return false
}
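
// For example, a Docker daemon outage typically surfaces as an error
// like "Cannot connect to the Docker daemon at
// unix:///var/run/docker.sock. Is the docker daemon running?", which
// matches the first pattern in errorBlacklist above, so checkBrokenNode
// reports the node as broken instead of failing the container as a
// user error.
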
// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store. If not, it loads
// the image from Keep.
func (runner *ContainerRunner) LoadImage() (string, error) {
    runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

    d, err := os.Open(runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage)
    if err != nil {
        return "", err
    }
    defer d.Close()
    allfiles, err := d.Readdirnames(-1)
    if err != nil {
        return "", err
    }
    var tarfiles []string
    for _, fnm := range allfiles {
        if strings.HasSuffix(fnm, ".tar") {
            tarfiles = append(tarfiles, fnm)
        }
    }
    if len(tarfiles) == 0 {
        return "", fmt.Errorf("image collection does not include a .tar image file")
    }
    if len(tarfiles) > 1 {
        return "", fmt.Errorf("cannot choose from multiple tar files in image collection: %v", tarfiles)
    }
    imageID := tarfiles[0][:len(tarfiles[0])-4]
    imageTarballPath := runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage + "/" + imageID + ".tar"
    runner.CrunchLog.Printf("Using Docker image id %q", imageID)

    runner.CrunchLog.Print("Loading Docker image from keep")
    err = runner.executor.LoadImage(imageID, imageTarballPath, runner.Container, runner.ArvMountPoint,
        runner.containerClient)
    if err != nil {
        return "", err
    }

    return imageID, nil
}
func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *exec.Cmd, err error) {
    c = exec.Command(cmdline[0], cmdline[1:]...)

    // Copy our environment, but override ARVADOS_API_TOKEN with
    // the container auth token.
    for _, s := range os.Environ() {
        if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
            c.Env = append(c.Env, s)
        }
    }
    c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

    w, err := runner.NewLogWriter("arv-mount")
    if err != nil {
        return nil, err
    }
    runner.arvMountLog = NewThrottledLogger(w)
    scanner := logScanner{
        Patterns: []string{
            "Block not found error",
            "Unhandled exception during FUSE operation",
        },
        ReportFunc: runner.reportArvMountWarning,
    }
    c.Stdout = runner.arvMountLog
    c.Stderr = io.MultiWriter(runner.arvMountLog, os.Stderr, &scanner)

    runner.CrunchLog.Printf("Running %v", c.Args)
    err = c.Start()
    if err != nil {
        return nil, err
    }

    statReadme := make(chan bool)
    runner.ArvMountExit = make(chan error)

    keepStatting := true
    go func() {
        for keepStatting {
            time.Sleep(100 * time.Millisecond)
            _, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
            if err == nil {
                keepStatting = false
                statReadme <- true
            }
        }
        close(statReadme)
    }()

    go func() {
        mnterr := c.Wait()
        if mnterr != nil {
            runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
        }
        runner.ArvMountExit <- mnterr
        close(runner.ArvMountExit)
    }()

    select {
    case <-statReadme:
    case err := <-runner.ArvMountExit:
        runner.ArvMount = nil
        keepStatting = false
        return nil, err
    }

    return c, nil
}
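
// A typical assembled command line (flag values are illustrative; see
// SetupMounts for how the real one is built) looks roughly like:
//
//	arv-mount --storage-classes default --crunchstat-interval=10 \
//	    --file-cache 268435456 --mount-tmp tmp0 --mount-by-id by_uuid \
//	    /tmp/crunch-run.zzzzz-dz642-0123456789abcde.123456789/keep
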
func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
    if runner.ArvMountPoint == "" {
        runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
    }
    return
}
func copyfile(src string, dst string) (err error) {
    srcfile, err := os.Open(src)
    if err != nil {
        return
    }
    os.MkdirAll(path.Dir(dst), 0777)
    dstfile, err := os.Create(dst)
    if err != nil {
        return
    }
    _, err = io.Copy(dstfile, srcfile)
    err2 := srcfile.Close()
    if err == nil {
        err = err2
    }
    err2 = dstfile.Close()
    if err == nil {
        err = err2
    }
    return
}
func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
    bindmounts := map[string]bindmount{}
    err := runner.SetupArvMountPoint("keep")
    if err != nil {
        return nil, fmt.Errorf("While creating keep mount temp dir: %v", err)
    }

    token, err := runner.ContainerToken()
    if err != nil {
        return nil, fmt.Errorf("could not get container token: %s", err)
    }

    pdhOnly := true
    tmpcount := 0
    arvMountCmd := []string{
        "--storage-classes", strings.Join(runner.Container.OutputStorageClasses, ","),
        fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}

    if runner.executor.Runtime() == "docker" {
        arvMountCmd = append(arvMountCmd, "--allow-other")
    }

    if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
        arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
    }
    collectionPaths := []string{}
    needCertMount := true
    type copyFile struct {
        src  string
        bind string
    }
    var copyFiles []copyFile

    var binds []string
    for bind := range runner.Container.Mounts {
        binds = append(binds, bind)
    }
    for bind := range runner.SecretMounts {
        if _, ok := runner.Container.Mounts[bind]; ok {
            return nil, fmt.Errorf("secret mount %q conflicts with regular mount", bind)
        }
        if runner.SecretMounts[bind].Kind != "json" &&
            runner.SecretMounts[bind].Kind != "text" {
            return nil, fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
                bind, runner.SecretMounts[bind].Kind)
        }
        binds = append(binds, bind)
    }
    sort.Strings(binds)
    for _, bind := range binds {
        mnt, ok := runner.Container.Mounts[bind]
        if !ok {
            mnt = runner.SecretMounts[bind]
        }
        if bind == "stdout" || bind == "stderr" {
            // Is it a "file" mount kind?
            if mnt.Kind != "file" {
                return nil, fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
            }

            // Does path start with OutputPath?
            prefix := runner.Container.OutputPath
            if !strings.HasSuffix(prefix, "/") {
                prefix += "/"
            }
            if !strings.HasPrefix(mnt.Path, prefix) {
                return nil, fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
            }
        }

        if bind == "stdin" {
            // Is it a "collection" mount kind?
            if mnt.Kind != "collection" && mnt.Kind != "json" {
                return nil, fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
            }
        }

        if bind == "/etc/arvados/ca-certificates.crt" {
            needCertMount = false
        }

        if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
            if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
                return nil, fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
            }
        }

        switch {
        case mnt.Kind == "collection" && bind != "stdin":
            var src string
            if mnt.UUID != "" && mnt.PortableDataHash != "" {
                return nil, fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
            }
            if mnt.UUID != "" {
                if mnt.Writable {
                    return nil, fmt.Errorf("writing to existing collections currently not permitted")
                }
                pdhOnly = false
                src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
            } else if mnt.PortableDataHash != "" {
                if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
                    return nil, fmt.Errorf("can never write to a collection specified by portable data hash")
                }
                idx := strings.Index(mnt.PortableDataHash, "/")
                if idx > 0 {
                    mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
                    mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
                    runner.Container.Mounts[bind] = mnt
                }
                src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
                if mnt.Path != "" && mnt.Path != "." {
                    if strings.HasPrefix(mnt.Path, "./") {
                        mnt.Path = mnt.Path[2:]
                    } else if strings.HasPrefix(mnt.Path, "/") {
                        mnt.Path = mnt.Path[1:]
                    }
                    src += "/" + mnt.Path
                }
            } else {
                src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
                arvMountCmd = append(arvMountCmd, "--mount-tmp")
                arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
                tmpcount++
            }

            if mnt.Writable {
                if bind == runner.Container.OutputPath {
                    runner.HostOutputDir = src
                    bindmounts[bind] = bindmount{HostPath: src}
                } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
                    copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
                } else {
                    bindmounts[bind] = bindmount{HostPath: src}
                }
            } else {
                bindmounts[bind] = bindmount{HostPath: src, ReadOnly: true}
            }
            collectionPaths = append(collectionPaths, src)
        case mnt.Kind == "tmp":
            var tmpdir string
            tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
            if err != nil {
                return nil, fmt.Errorf("while creating mount temp dir: %v", err)
            }
            st, staterr := os.Stat(tmpdir)
            if staterr != nil {
                return nil, fmt.Errorf("while Stat on temp dir: %v", staterr)
            }
            err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
            if err != nil {
                return nil, fmt.Errorf("while Chmod temp dir: %v", err)
            }
            bindmounts[bind] = bindmount{HostPath: tmpdir}
            if bind == runner.Container.OutputPath {
                runner.HostOutputDir = tmpdir
            }
        case mnt.Kind == "json" || mnt.Kind == "text":
            var filedata []byte
            if mnt.Kind == "json" {
                filedata, err = json.Marshal(mnt.Content)
                if err != nil {
                    return nil, fmt.Errorf("encoding json data: %v", err)
                }
            } else {
                text, ok := mnt.Content.(string)
                if !ok {
                    return nil, fmt.Errorf("content for mount %q must be a string", bind)
                }
                filedata = []byte(text)
            }

            tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
            if err != nil {
                return nil, fmt.Errorf("creating temp dir: %v", err)
            }
            tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
            err = ioutil.WriteFile(tmpfn, filedata, 0444)
            if err != nil {
                return nil, fmt.Errorf("writing temp file: %v", err)
            }
            if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
                copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
            } else {
                bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
            }
        case mnt.Kind == "git_tree":
            tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
            if err != nil {
                return nil, fmt.Errorf("creating temp dir: %v", err)
            }
            err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
            if err != nil {
                return nil, err
            }
            bindmounts[bind] = bindmount{HostPath: tmpdir, ReadOnly: true}
        }
    }
    if runner.HostOutputDir == "" {
        return nil, fmt.Errorf("output path does not correspond to a writable mount point")
    }

    if needCertMount && runner.Container.RuntimeConstraints.API {
        for _, certfile := range arvadosclient.CertFiles {
            _, err := os.Stat(certfile)
            if err == nil {
                bindmounts["/etc/arvados/ca-certificates.crt"] = bindmount{HostPath: certfile, ReadOnly: true}
                break
            }
        }
    }

    if pdhOnly {
        arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
    } else {
        arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
    }
    arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
    arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)

    runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
    if err != nil {
        return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
    }
    for _, p := range collectionPaths {
        _, err = os.Stat(p)
        if err != nil {
            return nil, fmt.Errorf("while checking that input files exist: %v", err)
        }
    }

    for _, cp := range copyFiles {
        st, err := os.Stat(cp.src)
        if err != nil {
            return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
        }
        if st.Mode().IsDir() {
            err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
                if walkerr != nil {
                    return walkerr
                }
                target := path.Join(cp.bind, walkpath[len(cp.src):])
                if walkinfo.Mode().IsRegular() {
                    copyerr := copyfile(walkpath, target)
                    if copyerr != nil {
                        return copyerr
                    }
                    return os.Chmod(target, walkinfo.Mode()|0777)
                } else if walkinfo.Mode().IsDir() {
                    mkerr := os.MkdirAll(target, 0777)
                    if mkerr != nil {
                        return mkerr
                    }
                    return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
                } else {
                    return fmt.Errorf("source %q is not a regular file or directory", cp.src)
                }
            })
        } else if st.Mode().IsRegular() {
            err = copyfile(cp.src, cp.bind)
            if err == nil {
                err = os.Chmod(cp.bind, st.Mode()|0777)
            }
        }
        if err != nil {
            return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
        }
    }

    return bindmounts, nil
}
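
// For illustration, a container with output_path /out plus one read-only
// input collection mounted at /in might produce (host paths fabricated):
//
//	map[string]bindmount{
//		"/out": {HostPath: "/tmp/crunch-run.../tmp2"},
//		"/in":  {HostPath: "/tmp/crunch-run.../keep/by_id/<pdh>", ReadOnly: true},
//	}
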
func (runner *ContainerRunner) stopHoststat() error {
    if runner.hoststatReporter == nil {
        return nil
    }
    runner.hoststatReporter.Stop()
    err := runner.hoststatLogger.Close()
    if err != nil {
        return fmt.Errorf("error closing hoststat logs: %v", err)
    }
    return nil
}

func (runner *ContainerRunner) startHoststat() error {
    w, err := runner.NewLogWriter("hoststat")
    if err != nil {
        return err
    }
    runner.hoststatLogger = NewThrottledLogger(w)
    runner.hoststatReporter = &crunchstat.Reporter{
        Logger:     log.New(runner.hoststatLogger, "", 0),
        CgroupRoot: runner.cgroupRoot,
        PollPeriod: runner.statInterval,
    }
    runner.hoststatReporter.Start()
    return nil
}
func (runner *ContainerRunner) startCrunchstat() error {
    w, err := runner.NewLogWriter("crunchstat")
    if err != nil {
        return err
    }
    runner.statLogger = NewThrottledLogger(w)
    runner.statReporter = &crunchstat.Reporter{
        CID:          runner.executor.CgroupID(),
        Logger:       log.New(runner.statLogger, "", 0),
        CgroupParent: runner.expectCgroupParent,
        CgroupRoot:   runner.cgroupRoot,
        PollPeriod:   runner.statInterval,
        TempDir:      runner.parentTemp,
    }
    runner.statReporter.Start()
    return nil
}
type infoCommand struct {
    label string
    cmd   []string
}

// LogHostInfo logs info about the current host, for debugging and
// accounting purposes. Although it's logged as "node-info", this is
// about the environment where crunch-run is actually running, which
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
    w, err := runner.NewLogWriter("node-info")
    if err != nil {
        return
    }

    commands := []infoCommand{
        {
            label: "Host Information",
            cmd:   []string{"uname", "-a"},
        },
        {
            label: "CPU Information",
            cmd:   []string{"cat", "/proc/cpuinfo"},
        },
        {
            label: "Memory Information",
            cmd:   []string{"cat", "/proc/meminfo"},
        },
        {
            label: "Disk Space",
            cmd:   []string{"df", "-m", "/", os.TempDir()},
        },
        {
            label: "Disk INodes",
            cmd:   []string{"df", "-i", "/", os.TempDir()},
        },
    }
    // Run commands with informational output to be logged.
    for _, command := range commands {
        fmt.Fprintln(w, command.label)
        cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
        cmd.Stdout = w
        cmd.Stderr = w
        if err := cmd.Run(); err != nil {
            err = fmt.Errorf("While running command %q: %v", command.cmd, err)
            fmt.Fprintln(w, err)
            return err
        }
    }

    err = w.Close()
    if err != nil {
        return fmt.Errorf("While closing node-info logs: %v", err)
    }
    return nil
}
// LogContainerRecord gets and saves the raw JSON container record from the API server
func (runner *ContainerRunner) LogContainerRecord() error {
    logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
    if !logged && err == nil {
        err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
    }
    return err
}
// LogNodeRecord logs the current host's InstanceType config entry (or
// the arvados#node record, if running via crunch-dispatch-slurm).
func (runner *ContainerRunner) LogNodeRecord() error {
    if it := os.Getenv("InstanceType"); it != "" {
        // Dispatched via arvados-dispatch-cloud. Save
        // InstanceType config fragment received from
        // dispatcher on stdin.
        w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
        if err != nil {
            return err
        }
        _, err = io.WriteString(w, it)
        if err != nil {
            return err
        }
        return w.Close()
    }
    // Dispatched via crunch-dispatch-slurm. Look up
    // apiserver's node record corresponding to the
    // current hostname.
    hostname := os.Getenv("SLURMD_NODENAME")
    if hostname == "" {
        hostname, _ = os.Hostname()
    }
    _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
        // The "info" field has admin-only info when
        // obtained with a privileged token, and
        // should not be logged.
        node, ok := resp.(map[string]interface{})
        if ok {
            delete(node, "info")
        }
    })
    return err
}
func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
    writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
    if err != nil {
        return false, err
    }
    w := &ArvLogWriter{
        ArvClient:     runner.DispatcherArvClient,
        UUID:          runner.Container.UUID,
        loggingStream: label,
        writeCloser:   writer,
    }

    reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
    if err != nil {
        return false, fmt.Errorf("error getting %s record: %v", label, err)
    }
    defer reader.Close()

    dec := json.NewDecoder(reader)
    var resp map[string]interface{}
    if err = dec.Decode(&resp); err != nil {
        return false, fmt.Errorf("error decoding %s list response: %v", label, err)
    }
    items, ok := resp["items"].([]interface{})
    if !ok {
        return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
    } else if len(items) < 1 {
        return false, nil
    }
    if munge != nil {
        munge(items[0])
    }
    // Re-encode it using indentation to improve readability
    enc := json.NewEncoder(w)
    enc.SetIndent("", " ")
    if err = enc.Encode(items[0]); err != nil {
        return false, fmt.Errorf("error logging %s record: %v", label, err)
    }
    err = w.Close()
    if err != nil {
        return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
    }
    return true, nil
}
func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
    stdoutPath := mntPath[len(runner.Container.OutputPath):]
    index := strings.LastIndex(stdoutPath, "/")
    if index > 0 {
        subdirs := stdoutPath[:index]
        if subdirs != "" {
            st, err := os.Stat(runner.HostOutputDir)
            if err != nil {
                return nil, fmt.Errorf("While Stat on temp dir: %v", err)
            }
            stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
            err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
            if err != nil {
                return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
            }
        }
    }
    stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
    if err != nil {
        return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
    }

    return stdoutFile, nil
}
// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {
    var stdin io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
    if mnt, ok := runner.Container.Mounts["stdin"]; ok {
        switch mnt.Kind {
        case "collection":
            var collID string
            if mnt.UUID != "" {
                collID = mnt.UUID
            } else {
                collID = mnt.PortableDataHash
            }
            path := runner.ArvMountPoint + "/by_id/" + collID + "/" + mnt.Path
            f, err := os.Open(path)
            if err != nil {
                return err
            }
            stdin = f
        case "json":
            j, err := json.Marshal(mnt.Content)
            if err != nil {
                return fmt.Errorf("error encoding stdin json data: %v", err)
            }
            stdin = ioutil.NopCloser(bytes.NewReader(j))
        default:
            return fmt.Errorf("stdin mount has unsupported kind %q", mnt.Kind)
        }
    }
    var stdout, stderr io.WriteCloser
    if mnt, ok := runner.Container.Mounts["stdout"]; ok {
        f, err := runner.getStdoutFile(mnt.Path)
        if err != nil {
            return err
        }
        stdout = f
    } else if w, err := runner.NewLogWriter("stdout"); err != nil {
        return err
    } else {
        stdout = NewThrottledLogger(w)
    }

    if mnt, ok := runner.Container.Mounts["stderr"]; ok {
        f, err := runner.getStdoutFile(mnt.Path)
        if err != nil {
            return err
        }
        stderr = f
    } else if w, err := runner.NewLogWriter("stderr"); err != nil {
        return err
    } else {
        stderr = NewThrottledLogger(w)
    }
    env := runner.Container.Environment
    enableNetwork := runner.enableNetwork == "always"
    if runner.Container.RuntimeConstraints.API {
        enableNetwork = true
        tok, err := runner.ContainerToken()
        if err != nil {
            return err
        }
        env = map[string]string{}
        for k, v := range runner.Container.Environment {
            env[k] = v
        }
        env["ARVADOS_API_TOKEN"] = tok
        env["ARVADOS_API_HOST"] = os.Getenv("ARVADOS_API_HOST")
        env["ARVADOS_API_HOST_INSECURE"] = os.Getenv("ARVADOS_API_HOST_INSECURE")
    }
    workdir := runner.Container.Cwd
    if workdir == "." {
        // both "" and "." mean default
        workdir = ""
    }
    ram := runner.Container.RuntimeConstraints.RAM
    if !runner.enableMemoryLimit {
        ram = 0
    }
    runner.executorStdin = stdin
    runner.executorStdout = stdout
    runner.executorStderr = stderr
    return runner.executor.Create(containerSpec{
        Image:         imageID,
        VCPUs:         runner.Container.RuntimeConstraints.VCPUs,
        RAM:           ram,
        WorkingDir:    workdir,
        Env:           env,
        BindMounts:    bindmounts,
        Command:       runner.Container.Command,
        EnableNetwork: enableNetwork,
        NetworkMode:   runner.networkMode,
        CgroupParent:  runner.setCgroupParent,
        Stdin:         stdin,
        Stdout:        stdout,
        Stderr:        stderr,
    })
}
// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
    runner.CrunchLog.Printf("Starting container")
    runner.cStateLock.Lock()
    defer runner.cStateLock.Unlock()
    if runner.cCancelled {
        return ErrCancelled
    }
    err := runner.executor.Start()
    if err != nil {
        var advice string
        if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
            advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or the script has Windows line endings.", runner.Container.Command[0])
        }
        return fmt.Errorf("could not start container: %v%s", err, advice)
    }
    return nil
}
// WaitFinish waits for the container to terminate, captures the exit
// code, and closes the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() error {
    runner.CrunchLog.Print("Waiting for container to finish")
    var timeout <-chan time.Time
    if s := runner.Container.SchedulingParameters.MaxRunTime; s > 0 {
        timeout = time.After(time.Duration(s) * time.Second)
    }
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    go func() {
        select {
        case <-timeout:
            runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
            runner.stop(nil)
        case <-runner.ArvMountExit:
            runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
            runner.stop(nil)
        case <-ctx.Done():
        }
    }()
    exitcode, err := runner.executor.Wait(ctx)
    if err != nil {
        runner.checkBrokenNode(err)
        return err
    }
    runner.ExitCode = &exitcode

    var returnErr error
    if err = runner.executorStdin.Close(); err != nil {
        err = fmt.Errorf("error closing container stdin: %s", err)
        runner.CrunchLog.Printf("%s", err)
        returnErr = err
    }
    if err = runner.executorStdout.Close(); err != nil {
        err = fmt.Errorf("error closing container stdout: %s", err)
        runner.CrunchLog.Printf("%s", err)
        if returnErr == nil {
            returnErr = err
        }
    }
    if err = runner.executorStderr.Close(); err != nil {
        err = fmt.Errorf("error closing container stderr: %s", err)
        runner.CrunchLog.Printf("%s", err)
        if returnErr == nil {
            returnErr = err
        }
    }

    if runner.statReporter != nil {
        runner.statReporter.Stop()
        err = runner.statLogger.Close()
        if err != nil {
            runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
        }
    }
    return returnErr
}
func (runner *ContainerRunner) updateLogs() {
    ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
    defer ticker.Stop()

    sigusr1 := make(chan os.Signal, 1)
    signal.Notify(sigusr1, syscall.SIGUSR1)
    defer signal.Stop(sigusr1)

    saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
    saveAtSize := crunchLogUpdateSize
    var savedSize int64
    for {
        select {
        case <-ticker.C:
        case <-sigusr1:
            saveAtTime = time.Now()
        }
        runner.logMtx.Lock()
        done := runner.LogsPDH != nil
        runner.logMtx.Unlock()
        if done {
            return
        }
        size := runner.LogCollection.Size()
        if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
            continue
        }
        saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
        saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
        saved, err := runner.saveLogCollection(false)
        if err != nil {
            runner.CrunchLog.Printf("error updating log collection: %s", err)
            continue
        }

        var updated arvados.Container
        err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
            "container": arvadosclient.Dict{"log": saved.PortableDataHash},
        }, &updated)
        if err != nil {
            runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
            continue
        }

        savedSize = size
    }
}
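
// Sizing note: the ticker above fires at crunchLogUpdatePeriod/360, so
// if (for example) crunchLogUpdatePeriod were 30 minutes, the collection
// size would be rechecked every 5 seconds, while an actual save happens
// only when the saveAtTime/saveAtSize thresholds are crossed or SIGUSR1
// forces one.
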
func (runner *ContainerRunner) reportArvMountWarning(pattern, text string) {
    var updated arvados.Container
    err := runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
        "container": arvadosclient.Dict{
            "runtime_status": arvadosclient.Dict{
                "warning":       "arv-mount: " + pattern,
                "warningDetail": text,
            },
        },
    }, &updated)
    if err != nil {
        runner.CrunchLog.Printf("error updating container runtime_status: %s", err)
    }
}
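
// The update above leaves a record like the following in the container's
// runtime_status (detail text is illustrative):
//
//	"runtime_status": {
//	  "warning": "arv-mount: Unhandled exception during FUSE operation",
//	  "warningDetail": "<the log line that matched the pattern>"
//	}
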
// CaptureOutput saves data from the container's output directory if
// needed, and updates the container output accordingly.
func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) error {
    if runner.Container.RuntimeConstraints.API {
        // Output may have been set directly by the container, so
        // refresh the container record to check.
        err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
            nil, &runner.Container)
        if err != nil {
            return err
        }
        if runner.Container.Output != "" {
            // Container output is already set.
            runner.OutputPDH = &runner.Container.Output
            return nil
        }
    }

    txt, err := (&copier{
        client:        runner.containerClient,
        arvClient:     runner.ContainerArvClient,
        keepClient:    runner.ContainerKeepClient,
        hostOutputDir: runner.HostOutputDir,
        ctrOutputDir:  runner.Container.OutputPath,
        bindmounts:    bindmounts,
        mounts:        runner.Container.Mounts,
        secretMounts:  runner.SecretMounts,
        logger:        runner.CrunchLog,
    }).Copy()
    if err != nil {
        return err
    }
    if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
        runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
        fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
        if err != nil {
            return err
        }
        txt, err = fs.MarshalManifest(".")
        if err != nil {
            return err
        }
    }
    var resp arvados.Collection
    err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
        "ensure_unique_name": true,
        "collection": arvadosclient.Dict{
            "name":          "output for " + runner.Container.UUID,
            "manifest_text": txt,
        },
    }, &resp)
    if err != nil {
        return fmt.Errorf("error creating output collection: %v", err)
    }
    runner.OutputPDH = &resp.PortableDataHash
    return nil
}
func (runner *ContainerRunner) CleanupDirs() {
    if runner.ArvMount != nil {
        var delay int64 = 8
        umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
        umount.Stdout = runner.CrunchLog
        umount.Stderr = runner.CrunchLog
        runner.CrunchLog.Printf("Running %v", umount.Args)
        umnterr := umount.Start()

        if umnterr != nil {
            runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
            runner.ArvMount.Process.Kill()
        } else {
            // If arv-mount --unmount gets stuck for any reason, we
            // don't want to wait for it forever. Do Wait() in a goroutine
            // so it doesn't block crunch-run.
            umountExit := make(chan error)
            go func() {
                mnterr := umount.Wait()
                if mnterr != nil {
                    runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
                }
                umountExit <- mnterr
            }()

            for again := true; again; {
                again = false
                select {
                case <-umountExit:
                    umount = nil
                    again = true
                case <-runner.ArvMountExit:
                case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
                    runner.CrunchLog.Printf("Timed out waiting for unmount")
                    if umount != nil {
                        umount.Process.Kill()
                    }
                    runner.ArvMount.Process.Kill()
                }
            }
        }
        runner.ArvMount = nil
    }

    if runner.ArvMountPoint != "" {
        if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
            runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
        }
        runner.ArvMountPoint = ""
    }

    if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
        runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
    }
}
// CommitLogs posts the collection containing the final container logs.
func (runner *ContainerRunner) CommitLogs() error {
    func() {
        // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
        runner.cStateLock.Lock()
        defer runner.cStateLock.Unlock()

        runner.CrunchLog.Print(runner.finalState)

        if runner.arvMountLog != nil {
            runner.arvMountLog.Close()
        }
        runner.CrunchLog.Close()

        // Closing CrunchLog above allows the buffered logs to be
        // committed to Keep at this point, but re-open the crunch
        // log with ArvClient in case there are any further errors
        // (such as failing to write the log to Keep!) while
        // shutting down.
        runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
            ArvClient:     runner.DispatcherArvClient,
            UUID:          runner.Container.UUID,
            loggingStream: "crunch-run",
        })
        runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
    }()

    if runner.keepstoreLogger != nil {
        // Flush any buffered logs from our local keepstore
        // process. Discard anything logged after this point
        // -- it won't end up in the log collection, so
        // there's no point writing it to the collectionfs.
        runner.keepstoreLogbuf.SetWriter(io.Discard)
        runner.keepstoreLogger.Close()
        runner.keepstoreLogger = nil
    }

    if runner.LogsPDH != nil {
        // If we have already assigned something to LogsPDH,
        // we must be closing the re-opened log, which won't
        // end up getting attached to the container record and
        // therefore doesn't need to be saved as a collection
        // -- it exists only to send logs to other channels.
        return nil
    }

    saved, err := runner.saveLogCollection(true)
    if err != nil {
        return fmt.Errorf("error saving log collection: %s", err)
    }
    runner.logMtx.Lock()
    defer runner.logMtx.Unlock()
    runner.LogsPDH = &saved.PortableDataHash
    return nil
}
func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
    runner.logMtx.Lock()
    defer runner.logMtx.Unlock()
    if runner.LogsPDH != nil {
        // Already finalized.
        return
    }
    updates := arvadosclient.Dict{
        "name": "logs for " + runner.Container.UUID,
    }
    mt, err1 := runner.LogCollection.MarshalManifest(".")
    if err1 == nil {
        // Only send updated manifest text if there was no
        // error.
        updates["manifest_text"] = mt
    }

    // Even if flushing the manifest had an error, we still want
    // to update the log record, if possible, to push the trash_at
    // and delete_at times into the future. Details in the
    // associated bug report.
    if final {
        updates["is_trashed"] = true
    } else {
        exp := time.Now().Add(crunchLogUpdatePeriod * 24)
        updates["trash_at"] = exp
        updates["delete_at"] = exp
    }
    reqBody := arvadosclient.Dict{"collection": updates}
    var err2 error
    if runner.logUUID == "" {
        reqBody["ensure_unique_name"] = true
        err2 = runner.DispatcherArvClient.Create("collections", reqBody, &response)
    } else {
        err2 = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
    }
    if err2 == nil {
        runner.logUUID = response.UUID
    }

    if err1 != nil || err2 != nil {
        err = fmt.Errorf("error recording logs: %q, %q", err1, err2)
    }
    return
}
// UpdateContainerRunning updates the container state to "Running".
func (runner *ContainerRunner) UpdateContainerRunning() error {
    runner.cStateLock.Lock()
    defer runner.cStateLock.Unlock()
    if runner.cCancelled {
        return ErrCancelled
    }
    return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
        arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running", "gateway_address": runner.gateway.Address}}, nil)
}
// ContainerToken returns the api_token the container (and any
// arv-mount processes) are allowed to use.
func (runner *ContainerRunner) ContainerToken() (string, error) {
    if runner.token != "" {
        return runner.token, nil
    }

    var auth arvados.APIClientAuthorization
    err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
    if err != nil {
        return "", err
    }
    runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
    return runner.token, nil
}
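
// The returned token has the standard Arvados "v2" form (all values
// here are fabricated):
//
//	v2/zzzzz-gj3su-0123456789abcde/<secret>/zzzzz-dz642-0123456789abcde
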
// UpdateContainerFinal updates the container record state on API
// server to "Complete" or "Cancelled".
func (runner *ContainerRunner) UpdateContainerFinal() error {
    update := arvadosclient.Dict{}
    update["state"] = runner.finalState
    if runner.LogsPDH != nil {
        update["log"] = *runner.LogsPDH
    }
    if runner.finalState == "Complete" {
        if runner.ExitCode != nil {
            update["exit_code"] = *runner.ExitCode
        }
        if runner.OutputPDH != nil {
            update["output"] = *runner.OutputPDH
        }
    }
    return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
}
// IsCancelled returns the value of Cancelled, with goroutine safety.
func (runner *ContainerRunner) IsCancelled() bool {
    runner.cStateLock.Lock()
    defer runner.cStateLock.Unlock()
    return runner.cCancelled
}
// NewArvLogWriter creates an ArvLogWriter.
func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
    writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
    if err != nil {
        return nil, err
    }
    return &ArvLogWriter{
        ArvClient:     runner.DispatcherArvClient,
        UUID:          runner.Container.UUID,
        loggingStream: name,
        writeCloser:   writer,
    }, nil
}
// Run the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
    runner.CrunchLog.Printf("crunch-run %s started", cmd.Version.String())
    runner.CrunchLog.Printf("Executing container '%s' using %s runtime", runner.Container.UUID, runner.executor.Runtime())

    hostname, hosterr := os.Hostname()
    if hosterr != nil {
        runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
    } else {
        runner.CrunchLog.Printf("Executing on host '%s'", hostname)
    }

    runner.finalState = "Queued"

    defer func() {
        runner.CleanupDirs()

        runner.CrunchLog.Printf("crunch-run finished")
        runner.CrunchLog.Close()
    }()

    err = runner.fetchContainerRecord()
    if err != nil {
        return
    }
    if runner.Container.State != "Locked" {
        return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
    }
    var bindmounts map[string]bindmount
    defer func() {
        // checkErr prints e (unless it's nil) and sets err to
        // e (unless err is already non-nil). Thus, if err
        // hasn't already been assigned when Run() returns,
        // this cleanup func will cause Run() to return the
        // first non-nil error that is passed to checkErr().
        checkErr := func(errorIn string, e error) {
            if e == nil {
                return
            }
            runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
            if err == nil {
                err = e
            }
            if runner.finalState == "Complete" {
                // There was an error in the finalization.
                runner.finalState = "Cancelled"
            }
        }

        // Log the error encountered in Run(), if any
        checkErr("Run", err)

        if runner.finalState == "Queued" {
            runner.UpdateContainerFinal()
            return
        }

        if runner.IsCancelled() {
            runner.finalState = "Cancelled"
            // but don't return yet -- we still want to
            // capture partial output and write logs
        }

        if bindmounts != nil {
            checkErr("CaptureOutput", runner.CaptureOutput(bindmounts))
        }
        checkErr("stopHoststat", runner.stopHoststat())
        checkErr("CommitLogs", runner.CommitLogs())
        runner.CleanupDirs()
        checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
    }()
    runner.setupSignals()
    err = runner.startHoststat()
    if err != nil {
        return
    }

    // set up FUSE mount and binds
    bindmounts, err = runner.SetupMounts()
    if err != nil {
        runner.finalState = "Cancelled"
        err = fmt.Errorf("While setting up mounts: %v", err)
        return
    }

    // check for and/or load image
    imageID, err := runner.LoadImage()
    if err != nil {
        if !runner.checkBrokenNode(err) {
            // Failed to load image but not due to a "broken node"
            // condition, probably user error.
            runner.finalState = "Cancelled"
        }
        err = fmt.Errorf("While loading container image: %v", err)
        return
    }

    err = runner.CreateContainer(imageID, bindmounts)
    if err != nil {
        return
    }
    err = runner.LogHostInfo()
    if err != nil {
        return
    }
    err = runner.LogNodeRecord()
    if err != nil {
        return
    }
    err = runner.LogContainerRecord()
    if err != nil {
        return
    }

    if runner.IsCancelled() {
        return
    }

    err = runner.UpdateContainerRunning()
    if err != nil {
        return
    }
    runner.finalState = "Cancelled"

    err = runner.startCrunchstat()
    if err != nil {
        return
    }

    err = runner.StartContainer()
    if err != nil {
        runner.checkBrokenNode(err)
        return
    }

    err = runner.WaitFinish()
    if err == nil && !runner.IsCancelled() {
        runner.finalState = "Complete"
    }
    return
}
// Fetch the current container record (uuid = runner.Container.UUID)
// into runner.Container.
func (runner *ContainerRunner) fetchContainerRecord() error {
    reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
    if err != nil {
        return fmt.Errorf("error fetching container record: %v", err)
    }
    defer reader.Close()

    dec := json.NewDecoder(reader)
    err = dec.Decode(&runner.Container)
    if err != nil {
        return fmt.Errorf("error decoding container record: %v", err)
    }

    var sm struct {
        SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
    }

    containerToken, err := runner.ContainerToken()
    if err != nil {
        return fmt.Errorf("error getting container token: %v", err)
    }

    runner.ContainerArvClient, runner.ContainerKeepClient,
        runner.containerClient, err = runner.MkArvClient(containerToken)
    if err != nil {
        return fmt.Errorf("error creating container API client: %v", err)
    }

    runner.ContainerKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
    runner.DispatcherKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)

    err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
    if err != nil {
        if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
            return fmt.Errorf("error fetching secret_mounts: %v", err)
        }
        // ok && apierr.HttpStatusCode == 404, which means
        // secret_mounts isn't supported by this API server.
    }
    runner.SecretMounts = sm.SecretMounts

    return nil
}
// NewContainerRunner creates a new container runner.
func NewContainerRunner(dispatcherClient *arvados.Client,
    dispatcherArvClient IArvadosClient,
    dispatcherKeepClient IKeepClient,
    containerUUID string) (*ContainerRunner, error) {

    cr := &ContainerRunner{
        dispatcherClient:     dispatcherClient,
        DispatcherArvClient:  dispatcherArvClient,
        DispatcherKeepClient: dispatcherKeepClient,
    }
    cr.NewLogWriter = cr.NewArvLogWriter
    cr.RunArvMount = cr.ArvMountCmd
    cr.MkTempDir = ioutil.TempDir
    cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
        cl, err := arvadosclient.MakeArvadosClient()
        if err != nil {
            return nil, nil, nil, err
        }
        cl.ApiToken = token
        kc, err := keepclient.MakeKeepClient(cl)
        if err != nil {
            return nil, nil, nil, err
        }
        c2 := arvados.NewClientFromEnv()
        c2.AuthToken = token
        return cl, kc, c2, nil
    }
    var err error
    cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
    if err != nil {
        return nil, err
    }
    cr.Container.UUID = containerUUID
    w, err := cr.NewLogWriter("crunch-run")
    if err != nil {
        return nil, err
    }
    cr.CrunchLog = NewThrottledLogger(w)
    cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)

    loadLogThrottleParams(dispatcherArvClient)
    go cr.updateLogs()

    return cr, nil
}
func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
    log := log.New(stderr, "", 0)
    flags := flag.NewFlagSet(prog, flag.ContinueOnError)
    statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
    cgroupRoot := flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
    cgroupParent := flags.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
    cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
    caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
    detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
    stdinConfig := flags.Bool("stdin-config", false, "Load config and environment variables from JSON message on stdin")
    sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
    kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
    list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes")
    enableMemoryLimit := flags.Bool("enable-memory-limit", true, "tell container runtime to limit container's memory usage")
    enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
    networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
    memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
    runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
    flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
    ignoreDetachFlag := false
    if len(args) > 0 && args[0] == "-no-detach" {
        // This process was invoked by a parent process, which
        // has passed along its own arguments, including
        // -detach, after the leading -no-detach flag. Strip
        // the leading -no-detach flag (it's not recognized by
        // flags.Parse()) and ignore the -detach flag that
        // comes after it.
        args = args[1:]
        ignoreDetachFlag = true
    }

    if err := flags.Parse(args); err == flag.ErrHelp {
        return 0
    } else if err != nil {
        log.Print(err)
        return 1
    }
    containerUUID := flags.Arg(0)

    switch {
    case *detach && !ignoreDetachFlag:
        return Detach(containerUUID, prog, args, os.Stdin, os.Stdout, os.Stderr)
    case *kill >= 0:
        return KillProcess(containerUUID, syscall.Signal(*kill), os.Stdout, os.Stderr)
    case *list:
        return ListProcesses(os.Stdout, os.Stderr)
    }

    if len(containerUUID) != 27 {
        log.Printf("usage: %s [options] UUID", prog)
        return 1
    }
    var conf ConfigData
    if *stdinConfig {
        err := json.NewDecoder(stdin).Decode(&conf)
        if err != nil {
            log.Printf("decode stdin: %s", err)
            return 1
        }
        for k, v := range conf.Env {
            err = os.Setenv(k, v)
            if err != nil {
                log.Printf("setenv(%q): %s", k, err)
                return 1
            }
        }
        if conf.Cluster != nil {
            // ClusterID is missing from the JSON
            // representation, but we need it to generate
            // a valid config file for keepstore, so we
            // fill it using the container UUID prefix.
            conf.Cluster.ClusterID = containerUUID[:5]
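            // For example, a container UUID of the form
            // "xvk9p-dz642-0123456789abcde" (fabricated) yields
            // ClusterID "xvk9p".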
        }
    }

    log.Printf("crunch-run %s started", cmd.Version.String())
    time.Sleep(*sleep)

    if *caCertsPath != "" {
        arvadosclient.CertFiles = []string{*caCertsPath}
    }
    var keepstoreLogbuf bufThenWrite
    keepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))
    if err != nil {
        log.Print(err)
        return 1
    }
    if keepstore != nil {
        defer keepstore.Process.Kill()
    }

    api, err := arvadosclient.MakeArvadosClient()
    if err != nil {
        log.Printf("%s: %v", containerUUID, err)
        return 1
    }

    kc, err := keepclient.MakeKeepClient(api)
    if err != nil {
        log.Printf("%s: %v", containerUUID, err)
        return 1
    }
    kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}

    cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
    if err != nil {
        log.Print(err)
        return 1
    }
    if keepstore == nil {
        // Log explanation (if any) for why we're not running
        // a local keepstore.
        var buf bytes.Buffer
        keepstoreLogbuf.SetWriter(&buf)
        if buf.Len() > 0 {
            cr.CrunchLog.Printf("%s", strings.TrimSpace(buf.String()))
        }
    } else if logWhat := conf.Cluster.Containers.LocalKeepLogsToContainerLog; logWhat == "none" {
        cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
        keepstoreLogbuf.SetWriter(io.Discard)
    } else {
        cr.CrunchLog.Printf("using local keepstore process (pid %d) at %s, writing logs to keepstore.txt in log collection", keepstore.Process.Pid, os.Getenv("ARVADOS_KEEP_SERVICES"))
        logwriter, err := cr.NewLogWriter("keepstore")
        if err != nil {
            log.Print(err)
            return 1
        }
        cr.keepstoreLogger = NewThrottledLogger(logwriter)

        var writer io.WriteCloser = cr.keepstoreLogger
        if logWhat == "errors" {
            writer = &filterKeepstoreErrorsOnly{WriteCloser: writer}
        } else if logWhat != "all" {
            // should have been caught earlier by
            // dispatcher's config loader
            log.Printf("invalid value for Containers.LocalKeepLogsToContainerLog: %q", logWhat)
            return 1
        }
        err = keepstoreLogbuf.SetWriter(writer)
        if err != nil {
            log.Print(err)
            return 1
        }
        cr.keepstoreLogbuf = &keepstoreLogbuf
    }
    switch *runtimeEngine {
    case "docker":
        cr.executor, err = newDockerExecutor(containerUUID, cr.CrunchLog.Printf, cr.containerWatchdogInterval)
    case "singularity":
        cr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)
    default:
        cr.CrunchLog.Printf("%s: unsupported RuntimeEngine %q", containerUUID, *runtimeEngine)
        cr.CrunchLog.Close()
        return 1
    }
    if err != nil {
        cr.CrunchLog.Printf("%s: %v", containerUUID, err)
        cr.checkBrokenNode(err)
        cr.CrunchLog.Close()
        return 1
    }
    defer cr.executor.Close()
    gwAuthSecret := os.Getenv("GatewayAuthSecret")
    os.Unsetenv("GatewayAuthSecret")
    if gwAuthSecret == "" {
        // not safe to run a gateway service without an auth
        // secret
        cr.CrunchLog.Printf("Not starting a gateway server (GatewayAuthSecret was not provided by dispatcher)")
    } else if gwListen := os.Getenv("GatewayAddress"); gwListen == "" {
        // dispatcher did not tell us which external IP
        // address to advertise --> no gateway service
        cr.CrunchLog.Printf("Not starting a gateway server (GatewayAddress was not provided by dispatcher)")
    } else if de, ok := cr.executor.(*dockerExecutor); ok {
        cr.gateway = Gateway{
            Address:            gwListen,
            AuthSecret:         gwAuthSecret,
            ContainerUUID:      containerUUID,
            DockerContainerID:  &de.containerID,
            ContainerIPAddress: dockerContainerIPAddress(&de.containerID),
        }
        err = cr.gateway.Start()
        if err != nil {
            log.Printf("error starting gateway server: %s", err)
            return 1
        }
    }
    parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerUUID+".")
    if tmperr != nil {
        log.Printf("%s: %v", containerUUID, tmperr)
        return 1
    }

    cr.parentTemp = parentTemp
    cr.statInterval = *statInterval
    cr.cgroupRoot = *cgroupRoot
    cr.expectCgroupParent = *cgroupParent
    cr.enableMemoryLimit = *enableMemoryLimit
    cr.enableNetwork = *enableNetwork
    cr.networkMode = *networkMode
    if *cgroupParentSubsystem != "" {
        p := findCgroup(*cgroupParentSubsystem)
        cr.setCgroupParent = p
        cr.expectCgroupParent = p
    }

    runerr := cr.Run()
    if *memprofile != "" {
        f, err := os.Create(*memprofile)
        if err != nil {
            log.Printf("could not create memory profile: %s", err)
        }
        runtime.GC() // get up-to-date statistics
        if err := pprof.WriteHeapProfile(f); err != nil {
            log.Printf("could not write memory profile: %s", err)
        }
        closeerr := f.Close()
        if closeerr != nil {
            log.Printf("closing memprofile file: %s", closeerr)
        }
    }

    if runerr != nil {
        log.Printf("%s: %v", containerUUID, runerr)
        return 1
    }
    return 0
}
func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, error) {
    if configData.Cluster == nil || configData.KeepBuffers < 1 {
        return nil, nil
    }
    for uuid, vol := range configData.Cluster.Volumes {
        if len(vol.AccessViaHosts) > 0 {
            fmt.Fprintf(logbuf, "not starting a local keepstore process because a volume (%s) uses AccessViaHosts\n", uuid)
            return nil, nil
        }
        if !vol.ReadOnly && vol.Replication < configData.Cluster.Collections.DefaultReplication {
            fmt.Fprintf(logbuf, "not starting a local keepstore process because a writable volume (%s) has replication less than Collections.DefaultReplication (%d < %d)\n", uuid, vol.Replication, configData.Cluster.Collections.DefaultReplication)
            return nil, nil
        }
    }

    // Rather than have an alternate way to tell keepstore how
    // many buffers to use when starting it this way, we just
    // modify the cluster configuration that we feed it on stdin.
    configData.Cluster.API.MaxKeepBlobBuffers = configData.KeepBuffers

    ln, err := net.Listen("tcp", "localhost:0")
    if err != nil {
        return nil, err
    }
    _, port, err := net.SplitHostPort(ln.Addr().String())
    if err != nil {
        ln.Close()
        return nil, err
    }
    ln.Close()
    url := "http://localhost:" + port

    fmt.Fprintf(logbuf, "starting keepstore on %s\n", url)

    var confJSON bytes.Buffer
    err = json.NewEncoder(&confJSON).Encode(arvados.Config{
        Clusters: map[string]arvados.Cluster{
            configData.Cluster.ClusterID: *configData.Cluster,
        },
    })
    if err != nil {
        return nil, err
    }
    cmd := exec.Command("/proc/self/exe", "keepstore", "-config=-")
    if target, err := os.Readlink(cmd.Path); err == nil && strings.HasSuffix(target, ".test") {
        // If we're a 'go test' process, running
        // /proc/self/exe would start the test suite in a
        // child process, which is not what we want.
        cmd.Path, _ = exec.LookPath("go")
        cmd.Args = append([]string{"go", "run", "../../cmd/arvados-server"}, cmd.Args[1:]...)
        cmd.Env = os.Environ()
    }
    cmd.Stdin = &confJSON
    cmd.Stdout = logbuf
    cmd.Stderr = logbuf
    cmd.Env = append(cmd.Env,
        "ARVADOS_SERVICE_INTERNAL_URL="+url)
    err = cmd.Start()
    if err != nil {
        return nil, fmt.Errorf("error starting keepstore process: %w", err)
    }
    cmdExited := false
    go func() {
        cmd.Wait()
        cmdExited = true
    }()

    ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
    defer cancel()
    poll := time.NewTicker(time.Second / 10)
    defer poll.Stop()
    client := http.Client{}
    for range poll.C {
        testReq, err := http.NewRequestWithContext(ctx, "GET", url+"/_health/ping", nil)
        if err != nil {
            return nil, err
        }
        testReq.Header.Set("Authorization", "Bearer "+configData.Cluster.ManagementToken)
        resp, err := client.Do(testReq)
        if err == nil {
            resp.Body.Close()
            if resp.StatusCode == http.StatusOK {
                break
            }
        }
        if cmdExited {
            return nil, fmt.Errorf("keepstore child process exited")
        }
        if ctx.Err() != nil {
            return nil, fmt.Errorf("timed out waiting for new keepstore process to report healthy")
        }
    }
    os.Setenv("ARVADOS_KEEP_SERVICES", url)
    return cmd, nil
}