// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0
	"git.arvados.org/arvados.git/lib/cmd"
	"git.arvados.org/arvados.git/lib/crunchstat"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadosclient"
	"git.arvados.org/arvados.git/sdk/go/keepclient"
	"git.arvados.org/arvados.git/sdk/go/manifest"

	"golang.org/x/net/context"
)

var Command = command{}
// IArvadosClient is the minimal set of Arvados API methods used by crunch-run.
type IArvadosClient interface {
	Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
	Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
	CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
	Discovery(key string) (interface{}, error)
}

// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")
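
// Illustrative only: a minimal sketch of how a test might stub out
// IArvadosClient by embedding the interface and overriding a single
// method. The names here are hypothetical, not part of crunch-run:
//
//	type stubArvClient struct{ IArvadosClient }
//
//	func (s stubArvClient) Discovery(key string) (interface{}, error) {
//		return "60", nil // canned value instead of a real API call
//	}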

// IKeepClient is the minimal set of Keep API methods used by crunch-run.
type IKeepClient interface {
	BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error)
	ReadAt(locator string, p []byte, off int) (int, error)
	ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
	LocalLocator(locator string) (string, error)
	SetStorageClasses(sc []string)
}
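
// Illustrative only: LocalLocator rewrites a remote block locator
// (one carrying a +R hint) into one signed by the local cluster, so a
// manifest can be saved locally. The values below are made up:
//
//	local, err := kc.LocalLocator("acbd18db4cc2f85cedef654fccc4a4d8+3+Rzzzzz-abcdefgabcdefg")
//	// local: "acbd18db4cc2f85cedef654fccc4a4d8+3+A..." (locally signed)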

// NewLogWriter is a factory function to create a new log writer.
type NewLogWriter func(name string) (io.WriteCloser, error)

// RunArvMount is a factory function to start an arv-mount process.
type RunArvMount func(args []string, tok string) (*exec.Cmd, error)

// MkTempDir creates a temporary directory, like ioutil.TempDir.
type MkTempDir func(string, string) (string, error)

// PsProcess is the interface used to inspect a running process.
type PsProcess interface {
	CmdlineSlice() ([]string, error)
}

// ContainerRunner is the main stateful struct used for a single
// execution of a container.
type ContainerRunner struct {
	executor       containerExecutor
	executorStdin  io.Closer
	executorStdout io.Closer
	executorStderr io.Closer

	// Dispatcher client is initialized with the Dispatcher token.
	// This is a privileged token used to manage container status
	// and logs.
	//
	// We have both dispatcherClient and DispatcherArvClient
	// because there are two different incompatible Arvados Go
	// SDKs and we have to use both (hopefully this gets fixed in
	// the future).
	dispatcherClient     *arvados.Client
	DispatcherArvClient  IArvadosClient
	DispatcherKeepClient IKeepClient

	// Container client is initialized with the Container token.
	// This token controls the permissions of the container, and
	// must be used for operations such as reading collections.
	//
	// Same comment as above applies to
	// containerClient/ContainerArvClient.
	containerClient     *arvados.Client
	ContainerArvClient  IArvadosClient
	ContainerKeepClient IKeepClient

	Container     arvados.Container
	NewLogWriter  NewLogWriter
	CrunchLog     *ThrottledLogger
	LogCollection arvados.CollectionFileSystem
	RunArvMount   RunArvMount
	Volumes       map[string]struct{}
	SigChan       chan os.Signal
	ArvMountExit  chan error
	SecretMounts  map[string]arvados.Mount
	MkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
	statLogger       io.WriteCloser
	statReporter     *crunchstat.Reporter
	hoststatLogger   io.WriteCloser
	hoststatReporter *crunchstat.Reporter
	statInterval     time.Duration
	// What we expect the container's cgroup parent to be.
	expectCgroupParent string
	// What we tell docker to use as the container's cgroup
	// parent. Note: Ideally we would use the same field for both
	// expectCgroupParent and setCgroupParent, and just make it
	// default to "docker". However, when using docker < 1.10 with
	// systemd, specifying a non-empty cgroup parent (even the
	// default value "docker") hits a docker bug
	// (https://github.com/docker/docker/issues/17126). Using two
	// separate fields makes it possible to use the "expect cgroup
	// parent to be X" feature even on sites where the "specify
	// cgroup parent" feature breaks.
	setCgroupParent string

	cStateLock sync.Mutex
	cCancelled bool // StopContainer() invoked

	enableMemoryLimit bool
	enableNetwork     string // one of "default" or "always"
	networkMode       string // "none", "host", or "" -- passed through to executor
	arvMountLog       *ThrottledLogger

	containerWatchdogInterval time.Duration

// setupSignals sets up signal handling to gracefully terminate the
// underlying container and update state when receiving a TERM, INT
// or QUIT signal.
func (runner *ContainerRunner) setupSignals() {
	runner.SigChan = make(chan os.Signal, 1)
	signal.Notify(runner.SigChan, syscall.SIGTERM)
	signal.Notify(runner.SigChan, syscall.SIGINT)
	signal.Notify(runner.SigChan, syscall.SIGQUIT)

	go func(sig chan os.Signal) {
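		// Each signal received on SigChan is forwarded to stop(),
		// which cancels and stops the underlying container.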
		for s := range sig {
			runner.stop(s)
		}
	}(runner.SigChan)
}

// stop the underlying container.
func (runner *ContainerRunner) stop(sig os.Signal) {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if sig != nil {
		runner.CrunchLog.Printf("caught signal: %v", sig)
	}
	runner.cCancelled = true
	runner.CrunchLog.Printf("stopping container")
	err := runner.executor.Stop()
	if err != nil {
		runner.CrunchLog.Printf("error stopping container: %s", err)
	}
}

var errorBlacklist = []string{
	"(?ms).*[Cc]annot connect to the Docker daemon.*",
	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
	"(?ms).*grpc: the connection is unavailable.*",
}
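
// Illustrative only: a daemon-down error message that the first
// pattern above would match, causing the node to be marked broken:
//
//	Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?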

var brokenNodeHook = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")

func (runner *ContainerRunner) runBrokenNodeHook() {
	if *brokenNodeHook == "" {
		path := filepath.Join(lockdir, brokenfile)
		runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
		f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
		if err != nil {
			runner.CrunchLog.Printf("Error writing %s: %s", path, err)
			return
		}
		f.Close()
	} else {
		runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
		c := exec.Command(*brokenNodeHook)
		c.Stdout = runner.CrunchLog
		c.Stderr = runner.CrunchLog
		err := c.Run()
		if err != nil {
			runner.CrunchLog.Printf("Error running broken node hook: %v", err)
		}
	}
}

func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
	for _, d := range errorBlacklist {
		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
			runner.runBrokenNodeHook()
			return true
		}
	}
	return false
}

// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store. If not, it loads
// the image from Keep.
func (runner *ContainerRunner) LoadImage() (string, error) {
	runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

	d, err := os.Open(runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage)
	if err != nil {
		return "", err
	}
	defer d.Close()
	allfiles, err := d.Readdirnames(-1)
	if err != nil {
		return "", err
	}
	var tarfiles []string
	for _, fnm := range allfiles {
		if strings.HasSuffix(fnm, ".tar") {
			tarfiles = append(tarfiles, fnm)
		}
	}
	if len(tarfiles) == 0 {
		return "", fmt.Errorf("image collection does not include a .tar image file")
	}
	if len(tarfiles) > 1 {
		return "", fmt.Errorf("cannot choose from multiple tar files in image collection: %v", tarfiles)
	}
	imageID := tarfiles[0][:len(tarfiles[0])-4]
	imageFile := runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage + "/" + tarfiles[0]
	runner.CrunchLog.Printf("Using Docker image id %q", imageID)

	if !runner.executor.ImageLoaded(imageID) {
		runner.CrunchLog.Print("Loading Docker image from keep")
		err = runner.executor.LoadImage(imageFile)
		if err != nil {
			return "", err
		}
	} else {
		runner.CrunchLog.Print("Docker image is available")
	}

	return imageID, nil
}

func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
	c = exec.Command("arv-mount", arvMountCmd...)

	// Copy our environment, but override ARVADOS_API_TOKEN with
	// the container auth token.
	for _, s := range os.Environ() {
		if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
			c.Env = append(c.Env, s)
		}
	}
	c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

	w, err := runner.NewLogWriter("arv-mount")
	if err != nil {
		return nil, err
	}
	runner.arvMountLog = NewThrottledLogger(w)
	c.Stdout = runner.arvMountLog
	c.Stderr = io.MultiWriter(runner.arvMountLog, os.Stderr)

	runner.CrunchLog.Printf("Running %v", c.Args)

	err = c.Start()
	if err != nil {
		return nil, err
	}

	statReadme := make(chan bool)
	runner.ArvMountExit = make(chan error)

	keepStatting := true
	go func() {
		for keepStatting {
			time.Sleep(100 * time.Millisecond)
			_, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
			if err == nil {
				keepStatting = false
				statReadme <- true
			}
		}
		close(statReadme)
	}()

	go func() {
		mnterr := c.Wait()
		if mnterr != nil {
			runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
		}
		runner.ArvMountExit <- mnterr
		close(runner.ArvMountExit)
	}()

	select {
	case <-statReadme:
		break
	case err := <-runner.ArvMountExit:
		runner.ArvMount = nil
		keepStatting = false
		return nil, err
	}

	return c, nil
}

func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
	if runner.ArvMountPoint == "" {
		runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
	}
	return
}

func copyfile(src string, dst string) (err error) {
	srcfile, err := os.Open(src)
	if err != nil {
		return
	}

	os.MkdirAll(path.Dir(dst), 0777)

	dstfile, err := os.Create(dst)
	if err != nil {
		return
	}
	_, err = io.Copy(dstfile, srcfile)
	if err != nil {
		return
	}

	err = srcfile.Close()
	err2 := dstfile.Close()
	if err == nil {
		err = err2
	}
	return
}

// SetupMounts sets up the container's mounts, starting arv-mount as
// needed, and returns the bind mounts to pass to the executor.
func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
	bindmounts := map[string]bindmount{}
	err := runner.SetupArvMountPoint("keep")
	if err != nil {
		return nil, fmt.Errorf("while creating keep mount temp dir: %v", err)
	}

	token, err := runner.ContainerToken()
	if err != nil {
		return nil, fmt.Errorf("could not get container token: %s", err)
	}
	runner.CrunchLog.Printf("container token %q", token)

	pdhOnly := true
	tmpcount := 0
	arvMountCmd := []string{
		"--storage-classes", strings.Join(runner.Container.OutputStorageClasses, ","),
		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}

	if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
		arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
	}
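
	// Illustrative only: with a 10-second stat interval and a
	// KeepCacheRAM of 268435456 requested in RuntimeConstraints, the
	// arguments assembled so far would include something like:
	//
	//	--storage-classes default --crunchstat-interval=10 --file-cache 268435456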

	collectionPaths := []string{}
	needCertMount := true
	type copyFile struct {
		src  string
		bind string
	}
	var copyFiles []copyFile

	var binds []string
	for bind := range runner.Container.Mounts {
		binds = append(binds, bind)
	}
	for bind := range runner.SecretMounts {
		if _, ok := runner.Container.Mounts[bind]; ok {
			return nil, fmt.Errorf("secret mount %q conflicts with regular mount", bind)
		}
		if runner.SecretMounts[bind].Kind != "json" &&
			runner.SecretMounts[bind].Kind != "text" {
			return nil, fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
				bind, runner.SecretMounts[bind].Kind)
		}
		binds = append(binds, bind)
	}
	sort.Strings(binds)

	for _, bind := range binds {
		mnt, ok := runner.Container.Mounts[bind]
		if !ok {
			mnt = runner.SecretMounts[bind]
		}
		if bind == "stdout" || bind == "stderr" {
			// Is it a "file" mount kind?
			if mnt.Kind != "file" {
				return nil, fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
			}

			// Does path start with OutputPath?
			prefix := runner.Container.OutputPath
			if !strings.HasSuffix(prefix, "/") {
				prefix += "/"
			}
			if !strings.HasPrefix(mnt.Path, prefix) {
				return nil, fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
			}
		}

		if bind == "stdin" {
			// Is it a "collection" mount kind?
			if mnt.Kind != "collection" && mnt.Kind != "json" {
				return nil, fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
			}
		}

		if bind == "/etc/arvados/ca-certificates.crt" {
			needCertMount = false
		}

		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
			if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
				return nil, fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
			}
		}

		switch {
		case mnt.Kind == "collection" && bind != "stdin":
			var src string
			if mnt.UUID != "" && mnt.PortableDataHash != "" {
				return nil, fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
			}
			if mnt.UUID != "" {
				if mnt.Writable {
					return nil, fmt.Errorf("writing to existing collections currently not permitted")
				}
				pdhOnly = false
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
			} else if mnt.PortableDataHash != "" {
				if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					return nil, fmt.Errorf("can never write to a collection specified by portable data hash")
				}
				idx := strings.Index(mnt.PortableDataHash, "/")
				if idx > 0 {
					mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
					mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
					runner.Container.Mounts[bind] = mnt
				}
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
				if mnt.Path != "" && mnt.Path != "." {
					if strings.HasPrefix(mnt.Path, "./") {
						mnt.Path = mnt.Path[2:]
					} else if strings.HasPrefix(mnt.Path, "/") {
						mnt.Path = mnt.Path[1:]
					}
					src += "/" + mnt.Path
				}
			} else {
				src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
				arvMountCmd = append(arvMountCmd, "--mount-tmp")
				arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
				tmpcount++
			}

			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = src
				bindmounts[bind] = bindmount{HostPath: src}
			} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
				copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
			} else if mnt.Writable {
				bindmounts[bind] = bindmount{HostPath: src}
			} else {
				bindmounts[bind] = bindmount{HostPath: src, ReadOnly: true}
			}
			collectionPaths = append(collectionPaths, src)

		case mnt.Kind == "tmp":
			var tmpdir string
			tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
			if err != nil {
				return nil, fmt.Errorf("while creating mount temp dir: %v", err)
			}
			st, staterr := os.Stat(tmpdir)
			if staterr != nil {
				return nil, fmt.Errorf("while Stat on temp dir: %v", staterr)
			}
			err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("while Chmod temp dir: %v", err)
			}
			bindmounts[bind] = bindmount{HostPath: tmpdir}
			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = tmpdir
			}

		case mnt.Kind == "json" || mnt.Kind == "text":
			var filedata []byte
			if mnt.Kind == "json" {
				filedata, err = json.Marshal(mnt.Content)
				if err != nil {
					return nil, fmt.Errorf("encoding json data: %v", err)
				}
			} else {
				text, ok := mnt.Content.(string)
				if !ok {
					return nil, fmt.Errorf("content for mount %q must be a string", bind)
				}
				filedata = []byte(text)
			}

			tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
			if err != nil {
				return nil, fmt.Errorf("creating temp dir: %v", err)
			}
			tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
			err = ioutil.WriteFile(tmpfn, filedata, 0444)
			if err != nil {
				return nil, fmt.Errorf("writing temp file: %v", err)
			}
			if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
				copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
			} else {
				bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
			}

		case mnt.Kind == "git_tree":
			tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
			if err != nil {
				return nil, fmt.Errorf("creating temp dir: %v", err)
			}
			err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
			if err != nil {
				return nil, err
			}
			bindmounts[bind] = bindmount{HostPath: tmpdir, ReadOnly: true}
		}
	}

	if runner.HostOutputDir == "" {
		return nil, fmt.Errorf("output path does not correspond to a writable mount point")
	}

	if needCertMount && runner.Container.RuntimeConstraints.API {
		for _, certfile := range arvadosclient.CertFiles {
			_, err := os.Stat(certfile)
			if err == nil {
				bindmounts["/etc/arvados/ca-certificates.crt"] = bindmount{HostPath: certfile, ReadOnly: true}
				break
			}
		}
	}

	if pdhOnly {
		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
	} else {
		arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
	}
	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)

	runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
	if err != nil {
		return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
	}

	for _, p := range collectionPaths {
		_, err = os.Stat(p)
		if err != nil {
			return nil, fmt.Errorf("while checking that input files exist: %v", err)
		}
	}

	for _, cp := range copyFiles {
		st, err := os.Stat(cp.src)
		if err != nil {
			return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
		if st.IsDir() {
			err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
				if walkerr != nil {
					return walkerr
				}
				target := path.Join(cp.bind, walkpath[len(cp.src):])
				if walkinfo.Mode().IsRegular() {
					copyerr := copyfile(walkpath, target)
					if copyerr != nil {
						return copyerr
					}
					return os.Chmod(target, walkinfo.Mode()|0777)
				} else if walkinfo.Mode().IsDir() {
					mkerr := os.MkdirAll(target, 0777)
					if mkerr != nil {
						return mkerr
					}
					return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
				} else {
					return fmt.Errorf("source %q is not a regular file or directory", cp.src)
				}
			})
		} else if st.Mode().IsRegular() {
			err = copyfile(cp.src, cp.bind)
			if err == nil {
				err = os.Chmod(cp.bind, st.Mode()|0777)
			}
		}
		if err != nil {
			return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
	}

	return bindmounts, nil
}
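
// Illustrative only: a sketch of the map SetupMounts returns, with
// hypothetical host paths:
//
//	bindmounts, _ := runner.SetupMounts()
//	// e.g. {
//	//   "/keep":          {HostPath: "/tmp/crunch-run.../keep1", ReadOnly: true},
//	//   "/tmp":           {HostPath: "/tmp/crunch-run.../tmp2"},
//	//   "/var/spool/cwl": {HostPath: "/tmp/crunch-run.../tmp3"},
//	// }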

func (runner *ContainerRunner) stopHoststat() error {
	if runner.hoststatReporter == nil {
		return nil
	}
	runner.hoststatReporter.Stop()
	err := runner.hoststatLogger.Close()
	if err != nil {
		return fmt.Errorf("error closing hoststat logs: %v", err)
	}
	return nil
}

func (runner *ContainerRunner) startHoststat() error {
	w, err := runner.NewLogWriter("hoststat")
	if err != nil {
		return err
	}
	runner.hoststatLogger = NewThrottledLogger(w)
	runner.hoststatReporter = &crunchstat.Reporter{
		Logger:     log.New(runner.hoststatLogger, "", 0),
		CgroupRoot: runner.cgroupRoot,
		PollPeriod: runner.statInterval,
	}
	runner.hoststatReporter.Start()
	return nil
}

func (runner *ContainerRunner) startCrunchstat() error {
	w, err := runner.NewLogWriter("crunchstat")
	if err != nil {
		return err
	}
	runner.statLogger = NewThrottledLogger(w)
	runner.statReporter = &crunchstat.Reporter{
		CID:          runner.executor.CgroupID(),
		Logger:       log.New(runner.statLogger, "", 0),
		CgroupParent: runner.expectCgroupParent,
		CgroupRoot:   runner.cgroupRoot,
		PollPeriod:   runner.statInterval,
		TempDir:      runner.parentTemp,
	}
	runner.statReporter.Start()
	return nil
}

type infoCommand struct {
	label string
	cmd   []string
}

// LogHostInfo logs info about the current host, for debugging and
// accounting purposes. Although it's logged as "node-info", this is
// about the environment where crunch-run is actually running, which
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
	w, err := runner.NewLogWriter("node-info")
	if err != nil {
		return
	}

	commands := []infoCommand{
		{
			label: "Host Information",
			cmd:   []string{"uname", "-a"},
		},
		{
			label: "CPU Information",
			cmd:   []string{"cat", "/proc/cpuinfo"},
		},
		{
			label: "Memory Information",
			cmd:   []string{"cat", "/proc/meminfo"},
		},
		{
			label: "Disk Space",
			cmd:   []string{"df", "-m", "/", os.TempDir()},
		},
		{
			label: "Disk INodes",
			cmd:   []string{"df", "-i", "/", os.TempDir()},
		},
	}

	// Run commands with informational output to be logged.
	for _, command := range commands {
		fmt.Fprintln(w, command.label)
		cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
		cmd.Stdout = w
		cmd.Stderr = w
		if err := cmd.Run(); err != nil {
			err = fmt.Errorf("while running command %q: %v", command.cmd, err)
			fmt.Fprintln(w, err)
			return err
		}
	}

	err = w.Close()
	if err != nil {
		return fmt.Errorf("while closing node-info logs: %v", err)
	}
	return nil
}

// LogContainerRecord gets and saves the raw JSON container record from the API server.
func (runner *ContainerRunner) LogContainerRecord() error {
	logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
	if !logged && err == nil {
		err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
	}
	return err
}

// LogNodeRecord logs the current host's InstanceType config entry (or
// the arvados#node record, if running via crunch-dispatch-slurm).
func (runner *ContainerRunner) LogNodeRecord() error {
	if it := os.Getenv("InstanceType"); it != "" {
		// Dispatched via arvados-dispatch-cloud. Save
		// InstanceType config fragment received from
		// dispatcher on stdin.
		w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
		if err != nil {
			return err
		}
		defer w.Close()
		_, err = io.WriteString(w, it)
		if err != nil {
			return err
		}
		return w.Close()
	}
	// Dispatched via crunch-dispatch-slurm. Look up the
	// apiserver's node record corresponding to this hostname.
	hostname := os.Getenv("SLURMD_NODENAME")
	if hostname == "" {
		hostname, _ = os.Hostname()
	}
	_, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
		// The "info" field has admin-only info when
		// obtained with a privileged token, and
		// should not be logged.
		node, ok := resp.(map[string]interface{})
		if ok {
			delete(node, "info")
		}
	})
	return err
}

func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
	writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return false, err
	}
	w := &ArvLogWriter{
		ArvClient:     runner.DispatcherArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: label,
		writeCloser:   writer,
	}

	reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
	if err != nil {
		return false, fmt.Errorf("error getting %s record: %v", label, err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	var resp map[string]interface{}
	if err = dec.Decode(&resp); err != nil {
		return false, fmt.Errorf("error decoding %s list response: %v", label, err)
	}
	items, ok := resp["items"].([]interface{})
	if !ok {
		return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
	} else if len(items) < 1 {
		return false, nil
	}
	if munge != nil {
		munge(items[0])
	}
	// Re-encode it using indentation to improve readability
	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	if err = enc.Encode(items[0]); err != nil {
		return false, fmt.Errorf("error logging %s record: %v", label, err)
	}
	err = w.Close()
	if err != nil {
		return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
	}
	return true, nil
}
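
// Illustrative only: logAPIResponse expects a standard Arvados list
// response and logs its first item, e.g. (values made up):
//
//	{"kind": "arvados#containerList", "items_available": 1, "items": [{"uuid": "zzzzz-dz642-0123456789abcde"}]}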

func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
	stdoutPath := mntPath[len(runner.Container.OutputPath):]
	index := strings.LastIndex(stdoutPath, "/")
	if index > 0 {
		subdirs := stdoutPath[:index]
		if subdirs != "" {
			st, err := os.Stat(runner.HostOutputDir)
			if err != nil {
				return nil, fmt.Errorf("while Stat on temp dir: %v", err)
			}
			stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
			err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("while MkdirAll %q: %v", stdoutPath, err)
			}
		}
	}
	stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
	if err != nil {
		return nil, fmt.Errorf("while creating file %q: %v", stdoutPath, err)
	}

	return stdoutFile, nil
}
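
// Illustrative only: if Container.OutputPath is "/out" and the
// "stdout" mount's Path is "/out/logs/stdout.txt", getStdoutFile
// creates <HostOutputDir>/logs/ as needed and returns a file handle
// for <HostOutputDir>/logs/stdout.txt.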

// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {
	var stdin io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
	if mnt, ok := runner.Container.Mounts["stdin"]; ok {
		switch mnt.Kind {
		case "collection":
			var collID string
			if mnt.UUID != "" {
				collID = mnt.UUID
			} else {
				collID = mnt.PortableDataHash
			}
			path := runner.ArvMountPoint + "/by_id/" + collID + "/" + mnt.Path
			f, err := os.Open(path)
			if err != nil {
				return err
			}
			stdin = f
		case "json":
			j, err := json.Marshal(mnt.Content)
			if err != nil {
				return fmt.Errorf("error encoding stdin json data: %v", err)
			}
			stdin = ioutil.NopCloser(bytes.NewReader(j))
		default:
			return fmt.Errorf("stdin mount has unsupported kind %q", mnt.Kind)
		}
	}

	var stdout, stderr io.WriteCloser
	if mnt, ok := runner.Container.Mounts["stdout"]; ok {
		f, err := runner.getStdoutFile(mnt.Path)
		if err != nil {
			return err
		}
		stdout = f
	} else if w, err := runner.NewLogWriter("stdout"); err != nil {
		return err
	} else {
		stdout = NewThrottledLogger(w)
	}

	if mnt, ok := runner.Container.Mounts["stderr"]; ok {
		f, err := runner.getStdoutFile(mnt.Path)
		if err != nil {
			return err
		}
		stderr = f
	} else if w, err := runner.NewLogWriter("stderr"); err != nil {
		return err
	} else {
		stderr = NewThrottledLogger(w)
	}

	env := runner.Container.Environment
	enableNetwork := runner.enableNetwork == "always"
	if runner.Container.RuntimeConstraints.API {
		enableNetwork = true
		tok, err := runner.ContainerToken()
		if err != nil {
			return err
		}
		env = map[string]string{}
		for k, v := range runner.Container.Environment {
			env[k] = v
		}
		env["ARVADOS_API_TOKEN"] = tok
		env["ARVADOS_API_HOST"] = os.Getenv("ARVADOS_API_HOST")
		env["ARVADOS_API_HOST_INSECURE"] = os.Getenv("ARVADOS_API_HOST_INSECURE")
	}
	workdir := runner.Container.Cwd
	if workdir == "." {
		// both "" and "." mean default
		workdir = ""
	}
	ram := runner.Container.RuntimeConstraints.RAM
	if !runner.enableMemoryLimit {
		ram = 0
	}
	runner.executorStdin = stdin
	runner.executorStdout = stdout
	runner.executorStderr = stderr
	return runner.executor.Create(containerSpec{
		Image:         imageID,
		VCPUs:         runner.Container.RuntimeConstraints.VCPUs,
		RAM:           ram,
		WorkingDir:    workdir,
		Env:           env,
		BindMounts:    bindmounts,
		Command:       runner.Container.Command,
		EnableNetwork: enableNetwork,
		NetworkMode:   runner.networkMode,
		CgroupParent:  runner.setCgroupParent,
		Stdin:         stdin,
		Stdout:        stdout,
		Stderr:        stderr,
	})
}

// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
	runner.CrunchLog.Printf("Starting container")
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	err := runner.executor.Start()
	if err != nil {
		var advice string
		if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
			advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
		}
		return fmt.Errorf("could not start container: %v%s", err, advice)
	}
	return nil
}

// WaitFinish waits for the container to terminate, captures the exit
// code, and closes the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() error {
	runner.CrunchLog.Print("Waiting for container to finish")
	var timeout <-chan time.Time
	if s := runner.Container.SchedulingParameters.MaxRunTime; s > 0 {
		timeout = time.After(time.Duration(s) * time.Second)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		select {
		case <-timeout:
			runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
			runner.stop(nil)
		case <-runner.ArvMountExit:
			runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
			runner.stop(nil)
		case <-ctx.Done():
		}
	}()
	exitcode, err := runner.executor.Wait(ctx)
	if err != nil {
		runner.checkBrokenNode(err)
		return err
	}
	runner.ExitCode = &exitcode

	var returnErr error
	if err = runner.executorStdin.Close(); err != nil {
		err = fmt.Errorf("error closing container stdin: %s", err)
		runner.CrunchLog.Printf("%s", err)
		returnErr = err
	}
	if err = runner.executorStdout.Close(); err != nil {
		err = fmt.Errorf("error closing container stdout: %s", err)
		runner.CrunchLog.Printf("%s", err)
		if returnErr == nil {
			returnErr = err
		}
	}
	if err = runner.executorStderr.Close(); err != nil {
		err = fmt.Errorf("error closing container stderr: %s", err)
		runner.CrunchLog.Printf("%s", err)
		if returnErr == nil {
			returnErr = err
		}
	}

	if runner.statReporter != nil {
		runner.statReporter.Stop()
		err = runner.statLogger.Close()
		if err != nil {
			runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
		}
	}
	return returnErr
}

func (runner *ContainerRunner) updateLogs() {
	ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
	defer ticker.Stop()
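	// Illustrative arithmetic only (the real constant lives elsewhere
	// in this package): if crunchLogUpdatePeriod were 30 minutes, this
	// ticker would fire every 30m/360 = 5s, so log size and age are
	// re-checked frequently while actual flushes remain rate-limited
	// by saveAtTime/saveAtSize below.
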
	sigusr1 := make(chan os.Signal, 1)
	signal.Notify(sigusr1, syscall.SIGUSR1)
	defer signal.Stop(sigusr1)

	saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
	saveAtSize := crunchLogUpdateSize
	var savedSize int64
	for {
		select {
		case <-ticker.C:
		case <-sigusr1:
			saveAtTime = time.Now()
		}
		runner.logMtx.Lock()
		done := runner.LogsPDH != nil
		runner.logMtx.Unlock()
		if done {
			return
		}
		size := runner.LogCollection.Size()
		if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
			continue
		}
		saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
		saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
		saved, err := runner.saveLogCollection(false)
		if err != nil {
			runner.CrunchLog.Printf("error updating log collection: %s", err)
			continue
		}

		var updated arvados.Container
		err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
			"container": arvadosclient.Dict{"log": saved.PortableDataHash},
		}, &updated)
		if err != nil {
			runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
			continue
		}

		savedSize = size
	}
}

// CaptureOutput saves data from the container's output directory if
// needed, and updates the container output accordingly.
func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) error {
	if runner.Container.RuntimeConstraints.API {
		// Output may have been set directly by the container, so
		// refresh the container record to check.
		err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
			nil, &runner.Container)
		if err != nil {
			return err
		}
		if runner.Container.Output != "" {
			// Container output is already set.
			runner.OutputPDH = &runner.Container.Output
			return nil
		}
	}

	txt, err := (&copier{
		client:        runner.containerClient,
		arvClient:     runner.ContainerArvClient,
		keepClient:    runner.ContainerKeepClient,
		hostOutputDir: runner.HostOutputDir,
		ctrOutputDir:  runner.Container.OutputPath,
		bindmounts:    bindmounts,
		mounts:        runner.Container.Mounts,
		secretMounts:  runner.SecretMounts,
		logger:        runner.CrunchLog,
	}).Copy()
	if err != nil {
		return err
	}
	if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
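		// Illustrative only: a remote block hint matched above looks
		// like (hypothetical cluster ID)
		//
		//	acbd18db4cc2f85cedef654fccc4a4d8+3+Rzzzzz-abcdefgabcdefg
		//
		// where +R marks a block that still needs to be copied from a
		// remote cluster before the output collection is saved.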
		runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
		fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
		if err != nil {
			return err
		}
		txt, err = fs.MarshalManifest(".")
		if err != nil {
			return err
		}
	}
	var resp arvados.Collection
	err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
		"ensure_unique_name": true,
		"collection": arvadosclient.Dict{
			"is_trashed":    true,
			"name":          "output for " + runner.Container.UUID,
			"manifest_text": txt,
		},
	}, &resp)
	if err != nil {
		return fmt.Errorf("error creating output collection: %v", err)
	}
	runner.OutputPDH = &resp.PortableDataHash
	return nil
}

func (runner *ContainerRunner) CleanupDirs() {
	if runner.ArvMount != nil {
		var delay int64 = 8
		umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
		umount.Stdout = runner.CrunchLog
		umount.Stderr = runner.CrunchLog
		runner.CrunchLog.Printf("Running %v", umount.Args)
		umnterr := umount.Start()

		if umnterr != nil {
			runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
		} else {
			// If arv-mount --unmount gets stuck for any reason, we
			// don't want to wait for it forever. Do Wait() in a goroutine
			// so it doesn't block crunch-run.
			umountExit := make(chan error)
			go func() {
				mnterr := umount.Wait()
				if mnterr != nil {
					runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
				}
				umountExit <- mnterr
			}()

			for again := true; again; {
				again = false
				select {
				case <-umountExit:
					umount = nil
					again = true
				case <-runner.ArvMountExit:
					break
				case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
					runner.CrunchLog.Printf("Timed out waiting for unmount")
					if umount != nil {
						umount.Process.Kill()
					}
					runner.ArvMount.Process.Kill()
				}
			}
		}
	}

	if runner.ArvMountPoint != "" {
		if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
			runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
		}
		runner.ArvMountPoint = ""
	}

	if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
		runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
	}
}

// CommitLogs posts the collection containing the final container logs.
func (runner *ContainerRunner) CommitLogs() error {
	func() {
		// Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
		runner.cStateLock.Lock()
		defer runner.cStateLock.Unlock()

		runner.CrunchLog.Print(runner.finalState)

		if runner.arvMountLog != nil {
			runner.arvMountLog.Close()
		}
		runner.CrunchLog.Close()

		// Closing CrunchLog above allows the buffered log data to be
		// committed to Keep at this point, but re-open the crunch log
		// with ArvClient in case there are any further errors (such
		// as failing to write the log to Keep!) while shutting down.
		runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
			ArvClient:     runner.DispatcherArvClient,
			UUID:          runner.Container.UUID,
			loggingStream: "crunch-run",
			writeCloser:   nil,
		})
		runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
	}()

	if runner.LogsPDH != nil {
		// If we have already assigned something to LogsPDH,
		// we must be closing the re-opened log, which won't
		// end up getting attached to the container record and
		// therefore doesn't need to be saved as a collection
		// -- it exists only to send logs to other channels.
		return nil
	}

	saved, err := runner.saveLogCollection(true)
	if err != nil {
		return fmt.Errorf("error saving log collection: %s", err)
	}
	runner.logMtx.Lock()
	defer runner.logMtx.Unlock()
	runner.LogsPDH = &saved.PortableDataHash
	return nil
}

func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
	runner.logMtx.Lock()
	defer runner.logMtx.Unlock()
	if runner.LogsPDH != nil {
		// Already finalized.
		return
	}
	updates := arvadosclient.Dict{
		"name": "logs for " + runner.Container.UUID,
	}
	mt, err1 := runner.LogCollection.MarshalManifest(".")
	if err1 == nil {
		// Only send updated manifest text if there was no
		// error.
		updates["manifest_text"] = mt
	}

	// Even if flushing the manifest had an error, we still want
	// to update the log record, if possible, to push the trash_at
	// and delete_at times into the future. Details on the bug are
	// in the issue tracker.
	if final {
		updates["is_trashed"] = true
	} else {
		exp := time.Now().Add(crunchLogUpdatePeriod * 24)
		updates["trash_at"] = exp
		updates["delete_at"] = exp
	}
	reqBody := arvadosclient.Dict{"collection": updates}
	var err2 error
	if runner.logUUID == "" {
		reqBody["ensure_unique_name"] = true
		err2 = runner.DispatcherArvClient.Create("collections", reqBody, &response)
	} else {
		err2 = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
	}
	if err2 == nil {
		runner.logUUID = response.UUID
	}

	if err1 != nil || err2 != nil {
		err = fmt.Errorf("error recording logs: %q, %q", err1, err2)
	}
	return
}

// UpdateContainerRunning updates the container state to "Running".
func (runner *ContainerRunner) UpdateContainerRunning() error {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
		arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running", "gateway_address": runner.gateway.Address}}, nil)
}

// ContainerToken returns the api_token the container (and any
// arv-mount processes) are allowed to use.
func (runner *ContainerRunner) ContainerToken() (string, error) {
	if runner.token != "" {
		return runner.token, nil
	}

	var auth arvados.APIClientAuthorization
	err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
	if err != nil {
		return "", err
	}
	runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
	return runner.token, nil
}
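
// Illustrative only: the v2 token assembled above has the form
// (values made up)
//
//	v2/zzzzz-gj3su-0123456789abcde/secretpartofthetoken/zzzzz-dz642-0123456789abcde
//
// i.e. auth record UUID, token secret, then the container UUID.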

// UpdateContainerFinal updates the container record state on the API
// server to "Complete" or "Cancelled".
func (runner *ContainerRunner) UpdateContainerFinal() error {
	update := arvadosclient.Dict{}
	update["state"] = runner.finalState
	if runner.LogsPDH != nil {
		update["log"] = *runner.LogsPDH
	}
	if runner.finalState == "Complete" {
		if runner.ExitCode != nil {
			update["exit_code"] = *runner.ExitCode
		}
		if runner.OutputPDH != nil {
			update["output"] = *runner.OutputPDH
		}
	}
	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
}

// IsCancelled returns the value of Cancelled, with goroutine safety.
func (runner *ContainerRunner) IsCancelled() bool {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	return runner.cCancelled
}

// NewArvLogWriter creates an ArvLogWriter.
func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
	writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return nil, err
	}
	return &ArvLogWriter{
		ArvClient:     runner.DispatcherArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: name,
		writeCloser:   writer,
	}, nil
}

// Run the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
	runner.CrunchLog.Printf("crunch-run %s started", cmd.Version.String())
	runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)

	hostname, hosterr := os.Hostname()
	if hosterr != nil {
		runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
	} else {
		runner.CrunchLog.Printf("Executing on host '%s'", hostname)
	}

	runner.finalState = "Queued"

	defer func() {
		runner.CleanupDirs()

		runner.CrunchLog.Printf("crunch-run finished")
		runner.CrunchLog.Close()
	}()

	err = runner.fetchContainerRecord()
	if err != nil {
		return
	}
	if runner.Container.State != "Locked" {
		return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
	}

	var bindmounts map[string]bindmount
	defer func() {
		// checkErr prints e (unless it's nil) and sets err to
		// e (unless err is already non-nil). Thus, if err
		// hasn't already been assigned when Run() returns,
		// this cleanup func will cause Run() to return the
		// first non-nil error that is passed to checkErr().
		checkErr := func(errorIn string, e error) {
			if e == nil {
				return
			}
			runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
			if err == nil {
				err = e
			}
			if runner.finalState == "Complete" {
				// There was an error in the finalization.
				runner.finalState = "Cancelled"
			}
		}

		// Log the error encountered in Run(), if any
		checkErr("Run", err)

		if runner.finalState == "Queued" {
			runner.UpdateContainerFinal()
			return
		}

		if runner.IsCancelled() {
			runner.finalState = "Cancelled"
			// but don't return yet -- we still want to
			// capture partial output and write logs
		}

		if bindmounts != nil {
			checkErr("CaptureOutput", runner.CaptureOutput(bindmounts))
		}
		checkErr("stopHoststat", runner.stopHoststat())
		checkErr("CommitLogs", runner.CommitLogs())
		checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
	}()

	runner.setupSignals()
	err = runner.startHoststat()
	if err != nil {
		return
	}

	// set up FUSE mount and binds
	bindmounts, err = runner.SetupMounts()
	if err != nil {
		runner.finalState = "Cancelled"
		err = fmt.Errorf("while setting up mounts: %v", err)
		return
	}

	// check for and/or load image
	imageID, err := runner.LoadImage()
	if err != nil {
		if !runner.checkBrokenNode(err) {
			// Failed to load image but not due to a "broken node"
			// condition, probably user error.
			runner.finalState = "Cancelled"
		}
		err = fmt.Errorf("while loading container image: %v", err)
		return
	}

	err = runner.CreateContainer(imageID, bindmounts)
	if err != nil {
		return
	}
	err = runner.LogHostInfo()
	if err != nil {
		return
	}
	err = runner.LogNodeRecord()
	if err != nil {
		return
	}
	err = runner.LogContainerRecord()
	if err != nil {
		return
	}

	if runner.IsCancelled() {
		return
	}

	err = runner.UpdateContainerRunning()
	if err != nil {
		return
	}
	runner.finalState = "Cancelled"

	err = runner.startCrunchstat()
	if err != nil {
		return
	}

	err = runner.StartContainer()
	if err != nil {
		runner.checkBrokenNode(err)
		return
	}

	err = runner.WaitFinish()
	if err == nil && !runner.IsCancelled() {
		runner.finalState = "Complete"
	}
	return
}

// Fetch the current container record (uuid = runner.Container.UUID)
// into runner.Container.
func (runner *ContainerRunner) fetchContainerRecord() error {
	reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
	if err != nil {
		return fmt.Errorf("error fetching container record: %v", err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	err = dec.Decode(&runner.Container)
	if err != nil {
		return fmt.Errorf("error decoding container record: %v", err)
	}

	var sm struct {
		SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
	}

	containerToken, err := runner.ContainerToken()
	if err != nil {
		return fmt.Errorf("error getting container token: %v", err)
	}

	runner.ContainerArvClient, runner.ContainerKeepClient,
		runner.containerClient, err = runner.MkArvClient(containerToken)
	if err != nil {
		return fmt.Errorf("error creating container API client: %v", err)
	}

	runner.ContainerKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)
	runner.DispatcherKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)

	err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
	if err != nil {
		if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
			return fmt.Errorf("error fetching secret_mounts: %v", err)
		}
		// ok && apierr.HttpStatusCode == 404, which means
		// secret_mounts isn't supported by this API server.
	}
	runner.SecretMounts = sm.SecretMounts

	return nil
}

// NewContainerRunner creates a new container runner.
func NewContainerRunner(dispatcherClient *arvados.Client,
	dispatcherArvClient IArvadosClient,
	dispatcherKeepClient IKeepClient,
	containerUUID string) (*ContainerRunner, error) {

	cr := &ContainerRunner{
		dispatcherClient:     dispatcherClient,
		DispatcherArvClient:  dispatcherArvClient,
		DispatcherKeepClient: dispatcherKeepClient,
	}
	cr.NewLogWriter = cr.NewArvLogWriter
	cr.RunArvMount = cr.ArvMountCmd
	cr.MkTempDir = ioutil.TempDir
	cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
		cl, err := arvadosclient.MakeArvadosClient()
		if err != nil {
			return nil, nil, nil, err
		}
		cl.ApiToken = token
		kc, err := keepclient.MakeKeepClient(cl)
		if err != nil {
			return nil, nil, nil, err
		}
		c2 := arvados.NewClientFromEnv()
		c2.AuthToken = token
		return cl, kc, c2, nil
	}
	var err error
	cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
	if err != nil {
		return nil, err
	}
	cr.Container.UUID = containerUUID
	w, err := cr.NewLogWriter("crunch-run")
	if err != nil {
		return nil, err
	}
	cr.CrunchLog = NewThrottledLogger(w)
	cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)

	loadLogThrottleParams(dispatcherArvClient)
	go cr.updateLogs()

	return cr, nil
}

func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
	flags := flag.NewFlagSet(prog, flag.ContinueOnError)
	statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
	cgroupRoot := flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
	cgroupParent := flags.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
	cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
	caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
	detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
	stdinEnv := flags.Bool("stdin-env", false, "Load environment variables from JSON message on stdin")
	sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
	kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
	list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes")
	enableMemoryLimit := flags.Bool("enable-memory-limit", true, "tell container runtime to limit container's memory usage")
	enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
	networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
	memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
	runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
	flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")

	ignoreDetachFlag := false
	if len(args) > 0 && args[0] == "-no-detach" {
		// This process was invoked by a parent process, which
		// has passed along its own arguments, including
		// -detach, after the leading -no-detach flag. Strip
		// the leading -no-detach flag (it's not recognized by
		// flags.Parse()) and ignore the -detach flag that
		// comes later.
		args = args[1:]
		ignoreDetachFlag = true
	}

	if err := flags.Parse(args); err == flag.ErrHelp {
		return 0
	} else if err != nil {
		log.Print(err)
		return 1
	}

	if *stdinEnv && !ignoreDetachFlag {
		// Load env vars on stdin if asked (but not in a
		// detached child process, in which case stdin is
		// /dev/null).
		err := loadEnv(os.Stdin)
		if err != nil {
			log.Print(err)
			return 1
		}
	}

	containerUUID := flags.Arg(0)

	switch {
	case *detach && !ignoreDetachFlag:
		return Detach(containerUUID, prog, args, os.Stdout, os.Stderr)
	case *kill >= 0:
		return KillProcess(containerUUID, syscall.Signal(*kill), os.Stdout, os.Stderr)
	case *list:
		return ListProcesses(os.Stdout, os.Stderr)
	}

	if containerUUID == "" {
		log.Printf("usage: %s [options] UUID", prog)
		return 1
	}

	log.Printf("crunch-run %s started", cmd.Version.String())
	time.Sleep(*sleep)

	if *caCertsPath != "" {
		arvadosclient.CertFiles = []string{*caCertsPath}
	}

	api, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Printf("%s: %v", containerUUID, err)
		return 1
	}

	kc, kcerr := keepclient.MakeKeepClient(api)
	if kcerr != nil {
		log.Printf("%s: %v", containerUUID, kcerr)
		return 1
	}
	kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}

	cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
	if err != nil {
		log.Print(err)
		return 1
	}

	switch *runtimeEngine {
	case "docker":
		cr.executor, err = newDockerExecutor(containerUUID, cr.CrunchLog.Printf, cr.containerWatchdogInterval)
	case "singularity":
		cr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)
	default:
		cr.CrunchLog.Printf("%s: unsupported RuntimeEngine %q", containerUUID, *runtimeEngine)
		cr.CrunchLog.Close()
		return 1
	}
	if err != nil {
		cr.CrunchLog.Printf("%s: %v", containerUUID, err)
		cr.checkBrokenNode(err)
		cr.CrunchLog.Close()
		return 1
	}
	defer cr.executor.Close()

	gwAuthSecret := os.Getenv("GatewayAuthSecret")
	os.Unsetenv("GatewayAuthSecret")
	if gwAuthSecret == "" {
		// not safe to run a gateway service without an auth
		// secret
		cr.CrunchLog.Printf("Not starting a gateway server (GatewayAuthSecret was not provided by dispatcher)")
	} else if gwListen := os.Getenv("GatewayAddress"); gwListen == "" {
		// dispatcher did not tell us which external IP
		// address to advertise --> no gateway service
		cr.CrunchLog.Printf("Not starting a gateway server (GatewayAddress was not provided by dispatcher)")
	} else if de, ok := cr.executor.(*dockerExecutor); ok {
		cr.gateway = Gateway{
			Address:            gwListen,
			AuthSecret:         gwAuthSecret,
			ContainerUUID:      containerUUID,
			DockerContainerID:  &de.containerID,
			Log:                cr.CrunchLog,
			ContainerIPAddress: dockerContainerIPAddress(&de.containerID),
		}
		err = cr.gateway.Start()
		if err != nil {
			log.Printf("error starting gateway server: %s", err)
			return 1
		}
	}

	parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerUUID+".")
	if tmperr != nil {
		log.Printf("%s: %v", containerUUID, tmperr)
		return 1
	}

	cr.parentTemp = parentTemp
	cr.statInterval = *statInterval
	cr.cgroupRoot = *cgroupRoot
	cr.expectCgroupParent = *cgroupParent
	cr.enableMemoryLimit = *enableMemoryLimit
	cr.enableNetwork = *enableNetwork
	cr.networkMode = *networkMode
	if *cgroupParentSubsystem != "" {
		p := findCgroup(*cgroupParentSubsystem)
		cr.setCgroupParent = p
		cr.expectCgroupParent = p
	}

	runerr := cr.Run()

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Printf("could not create memory profile: %s", err)
		}
		runtime.GC() // get up-to-date statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Printf("could not write memory profile: %s", err)
		}
		closeerr := f.Close()
		if closeerr != nil {
			log.Printf("closing memprofile file: %s", closeerr)
		}
	}

	if runerr != nil {
		log.Printf("%s: %v", containerUUID, runerr)
		return 1
	}
	return 0
}

func loadEnv(rdr io.Reader) error {
	buf, err := ioutil.ReadAll(rdr)
	if err != nil {
		return fmt.Errorf("read stdin: %s", err)
	}
	var env map[string]string
	err = json.Unmarshal(buf, &env)
	if err != nil {
		return fmt.Errorf("decode stdin: %s", err)
	}
	for k, v := range env {
		err = os.Setenv(k, v)
		if err != nil {
			return fmt.Errorf("setenv(%q): %s", k, err)
		}
	}
	return nil
}
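
// Illustrative only: with -stdin-env, the parent process is expected
// to provide a JSON object on stdin, e.g. (values made up):
//
//	{"ARVADOS_API_HOST": "zzzzz.arvadosapi.com", "ARVADOS_API_TOKEN": "examplesecrettoken"}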