1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
30 "git.arvados.org/arvados.git/lib/cmd"
31 "git.arvados.org/arvados.git/lib/crunchstat"
32 "git.arvados.org/arvados.git/sdk/go/arvados"
33 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
34 "git.arvados.org/arvados.git/sdk/go/keepclient"
35 "git.arvados.org/arvados.git/sdk/go/manifest"
36 "golang.org/x/net/context"
41 var Command = command{}
43 // IArvadosClient wraps the minimal set of Arvados API methods used by crunch-run.
44 type IArvadosClient interface {
45 Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
46 Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
47 Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
48 Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
49 CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
50 Discovery(key string) (interface{}, error)
53 // ErrCancelled is the error returned when the container is cancelled.
54 var ErrCancelled = errors.New("Cancelled")
56 // IKeepClient wraps the minimal set of Keep API methods used by crunch-run.
57 type IKeepClient interface {
58 PutB(buf []byte) (string, int, error)
59 ReadAt(locator string, p []byte, off int) (int, error)
60 ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
61 LocalLocator(locator string) (string, error)
65 // NewLogWriter is a factory function to create a new log writer.
66 type NewLogWriter func(name string) (io.WriteCloser, error)
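// RunArvMount is a function that starts an arv-mount command with the
// given arguments and container token, returning the running command.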
68 type RunArvMount func(args []string, tok string) (*exec.Cmd, error)
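// MkTempDir is a function that creates a temporary directory, with the
// same signature as ioutil.TempDir.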
70 type MkTempDir func(string, string) (string, error)
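// PsProcess is a minimal interface for reading a running process's
// command line.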
72 type PsProcess interface {
73 CmdlineSlice() ([]string, error)
76 // ContainerRunner is the main stateful struct used for a single execution of a container.
78 type ContainerRunner struct {
79 executor containerExecutor
81 // Dispatcher client is initialized with the Dispatcher token.
82 // This is a privileged token used to manage container status and logs.
85 // We have both dispatcherClient and DispatcherArvClient
86 // because there are two different incompatible Arvados Go
87 // SDKs and we have to use both (hopefully this gets fixed eventually).
89 dispatcherClient *arvados.Client
90 DispatcherArvClient IArvadosClient
91 DispatcherKeepClient IKeepClient
93 // Container client is initialized with the Container token
94 // This token controls the permissions of the container, and
95 // must be used for operations such as reading collections.
97 // Same comment as above applies to
98 // containerClient/ContainerArvClient.
99 containerClient *arvados.Client
100 ContainerArvClient IArvadosClient
101 ContainerKeepClient IKeepClient
103 Container arvados.Container
106 NewLogWriter NewLogWriter
107 CrunchLog *ThrottledLogger
108 Stdout io.WriteCloser
109 Stderr io.WriteCloser
112 LogCollection arvados.CollectionFileSystem
114 RunArvMount RunArvMount
119 Volumes map[string]struct{}
121 SigChan chan os.Signal
122 ArvMountExit chan error
123 SecretMounts map[string]arvados.Mount
124 MkArvClient func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
128 statLogger io.WriteCloser
129 statReporter *crunchstat.Reporter
130 hoststatLogger io.WriteCloser
131 hoststatReporter *crunchstat.Reporter
132 statInterval time.Duration
134 // What we expect the container's cgroup parent to be.
135 expectCgroupParent string
136 // What we tell docker to use as the container's cgroup
137 // parent. Note: Ideally we would use the same field for both
138 // expectCgroupParent and setCgroupParent, and just make it
139 // default to "docker". However, when using docker < 1.10 with
140 // systemd, specifying a non-empty cgroup parent (even the
141 // default value "docker") hits a docker bug
142 // (https://github.com/docker/docker/issues/17126). Using two
143 // separate fields makes it possible to use the "expect cgroup
144 // parent to be X" feature even on sites where the "specify
145 // cgroup parent" feature breaks.
146 setCgroupParent string
148 cStateLock sync.Mutex
149 cCancelled bool // StopContainer() invoked
151 enableNetwork string // one of "default" or "always"
152 networkMode string // "none", "host", or "" -- passed through to executor
153 arvMountLog *ThrottledLogger
155 containerWatchdogInterval time.Duration
160 // setupSignals sets up signal handling to gracefully terminate the
161 // underlying container and update state when receiving a TERM, INT or QUIT signal.
163 func (runner *ContainerRunner) setupSignals() {
164 runner.SigChan = make(chan os.Signal, 1)
165 signal.Notify(runner.SigChan, syscall.SIGTERM)
166 signal.Notify(runner.SigChan, syscall.SIGINT)
167 signal.Notify(runner.SigChan, syscall.SIGQUIT)
169 go func(sig chan os.Signal) {
176 // stop the underlying container.
177 func (runner *ContainerRunner) stop(sig os.Signal) {
178 runner.cStateLock.Lock()
179 defer runner.cStateLock.Unlock()
181 runner.CrunchLog.Printf("caught signal: %v", sig)
183 runner.cCancelled = true
184 runner.CrunchLog.Printf("stopping container")
185 err := runner.executor.Stop()
187 runner.CrunchLog.Printf("error stopping container: %s", err)
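// errorBlacklist is a list of regular expressions matching error messages
// that suggest the node itself (rather than the container) is broken.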
191 var errorBlacklist = []string{
192 "(?ms).*[Cc]annot connect to the Docker daemon.*",
193 "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
194 "(?ms).*grpc: the connection is unavailable.*",
196 var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
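// runBrokenNodeHook runs the configured broken-node hook, or, if no hook
// is configured, writes a marker file to flag the node as broken.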
198 func (runner *ContainerRunner) runBrokenNodeHook() {
199 if *brokenNodeHook == "" {
200 path := filepath.Join(lockdir, brokenfile)
201 runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
202 f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
204 runner.CrunchLog.Printf("Error writing %s: %s", path, err)
209 runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
211 c := exec.Command(*brokenNodeHook)
212 c.Stdout = runner.CrunchLog
213 c.Stderr = runner.CrunchLog
216 runner.CrunchLog.Printf("Error running broken node hook: %v", err)
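// checkBrokenNode reports whether the given error matches a known
// "broken node" pattern; if it does, the broken node hook is also run.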
221 func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
222 for _, d := range errorBlacklist {
223 if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
224 runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
225 runner.runBrokenNodeHook()
232 // LoadImage determines the docker image id from the container record and
233 // checks if it is available in the local Docker image store. If not, it loads
234 // the image from Keep.
235 func (runner *ContainerRunner) LoadImage() (string, error) {
236 runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
238 d, err := os.Open(runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage)
243 allfiles, err := d.Readdirnames(-1)
247 var tarfiles []string
248 for _, fnm := range allfiles {
249 if strings.HasSuffix(fnm, ".tar") {
250 tarfiles = append(tarfiles, fnm)
253 if len(tarfiles) == 0 {
254 return "", fmt.Errorf("image collection does not include a .tar image file")
256 if len(tarfiles) > 1 {
257 return "", fmt.Errorf("cannot choose from multiple tar files in image collection: %v", tarfiles)
259 imageID := tarfiles[0][:len(tarfiles[0])-4]
260 imageFile := runner.ArvMountPoint + "/by_id/" + runner.Container.ContainerImage + "/" + tarfiles[0]
261 runner.CrunchLog.Printf("Using Docker image id %q", imageID)
263 if !runner.executor.ImageLoaded(imageID) {
264 runner.CrunchLog.Print("Loading Docker image from keep")
265 err = runner.executor.LoadImage(imageFile)
270 runner.CrunchLog.Print("Docker image is available")
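// ArvMountCmd starts an arv-mount process with the given arguments,
// using the container token, and waits for the mount to become ready
// (by_id/README appears) before returning.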
275 func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
276 c = exec.Command("arv-mount", arvMountCmd...)
278 // Copy our environment, but override ARVADOS_API_TOKEN with
279 // the container auth token.
281 for _, s := range os.Environ() {
282 if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
283 c.Env = append(c.Env, s)
286 c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
288 w, err := runner.NewLogWriter("arv-mount")
292 runner.arvMountLog = NewThrottledLogger(w)
293 c.Stdout = runner.arvMountLog
294 c.Stderr = runner.arvMountLog
296 runner.CrunchLog.Printf("Running %v", c.Args)
303 statReadme := make(chan bool)
304 runner.ArvMountExit = make(chan error)
309 time.Sleep(100 * time.Millisecond)
310 _, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
322 runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
324 runner.ArvMountExit <- mnterr
325 close(runner.ArvMountExit)
331 case err := <-runner.ArvMountExit:
332 runner.ArvMount = nil
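// SetupArvMountPoint creates the temporary directory used as the
// arv-mount mount point, unless one has already been set up.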
340 func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
341 if runner.ArvMountPoint == "" {
342 runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
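// copyfile copies a single file from src to dst, creating the
// destination directory if necessary.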
347 func copyfile(src string, dst string) (err error) {
348 srcfile, err := os.Open(src)
353 os.MkdirAll(path.Dir(dst), 0777)
355 dstfile, err := os.Create(dst)
359 _, err = io.Copy(dstfile, srcfile)
364 err = srcfile.Close()
365 err2 := dstfile.Close()
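// SetupMounts translates the container record's mounts and secret mounts
// into an arv-mount command line plus a set of host bind mounts, starts
// arv-mount, and stages any writable files into the host output directory.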
378 func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
379 bindmounts := map[string]bindmount{}
380 err := runner.SetupArvMountPoint("keep")
382 return nil, fmt.Errorf("While creating keep mount temp dir: %v", err)
385 token, err := runner.ContainerToken()
387 return nil, fmt.Errorf("could not get container token: %s", err)
392 arvMountCmd := []string{
396 fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
398 if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
399 arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
402 collectionPaths := []string{}
403 needCertMount := true
404 type copyFile struct {
408 var copyFiles []copyFile
411 for bind := range runner.Container.Mounts {
412 binds = append(binds, bind)
414 for bind := range runner.SecretMounts {
415 if _, ok := runner.Container.Mounts[bind]; ok {
416 return nil, fmt.Errorf("secret mount %q conflicts with regular mount", bind)
418 if runner.SecretMounts[bind].Kind != "json" &&
419 runner.SecretMounts[bind].Kind != "text" {
420 return nil, fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
421 bind, runner.SecretMounts[bind].Kind)
423 binds = append(binds, bind)
427 for _, bind := range binds {
428 mnt, ok := runner.Container.Mounts[bind]
430 mnt = runner.SecretMounts[bind]
432 if bind == "stdout" || bind == "stderr" {
433 // Is it a "file" mount kind?
434 if mnt.Kind != "file" {
435 return nil, fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
438 // Does path start with OutputPath?
439 prefix := runner.Container.OutputPath
440 if !strings.HasSuffix(prefix, "/") {
443 if !strings.HasPrefix(mnt.Path, prefix) {
444 return nil, fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
449 // Is it a "collection" or "json" mount kind?
450 if mnt.Kind != "collection" && mnt.Kind != "json" {
451 return nil, fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
455 if bind == "/etc/arvados/ca-certificates.crt" {
456 needCertMount = false
459 if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
460 if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
461 return nil, fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
466 case mnt.Kind == "collection" && bind != "stdin":
468 if mnt.UUID != "" && mnt.PortableDataHash != "" {
469 return nil, fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
473 return nil, fmt.Errorf("writing to existing collections currently not permitted")
476 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
477 } else if mnt.PortableDataHash != "" {
478 if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
479 return nil, fmt.Errorf("can never write to a collection specified by portable data hash")
481 idx := strings.Index(mnt.PortableDataHash, "/")
483 mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
484 mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
485 runner.Container.Mounts[bind] = mnt
487 src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
488 if mnt.Path != "" && mnt.Path != "." {
489 if strings.HasPrefix(mnt.Path, "./") {
490 mnt.Path = mnt.Path[2:]
491 } else if strings.HasPrefix(mnt.Path, "/") {
492 mnt.Path = mnt.Path[1:]
494 src += "/" + mnt.Path
497 src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
498 arvMountCmd = append(arvMountCmd, "--mount-tmp")
499 arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
503 if bind == runner.Container.OutputPath {
504 runner.HostOutputDir = src
505 bindmounts[bind] = bindmount{HostPath: src}
506 } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
507 copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
509 bindmounts[bind] = bindmount{HostPath: src}
512 bindmounts[bind] = bindmount{HostPath: src, ReadOnly: true}
514 collectionPaths = append(collectionPaths, src)
516 case mnt.Kind == "tmp":
518 tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
520 return nil, fmt.Errorf("while creating mount temp dir: %v", err)
522 st, staterr := os.Stat(tmpdir)
524 return nil, fmt.Errorf("while Stat on temp dir: %v", staterr)
526 err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
528 return nil, fmt.Errorf("while Chmod temp dir: %v", err)
530 bindmounts[bind] = bindmount{HostPath: tmpdir}
531 if bind == runner.Container.OutputPath {
532 runner.HostOutputDir = tmpdir
535 case mnt.Kind == "json" || mnt.Kind == "text":
537 if mnt.Kind == "json" {
538 filedata, err = json.Marshal(mnt.Content)
540 return nil, fmt.Errorf("encoding json data: %v", err)
543 text, ok := mnt.Content.(string)
545 return nil, fmt.Errorf("content for mount %q must be a string", bind)
547 filedata = []byte(text)
550 tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
552 return nil, fmt.Errorf("creating temp dir: %v", err)
554 tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
555 err = ioutil.WriteFile(tmpfn, filedata, 0444)
557 return nil, fmt.Errorf("writing temp file: %v", err)
559 if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
560 copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
562 bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
565 case mnt.Kind == "git_tree":
566 tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
568 return nil, fmt.Errorf("creating temp dir: %v", err)
570 err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
574 bindmounts[bind] = bindmount{HostPath: tmpdir, ReadOnly: true}
578 if runner.HostOutputDir == "" {
579 return nil, fmt.Errorf("output path does not correspond to a writable mount point")
582 if needCertMount && runner.Container.RuntimeConstraints.API {
583 for _, certfile := range arvadosclient.CertFiles {
584 _, err := os.Stat(certfile)
586 bindmounts["/etc/arvados/ca-certificates.crt"] = bindmount{HostPath: certfile, ReadOnly: true}
593 arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
595 arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
597 arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
599 runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
601 return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
604 for _, p := range collectionPaths {
607 return nil, fmt.Errorf("while checking that input files exist: %v", err)
611 for _, cp := range copyFiles {
612 st, err := os.Stat(cp.src)
614 return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
617 err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
621 target := path.Join(cp.bind, walkpath[len(cp.src):])
622 if walkinfo.Mode().IsRegular() {
623 copyerr := copyfile(walkpath, target)
627 return os.Chmod(target, walkinfo.Mode()|0777)
628 } else if walkinfo.Mode().IsDir() {
629 mkerr := os.MkdirAll(target, 0777)
633 return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
635 return fmt.Errorf("source %q is not a regular file or directory", cp.src)
638 } else if st.Mode().IsRegular() {
639 err = copyfile(cp.src, cp.bind)
641 err = os.Chmod(cp.bind, st.Mode()|0777)
645 return nil, fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
649 return bindmounts, nil
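// stopHoststat stops the hoststat reporter, if one is running, and
// closes its log writer.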
652 func (runner *ContainerRunner) stopHoststat() error {
653 if runner.hoststatReporter == nil {
656 runner.hoststatReporter.Stop()
657 err := runner.hoststatLogger.Close()
659 return fmt.Errorf("error closing hoststat logs: %v", err)
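// startHoststat starts reporting resource usage statistics for the whole
// host to a "hoststat" log stream.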
664 func (runner *ContainerRunner) startHoststat() error {
665 w, err := runner.NewLogWriter("hoststat")
669 runner.hoststatLogger = NewThrottledLogger(w)
670 runner.hoststatReporter = &crunchstat.Reporter{
671 Logger: log.New(runner.hoststatLogger, "", 0),
672 CgroupRoot: runner.cgroupRoot,
673 PollPeriod: runner.statInterval,
675 runner.hoststatReporter.Start()
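// startCrunchstat starts reporting resource usage statistics for the
// container's cgroup to a "crunchstat" log stream.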
679 func (runner *ContainerRunner) startCrunchstat() error {
680 w, err := runner.NewLogWriter("crunchstat")
684 runner.statLogger = NewThrottledLogger(w)
685 runner.statReporter = &crunchstat.Reporter{
686 CID: runner.executor.CgroupID(),
687 Logger: log.New(runner.statLogger, "", 0),
688 CgroupParent: runner.expectCgroupParent,
689 CgroupRoot: runner.cgroupRoot,
690 PollPeriod: runner.statInterval,
691 TempDir: runner.parentTemp,
693 runner.statReporter.Start()
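// infoCommand describes a labeled shell command whose output is captured
// in the node-info log.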
697 type infoCommand struct {
702 // LogHostInfo logs info about the current host, for debugging and
703 // accounting purposes. Although it's logged as "node-info", this is
704 // about the environment where crunch-run is actually running, which
705 // might differ from what's described in the node record (see LogNodeRecord).
707 func (runner *ContainerRunner) LogHostInfo() (err error) {
708 w, err := runner.NewLogWriter("node-info")
713 commands := []infoCommand{
715 label: "Host Information",
716 cmd: []string{"uname", "-a"},
719 label: "CPU Information",
720 cmd: []string{"cat", "/proc/cpuinfo"},
723 label: "Memory Information",
724 cmd: []string{"cat", "/proc/meminfo"},
728 cmd: []string{"df", "-m", "/", os.TempDir()},
731 label: "Disk INodes",
732 cmd: []string{"df", "-i", "/", os.TempDir()},
736 // Run commands with informational output to be logged.
737 for _, command := range commands {
738 fmt.Fprintln(w, command.label)
739 cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
742 if err := cmd.Run(); err != nil {
743 err = fmt.Errorf("While running command %q: %v", command.cmd, err)
752 return fmt.Errorf("While closing node-info logs: %v", err)
757 // LogContainerRecord gets and saves the raw JSON container record from the API server
758 func (runner *ContainerRunner) LogContainerRecord() error {
759 logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
760 if !logged && err == nil {
761 err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
766 // LogNodeRecord logs the current host's InstanceType config entry (or
767 // the arvados#node record, if running via crunch-dispatch-slurm).
768 func (runner *ContainerRunner) LogNodeRecord() error {
769 if it := os.Getenv("InstanceType"); it != "" {
770 // Dispatched via arvados-dispatch-cloud. Save
771 // InstanceType config fragment received from
772 // dispatcher on stdin.
773 w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
778 _, err = io.WriteString(w, it)
784 // Dispatched via crunch-dispatch-slurm. Look up
785 // apiserver's node record corresponding to the current hostname.
787 hostname := os.Getenv("SLURMD_NODENAME")
789 hostname, _ = os.Hostname()
791 _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
792 // The "info" field has admin-only info when
793 // obtained with a privileged token, and
794 // should not be logged.
795 node, ok := resp.(map[string]interface{})
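// logAPIResponse fetches the named API resource using the dispatcher
// client, writes the first matching item (after applying munge, if given)
// to <label>.json in the log collection, and reports whether anything was
// actually logged.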
803 func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
804 writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
809 ArvClient: runner.DispatcherArvClient,
810 UUID: runner.Container.UUID,
811 loggingStream: label,
815 reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
817 return false, fmt.Errorf("error getting %s record: %v", label, err)
821 dec := json.NewDecoder(reader)
823 var resp map[string]interface{}
824 if err = dec.Decode(&resp); err != nil {
825 return false, fmt.Errorf("error decoding %s list response: %v", label, err)
827 items, ok := resp["items"].([]interface{})
829 return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
830 } else if len(items) < 1 {
836 // Re-encode it using indentation to improve readability
837 enc := json.NewEncoder(w)
838 enc.SetIndent("", " ")
839 if err = enc.Encode(items[0]); err != nil {
840 return false, fmt.Errorf("error logging %s record: %v", label, err)
844 return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
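// getStdoutFile creates and returns the file in the host output directory
// corresponding to the given stdout/stderr "file" mount path, creating
// parent directories as needed.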
849 func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
850 stdoutPath := mntPath[len(runner.Container.OutputPath):]
851 index := strings.LastIndex(stdoutPath, "/")
853 subdirs := stdoutPath[:index]
855 st, err := os.Stat(runner.HostOutputDir)
857 return nil, fmt.Errorf("While Stat on temp dir: %v", err)
859 stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
860 err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
862 return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
866 stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
868 return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
871 return stdoutFile, nil
874 // CreateContainer creates the docker container.
875 func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {
876 var stdin io.ReadCloser
877 if mnt, ok := runner.Container.Mounts["stdin"]; ok {
884 collID = mnt.PortableDataHash
886 path := runner.ArvMountPoint + "/by_id/" + collID + "/" + mnt.Path
887 f, err := os.Open(path)
893 j, err := json.Marshal(mnt.Content)
895 return fmt.Errorf("error encoding stdin json data: %v", err)
897 stdin = ioutil.NopCloser(bytes.NewReader(j))
899 return fmt.Errorf("stdin mount has unsupported kind %q", mnt.Kind)
903 var stdout, stderr io.WriteCloser
904 if mnt, ok := runner.Container.Mounts["stdout"]; ok {
905 f, err := runner.getStdoutFile(mnt.Path)
910 } else if w, err := runner.NewLogWriter("stdout"); err != nil {
913 stdout = NewThrottledLogger(w)
916 if mnt, ok := runner.Container.Mounts["stderr"]; ok {
917 f, err := runner.getStdoutFile(mnt.Path)
922 } else if w, err := runner.NewLogWriter("stderr"); err != nil {
925 stderr = NewThrottledLogger(w)
928 env := runner.Container.Environment
929 enableNetwork := runner.enableNetwork == "always"
930 if runner.Container.RuntimeConstraints.API {
932 tok, err := runner.ContainerToken()
936 env = map[string]string{}
937 for k, v := range runner.Container.Environment {
940 env["ARVADOS_API_TOKEN"] = tok
941 env["ARVADOS_API_HOST"] = os.Getenv("ARVADOS_API_HOST")
942 env["ARVADOS_API_HOST_INSECURE"] = os.Getenv("ARVADOS_API_HOST_INSECURE")
944 workdir := runner.Container.Cwd
946 // both "" and "." mean default
950 return runner.executor.Create(containerSpec{
952 VCPUs: runner.Container.RuntimeConstraints.VCPUs,
953 RAM: runner.Container.RuntimeConstraints.RAM,
956 BindMounts: bindmounts,
957 Command: runner.Container.Command,
958 EnableNetwork: enableNetwork,
959 NetworkMode: runner.networkMode,
960 CgroupParent: runner.setCgroupParent,
967 // StartContainer starts the docker container created by CreateContainer.
968 func (runner *ContainerRunner) StartContainer() error {
969 runner.CrunchLog.Printf("Starting container")
970 runner.cStateLock.Lock()
971 defer runner.cStateLock.Unlock()
972 if runner.cCancelled {
975 err := runner.executor.Start()
978 if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
979 advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
981 return fmt.Errorf("could not start container: %v%s", err, advice)
986 // WaitFinish waits for the container to terminate, captures the exit code, and
987 // closes the stdout/stderr logging.
988 func (runner *ContainerRunner) WaitFinish() error {
989 runner.CrunchLog.Print("Waiting for container to finish")
990 var timeout <-chan time.Time
991 if s := runner.Container.SchedulingParameters.MaxRunTime; s > 0 {
992 timeout = time.After(time.Duration(s) * time.Second)
994 ctx, cancel := context.WithCancel(context.Background())
999 runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
1001 case <-runner.ArvMountExit:
1002 runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
1007 exitcode, err := runner.executor.Wait(ctx)
1009 runner.checkBrokenNode(err)
1012 runner.ExitCode = &exitcode
1014 if runner.statReporter != nil {
1015 runner.statReporter.Stop()
1016 err = runner.statLogger.Close()
1018 runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
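// updateLogs periodically saves the log collection and updates the
// container record's log field until the logs are finalized. A save can
// also be triggered early by sending SIGUSR1 to the process.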
1024 func (runner *ContainerRunner) updateLogs() {
1025 ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
1028 sigusr1 := make(chan os.Signal, 1)
1029 signal.Notify(sigusr1, syscall.SIGUSR1)
1030 defer signal.Stop(sigusr1)
1032 saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
1033 saveAtSize := crunchLogUpdateSize
1039 saveAtTime = time.Now()
1041 runner.logMtx.Lock()
1042 done := runner.LogsPDH != nil
1043 runner.logMtx.Unlock()
1047 size := runner.LogCollection.Size()
1048 if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
1051 saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
1052 saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
1053 saved, err := runner.saveLogCollection(false)
1055 runner.CrunchLog.Printf("error updating log collection: %s", err)
1059 var updated arvados.Container
1060 err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
1061 "container": arvadosclient.Dict{"log": saved.PortableDataHash},
1064 runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
1072 // CaptureOutput saves data from the container's output directory if
1073 // needed, and updates the container output accordingly.
1074 func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) error {
1075 if runner.Container.RuntimeConstraints.API {
1076 // Output may have been set directly by the container, so
1077 // refresh the container record to check.
1078 err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
1079 nil, &runner.Container)
1083 if runner.Container.Output != "" {
1084 // Container output is already set.
1085 runner.OutputPDH = &runner.Container.Output
1090 txt, err := (&copier{
1091 client: runner.containerClient,
1092 arvClient: runner.ContainerArvClient,
1093 keepClient: runner.ContainerKeepClient,
1094 hostOutputDir: runner.HostOutputDir,
1095 ctrOutputDir: runner.Container.OutputPath,
1096 bindmounts: bindmounts,
1097 mounts: runner.Container.Mounts,
1098 secretMounts: runner.SecretMounts,
1099 logger: runner.CrunchLog,
1104 if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
1105 runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
1106 fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
1110 txt, err = fs.MarshalManifest(".")
1115 var resp arvados.Collection
1116 err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
1117 "ensure_unique_name": true,
1118 "collection": arvadosclient.Dict{
1120 "name": "output for " + runner.Container.UUID,
1121 "manifest_text": txt,
1125 return fmt.Errorf("error creating output collection: %v", err)
1127 runner.OutputPDH = &resp.PortableDataHash
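// CleanupDirs unmounts arv-mount (killing it if the unmount times out)
// and removes the temporary directories created for this container.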
1131 func (runner *ContainerRunner) CleanupDirs() {
1132 if runner.ArvMount != nil {
1134 umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
1135 umount.Stdout = runner.CrunchLog
1136 umount.Stderr = runner.CrunchLog
1137 runner.CrunchLog.Printf("Running %v", umount.Args)
1138 umnterr := umount.Start()
1141 runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
1143 // If arv-mount --unmount gets stuck for any reason, we
1144 // don't want to wait for it forever. Do Wait() in a goroutine
1145 // so it doesn't block crunch-run.
1146 umountExit := make(chan error)
1148 mnterr := umount.Wait()
1150 runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
1152 umountExit <- mnterr
1155 for again := true; again; {
1161 case <-runner.ArvMountExit:
1163 case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
1164 runner.CrunchLog.Printf("Timed out waiting for unmount")
1166 umount.Process.Kill()
1168 runner.ArvMount.Process.Kill()
1174 if runner.ArvMountPoint != "" {
1175 if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
1176 runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
1180 if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
1181 runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
1185 // CommitLogs posts the collection containing the final container logs.
1186 func (runner *ContainerRunner) CommitLogs() error {
1188 // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
1189 runner.cStateLock.Lock()
1190 defer runner.cStateLock.Unlock()
1192 runner.CrunchLog.Print(runner.finalState)
1194 if runner.arvMountLog != nil {
1195 runner.arvMountLog.Close()
1197 runner.CrunchLog.Close()
1199 // Closing CrunchLog above allows the log files to be committed to Keep at
1200 // this point, but re-open the crunch log with ArvClient in case there are
1201 // any further errors (such as failing to write the log to Keep!) while
1202 // shutting down.
1203 runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
1204 ArvClient: runner.DispatcherArvClient,
1205 UUID: runner.Container.UUID,
1206 loggingStream: "crunch-run",
1209 runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
1212 if runner.LogsPDH != nil {
1213 // If we have already assigned something to LogsPDH,
1214 // we must be closing the re-opened log, which won't
1215 // end up getting attached to the container record and
1216 // therefore doesn't need to be saved as a collection
1217 // -- it exists only to send logs to other channels.
1220 saved, err := runner.saveLogCollection(true)
1222 return fmt.Errorf("error saving log collection: %s", err)
1224 runner.logMtx.Lock()
1225 defer runner.logMtx.Unlock()
1226 runner.LogsPDH = &saved.PortableDataHash
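// saveLogCollection flushes the log collection to Keep and creates or
// updates the corresponding collection record; final indicates this is
// the last save (from CommitLogs) rather than a periodic update.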
1230 func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
1231 runner.logMtx.Lock()
1232 defer runner.logMtx.Unlock()
1233 if runner.LogsPDH != nil {
1234 // Already finalized.
1237 updates := arvadosclient.Dict{
1238 "name": "logs for " + runner.Container.UUID,
1240 mt, err1 := runner.LogCollection.MarshalManifest(".")
1242 // Only send updated manifest text if there was no error.
1244 updates["manifest_text"] = mt
1247 // Even if flushing the manifest had an error, we still want
1248 // to update the log record, if possible, to push the trash_at
1249 // and delete_at times into the future. Details on bug
1252 updates["is_trashed"] = true
1254 exp := time.Now().Add(crunchLogUpdatePeriod * 24)
1255 updates["trash_at"] = exp
1256 updates["delete_at"] = exp
1258 reqBody := arvadosclient.Dict{"collection": updates}
1260 if runner.logUUID == "" {
1261 reqBody["ensure_unique_name"] = true
1262 err2 = runner.DispatcherArvClient.Create("collections", reqBody, &response)
1264 err2 = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
1267 runner.logUUID = response.UUID
1270 if err1 != nil || err2 != nil {
1271 err = fmt.Errorf("error recording logs: %q, %q", err1, err2)
1276 // UpdateContainerRunning updates the container state to "Running"
1277 func (runner *ContainerRunner) UpdateContainerRunning() error {
1278 runner.cStateLock.Lock()
1279 defer runner.cStateLock.Unlock()
1280 if runner.cCancelled {
1283 return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
1284 arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running", "gateway_address": runner.gateway.Address}}, nil)
1287 // ContainerToken returns the api_token the container (and any
1288 // arv-mount processes) are allowed to use.
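// The token is returned in "v2/<authorization uuid>/<api token>/<container uuid>" format.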
1289 func (runner *ContainerRunner) ContainerToken() (string, error) {
1290 if runner.token != "" {
1291 return runner.token, nil
1294 var auth arvados.APIClientAuthorization
1295 err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
1299 runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
1300 return runner.token, nil
1303 // UpdateContainerFinal updates the container record state on API
1304 // server to "Complete" or "Cancelled"
1305 func (runner *ContainerRunner) UpdateContainerFinal() error {
1306 update := arvadosclient.Dict{}
1307 update["state"] = runner.finalState
1308 if runner.LogsPDH != nil {
1309 update["log"] = *runner.LogsPDH
1311 if runner.finalState == "Complete" {
1312 if runner.ExitCode != nil {
1313 update["exit_code"] = *runner.ExitCode
1315 if runner.OutputPDH != nil {
1316 update["output"] = *runner.OutputPDH
1319 return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
1322 // IsCancelled returns the value of Cancelled, with goroutine safety.
1323 func (runner *ContainerRunner) IsCancelled() bool {
1324 runner.cStateLock.Lock()
1325 defer runner.cStateLock.Unlock()
1326 return runner.cCancelled
1329 // NewArvLogWriter creates an ArvLogWriter
1330 func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
1331 writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
1335 return &ArvLogWriter{
1336 ArvClient: runner.DispatcherArvClient,
1337 UUID: runner.Container.UUID,
1338 loggingStream: name,
1339 writeCloser: writer,
1343 // Run the full container lifecycle.
1344 func (runner *ContainerRunner) Run() (err error) {
1345 runner.CrunchLog.Printf("crunch-run %s started", cmd.Version.String())
1346 runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
1348 hostname, hosterr := os.Hostname()
1350 runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
1352 runner.CrunchLog.Printf("Executing on host '%s'", hostname)
1355 runner.finalState = "Queued"
1358 runner.CleanupDirs()
1360 runner.CrunchLog.Printf("crunch-run finished")
1361 runner.CrunchLog.Close()
1364 err = runner.fetchContainerRecord()
1368 if runner.Container.State != "Locked" {
1369 return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
1372 var bindmounts map[string]bindmount
1374 // checkErr prints e (unless it's nil) and sets err to
1375 // e (unless err is already non-nil). Thus, if err
1376 // hasn't already been assigned when Run() returns,
1377 // this cleanup func will cause Run() to return the
1378 // first non-nil error that is passed to checkErr().
1379 checkErr := func(errorIn string, e error) {
1383 runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
1387 if runner.finalState == "Complete" {
1388 // There was an error in the finalization.
1389 runner.finalState = "Cancelled"
1393 // Log the error encountered in Run(), if any
1394 checkErr("Run", err)
1396 if runner.finalState == "Queued" {
1397 runner.UpdateContainerFinal()
1401 if runner.IsCancelled() {
1402 runner.finalState = "Cancelled"
1403 // but don't return yet -- we still want to
1404 // capture partial output and write logs
1407 if bindmounts != nil {
1408 checkErr("CaptureOutput", runner.CaptureOutput(bindmounts))
1410 checkErr("stopHoststat", runner.stopHoststat())
1411 checkErr("CommitLogs", runner.CommitLogs())
1412 checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
1415 runner.setupSignals()
1416 err = runner.startHoststat()
1421 // set up FUSE mount and binds
1422 bindmounts, err = runner.SetupMounts()
1424 runner.finalState = "Cancelled"
1425 err = fmt.Errorf("While setting up mounts: %v", err)
1429 // check for and/or load image
1430 imageID, err := runner.LoadImage()
1432 if !runner.checkBrokenNode(err) {
1433 // Failed to load image but not due to a "broken node"
1434 // condition, probably user error.
1435 runner.finalState = "Cancelled"
1437 err = fmt.Errorf("While loading container image: %v", err)
1441 err = runner.CreateContainer(imageID, bindmounts)
1445 err = runner.LogHostInfo()
1449 err = runner.LogNodeRecord()
1453 err = runner.LogContainerRecord()
1458 if runner.IsCancelled() {
1462 err = runner.UpdateContainerRunning()
1466 runner.finalState = "Cancelled"
1468 err = runner.startCrunchstat()
1473 err = runner.StartContainer()
1475 runner.checkBrokenNode(err)
1479 err = runner.WaitFinish()
1480 if err == nil && !runner.IsCancelled() {
1481 runner.finalState = "Complete"
1486 // Fetch the current container record (uuid = runner.Container.UUID)
1487 // into runner.Container.
1488 func (runner *ContainerRunner) fetchContainerRecord() error {
1489 reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
1491 return fmt.Errorf("error fetching container record: %v", err)
1493 defer reader.Close()
1495 dec := json.NewDecoder(reader)
1497 err = dec.Decode(&runner.Container)
1499 return fmt.Errorf("error decoding container record: %v", err)
1503 SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
1506 containerToken, err := runner.ContainerToken()
1508 return fmt.Errorf("error getting container token: %v", err)
1511 runner.ContainerArvClient, runner.ContainerKeepClient,
1512 runner.containerClient, err = runner.MkArvClient(containerToken)
1514 return fmt.Errorf("error creating container API client: %v", err)
1517 err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
1519 if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
1520 return fmt.Errorf("error fetching secret_mounts: %v", err)
1522 // ok && apierr.HttpStatusCode == 404, which means
1523 // secret_mounts isn't supported by this API server.
1525 runner.SecretMounts = sm.SecretMounts
1530 // NewContainerRunner creates a new container runner.
1531 func NewContainerRunner(dispatcherClient *arvados.Client,
1532 dispatcherArvClient IArvadosClient,
1533 dispatcherKeepClient IKeepClient,
1534 containerUUID string) (*ContainerRunner, error) {
1536 cr := &ContainerRunner{
1537 dispatcherClient: dispatcherClient,
1538 DispatcherArvClient: dispatcherArvClient,
1539 DispatcherKeepClient: dispatcherKeepClient,
1541 cr.NewLogWriter = cr.NewArvLogWriter
1542 cr.RunArvMount = cr.ArvMountCmd
1543 cr.MkTempDir = ioutil.TempDir
1544 cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
1545 cl, err := arvadosclient.MakeArvadosClient()
1547 return nil, nil, nil, err
1550 kc, err := keepclient.MakeKeepClient(cl)
1552 return nil, nil, nil, err
1554 c2 := arvados.NewClientFromEnv()
1555 c2.AuthToken = token
1556 return cl, kc, c2, nil
1559 cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
1563 cr.Container.UUID = containerUUID
1564 w, err := cr.NewLogWriter("crunch-run")
1568 cr.CrunchLog = NewThrottledLogger(w)
1569 cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
1571 loadLogThrottleParams(dispatcherArvClient)
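// RunCommand implements the crunch-run command line tool: it parses
// flags, sets up API and Keep clients and the container executor, and
// runs the requested container to completion, returning a process exit
// code. Usage: crunch-run [options] UUID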
1577 func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
1578 flags := flag.NewFlagSet(prog, flag.ContinueOnError)
1579 statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
1580 cgroupRoot := flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
1581 cgroupParent := flags.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
1582 cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
1583 caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
1584 detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
1585 stdinEnv := flags.Bool("stdin-env", false, "Load environment variables from JSON message on stdin")
1586 sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
1587 kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
1588 list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes")
1589 enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
1590 networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
1591 memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
1592 runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
1593 flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
1595 ignoreDetachFlag := false
1596 if len(args) > 0 && args[0] == "-no-detach" {
1597 // This process was invoked by a parent process, which
1598 // has passed along its own arguments, including
1599 // -detach, after the leading -no-detach flag. Strip
1600 // the leading -no-detach flag (it's not recognized by
1601 // flags.Parse()) and ignore the -detach flag that comes later.
1604 ignoreDetachFlag = true
1607 if err := flags.Parse(args); err == flag.ErrHelp {
1609 } else if err != nil {
1614 if *stdinEnv && !ignoreDetachFlag {
1615 // Load env vars on stdin if asked (but not in a
1616 // detached child process, in which case stdin is not available).
1618 err := loadEnv(os.Stdin)
1625 containerUUID := flags.Arg(0)
1628 case *detach && !ignoreDetachFlag:
1629 return Detach(containerUUID, prog, args, os.Stdout, os.Stderr)
1631 return KillProcess(containerUUID, syscall.Signal(*kill), os.Stdout, os.Stderr)
1633 return ListProcesses(os.Stdout, os.Stderr)
1636 if containerUUID == "" {
1637 log.Printf("usage: %s [options] UUID", prog)
1641 log.Printf("crunch-run %s started", cmd.Version.String())
1644 if *caCertsPath != "" {
1645 arvadosclient.CertFiles = []string{*caCertsPath}
1648 api, err := arvadosclient.MakeArvadosClient()
1650 log.Printf("%s: %v", containerUUID, err)
1655 kc, kcerr := keepclient.MakeKeepClient(api)
1657 log.Printf("%s: %v", containerUUID, kcerr)
1660 kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
1663 cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
1669 switch *runtimeEngine {
1671 cr.executor, err = newDockerExecutor(containerUUID, cr.CrunchLog.Printf, cr.containerWatchdogInterval)
1673 cr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)
1675 cr.CrunchLog.Printf("%s: unsupported RuntimeEngine %q", containerUUID, *runtimeEngine)
1676 cr.CrunchLog.Close()
1680 cr.CrunchLog.Printf("%s: %v", containerUUID, err)
1681 cr.checkBrokenNode(err)
1682 cr.CrunchLog.Close()
1685 defer cr.executor.Close()
1687 gwAuthSecret := os.Getenv("GatewayAuthSecret")
1688 os.Unsetenv("GatewayAuthSecret")
1689 if gwAuthSecret == "" {
1690 // not safe to run a gateway service without an auth secret
1692 } else if gwListen := os.Getenv("GatewayAddress"); gwListen == "" {
1693 // dispatcher did not tell us which external IP
1694 // address to advertise --> no gateway service
1695 } else if de, ok := cr.executor.(*dockerExecutor); ok {
1696 cr.gateway = Gateway{
1698 AuthSecret: gwAuthSecret,
1699 ContainerUUID: containerUUID,
1700 DockerContainerID: &de.containerID,
1702 ContainerIPAddress: dockerContainerIPAddress(&de.containerID),
1704 err = cr.gateway.Start()
1706 log.Printf("error starting gateway server: %s", err)
1711 parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerUUID+".")
1713 log.Printf("%s: %v", containerUUID, tmperr)
1717 cr.parentTemp = parentTemp
1718 cr.statInterval = *statInterval
1719 cr.cgroupRoot = *cgroupRoot
1720 cr.expectCgroupParent = *cgroupParent
1721 cr.enableNetwork = *enableNetwork
1722 cr.networkMode = *networkMode
1723 if *cgroupParentSubsystem != "" {
1724 p := findCgroup(*cgroupParentSubsystem)
1725 cr.setCgroupParent = p
1726 cr.expectCgroupParent = p
1731 if *memprofile != "" {
1732 f, err := os.Create(*memprofile)
1734 log.Printf("could not create memory profile: %s", err)
1736 runtime.GC() // get up-to-date statistics
1737 if err := pprof.WriteHeapProfile(f); err != nil {
1738 log.Printf("could not write memory profile: %s", err)
1740 closeerr := f.Close()
1741 if closeerr != nil {
1742 log.Printf("closing memprofile file: %s", err)
1747 log.Printf("%s: %v", containerUUID, runerr)
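// loadEnv reads a JSON object mapping environment variable names to
// values (e.g. {"ARVADOS_API_HOST": "...", "ARVADOS_API_TOKEN": "..."})
// from rdr and applies it to the current process environment.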
1753 func loadEnv(rdr io.Reader) error {
1754 buf, err := ioutil.ReadAll(rdr)
1756 return fmt.Errorf("read stdin: %s", err)
1758 var env map[string]string
1759 err = json.Unmarshal(buf, &env)
1761 return fmt.Errorf("decode stdin: %s", err)
1763 for k, v := range env {
1764 err = os.Setenv(k, v)
1766 return fmt.Errorf("setenv(%q): %s", k, err)