// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package crunchrun

import (
	"bytes"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"os/signal"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"runtime/pprof"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"git.arvados.org/arvados.git/lib/cmd"
	"git.arvados.org/arvados.git/lib/crunchstat"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadosclient"
	"git.arvados.org/arvados.git/sdk/go/keepclient"
	"git.arvados.org/arvados.git/sdk/go/manifest"
	"golang.org/x/net/context"

	dockertypes "github.com/docker/docker/api/types"
	dockercontainer "github.com/docker/docker/api/types/container"
	dockernetwork "github.com/docker/docker/api/types/network"
	dockerclient "github.com/docker/docker/client"
)

var Command = command{}
// IArvadosClient defines the minimal Arvados API methods used by crunch-run.
type IArvadosClient interface {
	Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
	Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
	CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
	Discovery(key string) (interface{}, error)
}

// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")
// IKeepClient defines the minimal Keep API methods used by crunch-run.
type IKeepClient interface {
	PutB(buf []byte) (string, int, error)
	ReadAt(locator string, p []byte, off int) (int, error)
	ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
	LocalLocator(locator string) (string, error)
	ClearBlockCache()
}

// NewLogWriter is a factory function to create a new log writer.
type NewLogWriter func(name string) (io.WriteCloser, error)

// RunArvMount is a factory function to start an arv-mount process.
type RunArvMount func(args []string, tok string) (*exec.Cmd, error)

// MkTempDir is a factory function to create a temporary directory.
type MkTempDir func(string, string) (string, error)
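// NewLogWriter, RunArvMount, and MkTempDir (together with the
// MkArvClient field on ContainerRunner below) are function types so the
// corresponding behavior can be injected: NewContainerRunner wires them
// to the real implementations (NewArvLogWriter, ArvMountCmd,
// ioutil.TempDir), and tests can presumably substitute fakes. A
// hypothetical stub, for illustration only:
//
//	cr.MkTempDir = func(dir, prefix string) (string, error) {
//		return "/tmp/fake", nil // canned path for a test
//	}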
// ThinDockerClient is the minimal Docker client interface used by crunch-run.
type ThinDockerClient interface {
	ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error)
	ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
		networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
	ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
	ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
	ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
	ContainerInspect(ctx context.Context, id string) (dockertypes.ContainerJSON, error)
	ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
	ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
	ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
}

// PsProcess is a minimal interface for inspecting a running process's
// command line.
type PsProcess interface {
	CmdlineSlice() ([]string, error)
}
// ContainerRunner is the main stateful struct used for a single
// execution of a container.
type ContainerRunner struct {
	ContainerExecRunner ThinContainerExecRunner

	//Docker ThinDockerClient
	//ContainerConfig dockercontainer.Config //FIXME: translate this to the ThinContainerExecRunner interface
	HostConfig dockercontainer.HostConfig //FIXME: translate this to the ThinContainerExecRunner interface
	// Dispatcher client is initialized with the Dispatcher token.
	// This is a privileged token used to manage container status
	// and logs.
	//
	// We have both dispatcherClient and DispatcherArvClient
	// because there are two different incompatible Arvados Go
	// SDKs and we have to use both (hopefully this gets fixed in
	// the future).
	dispatcherClient     *arvados.Client
	DispatcherArvClient  IArvadosClient
	DispatcherKeepClient IKeepClient

	// Container client is initialized with the Container token.
	// This token controls the permissions of the container, and
	// must be used for operations such as reading collections.
	//
	// Same comment as above applies to
	// containerClient/ContainerArvClient.
	containerClient     *arvados.Client
	ContainerArvClient  IArvadosClient
	ContainerKeepClient IKeepClient
	Container     arvados.Container
	ContainerID   string
	token         string
	ExitCode      *int
	NewLogWriter  NewLogWriter
	loggingDone   chan bool
	CrunchLog     *ThrottledLogger
	Stdout        io.WriteCloser
	Stderr        io.WriteCloser
	logUUID       string
	logMtx        sync.Mutex
	LogCollection arvados.CollectionFileSystem
	LogsPDH       *string
	RunArvMount   RunArvMount
	MkTempDir     MkTempDir
	ArvMount      *exec.Cmd
	ArvMountPoint string
	HostOutputDir string
	Binds         []string
	Volumes       map[string]struct{}
	OutputPDH     *string
	SigChan       chan os.Signal
	ArvMountExit  chan error
	SecretMounts  map[string]arvados.Mount
	MkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
	finalState    string
	parentTemp    string

	statLogger       io.WriteCloser
	statReporter     *crunchstat.Reporter
	hoststatLogger   io.WriteCloser
	hoststatReporter *crunchstat.Reporter
	statInterval     time.Duration
	cgroupRoot       string
	// What we expect the container's cgroup parent to be.
	expectCgroupParent string
	// What we tell docker to use as the container's cgroup
	// parent. Note: Ideally we would use the same field for both
	// expectCgroupParent and setCgroupParent, and just make it
	// default to "docker". However, when using docker < 1.10 with
	// systemd, specifying a non-empty cgroup parent (even the
	// default value "docker") hits a docker bug
	// (https://github.com/docker/docker/issues/17126). Using two
	// separate fields makes it possible to use the "expect cgroup
	// parent to be X" feature even on sites where the "specify
	// cgroup parent" feature breaks.
	setCgroupParent string

	cStateLock sync.Mutex
	cCancelled bool // StopContainer() invoked
	cRemoved   bool // docker confirmed the container no longer exists
	enableNetwork string // one of "default" or "always"
	networkMode   string // passed through to HostConfig.NetworkMode
	arvMountLog   *ThrottledLogger

	containerWatchdogInterval time.Duration

	gateway Gateway
}
// setupSignals sets up signal handling to gracefully terminate the
// underlying Docker container and update state when receiving a TERM,
// INT or QUIT signal.
func (runner *ContainerRunner) setupSignals() {
	runner.SigChan = make(chan os.Signal, 1)
	signal.Notify(runner.SigChan, syscall.SIGTERM)
	signal.Notify(runner.SigChan, syscall.SIGINT)
	signal.Notify(runner.SigChan, syscall.SIGQUIT)

	go func(sig chan os.Signal) {
		for s := range sig {
			runner.stop(s)
		}
	}(runner.SigChan)
}
// stop the underlying Docker container.
func (runner *ContainerRunner) stop(sig os.Signal) {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if sig != nil {
		runner.CrunchLog.Printf("caught signal: %v", sig)
	}
	if runner.ContainerID == "" {
		return
	}
	runner.cCancelled = true
	runner.CrunchLog.Printf("removing container")
	err := runner.ContainerExecRunner.ContainerRemove(context.TODO(), runner.ContainerID, ContainerRemoveOptions{Force: true})
	if err != nil {
		runner.CrunchLog.Printf("error removing container: %s", err)
	}
	if err == nil || strings.Contains(err.Error(), "No such container: "+runner.ContainerID) {
		runner.cRemoved = true
	}
}
var errorBlacklist = []string{
	"(?ms).*[Cc]annot connect to the Docker daemon.*",
	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
	"(?ms).*grpc: the connection is unavailable.*",
}

var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
func (runner *ContainerRunner) runBrokenNodeHook() {
	if *brokenNodeHook == "" {
		path := filepath.Join(lockdir, brokenfile)
		runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
		f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
		if err != nil {
			runner.CrunchLog.Printf("Error writing %s: %s", path, err)
		} else {
			f.Close()
		}
	} else {
		runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
		// run killme script
		c := exec.Command(*brokenNodeHook)
		c.Stdout = runner.CrunchLog
		c.Stderr = runner.CrunchLog
		err := c.Run()
		if err != nil {
			runner.CrunchLog.Printf("Error running broken node hook: %v", err)
		}
	}
}
func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
	for _, d := range errorBlacklist {
		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
			runner.runBrokenNodeHook()
			return true
		}
	}
	return false
}
// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store. If not, it loads
// the image from Keep.
func (runner *ContainerRunner) LoadImage() (err error) {

	runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

	var collection arvados.Collection
	err = runner.ContainerArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
	if err != nil {
		return fmt.Errorf("While getting container image collection: %v", err)
	}
	manifest := manifest.Manifest{Text: collection.ManifestText}
	var img, imageID string
	for ms := range manifest.StreamIter() {
		img = ms.FileStreamSegments[0].Name
		if !strings.HasSuffix(img, ".tar") {
			return fmt.Errorf("First file in the container image collection does not end in .tar")
		}
		imageID = img[:len(img)-4]
	}

	runner.CrunchLog.Printf("Using Docker image id '%s'", imageID)

	_, _, err = runner.ContainerExecRunner.ImageInspectWithRaw(context.TODO(), imageID)
	if err != nil {
		runner.CrunchLog.Print("Loading Docker image from keep")

		var readCloser io.ReadCloser
		readCloser, err = runner.ContainerKeepClient.ManifestFileReader(manifest, img)
		if err != nil {
			return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
		}

		response, err := runner.ContainerExecRunner.ImageLoad(context.TODO(), readCloser, true)
		if err != nil {
			return fmt.Errorf("While loading container image into Docker: %v", err)
		}

		defer response.Body.Close()
		rbody, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return fmt.Errorf("Reading response to image load: %v", err)
		}
		runner.CrunchLog.Printf("Docker response: %s", rbody)
	} else {
		runner.CrunchLog.Print("Docker image is available")
	}

	runner.ContainerExecRunner.SetImage(imageID)

	runner.ContainerKeepClient.ClearBlockCache()

	return nil
}
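// The image collection LoadImage expects is a Keep collection whose
// first (and normally only) file is a docker image tarball named after
// the image ID -- e.g. a manifest listing a single file like
// "58714eb954db...af4d4.tar" (hash shortened here for illustration).
// Stripping the ".tar" suffix recovers the image ID that "docker save"
// used when the image was originally exported.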
func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
	c = exec.Command("arv-mount", arvMountCmd...)

	// Copy our environment, but override ARVADOS_API_TOKEN with
	// the container auth token.
	c.Env = nil
	for _, s := range os.Environ() {
		if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
			c.Env = append(c.Env, s)
		}
	}
	c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

	w, err := runner.NewLogWriter("arv-mount")
	if err != nil {
		return nil, err
	}
	runner.arvMountLog = NewThrottledLogger(w)
	c.Stdout = runner.arvMountLog
	c.Stderr = runner.arvMountLog

	runner.CrunchLog.Printf("Running %v", c.Args)

	err = c.Start()
	if err != nil {
		return nil, err
	}

	statReadme := make(chan bool)
	runner.ArvMountExit = make(chan error)

	keepStatting := true
	go func() {
		for keepStatting {
			time.Sleep(100 * time.Millisecond)
			_, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
			if err == nil {
				keepStatting = false
				close(statReadme)
			}
		}
	}()

	go func() {
		mnterr := c.Wait()
		if mnterr != nil {
			runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
		}
		runner.ArvMountExit <- mnterr
		close(runner.ArvMountExit)
	}()

	select {
	case <-statReadme:
		break
	case err := <-runner.ArvMountExit:
		runner.ArvMount = nil
		keepStatting = false
		return nil, err
	}

	return c, nil
}
func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
	if runner.ArvMountPoint == "" {
		runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
	}
	return
}
func copyfile(src string, dst string) (err error) {
	srcfile, err := os.Open(src)
	if err != nil {
		return
	}

	os.MkdirAll(path.Dir(dst), 0777)

	dstfile, err := os.Create(dst)
	if err != nil {
		return
	}
	_, err = io.Copy(dstfile, srcfile)
	if err != nil {
		return
	}

	err = srcfile.Close()
	err2 := dstfile.Close()
	if err == nil {
		err = err2
	}
	return
}
func (runner *ContainerRunner) SetupMounts() (err error) {
	err = runner.SetupArvMountPoint("keep")
	if err != nil {
		return fmt.Errorf("While creating keep mount temp dir: %v", err)
	}

	token, err := runner.ContainerToken()
	if err != nil {
		return fmt.Errorf("could not get container token: %s", err)
	}

	pdhOnly := true
	tmpcount := 0
	arvMountCmd := []string{
		"--foreground",
		"--allow-other",
		"--read-write",
		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}

	if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
		arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
	}

	collectionPaths := []string{}
	runner.Binds = nil
	runner.Volumes = make(map[string]struct{})
	needCertMount := true
	type copyFile struct {
		src  string
		bind string
	}
	var copyFiles []copyFile

	var binds []string
	for bind := range runner.Container.Mounts {
		binds = append(binds, bind)
	}
	for bind := range runner.SecretMounts {
		if _, ok := runner.Container.Mounts[bind]; ok {
			return fmt.Errorf("secret mount %q conflicts with regular mount", bind)
		}
		if runner.SecretMounts[bind].Kind != "json" &&
			runner.SecretMounts[bind].Kind != "text" {
			return fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
				bind, runner.SecretMounts[bind].Kind)
		}
		binds = append(binds, bind)
	}
	sort.Strings(binds)
	for _, bind := range binds {
		mnt, ok := runner.Container.Mounts[bind]
		if !ok {
			mnt = runner.SecretMounts[bind]
		}
		if bind == "stdout" || bind == "stderr" {
			// Is it a "file" mount kind?
			if mnt.Kind != "file" {
				return fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
			}

			// Does path start with OutputPath?
			prefix := runner.Container.OutputPath
			if !strings.HasSuffix(prefix, "/") {
				prefix += "/"
			}
			if !strings.HasPrefix(mnt.Path, prefix) {
				return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
			}
		}

		if bind == "stdin" {
			// Is it a "collection" mount kind?
			if mnt.Kind != "collection" && mnt.Kind != "json" {
				return fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
			}
		}

		if bind == "/etc/arvados/ca-certificates.crt" {
			needCertMount = false
		}

		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
			if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
				return fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
			}
		}

		switch {
		case mnt.Kind == "collection" && bind != "stdin":
			var src string
			if mnt.UUID != "" && mnt.PortableDataHash != "" {
				return fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
			}
			if mnt.UUID != "" {
				if mnt.Writable {
					return fmt.Errorf("writing to existing collections currently not permitted")
				}
				pdhOnly = false
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
			} else if mnt.PortableDataHash != "" {
				if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					return fmt.Errorf("can never write to a collection specified by portable data hash")
				}
				idx := strings.Index(mnt.PortableDataHash, "/")
				if idx > 0 {
					mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
					mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
					runner.Container.Mounts[bind] = mnt
				}
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
				if mnt.Path != "" && mnt.Path != "." {
					if strings.HasPrefix(mnt.Path, "./") {
						mnt.Path = mnt.Path[2:]
					} else if strings.HasPrefix(mnt.Path, "/") {
						mnt.Path = mnt.Path[1:]
					}
					src += "/" + mnt.Path
				}
			} else {
				src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
				arvMountCmd = append(arvMountCmd, "--mount-tmp")
				arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
				tmpcount++
			}

			if mnt.Writable {
				if bind == runner.Container.OutputPath {
					runner.HostOutputDir = src
					runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
				} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
				} else {
					runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
				}
			} else {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
			}
			collectionPaths = append(collectionPaths, src)
		case mnt.Kind == "tmp":
			var tmpdir string
			tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
			if err != nil {
				return fmt.Errorf("while creating mount temp dir: %v", err)
			}
			st, staterr := os.Stat(tmpdir)
			if staterr != nil {
				return fmt.Errorf("while Stat on temp dir: %v", staterr)
			}
			err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return fmt.Errorf("while Chmod temp dir: %v", err)
			}
			runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = tmpdir
			}
		case mnt.Kind == "json" || mnt.Kind == "text":
			var filedata []byte
			if mnt.Kind == "json" {
				filedata, err = json.Marshal(mnt.Content)
				if err != nil {
					return fmt.Errorf("encoding json data: %v", err)
				}
			} else {
				text, ok := mnt.Content.(string)
				if !ok {
					return fmt.Errorf("content for mount %q must be a string", bind)
				}
				filedata = []byte(text)
			}

			tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
			if err != nil {
				return fmt.Errorf("creating temp dir: %v", err)
			}
			tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
			err = ioutil.WriteFile(tmpfn, filedata, 0444)
			if err != nil {
				return fmt.Errorf("writing temp file: %v", err)
			}
			if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
				copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
			} else {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
			}
		case mnt.Kind == "git_tree":
			tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
			if err != nil {
				return fmt.Errorf("creating temp dir: %v", err)
			}
			err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
			if err != nil {
				return err
			}
			runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
		}
	}

	if runner.HostOutputDir == "" {
		return fmt.Errorf("output path does not correspond to a writable mount point")
	}

	if needCertMount && runner.Container.RuntimeConstraints.API {
		for _, certfile := range arvadosclient.CertFiles {
			_, err := os.Stat(certfile)
			if err == nil {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
				break
			}
		}
	}

	if pdhOnly {
		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
	} else {
		arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
	}
	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
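	// At this point arvMountCmd is complete. For a container with one
	// tmp-backed collection mount and only PDH-specified inputs, the
	// resulting command looks something like (illustrative only; paths
	// and values are hypothetical):
	//
	//	arv-mount --foreground --allow-other --read-write \
	//	    --crunchstat-interval=10 --mount-tmp tmp0 \
	//	    --mount-by-pdh by_id /tmp/crunch-run.zzzzz-dz642-xxxxxxxxxxxxxxx.123/keep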
	runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
	if err != nil {
		return fmt.Errorf("while trying to start arv-mount: %v", err)
	}

	for _, p := range collectionPaths {
		_, err = os.Stat(p)
		if err != nil {
			return fmt.Errorf("while checking that input files exist: %v", err)
		}
	}

	for _, cp := range copyFiles {
		st, err := os.Stat(cp.src)
		if err != nil {
			return fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
		if st.IsDir() {
			err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
				if walkerr != nil {
					return walkerr
				}
				target := path.Join(cp.bind, walkpath[len(cp.src):])
				if walkinfo.Mode().IsRegular() {
					copyerr := copyfile(walkpath, target)
					if copyerr != nil {
						return copyerr
					}
					return os.Chmod(target, walkinfo.Mode()|0777)
				} else if walkinfo.Mode().IsDir() {
					mkerr := os.MkdirAll(target, 0777)
					if mkerr != nil {
						return mkerr
					}
					return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
				} else {
					return fmt.Errorf("source %q is not a regular file or directory", cp.src)
				}
			})
		} else if st.Mode().IsRegular() {
			err = copyfile(cp.src, cp.bind)
			if err == nil {
				err = os.Chmod(cp.bind, st.Mode()|0777)
			}
		}
		if err != nil {
			return fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
	}

	return nil
}
// ProcessDockerAttach demultiplexes the docker attach stream into the
// container's stdout and stderr writers, then closes the logs.
func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
	// Handle docker log protocol
	// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
	defer close(runner.loggingDone)

	header := make([]byte, 8)
	var err error
	for {
		_, err = io.ReadAtLeast(containerReader, header, 8)
		if err != nil {
			if err == io.EOF {
				err = nil
			}
			break
		}
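		// Each frame in the multiplexed attach stream starts with
		// an 8-byte header: byte 0 is the stream ID (0=stdin,
		// 1=stdout, 2=stderr), bytes 1-3 are zero, and bytes 4-7
		// hold the payload size as a big-endian uint32. The payload
		// bytes follow immediately after the header.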
		readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
		if header[0] == 1 {
			_, err = io.CopyN(runner.Stdout, containerReader, readsize)
		} else {
			// stderr
			_, err = io.CopyN(runner.Stderr, containerReader, readsize)
		}
	}

	if err != nil {
		runner.CrunchLog.Printf("error reading docker logs: %v", err)
	}

	err = runner.Stdout.Close()
	if err != nil {
		runner.CrunchLog.Printf("error closing stdout logs: %v", err)
	}

	err = runner.Stderr.Close()
	if err != nil {
		runner.CrunchLog.Printf("error closing stderr logs: %v", err)
	}

	if runner.statReporter != nil {
		runner.statReporter.Stop()
		err = runner.statLogger.Close()
		if err != nil {
			runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
		}
	}
}
func (runner *ContainerRunner) stopHoststat() error {
	if runner.hoststatReporter == nil {
		return nil
	}
	runner.hoststatReporter.Stop()
	err := runner.hoststatLogger.Close()
	if err != nil {
		return fmt.Errorf("error closing hoststat logs: %v", err)
	}
	return nil
}
func (runner *ContainerRunner) startHoststat() error {
	w, err := runner.NewLogWriter("hoststat")
	if err != nil {
		return err
	}
	runner.hoststatLogger = NewThrottledLogger(w)
	runner.hoststatReporter = &crunchstat.Reporter{
		Logger:     log.New(runner.hoststatLogger, "", 0),
		CgroupRoot: runner.cgroupRoot,
		PollPeriod: runner.statInterval,
	}
	runner.hoststatReporter.Start()
	return nil
}
func (runner *ContainerRunner) startCrunchstat() error {
	w, err := runner.NewLogWriter("crunchstat")
	if err != nil {
		return err
	}
	runner.statLogger = NewThrottledLogger(w)
	runner.statReporter = &crunchstat.Reporter{
		CID:          runner.ContainerID,
		Logger:       log.New(runner.statLogger, "", 0),
		CgroupParent: runner.expectCgroupParent,
		CgroupRoot:   runner.cgroupRoot,
		PollPeriod:   runner.statInterval,
		TempDir:      runner.parentTemp,
	}
	runner.statReporter.Start()
	return nil
}
type infoCommand struct {
	label string
	cmd   []string
}

// LogHostInfo logs info about the current host, for debugging and
// accounting purposes. Although it's logged as "node-info", this is
// about the environment where crunch-run is actually running, which
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
	w, err := runner.NewLogWriter("node-info")
	if err != nil {
		return
	}

	commands := []infoCommand{
		{
			label: "Host Information",
			cmd:   []string{"uname", "-a"},
		},
		{
			label: "CPU Information",
			cmd:   []string{"cat", "/proc/cpuinfo"},
		},
		{
			label: "Memory Information",
			cmd:   []string{"cat", "/proc/meminfo"},
		},
		{
			label: "Disk Space",
			cmd:   []string{"df", "-m", "/", os.TempDir()},
		},
		{
			label: "Disk INodes",
			cmd:   []string{"df", "-i", "/", os.TempDir()},
		},
	}

	// Run commands with informational output to be logged.
	for _, command := range commands {
		fmt.Fprintln(w, command.label)
		cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
		cmd.Stdout = w
		cmd.Stderr = w
		if err := cmd.Run(); err != nil {
			err = fmt.Errorf("While running command %q: %v", command.cmd, err)
			fmt.Fprintln(w, err)
			return err
		}
		fmt.Fprintln(w, "")
	}

	err = w.Close()
	if err != nil {
		return fmt.Errorf("While closing node-info logs: %v", err)
	}
	return nil
}
// LogContainerRecord gets and saves the raw JSON container record from the API server.
func (runner *ContainerRunner) LogContainerRecord() error {
	logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
	if !logged && err == nil {
		err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
	}
	return err
}
// LogNodeRecord logs the current host's InstanceType config entry (or
// the arvados#node record, if running via crunch-dispatch-slurm).
func (runner *ContainerRunner) LogNodeRecord() error {
	if it := os.Getenv("InstanceType"); it != "" {
		// Dispatched via arvados-dispatch-cloud. Save
		// InstanceType config fragment received from
		// dispatcher on stdin.
		w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
		if err != nil {
			return err
		}
		_, err = io.WriteString(w, it)
		if err != nil {
			return err
		}
		return w.Close()
	}
	// Dispatched via crunch-dispatch-slurm. Look up
	// apiserver's node record corresponding to
	// this host.
	hostname := os.Getenv("SLURMD_NODENAME")
	if hostname == "" {
		hostname, _ = os.Hostname()
	}
	_, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
		// The "info" field has admin-only info when
		// obtained with a privileged token, and
		// should not be logged.
		node, ok := resp.(map[string]interface{})
		if ok {
			delete(node, "info")
		}
	})
	return err
}
func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
	writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return false, err
	}
	w := &ArvLogWriter{
		ArvClient:     runner.DispatcherArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: label,
		writeCloser:   writer,
	}

	reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
	if err != nil {
		return false, fmt.Errorf("error getting %s record: %v", label, err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	var resp map[string]interface{}
	if err = dec.Decode(&resp); err != nil {
		return false, fmt.Errorf("error decoding %s list response: %v", label, err)
	}
	items, ok := resp["items"].([]interface{})
	if !ok {
		return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
	} else if len(items) < 1 {
		return false, nil
	}
	if munge != nil {
		munge(items[0])
	}
	// Re-encode it using indentation to improve readability
	enc := json.NewEncoder(w)
	enc.SetIndent("", "    ")
	if err = enc.Encode(items[0]); err != nil {
		return false, fmt.Errorf("error logging %s record: %v", label, err)
	}
	err = w.Close()
	if err != nil {
		return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
	}
	return true, nil
}
// AttachStreams connects the docker container stdin, stdout and stderr logs
// to the Arvados logger which logs to Keep and the API server logs table.
func (runner *ContainerRunner) AttachStreams() (err error) {

	runner.CrunchLog.Print("Attaching container streams")

	// If stdin mount is provided, attach it to the docker container
	var stdinRdr arvados.File
	var stdinJSON []byte
	if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
		if stdinMnt.Kind == "collection" {
			var stdinColl arvados.Collection
			collID := stdinMnt.UUID
			if collID == "" {
				collID = stdinMnt.PortableDataHash
			}
			err = runner.ContainerArvClient.Get("collections", collID, nil, &stdinColl)
			if err != nil {
				return fmt.Errorf("While getting stdin collection: %v", err)
			}

			stdinRdr, err = runner.ContainerKeepClient.ManifestFileReader(
				manifest.Manifest{Text: stdinColl.ManifestText},
				stdinMnt.Path)
			if os.IsNotExist(err) {
				return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
			} else if err != nil {
				return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
			}
		} else if stdinMnt.Kind == "json" {
			stdinJSON, err = json.Marshal(stdinMnt.Content)
			if err != nil {
				return fmt.Errorf("While encoding stdin json data: %v", err)
			}
		}
	}

	stdinUsed := stdinRdr != nil || len(stdinJSON) != 0
	response, err := runner.ContainerExecRunner.ContainerAttach(context.TODO(), runner.ContainerID,
		ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
	if err != nil {
		return fmt.Errorf("While attaching container stdout/stderr streams: %v", err)
	}

	runner.loggingDone = make(chan bool)

	if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
		stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path)
		if err != nil {
			return err
		}
		runner.Stdout = stdoutFile
	} else if w, err := runner.NewLogWriter("stdout"); err != nil {
		return err
	} else {
		runner.Stdout = NewThrottledLogger(w)
	}

	if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
		stderrFile, err := runner.getStdoutFile(stderrMnt.Path)
		if err != nil {
			return err
		}
		runner.Stderr = stderrFile
	} else if w, err := runner.NewLogWriter("stderr"); err != nil {
		return err
	} else {
		runner.Stderr = NewThrottledLogger(w)
	}

	if stdinRdr != nil {
		go func() {
			_, err := io.Copy(response.Conn, stdinRdr)
			if err != nil {
				runner.CrunchLog.Printf("While writing stdin collection to docker container: %v", err)
			}
			stdinRdr.Close()
			response.CloseWrite()
		}()
	} else if len(stdinJSON) != 0 {
		go func() {
			_, err := io.Copy(response.Conn, bytes.NewReader(stdinJSON))
			if err != nil {
				runner.CrunchLog.Printf("While writing stdin json to docker container: %v", err)
			}
			response.CloseWrite()
		}()
	}

	go runner.ProcessDockerAttach(response.Reader)

	return nil
}
func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
	stdoutPath := mntPath[len(runner.Container.OutputPath):]
	index := strings.LastIndex(stdoutPath, "/")
	if index > 0 {
		subdirs := stdoutPath[:index]
		if subdirs != "" {
			st, err := os.Stat(runner.HostOutputDir)
			if err != nil {
				return nil, fmt.Errorf("While Stat on temp dir: %v", err)
			}
			stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
			err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
			}
		}
	}
	stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
	if err != nil {
		return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
	}

	return stdoutFile, nil
}
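// Example of the path arithmetic above (illustrative values): with
// OutputPath="/out" and a stdout mount path of "/out/sub/dir/out.txt",
// stdoutPath becomes "sub/dir/out.txt", the subdirectory "sub/dir" is
// created under HostOutputDir, and the returned *os.File is
// HostOutputDir/sub/dir/out.txt.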
// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer() error {
	runner.CrunchLog.Print("Creating Docker container")

	containerConfig, err := runner.ContainerExecRunner.GetContainerConfig()
	if err != nil {
		return fmt.Errorf("While getting container config: %v", err)
	}
	hostConfig, err := runner.ContainerExecRunner.GetHostConfig()
	if err != nil {
		return fmt.Errorf("While getting host config: %v", err)
	}

	containerConfig.Cmd = runner.Container.Command
	if runner.Container.Cwd != "." {
		containerConfig.WorkingDir = runner.Container.Cwd
	}

	for k, v := range runner.Container.Environment {
		containerConfig.Env = append(containerConfig.Env, k+"="+v)
	}

	containerConfig.Volumes = runner.Volumes

	maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
	minDockerRAM := int64(16)
	if maxRAM < minDockerRAM*1024*1024 {
		// Docker daemon won't let you set a limit less than ~10 MiB
		maxRAM = minDockerRAM * 1024 * 1024
	}
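	// Example: a container requesting RAM=4<<20 (4 MiB) gets
	// Memory=MemorySwap=16 MiB in the HostConfig below. Since
	// Docker's MemorySwap is the RAM+swap total, setting it equal
	// to Memory also disables swap for the container.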
	runner.HostConfig = dockercontainer.HostConfig{
		Binds: runner.Binds,
		LogConfig: dockercontainer.LogConfig{
			Type: "none",
		},
		Resources: dockercontainer.Resources{
			CgroupParent: runner.setCgroupParent,
			NanoCPUs:     int64(runner.Container.RuntimeConstraints.VCPUs) * 1000000000,
			Memory:       maxRAM, // RAM
			MemorySwap:   maxRAM, // RAM+swap
			KernelMemory: maxRAM, // kernel portion
		},
	}

	if runner.Container.RuntimeConstraints.API {
		tok, err := runner.ContainerToken()
		if err != nil {
			return err
		}
		containerConfig.Env = append(containerConfig.Env,
			"ARVADOS_API_TOKEN="+tok,
			"ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"),
			"ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"),
		)
		runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
	} else {
		if runner.enableNetwork == "always" {
			runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
		} else {
			runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
		}
	}

	_, stdinUsed := runner.Container.Mounts["stdin"]
	containerConfig.OpenStdin = stdinUsed
	containerConfig.StdinOnce = stdinUsed
	containerConfig.AttachStdin = stdinUsed
	containerConfig.AttachStdout = true
	containerConfig.AttachStderr = true

	createdBody, err := runner.ContainerExecRunner.ContainerCreate(context.TODO(), containerConfig, hostConfig, nil, runner.Container.UUID)
	if err != nil {
		return fmt.Errorf("While creating container: %v", err)
	}

	runner.ContainerID = createdBody.ID

	return runner.AttachStreams()
}
// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
	runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID)
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	err := runner.ContainerExecRunner.ContainerStart(context.TODO(), runner.ContainerID,
		ContainerStartOptions{})
	if err != nil {
		var advice string
		if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
			advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
		}
		return fmt.Errorf("could not start container: %v%s", err, advice)
	}
	return nil
}
// WaitFinish waits for the container to terminate, captures the exit
// code, and closes the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() error {
	var runTimeExceeded <-chan time.Time
	runner.CrunchLog.Print("Waiting for container to finish")

	waitOk, waitErr := runner.ContainerExecRunner.ContainerWait(context.TODO(), runner.ContainerID, WaitConditionNotRunning)
	arvMountExit := runner.ArvMountExit
	if timeout := runner.Container.SchedulingParameters.MaxRunTime; timeout > 0 {
		runTimeExceeded = time.After(time.Duration(timeout) * time.Second)
	}

	containerGone := make(chan struct{})
	go func() {
		defer close(containerGone)
		if runner.containerWatchdogInterval < 1 {
			runner.containerWatchdogInterval = time.Minute
		}
		for range time.NewTicker(runner.containerWatchdogInterval).C {
			ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(runner.containerWatchdogInterval))
			ctr, err := runner.ContainerExecRunner.ContainerInspect(ctx, runner.ContainerID)
			cancel()
			runner.cStateLock.Lock()
			done := runner.cRemoved || runner.ExitCode != nil
			runner.cStateLock.Unlock()
			if done {
				return
			} else if err != nil {
				runner.CrunchLog.Printf("Error inspecting container: %s", err)
				runner.checkBrokenNode(err)
				return
			} else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == "created") {
				runner.CrunchLog.Printf("Container is not running: State=%v", ctr.State)
				return
			}
		}
	}()

	for {
		select {
		case waitBody := <-waitOk:
			runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
			code := int(waitBody.StatusCode)
			runner.ExitCode = &code

			// wait for stdout/stderr to complete
			<-runner.loggingDone
			return nil

		case err := <-waitErr:
			return fmt.Errorf("container wait: %v", err)

		case <-arvMountExit:
			runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
			runner.stop(nil)
			// arvMountExit will always be ready now that
			// it's closed, but that doesn't interest us.
			arvMountExit = nil

		case <-runTimeExceeded:
			runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
			runner.stop(nil)
			runTimeExceeded = nil

		case <-containerGone:
			return errors.New("docker client never returned status")
		}
	}
}
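// Note on the nil-channel idiom in WaitFinish above: after the
// arvMountExit or runTimeExceeded case fires once, the corresponding
// channel variable is set to nil. A receive from a nil channel blocks
// forever, so a nil'd case can never be selected again -- an idiomatic
// way to disable a select case without restructuring the loop.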
func (runner *ContainerRunner) updateLogs() {
	ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
	defer ticker.Stop()

	sigusr1 := make(chan os.Signal, 1)
	signal.Notify(sigusr1, syscall.SIGUSR1)
	defer signal.Stop(sigusr1)

	saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
	saveAtSize := crunchLogUpdateSize
	var savedSize int64
	for {
		select {
		case <-ticker.C:
		case <-sigusr1:
			saveAtTime = time.Now()
		}
		runner.logMtx.Lock()
		done := runner.LogsPDH != nil
		runner.logMtx.Unlock()
		if done {
			return
		}
		size := runner.LogCollection.Size()
		if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
			continue
		}
		saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
		saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
		saved, err := runner.saveLogCollection(false)
		if err != nil {
			runner.CrunchLog.Printf("error updating log collection: %s", err)
			continue
		}

		var updated arvados.Container
		err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
			"container": arvadosclient.Dict{"log": saved.PortableDataHash},
		}, &updated)
		if err != nil {
			runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
			continue
		}

		savedSize = size
	}
}
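// The throttling rule in updateLogs, restated: a partial log collection
// is saved only when the collection has grown since the last save, and
// either crunchLogUpdatePeriod has elapsed or the collection has grown
// by at least crunchLogUpdateSize since then (SIGUSR1 forces the time
// condition). So with, say, a one-hour period and a 32 MiB size
// threshold (hypothetical values), a chatty container gets a snapshot
// roughly every 32 MiB of logs and a quiet one roughly every hour.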
// CaptureOutput saves data from the container's output directory if
// needed, and updates the container output accordingly.
func (runner *ContainerRunner) CaptureOutput() error {
	if runner.Container.RuntimeConstraints.API {
		// Output may have been set directly by the container, so
		// refresh the container record to check.
		err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
			nil, &runner.Container)
		if err != nil {
			return err
		}
		if runner.Container.Output != "" {
			// Container output is already set.
			runner.OutputPDH = &runner.Container.Output
			return nil
		}
	}

	txt, err := (&copier{
		client:        runner.containerClient,
		arvClient:     runner.ContainerArvClient,
		keepClient:    runner.ContainerKeepClient,
		hostOutputDir: runner.HostOutputDir,
		ctrOutputDir:  runner.Container.OutputPath,
		binds:         runner.Binds,
		mounts:        runner.Container.Mounts,
		secretMounts:  runner.SecretMounts,
		logger:        runner.CrunchLog,
	}).Copy()
	if err != nil {
		return err
	}
	if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
		runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
		fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
		if err != nil {
			return err
		}
		txt, err = fs.MarshalManifest(".")
		if err != nil {
			return err
		}
	}
	var resp arvados.Collection
	err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
		"ensure_unique_name": true,
		"collection": arvadosclient.Dict{
			"is_trashed":    true,
			"name":          "output for " + runner.Container.UUID,
			"manifest_text": txt,
		},
	}, &resp)
	if err != nil {
		return fmt.Errorf("error creating output collection: %v", err)
	}
	runner.OutputPDH = &resp.PortableDataHash
	return nil
}
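// The "+R" scan above detects block locators carrying remote-cluster
// signature hints (federated inputs). Round-tripping the manifest
// through a collection FileSystem re-writes it with local locators,
// presumably copying the data blocks to local storage in the process --
// hence the "Copying N data blocks" log line.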
func (runner *ContainerRunner) CleanupDirs() {
	if runner.ArvMount != nil {
		var delay int64 = 8
		umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
		umount.Stdout = runner.CrunchLog
		umount.Stderr = runner.CrunchLog
		runner.CrunchLog.Printf("Running %v", umount.Args)
		umnterr := umount.Start()

		if umnterr != nil {
			runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
		} else {
			// If arv-mount --unmount gets stuck for any reason, we
			// don't want to wait for it forever. Do Wait() in a
			// goroutine so it doesn't block crunch-run.
			umountExit := make(chan error)
			go func() {
				mnterr := umount.Wait()
				if mnterr != nil {
					runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
				}
				umountExit <- mnterr
			}()

			for again := true; again; {
				again = false
				select {
				case <-umountExit:
					umount = nil
					again = true
				case <-runner.ArvMountExit:
					break
				case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
					runner.CrunchLog.Printf("Timed out waiting for unmount")
					if umount != nil {
						umount.Process.Kill()
					}
					runner.ArvMount.Process.Kill()
				}
			}
		}
	}

	if runner.ArvMountPoint != "" {
		if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
			runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
		}
	}

	if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
		runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
	}
}
// CommitLogs posts the collection containing the final container logs.
func (runner *ContainerRunner) CommitLogs() error {
	func() {
		// Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
		runner.cStateLock.Lock()
		defer runner.cStateLock.Unlock()

		runner.CrunchLog.Print(runner.finalState)

		if runner.arvMountLog != nil {
			runner.arvMountLog.Close()
		}
		runner.CrunchLog.Close()

		// Closing CrunchLog above allows the accumulated logs to be
		// committed to Keep at this point, but re-open the crunch log
		// with ArvClient in case there are any further errors (such
		// as failing to write the log to Keep!) while shutting down.
		runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
			ArvClient:     runner.DispatcherArvClient,
			UUID:          runner.Container.UUID,
			loggingStream: "crunch-run",
		})
		runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
	}()

	if runner.LogsPDH != nil {
		// If we have already assigned something to LogsPDH,
		// we must be closing the re-opened log, which won't
		// end up getting attached to the container record and
		// therefore doesn't need to be saved as a collection
		// -- it exists only to send logs to other channels.
		return nil
	}

	saved, err := runner.saveLogCollection(true)
	if err != nil {
		return fmt.Errorf("error saving log collection: %s", err)
	}
	runner.logMtx.Lock()
	defer runner.logMtx.Unlock()
	runner.LogsPDH = &saved.PortableDataHash
	return nil
}
func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
	runner.logMtx.Lock()
	defer runner.logMtx.Unlock()
	if runner.LogsPDH != nil {
		// Already finalized.
		return
	}
	updates := arvadosclient.Dict{
		"name": "logs for " + runner.Container.UUID,
	}
	mt, err1 := runner.LogCollection.MarshalManifest(".")
	if err1 == nil {
		// Only send updated manifest text if there was no
		// error.
		updates["manifest_text"] = mt
	}

	// Even if flushing the manifest had an error, we still want
	// to update the log record, if possible, to push the trash_at
	// and delete_at times into the future; see the bug tracker for
	// details.
	if final {
		updates["is_trashed"] = true
	} else {
		exp := time.Now().Add(crunchLogUpdatePeriod * 24)
		updates["trash_at"] = exp
		updates["delete_at"] = exp
	}
	reqBody := arvadosclient.Dict{"collection": updates}
	var err2 error
	if runner.logUUID == "" {
		reqBody["ensure_unique_name"] = true
		err2 = runner.DispatcherArvClient.Create("collections", reqBody, &response)
	} else {
		err2 = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
	}
	if err2 == nil {
		runner.logUUID = response.UUID
	}

	if err1 != nil || err2 != nil {
		err = fmt.Errorf("error recording logs: %q, %q", err1, err2)
	}
	return
}
// UpdateContainerRunning updates the container state to "Running".
func (runner *ContainerRunner) UpdateContainerRunning() error {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
		arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running", "gateway_address": runner.gateway.Address}}, nil)
}
// ContainerToken returns the api_token the container (and any
// arv-mount processes) are allowed to use.
func (runner *ContainerRunner) ContainerToken() (string, error) {
	if runner.token != "" {
		return runner.token, nil
	}

	var auth arvados.APIClientAuthorization
	err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
	if err != nil {
		return "", err
	}
	runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
	return runner.token, nil
}
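// The string assembled above follows the Arvados "v2" token format,
// v2/<api-client-authorization-uuid>/<secret>, with the container UUID
// appended as the optional third field, which (as I understand it)
// identifies the container the token belongs to.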
// UpdateContainerFinal updates the container record state on API
// server to "Complete" or "Cancelled".
func (runner *ContainerRunner) UpdateContainerFinal() error {
	update := arvadosclient.Dict{}
	update["state"] = runner.finalState
	if runner.LogsPDH != nil {
		update["log"] = *runner.LogsPDH
	}
	if runner.finalState == "Complete" {
		if runner.ExitCode != nil {
			update["exit_code"] = *runner.ExitCode
		}
		if runner.OutputPDH != nil {
			update["output"] = *runner.OutputPDH
		}
	}
	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
}
// IsCancelled returns the value of cCancelled, with goroutine safety.
func (runner *ContainerRunner) IsCancelled() bool {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	return runner.cCancelled
}
// NewArvLogWriter creates an ArvLogWriter.
func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
	writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return nil, err
	}
	return &ArvLogWriter{
		ArvClient:     runner.DispatcherArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: name,
		writeCloser:   writer,
	}, nil
}
// Run the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
	runner.CrunchLog.Printf("crunch-run %s started", cmd.Version.String())
	runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)

	hostname, hosterr := os.Hostname()
	if hosterr != nil {
		runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
	} else {
		runner.CrunchLog.Printf("Executing on host '%s'", hostname)
	}

	runner.finalState = "Queued"

	defer func() {
		runner.CleanupDirs()

		runner.CrunchLog.Printf("crunch-run finished")
		runner.CrunchLog.Close()
	}()

	err = runner.fetchContainerRecord()
	if err != nil {
		return
	}
	if runner.Container.State != "Locked" {
		return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
	}

	defer func() {
		// checkErr prints e (unless it's nil) and sets err to
		// e (unless err is already non-nil). Thus, if err
		// hasn't already been assigned when Run() returns,
		// this cleanup func will cause Run() to return the
		// first non-nil error that is passed to checkErr().
		checkErr := func(errorIn string, e error) {
			if e == nil {
				return
			}
			runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
			if err == nil {
				err = e
			}
			if runner.finalState == "Complete" {
				// There was an error in the finalization.
				runner.finalState = "Cancelled"
			}
		}

		// Log the error encountered in Run(), if any
		checkErr("Run", err)

		if runner.finalState == "Queued" {
			runner.UpdateContainerFinal()
			return
		}

		if runner.IsCancelled() {
			runner.finalState = "Cancelled"
			// but don't return yet -- we still want to
			// capture partial output and write logs
		}

		checkErr("CaptureOutput", runner.CaptureOutput())
		checkErr("stopHoststat", runner.stopHoststat())
		checkErr("CommitLogs", runner.CommitLogs())
		checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
	}()

	runner.setupSignals()
	err = runner.startHoststat()
	if err != nil {
		return
	}

	// check for and/or load image
	err = runner.LoadImage()
	if err != nil {
		if !runner.checkBrokenNode(err) {
			// Failed to load image but not due to a "broken node"
			// condition, probably user error.
			runner.finalState = "Cancelled"
		}
		err = fmt.Errorf("While loading container image: %v", err)
		return
	}

	// set up FUSE mount and binds
	err = runner.SetupMounts()
	if err != nil {
		runner.finalState = "Cancelled"
		err = fmt.Errorf("While setting up mounts: %v", err)
		return
	}

	err = runner.CreateContainer()
	if err != nil {
		return
	}
	err = runner.LogHostInfo()
	if err != nil {
		return
	}
	err = runner.LogNodeRecord()
	if err != nil {
		return
	}
	err = runner.LogContainerRecord()
	if err != nil {
		return
	}

	if runner.IsCancelled() {
		return
	}

	err = runner.UpdateContainerRunning()
	if err != nil {
		return
	}
	runner.finalState = "Cancelled"

	err = runner.startCrunchstat()
	if err != nil {
		return
	}

	err = runner.StartContainer()
	if err != nil {
		runner.checkBrokenNode(err)
		return
	}

	err = runner.WaitFinish()
	if err == nil && !runner.IsCancelled() {
		runner.finalState = "Complete"
	}
	return
}
// Fetch the current container record (uuid = runner.Container.UUID)
// into runner.Container.
func (runner *ContainerRunner) fetchContainerRecord() error {
	reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
	if err != nil {
		return fmt.Errorf("error fetching container record: %v", err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	err = dec.Decode(&runner.Container)
	if err != nil {
		return fmt.Errorf("error decoding container record: %v", err)
	}

	var sm struct {
		SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
	}

	containerToken, err := runner.ContainerToken()
	if err != nil {
		return fmt.Errorf("error getting container token: %v", err)
	}

	runner.ContainerArvClient, runner.ContainerKeepClient,
		runner.containerClient, err = runner.MkArvClient(containerToken)
	if err != nil {
		return fmt.Errorf("error creating container API client: %v", err)
	}

	err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
	if err != nil {
		if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
			return fmt.Errorf("error fetching secret_mounts: %v", err)
		}
		// ok && apierr.HttpStatusCode == 404, which means
		// secret_mounts isn't supported by this API server.
	}
	runner.SecretMounts = sm.SecretMounts

	return nil
}
// NewContainerRunner creates a new container runner.
func NewContainerRunner(dispatcherClient *arvados.Client,
	dispatcherArvClient IArvadosClient,
	dispatcherKeepClient IKeepClient,
	containerRunner ThinContainerExecRunner,
	containerUUID string) (*ContainerRunner, error) {

	cr := &ContainerRunner{
		dispatcherClient:     dispatcherClient,
		DispatcherArvClient:  dispatcherArvClient,
		DispatcherKeepClient: dispatcherKeepClient,
		ContainerExecRunner:  containerRunner,
	}
	cr.NewLogWriter = cr.NewArvLogWriter
	cr.RunArvMount = cr.ArvMountCmd
	cr.MkTempDir = ioutil.TempDir
	cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
		cl, err := arvadosclient.MakeArvadosClient()
		if err != nil {
			return nil, nil, nil, err
		}
		cl.ApiToken = token
		kc, err := keepclient.MakeKeepClient(cl)
		if err != nil {
			return nil, nil, nil, err
		}
		c2 := arvados.NewClientFromEnv()
		c2.AuthToken = token
		return cl, kc, c2, nil
	}
	var err error
	cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
	if err != nil {
		return nil, err
	}
	cr.Container.UUID = containerUUID
	w, err := cr.NewLogWriter("crunch-run")
	if err != nil {
		return nil, err
	}
	cr.CrunchLog = NewThrottledLogger(w)
	cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)

	loadLogThrottleParams(dispatcherArvClient)
	go cr.updateLogs()

	return cr, nil
}
// RunCommand implements the crunch-run command-line entry point.
func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
	flags := flag.NewFlagSet(prog, flag.ContinueOnError)
	statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
	cgroupRoot := flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
	cgroupParent := flags.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
	cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
	caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
	detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
	stdinEnv := flags.Bool("stdin-env", false, "Load environment variables from JSON message on stdin")
	sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
	kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
	list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes")
	enableNetwork := flags.String("container-enable-networking", "default",
		`Specify if networking should be enabled for container. One of 'default' or 'always':
	default: only enable networking if container requests it.
	always:  containers always have networking enabled`)
	networkMode := flags.String("container-network-mode", "default",
		`Set networking mode for container. Corresponds to Docker network mode (--net).`)
	memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
	flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
	containerRunner := flags.String("container-runner", "docker",
		`Specify the container runner. Available options: docker, singularity.`)

	ignoreDetachFlag := false
	if len(args) > 0 && args[0] == "-no-detach" {
		// This process was invoked by a parent process, which
		// has passed along its own arguments, including
		// -detach, after the leading -no-detach flag. Strip
		// the leading -no-detach flag (it's not recognized by
		// flags.Parse()) and ignore the -detach flag that
		// comes later.
		args = args[1:]
		ignoreDetachFlag = true
	}

	if err := flags.Parse(args); err == flag.ErrHelp {
		return 0
	} else if err != nil {
		log.Print(err)
		return 1
	}

	if *stdinEnv && !ignoreDetachFlag {
		// Load env vars on stdin if asked (but not in a
		// detached child process, in which case stdin is
		// /dev/null).
		err := loadEnv(os.Stdin)
		if err != nil {
			log.Print(err)
			return 1
		}
	}

	containerID := flags.Arg(0)

	switch {
	case *detach && !ignoreDetachFlag:
		return Detach(containerID, prog, args, os.Stdout, os.Stderr)
	case *kill >= 0:
		return KillProcess(containerID, syscall.Signal(*kill), os.Stdout, os.Stderr)
	case *list:
		return ListProcesses(os.Stdout, os.Stderr)
	}

	if containerID == "" {
		log.Printf("usage: %s [options] UUID", prog)
		return 1
	}

	log.Printf("crunch-run %s started", cmd.Version.String())
	time.Sleep(*sleep)

	if *caCertsPath != "" {
		arvadosclient.CertFiles = []string{*caCertsPath}
	}

	api, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Printf("%s: %v", containerID, err)
		return 1
	}
	api.Retries = 8

	kc, kcerr := keepclient.MakeKeepClient(api)
	if kcerr != nil {
		log.Printf("%s: %v", containerID, kcerr)
		return 1
	}
	kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
	kc.Retries = 4

	var cr *ContainerRunner
	if *containerRunner == "docker" {
		// API version 1.21 corresponds to Docker 1.9, which is currently the
		// minimum version we want to support.
		docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
		cr, err = NewContainerRunner(arvados.NewClientFromEnv(), api, kc, adapter(docker), containerID)
		if err != nil {
			log.Print(err)
			return 1
		}
		if dockererr != nil {
			cr.CrunchLog.Printf("%s: %v", containerID, dockererr)
			cr.checkBrokenNode(dockererr)
			cr.CrunchLog.Close()
			return 1
		}
	} else {
		singularity, singularityerr := NewSingularityClient()
		cr, err = NewContainerRunner(arvados.NewClientFromEnv(), api, kc, singularity, containerID)
		if err != nil {
			log.Print(err)
			return 1
		}
		if singularityerr != nil {
			cr.CrunchLog.Printf("%s: %v", containerID, singularityerr)
			//cr.checkBrokenNode(singularityerr)
			cr.CrunchLog.Close()
			return 1
		}
	}

	cr.gateway = Gateway{
		Address:           os.Getenv("GatewayAddress"),
		AuthSecret:        os.Getenv("GatewayAuthSecret"),
		ContainerUUID:     containerID,
		DockerContainerID: &cr.ContainerID,
		Log:               cr.CrunchLog,
	}
	os.Unsetenv("GatewayAuthSecret")
	if cr.gateway.Address != "" {
		err = cr.gateway.Start()
		if err != nil {
			log.Printf("error starting gateway server: %s", err)
			return 1
		}
	}

	parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerID+".")
	if tmperr != nil {
		log.Printf("%s: %v", containerID, tmperr)
		return 1
	}

	cr.parentTemp = parentTemp
	cr.statInterval = *statInterval
	cr.cgroupRoot = *cgroupRoot
	cr.expectCgroupParent = *cgroupParent
	cr.enableNetwork = *enableNetwork
	cr.networkMode = *networkMode
	if *cgroupParentSubsystem != "" {
		p := findCgroup(*cgroupParentSubsystem)
		cr.setCgroupParent = p
		cr.expectCgroupParent = p
	}

	runerr := cr.Run()

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Printf("could not create memory profile: %s", err)
		}
		runtime.GC() // get up-to-date statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Printf("could not write memory profile: %s", err)
		}
		closeerr := f.Close()
		if closeerr != nil {
			log.Printf("closing memprofile file: %s", closeerr)
		}
	}

	if runerr != nil {
		log.Printf("%s: %v", containerID, runerr)
		return 1
	}
	return 0
}
func loadEnv(rdr io.Reader) error {
	buf, err := ioutil.ReadAll(rdr)
	if err != nil {
		return fmt.Errorf("read stdin: %s", err)
	}
	var env map[string]string
	err = json.Unmarshal(buf, &env)
	if err != nil {
		return fmt.Errorf("decode stdin: %s", err)
	}
	for k, v := range env {
		err = os.Setenv(k, v)
		if err != nil {
			return fmt.Errorf("setenv(%q): %s", k, err)
		}
	}
	return nil
}
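// loadEnv expects a single JSON object mapping variable names to string
// values on stdin, e.g. (hypothetical values):
//
//	{"ARVADOS_API_HOST":"zzzzz.arvadosapi.com","ARVADOS_API_TOKEN":"xxxxxxxx"}
//
// as sent by a dispatcher when crunch-run is invoked with -stdin-env.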