// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

	"git.curoverse.com/arvados.git/lib/crunchstat"
	"git.curoverse.com/arvados.git/sdk/go/arvados"
	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
	"git.curoverse.com/arvados.git/sdk/go/manifest"
	"golang.org/x/net/context"

	dockertypes "github.com/docker/docker/api/types"
	dockercontainer "github.com/docker/docker/api/types/container"
	dockernetwork "github.com/docker/docker/api/types/network"
	dockerclient "github.com/docker/docker/client"

// IArvadosClient is the minimal set of Arvados API methods used by crunch-run.
type IArvadosClient interface {
	Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
	Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
	CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
	Discovery(key string) (interface{}, error)
}

// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")

// IKeepClient is the minimal set of Keep API methods used by crunch-run.
type IKeepClient interface {
	PutHB(hash string, buf []byte) (string, int, error)
	ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
	ClearBlockCache()
}

// NewLogWriter is a factory function to create a new log writer.
type NewLogWriter func(name string) io.WriteCloser

// RunArvMount is a factory function to start an arv-mount process.
type RunArvMount func(args []string, tok string) (*exec.Cmd, error)

// MkTempDir is a factory function to create a temporary directory.
type MkTempDir func(string, string) (string, error)

// ThinDockerClient is the minimal Docker client interface used by crunch-run.
type ThinDockerClient interface {
	ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error)
	ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
		networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
	ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
	ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
	ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
	ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
	ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
	ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
}
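
// The thin interface above exists mainly so tests can substitute a fake
// Docker client. As a minimal sketch (not part of the original file; the
// type and field names are hypothetical), a test double can embed the
// interface and override only the methods it exercises -- calling any
// other method panics on the nil embedded value:
//
//	type stubDockerClient struct {
//		ThinDockerClient        // nil; satisfies the interface
//		loadedImageID string    // image id the stub pretends is loaded
//	}
//
//	func (s *stubDockerClient) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
//		if image == s.loadedImageID {
//			return dockertypes.ImageInspect{ID: image}, nil, nil
//		}
//		return dockertypes.ImageInspect{}, nil, errors.New("no such image")
//	}
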
// ContainerRunner is the main stateful struct used for a single execution of a
// container.
type ContainerRunner struct {
	Docker    ThinDockerClient
	ArvClient IArvadosClient

	ContainerConfig dockercontainer.Config
	dockercontainer.HostConfig

	CrunchLog     *ThrottledLogger
	Stdout        io.WriteCloser
	Stderr        io.WriteCloser
	LogCollection *CollectionWriter

	CleanupTempDir []string

	Volumes map[string]struct{}

	SigChan      chan os.Signal
	ArvMountExit chan error

	statLogger       io.WriteCloser
	statReporter     *crunchstat.Reporter
	hoststatLogger   io.WriteCloser
	hoststatReporter *crunchstat.Reporter
	statInterval     time.Duration
	// What we expect the container's cgroup parent to be.
	expectCgroupParent string
	// What we tell docker to use as the container's cgroup
	// parent. Note: Ideally we would use the same field for both
	// expectCgroupParent and setCgroupParent, and just make it
	// default to "docker". However, when using docker < 1.10 with
	// systemd, specifying a non-empty cgroup parent (even the
	// default value "docker") hits a docker bug
	// (https://github.com/docker/docker/issues/17126). Using two
	// separate fields makes it possible to use the "expect cgroup
	// parent to be X" feature even on sites where the "specify
	// cgroup parent" feature breaks.
	setCgroupParent string

	cStateLock sync.Mutex
	cCancelled bool // StopContainer() invoked

	enableNetwork string // one of "default" or "always"
	networkMode   string // passed through to HostConfig.NetworkMode
	arvMountLog   *ThrottledLogger
}

// setupSignals sets up signal handling to gracefully terminate the underlying
// Docker container and update state when receiving a TERM, INT or QUIT signal.
func (runner *ContainerRunner) setupSignals() {
	runner.SigChan = make(chan os.Signal, 1)
	signal.Notify(runner.SigChan, syscall.SIGTERM)
	signal.Notify(runner.SigChan, syscall.SIGINT)
	signal.Notify(runner.SigChan, syscall.SIGQUIT)

	go func(sig chan os.Signal) {
		for s := range sig {
			runner.CrunchLog.Printf("caught signal: %v", s)
			runner.stop()
		}
	}(runner.SigChan)
}

// stop the underlying Docker container.
func (runner *ContainerRunner) stop() {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.ContainerID == "" {
		return
	}
	runner.cCancelled = true
	runner.CrunchLog.Printf("removing container")
	err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
	if err != nil {
		runner.CrunchLog.Printf("error removing container: %s", err)
	}
}

// stopSignals stops catching signals.
func (runner *ContainerRunner) stopSignals() {
	if runner.SigChan != nil {
		signal.Stop(runner.SigChan)
	}
}

var errorBlacklist = []string{
	"(?ms).*[Cc]annot connect to the Docker daemon.*",
	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
}

var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
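
// For illustration only (hypothetical hook path and container UUID), a
// site that wants failing nodes taken out of service might invoke:
//
//	crunch-run -broken-node-hook=/usr/local/sbin/mark-node-broken zzzzz-dz642-xxxxxxxxxxxxxxx
//
// With no hook configured, checkBrokenNode below only logs the condition.
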
func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
	for _, d := range errorBlacklist {
		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
			if *brokenNodeHook == "" {
				runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
			} else {
				runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
				c := exec.Command(*brokenNodeHook)
				c.Stdout = runner.CrunchLog
				c.Stderr = runner.CrunchLog
				err := c.Run()
				if err != nil {
					runner.CrunchLog.Printf("Error running broken node hook: %v", err)
				}
			}
			return true
		}
	}
	return false
}

// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store. If not, it loads
// the image from Keep.
func (runner *ContainerRunner) LoadImage() (err error) {
	runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

	var collection arvados.Collection
	err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
	if err != nil {
		return fmt.Errorf("While getting container image collection: %v", err)
	}
	manifest := manifest.Manifest{Text: collection.ManifestText}
	var img, imageID string
	for ms := range manifest.StreamIter() {
		img = ms.FileStreamSegments[0].Name
		if !strings.HasSuffix(img, ".tar") {
			return fmt.Errorf("First file in the container image collection does not end in .tar")
		}
		imageID = img[:len(img)-4]
	}

	runner.CrunchLog.Printf("Using Docker image id '%s'", imageID)

	_, _, err = runner.Docker.ImageInspectWithRaw(context.TODO(), imageID)
	if err != nil {
		runner.CrunchLog.Print("Loading Docker image from keep")

		var readCloser io.ReadCloser
		readCloser, err = runner.Kc.ManifestFileReader(manifest, img)
		if err != nil {
			return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
		}

		response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
		if err != nil {
			return fmt.Errorf("While loading container image into Docker: %v", err)
		}

		defer response.Body.Close()
		rbody, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return fmt.Errorf("Reading response to image load: %v", err)
		}
		runner.CrunchLog.Printf("Docker response: %s", rbody)
	} else {
		runner.CrunchLog.Print("Docker image is available")
	}

	runner.ContainerConfig.Image = imageID

	runner.Kc.ClearBlockCache()

	return nil
}

func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
	c = exec.Command("arv-mount", arvMountCmd...)

	// Copy our environment, but override ARVADOS_API_TOKEN with
	// the container auth token.
	for _, s := range os.Environ() {
		if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
			c.Env = append(c.Env, s)
		}
	}
	c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

	runner.arvMountLog = NewThrottledLogger(runner.NewLogWriter("arv-mount"))
	c.Stdout = runner.arvMountLog
	c.Stderr = runner.arvMountLog

	runner.CrunchLog.Printf("Running %v", c.Args)
	if err = c.Start(); err != nil {
		return nil, err
	}

	statReadme := make(chan bool)
	runner.ArvMountExit = make(chan error)

	keepStatting := true
	go func() {
		for keepStatting {
			time.Sleep(100 * time.Millisecond)
			_, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
			if err == nil {
				keepStatting = false
				statReadme <- true
			}
		}
	}()

	go func() {
		mnterr := c.Wait()
		if mnterr != nil {
			runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
		}
		runner.ArvMountExit <- mnterr
		close(runner.ArvMountExit)
	}()

	select {
	case <-statReadme:
	case err := <-runner.ArvMountExit:
		runner.ArvMount = nil
		keepStatting = false
		return nil, err
	}
	return c, nil
}

func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
	if runner.ArvMountPoint == "" {
		runner.ArvMountPoint, err = runner.MkTempDir("", prefix)
	}
	return
}

func (runner *ContainerRunner) SetupMounts() (err error) {
	err = runner.SetupArvMountPoint("keep")
	if err != nil {
		return fmt.Errorf("While creating keep mount temp dir: %v", err)
	}

	token, err := runner.ContainerToken()
	if err != nil {
		return fmt.Errorf("could not get container token: %s", err)
	}

	pdhOnly := true
	tmpcount := 0
	arvMountCmd := []string{
		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
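
	// For illustration only: by the time RunArvMount is invoked at the
	// end of SetupMounts, the assembled command line resembles
	//
	//	arv-mount --crunchstat-interval=10 --file-cache 268435456 --mount-tmp tmp0 --mount-by-pdh by_id /tmp/crunch-run…/keep
	//
	// (hypothetical values; the exact flags depend on the mounts and on
	// defaults elided here).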
	if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
		arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
	}

	collectionPaths := []string{}
	runner.Volumes = make(map[string]struct{})
	needCertMount := true

	var binds []string
	for bind := range runner.Container.Mounts {
		binds = append(binds, bind)
	}
	sort.Strings(binds)

	for _, bind := range binds {
		mnt := runner.Container.Mounts[bind]
		if bind == "stdout" || bind == "stderr" {
			// Is it a "file" mount kind?
			if mnt.Kind != "file" {
				return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind)
			}

			// Does path start with OutputPath?
			prefix := runner.Container.OutputPath
			if !strings.HasSuffix(prefix, "/") {
				prefix += "/"
			}
			if !strings.HasPrefix(mnt.Path, prefix) {
				return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
			}
		}

		if bind == "stdin" {
			// Is it a "collection" mount kind?
			if mnt.Kind != "collection" && mnt.Kind != "json" {
				return fmt.Errorf("Unsupported mount kind '%s' for stdin. Only 'collection' or 'json' are supported.", mnt.Kind)
			}
		}

		if bind == "/etc/arvados/ca-certificates.crt" {
			needCertMount = false
		}

		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
			if mnt.Kind != "collection" {
				return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind)
			}
		}

		switch {
		case mnt.Kind == "collection" && bind != "stdin":
			var src string
			if mnt.UUID != "" && mnt.PortableDataHash != "" {
				return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
			}
			if mnt.UUID != "" {
				if mnt.Writable {
					return fmt.Errorf("Writing to existing collections currently not permitted.")
				}
				pdhOnly = false
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
			} else if mnt.PortableDataHash != "" {
				if mnt.Writable {
					return fmt.Errorf("Can never write to a collection specified by portable data hash")
				}
				idx := strings.Index(mnt.PortableDataHash, "/")
				if idx > 0 {
					mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
					mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
					runner.Container.Mounts[bind] = mnt
				}
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
				if mnt.Path != "" && mnt.Path != "." {
					if strings.HasPrefix(mnt.Path, "./") {
						mnt.Path = mnt.Path[2:]
					} else if strings.HasPrefix(mnt.Path, "/") {
						mnt.Path = mnt.Path[1:]
					}
					src += "/" + mnt.Path
				}
			} else {
				src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
				arvMountCmd = append(arvMountCmd, "--mount-tmp")
				arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
				tmpcount += 1
			}
			if mnt.Writable {
				if bind == runner.Container.OutputPath {
					runner.HostOutputDir = src
				} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
				}
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
			} else {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
			}
			collectionPaths = append(collectionPaths, src)

		case mnt.Kind == "tmp":
			var tmpdir string
			tmpdir, err = runner.MkTempDir("", "")
			if err != nil {
				return fmt.Errorf("While creating mount temp dir: %v", err)
			}
			st, staterr := os.Stat(tmpdir)
			if staterr != nil {
				return fmt.Errorf("While Stat on temp dir: %v", staterr)
			}
			err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return fmt.Errorf("While Chmod temp dir: %v", err)
			}
			runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
			runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = tmpdir
			}

		case mnt.Kind == "json":
			jsondata, err := json.Marshal(mnt.Content)
			if err != nil {
				return fmt.Errorf("encoding json data: %v", err)
			}
			// Create a tempdir with a single file
			// (instead of just a tempfile): this way we
			// can ensure the file is world-readable
			// inside the container, without having to
			// make it world-readable on the docker host.
			tmpdir, err := runner.MkTempDir("", "")
			if err != nil {
				return fmt.Errorf("creating temp dir: %v", err)
			}
			runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
			tmpfn := filepath.Join(tmpdir, "mountdata.json")
			err = ioutil.WriteFile(tmpfn, jsondata, 0644)
			if err != nil {
				return fmt.Errorf("writing temp file: %v", err)
			}
			runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))

		case mnt.Kind == "git_tree":
			tmpdir, err := runner.MkTempDir("", "")
			if err != nil {
				return fmt.Errorf("creating temp dir: %v", err)
			}
			runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
			err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
			if err != nil {
				return err
			}
			runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
		}
	}

	if runner.HostOutputDir == "" {
		return fmt.Errorf("Output path does not correspond to a writable mount point")
	}

	if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
		for _, certfile := range arvadosclient.CertFiles {
			_, err := os.Stat(certfile)
			if err == nil {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
				break
			}
		}
	}

	if pdhOnly {
		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
	} else {
		arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
	}
	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)

	runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
	if err != nil {
		return fmt.Errorf("While trying to start arv-mount: %v", err)
	}

	for _, p := range collectionPaths {
		_, err = os.Stat(p)
		if err != nil {
			return fmt.Errorf("While checking that input files exist: %v", err)
		}
	}

	return nil
}

func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
	// Handle docker log protocol
	// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
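	//
	// Each frame on the multiplexed connection begins with an 8-byte
	// header: byte 0 is the stream id (1=stdout, 2=stderr), bytes 1-3
	// are padding, and bytes 4-7 hold the payload length as a
	// big-endian uint32 (reassembled into readsize below).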
	defer close(runner.loggingDone)

	header := make([]byte, 8)
	var err error
	for err == nil {
		_, err = io.ReadAtLeast(containerReader, header, 8)
		if err != nil {
			break
		}
		readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
		if header[0] == 1 { // stdout frame
			_, err = io.CopyN(runner.Stdout, containerReader, readsize)
		} else { // stderr frame
			_, err = io.CopyN(runner.Stderr, containerReader, readsize)
		}
	}
	if err != nil && err != io.EOF {
		runner.CrunchLog.Printf("error reading docker logs: %v", err)
	}
	if err = runner.Stdout.Close(); err != nil {
		runner.CrunchLog.Printf("error closing stdout logs: %v", err)
	}
	if err = runner.Stderr.Close(); err != nil {
		runner.CrunchLog.Printf("error closing stderr logs: %v", err)
	}
	if runner.statReporter != nil {
		runner.statReporter.Stop()
		if err = runner.statLogger.Close(); err != nil {
			runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
		}
	}
}

func (runner *ContainerRunner) stopHoststat() error {
	if runner.hoststatReporter == nil {
		return nil
	}
	runner.hoststatReporter.Stop()
	err := runner.hoststatLogger.Close()
	if err != nil {
		return fmt.Errorf("error closing hoststat logs: %v", err)
	}
	return nil
}

func (runner *ContainerRunner) startHoststat() {
	runner.hoststatLogger = NewThrottledLogger(runner.NewLogWriter("hoststat"))
	runner.hoststatReporter = &crunchstat.Reporter{
		Logger:     log.New(runner.hoststatLogger, "", 0),
		CgroupRoot: runner.cgroupRoot,
		PollPeriod: runner.statInterval,
	}
	runner.hoststatReporter.Start()
}

func (runner *ContainerRunner) startCrunchstat() {
	runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
	runner.statReporter = &crunchstat.Reporter{
		CID:          runner.ContainerID,
		Logger:       log.New(runner.statLogger, "", 0),
		CgroupParent: runner.expectCgroupParent,
		CgroupRoot:   runner.cgroupRoot,
		PollPeriod:   runner.statInterval,
	}
	runner.statReporter.Start()
}

type infoCommand struct {
	label string
	cmd   []string
}

// LogHostInfo logs info about the current host, for debugging and
// accounting purposes. Although it's logged as "node-info", this is
// about the environment where crunch-run is actually running, which
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
	w := runner.NewLogWriter("node-info")

	commands := []infoCommand{
		{label: "Host Information", cmd: []string{"uname", "-a"}},
		{label: "CPU Information", cmd: []string{"cat", "/proc/cpuinfo"}},
		{label: "Memory Information", cmd: []string{"cat", "/proc/meminfo"}},
		{label: "Disk Space", cmd: []string{"df", "-m", "/", os.TempDir()}},
		{label: "Disk INodes", cmd: []string{"df", "-i", "/", os.TempDir()}},
	}

	// Run commands with informational output to be logged.
	for _, command := range commands {
		fmt.Fprintln(w, command.label)
		cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
		cmd.Stdout = w
		cmd.Stderr = w
		if err := cmd.Run(); err != nil {
			err = fmt.Errorf("While running command %q: %v", command.cmd, err)
			fmt.Fprintln(w, err)
			return err
		}
	}

	err = w.Close()
	if err != nil {
		return fmt.Errorf("While closing node-info logs: %v", err)
	}
	return nil
}

// LogContainerRecord gets and saves the raw JSON container record from the API server
func (runner *ContainerRunner) LogContainerRecord() error {
	logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
	if !logged && err == nil {
		err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
	}
	return err
}

// LogNodeRecord logs the arvados#node record corresponding to the current host.
func (runner *ContainerRunner) LogNodeRecord() error {
	hostname := os.Getenv("SLURMD_NODENAME")
	if hostname == "" {
		hostname, _ = os.Hostname()
	}
	_, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
		// The "info" field has admin-only info when obtained
		// with a privileged token, and should not be logged.
		node, ok := resp.(map[string]interface{})
		if ok {
			delete(node, "info")
		}
	})
	return err
}

func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
	w := &ArvLogWriter{
		ArvClient:     runner.ArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: label,
		writeCloser:   runner.LogCollection.Open(label + ".json"),
	}

	reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
	if err != nil {
		return false, fmt.Errorf("error getting %s record: %v", label, err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	var resp map[string]interface{}
	if err = dec.Decode(&resp); err != nil {
		return false, fmt.Errorf("error decoding %s list response: %v", label, err)
	}
	items, ok := resp["items"].([]interface{})
	if !ok {
		return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
	} else if len(items) < 1 {
		return false, nil
	}
	if munge != nil {
		munge(items[0])
	}
	// Re-encode it using indentation to improve readability
	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	if err = enc.Encode(items[0]); err != nil {
		return false, fmt.Errorf("error logging %s record: %v", label, err)
	}
	err = w.Close()
	if err != nil {
		return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
	}
	return true, nil
}

// AttachStreams connects the docker container stdin, stdout and stderr logs
// to the Arvados logger which logs to Keep and the API server logs table.
func (runner *ContainerRunner) AttachStreams() (err error) {

	runner.CrunchLog.Print("Attaching container streams")

	// If stdin mount is provided, attach it to the docker container
	var stdinRdr arvados.File
	var stdinJson []byte
	if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
		if stdinMnt.Kind == "collection" {
			var stdinColl arvados.Collection
			collId := stdinMnt.UUID
			if collId == "" {
				collId = stdinMnt.PortableDataHash
			}
			err = runner.ArvClient.Get("collections", collId, nil, &stdinColl)
			if err != nil {
				return fmt.Errorf("While getting stdin collection: %v", err)
			}

			stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path)
			if os.IsNotExist(err) {
				return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
			} else if err != nil {
				return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
			}
		} else if stdinMnt.Kind == "json" {
			stdinJson, err = json.Marshal(stdinMnt.Content)
			if err != nil {
				return fmt.Errorf("While encoding stdin json data: %v", err)
			}
		}
	}

	stdinUsed := stdinRdr != nil || len(stdinJson) != 0
	response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID,
		dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
	if err != nil {
		return fmt.Errorf("While attaching container stdout/stderr streams: %v", err)
	}

	runner.loggingDone = make(chan bool)

	if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
		stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path)
		if err != nil {
			return err
		}
		runner.Stdout = stdoutFile
	} else {
		runner.Stdout = NewThrottledLogger(runner.NewLogWriter("stdout"))
	}

	if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
		stderrFile, err := runner.getStdoutFile(stderrMnt.Path)
		if err != nil {
			return err
		}
		runner.Stderr = stderrFile
	} else {
		runner.Stderr = NewThrottledLogger(runner.NewLogWriter("stderr"))
	}

	if stdinRdr != nil {
		go func() {
			_, err := io.Copy(response.Conn, stdinRdr)
			if err != nil {
				runner.CrunchLog.Printf("While writing stdin collection to docker container: %v", err)
			}
			stdinRdr.Close()
			response.CloseWrite()
		}()
	} else if len(stdinJson) != 0 {
		go func() {
			_, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
			if err != nil {
				runner.CrunchLog.Printf("While writing stdin json to docker container: %v", err)
			}
			response.CloseWrite()
		}()
	}

	go runner.ProcessDockerAttach(response.Reader)
	return nil
}

func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
	stdoutPath := mntPath[len(runner.Container.OutputPath):]
	index := strings.LastIndex(stdoutPath, "/")
	if index > 0 {
		subdirs := stdoutPath[:index]
		if subdirs != "" {
			st, err := os.Stat(runner.HostOutputDir)
			if err != nil {
				return nil, fmt.Errorf("While Stat on temp dir: %v", err)
			}
			stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
			err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
			}
		}
	}
	stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
	if err != nil {
		return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
	}
	return stdoutFile, nil
}

// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer() error {
	runner.CrunchLog.Print("Creating Docker container")

	runner.ContainerConfig.Cmd = runner.Container.Command
	if runner.Container.Cwd != "." {
		runner.ContainerConfig.WorkingDir = runner.Container.Cwd
	}

	for k, v := range runner.Container.Environment {
		runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v)
	}

	runner.ContainerConfig.Volumes = runner.Volumes

	runner.HostConfig = dockercontainer.HostConfig{
		Binds: runner.Binds,
		LogConfig: dockercontainer.LogConfig{
			Type: "none",
		},
		Resources: dockercontainer.Resources{
			CgroupParent: runner.setCgroupParent,
		},
	}

	if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
		tok, err := runner.ContainerToken()
		if err != nil {
			return err
		}
		runner.ContainerConfig.Env = append(runner.ContainerConfig.Env,
			"ARVADOS_API_TOKEN="+tok,
			"ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"),
			"ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"),
		)
		runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
	} else {
		if runner.enableNetwork == "always" {
			runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
		} else {
			runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
		}
	}

	_, stdinUsed := runner.Container.Mounts["stdin"]
	runner.ContainerConfig.OpenStdin = stdinUsed
	runner.ContainerConfig.StdinOnce = stdinUsed
	runner.ContainerConfig.AttachStdin = stdinUsed
	runner.ContainerConfig.AttachStdout = true
	runner.ContainerConfig.AttachStderr = true

	createdBody, err := runner.Docker.ContainerCreate(context.TODO(), &runner.ContainerConfig, &runner.HostConfig, nil, runner.Container.UUID)
	if err != nil {
		return fmt.Errorf("While creating container: %v", err)
	}

	runner.ContainerID = createdBody.ID

	return runner.AttachStreams()
}

// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
	runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID)
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
		dockertypes.ContainerStartOptions{})
	if err != nil {
		var advice string
		if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
			advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
		}
		return fmt.Errorf("could not start container: %v%s", err, advice)
	}
	return nil
}

// WaitFinish waits for the container to terminate, captures the exit code, and
// closes the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() error {
	runner.CrunchLog.Print("Waiting for container to finish")

	waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
	arvMountExit := runner.ArvMountExit
	for {
		select {
		case waitBody := <-waitOk:
			runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
			code := int(waitBody.StatusCode)
			runner.ExitCode = &code

			// wait for stdout/stderr to complete
			<-runner.loggingDone
			return nil

		case err := <-waitErr:
			return fmt.Errorf("container wait: %v", err)

		case <-arvMountExit:
			runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
			runner.stop()
			// arvMountExit will always be ready now that
			// it's closed, but that doesn't interest us.
			arvMountExit = nil
		}
	}
}

var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")

func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
	// Follow symlinks if necessary
	info = startinfo
	tgt = path
	readlinktgt = ""
	nextlink := path
	for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
		if followed >= limitFollowSymlinks {
			// Got stuck in a loop or just a pathological number of links, give up.
			err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
			return
		}
		readlinktgt, err = os.Readlink(nextlink)
		if err != nil {
			return
		}
		tgt = readlinktgt
		if !strings.HasPrefix(tgt, "/") {
			// Relative symlink, resolve it to host path
			tgt = filepath.Join(filepath.Dir(path), tgt)
		}
		if strings.HasPrefix(tgt, runner.Container.OutputPath+"/") && !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
			// Absolute symlink to container output path, adjust it to host output path.
			tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
		}
		if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
			// After dereferencing, symlink target must either be
			// within output directory, or must point to a
			// collection mount.
			err = ErrNotInOutputDir
			return
		}
		info, err = os.Lstat(tgt)
		if err != nil {
			err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
				path[len(runner.HostOutputDir):], readlinktgt, err)
			return
		}
		nextlink = tgt
	}
	return
}

var limitFollowSymlinks = 10

// UploadOutputFile uploads files within the output directory, with special handling
// for symlinks. If the symlink leads to a keep mount, copy the manifest text
// from the keep mount into the output manifestText. Ensure that whether
// symlinks are relative or absolute, every symlink target (even targets that
// are symlinks themselves) must point to a path in either the output directory
// or a collection mount.
//
// Assumes initial value of "path" is absolute, and located within runner.HostOutputDir.
func (runner *ContainerRunner) UploadOutputFile(
	path string,
	info os.FileInfo,
	infoerr error,
	binds []string,
	walkUpload *WalkUpload,
	relocateFrom string,
	relocateTo string,
	followed int) (manifestText string, err error) {

	if infoerr != nil {
		return "", infoerr
	}

	if info.Mode().IsDir() {
		// if empty, need to create a .keep file
		dir, direrr := os.Open(path)
		if direrr != nil {
			return "", direrr
		}
		defer dir.Close()
		names, eof := dir.Readdirnames(1)
		if len(names) == 0 && eof == io.EOF && path != runner.HostOutputDir {
			containerPath := runner.OutputPath + path[len(runner.HostOutputDir):]
			for _, bind := range binds {
				mnt := runner.Container.Mounts[bind]
				// Check if there is a bind for this
				// directory, in which case assume we don't need .keep
				if (containerPath == bind || strings.HasPrefix(containerPath, bind+"/")) && mnt.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
					return
				}
			}
			outputSuffix := path[len(runner.HostOutputDir)+1:]
			// d41d8cd...+0 is the locator of the empty block (MD5 of zero bytes).
			return fmt.Sprintf("./%v d41d8cd98f00b204e9800998ecf8427e+0 0:0:.keep\n", outputSuffix), nil
		}
		return "", nil
	}

	if followed >= limitFollowSymlinks {
		// Got stuck in a loop or just a pathological number of
		// directory links, give up.
		err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
		return
	}

1085 // "path" is the actual path we are visiting
1086 // "tgt" is the target of "path" (a non-symlink) after following symlinks
1087 // "relocated" is the path in the output manifest where the file should be placed,
1088 // but has HostOutputDir as a prefix.
1090 // The destination path in the output manifest may need to be
1091 // logically relocated to some other path in order to appear
1092 // in the correct location as a result of following a symlink.
1093 // Remove the relocateFrom prefix and replace it with
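	//
	// For example (hypothetical paths): when walking a symlink
	// /hostout/sym -> /hostout/real, the recursive call passes
	// relocateFrom="/hostout/real" and relocateTo="/hostout/sym", so a
	// visited path "/hostout/real/a/b" is relocated to
	// "/hostout/sym/a/b" in the output manifest.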
	relocated := relocateTo + path[len(relocateFrom):]

	tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
	if derefErr != nil && derefErr != ErrNotInOutputDir {
		return "", derefErr
	}

	// go through mounts and try reverse map to collection reference
	for _, bind := range binds {
		mnt := runner.Container.Mounts[bind]
		if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
			// get path relative to bind
			targetSuffix := tgt[len(bind):]

			// Copy mount and adjust the path to add path relative to the bind
			adjustedMount := mnt
			adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)

			// Terminates in this keep mount, so add the
			// manifest text at appropriate location.
			outputSuffix := relocated[len(runner.HostOutputDir):]
			manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
			return
		}
	}

	// If target is not a collection mount, it must be located within the
	// output directory, otherwise it is an error.
	if derefErr == ErrNotInOutputDir {
		err = fmt.Errorf("Symlink in output %q points to invalid location %q, must point to path within the output directory.",
			path[len(runner.HostOutputDir):], readlinktgt)
		return
	}

	if info.Mode().IsRegular() {
		return "", walkUpload.UploadFile(relocated, tgt)
	}

	if info.Mode().IsDir() {
		// Symlink leads to directory. Walk() doesn't follow
		// directory symlinks, so we walk the target directory
		// instead. Within the walk, file paths are relocated
		// so they appear under the original symlink path.
		err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
			var m string
			m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
				binds, walkUpload, tgt, relocated, followed+1)
			if walkerr == nil {
				manifestText = manifestText + m
			}
			return walkerr
		})
		return
	}

	return
}

// CaptureOutput sets the output, unmounts the FUSE mount, and deletes temporary directories
func (runner *ContainerRunner) CaptureOutput() error {
	if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
		// Output may have been set directly by the container, so
		// refresh the container record to check.
		err := runner.ArvClient.Get("containers", runner.Container.UUID,
			nil, &runner.Container)
		if err != nil {
			return err
		}
		if runner.Container.Output != "" {
			// Container output is already set.
			runner.OutputPDH = &runner.Container.Output
			return nil
		}
	}

	if runner.HostOutputDir == "" {
		return nil
	}

	_, err := os.Stat(runner.HostOutputDir)
	if err != nil {
		return fmt.Errorf("While checking host output path: %v", err)
	}

	// Pre-populate output from the configured mount points
	var binds []string
	for bind, mnt := range runner.Container.Mounts {
		if mnt.Kind == "collection" {
			binds = append(binds, bind)
		}
	}
	sort.Strings(binds)

	var manifestText string

	collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
	_, err = os.Stat(collectionMetafile)
	if err != nil {
		// Regular directory
		cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
		walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)

		var m string
		err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
			m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
			if err == nil {
				manifestText = manifestText + m
			}
			return err
		})

		cw.EndUpload(walkUpload)
		if err != nil {
			return fmt.Errorf("While uploading output files: %v", err)
		}

		m, err = cw.ManifestText()
		manifestText = manifestText + m
		if err != nil {
			return fmt.Errorf("While uploading output files: %v", err)
		}
	} else {
		// FUSE mount directory
		file, openerr := os.Open(collectionMetafile)
		if openerr != nil {
			return fmt.Errorf("While opening FUSE metafile: %v", openerr)
		}
		defer file.Close()

		var rec arvados.Collection
		err = json.NewDecoder(file).Decode(&rec)
		if err != nil {
			return fmt.Errorf("While reading FUSE metafile: %v", err)
		}
		manifestText = rec.ManifestText
	}

	for _, bind := range binds {
		mnt := runner.Container.Mounts[bind]
		bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath)
		if bindSuffix == bind || len(bindSuffix) <= 0 {
			// either does not start with OutputPath or is OutputPath itself
			continue
		}
		if mnt.ExcludeFromOutput {
			continue
		}
		// append to manifest_text
		m, err := runner.getCollectionManifestForPath(mnt, bindSuffix)
		if err != nil {
			return err
		}
		manifestText = manifestText + m
	}

	var response arvados.Collection
	manifest := manifest.Manifest{Text: manifestText}
	manifestText = manifest.Extract(".", ".").Text
	err = runner.ArvClient.Create("collections",
		arvadosclient.Dict{
			"ensure_unique_name": true,
			"collection": arvadosclient.Dict{
				"name":          "output for " + runner.Container.UUID,
				"manifest_text": manifestText}},
		&response)
	if err != nil {
		return fmt.Errorf("While creating output collection: %v", err)
	}
	runner.OutputPDH = &response.PortableDataHash
	return nil
}
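
// For reference, a manifest_text fragment like the ones assembled above
// looks like (hypothetical locator):
//
//	./subdir 930625b054ce894ac40596c3f5a0d947+33 0:33:foo.txt
//
// i.e. a stream name, one or more Keep block locators, then
// position:length:filename segments.
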
var outputCollections = make(map[string]arvados.Collection)

// Fetch the collection for the mnt.PortableDataHash
// Return the manifest_text fragment corresponding to the specified mnt.Path
// after making any required updates.
//
// If mnt.Path is not specified,
// return the entire manifest_text after replacing any "." with bindSuffix
// If mnt.Path corresponds to one stream,
// return the manifest_text for that stream after replacing that stream name with bindSuffix
// Otherwise, check if a filename in any one stream is being sought. Return the manifest_text
// for that stream after replacing stream name with bindSuffix minus the last word
// and the file name with last word of the bindSuffix
// Allowed path examples:
//	"path":"/"
//	"path":"/subdir1"
//	"path":"/subdir1/subdir2"
//	"path":"/subdir/filename" etc
func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) {
	collection := outputCollections[mnt.PortableDataHash]
	if collection.PortableDataHash == "" {
		err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection)
		if err != nil {
			return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err)
		}
		outputCollections[mnt.PortableDataHash] = collection
	}

	if collection.ManifestText == "" {
		runner.CrunchLog.Printf("No manifest text for collection %v", collection.PortableDataHash)
		return "", nil
	}

	mft := manifest.Manifest{Text: collection.ManifestText}
	extracted := mft.Extract(mnt.Path, bindSuffix)
	if extracted.Err != nil {
		return "", fmt.Errorf("Error parsing manifest for %v: %v", mnt.PortableDataHash, extracted.Err.Error())
	}
	return extracted.Text, nil
}

func (runner *ContainerRunner) CleanupDirs() {
	if runner.ArvMount != nil {
		var delay int64 = 8 // seconds to wait for arv-mount to exit
		umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
		umount.Stdout = runner.CrunchLog
		umount.Stderr = runner.CrunchLog
		runner.CrunchLog.Printf("Running %v", umount.Args)
		umnterr := umount.Start()
		if umnterr != nil {
			runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
		} else {
			// If arv-mount --unmount gets stuck for any reason, we
			// don't want to wait for it forever. Do Wait() in a goroutine
			// so it doesn't block crunch-run.
			umountExit := make(chan error)
			go func() {
				mnterr := umount.Wait()
				if mnterr != nil {
					runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
				}
				umountExit <- mnterr
			}()

			for again := true; again; {
				again = false
				select {
				case <-umountExit:
					umount = nil
					again = true
				case <-runner.ArvMountExit:
				case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
					runner.CrunchLog.Printf("Timed out waiting for unmount")
					if umount != nil {
						umount.Process.Kill()
					}
					runner.ArvMount.Process.Kill()
				}
			}
		}
	}

	if runner.ArvMountPoint != "" {
		if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
			runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
		}
	}

	for _, tmpdir := range runner.CleanupTempDir {
		if rmerr := os.RemoveAll(tmpdir); rmerr != nil {
			runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", tmpdir, rmerr)
		}
	}
}

// CommitLogs posts the collection containing the final container logs.
func (runner *ContainerRunner) CommitLogs() error {
	runner.CrunchLog.Print(runner.finalState)

	if runner.arvMountLog != nil {
		runner.arvMountLog.Close()
	}
	runner.CrunchLog.Close()

	// Closing CrunchLog above allows the log messages to be committed to
	// Keep at this point, but re-open crunch log with ArvClient in case
	// there are any other further errors (such as failing to write the
	// log to Keep!) while shutting down.
	runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
		UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
	runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)

	if runner.LogsPDH != nil {
		// If we have already assigned something to LogsPDH,
		// we must be closing the re-opened log, which won't
		// end up getting attached to the container record and
		// therefore doesn't need to be saved as a collection
		// -- it exists only to send logs to other channels.
		return nil
	}

	mt, err := runner.LogCollection.ManifestText()
	if err != nil {
		return fmt.Errorf("While creating log manifest: %v", err)
	}

	var response arvados.Collection
	err = runner.ArvClient.Create("collections",
		arvadosclient.Dict{
			"ensure_unique_name": true,
			"collection": arvadosclient.Dict{
				"name":          "logs for " + runner.Container.UUID,
				"manifest_text": mt}},
		&response)
	if err != nil {
		return fmt.Errorf("While creating log collection: %v", err)
	}
	runner.LogsPDH = &response.PortableDataHash
	return nil
}

// UpdateContainerRunning updates the container state to "Running"
func (runner *ContainerRunner) UpdateContainerRunning() error {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	return runner.ArvClient.Update("containers", runner.Container.UUID,
		arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
}

// ContainerToken returns the api_token the container (and any
// arv-mount processes) are allowed to use.
func (runner *ContainerRunner) ContainerToken() (string, error) {
	if runner.token != "" {
		return runner.token, nil
	}

	var auth arvados.APIClientAuthorization
	err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
	if err != nil {
		return "", err
	}
	runner.token = auth.APIToken
	return runner.token, nil
}

// UpdateContainerFinal updates the container record state on API
// server to "Complete" or "Cancelled"
func (runner *ContainerRunner) UpdateContainerFinal() error {
	update := arvadosclient.Dict{}
	update["state"] = runner.finalState
	if runner.LogsPDH != nil {
		update["log"] = *runner.LogsPDH
	}
	if runner.finalState == "Complete" {
		if runner.ExitCode != nil {
			update["exit_code"] = *runner.ExitCode
		}
		if runner.OutputPDH != nil {
			update["output"] = *runner.OutputPDH
		}
	}
	return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
}

// IsCancelled returns the value of Cancelled, with goroutine safety.
func (runner *ContainerRunner) IsCancelled() bool {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	return runner.cCancelled
}

// NewArvLogWriter creates an ArvLogWriter
func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
	return &ArvLogWriter{
		ArvClient:     runner.ArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: name,
		writeCloser:   runner.LogCollection.Open(name + ".txt")}
}

// Run the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
	runner.CrunchLog.Printf("crunch-run %s started", version)
	runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)

	hostname, hosterr := os.Hostname()
	if hosterr != nil {
		runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
	} else {
		runner.CrunchLog.Printf("Executing on host '%s'", hostname)
	}

	runner.finalState = "Queued"

	defer func() {
		runner.stopSignals()
		runner.CleanupDirs()

		runner.CrunchLog.Printf("crunch-run finished")
		runner.CrunchLog.Close()
	}()

	defer func() {
		// checkErr prints e (unless it's nil) and sets err to
		// e (unless err is already non-nil). Thus, if err
		// hasn't already been assigned when Run() returns,
		// this cleanup func will cause Run() to return the
		// first non-nil error that is passed to checkErr().
		checkErr := func(e error) {
			if e == nil {
				return
			}
			runner.CrunchLog.Print(e)
			if err == nil {
				err = e
			}
			if runner.finalState == "Complete" {
				// There was an error in the finalization.
				runner.finalState = "Cancelled"
			}
		}

		// Log the error encountered in Run(), if any
		checkErr(err)

		if runner.finalState == "Queued" {
			runner.UpdateContainerFinal()
			return
		}

		if runner.IsCancelled() {
			runner.finalState = "Cancelled"
			// but don't return yet -- we still want to
			// capture partial output and write logs
		}

		checkErr(runner.CaptureOutput())
		checkErr(runner.stopHoststat())
		checkErr(runner.CommitLogs())
		checkErr(runner.UpdateContainerFinal())
	}()

	err = runner.fetchContainerRecord()
	if err != nil {
		return
	}
	runner.setupSignals()
	runner.startHoststat()

	// check for and/or load image
	err = runner.LoadImage()
	if err != nil {
		if !runner.checkBrokenNode(err) {
			// Failed to load image but not due to a "broken node"
			// condition, probably user error.
			runner.finalState = "Cancelled"
		}
		err = fmt.Errorf("While loading container image: %v", err)
		return
	}

	// set up FUSE mount and binds
	err = runner.SetupMounts()
	if err != nil {
		runner.finalState = "Cancelled"
		err = fmt.Errorf("While setting up mounts: %v", err)
		return
	}

	if err = runner.CreateContainer(); err != nil {
		return
	}
	if err = runner.LogHostInfo(); err != nil {
		return
	}
	if err = runner.LogNodeRecord(); err != nil {
		return
	}
	if err = runner.LogContainerRecord(); err != nil {
		return
	}

	if runner.IsCancelled() {
		return
	}

	if err = runner.UpdateContainerRunning(); err != nil {
		return
	}
	runner.finalState = "Cancelled"

	runner.startCrunchstat()

	if err = runner.StartContainer(); err != nil {
		runner.checkBrokenNode(err)
		return
	}

	err = runner.WaitFinish()
	if err == nil && !runner.IsCancelled() {
		runner.finalState = "Complete"
	}
	return
}

// Fetch the current container record (uuid = runner.Container.UUID)
// into runner.Container.
func (runner *ContainerRunner) fetchContainerRecord() error {
	reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
	if err != nil {
		return fmt.Errorf("error fetching container record: %v", err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	err = dec.Decode(&runner.Container)
	if err != nil {
		return fmt.Errorf("error decoding container record: %v", err)
	}
	return nil
}

// NewContainerRunner creates a new container runner.
func NewContainerRunner(api IArvadosClient,
	kc IKeepClient,
	docker ThinDockerClient,
	containerUUID string) *ContainerRunner {

	cr := &ContainerRunner{ArvClient: api, Kc: kc, Docker: docker}
	cr.NewLogWriter = cr.NewArvLogWriter
	cr.RunArvMount = cr.ArvMountCmd
	cr.MkTempDir = ioutil.TempDir
	cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
	cr.Container.UUID = containerUUID
	cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
	cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)

	loadLogThrottleParams(api)

	return cr
}

func main() {
	statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
	cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
	cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
	cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
	caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
	enableNetwork := flag.String("container-enable-networking", "default",
		`Specify if networking should be enabled for container. One of 'default', 'always':
	default: only enable networking if container requests it.
	always:  containers always have networking enabled
	`)
	networkMode := flag.String("container-network-mode", "default",
		`Set networking mode for container. Corresponds to Docker network mode (--net).
	`)
	memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
	getVersion := flag.Bool("version", false, "Print version information and exit.")
	flag.Parse()

	// Print version information if requested
	if *getVersion {
		fmt.Printf("crunch-run %s\n", version)
		return
	}

	log.Printf("crunch-run %s started", version)

	containerId := flag.Arg(0)

	if *caCertsPath != "" {
		arvadosclient.CertFiles = []string{*caCertsPath}
	}

	api, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Fatalf("%s: %v", containerId, err)
	}
	api.Retries = 8

	kc, kcerr := keepclient.MakeKeepClient(api)
	if kcerr != nil {
		log.Fatalf("%s: %v", containerId, kcerr)
	}
	kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
	kc.Retries = 4

	// API version 1.21 corresponds to Docker 1.9, which is currently the
	// minimum version we want to support.
	docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)

	cr := NewContainerRunner(api, kc, docker, containerId)
	if dockererr != nil {
		cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
		cr.checkBrokenNode(dockererr)
		cr.CrunchLog.Close()
		os.Exit(1)
	}

	cr.statInterval = *statInterval
	cr.cgroupRoot = *cgroupRoot
	cr.expectCgroupParent = *cgroupParent
	cr.enableNetwork = *enableNetwork
	cr.networkMode = *networkMode
	if *cgroupParentSubsystem != "" {
		p := findCgroup(*cgroupParentSubsystem)
		cr.setCgroupParent = p
		cr.expectCgroupParent = p
	}

	runerr := cr.Run()

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Printf("could not create memory profile: %s", err)
		}
		runtime.GC() // get up-to-date statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Printf("could not write memory profile: %s", err)
		}
		closeerr := f.Close()
		if closeerr != nil {
			log.Printf("closing memprofile file: %s", closeerr)
		}
	}

	if runerr != nil {
		log.Fatalf("%s: %v", containerId, runerr)
	}
}