// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main
30 "git.curoverse.com/arvados.git/lib/crunchstat"
31 "git.curoverse.com/arvados.git/sdk/go/arvados"
32 "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
33 "git.curoverse.com/arvados.git/sdk/go/keepclient"
34 "git.curoverse.com/arvados.git/sdk/go/manifest"
35 "golang.org/x/net/context"
37 dockertypes "github.com/docker/docker/api/types"
38 dockercontainer "github.com/docker/docker/api/types/container"
39 dockernetwork "github.com/docker/docker/api/types/network"
40 dockerclient "github.com/docker/docker/client"

// IArvadosClient is the minimal Arvados API methods used by crunch-run.
type IArvadosClient interface {
	Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
	Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
	CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
	Discovery(key string) (interface{}, error)
}
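
// A typical call through this interface mirrors the Arvados SDK client,
// e.g. (a sketch, as used in LoadImage below):
//
//	var collection arvados.Collection
//	err := runner.ArvClient.Get("collections", uuid, nil, &collection)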

// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")

// IKeepClient is the minimal Keep API methods used by crunch-run.
type IKeepClient interface {
	PutHB(hash string, buf []byte) (string, int, error)
	ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
	ClearBlockCache()
}

// NewLogWriter is a factory function to create a new log writer.
type NewLogWriter func(name string) io.WriteCloser

// RunArvMount is a factory function to start an arv-mount command.
type RunArvMount func(args []string, tok string) (*exec.Cmd, error)

// MkTempDir is a factory function to create a temporary directory.
type MkTempDir func(string, string) (string, error)

// ThinDockerClient is the minimal Docker client interface used by crunch-run.
type ThinDockerClient interface {
	ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error)
	ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
		networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
	ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
	ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
	ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
	ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
	ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
	ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
}
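
// Over a container's lifetime, crunch-run drives a ThinDockerClient through
// roughly this sequence (a sketch; variable names are illustrative and error
// handling is omitted):
//
//	body, _ := docker.ContainerCreate(ctx, &config, &hostConfig, nil, uuid)
//	resp, _ := docker.ContainerAttach(ctx, body.ID, attachOptions)
//	_ = docker.ContainerStart(ctx, body.ID, dockertypes.ContainerStartOptions{})
//	waitOk, waitErr := docker.ContainerWait(ctx, body.ID, dockercontainer.WaitConditionNotRunning)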

// ContainerRunner is the main stateful struct used for a single execution of a
// container.
type ContainerRunner struct {
	Docker    ThinDockerClient
	ArvClient IArvadosClient
	Kc        IKeepClient
	arvados.Container
	ContainerConfig dockercontainer.Config
	dockercontainer.HostConfig
	CrunchLog      *ThrottledLogger
	Stdout         io.WriteCloser
	Stderr         io.WriteCloser
	LogCollection  *CollectionWriter
	CleanupTempDir []string
	Volumes        map[string]struct{}
	SigChan        chan os.Signal
	ArvMountExit   chan error
	statLogger   io.WriteCloser
	statReporter *crunchstat.Reporter
	statInterval time.Duration

	// What we expect the container's cgroup parent to be.
	expectCgroupParent string
	// What we tell docker to use as the container's cgroup
	// parent. Note: Ideally we would use the same field for both
	// expectCgroupParent and setCgroupParent, and just make it
	// default to "docker". However, when using docker < 1.10 with
	// systemd, specifying a non-empty cgroup parent (even the
	// default value "docker") hits a docker bug
	// (https://github.com/docker/docker/issues/17126). Using two
	// separate fields makes it possible to use the "expect cgroup
	// parent to be X" feature even on sites where the "specify
	// cgroup parent" feature breaks.
	setCgroupParent string

	cStateLock sync.Mutex
	cCancelled bool // stop() invoked

	enableNetwork string // one of "default" or "always"
	networkMode   string // passed through to HostConfig.NetworkMode
	arvMountLog   *ThrottledLogger
}

// setupSignals sets up signal handling to gracefully terminate the underlying
// Docker container and update state when receiving a TERM, INT or QUIT signal.
func (runner *ContainerRunner) setupSignals() {
	runner.SigChan = make(chan os.Signal, 1)
	signal.Notify(runner.SigChan, syscall.SIGTERM)
	signal.Notify(runner.SigChan, syscall.SIGINT)
	signal.Notify(runner.SigChan, syscall.SIGQUIT)

	go func(sig chan os.Signal) {
		for s := range sig {
			runner.CrunchLog.Printf("caught signal: %v", s)
			runner.stop()
		}
	}(runner.SigChan)
}

// stop the underlying Docker container.
func (runner *ContainerRunner) stop() {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.ContainerID == "" {
		return
	}
	runner.cCancelled = true
	runner.CrunchLog.Printf("removing container")
	err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
	if err != nil {
		runner.CrunchLog.Printf("error removing container: %s", err)
	}
}

// stopSignals stops catching signals.
func (runner *ContainerRunner) stopSignals() {
	if runner.SigChan != nil {
		signal.Stop(runner.SigChan)
	}
}

var errorBlacklist = []string{
	"(?ms).*[Cc]annot connect to the Docker daemon.*",
	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
}

var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")

func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
	for _, d := range errorBlacklist {
		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
			if *brokenNodeHook == "" {
				runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
			} else {
				runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
				c := exec.Command(*brokenNodeHook)
				c.Stdout = runner.CrunchLog
				c.Stderr = runner.CrunchLog
				err := c.Run()
				if err != nil {
					runner.CrunchLog.Printf("Error running broken node hook: %v", err)
				}
			}
			return true
		}
	}
	return false
}

// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store. If not, it loads
// the image from Keep.
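//
// The image id is taken from the collection contents: the first file in the
// manifest must be named "<imageID>.tar". For example (illustrative), if the
// collection's only file is "abc123.tar", the image is loaded and used with
// Docker image id "abc123".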
func (runner *ContainerRunner) LoadImage() (err error) {
	runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

	var collection arvados.Collection
	err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
	if err != nil {
		return fmt.Errorf("While getting container image collection: %v", err)
	}
	manifest := manifest.Manifest{Text: collection.ManifestText}
	var img, imageID string
	for ms := range manifest.StreamIter() {
		img = ms.FileStreamSegments[0].Name
		if !strings.HasSuffix(img, ".tar") {
			return fmt.Errorf("First file in the container image collection does not end in .tar")
		}
		imageID = img[:len(img)-4]
	}

	runner.CrunchLog.Printf("Using Docker image id '%s'", imageID)

	_, _, err = runner.Docker.ImageInspectWithRaw(context.TODO(), imageID)
	if err != nil {
		runner.CrunchLog.Print("Loading Docker image from keep")

		var readCloser io.ReadCloser
		readCloser, err = runner.Kc.ManifestFileReader(manifest, img)
		if err != nil {
			return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
		}

		response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
		if err != nil {
			return fmt.Errorf("While loading container image into Docker: %v", err)
		}

		defer response.Body.Close()
		rbody, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return fmt.Errorf("Reading response to image load: %v", err)
		}
		runner.CrunchLog.Printf("Docker response: %s", rbody)
	} else {
		runner.CrunchLog.Print("Docker image is available")
	}

	runner.ContainerConfig.Image = imageID

	runner.Kc.ClearBlockCache()

	return nil
}

// ArvMountCmd starts "arv-mount" with the given command line arguments and
// the container token in its environment, and waits for the mount point to
// become ready.
func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
	c = exec.Command("arv-mount", arvMountCmd...)

	// Copy our environment, but override ARVADOS_API_TOKEN with
	// the container auth token.
	for _, s := range os.Environ() {
		if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
			c.Env = append(c.Env, s)
		}
	}
	c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

	runner.arvMountLog = NewThrottledLogger(runner.NewLogWriter("arv-mount"))
	c.Stdout = runner.arvMountLog
	c.Stderr = runner.arvMountLog

	runner.CrunchLog.Printf("Running %v", c.Args)

	err = c.Start()
	if err != nil {
		return nil, err
	}

	statReadme := make(chan bool)
	runner.ArvMountExit = make(chan error)

	keepStatting := true
	go func() {
		for keepStatting {
			time.Sleep(100 * time.Millisecond)
			_, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
			if err == nil {
				keepStatting = false
				statReadme <- true
			}
		}
		close(statReadme)
	}()

	go func() {
		mnterr := c.Wait()
		if mnterr != nil {
			runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
		}
		runner.ArvMountExit <- mnterr
		close(runner.ArvMountExit)
	}()

	select {
	case <-statReadme:
		break
	case err := <-runner.ArvMountExit:
		runner.ArvMount = nil
		keepStatting = false
		return nil, err
	}

	return c, nil
}

func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
	if runner.ArvMountPoint == "" {
		runner.ArvMountPoint, err = runner.MkTempDir("", prefix)
	}
	return
}

// SetupMounts sets up the temporary directories, arv-mount command line, and
// Docker bind mounts for the container.
func (runner *ContainerRunner) SetupMounts() (err error) {
	err = runner.SetupArvMountPoint("keep")
	if err != nil {
		return fmt.Errorf("While creating keep mount temp dir: %v", err)
	}

	token, err := runner.ContainerToken()
	if err != nil {
		return fmt.Errorf("could not get container token: %s", err)
	}

	pdhOnly := true
	tmpcount := 0
	arvMountCmd := []string{
		"--foreground",
		"--allow-other",
		"--read-write",
		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}

	if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
		arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
	}

	collectionPaths := []string{}
	runner.Binds = nil
	runner.Volumes = make(map[string]struct{})
	needCertMount := true

	var binds []string
	for bind := range runner.Container.Mounts {
		binds = append(binds, bind)
	}
	sort.Strings(binds)

	for _, bind := range binds {
		mnt := runner.Container.Mounts[bind]
		if bind == "stdout" || bind == "stderr" {
			// Is it a "file" mount kind?
			if mnt.Kind != "file" {
				return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind)
			}

			// Does path start with OutputPath?
			prefix := runner.Container.OutputPath
			if !strings.HasSuffix(prefix, "/") {
				prefix += "/"
			}
			if !strings.HasPrefix(mnt.Path, prefix) {
				return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
			}
		}

		if bind == "stdin" {
			// Is it a "collection" mount kind?
			if mnt.Kind != "collection" && mnt.Kind != "json" {
				return fmt.Errorf("Unsupported mount kind '%s' for stdin. Only 'collection' or 'json' are supported.", mnt.Kind)
			}
		}

		if bind == "/etc/arvados/ca-certificates.crt" {
			needCertMount = false
		}

		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
			if mnt.Kind != "collection" {
				return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind)
			}
		}

		switch {
		case mnt.Kind == "collection" && bind != "stdin":
			var src string
			if mnt.UUID != "" && mnt.PortableDataHash != "" {
				return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
			}
			if mnt.UUID != "" {
				if mnt.Writable {
					return fmt.Errorf("Writing to existing collections currently not permitted.")
				}
				pdhOnly = false
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
			} else if mnt.PortableDataHash != "" {
				if mnt.Writable {
					return fmt.Errorf("Can never write to a collection specified by portable data hash")
				}
				idx := strings.Index(mnt.PortableDataHash, "/")
				if idx > 0 {
					mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
					mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
					runner.Container.Mounts[bind] = mnt
				}
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
				if mnt.Path != "" && mnt.Path != "." {
					if strings.HasPrefix(mnt.Path, "./") {
						mnt.Path = mnt.Path[2:]
					} else if strings.HasPrefix(mnt.Path, "/") {
						mnt.Path = mnt.Path[1:]
					}
					src += "/" + mnt.Path
				}
			} else {
				src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
				arvMountCmd = append(arvMountCmd, "--mount-tmp")
				arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
				tmpcount += 1
			}
			if mnt.Writable {
				if bind == runner.Container.OutputPath {
					runner.HostOutputDir = src
				} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
				}
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
			} else {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
			}
			collectionPaths = append(collectionPaths, src)

		case mnt.Kind == "tmp":
			var tmpdir string
			tmpdir, err = runner.MkTempDir("", "")
			if err != nil {
				return fmt.Errorf("While creating mount temp dir: %v", err)
			}
			st, staterr := os.Stat(tmpdir)
			if staterr != nil {
				return fmt.Errorf("While Stat on temp dir: %v", staterr)
			}
			err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return fmt.Errorf("While Chmod temp dir: %v", err)
			}
			runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
			runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = tmpdir
			}

		case mnt.Kind == "json":
			jsondata, err := json.Marshal(mnt.Content)
			if err != nil {
				return fmt.Errorf("encoding json data: %v", err)
			}
			// Create a tempdir with a single file
			// (instead of just a tempfile): this way we
			// can ensure the file is world-readable
			// inside the container, without having to
			// make it world-readable on the docker host.
			tmpdir, err := runner.MkTempDir("", "")
			if err != nil {
				return fmt.Errorf("creating temp dir: %v", err)
			}
			runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
			tmpfn := filepath.Join(tmpdir, "mountdata.json")
			err = ioutil.WriteFile(tmpfn, jsondata, 0644)
			if err != nil {
				return fmt.Errorf("writing temp file: %v", err)
			}
			runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))

		case mnt.Kind == "git_tree":
			tmpdir, err := runner.MkTempDir("", "")
			if err != nil {
				return fmt.Errorf("creating temp dir: %v", err)
			}
			runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
			err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
			if err != nil {
				return err
			}
			runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
		}
	}

	if runner.HostOutputDir == "" {
		return fmt.Errorf("Output path does not correspond to a writable mount point")
	}

	if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
		for _, certfile := range arvadosclient.CertFiles {
			_, err := os.Stat(certfile)
			if err == nil {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
				break
			}
		}
	}

	if pdhOnly {
		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
	} else {
		arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
	}
	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)

	runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
	if err != nil {
		return fmt.Errorf("While trying to start arv-mount: %v", err)
	}

	for _, p := range collectionPaths {
		_, err = os.Stat(p)
		if err != nil {
			return fmt.Errorf("While checking that input files exist: %v", err)
		}
	}

	return nil
}

func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
	// Handle docker log protocol
	// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
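	//
	// Each frame begins with an 8-byte header: byte 0 identifies the
	// stream (1=stdout, 2=stderr) and bytes 4-7 hold the payload size as a
	// big-endian uint32. For example (illustrative), a 13-byte stdout
	// payload arrives as the header {1, 0, 0, 0, 0, 0, 0, 13} followed by
	// 13 payload bytes; the loop below reads a header, then copies exactly
	// that many bytes to the stdout or stderr writer.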

	header := make([]byte, 8)
	for {
		_, readerr := io.ReadAtLeast(containerReader, header, 8)

		if readerr == nil {
			readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
			if header[0] == 1 {
				// stdout
				_, readerr = io.CopyN(runner.Stdout, containerReader, readsize)
			} else {
				// stderr
				_, readerr = io.CopyN(runner.Stderr, containerReader, readsize)
			}
		}

		if readerr != nil {
			if readerr != io.EOF {
				runner.CrunchLog.Printf("While reading docker logs: %v", readerr)
			}

			closeerr := runner.Stdout.Close()
			if closeerr != nil {
				runner.CrunchLog.Printf("While closing stdout logs: %v", closeerr)
			}

			closeerr = runner.Stderr.Close()
			if closeerr != nil {
				runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr)
			}

			if runner.statReporter != nil {
				runner.statReporter.Stop()
				closeerr = runner.statLogger.Close()
				if closeerr != nil {
					runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr)
				}
			}

			close(runner.loggingDone)
			return
		}
	}
}

// StartCrunchstat starts the crunchstat resource usage reporter.
func (runner *ContainerRunner) StartCrunchstat() {
	runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
	runner.statReporter = &crunchstat.Reporter{
		CID:          runner.ContainerID,
		Logger:       log.New(runner.statLogger, "", 0),
		CgroupParent: runner.expectCgroupParent,
		CgroupRoot:   runner.cgroupRoot,
		PollPeriod:   runner.statInterval,
	}
	runner.statReporter.Start()
}

type infoCommand struct {
	label string
	cmd   []string
}

// LogHostInfo logs info about the current host, for debugging and
// accounting purposes. Although it's logged as "node-info", this is
// about the environment where crunch-run is actually running, which
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
	w := runner.NewLogWriter("node-info")

	commands := []infoCommand{
		{
			label: "Host Information",
			cmd:   []string{"uname", "-a"},
		},
		{
			label: "CPU Information",
			cmd:   []string{"cat", "/proc/cpuinfo"},
		},
		{
			label: "Memory Information",
			cmd:   []string{"cat", "/proc/meminfo"},
		},
		{
			label: "Disk Space",
			cmd:   []string{"df", "-m", "/", os.TempDir()},
		},
		{
			label: "Disk INodes",
			cmd:   []string{"df", "-i", "/", os.TempDir()},
		},
	}

	// Run commands with informational output to be logged.
	for _, command := range commands {
		fmt.Fprintln(w, command.label)
		cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
		cmd.Stdout = w
		cmd.Stderr = w
		if err := cmd.Run(); err != nil {
			err = fmt.Errorf("While running command %q: %v", command.cmd, err)
			fmt.Fprintln(w, err)
			return err
		}
		fmt.Fprintln(w)
	}

	err = w.Close()
	if err != nil {
		return fmt.Errorf("While closing node-info logs: %v", err)
	}
	return nil
}

// LogContainerRecord gets and saves the raw JSON container record from the API server.
func (runner *ContainerRunner) LogContainerRecord() error {
	logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
	if !logged && err == nil {
		err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
	}
	return err
}

// LogNodeRecord logs the arvados#node record corresponding to the current host.
func (runner *ContainerRunner) LogNodeRecord() error {
	hostname := os.Getenv("SLURMD_NODENAME")
	if hostname == "" {
		hostname, _ = os.Hostname()
	}
	_, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
		// The "info" field has admin-only info when obtained
		// with a privileged token, and should not be logged.
		node, ok := resp.(map[string]interface{})
		if ok {
			delete(node, "info")
		}
	})
	return err
}

func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
	w := &ArvLogWriter{
		ArvClient:     runner.ArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: label,
		writeCloser:   runner.LogCollection.Open(label + ".json"),
	}

	reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
	if err != nil {
		return false, fmt.Errorf("error getting %s record: %v", label, err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	var resp map[string]interface{}
	if err = dec.Decode(&resp); err != nil {
		return false, fmt.Errorf("error decoding %s list response: %v", label, err)
	}
	items, ok := resp["items"].([]interface{})
	if !ok {
		return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
	} else if len(items) < 1 {
		return false, nil
	}
	if munge != nil {
		munge(items[0])
	}
	// Re-encode it using indentation to improve readability
	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	if err = enc.Encode(items[0]); err != nil {
		return false, fmt.Errorf("error logging %s record: %v", label, err)
	}
	err = w.Close()
	if err != nil {
		return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
	}
	return true, nil
}

// AttachStreams connects the docker container stdin, stdout and stderr logs
// to the Arvados logger which logs to Keep and the API server logs table.
func (runner *ContainerRunner) AttachStreams() (err error) {

	runner.CrunchLog.Print("Attaching container streams")

	// If stdin mount is provided, attach it to the docker container
	var stdinRdr arvados.File
	var stdinJson []byte
	if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
		if stdinMnt.Kind == "collection" {
			var stdinColl arvados.Collection
			collId := stdinMnt.UUID
			if collId == "" {
				collId = stdinMnt.PortableDataHash
			}
			err = runner.ArvClient.Get("collections", collId, nil, &stdinColl)
			if err != nil {
				return fmt.Errorf("While getting stdin collection: %v", err)
			}

			stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path)
			if os.IsNotExist(err) {
				return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
			} else if err != nil {
				return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
			}
		} else if stdinMnt.Kind == "json" {
			stdinJson, err = json.Marshal(stdinMnt.Content)
			if err != nil {
				return fmt.Errorf("While encoding stdin json data: %v", err)
			}
		}
	}

	stdinUsed := stdinRdr != nil || len(stdinJson) != 0
	response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID,
		dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
	if err != nil {
		return fmt.Errorf("While attaching container stdout/stderr streams: %v", err)
	}

	runner.loggingDone = make(chan bool)

	if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
		stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path)
		if err != nil {
			return err
		}
		runner.Stdout = stdoutFile
	} else {
		runner.Stdout = NewThrottledLogger(runner.NewLogWriter("stdout"))
	}

	if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
		stderrFile, err := runner.getStdoutFile(stderrMnt.Path)
		if err != nil {
			return err
		}
		runner.Stderr = stderrFile
	} else {
		runner.Stderr = NewThrottledLogger(runner.NewLogWriter("stderr"))
	}

	if stdinRdr != nil {
		go func() {
			_, err := io.Copy(response.Conn, stdinRdr)
			if err != nil {
				runner.CrunchLog.Printf("While writing stdin collection to docker container %q", err)
			}
			stdinRdr.Close()
			response.CloseWrite()
		}()
	} else if len(stdinJson) != 0 {
		go func() {
			_, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
			if err != nil {
				runner.CrunchLog.Printf("While writing stdin json to docker container %q", err)
			}
			response.CloseWrite()
		}()
	}

	go runner.ProcessDockerAttach(response.Reader)

	return nil
}
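
// getStdoutFile returns an *os.File in the host output directory backing a
// "file"-kind stdout or stderr mount, creating parent directories as needed.
// For example (illustrative), with OutputPath "/out" and HostOutputDir
// "/tmp/outdir", the mount path "/out/sub/log.txt" creates "/tmp/outdir/sub"
// and returns a file handle for "/tmp/outdir/sub/log.txt".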
func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
	stdoutPath := mntPath[len(runner.Container.OutputPath):]
	index := strings.LastIndex(stdoutPath, "/")
	if index > 0 {
		subdirs := stdoutPath[:index]
		if subdirs != "" {
			st, err := os.Stat(runner.HostOutputDir)
			if err != nil {
				return nil, fmt.Errorf("While Stat on temp dir: %v", err)
			}
			stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
			err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
			}
		}
	}
	stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
	if err != nil {
		return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
	}

	return stdoutFile, nil
}

// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer() error {
	runner.CrunchLog.Print("Creating Docker container")

	runner.ContainerConfig.Cmd = runner.Container.Command
	if runner.Container.Cwd != "." {
		runner.ContainerConfig.WorkingDir = runner.Container.Cwd
	}

	for k, v := range runner.Container.Environment {
		runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v)
	}

	runner.ContainerConfig.Volumes = runner.Volumes

	runner.HostConfig = dockercontainer.HostConfig{
		Binds: runner.Binds,
		LogConfig: dockercontainer.LogConfig{
			Type: "none",
		},
		Resources: dockercontainer.Resources{
			CgroupParent: runner.setCgroupParent,
		},
	}

	if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
		tok, err := runner.ContainerToken()
		if err != nil {
			return err
		}
		runner.ContainerConfig.Env = append(runner.ContainerConfig.Env,
			"ARVADOS_API_TOKEN="+tok,
			"ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"),
			"ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"),
		)
		runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
	} else {
		if runner.enableNetwork == "always" {
			runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
		} else {
			runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
		}
	}

	_, stdinUsed := runner.Container.Mounts["stdin"]
	runner.ContainerConfig.OpenStdin = stdinUsed
	runner.ContainerConfig.StdinOnce = stdinUsed
	runner.ContainerConfig.AttachStdin = stdinUsed
	runner.ContainerConfig.AttachStdout = true
	runner.ContainerConfig.AttachStderr = true

	createdBody, err := runner.Docker.ContainerCreate(context.TODO(), &runner.ContainerConfig, &runner.HostConfig, nil, runner.Container.UUID)
	if err != nil {
		return fmt.Errorf("While creating container: %v", err)
	}

	runner.ContainerID = createdBody.ID

	return runner.AttachStreams()
}

// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
	runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID)
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
		dockertypes.ContainerStartOptions{})
	if err != nil {
		var advice string
		if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
			advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
		}
		return fmt.Errorf("could not start container: %v%s", err, advice)
	}
	return nil
}

// WaitFinish waits for the container to terminate, captures the exit code,
// and closes the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() error {
	runner.CrunchLog.Print("Waiting for container to finish")

	waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
	arvMountExit := runner.ArvMountExit
	for {
		select {
		case waitBody := <-waitOk:
			runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
			code := int(waitBody.StatusCode)
			runner.ExitCode = &code

			// wait for stdout/stderr to complete
			<-runner.loggingDone
			return nil

		case err := <-waitErr:
			return fmt.Errorf("container wait: %v", err)

		case <-arvMountExit:
			runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
			runner.stop()
			// arvMountExit will always be ready now that
			// it's closed, but that doesn't interest us.
			arvMountExit = nil
		}
	}
}

var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")

func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
	// Follow symlinks if necessary
	info = startinfo
	tgt = path
	readlinktgt = ""
	nextlink := path
	for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
		if followed >= limitFollowSymlinks {
			// Got stuck in a loop or just a pathological number of links, give up.
			err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
			return
		}

		readlinktgt, err = os.Readlink(nextlink)
		if err != nil {
			return
		}

		tgt = readlinktgt
		if !strings.HasPrefix(tgt, "/") {
			// Relative symlink, resolve it to host path
			tgt = filepath.Join(filepath.Dir(path), tgt)
		}
		if strings.HasPrefix(tgt, runner.Container.OutputPath+"/") && !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
			// Absolute symlink to container output path, adjust it to host output path.
			tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
		}
		if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
			// After dereferencing, symlink target must either be
			// within output directory, or must point to a
			// collection mount.
			err = ErrNotInOutputDir
			return
		}

		info, err = os.Lstat(tgt)
		if err != nil {
			err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
				path[len(runner.HostOutputDir):], readlinktgt, err)
			return
		}

		nextlink = tgt
	}

	return
}

var limitFollowSymlinks = 10

// UploadOutputFile uploads files within the output directory, with special
// handling for symlinks. If a symlink leads to a keep mount, copy the
// manifest text from the keep mount into the output manifestText. Ensure
// that whether symlinks are relative or absolute, every symlink target (even
// targets that are symlinks themselves) points to a path in either the
// output directory or a collection mount.
//
// Assumes the initial value of "path" is absolute, and located within runner.HostOutputDir.
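//
// For example (a sketch): if <output>/foo is a symlink whose target resolves
// into a "collection" mount, the matching manifest fragment is copied from
// that mount's collection into the output manifest under "./foo" instead of
// re-uploading the file data.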
func (runner *ContainerRunner) UploadOutputFile(
	path string,
	info os.FileInfo,
	infoerr error,
	binds []string,
	walkUpload *WalkUpload,
	relocateFrom string,
	relocateTo string,
	followed int) (manifestText string, err error) {
	if infoerr != nil {
		return "", infoerr
	}

	if info.Mode().IsDir() {
		// if empty, need to create a .keep file
		dir, direrr := os.Open(path)
		if direrr != nil {
			return "", direrr
		}
		defer dir.Close()
		names, eof := dir.Readdirnames(1)
		if len(names) == 0 && eof == io.EOF && path != runner.HostOutputDir {
			containerPath := runner.OutputPath + path[len(runner.HostOutputDir):]
			for _, bind := range binds {
				mnt := runner.Container.Mounts[bind]
				// Check if there is a bind for this
				// directory, in which case assume we don't need .keep
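				// (d41d8cd98f00b204e9800998ecf8427e+0 is the portable
				// data hash of an empty collection, so a mount with
				// that PDH doesn't count as pre-populated content.)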
				if (containerPath == bind || strings.HasPrefix(containerPath, bind+"/")) && mnt.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
					return
				}
			}
			outputSuffix := path[len(runner.HostOutputDir)+1:]
			return fmt.Sprintf("./%v d41d8cd98f00b204e9800998ecf8427e+0 0:0:.keep\n", outputSuffix), nil
		}
		return "", nil
	}

	if followed >= limitFollowSymlinks {
		// Got stuck in a loop or just a pathological number of
		// directory links, give up.
		err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
		return
	}

	// "path" is the actual path we are visiting
	// "tgt" is the target of "path" (a non-symlink) after following symlinks
	// "relocated" is the path in the output manifest where the file should be placed,
	// but has HostOutputDir as a prefix.

	// The destination path in the output manifest may need to be
	// logically relocated to some other path in order to appear
	// in the correct location as a result of following a symlink.
	// Remove the relocateFrom prefix and replace it with
	// relocateTo.
	relocated := relocateTo + path[len(relocateFrom):]

	tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
	if derefErr != nil && derefErr != ErrNotInOutputDir {
		return "", derefErr
	}

	// go through mounts and try reverse map to collection reference
	for _, bind := range binds {
		mnt := runner.Container.Mounts[bind]
		if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
			// get path relative to bind
			targetSuffix := tgt[len(bind):]

			// Copy mount and adjust the path to add path relative to the bind
			adjustedMount := mnt
			adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)

			// Terminates in this keep mount, so add the
			// manifest text at appropriate location.
			outputSuffix := relocated[len(runner.HostOutputDir):]
			manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
			return
		}
	}

	// If target is not a collection mount, it must be located within the
	// output directory, otherwise it is an error.
	if derefErr == ErrNotInOutputDir {
		err = fmt.Errorf("Symlink in output %q points to invalid location %q, must point to path within the output directory.",
			path[len(runner.HostOutputDir):], readlinktgt)
		return
	}

	if info.Mode().IsRegular() {
		return "", walkUpload.UploadFile(relocated, tgt)
	}

	if info.Mode().IsDir() {
		// Symlink leads to directory. Walk() doesn't follow
		// directory symlinks, so we walk the target directory
		// instead. Within the walk, file paths are relocated
		// so they appear under the original symlink path.
		err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
			var m string
			m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
				binds, walkUpload, tgt, relocated, followed+1)
			if walkerr == nil {
				manifestText = manifestText + m
			}
			return walkerr
		})
	}

	return
}

// CaptureOutput finds the container's output, uploading the contents of
// HostOutputDir to Keep if needed, and records the resulting portable data
// hash in runner.OutputPDH.
func (runner *ContainerRunner) CaptureOutput() error {
	if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
		// Output may have been set directly by the container, so
		// refresh the container record to check.
		err := runner.ArvClient.Get("containers", runner.Container.UUID,
			nil, &runner.Container)
		if err != nil {
			return err
		}
		if runner.Container.Output != "" {
			// Container output is already set.
			runner.OutputPDH = &runner.Container.Output
			return nil
		}
	}

	if runner.HostOutputDir == "" {
		return nil
	}

	_, err := os.Stat(runner.HostOutputDir)
	if err != nil {
		return fmt.Errorf("While checking host output path: %v", err)
	}

	// Pre-populate output from the configured mount points
	var binds []string
	for bind, mnt := range runner.Container.Mounts {
		if mnt.Kind == "collection" {
			binds = append(binds, bind)
		}
	}
	sort.Strings(binds)

	var manifestText string

	collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
	_, err = os.Stat(collectionMetafile)
	if err != nil {
		// Regular directory

		cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
		walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)

		var m string
		err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
			m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
			if err == nil {
				manifestText = manifestText + m
			}
			return err
		})

		cw.EndUpload(walkUpload)

		if err != nil {
			return fmt.Errorf("While uploading output files: %v", err)
		}

		m, err = cw.ManifestText()
		manifestText = manifestText + m
		if err != nil {
			return fmt.Errorf("While uploading output files: %v", err)
		}
	} else {
		// FUSE mount directory
		file, openerr := os.Open(collectionMetafile)
		if openerr != nil {
			return fmt.Errorf("While opening FUSE metafile: %v", openerr)
		}
		defer file.Close()

		var rec arvados.Collection
		err = json.NewDecoder(file).Decode(&rec)
		if err != nil {
			return fmt.Errorf("While reading FUSE metafile: %v", err)
		}
		manifestText = rec.ManifestText
	}

	for _, bind := range binds {
		mnt := runner.Container.Mounts[bind]

		bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath)

		if bindSuffix == bind || len(bindSuffix) <= 0 {
			// either does not start with OutputPath or is OutputPath itself
			continue
		}

		if mnt.ExcludeFromOutput {
			continue
		}

		// append to manifest_text
		m, err := runner.getCollectionManifestForPath(mnt, bindSuffix)
		if err != nil {
			return err
		}

		manifestText = manifestText + m
	}

	// Save output
	var response arvados.Collection
	manifest := manifest.Manifest{Text: manifestText}
	manifestText = manifest.Extract(".", ".").Text
	err = runner.ArvClient.Create("collections",
		arvadosclient.Dict{
			"ensure_unique_name": true,
			"collection": arvadosclient.Dict{
				"name":          "output for " + runner.Container.UUID,
				"manifest_text": manifestText}},
		&response)
	if err != nil {
		return fmt.Errorf("While creating output collection: %v", err)
	}
	runner.OutputPDH = &response.PortableDataHash
	return nil
}

var outputCollections = make(map[string]arvados.Collection)

// Fetch the collection for the mnt.PortableDataHash
// Return the manifest_text fragment corresponding to the specified mnt.Path
// after making any required updates.
//
// If mnt.Path is not specified,
// return the entire manifest_text after replacing any "." with bindSuffix
// If mnt.Path corresponds to one stream,
// return the manifest_text for that stream after replacing that stream name with bindSuffix
// Otherwise, check if a filename in any one stream is being sought. Return the manifest_text
// for that stream after replacing stream name with bindSuffix minus the last word
// and the file name with last word of the bindSuffix
// Allowed path examples:
//	"path":"/"
//	"path":"/subdir1"
//	"path":"/subdir1/subdir2"
//	"path":"/subdir/filename" etc
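//
// For example (illustrative): with mnt.Path "/subdir1" and bindSuffix
// "/outsub", the stream "./subdir1" from the mounted collection is returned
// renamed to "./outsub" in the manifest fragment.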
func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) {
	collection := outputCollections[mnt.PortableDataHash]
	if collection.PortableDataHash == "" {
		err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection)
		if err != nil {
			return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err)
		}
		outputCollections[mnt.PortableDataHash] = collection
	}

	if collection.ManifestText == "" {
		runner.CrunchLog.Printf("No manifest text for collection %v", collection.PortableDataHash)
		return "", nil
	}

	mft := manifest.Manifest{Text: collection.ManifestText}
	extracted := mft.Extract(mnt.Path, bindSuffix)
	if extracted.Err != nil {
		return "", fmt.Errorf("Error parsing manifest for %v: %v", mnt.PortableDataHash, extracted.Err.Error())
	}
	return extracted.Text, nil
}

// CleanupDirs unmounts the keep mount and removes temporary directories.
func (runner *ContainerRunner) CleanupDirs() {
	if runner.ArvMount != nil {
		delay := int64(8)
		umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
		umount.Stdout = runner.CrunchLog
		umount.Stderr = runner.CrunchLog
		runner.CrunchLog.Printf("Running %v", umount.Args)
		umnterr := umount.Start()

		if umnterr != nil {
			runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
		} else {
			// If arv-mount --unmount gets stuck for any reason, we
			// don't want to wait for it forever. Do Wait() in a goroutine
			// so it doesn't block crunch-run.
			umountExit := make(chan error)
			go func() {
				mnterr := umount.Wait()
				if mnterr != nil {
					runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
				}
				umountExit <- mnterr
			}()

			for again := true; again; {
				again = false
				select {
				case <-umountExit:
					umount = nil
					again = true
				case <-runner.ArvMountExit:
					break
				case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
					runner.CrunchLog.Printf("Timed out waiting for unmount")
					if umount != nil {
						umount.Process.Kill()
					}
					runner.ArvMount.Process.Kill()
				}
			}
		}
	}

	if runner.ArvMountPoint != "" {
		if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
			runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
		}
	}

	for _, tmpdir := range runner.CleanupTempDir {
		if rmerr := os.RemoveAll(tmpdir); rmerr != nil {
			runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", tmpdir, rmerr)
		}
	}
}

// CommitLogs posts the collection containing the final container logs.
func (runner *ContainerRunner) CommitLogs() error {
	runner.CrunchLog.Print(runner.finalState)

	if runner.arvMountLog != nil {
		runner.arvMountLog.Close()
	}
	runner.CrunchLog.Close()

	// Closing CrunchLog above allows the log messages buffered so far to
	// be committed to Keep at this point, but re-open the crunch log with
	// ArvClient in case there are any further errors (such as failing to
	// write the log to Keep!) while shutting down.
	runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
		UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
	runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)

	if runner.LogsPDH != nil {
		// If we have already assigned something to LogsPDH,
		// we must be closing the re-opened log, which won't
		// end up getting attached to the container record and
		// therefore doesn't need to be saved as a collection
		// -- it exists only to send logs to other channels.
		return nil
	}

	mt, err := runner.LogCollection.ManifestText()
	if err != nil {
		return fmt.Errorf("While creating log manifest: %v", err)
	}

	var response arvados.Collection
	err = runner.ArvClient.Create("collections",
		arvadosclient.Dict{
			"ensure_unique_name": true,
			"collection": arvadosclient.Dict{
				"name":          "logs for " + runner.Container.UUID,
				"manifest_text": mt}},
		&response)
	if err != nil {
		return fmt.Errorf("While creating log collection: %v", err)
	}
	runner.LogsPDH = &response.PortableDataHash
	return nil
}

// UpdateContainerRunning updates the container state to "Running"
func (runner *ContainerRunner) UpdateContainerRunning() error {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	return runner.ArvClient.Update("containers", runner.Container.UUID,
		arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
}

// ContainerToken returns the api_token the container (and any
// arv-mount processes) are allowed to use.
func (runner *ContainerRunner) ContainerToken() (string, error) {
	if runner.token != "" {
		return runner.token, nil
	}

	var auth arvados.APIClientAuthorization
	err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
	if err != nil {
		return "", err
	}
	runner.token = auth.APIToken
	return runner.token, nil
}

// UpdateContainerFinal updates the container record state on the API
// server to "Complete" or "Cancelled".
func (runner *ContainerRunner) UpdateContainerFinal() error {
	update := arvadosclient.Dict{}
	update["state"] = runner.finalState
	if runner.LogsPDH != nil {
		update["log"] = *runner.LogsPDH
	}
	if runner.finalState == "Complete" {
		if runner.ExitCode != nil {
			update["exit_code"] = *runner.ExitCode
		}
		if runner.OutputPDH != nil {
			update["output"] = *runner.OutputPDH
		}
	}
	return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
}

// IsCancelled returns the value of Cancelled, with goroutine safety.
func (runner *ContainerRunner) IsCancelled() bool {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	return runner.cCancelled
}

// NewArvLogWriter creates an ArvLogWriter
func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
	return &ArvLogWriter{
		ArvClient:     runner.ArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: name,
		writeCloser:   runner.LogCollection.Open(name + ".txt")}
}

// Run the full container lifecycle.
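//
// The phases, in order: fetch the container record, set up signal handling,
// load the Docker image, set up FUSE mounts and binds, create the container,
// log the host, node, and container records, mark the container Running,
// start crunchstat reporting, start the container, and wait for it to
// finish; deferred handlers then capture output, commit logs, write the
// final state to the API server, and clean up.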
func (runner *ContainerRunner) Run() (err error) {
	runner.CrunchLog.Printf("crunch-run %s started", version)
	runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)

	hostname, hosterr := os.Hostname()
	if hosterr != nil {
		runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
	} else {
		runner.CrunchLog.Printf("Executing on host '%s'", hostname)
	}

	runner.finalState = "Queued"

	defer func() {
		runner.stopSignals()
		runner.CleanupDirs()

		runner.CrunchLog.Printf("crunch-run finished")
		runner.CrunchLog.Close()
	}()

	defer func() {
		// checkErr prints e (unless it's nil) and sets err to
		// e (unless err is already non-nil). Thus, if err
		// hasn't already been assigned when Run() returns,
		// this cleanup func will cause Run() to return the
		// first non-nil error that is passed to checkErr().
		checkErr := func(e error) {
			if e == nil {
				return
			}
			runner.CrunchLog.Print(e)
			if err == nil {
				err = e
			}
			if runner.finalState == "Complete" {
				// There was an error in the finalization.
				runner.finalState = "Cancelled"
			}
		}

		// Log the error encountered in Run(), if any
		checkErr(err)

		if runner.finalState == "Queued" {
			runner.UpdateContainerFinal()
			return
		}

		if runner.IsCancelled() {
			runner.finalState = "Cancelled"
			// but don't return yet -- we still want to
			// capture partial output and write logs
		}

		checkErr(runner.CaptureOutput())
		checkErr(runner.CommitLogs())
		checkErr(runner.UpdateContainerFinal())
	}()

	err = runner.fetchContainerRecord()
	if err != nil {
		return
	}

	// setup signal handling
	runner.setupSignals()

	// check for and/or load image
	err = runner.LoadImage()
	if err != nil {
		if !runner.checkBrokenNode(err) {
			// Failed to load image but not due to a "broken node"
			// condition, probably user error.
			runner.finalState = "Cancelled"
		}
		err = fmt.Errorf("While loading container image: %v", err)
		return
	}

	// set up FUSE mount and binds
	err = runner.SetupMounts()
	if err != nil {
		runner.finalState = "Cancelled"
		err = fmt.Errorf("While setting up mounts: %v", err)
		return
	}

	err = runner.CreateContainer()
	if err != nil {
		return
	}
	err = runner.LogHostInfo()
	if err != nil {
		return
	}
	err = runner.LogNodeRecord()
	if err != nil {
		return
	}
	err = runner.LogContainerRecord()
	if err != nil {
		return
	}

	if runner.IsCancelled() {
		return
	}

	err = runner.UpdateContainerRunning()
	if err != nil {
		return
	}
	runner.finalState = "Cancelled"

	runner.StartCrunchstat()

	err = runner.StartContainer()
	if err != nil {
		runner.checkBrokenNode(err)
		return
	}

	err = runner.WaitFinish()
	if err == nil && !runner.IsCancelled() {
		runner.finalState = "Complete"
	}
	return
}

// Fetch the current container record (uuid = runner.Container.UUID)
// into runner.Container.
func (runner *ContainerRunner) fetchContainerRecord() error {
	reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
	if err != nil {
		return fmt.Errorf("error fetching container record: %v", err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	err = dec.Decode(&runner.Container)
	if err != nil {
		return fmt.Errorf("error decoding container record: %v", err)
	}
	return nil
}

// NewContainerRunner creates a new container runner.
func NewContainerRunner(api IArvadosClient,
	kc IKeepClient,
	docker ThinDockerClient,
	containerUUID string) *ContainerRunner {

	cr := &ContainerRunner{ArvClient: api, Kc: kc, Docker: docker}
	cr.NewLogWriter = cr.NewArvLogWriter
	cr.RunArvMount = cr.ArvMountCmd
	cr.MkTempDir = ioutil.TempDir
	cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
	cr.Container.UUID = containerUUID
	cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
	cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)

	loadLogThrottleParams(api)

	return cr
}

func main() {
	statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
	cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
	cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
	cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
	caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
	enableNetwork := flag.String("container-enable-networking", "default",
		`Specify if networking should be enabled for container. One of 'default', 'always':
	default: only enable networking if container requests it.
	always:  containers always have networking enabled
	`)
	networkMode := flag.String("container-network-mode", "default",
		`Set networking mode for container. Corresponds to Docker network mode (--net).
	`)
	memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
	getVersion := flag.Bool("version", false, "Print version information and exit.")
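
	// A typical invocation (illustrative; the positional argument is the
	// UUID of the container to run):
	//
	//	crunch-run -container-enable-networking=default -cgroup-parent-subsystem=memory <container-uuid>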
	flag.Parse()

	// Print version information if requested
	if *getVersion {
		fmt.Printf("crunch-run %s\n", version)
		os.Exit(0)
	}

	log.Printf("crunch-run %s started", version)

	containerId := flag.Arg(0)

	if *caCertsPath != "" {
		arvadosclient.CertFiles = []string{*caCertsPath}
	}

	api, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Fatalf("%s: %v", containerId, err)
	}

	kc, kcerr := keepclient.MakeKeepClient(api)
	if kcerr != nil {
		log.Fatalf("%s: %v", containerId, kcerr)
	}
	kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}

	// API version 1.21 corresponds to Docker 1.9, which is currently the
	// minimum version we want to support.
	docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)

	cr := NewContainerRunner(api, kc, docker, containerId)
	if dockererr != nil {
		cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
		cr.checkBrokenNode(dockererr)
		cr.CrunchLog.Close()
		os.Exit(1)
	}

	cr.statInterval = *statInterval
	cr.cgroupRoot = *cgroupRoot
	cr.expectCgroupParent = *cgroupParent
	cr.enableNetwork = *enableNetwork
	cr.networkMode = *networkMode
	if *cgroupParentSubsystem != "" {
		p := findCgroup(*cgroupParentSubsystem)
		cr.setCgroupParent = p
		cr.expectCgroupParent = p
	}

	runerr := cr.Run()

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Printf("could not create memory profile: %s", err)
		}
		runtime.GC() // get up-to-date statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Printf("could not write memory profile: %s", err)
		}
		closeerr := f.Close()
		if closeerr != nil {
			log.Printf("closing memprofile file: %s", closeerr)
		}
	}

	if runerr != nil {
		log.Fatalf("%s: %v", containerId, runerr)
	}
}