X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/060d38d627bd1e51dd2b3c6e7de9af6aa7d7b6f3..80081626101a2193d7b916fe488903b66777576e:/services/crunch-run/crunchrun.go

diff --git a/services/crunch-run/crunchrun.go b/services/crunch-run/crunchrun.go
index 810955c0da..53815cbe1c 100644
--- a/services/crunch-run/crunchrun.go
+++ b/services/crunch-run/crunchrun.go
@@ -6,7 +6,6 @@ package main
 
 import (
 	"bytes"
-	"context"
 	"encoding/json"
 	"errors"
 	"flag"
@@ -19,6 +18,9 @@ import (
 	"os/signal"
 	"path"
 	"path/filepath"
+	"regexp"
+	"runtime"
+	"runtime/pprof"
 	"sort"
 	"strings"
 	"sync"
@@ -30,6 +32,7 @@ import (
 	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
 	"git.curoverse.com/arvados.git/sdk/go/keepclient"
 	"git.curoverse.com/arvados.git/sdk/go/manifest"
+	"golang.org/x/net/context"
 
 	dockertypes "github.com/docker/docker/api/types"
 	dockercontainer "github.com/docker/docker/api/types/container"
@@ -37,6 +40,8 @@ import (
 	dockerclient "github.com/docker/docker/client"
 )
 
+var version = "dev"
+
 // IArvadosClient is the minimal Arvados API methods used by crunch-run.
 type IArvadosClient interface {
 	Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
@@ -54,6 +59,7 @@ var ErrCancelled = errors.New("Cancelled")
 type IKeepClient interface {
 	PutHB(hash string, buf []byte) (string, int, error)
 	ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
+	ClearBlockCache()
 }
 
 // NewLogWriter is a factory function to create a new log writer.
@@ -69,60 +75,13 @@ type ThinDockerClient interface {
 	ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
 		networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
 	ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
-	ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
+	ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
 	ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
 	ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
 	ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
 	ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
 }
 
-// ThinDockerClientProxy is a proxy implementation of ThinDockerClient
-// that executes the docker requests on dockerclient.Client
-type ThinDockerClientProxy struct {
-	Docker *dockerclient.Client
-}
-
-// ContainerAttach invokes dockerclient.Client.ContainerAttach
-func (proxy ThinDockerClientProxy) ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) {
-	return proxy.Docker.ContainerAttach(ctx, container, options)
-}
-
-// ContainerCreate invokes dockerclient.Client.ContainerCreate
-func (proxy ThinDockerClientProxy) ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
-	networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) {
-	return proxy.Docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
-}
-
-// ContainerStart invokes dockerclient.Client.ContainerStart
-func (proxy ThinDockerClientProxy) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
-	return proxy.Docker.ContainerStart(ctx, container, options)
-}
-
-// ContainerStop invokes dockerclient.Client.ContainerStop
-func (proxy ThinDockerClientProxy) ContainerStop(ctx context.Context, container string, timeout *time.Duration) error {
-	return proxy.Docker.ContainerStop(ctx, container, timeout)
-}
-
-// ContainerWait invokes dockerclient.Client.ContainerWait
-func (proxy ThinDockerClientProxy) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
-	return proxy.Docker.ContainerWait(ctx, container, condition)
-}
-
-// ImageInspectWithRaw invokes dockerclient.Client.ImageInspectWithRaw
-func (proxy ThinDockerClientProxy) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
-	return proxy.Docker.ImageInspectWithRaw(ctx, image)
-}
-
-// ImageLoad invokes dockerclient.Client.ImageLoad
-func (proxy ThinDockerClientProxy) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
-	return proxy.Docker.ImageLoad(ctx, input, quiet)
-}
-
-// ImageRemove invokes dockerclient.Client.ImageRemove
-func (proxy ThinDockerClientProxy) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
-	return proxy.Docker.ImageRemove(ctx, image, options)
-}
-
 // ContainerRunner is the main stateful struct used for a single execution of a
 // container.
 type ContainerRunner struct {
@@ -144,21 +103,25 @@ type ContainerRunner struct {
 	LogsPDH       *string
 	RunArvMount
 	MkTempDir
-	ArvMount       *exec.Cmd
-	ArvMountPoint  string
-	HostOutputDir  string
-	CleanupTempDir []string
-	Binds          []string
-	Volumes        map[string]struct{}
-	OutputPDH      *string
-	SigChan        chan os.Signal
-	ArvMountExit   chan error
-	finalState     string
-
-	statLogger   io.WriteCloser
-	statReporter *crunchstat.Reporter
-	statInterval time.Duration
-	cgroupRoot   string
+	ArvMount      *exec.Cmd
+	ArvMountPoint string
+	HostOutputDir string
+	Binds         []string
+	Volumes       map[string]struct{}
+	OutputPDH     *string
+	SigChan       chan os.Signal
+	ArvMountExit  chan error
+	SecretMounts  map[string]arvados.Mount
+	MkArvClient   func(token string) (IArvadosClient, error)
+	finalState    string
+	parentTemp    string
+
+	statLogger       io.WriteCloser
+	statReporter     *crunchstat.Reporter
+	hoststatLogger   io.WriteCloser
+	hoststatReporter *crunchstat.Reporter
+	statInterval     time.Duration
+	cgroupRoot       string
 	// What we expect the container's cgroup parent to be.
 	expectCgroupParent string
 	// What we tell docker to use as the container's cgroup
@@ -174,43 +137,73 @@ type ContainerRunner struct {
 	setCgroupParent string
 
 	cStateLock sync.Mutex
-	cStarted   bool // StartContainer() succeeded
 	cCancelled bool // StopContainer() invoked
 
 	enableNetwork string // one of "default" or "always"
 	networkMode   string // passed through to HostConfig.NetworkMode
+	arvMountLog   *ThrottledLogger
 }
 
-// SetupSignals sets up signal handling to gracefully terminate the underlying
+// setupSignals sets up signal handling to gracefully terminate the underlying
 // Docker container and update state when receiving a TERM, INT or QUIT signal.
-func (runner *ContainerRunner) SetupSignals() {
+func (runner *ContainerRunner) setupSignals() {
 	runner.SigChan = make(chan os.Signal, 1)
 	signal.Notify(runner.SigChan, syscall.SIGTERM)
 	signal.Notify(runner.SigChan, syscall.SIGINT)
 	signal.Notify(runner.SigChan, syscall.SIGQUIT)
 
 	go func(sig chan os.Signal) {
-		<-sig
-		runner.stop()
-		signal.Stop(sig)
+		for s := range sig {
+			runner.stop(s)
+		}
 	}(runner.SigChan)
 }
 
 // stop the underlying Docker container.
-func (runner *ContainerRunner) stop() {
+func (runner *ContainerRunner) stop(sig os.Signal) {
 	runner.cStateLock.Lock()
 	defer runner.cStateLock.Unlock()
-	if runner.cCancelled {
+	if sig != nil {
+		runner.CrunchLog.Printf("caught signal: %v", sig)
+	}
+	if runner.ContainerID == "" {
 		return
 	}
 	runner.cCancelled = true
-	if runner.cStarted {
-		timeout := time.Duration(10)
-		err := runner.Docker.ContainerStop(context.TODO(), runner.ContainerID, &(timeout))
-		if err != nil {
-			log.Printf("StopContainer failed: %s", err)
+	runner.CrunchLog.Printf("removing container")
+	err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
+	if err != nil {
+		runner.CrunchLog.Printf("error removing container: %s", err)
+	}
+}
+
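+// errorBlacklist matches error messages that indicate the node itself is
+// broken (for example, the Docker daemon is unreachable) rather than a
+// problem with the container being run.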
+var errorBlacklist = []string{
+	"(?ms).*[Cc]annot connect to the Docker daemon.*",
+	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
+}
+var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
+
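+// checkBrokenNode reports whether goterr matches errorBlacklist. If it does,
+// the configured broken-node hook (if any) is run so the node can be taken
+// out of service.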
+func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
+	for _, d := range errorBlacklist {
+		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
+			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
+			if *brokenNodeHook == "" {
+				runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
+			} else {
+				runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
+				// run killme script
+				c := exec.Command(*brokenNodeHook)
+				c.Stdout = runner.CrunchLog
+				c.Stderr = runner.CrunchLog
+				err := c.Run()
+				if err != nil {
+					runner.CrunchLog.Printf("Error running broken node hook: %v", err)
+				}
+			}
+			return true
 		}
 	}
+	return false
 }
 
 // LoadImage determines the docker image id from the container record and
@@ -247,17 +240,25 @@ func (runner *ContainerRunner) LoadImage() (err error) {
 			return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
 		}
 
-		response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, false)
+		response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
 		if err != nil {
 			return fmt.Errorf("While loading container image into Docker: %v", err)
 		}
-		response.Body.Close()
+
+		defer response.Body.Close()
+		rbody, err := ioutil.ReadAll(response.Body)
+		if err != nil {
+			return fmt.Errorf("Reading response to image load: %v", err)
+		}
+		runner.CrunchLog.Printf("Docker response: %s", rbody)
 	} else {
 		runner.CrunchLog.Print("Docker image is available")
 	}
 
 	runner.ContainerConfig.Image = imageID
 
+	runner.Kc.ClearBlockCache()
+
 	return nil
 }
 
@@ -274,9 +275,11 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
 	}
 	c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
 
-	nt := NewThrottledLogger(runner.NewLogWriter("arv-mount"))
-	c.Stdout = nt
-	c.Stderr = nt
+	runner.arvMountLog = NewThrottledLogger(runner.NewLogWriter("arv-mount"))
+	c.Stdout = runner.arvMountLog
+	c.Stderr = runner.arvMountLog
+
+	runner.CrunchLog.Printf("Running %v", c.Args)
 
 	err = c.Start()
 	if err != nil {
@@ -300,7 +303,11 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
 	}()
 
 	go func() {
-		runner.ArvMountExit <- c.Wait()
+		mnterr := c.Wait()
+		if mnterr != nil {
+			runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
+		}
+		runner.ArvMountExit <- mnterr
 		close(runner.ArvMountExit)
 	}()
 
@@ -318,22 +325,60 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
 
 func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
 	if runner.ArvMountPoint == "" {
-		runner.ArvMountPoint, err = runner.MkTempDir("", prefix)
+		runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
 	}
 	return
 }
 
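+// copyfile copies the contents of src to dst, creating any missing parent
+// directories, and returns the first error encountered while copying or
+// closing either file.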
+func copyfile(src string, dst string) (err error) {
+	srcfile, err := os.Open(src)
+	if err != nil {
+		return
+	}
+
+	os.MkdirAll(path.Dir(dst), 0777)
+
+	dstfile, err := os.Create(dst)
+	if err != nil {
+		return
+	}
+	_, err = io.Copy(dstfile, srcfile)
+	if err != nil {
+		return
+	}
+
+	err = srcfile.Close()
+	err2 := dstfile.Close()
+
+	if err != nil {
+		return
+	}
+
+	if err2 != nil {
+		return err2
+	}
+
+	return nil
+}
+
 func (runner *ContainerRunner) SetupMounts() (err error) {
 	err = runner.SetupArvMountPoint("keep")
 	if err != nil {
 		return fmt.Errorf("While creating keep mount temp dir: %v", err)
 	}
 
-	runner.CleanupTempDir = append(runner.CleanupTempDir, runner.ArvMountPoint)
+	token, err := runner.ContainerToken()
+	if err != nil {
+		return fmt.Errorf("could not get container token: %s", err)
+	}
 
 	pdhOnly := true
 	tmpcount := 0
-	arvMountCmd := []string{"--foreground", "--allow-other", "--read-write"}
+	arvMountCmd := []string{
+		"--foreground",
+		"--allow-other",
+		"--read-write",
+		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
 
 	if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
 		arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
@@ -343,15 +388,34 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 	runner.Binds = nil
 	runner.Volumes = make(map[string]struct{})
 	needCertMount := true
+	type copyFile struct {
+		src  string
+		bind string
+	}
+	var copyFiles []copyFile
 
 	var binds []string
-	for bind, _ := range runner.Container.Mounts {
+	for bind := range runner.Container.Mounts {
+		binds = append(binds, bind)
+	}
+	for bind := range runner.SecretMounts {
+		if _, ok := runner.Container.Mounts[bind]; ok {
+			return fmt.Errorf("Secret mount %q conflicts with regular mount", bind)
+		}
+		if runner.SecretMounts[bind].Kind != "json" &&
+			runner.SecretMounts[bind].Kind != "text" {
+			return fmt.Errorf("Secret mount %q type is %q but only 'json' and 'text' are permitted.",
+				bind, runner.SecretMounts[bind].Kind)
+		}
 		binds = append(binds, bind)
 	}
 	sort.Strings(binds)
 
 	for _, bind := range binds {
-		mnt := runner.Container.Mounts[bind]
+		mnt, ok := runner.Container.Mounts[bind]
+		if !ok {
+			mnt = runner.SecretMounts[bind]
+		}
 		if bind == "stdout" || bind == "stderr" {
 			// Is it a "file" mount kind?
 			if mnt.Kind != "file" {
@@ -380,8 +444,8 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 		}
 
 		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
-			if mnt.Kind != "collection" {
-				return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind)
+			if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
+				return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
 			}
 		}
 
@@ -398,7 +462,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 				pdhOnly = false
 				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
 			} else if mnt.PortableDataHash != "" {
-				if mnt.Writable {
+				if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
 					return fmt.Errorf("Can never write to a collection specified by portable data hash")
 				}
 				idx := strings.Index(mnt.PortableDataHash, "/")
@@ -425,10 +489,12 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 			if mnt.Writable {
 				if bind == runner.Container.OutputPath {
 					runner.HostOutputDir = src
+					runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
 				} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
-					return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
+					copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+				} else {
+					runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
 				}
-				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
 			} else {
 				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
 			}
@@ -436,7 +502,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 
 		case mnt.Kind == "tmp":
 			var tmpdir string
-			tmpdir, err = runner.MkTempDir("", "")
+			tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
 			if err != nil {
 				return fmt.Errorf("While creating mount temp dir: %v", err)
 			}
@@ -448,33 +514,51 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 			if staterr != nil {
 				return fmt.Errorf("While Chmod temp dir: %v", err)
 			}
-			runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
 			runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
 			if bind == runner.Container.OutputPath {
 				runner.HostOutputDir = tmpdir
 			}
 
-		case mnt.Kind == "json":
-			jsondata, err := json.Marshal(mnt.Content)
-			if err != nil {
-				return fmt.Errorf("encoding json data: %v", err)
+		case mnt.Kind == "json" || mnt.Kind == "text":
+			var filedata []byte
+			if mnt.Kind == "json" {
+				filedata, err = json.Marshal(mnt.Content)
+				if err != nil {
+					return fmt.Errorf("encoding json data: %v", err)
+				}
+			} else {
+				text, ok := mnt.Content.(string)
+				if !ok {
+					return fmt.Errorf("content for mount %q must be a string", bind)
+				}
+				filedata = []byte(text)
 			}
-			// Create a tempdir with a single file
-			// (instead of just a tempfile): this way we
-			// can ensure the file is world-readable
-			// inside the container, without having to
-			// make it world-readable on the docker host.
-			tmpdir, err := runner.MkTempDir("", "")
+
+			tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
 			if err != nil {
 				return fmt.Errorf("creating temp dir: %v", err)
 			}
-			runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
-			tmpfn := filepath.Join(tmpdir, "mountdata.json")
-			err = ioutil.WriteFile(tmpfn, jsondata, 0644)
+			tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
+			err = ioutil.WriteFile(tmpfn, filedata, 0444)
 			if err != nil {
 				return fmt.Errorf("writing temp file: %v", err)
 			}
-			runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+			if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+				copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+			} else {
+				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+			}
+
+		case mnt.Kind == "git_tree":
+			tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
+			if err != nil {
+				return fmt.Errorf("creating temp dir: %v", err)
+			}
+			err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
+			if err != nil {
+				return err
+			}
+			runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
 		}
 	}
 
@@ -499,11 +583,6 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 	}
 
 	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
 
-	token, err := runner.ContainerToken()
-	if err != nil {
-		return fmt.Errorf("could not get container token: %s", err)
-	}
-
 	runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
 	if err != nil {
 		return fmt.Errorf("While trying to start arv-mount: %v", err)
@@ -516,59 +595,118 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 		}
 	}
 
+	for _, cp := range copyFiles {
+		st, err := os.Stat(cp.src)
+		if err != nil {
+			return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+		}
+		if st.IsDir() {
+			err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
+				if walkerr != nil {
+					return walkerr
+				}
+				target := path.Join(cp.bind, walkpath[len(cp.src):])
+				if walkinfo.Mode().IsRegular() {
+					copyerr := copyfile(walkpath, target)
+					if copyerr != nil {
+						return copyerr
+					}
+					return os.Chmod(target, walkinfo.Mode()|0777)
+				} else if walkinfo.Mode().IsDir() {
+					mkerr := os.MkdirAll(target, 0777)
+					if mkerr != nil {
+						return mkerr
+					}
+					return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
+				} else {
+					return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
+				}
+			})
+		} else if st.Mode().IsRegular() {
+			err = copyfile(cp.src, cp.bind)
+			if err == nil {
+				err = os.Chmod(cp.bind, st.Mode()|0777)
+			}
+		}
+		if err != nil {
+			return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+		}
+	}
+
 	return nil
 }
 
 func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
 	// Handle docker log protocol
 	// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
+	defer close(runner.loggingDone)
 
 	header := make([]byte, 8)
-	for {
-		_, readerr := io.ReadAtLeast(containerReader, header, 8)
-
-		if readerr == nil {
-			readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
-			if header[0] == 1 {
-				// stdout
-				_, readerr = io.CopyN(runner.Stdout, containerReader, readsize)
-			} else {
-				// stderr
-				_, readerr = io.CopyN(runner.Stderr, containerReader, readsize)
+	var err error
+	for err == nil {
+		_, err = io.ReadAtLeast(containerReader, header, 8)
+		if err != nil {
+			if err == io.EOF {
+				err = nil
 			}
+			break
 		}
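+		// Each frame in the attach stream starts with an 8-byte header:
+		// byte 0 is the stream ID (1=stdout, 2=stderr) and bytes 4-7 hold
+		// the payload size as a big-endian uint32.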
if mnt.Kind != "file" { @@ -380,8 +444,8 @@ func (runner *ContainerRunner) SetupMounts() (err error) { } if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" { - if mnt.Kind != "collection" { - return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind) + if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" { + return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind) } } @@ -398,7 +462,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) { pdhOnly = false src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID) } else if mnt.PortableDataHash != "" { - if mnt.Writable { + if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") { return fmt.Errorf("Can never write to a collection specified by portable data hash") } idx := strings.Index(mnt.PortableDataHash, "/") @@ -425,10 +489,12 @@ func (runner *ContainerRunner) SetupMounts() (err error) { if mnt.Writable { if bind == runner.Container.OutputPath { runner.HostOutputDir = src + runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind)) } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") { - return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind) + copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]}) + } else { + runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind)) } - runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind)) } else { runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind)) } @@ -436,7 +502,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) { case mnt.Kind == "tmp": var tmpdir string - tmpdir, err = runner.MkTempDir("", "") + tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp") if err != nil { return fmt.Errorf("While creating mount temp dir: %v", err) } @@ -448,33 +514,51 @@ func (runner *ContainerRunner) SetupMounts() (err error) { if staterr != nil { return fmt.Errorf("While Chmod temp dir: %v", err) } - runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir) runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind)) if bind == runner.Container.OutputPath { runner.HostOutputDir = tmpdir } - case mnt.Kind == "json": - jsondata, err := json.Marshal(mnt.Content) - if err != nil { - return fmt.Errorf("encoding json data: %v", err) + case mnt.Kind == "json" || mnt.Kind == "text": + var filedata []byte + if mnt.Kind == "json" { + filedata, err = json.Marshal(mnt.Content) + if err != nil { + return fmt.Errorf("encoding json data: %v", err) + } + } else { + text, ok := mnt.Content.(string) + if !ok { + return fmt.Errorf("content for mount %q must be a string", bind) + } + filedata = []byte(text) } - // Create a tempdir with a single file - // (instead of just a tempfile): this way we - // can ensure the file is world-readable - // inside the container, without having to - // make it world-readable on the docker host. 
+func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
 	w := &ArvLogWriter{
 		ArvClient:     runner.ArvClient,
 		UUID:          runner.Container.UUID,
-		loggingStream: "container",
-		writeCloser:   runner.LogCollection.Open("container.json"),
+		loggingStream: label,
+		writeCloser:   runner.LogCollection.Open(label + ".json"),
 	}
 
-	// Get Container record JSON from the API Server
-	reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+	reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
 	if err != nil {
-		return fmt.Errorf("While retrieving container record from the API server: %v", err)
+		return false, fmt.Errorf("error getting %s record: %v", label, err)
 	}
 	defer reader.Close()
-	// Read the API server response as []byte
-	json_bytes, err := ioutil.ReadAll(reader)
-	if err != nil {
-		return fmt.Errorf("While reading container record API server response: %v", err)
+
+	dec := json.NewDecoder(reader)
+	dec.UseNumber()
+	var resp map[string]interface{}
+	if err = dec.Decode(&resp); err != nil {
+		return false, fmt.Errorf("error decoding %s list response: %v", label, err)
 	}
-	// Decode the JSON []byte
-	var cr map[string]interface{}
-	if err = json.Unmarshal(json_bytes, &cr); err != nil {
-		return fmt.Errorf("While decoding the container record JSON response: %v", err)
+	items, ok := resp["items"].([]interface{})
+	if !ok {
+		return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
+	} else if len(items) < 1 {
+		return false, nil
+	}
+	if munge != nil {
+		munge(items[0])
 	}
 
 	// Re-encode it using indentation to improve readability
 	enc := json.NewEncoder(w)
 	enc.SetIndent("", "    ")
-	if err = enc.Encode(cr); err != nil {
-		return fmt.Errorf("While logging the JSON container record: %v", err)
+	if err = enc.Encode(items[0]); err != nil {
+		return false, fmt.Errorf("error logging %s record: %v", label, err)
 	}
 	err = w.Close()
 	if err != nil {
-		return fmt.Errorf("While closing container.json log: %v", err)
+		return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
 	}
-	return nil
+	return true, nil
 }
 
 // AttachStreams connects the docker container stdin, stdout and stderr logs
@@ -742,7 +912,7 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
 			_, err := io.Copy(response.Conn, stdinRdr)
 			if err != nil {
 				runner.CrunchLog.Print("While writing stdin collection to docker container %q", err)
-				runner.stop()
+				runner.stop(nil)
 			}
 			stdinRdr.Close()
 			response.CloseWrite()
@@ -752,7 +922,7 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
 			_, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
 			if err != nil {
 				runner.CrunchLog.Print("While writing stdin json to docker container %q", err)
-				runner.stop()
+				runner.stop(nil)
 			}
 			response.CloseWrite()
 		}()
@@ -803,6 +973,7 @@ func (runner *ContainerRunner) CreateContainer() error {
 
 	runner.ContainerConfig.Volumes = runner.Volumes
 
+	maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
 	runner.HostConfig = dockercontainer.HostConfig{
 		Binds: runner.Binds,
 		LogConfig: dockercontainer.LogConfig{
@@ -810,6 +981,10 @@ func (runner *ContainerRunner) CreateContainer() error {
 		},
 		Resources: dockercontainer.Resources{
 			CgroupParent: runner.setCgroupParent,
+			NanoCPUs:     int64(runner.Container.RuntimeConstraints.VCPUs) * 1000000000,
+			Memory:       maxRAM, // RAM
+			MemorySwap:   maxRAM, // RAM+swap
+			KernelMemory: maxRAM, // kernel portion
 		},
 	}
 
@@ -860,54 +1035,222 @@
 	err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID, dockertypes.ContainerStartOptions{})
 	if err != nil {
-		return fmt.Errorf("could not start container: %v", err)
+		var advice string
+		if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
+			advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
+		}
+		return fmt.Errorf("could not start container: %v%s", err, advice)
 	}
-	runner.cStarted = true
 	return nil
 }
 
 // WaitFinish waits for the container to terminate, capture the exit code, and
 // close the stdout/stderr logging.
-func (runner *ContainerRunner) WaitFinish() (err error) {
+func (runner *ContainerRunner) WaitFinish() error {
 	runner.CrunchLog.Print("Waiting for container to finish")
 
-	waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, "not-running")
+	waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
+	arvMountExit := runner.ArvMountExit
+	for {
+		select {
+		case waitBody := <-waitOk:
+			runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
+			code := int(waitBody.StatusCode)
+			runner.ExitCode = &code
+
+			// wait for stdout/stderr to complete
+			<-runner.loggingDone
+			return nil
 
-	var waitBody dockercontainer.ContainerWaitOKBody
-	select {
-	case waitBody = <-waitOk:
-	case err = <-waitErr:
+		case err := <-waitErr:
+			return fmt.Errorf("container wait: %v", err)
+
+		case <-arvMountExit:
+			runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
+			runner.stop(nil)
+			// arvMountExit will always be ready now that
+			// it's closed, but that doesn't interest us.
+			arvMountExit = nil
+		}
 	}
+}
 
-	if err != nil {
-		return fmt.Errorf("container wait: %v", err)
+var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")
+
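+// derefOutputSymlink follows the symlink chain starting at path and returns
+// the final target and its Lstat info. A target that leaves the output
+// directory is reported as ErrNotInOutputDir so the caller can check whether
+// it lands on a collection mount instead.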
+func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
+	// Follow symlinks if necessary
+	info = startinfo
+	tgt = path
+	readlinktgt = ""
+	nextlink := path
+	for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
+		if followed >= limitFollowSymlinks {
+			// Got stuck in a loop or just a pathological number of links, give up.
+			err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
+			return
+		}
+
+		readlinktgt, err = os.Readlink(nextlink)
+		if err != nil {
+			return
+		}
+
+		tgt = readlinktgt
+		if !strings.HasPrefix(tgt, "/") {
+			// Relative symlink, resolve it to host path
+			tgt = filepath.Join(filepath.Dir(path), tgt)
+		}
+		if strings.HasPrefix(tgt, runner.Container.OutputPath+"/") && !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
+			// Absolute symlink to container output path, adjust it to host output path.
+			tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
+		}
+		if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
+			// After dereferencing, symlink target must either be
+			// within output directory, or must point to a
+			// collection mount.
+			err = ErrNotInOutputDir
+			return
+		}
+
+		info, err = os.Lstat(tgt)
+		if err != nil {
+			// tgt
+			err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
+				path[len(runner.HostOutputDir):], readlinktgt, err)
+			return
+		}
+
+		nextlink = tgt
 	}
 
-	runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
-	code := int(waitBody.StatusCode)
-	runner.ExitCode = &code
+	return
+}
+
+var limitFollowSymlinks = 10
 
-	waitMount := runner.ArvMountExit
-	select {
-	case err = <-waitMount:
-		runner.CrunchLog.Printf("arv-mount exited before container finished: %v", err)
-		waitMount = nil
-		runner.stop()
-	default:
+
+// UploadFile uploads files within the output directory, with special handling
+// for symlinks. If the symlink leads to a keep mount, copy the manifest text
+// from the keep mount into the output manifestText. Ensure that whether
+// symlinks are relative or absolute, every symlink target (even targets that
+// are symlinks themselves) must point to a path in either the output
+// directory or a collection mount.
+//
+// Assumes initial value of "path" is absolute, and located within runner.HostOutputDir.
+func (runner *ContainerRunner) UploadOutputFile(
+	path string,
+	info os.FileInfo,
+	infoerr error,
+	binds []string,
+	walkUpload *WalkUpload,
+	relocateFrom string,
+	relocateTo string,
+	followed int) (manifestText string, err error) {
+
+	if infoerr != nil {
+		return "", infoerr
+	}
+
+	if info.Mode().IsDir() {
+		// if empty, need to create a .keep file
+		dir, direrr := os.Open(path)
+		if direrr != nil {
+			return "", direrr
+		}
+		defer dir.Close()
+		names, eof := dir.Readdirnames(1)
+		if len(names) == 0 && eof == io.EOF && path != runner.HostOutputDir {
+			containerPath := runner.OutputPath + path[len(runner.HostOutputDir):]
+			for _, bind := range binds {
+				mnt := runner.Container.Mounts[bind]
+				// Check if there is a bind for this
+				// directory, in which case assume we don't need .keep
+				if (containerPath == bind || strings.HasPrefix(containerPath, bind+"/")) && mnt.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
+					return
+				}
+			}
+			outputSuffix := path[len(runner.HostOutputDir)+1:]
+			return fmt.Sprintf("./%v d41d8cd98f00b204e9800998ecf8427e+0 0:0:.keep\n", outputSuffix), nil
+		}
+		return
+	}
+
+	if followed >= limitFollowSymlinks {
+		// Got stuck in a loop or just a pathological number of
+		// directory links, give up.
+		err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
+		return
 	}
 
-	// wait for stdout/stderr to complete
-	<-runner.loggingDone
+	// "path" is the actual path we are visiting
+	// "tgt" is the target of "path" (a non-symlink) after following symlinks
+	// "relocated" is the path in the output manifest where the file should be placed,
+	// but has HostOutputDir as a prefix.
 
-	return nil
+	// The destination path in the output manifest may need to be
+	// logically relocated to some other path in order to appear
+	// in the correct location as a result of following a symlink.
+	// Remove the relocateFrom prefix and replace it with
+	// relocateTo.
+	relocated := relocateTo + path[len(relocateFrom):]
+
+	tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
+	if derefErr != nil && derefErr != ErrNotInOutputDir {
+		return "", derefErr
+	}
+
+	// go through mounts and try reverse map to collection reference
+	for _, bind := range binds {
+		mnt := runner.Container.Mounts[bind]
+		if (tgt == bind || strings.HasPrefix(tgt, bind+"/")) && !mnt.Writable {
+			// get path relative to bind
+			targetSuffix := tgt[len(bind):]
+
+			// Copy mount and adjust the path to add path relative to the bind
+			adjustedMount := mnt
+			adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
+
+			// Terminates in this keep mount, so add the
+			// manifest text at appropriate location.
+			outputSuffix := relocated[len(runner.HostOutputDir):]
+			manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
+			return
+		}
+	}
+
+	// If target is not a collection mount, it must be located within the
+	// output directory, otherwise it is an error.
+	if derefErr == ErrNotInOutputDir {
+		err = fmt.Errorf("Symlink in output %q points to invalid location %q, must point to path within the output directory.",
+			path[len(runner.HostOutputDir):], readlinktgt)
+		return
+	}
+
+	if info.Mode().IsRegular() {
+		return "", walkUpload.UploadFile(relocated, tgt)
+	}
+
+	if info.Mode().IsDir() {
+		// Symlink leads to directory. Walk() doesn't follow
+		// directory symlinks, so we walk the target directory
+		// instead. Within the walk, file paths are relocated
+		// so they appear under the original symlink path.
+		err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
+			var m string
+			m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
+				binds, walkUpload, tgt, relocated, followed+1)
+			if walkerr == nil {
+				manifestText = manifestText + m
+			}
+			return walkerr
+		})
+		return
+	}
+
+	return
 }
 
 // HandleOutput sets the output, unmounts the FUSE mount, and deletes temporary directories
 func (runner *ContainerRunner) CaptureOutput() error {
-	if runner.finalState != "Complete" {
-		return nil
-	}
-
 	if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
 		// Output may have been set directly by the container, so
 		// refresh the container record to check.
@@ -941,6 +1284,16 @@ func (runner *ContainerRunner) CaptureOutput() error {
 	}
 	sort.Strings(binds)
 
+	// Delete secret mounts so they don't get saved to the output collection.
+	for bind := range runner.SecretMounts {
+		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+			err = os.Remove(runner.HostOutputDir + bind[len(runner.Container.OutputPath):])
+			if err != nil {
+				return fmt.Errorf("Unable to remove secret mount: %v", err)
+			}
+		}
+	}
+
 	var manifestText string
 
 	collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
@@ -948,74 +1301,25 @@ func (runner *ContainerRunner) CaptureOutput() error {
 	if err != nil {
 		// Regular directory
 
-		// Find symlinks to arv-mounted files & dirs.
-		err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
-			if err != nil {
-				return err
-			}
-			if info.Mode()&os.ModeSymlink == 0 {
-				return nil
-			}
-			// read link to get container internal path
-			// only support 1 level of symlinking here.
-			var tgt string
-			tgt, err = os.Readlink(path)
-			if err != nil {
-				return err
-			}
+		cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
+		walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)
 
-			// get path relative to output dir
-			outputSuffix := path[len(runner.HostOutputDir):]
-
-			if strings.HasPrefix(tgt, "/") {
-				// go through mounts and try reverse map to collection reference
-				for _, bind := range binds {
-					mnt := runner.Container.Mounts[bind]
-					if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
-						// get path relative to bind
-						targetSuffix := tgt[len(bind):]
-
-						// Copy mount and adjust the path to add path relative to the bind
-						adjustedMount := mnt
-						adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
-
-						// get manifest text
-						var m string
-						m, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
-						if err != nil {
-							return err
-						}
-						manifestText = manifestText + m
-						// delete symlink so WriteTree won't try to to dereference it.
-						os.Remove(path)
-						return nil
-					}
-				}
+		var m string
+		err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
+			m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
+			if err == nil {
+				manifestText = manifestText + m
 			}
+			return err
+		})
 
-			// Not a link to a mount.  Must be dereferencible and
-			// point into the output directory.
-			tgt, err = filepath.EvalSymlinks(path)
-			if err != nil {
-				os.Remove(path)
-				return err
-			}
+		cw.EndUpload(walkUpload)
 
-			// Symlink target must be within the output directory otherwise it's an error.
-			if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
-				os.Remove(path)
-				return fmt.Errorf("Output directory symlink %q points to invalid location %q, must point to mount or output directory.",
-					outputSuffix, tgt)
-			}
-			return nil
-		})
 		if err != nil {
-			return fmt.Errorf("While checking output symlinks: %v", err)
+			return fmt.Errorf("While uploading output files: %v", err)
 		}
 
-		cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
-		var m string
-		m, err = cw.WriteTree(runner.HostOutputDir, runner.CrunchLog.Logger)
+		m, err = cw.ManifestText()
 		manifestText = manifestText + m
 		if err != nil {
 			return fmt.Errorf("While uploading output files: %v", err)
@@ -1046,7 +1350,7 @@ func (runner *ContainerRunner) CaptureOutput() error {
 			continue
 		}
 
-		if mnt.ExcludeFromOutput == true {
+		if mnt.ExcludeFromOutput == true || mnt.Writable {
 			continue
 		}
 
@@ -1121,37 +1425,80 @@ func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, b
 
 func (runner *ContainerRunner) CleanupDirs() {
 	if runner.ArvMount != nil {
-		umount := exec.Command("fusermount", "-z", "-u", runner.ArvMountPoint)
-		umnterr := umount.Run()
+		var delay int64 = 8
+		umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
+		umount.Stdout = runner.CrunchLog
+		umount.Stderr = runner.CrunchLog
+		runner.CrunchLog.Printf("Running %v", umount.Args)
+		umnterr := umount.Start()
+
 		if umnterr != nil {
-			runner.CrunchLog.Printf("While running fusermount: %v", umnterr)
+			runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
+		} else {
+			// If arv-mount --unmount gets stuck for any reason, we
+			// don't want to wait for it forever. Do Wait() in a goroutine
+			// so it doesn't block crunch-run.
+			umountExit := make(chan error)
+			go func() {
+				mnterr := umount.Wait()
+				if mnterr != nil {
+					runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
+				}
+				umountExit <- mnterr
+			}()
+
+			for again := true; again; {
+				again = false
+				select {
+				case <-umountExit:
+					umount = nil
+					again = true
+				case <-runner.ArvMountExit:
+					break
+				case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
+					runner.CrunchLog.Printf("Timed out waiting for unmount")
+					if umount != nil {
+						umount.Process.Kill()
+					}
+					runner.ArvMount.Process.Kill()
+				}
+			}
+		}
 	}
 
-		mnterr := <-runner.ArvMountExit
-		if mnterr != nil {
-			runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
+	if runner.ArvMountPoint != "" {
+		if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
+			runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
 		}
 	}
 
-	for _, tmpdir := range runner.CleanupTempDir {
-		rmerr := os.RemoveAll(tmpdir)
-		if rmerr != nil {
-			runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", tmpdir, rmerr)
-		}
+	if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
+		runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
 	}
 }
 
 // CommitLogs posts the collection containing the final container logs.
 func (runner *ContainerRunner) CommitLogs() error {
-	runner.CrunchLog.Print(runner.finalState)
-	runner.CrunchLog.Close()
+	func() {
+		// Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
+		runner.cStateLock.Lock()
+		defer runner.cStateLock.Unlock()
+
+		runner.CrunchLog.Print(runner.finalState)
+
+		if runner.arvMountLog != nil {
+			runner.arvMountLog.Close()
+		}
+		runner.CrunchLog.Close()
 
-	// Closing CrunchLog above allows it to be committed to Keep at this
-	// point, but re-open crunch log with ArvClient in case there are any
-	// other further (such as failing to write the log to Keep!) while
-	// shutting down
-	runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
-		UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
+		// Closing CrunchLog above allows them to be committed to Keep at this
+		// point, but re-open crunch log with ArvClient in case there are any
+		// other further errors (such as failing to write the log to Keep!)
+		// while shutting down
+		runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
+			UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
+		runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
+	}()
 
 	if runner.LogsPDH != nil {
 		// If we have already assigned something to LogsPDH,
@@ -1238,12 +1585,16 @@ func (runner *ContainerRunner) IsCancelled() bool {
 
 // NewArvLogWriter creates an ArvLogWriter
 func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
-	return &ArvLogWriter{ArvClient: runner.ArvClient, UUID: runner.Container.UUID, loggingStream: name,
-		writeCloser: runner.LogCollection.Open(name + ".txt")}
+	return &ArvLogWriter{
+		ArvClient:     runner.ArvClient,
+		UUID:          runner.Container.UUID,
+		loggingStream: name,
+		writeCloser:   runner.LogCollection.Open(name + ".txt")}
 }
 
 // Run the full container lifecycle.
 func (runner *ContainerRunner) Run() (err error) {
+	runner.CrunchLog.Printf("crunch-run %s started", version)
 	runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
 
 	hostname, hosterr := os.Hostname()
@@ -1253,12 +1604,15 @@ func (runner *ContainerRunner) Run() (err error) {
 		runner.CrunchLog.Printf("Executing on host '%s'", hostname)
 	}
 
-	// Clean up temporary directories _after_ finalizing
-	// everything (if we've made any by then)
-	defer runner.CleanupDirs()
-
 	runner.finalState = "Queued"
 
+	defer func() {
+		runner.CleanupDirs()
+
+		runner.CrunchLog.Printf("crunch-run finished")
+		runner.CrunchLog.Close()
+	}()
+
 	defer func() {
 		// checkErr prints e (unless it's nil) and sets err to
 		// e (unless err is already non-nil). Thus, if err
@@ -1283,7 +1637,6 @@ func (runner *ContainerRunner) Run() (err error) {
 		checkErr(err)
 
 		if runner.finalState == "Queued" {
-			runner.CrunchLog.Close()
 			runner.UpdateContainerFinal()
 			return
 		}
@@ -1295,28 +1648,26 @@ func (runner *ContainerRunner) Run() (err error) {
 		}
 
 		checkErr(runner.CaptureOutput())
+		checkErr(runner.stopHoststat())
 		checkErr(runner.CommitLogs())
 		checkErr(runner.UpdateContainerFinal())
-
-		// The real log is already closed, but then we opened
-		// a new one in case we needed to log anything while
-		// finalizing.
-		runner.CrunchLog.Close()
 	}()
 
-	err = runner.ArvClient.Get("containers", runner.Container.UUID, nil, &runner.Container)
+	err = runner.fetchContainerRecord()
 	if err != nil {
-		err = fmt.Errorf("While getting container record: %v", err)
 		return
 	}
-
-	// setup signal handling
-	runner.SetupSignals()
+	runner.setupSignals()
+	runner.startHoststat()
 
 	// check for and/or load image
 	err = runner.LoadImage()
 	if err != nil {
-		runner.finalState = "Cancelled"
+		if !runner.checkBrokenNode(err) {
+			// Failed to load image but not due to a "broken node"
+			// condition, probably user error.
+			runner.finalState = "Cancelled"
+		}
 		err = fmt.Errorf("While loading container image: %v", err)
 		return
 	}
@@ -1333,20 +1684,19 @@ func (runner *ContainerRunner) Run() (err error) {
 	if err != nil {
 		return
 	}
-
-	// Gather and record node information
-	err = runner.LogNodeInfo()
+	err = runner.LogHostInfo()
+	if err != nil {
+		return
+	}
+	err = runner.LogNodeRecord()
 	if err != nil {
 		return
 	}
-
-	// Save container.json record on log collection
 	err = runner.LogContainerRecord()
 	if err != nil {
 		return
 	}
 
-	runner.StartCrunchstat()
-
 	if runner.IsCancelled() {
 		return
 	}
@@ -1357,18 +1707,64 @@ func (runner *ContainerRunner) Run() (err error) {
 	}
 	runner.finalState = "Cancelled"
 
+	runner.startCrunchstat()
+
 	err = runner.StartContainer()
 	if err != nil {
+		runner.checkBrokenNode(err)
 		return
 	}
 
 	err = runner.WaitFinish()
-	if err == nil {
+	if err == nil && !runner.IsCancelled() {
 		runner.finalState = "Complete"
 	}
 	return
 }
 
+// Fetch the current container record (uuid = runner.Container.UUID)
+// into runner.Container.
+func (runner *ContainerRunner) fetchContainerRecord() error {
+	reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+	if err != nil {
+		return fmt.Errorf("error fetching container record: %v", err)
+	}
+	defer reader.Close()
+
+	dec := json.NewDecoder(reader)
+	dec.UseNumber()
+	err = dec.Decode(&runner.Container)
+	if err != nil {
+		return fmt.Errorf("error decoding container record: %v", err)
+	}
+
+	var sm struct {
+		SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
+	}
+
+	containerToken, err := runner.ContainerToken()
+	if err != nil {
+		return fmt.Errorf("error getting container token: %v", err)
+	}
+
+	containerClient, err := runner.MkArvClient(containerToken)
+	if err != nil {
+		return fmt.Errorf("error creating container API client: %v", err)
+	}
+
+	err = containerClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
+	if err != nil {
+		if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
+			return fmt.Errorf("error fetching secret_mounts: %v", err)
+		}
+		// ok && apierr.HttpStatusCode == 404, which means
+		// secret_mounts isn't supported by this API server.
+	}
+	runner.SecretMounts = sm.SecretMounts
+
+	return nil
+}
+
 // NewContainerRunner creates a new container runner.
 func NewContainerRunner(api IArvadosClient,
 	kc IKeepClient,
@@ -1379,6 +1775,14 @@ func NewContainerRunner(api IArvadosClient,
 	cr.NewLogWriter = cr.NewArvLogWriter
 	cr.RunArvMount = cr.ArvMountCmd
 	cr.MkTempDir = ioutil.TempDir
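+	// MkArvClient makes an API client that authenticates with the supplied
+	// token; crunch-run uses it to issue requests on the container's behalf
+	// (e.g., fetching secret_mounts with the container token).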
+	cr.MkArvClient = func(token string) (IArvadosClient, error) {
+		cl, err := arvadosclient.MakeArvadosClient()
+		if err != nil {
+			return nil, err
+		}
+		cl.ApiToken = token
+		return cl, nil
+	}
 	cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
 	cr.Container.UUID = containerUUID
 	cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
@@ -1403,8 +1807,18 @@ func main() {
 	networkMode := flag.String("container-network-mode", "default",
 		`Set networking mode for container. Corresponds to Docker network mode (--net).
 	`)
+	memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
+	getVersion := flag.Bool("version", false, "Print version information and exit.")
 	flag.Parse()
 
+	// Print version information if requested
+	if *getVersion {
+		fmt.Printf("crunch-run %s\n", version)
+		return
+	}
+
+	log.Printf("crunch-run %s started", version)
+
 	containerId := flag.Arg(0)
 
 	if *caCertsPath != "" {
@@ -1417,24 +1831,31 @@
 	}
 	api.Retries = 8
 
-	var kc *keepclient.KeepClient
-	kc, err = keepclient.MakeKeepClient(api)
-	if err != nil {
-		log.Fatalf("%s: %v", containerId, err)
+	kc, kcerr := keepclient.MakeKeepClient(api)
+	if kcerr != nil {
+		log.Fatalf("%s: %v", containerId, kcerr)
 	}
+	kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
 	kc.Retries = 4
 
-	var docker *dockerclient.Client
 	// API version 1.21 corresponds to Docker 1.9, which is currently the
 	// minimum version we want to support.
-	docker, err = dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
-	if err != nil {
-		log.Fatalf("%s: %v", containerId, err)
+	docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
+
+	cr := NewContainerRunner(api, kc, docker, containerId)
+	if dockererr != nil {
+		cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
+		cr.checkBrokenNode(dockererr)
+		cr.CrunchLog.Close()
+		os.Exit(1)
 	}
 
-	dockerClientProxy := ThinDockerClientProxy{Docker: docker}
+	parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerId+".")
+	if tmperr != nil {
+		log.Fatalf("%s: %v", containerId, tmperr)
+	}
 
-	cr := NewContainerRunner(api, kc, dockerClientProxy, containerId)
+	cr.parentTemp = parentTemp
 	cr.statInterval = *statInterval
 	cr.cgroupRoot = *cgroupRoot
 	cr.expectCgroupParent = *cgroupParent
@@ -1446,9 +1867,24 @@ func main() {
 		cr.expectCgroupParent = p
 	}
 
-	err = cr.Run()
-	if err != nil {
-		log.Fatalf("%s: %v", containerId, err)
+	runerr := cr.Run()
+
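+	// If requested with -memprofile, dump a heap profile now that the
+	// container has finished.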
+	if *memprofile != "" {
+		f, err := os.Create(*memprofile)
+		if err != nil {
+			log.Printf("could not create memory profile: %v", err)
+		}
+		runtime.GC() // get up-to-date statistics
+		if err := pprof.WriteHeapProfile(f); err != nil {
+			log.Printf("could not write memory profile: %v", err)
+		}
+		closeerr := f.Close()
+		if closeerr != nil {
+			log.Printf("closing memprofile file: %v", closeerr)
+		}
 	}
+
+	if runerr != nil {
+		log.Fatalf("%s: %v", containerId, runerr)
+	}
 }