Merge branch '12764-writable-file' refs #12764
[arvados.git] / services / crunch-run / crunchrun.go
index 83c4ca39761f9c2ad5f2469e49ec863ea6d69160..0582e5418fd4776e11a97224d42c58750b90d8da 100644 (file)
@@ -6,7 +6,6 @@ package main
 
 import (
        "bytes"
-       "context"
        "encoding/json"
        "errors"
        "flag"
@@ -19,6 +18,7 @@ import (
        "os/signal"
        "path"
        "path/filepath"
+       "regexp"
        "runtime"
        "runtime/pprof"
        "sort"
@@ -32,6 +32,7 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
        "git.curoverse.com/arvados.git/sdk/go/manifest"
+       "golang.org/x/net/context"
 
        dockertypes "github.com/docker/docker/api/types"
        dockercontainer "github.com/docker/docker/api/types/container"
@@ -39,6 +40,8 @@ import (
        dockerclient "github.com/docker/docker/client"
 )
 
+var version = "dev"
+
 // IArvadosClient is the minimal Arvados API methods used by crunch-run.
 type IArvadosClient interface {
        Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
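
The new package-level `version` defaults to "dev"; release builds presumably stamp a real version at link time, e.g. `go build -ldflags "-X main.version=1.2.3"` (the packaging step that does this is outside this diff).
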
@@ -72,60 +75,13 @@ type ThinDockerClient interface {
        ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
                networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
        ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
-       ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
+       ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
        ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
        ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
        ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
        ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
 }
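
With the proxy type deleted below, `*dockerclient.Client` evidently satisfies this trimmed interface directly, and tests can swap in a lightweight double. A minimal sketch of such a stub, using the imports already in this file (hypothetical, not the project's actual test fixture; methods outside this hunk, such as ContainerAttach, are elided):

    type stubDockerClient struct {
        exitCode int64
    }

    func (s *stubDockerClient) ContainerCreate(ctx context.Context, config *dockercontainer.Config,
        hostConfig *dockercontainer.HostConfig, networkingConfig *dockernetwork.NetworkingConfig,
        containerName string) (dockercontainer.ContainerCreateCreatedBody, error) {
        return dockercontainer.ContainerCreateCreatedBody{ID: "stub-cid"}, nil
    }

    func (s *stubDockerClient) ContainerStart(ctx context.Context, container string,
        options dockertypes.ContainerStartOptions) error {
        return nil
    }

    func (s *stubDockerClient) ContainerRemove(ctx context.Context, container string,
        options dockertypes.ContainerRemoveOptions) error {
        return nil
    }

    func (s *stubDockerClient) ContainerWait(ctx context.Context, container string,
        condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
        // Report a canned exit code immediately, as a finished container would.
        body := make(chan dockercontainer.ContainerWaitOKBody, 1)
        body <- dockercontainer.ContainerWaitOKBody{StatusCode: s.exitCode}
        return body, make(chan error)
    }
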
 
-// ThinDockerClientProxy is a proxy implementation of ThinDockerClient
-// that executes the docker requests on dockerclient.Client
-type ThinDockerClientProxy struct {
-       Docker *dockerclient.Client
-}
-
-// ContainerAttach invokes dockerclient.Client.ContainerAttach
-func (proxy ThinDockerClientProxy) ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) {
-       return proxy.Docker.ContainerAttach(ctx, container, options)
-}
-
-// ContainerCreate invokes dockerclient.Client.ContainerCreate
-func (proxy ThinDockerClientProxy) ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
-       networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) {
-       return proxy.Docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
-}
-
-// ContainerStart invokes dockerclient.Client.ContainerStart
-func (proxy ThinDockerClientProxy) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
-       return proxy.Docker.ContainerStart(ctx, container, options)
-}
-
-// ContainerStop invokes dockerclient.Client.ContainerStop
-func (proxy ThinDockerClientProxy) ContainerStop(ctx context.Context, container string, timeout *time.Duration) error {
-       return proxy.Docker.ContainerStop(ctx, container, timeout)
-}
-
-// ContainerWait invokes dockerclient.Client.ContainerWait
-func (proxy ThinDockerClientProxy) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
-       return proxy.Docker.ContainerWait(ctx, container, condition)
-}
-
-// ImageInspectWithRaw invokes dockerclient.Client.ImageInspectWithRaw
-func (proxy ThinDockerClientProxy) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
-       return proxy.Docker.ImageInspectWithRaw(ctx, image)
-}
-
-// ImageLoad invokes dockerclient.Client.ImageLoad
-func (proxy ThinDockerClientProxy) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
-       return proxy.Docker.ImageLoad(ctx, input, quiet)
-}
-
-// ImageRemove invokes dockerclient.Client.ImageRemove
-func (proxy ThinDockerClientProxy) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
-       return proxy.Docker.ImageRemove(ctx, image, options)
-}
-
 // ContainerRunner is the main stateful struct used for a single execution of a
 // container.
 type ContainerRunner struct {
@@ -158,10 +114,12 @@ type ContainerRunner struct {
        ArvMountExit   chan error
        finalState     string
 
-       statLogger   io.WriteCloser
-       statReporter *crunchstat.Reporter
-       statInterval time.Duration
-       cgroupRoot   string
+       statLogger       io.WriteCloser
+       statReporter     *crunchstat.Reporter
+       hoststatLogger   io.WriteCloser
+       hoststatReporter *crunchstat.Reporter
+       statInterval     time.Duration
+       cgroupRoot       string
        // What we expect the container's cgroup parent to be.
        expectCgroupParent string
        // What we tell docker to use as the container's cgroup
@@ -177,11 +135,11 @@ type ContainerRunner struct {
        setCgroupParent string
 
        cStateLock sync.Mutex
-       cStarted   bool // StartContainer() succeeded
        cCancelled bool // StopContainer() invoked
 
        enableNetwork string // one of "default" or "always"
        networkMode   string // passed through to HostConfig.NetworkMode
+       arvMountLog   *ThrottledLogger
 }
 
 // setupSignals sets up signal handling to gracefully terminate the underlying
@@ -193,8 +151,10 @@ func (runner *ContainerRunner) setupSignals() {
        signal.Notify(runner.SigChan, syscall.SIGQUIT)
 
        go func(sig chan os.Signal) {
-               <-sig
-               runner.stop()
+               for s := range sig {
+                       runner.CrunchLog.Printf("caught signal: %v", s)
+                       runner.stop()
+               }
        }(runner.SigChan)
 }
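
Because the handler now ranges over the channel, a second or third signal is logged and re-triggers stop() instead of being dropped after the first receive. The shape of the pattern in isolation (a sketch, not project code):

    sig := make(chan os.Signal, 1)
    signal.Notify(sig, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)
    go func() {
        for s := range sig { // every delivered signal, not just the first
            log.Printf("caught signal: %v", s)
            // initiate (or repeat) shutdown here
        }
    }()
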
 
@@ -202,26 +162,52 @@ func (runner *ContainerRunner) setupSignals() {
 func (runner *ContainerRunner) stop() {
        runner.cStateLock.Lock()
        defer runner.cStateLock.Unlock()
-       if runner.cCancelled {
+       if runner.ContainerID == "" {
                return
        }
        runner.cCancelled = true
-       if runner.cStarted {
-               timeout := time.Duration(10)
-               err := runner.Docker.ContainerStop(context.TODO(), runner.ContainerID, &(timeout))
-               if err != nil {
-                       log.Printf("StopContainer failed: %s", err)
-               }
+       runner.CrunchLog.Printf("removing container")
+       err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
+       if err != nil {
+               runner.CrunchLog.Printf("error removing container: %s", err)
        }
 }
 
-func (runner *ContainerRunner) teardown() {
+func (runner *ContainerRunner) stopSignals() {
        if runner.SigChan != nil {
                signal.Stop(runner.SigChan)
-               close(runner.SigChan)
        }
 }
 
+var errorBlacklist = []string{
+       "(?ms).*[Cc]annot connect to the Docker daemon.*",
+       "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
+}
+var brokenNodeHook = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
+
+func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
+       for _, d := range errorBlacklist {
+               if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
+                       runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
+                       if *brokenNodeHook == "" {
+                               runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
+                       } else {
+                               runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
+                               // Run the configured broken node hook script.
+                               c := exec.Command(*brokenNodeHook)
+                               c.Stdout = runner.CrunchLog
+                               c.Stderr = runner.CrunchLog
+                               err := c.Run()
+                               if err != nil {
+                                       runner.CrunchLog.Printf("Error running broken node hook: %v", err)
+                               }
+                       }
+                       return true
+               }
+       }
+       return false
+}
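
Each blacklist entry is a regular expression matched against the full error text; the `(?ms)` flags let `.` span Docker's multi-line error messages. A hypothetical error that trips the first pattern:

    err := errors.New("Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?")
    for _, pattern := range errorBlacklist {
        if m, _ := regexp.MatchString(pattern, err.Error()); m {
            fmt.Printf("node looks broken, matched: %q\n", pattern)
        }
    }
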
+
 // LoadImage determines the docker image id from the container record and
 // checks if it is available in the local Docker image store.  If not, it loads
 // the image from Keep.
@@ -291,9 +277,11 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
        }
        c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
 
-       nt := NewThrottledLogger(runner.NewLogWriter("arv-mount"))
-       c.Stdout = nt
-       c.Stderr = nt
+       runner.arvMountLog = NewThrottledLogger(runner.NewLogWriter("arv-mount"))
+       c.Stdout = runner.arvMountLog
+       c.Stderr = runner.arvMountLog
+
+       runner.CrunchLog.Printf("Running %v", c.Args)
 
        err = c.Start()
        if err != nil {
@@ -317,7 +305,11 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
        }()
 
        go func() {
-               runner.ArvMountExit <- c.Wait()
+               mnterr := c.Wait()
+               if mnterr != nil {
+                       runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
+               }
+               runner.ArvMountExit <- mnterr
                close(runner.ArvMountExit)
        }()
 
@@ -340,17 +332,55 @@ func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
        return
 }
 
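+// copyfile copies the file at src to dst, creating dst's parent directories as needed.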
+func copyfile(src string, dst string) (err error) {
+       srcfile, err := os.Open(src)
+       if err != nil {
+               return
+       }
+
+       os.MkdirAll(path.Dir(dst), 0770)
+
+       dstfile, err := os.Create(dst)
+       if err != nil {
+               return
+       }
+       _, err = io.Copy(dstfile, srcfile)
+       if err != nil {
+               return
+       }
+
+       err = srcfile.Close()
+       err2 := dstfile.Close()
+
+       if err != nil {
+               return
+       }
+
+       if err2 != nil {
+               return err2
+       }
+
+       return nil
+}
+
 func (runner *ContainerRunner) SetupMounts() (err error) {
        err = runner.SetupArvMountPoint("keep")
        if err != nil {
                return fmt.Errorf("While creating keep mount temp dir: %v", err)
        }
 
-       runner.CleanupTempDir = append(runner.CleanupTempDir, runner.ArvMountPoint)
+       token, err := runner.ContainerToken()
+       if err != nil {
+               return fmt.Errorf("could not get container token: %s", err)
+       }
 
        pdhOnly := true
        tmpcount := 0
-       arvMountCmd := []string{"--foreground", "--allow-other", "--read-write"}
+       arvMountCmd := []string{
+               "--foreground",
+               "--allow-other",
+               "--read-write",
+               fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
 
        if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
                arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
@@ -360,6 +390,11 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
        runner.Binds = nil
        runner.Volumes = make(map[string]struct{})
        needCertMount := true
+       type copyFile struct {
+               src  string
+               bind string
+       }
+       var copyFiles []copyFile
 
        var binds []string
        for bind := range runner.Container.Mounts {
@@ -415,7 +450,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                                pdhOnly = false
                                src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
                        } else if mnt.PortableDataHash != "" {
-                               if mnt.Writable {
+                               if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
                                        return fmt.Errorf("Can never write to a collection specified by portable data hash")
                                }
                                idx := strings.Index(mnt.PortableDataHash, "/")
@@ -442,10 +477,12 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        if mnt.Writable {
                                if bind == runner.Container.OutputPath {
                                        runner.HostOutputDir = src
+                                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
                                } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
-                                       return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
+                                       copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+                               } else {
+                                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
                                }
-                               runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
                        } else {
                                runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
                        }
@@ -492,6 +529,18 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                                return fmt.Errorf("writing temp file: %v", err)
                        }
                        runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+
+               case mnt.Kind == "git_tree":
+                       tmpdir, err := runner.MkTempDir("", "")
+                       if err != nil {
+                               return fmt.Errorf("creating temp dir: %v", err)
+                       }
+                       runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
+                       err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
+                       if err != nil {
+                               return err
+                       }
+                       runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
                }
        }
 
@@ -516,11 +565,6 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
        }
        arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
 
-       token, err := runner.ContainerToken()
-       if err != nil {
-               return fmt.Errorf("could not get container token: %s", err)
-       }
-
        runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
        if err != nil {
                return fmt.Errorf("While trying to start arv-mount: %v", err)
@@ -533,59 +577,106 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                }
        }
 
+       for _, cp := range copyFiles {
+               dir, err := os.Stat(cp.src)
+               if err != nil {
+                       return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+               }
+               if dir.IsDir() {
+                       err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
+                               if walkerr != nil {
+                                       return walkerr
+                               }
+                               if walkinfo.Mode().IsRegular() {
+                                       return copyfile(walkpath, path.Join(cp.bind, walkpath[len(cp.src):]))
+                               } else if walkinfo.Mode().IsDir() {
+                                       return os.MkdirAll(path.Join(cp.bind, walkpath[len(cp.src):]), 0770)
+                               } else {
+                                       return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
+                               }
+                       })
+               } else {
+                       err = copyfile(cp.src, cp.bind)
+               }
+               if err != nil {
+                       return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+               }
+       }
+
        return nil
 }
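
This staging pass is the heart of #12764: a writable collection mount beneath the output path is no longer rejected; its contents are copied into the host output directory before the container starts, so they are captured with the rest of the output. The destination arithmetic from SetupMounts, with hypothetical values:

    outputPath := "/out"           // runner.Container.OutputPath (in-container)
    hostOutputDir := "/tmp/out123" // runner.HostOutputDir (on the host)
    bind := "/out/staged"          // writable mount under the output path
    // cp.bind as computed above:
    dst := hostOutputDir + bind[len(outputPath):] // "/tmp/out123/staged"
    fmt.Println(dst)
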
 
 func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
        // Handle docker log protocol
        // https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
+       defer close(runner.loggingDone)
 
        header := make([]byte, 8)
-       for {
-               _, readerr := io.ReadAtLeast(containerReader, header, 8)
-
-               if readerr == nil {
-                       readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
-                       if header[0] == 1 {
-                               // stdout
-                               _, readerr = io.CopyN(runner.Stdout, containerReader, readsize)
-                       } else {
-                               // stderr
-                               _, readerr = io.CopyN(runner.Stderr, containerReader, readsize)
+       var err error
+       for err == nil {
+               _, err = io.ReadAtLeast(containerReader, header, 8)
+               if err != nil {
+                       if err == io.EOF {
+                               err = nil
                        }
+                       break
                }
+               readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
+               if header[0] == 1 {
+                       // stdout
+                       _, err = io.CopyN(runner.Stdout, containerReader, readsize)
+               } else {
+                       // stderr
+                       _, err = io.CopyN(runner.Stderr, containerReader, readsize)
+               }
+       }
 
-               if readerr != nil {
-                       if readerr != io.EOF {
-                               runner.CrunchLog.Printf("While reading docker logs: %v", readerr)
-                       }
-
-                       closeerr := runner.Stdout.Close()
-                       if closeerr != nil {
-                               runner.CrunchLog.Printf("While closing stdout logs: %v", closeerr)
-                       }
+       if err != nil {
+               runner.CrunchLog.Printf("error reading docker logs: %v", err)
+       }
 
-                       closeerr = runner.Stderr.Close()
-                       if closeerr != nil {
-                               runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr)
-                       }
+       err = runner.Stdout.Close()
+       if err != nil {
+               runner.CrunchLog.Printf("error closing stdout logs: %v", err)
+       }
 
-                       if runner.statReporter != nil {
-                               runner.statReporter.Stop()
-                               closeerr = runner.statLogger.Close()
-                               if closeerr != nil {
-                                       runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr)
-                               }
-                       }
+       err = runner.Stderr.Close()
+       if err != nil {
+               runner.CrunchLog.Printf("error closing stderr logs: %v", err)
+       }
 
-                       runner.loggingDone <- true
-                       close(runner.loggingDone)
-                       return
+       if runner.statReporter != nil {
+               runner.statReporter.Stop()
+               err = runner.statLogger.Close()
+               if err != nil {
+                       runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
                }
        }
 }
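
Each frame in Docker's attach stream begins with an 8-byte header: byte 0 is the stream ID (1 = stdout, 2 = stderr) and bytes 4-7 are the payload length as a big-endian uint32. The manual shifts above are equivalent to this decode (sketch; would need `encoding/binary` in the import list):

    streamID := header[0] // 1 = stdout, 2 = stderr
    readsize := int64(binary.BigEndian.Uint32(header[4:8]))
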
 
-func (runner *ContainerRunner) StartCrunchstat() {
+func (runner *ContainerRunner) stopHoststat() error {
+       if runner.hoststatReporter == nil {
+               return nil
+       }
+       runner.hoststatReporter.Stop()
+       err := runner.hoststatLogger.Close()
+       if err != nil {
+               return fmt.Errorf("error closing hoststat logs: %v", err)
+       }
+       return nil
+}
+
+func (runner *ContainerRunner) startHoststat() {
+       runner.hoststatLogger = NewThrottledLogger(runner.NewLogWriter("hoststat"))
+       runner.hoststatReporter = &crunchstat.Reporter{
+               Logger:     log.New(runner.hoststatLogger, "", 0),
+               CgroupRoot: runner.cgroupRoot,
+               PollPeriod: runner.statInterval,
+       }
+       runner.hoststatReporter.Start()
+}
+
+func (runner *ContainerRunner) startCrunchstat() {
        runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
        runner.statReporter = &crunchstat.Reporter{
                CID:          runner.ContainerID,
@@ -602,11 +693,13 @@ type infoCommand struct {
        cmd   []string
 }
 
-// Gather node information and store it on the log for debugging
-// purposes.
-func (runner *ContainerRunner) LogNodeInfo() (err error) {
+// LogHostInfo logs info about the current host, for debugging and
+// accounting purposes. Although it's logged as "node-info", this is
+// about the environment where crunch-run is actually running, which
+// might differ from what's described in the node record (see
+// LogNodeRecord).
+func (runner *ContainerRunner) LogHostInfo() (err error) {
        w := runner.NewLogWriter("node-info")
-       logger := log.New(w, "node-info", 0)
 
        commands := []infoCommand{
                {
@@ -632,17 +725,17 @@ func (runner *ContainerRunner) LogNodeInfo() (err error) {
        }
 
        // Run commands with informational output to be logged.
-       var out []byte
        for _, command := range commands {
-               out, err = exec.Command(command.cmd[0], command.cmd[1:]...).CombinedOutput()
-               if err != nil {
-                       return fmt.Errorf("While running command %q: %v",
-                               command.cmd, err)
-               }
-               logger.Println(command.label)
-               for _, line := range strings.Split(string(out), "\n") {
-                       logger.Println(" ", line)
+               fmt.Fprintln(w, command.label)
+               cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
+               cmd.Stdout = w
+               cmd.Stderr = w
+               if err := cmd.Run(); err != nil {
+                       err = fmt.Errorf("While running command %q: %v", command.cmd, err)
+                       fmt.Fprintln(w, err)
+                       return err
                }
+               fmt.Fprintln(w, "")
        }
 
        err = w.Close()
@@ -652,39 +745,72 @@ func (runner *ContainerRunner) LogNodeInfo() (err error) {
        return nil
 }
 
-// Get and save the raw JSON container record from the API server
-func (runner *ContainerRunner) LogContainerRecord() (err error) {
+// LogContainerRecord gets and saves the raw JSON container record from the API server
+func (runner *ContainerRunner) LogContainerRecord() error {
+       logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
+       if !logged && err == nil {
+               err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
+       }
+       return err
+}
+
+// LogNodeRecord logs the arvados#node record corresponding to the current host.
+func (runner *ContainerRunner) LogNodeRecord() error {
+       hostname := os.Getenv("SLURMD_NODENAME")
+       if hostname == "" {
+               hostname, _ = os.Hostname()
+       }
+       _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
+               // The "info" field has admin-only info when obtained
+               // with a privileged token, and should not be logged.
+               node, ok := resp.(map[string]interface{})
+               if ok {
+                       delete(node, "info")
+               }
+       })
+       return err
+}
+
+func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
        w := &ArvLogWriter{
                ArvClient:     runner.ArvClient,
                UUID:          runner.Container.UUID,
-               loggingStream: "container",
-               writeCloser:   runner.LogCollection.Open("container.json"),
+               loggingStream: label,
+               writeCloser:   runner.LogCollection.Open(label + ".json"),
        }
 
-       // Get Container record JSON from the API Server
-       reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+       reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
        if err != nil {
-               return fmt.Errorf("While retrieving container record from the API server: %v", err)
+               return false, fmt.Errorf("error getting %s record: %v", label, err)
        }
        defer reader.Close()
 
        dec := json.NewDecoder(reader)
        dec.UseNumber()
-       var cr map[string]interface{}
-       if err = dec.Decode(&cr); err != nil {
-               return fmt.Errorf("While decoding the container record JSON response: %v", err)
+       var resp map[string]interface{}
+       if err = dec.Decode(&resp); err != nil {
+               return false, fmt.Errorf("error decoding %s list response: %v", label, err)
+       }
+       items, ok := resp["items"].([]interface{})
+       if !ok {
+               return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
+       } else if len(items) < 1 {
+               return false, nil
+       }
+       if munge != nil {
+               munge(items[0])
        }
        // Re-encode it using indentation to improve readability
        enc := json.NewEncoder(w)
        enc.SetIndent("", "    ")
-       if err = enc.Encode(cr); err != nil {
-               return fmt.Errorf("While logging the JSON container record: %v", err)
+       if err = enc.Encode(items[0]); err != nil {
+               return false, fmt.Errorf("error logging %s record: %v", label, err)
        }
        err = w.Close()
        if err != nil {
-               return fmt.Errorf("While closing container.json log: %v", err)
+               return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
        }
-       return nil
+       return true, nil
 }
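
logAPIResponse generalizes the old container-record logger: it issues a filtered list request, logs the first item as indented JSON under `<label>.json` in the log collection, and lets an optional munge callback scrub fields first. A hypothetical further use, logging a user record the same way (userUUID is an assumed variable):

    logged, err := runner.logAPIResponse("user", "users",
        map[string]interface{}{"filters": [][]string{{"uuid", "=", userUUID}}}, nil)
    if err == nil && !logged {
        err = fmt.Errorf("error: no user record found for %s", userUUID)
    }
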
 
 // AttachStreams connects the docker container stdin, stdout and stderr logs
@@ -875,87 +1001,66 @@ func (runner *ContainerRunner) StartContainer() error {
                dockertypes.ContainerStartOptions{})
        if err != nil {
                var advice string
-               if strings.Contains(err.Error(), "no such file or directory") {
+               if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
                        advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
                }
                return fmt.Errorf("could not start container: %v%s", err, advice)
        }
-       runner.cStarted = true
        return nil
 }
 
 // WaitFinish waits for the container to terminate, capture the exit code, and
 // close the stdout/stderr logging.
-func (runner *ContainerRunner) WaitFinish() (err error) {
+func (runner *ContainerRunner) WaitFinish() error {
        runner.CrunchLog.Print("Waiting for container to finish")
 
-       waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, "not-running")
-
-       var waitBody dockercontainer.ContainerWaitOKBody
-       select {
-       case waitBody = <-waitOk:
-       case err = <-waitErr:
-       }
-
-       if err != nil {
-               return fmt.Errorf("container wait: %v", err)
-       }
+       waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
+       arvMountExit := runner.ArvMountExit
+       for {
+               select {
+               case waitBody := <-waitOk:
+                       runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
+                       code := int(waitBody.StatusCode)
+                       runner.ExitCode = &code
+
+                       // wait for stdout/stderr to complete
+                       <-runner.loggingDone
+                       return nil
 
-       runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
-       code := int(waitBody.StatusCode)
-       runner.ExitCode = &code
+               case err := <-waitErr:
+                       return fmt.Errorf("container wait: %v", err)
 
-       waitMount := runner.ArvMountExit
-       select {
-       case err = <-waitMount:
-               runner.CrunchLog.Printf("arv-mount exited before container finished: %v", err)
-               waitMount = nil
-               runner.stop()
-       default:
+               case <-arvMountExit:
+                       runner.CrunchLog.Printf("arv-mount exited while container is still running.  Stopping container.")
+                       runner.stop()
+                       // arvMountExit is closed, so it would be ready on every
+                       // pass through the loop; set it to nil to disable this case.
+                       arvMountExit = nil
+               }
        }
-
-       // wait for stdout/stderr to complete
-       <-runner.loggingDone
-
-       return nil
 }
 
-// EvalSymlinks follows symlinks within the output directory.  If the symlink
-// leads to a keep mount, copy the manifest text from the keep mount into the
-// output manifestText.  Ensure that whether symlinks are relative or absolute,
-// they must remain within the output directory.
-//
-// Assumes initial value of "path" is absolute, and located within runner.HostOutputDir.
-func (runner *ContainerRunner) EvalSymlinks(path string, binds []string) (manifestText string, symlinksToRemove []string, err error) {
-       var links []string
-
-       defer func() {
-               if err != nil {
-                       symlinksToRemove = append(symlinksToRemove, links...)
-               }
-       }()
-
-       for n := 0; n < 32; n++ {
-               var info os.FileInfo
-               info, err = os.Lstat(path)
-               if err != nil {
-                       return
-               }
-
-               if info.Mode()&os.ModeSymlink == 0 {
-                       // Not a symlink, nothing to do.
+var ErrNotInOutputDir = fmt.Errorf("Must point to a path within the output directory")
+
+func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
+       // Follow symlinks if necessary
+       info = startinfo
+       tgt = path
+       readlinktgt = ""
+       nextlink := path
+       for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
+               if followed >= limitFollowSymlinks {
+                       // Got stuck in a loop or just a pathological number of links, give up.
+                       err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
                        return
                }
 
-               // Remember symlink for cleanup later
-               links = append(links, path)
-
-               var readlinktgt string
-               readlinktgt, err = os.Readlink(path)
-               tgt := readlinktgt
+               readlinktgt, err = os.Readlink(nextlink)
                if err != nil {
                        return
                }
+
+               tgt = readlinktgt
                if !strings.HasPrefix(tgt, "/") {
                        // Relative symlink, resolve it to host path
                        tgt = filepath.Join(filepath.Dir(path), tgt)
@@ -964,74 +1069,153 @@ func (runner *ContainerRunner) EvalSymlinks(path string, binds []string) (manife
                        // Absolute symlink to container output path, adjust it to host output path.
                        tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
                }
-
-               runner.CrunchLog.Printf("Resolve %q to %q", path, tgt)
-
-               // go through mounts and try reverse map to collection reference
-               for _, bind := range binds {
-                       mnt := runner.Container.Mounts[bind]
-                       if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
-                               // get path relative to bind
-                               targetSuffix := tgt[len(bind):]
-
-                               // Copy mount and adjust the path to add path relative to the bind
-                               adjustedMount := mnt
-                               adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
-
-                               for _, l := range links {
-                                       // The chain of one or more symlinks
-                                       // terminates in this keep mount, so
-                                       // add them all to the manifest text at
-                                       // appropriate locations.
-                                       var m string
-                                       outputSuffix := l[len(runner.HostOutputDir):]
-                                       m, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
-                                       if err != nil {
-                                               return
-                                       }
-                                       manifestText = manifestText + m
-                                       symlinksToRemove = append(symlinksToRemove, l)
-                               }
-                               return
-                       }
-               }
-
-               // If target is not a mount, it must be within the output
-               // directory, otherwise it is an error.
                if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
-                       err = fmt.Errorf("Output directory symlink %q points to invalid location %q, must point to mount or output directory.",
-                               path[len(runner.HostOutputDir):], readlinktgt)
+                       // After dereferencing, symlink target must either be
+                       // within output directory, or must point to a
+                       // collection mount.
+                       err = ErrNotInOutputDir
                        return
                }
 
-               // Update symlink to host FS
-               err = os.Remove(path)
+               info, err = os.Lstat(tgt)
                if err != nil {
-                       err = fmt.Errorf("Error removing symlink %q: %v", path, err)
+                       // Lstat failed: the symlink target is missing or unreadable.
+                       err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
+                               path[len(runner.HostOutputDir):], readlinktgt, err)
                        return
                }
 
-               err = os.Symlink(tgt, path)
-               if err != nil {
-                       err = fmt.Errorf("Error updating symlink %q: %v", path, err)
+               nextlink = tgt
+       }
+
+       return
+}
+
+var limitFollowSymlinks = 10
+
+// UploadOutputFile uploads files within the output directory, with special
+// handling for symlinks. If a symlink leads to a keep mount, copy the manifest
+// text from the keep mount into the output manifestText. Whether symlinks are
+// relative or absolute, every symlink target (even targets that are symlinks
+// themselves) must point to a path in either the output directory or a
+// collection mount.
+//
+// Assumes initial value of "path" is absolute, and located within runner.HostOutputDir.
+func (runner *ContainerRunner) UploadOutputFile(
+       path string,
+       info os.FileInfo,
+       infoerr error,
+       binds []string,
+       walkUpload *WalkUpload,
+       relocateFrom string,
+       relocateTo string,
+       followed int) (manifestText string, err error) {
+
+       if infoerr != nil {
+               return "", infoerr
+       }
+
+       if info.Mode().IsDir() {
+               // if empty, need to create a .keep file
+               dir, direrr := os.Open(path)
+               if direrr != nil {
+                       return "", direrr
+               }
+               defer dir.Close()
+               names, eof := dir.Readdirnames(1)
+               if len(names) == 0 && eof == io.EOF && path != runner.HostOutputDir {
+                       containerPath := runner.OutputPath + path[len(runner.HostOutputDir):]
+                       for _, bind := range binds {
+                               mnt := runner.Container.Mounts[bind]
+                               // Check if there is a bind for this
+                               // directory, in which case assume we don't need .keep
+                               if (containerPath == bind || strings.HasPrefix(containerPath, bind+"/")) && mnt.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
+                                       return
+                               }
+                       }
+                       outputSuffix := path[len(runner.HostOutputDir)+1:]
+                       return fmt.Sprintf("./%v d41d8cd98f00b204e9800998ecf8427e+0 0:0:.keep\n", outputSuffix), nil
+               }
+               return
+       }
+
+       if followed >= limitFollowSymlinks {
+               // Got stuck in a loop or just a pathological number of
+               // directory links, give up.
+               err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
+               return
+       }
+
+       // "path" is the actual path we are visiting
+       // "tgt" is the target of "path" (a non-symlink) after following symlinks
+       // "relocated" is the path in the output manifest where the file should be placed,
+       // but has HostOutputDir as a prefix.
+
+       // The destination path in the output manifest may need to be
+       // logically relocated to some other path in order to appear
+       // in the correct location as a result of following a symlink.
+       // Remove the relocateFrom prefix and replace it with
+       // relocateTo.
+       relocated := relocateTo + path[len(relocateFrom):]
+
+       tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
+       if derefErr != nil && derefErr != ErrNotInOutputDir {
+               return "", derefErr
+       }
+
+       // go through mounts and try reverse map to collection reference
+       for _, bind := range binds {
+               mnt := runner.Container.Mounts[bind]
+               if (tgt == bind || strings.HasPrefix(tgt, bind+"/")) && !mnt.Writable {
+                       // get path relative to bind
+                       targetSuffix := tgt[len(bind):]
+
+                       // Copy mount and adjust the path to add path relative to the bind
+                       adjustedMount := mnt
+                       adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
+
+                       // The symlink chain terminates in this keep mount, so
+                       // add the manifest text at the appropriate location.
+                       outputSuffix := relocated[len(runner.HostOutputDir):]
+                       manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
                        return
                }
+       }
 
-               // Target is within the output directory, so loop and check if
-               // it is also a symlink.
-               path = tgt
+       // If target is not a collection mount, it must be located within the
+       // output directory, otherwise it is an error.
+       if derefErr == ErrNotInOutputDir {
+               err = fmt.Errorf("Symlink in output %q points to invalid location %q, must point to path within the output directory.",
+                       path[len(runner.HostOutputDir):], readlinktgt)
+               return
+       }
+
+       if info.Mode().IsRegular() {
+               return "", walkUpload.UploadFile(relocated, tgt)
        }
-       // Got stuck in a loop or just a pathological number of links, give up.
-       err = fmt.Errorf("Too many symlinks.")
+
+       if info.Mode().IsDir() {
+               // Symlink leads to directory.  Walk() doesn't follow
+               // directory symlinks, so we walk the target directory
+               // instead.  Within the walk, file paths are relocated
+               // so they appear under the original symlink path.
+               err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
+                       var m string
+                       m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
+                               binds, walkUpload, tgt, relocated, followed+1)
+                       if walkerr == nil {
+                               manifestText = manifestText + m
+                       }
+                       return walkerr
+               })
+               return
+       }
+
        return
 }
 
 // HandleOutput sets the output, unmounts the FUSE mount, and deletes temporary directories
 func (runner *ContainerRunner) CaptureOutput() error {
-       if runner.finalState != "Complete" {
-               return nil
-       }
-
        if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
                // Output may have been set directly by the container, so
                // refresh the container record to check.
@@ -1072,40 +1256,25 @@ func (runner *ContainerRunner) CaptureOutput() error {
        if err != nil {
                // Regular directory
 
-               symlinksToRemove := make(map[string]bool)
+               cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
+               walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)
+
                var m string
-               var srm []string
-               // Find symlinks to arv-mounted files & dirs.
                err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
-                       if err != nil {
-                               return err
-                       }
-                       m, srm, err = runner.EvalSymlinks(path, binds)
-                       for _, r := range srm {
-                               symlinksToRemove[r] = true
-                       }
+                       m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
                        if err == nil {
                                manifestText = manifestText + m
                        }
                        return err
                })
-               for l, _ := range symlinksToRemove {
-                       err2 := os.Remove(l)
-                       if err2 != nil {
-                               if err == nil {
-                                       err = fmt.Errorf("Error removing symlink %q: %v", err2)
-                               } else {
-                                       err = fmt.Errorf("%v\nError removing symlink %q: %v",
-                                               err, err2)
-                               }
-                       }
-               }
+
+               cw.EndUpload(walkUpload)
+
                if err != nil {
-                       return fmt.Errorf("While checking output symlinks: %v", err)
+                       return fmt.Errorf("While uploading output files: %v", err)
                }
 
-               cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
-               m, err = cw.WriteTree(runner.HostOutputDir, runner.CrunchLog.Logger)
+               m, err = cw.ManifestText()
                manifestText = manifestText + m
                if err != nil {
                        return fmt.Errorf("While uploading output files: %v", err)
@@ -1136,7 +1305,7 @@ func (runner *ContainerRunner) CaptureOutput() error {
                        continue
                }
 
-               if mnt.ExcludeFromOutput == true {
+       if mnt.ExcludeFromOutput || mnt.Writable {
                        continue
                }
 
@@ -1211,21 +1380,55 @@ func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, b
 
 func (runner *ContainerRunner) CleanupDirs() {
        if runner.ArvMount != nil {
-               umount := exec.Command("fusermount", "-z", "-u", runner.ArvMountPoint)
-               umnterr := umount.Run()
+               var delay int64 = 8
+               umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
+               umount.Stdout = runner.CrunchLog
+               umount.Stderr = runner.CrunchLog
+               runner.CrunchLog.Printf("Running %v", umount.Args)
+               umnterr := umount.Start()
+
                if umnterr != nil {
-                       runner.CrunchLog.Printf("While running fusermount: %v", umnterr)
+                       runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
+               } else {
+                       // If arv-mount --unmount gets stuck for any reason, we
+                       // don't want to wait for it forever.  Do Wait() in a goroutine
+                       // so it doesn't block crunch-run.
+                       umountExit := make(chan error)
+                       go func() {
+                               mnterr := umount.Wait()
+                               if mnterr != nil {
+                                       runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
+                               }
+                               umountExit <- mnterr
+                       }()
+
+                       for again := true; again; {
+                               again = false
+                               select {
+                               case <-umountExit:
+                                       umount = nil
+                                       again = true
+                               case <-runner.ArvMountExit:
+                                       break
+                               case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
+                                       runner.CrunchLog.Printf("Timed out waiting for unmount")
+                                       if umount != nil {
+                                               umount.Process.Kill()
+                                       }
+                                       runner.ArvMount.Process.Kill()
+                               }
+                       }
                }
+       }
 
-               mnterr := <-runner.ArvMountExit
-               if mnterr != nil {
-                       runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
+       if runner.ArvMountPoint != "" {
+               if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
+                       runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
                }
        }
 
        for _, tmpdir := range runner.CleanupTempDir {
-               rmerr := os.RemoveAll(tmpdir)
-               if rmerr != nil {
+               if rmerr := os.RemoveAll(tmpdir); rmerr != nil {
                        runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", tmpdir, rmerr)
                }
        }
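
The unmount guard above follows a common Go pattern for bounding an external command: Start it, Wait in a goroutine, and race the result against a timer so a wedged FUSE unmount can never hang cleanup. A standalone sketch of the same shape:

    cmd := exec.Command("sleep", "30") // stand-in for "arv-mount --unmount"
    if err := cmd.Start(); err != nil {
        log.Fatal(err)
    }
    done := make(chan error, 1)
    go func() { done <- cmd.Wait() }()
    select {
    case err := <-done:
        log.Printf("unmount exited: %v", err)
    case <-time.After(9 * time.Second):
        log.Print("unmount timed out; killing")
        cmd.Process.Kill()
        <-done // reap the killed process
    }
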
@@ -1234,14 +1437,19 @@ func (runner *ContainerRunner) CleanupDirs() {
 // CommitLogs posts the collection containing the final container logs.
 func (runner *ContainerRunner) CommitLogs() error {
        runner.CrunchLog.Print(runner.finalState)
+
+       if runner.arvMountLog != nil {
+               runner.arvMountLog.Close()
+       }
        runner.CrunchLog.Close()
 
-       // Closing CrunchLog above allows it to be committed to Keep at this
+       // Closing CrunchLog above allows the logs to be committed to Keep at this
        // point, but re-open crunch log with ArvClient in case there are any
-       // other further (such as failing to write the log to Keep!) while
-       // shutting down
+       // further errors (such as failing to write the log to Keep!) while
+       // shutting down.
        runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
                UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
+       runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
 
        if runner.LogsPDH != nil {
                // If we have already assigned something to LogsPDH,
@@ -1328,12 +1536,16 @@ func (runner *ContainerRunner) IsCancelled() bool {
 
 // NewArvLogWriter creates an ArvLogWriter
 func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
-       return &ArvLogWriter{ArvClient: runner.ArvClient, UUID: runner.Container.UUID, loggingStream: name,
-               writeCloser: runner.LogCollection.Open(name + ".txt")}
+       return &ArvLogWriter{
+               ArvClient:     runner.ArvClient,
+               UUID:          runner.Container.UUID,
+               loggingStream: name,
+               writeCloser:   runner.LogCollection.Open(name + ".txt")}
 }
 
 // Run the full container lifecycle.
 func (runner *ContainerRunner) Run() (err error) {
+       runner.CrunchLog.Printf("crunch-run %s started", version)
        runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
 
        hostname, hosterr := os.Hostname()
@@ -1343,12 +1555,16 @@ func (runner *ContainerRunner) Run() (err error) {
                runner.CrunchLog.Printf("Executing on host '%s'", hostname)
        }
 
-       // Clean up temporary directories _after_ finalizing
-       // everything (if we've made any by then)
-       defer runner.CleanupDirs()
-
        runner.finalState = "Queued"
 
+       defer func() {
+               runner.stopSignals()
+               runner.CleanupDirs()
+
+               runner.CrunchLog.Printf("crunch-run finished")
+               runner.CrunchLog.Close()
+       }()
+
        defer func() {
                // checkErr prints e (unless it's nil) and sets err to
                // e (unless err is already non-nil). Thus, if err
@@ -1373,7 +1589,6 @@ func (runner *ContainerRunner) Run() (err error) {
                checkErr(err)
 
                if runner.finalState == "Queued" {
-                       runner.CrunchLog.Close()
                        runner.UpdateContainerFinal()
                        return
                }
@@ -1385,29 +1600,26 @@ func (runner *ContainerRunner) Run() (err error) {
                }
 
                checkErr(runner.CaptureOutput())
+               checkErr(runner.stopHoststat())
                checkErr(runner.CommitLogs())
                checkErr(runner.UpdateContainerFinal())
-
-               // The real log is already closed, but then we opened
-               // a new one in case we needed to log anything while
-               // finalizing.
-               runner.CrunchLog.Close()
-
-               runner.teardown()
        }()
 
        err = runner.fetchContainerRecord()
        if err != nil {
                return
        }
-
-       // setup signal handling
        runner.setupSignals()
+       runner.startHoststat()
 
        // check for and/or load image
        err = runner.LoadImage()
        if err != nil {
-               runner.finalState = "Cancelled"
+               if !runner.checkBrokenNode(err) {
+                       // Failed to load image but not due to a "broken node"
+                       // condition, probably user error.
+                       runner.finalState = "Cancelled"
+               }
                err = fmt.Errorf("While loading container image: %v", err)
                return
        }
@@ -1424,20 +1636,19 @@ func (runner *ContainerRunner) Run() (err error) {
        if err != nil {
                return
        }
-
-       // Gather and record node information
-       err = runner.LogNodeInfo()
+       err = runner.LogHostInfo()
+       if err != nil {
+               return
+       }
+       err = runner.LogNodeRecord()
        if err != nil {
                return
        }
-       // Save container.json record on log collection
        err = runner.LogContainerRecord()
        if err != nil {
                return
        }
 
-       runner.StartCrunchstat()
-
        if runner.IsCancelled() {
                return
        }
@@ -1448,13 +1659,16 @@ func (runner *ContainerRunner) Run() (err error) {
        }
        runner.finalState = "Cancelled"
 
+       runner.startCrunchstat()
+
        err = runner.StartContainer()
        if err != nil {
+               runner.checkBrokenNode(err)
                return
        }
 
        err = runner.WaitFinish()
-       if err == nil {
+       if err == nil && !runner.IsCancelled() {
                runner.finalState = "Complete"
        }
        return
@@ -1513,8 +1727,17 @@ func main() {
                `Set networking mode for container.  Corresponds to Docker network mode (--net).
        `)
        memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
+       getVersion := flag.Bool("version", false, "Print version information and exit.")
        flag.Parse()
 
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("crunch-run %s\n", version)
+               return
+       }
+
+       log.Printf("crunch-run %s started", version)
+
        containerId := flag.Arg(0)
 
        if *caCertsPath != "" {
@@ -1527,25 +1750,25 @@ func main() {
        }
        api.Retries = 8
 
-       var kc *keepclient.KeepClient
-       kc, err = keepclient.MakeKeepClient(api)
-       if err != nil {
-               log.Fatalf("%s: %v", containerId, err)
+       kc, kcerr := keepclient.MakeKeepClient(api)
+       if kcerr != nil {
+               log.Fatalf("%s: %v", containerId, kcerr)
        }
        kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
        kc.Retries = 4
 
-       var docker *dockerclient.Client
        // API version 1.21 corresponds to Docker 1.9, which is currently the
        // minimum version we want to support.
-       docker, err = dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
-       if err != nil {
-               log.Fatalf("%s: %v", containerId, err)
-       }
+       docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
 
-       dockerClientProxy := ThinDockerClientProxy{Docker: docker}
+       cr := NewContainerRunner(api, kc, docker, containerId)
+       if dockererr != nil {
+               cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
+               cr.checkBrokenNode(dockererr)
+               cr.CrunchLog.Close()
+               os.Exit(1)
+       }
 
-       cr := NewContainerRunner(api, kc, dockerClientProxy, containerId)
        cr.statInterval = *statInterval
        cr.cgroupRoot = *cgroupRoot
        cr.expectCgroupParent = *cgroupParent