Merge branch '10666-report-version'
[arvados.git] / services / crunch-run / crunchrun.go
index 810955c0da8b2119dd1cec42fda365a782a37896..f3f754b59d227c3d410ad1255a9a38da1dd9400b 100644
@@ -19,6 +19,9 @@ import (
        "os/signal"
        "path"
        "path/filepath"
+       "regexp"
+       "runtime"
+       "runtime/pprof"
        "sort"
        "strings"
        "sync"
@@ -37,6 +40,8 @@ import (
        dockerclient "github.com/docker/docker/client"
 )
 
+var version = "dev"
+
 // IArvadosClient is the minimal Arvados API methods used by crunch-run.
 type IArvadosClient interface {
        Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
@@ -54,6 +59,7 @@ var ErrCancelled = errors.New("Cancelled")
 type IKeepClient interface {
        PutHB(hash string, buf []byte) (string, int, error)
        ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
+       ClearBlockCache()
 }
 
 // NewLogWriter is a factory function to create a new log writer.
@@ -179,20 +185,23 @@ type ContainerRunner struct {
 
        enableNetwork string // one of "default" or "always"
        networkMode   string // passed through to HostConfig.NetworkMode
+       arvMountLog   *ThrottledLogger
 }
 
-// SetupSignals sets up signal handling to gracefully terminate the underlying
+// setupSignals sets up signal handling to gracefully terminate the underlying
 // Docker container and update state when receiving a TERM, INT or QUIT signal.
-func (runner *ContainerRunner) SetupSignals() {
+func (runner *ContainerRunner) setupSignals() {
        runner.SigChan = make(chan os.Signal, 1)
        signal.Notify(runner.SigChan, syscall.SIGTERM)
        signal.Notify(runner.SigChan, syscall.SIGINT)
        signal.Notify(runner.SigChan, syscall.SIGQUIT)
 
        go func(sig chan os.Signal) {
-               <-sig
+               s := <-sig
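+               // A nil signal means SigChan was closed by stopSignals().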
+               if s != nil {
+                       runner.CrunchLog.Printf("Caught signal %v", s)
+               }
                runner.stop()
-               signal.Stop(sig)
        }(runner.SigChan)
 }
 
@@ -208,9 +217,47 @@ func (runner *ContainerRunner) stop() {
                timeout := time.Duration(10)
                err := runner.Docker.ContainerStop(context.TODO(), runner.ContainerID, &(timeout))
                if err != nil {
-                       log.Printf("StopContainer failed: %s", err)
+                       runner.CrunchLog.Printf("StopContainer failed: %s", err)
+               }
+               // Suppress multiple calls to stop()
+               runner.cStarted = false
+       }
+}
+
+func (runner *ContainerRunner) stopSignals() {
+       if runner.SigChan != nil {
+               signal.Stop(runner.SigChan)
+               close(runner.SigChan)
+       }
+}
+
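+// errorBlacklist matches error messages that suggest the node itself is
+// unable to run containers, e.g. the Docker daemon is not running.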
+var errorBlacklist = []string{
+       "(?ms).*[Cc]annot connect to the Docker daemon.*",
+       "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
+}
+var brokenNodeHook = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
+
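+// checkBrokenNode reports whether the given error matches a pattern in
+// errorBlacklist.  If it does, the broken-node hook (if any) is run so the
+// node can be marked as broken.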
+func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
+       for _, d := range errorBlacklist {
+               if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
+                       runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
+                       if *brokenNodeHook == "" {
+                               runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
+                       } else {
+                               runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
+                               // Run the broken-node hook.
+                               c := exec.Command(*brokenNodeHook)
+                               c.Stdout = runner.CrunchLog
+                               c.Stderr = runner.CrunchLog
+                               err := c.Run()
+                               if err != nil {
+                                       runner.CrunchLog.Printf("Error running broken node hook: %v", err)
+                               }
+                       }
+                       return true
                }
        }
+       return false
 }
 
 // LoadImage determines the docker image id from the container record and
@@ -247,17 +294,25 @@ func (runner *ContainerRunner) LoadImage() (err error) {
                        return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
                }
 
-               response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, false)
+               response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
                if err != nil {
                        return fmt.Errorf("While loading container image into Docker: %v", err)
                }
-               response.Body.Close()
+
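+               // Read Docker's entire response and log it, so that any
+               // problems reported by the image load are visible in the
+               // container log.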
+               defer response.Body.Close()
+               rbody, err := ioutil.ReadAll(response.Body)
+               if err != nil {
+                       return fmt.Errorf("Reading response to image load: %v", err)
+               }
+               runner.CrunchLog.Printf("Docker response: %s", rbody)
        } else {
                runner.CrunchLog.Print("Docker image is available")
        }
 
        runner.ContainerConfig.Image = imageID
 
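+       // The image tarball is no longer needed; clear the Keep block cache
+       // to release the memory it used.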
+       runner.Kc.ClearBlockCache()
+
        return nil
 }
 
@@ -274,9 +329,11 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
        }
        c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
 
-       nt := NewThrottledLogger(runner.NewLogWriter("arv-mount"))
-       c.Stdout = nt
-       c.Stderr = nt
+       runner.arvMountLog = NewThrottledLogger(runner.NewLogWriter("arv-mount"))
+       c.Stdout = runner.arvMountLog
+       c.Stderr = runner.arvMountLog
+
+       runner.CrunchLog.Printf("Running %v", c.Args)
 
        err = c.Start()
        if err != nil {
@@ -300,7 +357,11 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
        }()
 
        go func() {
-               runner.ArvMountExit <- c.Wait()
+               mnterr := c.Wait()
+               if mnterr != nil {
+                       runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
+               }
+               runner.ArvMountExit <- mnterr
                close(runner.ArvMountExit)
        }()
 
@@ -329,11 +390,13 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                return fmt.Errorf("While creating keep mount temp dir: %v", err)
        }
 
-       runner.CleanupTempDir = append(runner.CleanupTempDir, runner.ArvMountPoint)
-
        pdhOnly := true
        tmpcount := 0
-       arvMountCmd := []string{"--foreground", "--allow-other", "--read-write"}
+       arvMountCmd := []string{
+               "--foreground",
+               "--allow-other",
+               "--read-write",
+               fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
 
        if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
                arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
@@ -345,7 +408,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
        needCertMount := true
 
        var binds []string
-       for bind, _ := range runner.Container.Mounts {
+       for bind := range runner.Container.Mounts {
                binds = append(binds, bind)
        }
        sort.Strings(binds)
@@ -585,30 +648,30 @@ type infoCommand struct {
        cmd   []string
 }
 
-// Gather node information and store it on the log for debugging
+// LogNodeInfo gathers node information and stores it in the log for debugging
 // purposes.
 func (runner *ContainerRunner) LogNodeInfo() (err error) {
        w := runner.NewLogWriter("node-info")
        logger := log.New(w, "node-info", 0)
 
        commands := []infoCommand{
-               infoCommand{
+               {
                        label: "Host Information",
                        cmd:   []string{"uname", "-a"},
                },
-               infoCommand{
+               {
                        label: "CPU Information",
                        cmd:   []string{"cat", "/proc/cpuinfo"},
                },
-               infoCommand{
+               {
                        label: "Memory Information",
                        cmd:   []string{"cat", "/proc/meminfo"},
                },
-               infoCommand{
+               {
                        label: "Disk Space",
                        cmd:   []string{"df", "-m", "/", os.TempDir()},
                },
-               infoCommand{
+               {
                        label: "Disk INodes",
                        cmd:   []string{"df", "-i", "/", os.TempDir()},
                },
@@ -635,7 +698,7 @@ func (runner *ContainerRunner) LogNodeInfo() (err error) {
        return nil
 }
 
-// Get and save the raw JSON container record from the API server
+// LogContainerRecord gets and saves the raw JSON container record from the API server
 func (runner *ContainerRunner) LogContainerRecord() (err error) {
        w := &ArvLogWriter{
                ArvClient:     runner.ArvClient,
@@ -650,14 +713,11 @@ func (runner *ContainerRunner) LogContainerRecord() (err error) {
                return fmt.Errorf("While retrieving container record from the API server: %v", err)
        }
        defer reader.Close()
-       // Read the API server response as []byte
-       json_bytes, err := ioutil.ReadAll(reader)
-       if err != nil {
-               return fmt.Errorf("While reading container record API server response: %v", err)
-       }
-       // Decode the JSON []byte
+
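+       // Decode into a generic map; UseNumber() keeps numeric fields as
+       // json.Number instead of float64, so they are not mangled when the
+       // record is re-encoded below.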
+       dec := json.NewDecoder(reader)
+       dec.UseNumber()
        var cr map[string]interface{}
-       if err = json.Unmarshal(json_bytes, &cr); err != nil {
+       if err = dec.Decode(&cr); err != nil {
                return fmt.Errorf("While decoding the container record JSON response: %v", err)
        }
        // Re-encode it using indentation to improve readability
@@ -860,7 +920,11 @@ func (runner *ContainerRunner) StartContainer() error {
        err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
                dockertypes.ContainerStartOptions{})
        if err != nil {
-               return fmt.Errorf("could not start container: %v", err)
+               var advice string
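+               // A "file not found" error at this point usually means the
+               // container command itself could not be executed, so add a
+               // hint for the user.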
+               if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
+                       advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or the script has Windows line endings.", runner.Container.Command[0])
+               }
+               return fmt.Errorf("could not start container: %v%s", err, advice)
        }
        runner.cStarted = true
        return nil
@@ -873,12 +937,23 @@ func (runner *ContainerRunner) WaitFinish() (err error) {
 
        waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, "not-running")
 
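+       // If arv-mount exits while the container is still running, its Keep
+       // mounts are gone, so stop the container.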
+       go func() {
+               <-runner.ArvMountExit
+               if runner.cStarted {
+                       runner.CrunchLog.Printf("arv-mount exited while container is still running.  Stopping container.")
+                       runner.stop()
+               }
+       }()
+
        var waitBody dockercontainer.ContainerWaitOKBody
        select {
        case waitBody = <-waitOk:
        case err = <-waitErr:
        }
 
+       // Container isn't running any more
+       runner.cStarted = false
+
        if err != nil {
                return fmt.Errorf("container wait: %v", err)
        }
@@ -887,21 +962,159 @@ func (runner *ContainerRunner) WaitFinish() (err error) {
        code := int(waitBody.StatusCode)
        runner.ExitCode = &code
 
-       waitMount := runner.ArvMountExit
-       select {
-       case err = <-waitMount:
-               runner.CrunchLog.Printf("arv-mount exited before container finished: %v", err)
-               waitMount = nil
-               runner.stop()
-       default:
-       }
-
        // wait for stdout/stderr to complete
        <-runner.loggingDone
 
        return nil
 }
 
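+// ErrNotInOutputDir is returned when a symlink in the output directory
+// resolves to a path outside both the output directory and all collection
+// mounts.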
+var ErrNotInOutputDir = errors.New("Must point to path within the output directory")
+
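+// derefOutputSymlink follows the chain of symlinks starting at path (up to
+// limitFollowSymlinks links), returning the final target, the last readlink
+// result, and the target's FileInfo.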
+func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
+       // Follow symlinks if necessary
+       info = startinfo
+       tgt = path
+       readlinktgt = ""
+       nextlink := path
+       for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
+               if followed >= limitFollowSymlinks {
+                       // Got stuck in a loop or just a pathological number of links, give up.
+                       err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
+                       return
+               }
+
+               readlinktgt, err = os.Readlink(nextlink)
+               if err != nil {
+                       return
+               }
+
+               tgt = readlinktgt
+               if !strings.HasPrefix(tgt, "/") {
+                       // Relative symlink, resolve it to host path
+                       tgt = filepath.Join(filepath.Dir(path), tgt)
+               }
+               if strings.HasPrefix(tgt, runner.Container.OutputPath+"/") && !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
+                       // Absolute symlink to container output path, adjust it to host output path.
+                       tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
+               }
+               if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
+                       // After dereferencing, symlink target must either be
+                       // within output directory, or must point to a
+                       // collection mount.
+                       err = ErrNotInOutputDir
+                       return
+               }
+
+               info, err = os.Lstat(tgt)
+               if err != nil {
+                       err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
+                               path[len(runner.HostOutputDir):], readlinktgt, err)
+                       return
+               }
+
+               nextlink = tgt
+       }
+
+       return
+}
+
+var limitFollowSymlinks = 10
+
+// UploadOutputFile uploads a file from the output directory, with special
+// handling for symlinks. If a symlink leads to a keep mount, copy the
+// manifest text from the keep mount into the output manifestText.  Whether
+// symlinks are relative or absolute, every symlink target (even a target
+// that is itself a symlink) must point to a path in either the output
+// directory or a collection mount.
+//
+// Assumes the initial value of "path" is absolute and located within runner.HostOutputDir.
+func (runner *ContainerRunner) UploadOutputFile(
+       path string,
+       info os.FileInfo,
+       infoerr error,
+       binds []string,
+       walkUpload *WalkUpload,
+       relocateFrom string,
+       relocateTo string,
+       followed int) (manifestText string, err error) {
+
+       if info.Mode().IsDir() {
+               return
+       }
+
+       if infoerr != nil {
+               return "", infoerr
+       }
+
+       if followed >= limitFollowSymlinks {
+               // Got stuck in a loop or just a pathological number of
+               // directory links, give up.
+               err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
+               return
+       }
+
+       // When following symlinks, the source path may need to be logically
+       // relocated to some other path within the output collection.  Remove
+       // the relocateFrom prefix and replace it with relocateTo.
+       relocated := relocateTo + path[len(relocateFrom):]
+
+       tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
+       if derefErr != nil && derefErr != ErrNotInOutputDir {
+               return "", derefErr
+       }
+
+       // go through mounts and try reverse map to collection reference
+       for _, bind := range binds {
+               mnt := runner.Container.Mounts[bind]
+               if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
+                       // get path relative to bind
+                       targetSuffix := tgt[len(bind):]
+
+                       // Copy mount and adjust the path to add path relative to the bind
+                       adjustedMount := mnt
+                       adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
+
+                       // Terminates in this keep mount, so add the
+                       // manifest text at appropriate location.
+                       outputSuffix := path[len(runner.HostOutputDir):]
+                       manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
+                       return
+               }
+       }
+
+       // If target is not a collection mount, it must be located within the
+       // output directory, otherwise it is an error.
+       if derefErr == ErrNotInOutputDir {
+               err = fmt.Errorf("Symlink in output %q points to invalid location %q; it must point to a path within the output directory.",
+                       path[len(runner.HostOutputDir):], readlinktgt)
+               return
+       }
+
+       if info.Mode().IsRegular() {
+               return "", walkUpload.UploadFile(relocated, tgt)
+       }
+
+       if info.Mode().IsDir() {
+               // Symlink leads to directory.  Walk() doesn't follow
+               // directory symlinks, so we walk the target directory
+               // instead.  Within the walk, file paths are relocated
+               // so they appear under the original symlink path.
+               err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
+                       var m string
+                       m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
+                               binds, walkUpload, tgt, relocated, followed+1)
+                       if walkerr == nil {
+                               manifestText = manifestText + m
+                       }
+                       return walkerr
+               })
+               return
+       }
+
+       return
+}
+
 // HandleOutput sets the output, unmounts the FUSE mount, and deletes temporary directories
 func (runner *ContainerRunner) CaptureOutput() error {
        if runner.finalState != "Complete" {
@@ -948,74 +1161,25 @@ func (runner *ContainerRunner) CaptureOutput() error {
        if err != nil {
                // Regular directory
 
-               // Find symlinks to arv-mounted files & dirs.
-               err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
-                       if err != nil {
-                               return err
-                       }
-                       if info.Mode()&os.ModeSymlink == 0 {
-                               return nil
-                       }
-                       // read link to get container internal path
-                       // only support 1 level of symlinking here.
-                       var tgt string
-                       tgt, err = os.Readlink(path)
-                       if err != nil {
-                               return err
-                       }
-
-                       // get path relative to output dir
-                       outputSuffix := path[len(runner.HostOutputDir):]
+               cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
+               walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)
 
-                       if strings.HasPrefix(tgt, "/") {
-                               // go through mounts and try reverse map to collection reference
-                               for _, bind := range binds {
-                                       mnt := runner.Container.Mounts[bind]
-                                       if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
-                                               // get path relative to bind
-                                               targetSuffix := tgt[len(bind):]
-
-                                               // Copy mount and adjust the path to add path relative to the bind
-                                               adjustedMount := mnt
-                                               adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
-
-                                               // get manifest text
-                                               var m string
-                                               m, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
-                                               if err != nil {
-                                                       return err
-                                               }
-                                               manifestText = manifestText + m
-                                               // delete symlink so WriteTree won't try to to dereference it.
-                                               os.Remove(path)
-                                               return nil
-                                       }
-                               }
+               var m string
+               err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
+                       m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
+                       if err == nil {
+                               manifestText = manifestText + m
                        }
+                       return err
+               })
 
-                       // Not a link to a mount.  Must be dereferencible and
-                       // point into the output directory.
-                       tgt, err = filepath.EvalSymlinks(path)
-                       if err != nil {
-                               os.Remove(path)
-                               return err
-                       }
+               cw.EndUpload(walkUpload)
 
-                       // Symlink target must be within the output directory otherwise it's an error.
-                       if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
-                               os.Remove(path)
-                               return fmt.Errorf("Output directory symlink %q points to invalid location %q, must point to mount or output directory.",
-                                       outputSuffix, tgt)
-                       }
-                       return nil
-               })
                if err != nil {
-                       return fmt.Errorf("While checking output symlinks: %v", err)
+                       return fmt.Errorf("While uploading output files: %v", err)
                }
 
-               cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
-               var m string
-               m, err = cw.WriteTree(runner.HostOutputDir, runner.CrunchLog.Logger)
+               m, err = cw.ManifestText()
                manifestText = manifestText + m
                if err != nil {
                        return fmt.Errorf("While uploading output files: %v", err)
@@ -1121,21 +1285,55 @@ func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, b
 
 func (runner *ContainerRunner) CleanupDirs() {
        if runner.ArvMount != nil {
-               umount := exec.Command("fusermount", "-z", "-u", runner.ArvMountPoint)
-               umnterr := umount.Run()
+               var delay int64 = 8
+               umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
+               umount.Stdout = runner.CrunchLog
+               umount.Stderr = runner.CrunchLog
+               runner.CrunchLog.Printf("Running %v", umount.Args)
+               umnterr := umount.Start()
+
                if umnterr != nil {
-                       runner.CrunchLog.Printf("While running fusermount: %v", umnterr)
+                       runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
+               } else {
+                       // If arv-mount --unmount gets stuck for any reason, we
+                       // don't want to wait for it forever.  Do Wait() in a goroutine
+                       // so it doesn't block crunch-run.
+                       umountExit := make(chan error)
+                       go func() {
+                               mnterr := umount.Wait()
+                               if mnterr != nil {
+                                       runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
+                               }
+                               umountExit <- mnterr
+                       }()
+
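+                       // Wait for the unmount command to finish, then for
+                       // arv-mount itself to exit.  If the timeout expires
+                       // first, kill whatever is still running and stop
+                       // waiting.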
+                       for again := true; again; {
+                               again = false
+                               select {
+                               case <-umountExit:
+                                       umount = nil
+                                       again = true
+                               case <-runner.ArvMountExit:
+                                       break
+                               case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
+                                       runner.CrunchLog.Printf("Timed out waiting for unmount")
+                                       if umount != nil {
+                                               umount.Process.Kill()
+                                       }
+                                       runner.ArvMount.Process.Kill()
+                               }
+                       }
                }
+       }
 
-               mnterr := <-runner.ArvMountExit
-               if mnterr != nil {
-                       runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
+       if runner.ArvMountPoint != "" {
+               if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
+                       runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
                }
        }
 
        for _, tmpdir := range runner.CleanupTempDir {
-               rmerr := os.RemoveAll(tmpdir)
-               if rmerr != nil {
+               if rmerr := os.RemoveAll(tmpdir); rmerr != nil {
                        runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", tmpdir, rmerr)
                }
        }
@@ -1144,14 +1342,19 @@ func (runner *ContainerRunner) CleanupDirs() {
 // CommitLogs posts the collection containing the final container logs.
 func (runner *ContainerRunner) CommitLogs() error {
        runner.CrunchLog.Print(runner.finalState)
+
+       if runner.arvMountLog != nil {
+               runner.arvMountLog.Close()
+       }
        runner.CrunchLog.Close()
 
-       // Closing CrunchLog above allows it to be committed to Keep at this
+       // Closing the logs above allows them to be committed to Keep at this
        // point, but re-open crunch log with ArvClient in case there are any
-       // other further (such as failing to write the log to Keep!) while
-       // shutting down
+       // further errors (such as failing to write the log to Keep!) while
+       // shutting down.
        runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
                UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
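+       // Echo further log messages to stderr as well, in case writing the
+       // log to Keep is itself what fails during shutdown.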
+       runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
 
        if runner.LogsPDH != nil {
                // If we have already assigned something to LogsPDH,
@@ -1238,12 +1441,16 @@ func (runner *ContainerRunner) IsCancelled() bool {
 
 // NewArvLogWriter creates an ArvLogWriter
 func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
-       return &ArvLogWriter{ArvClient: runner.ArvClient, UUID: runner.Container.UUID, loggingStream: name,
-               writeCloser: runner.LogCollection.Open(name + ".txt")}
+       return &ArvLogWriter{
+               ArvClient:     runner.ArvClient,
+               UUID:          runner.Container.UUID,
+               loggingStream: name,
+               writeCloser:   runner.LogCollection.Open(name + ".txt")}
 }
 
 // Run the full container lifecycle.
 func (runner *ContainerRunner) Run() (err error) {
+       runner.CrunchLog.Printf("crunch-run %s started", version)
        runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
 
        hostname, hosterr := os.Hostname()
@@ -1253,12 +1460,16 @@ func (runner *ContainerRunner) Run() (err error) {
                runner.CrunchLog.Printf("Executing on host '%s'", hostname)
        }
 
-       // Clean up temporary directories _after_ finalizing
-       // everything (if we've made any by then)
-       defer runner.CleanupDirs()
-
        runner.finalState = "Queued"
 
+       defer func() {
+               runner.stopSignals()
+               runner.CleanupDirs()
+
+               runner.CrunchLog.Printf("crunch-run finished")
+               runner.CrunchLog.Close()
+       }()
+
        defer func() {
                // checkErr prints e (unless it's nil) and sets err to
                // e (unless err is already non-nil). Thus, if err
@@ -1283,7 +1494,6 @@ func (runner *ContainerRunner) Run() (err error) {
                checkErr(err)
 
                if runner.finalState == "Queued" {
-                       runner.CrunchLog.Close()
                        runner.UpdateContainerFinal()
                        return
                }
@@ -1297,26 +1507,24 @@ func (runner *ContainerRunner) Run() (err error) {
                checkErr(runner.CaptureOutput())
                checkErr(runner.CommitLogs())
                checkErr(runner.UpdateContainerFinal())
-
-               // The real log is already closed, but then we opened
-               // a new one in case we needed to log anything while
-               // finalizing.
-               runner.CrunchLog.Close()
        }()
 
-       err = runner.ArvClient.Get("containers", runner.Container.UUID, nil, &runner.Container)
+       err = runner.fetchContainerRecord()
        if err != nil {
-               err = fmt.Errorf("While getting container record: %v", err)
                return
        }
 
        // setup signal handling
-       runner.SetupSignals()
+       runner.setupSignals()
 
        // check for and/or load image
        err = runner.LoadImage()
        if err != nil {
-               runner.finalState = "Cancelled"
+               if !runner.checkBrokenNode(err) {
+                       // Failed to load image but not due to a "broken node"
+                       // condition, probably user error.
+                       runner.finalState = "Cancelled"
+               }
                err = fmt.Errorf("While loading container image: %v", err)
                return
        }
@@ -1345,8 +1553,6 @@ func (runner *ContainerRunner) Run() (err error) {
                return
        }
 
-       runner.StartCrunchstat()
-
        if runner.IsCancelled() {
                return
        }
@@ -1357,8 +1563,11 @@ func (runner *ContainerRunner) Run() (err error) {
        }
        runner.finalState = "Cancelled"
 
+       runner.StartCrunchstat()
+
        err = runner.StartContainer()
        if err != nil {
+               runner.checkBrokenNode(err)
                return
        }
 
@@ -1369,6 +1578,24 @@ func (runner *ContainerRunner) Run() (err error) {
        return
 }
 
+// fetchContainerRecord fetches the current container record (uuid =
+// runner.Container.UUID) into runner.Container.
+func (runner *ContainerRunner) fetchContainerRecord() error {
+       reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+       if err != nil {
+               return fmt.Errorf("error fetching container record: %v", err)
+       }
+       defer reader.Close()
+
+       dec := json.NewDecoder(reader)
+       dec.UseNumber()
+       err = dec.Decode(&runner.Container)
+       if err != nil {
+               return fmt.Errorf("error decoding container record: %v", err)
+       }
+       return nil
+}
+
 // NewContainerRunner creates a new container runner.
 func NewContainerRunner(api IArvadosClient,
        kc IKeepClient,
@@ -1403,8 +1630,18 @@ func main() {
        networkMode := flag.String("container-network-mode", "default",
                `Set networking mode for container.  Corresponds to Docker network mode (--net).
        `)
+       memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
+       getVersion := flag.Bool("version", false, "Print version information and exit.")
        flag.Parse()
 
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("crunch-run %s\n", version)
+               return
+       }
+
+       log.Printf("crunch-run %s started", version)
+
        containerId := flag.Arg(0)
 
        if *caCertsPath != "" {
@@ -1417,24 +1654,27 @@ func main() {
        }
        api.Retries = 8
 
-       var kc *keepclient.KeepClient
-       kc, err = keepclient.MakeKeepClient(api)
-       if err != nil {
-               log.Fatalf("%s: %v", containerId, err)
+       kc, kcerr := keepclient.MakeKeepClient(api)
+       if kcerr != nil {
+               log.Fatalf("%s: %v", containerId, kcerr)
        }
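+       // A small block cache is enough here and keeps crunch-run's memory
+       // footprint down; the cache is cleared after the docker image is
+       // loaded (see LoadImage).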
+       kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
        kc.Retries = 4
 
-       var docker *dockerclient.Client
        // API version 1.21 corresponds to Docker 1.9, which is currently the
        // minimum version we want to support.
-       docker, err = dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
-       if err != nil {
-               log.Fatalf("%s: %v", containerId, err)
-       }
-
+       docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
        dockerClientProxy := ThinDockerClientProxy{Docker: docker}
 
        cr := NewContainerRunner(api, kc, dockerClientProxy, containerId)
+
+       if dockererr != nil {
+               cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
+               cr.checkBrokenNode(dockererr)
+               cr.CrunchLog.Close()
+               os.Exit(1)
+       }
+
        cr.statInterval = *statInterval
        cr.cgroupRoot = *cgroupRoot
        cr.expectCgroupParent = *cgroupParent
@@ -1446,9 +1686,24 @@ func main() {
                cr.expectCgroupParent = p
        }
 
-       err = cr.Run()
-       if err != nil {
-               log.Fatalf("%s: %v", containerId, err)
+       runerr := cr.Run()
+
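+       // Dump a memory profile (if requested) after the container has run,
+       // whether or not Run() failed.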
+       if *memprofile != "" {
+               f, err := os.Create(*memprofile)
+               if err != nil {
+                       log.Printf("could not create memory profile: %v", err)
+               } else {
+                       runtime.GC() // get up-to-date statistics
+                       if err := pprof.WriteHeapProfile(f); err != nil {
+                               log.Printf("could not write memory profile: %v", err)
+                       }
+                       if err := f.Close(); err != nil {
+                               log.Printf("error closing memprofile file: %v", err)
+                       }
+               }
        }
 
+       if runerr != nil {
+               log.Fatalf("%s: %v", containerId, runerr)
+       }
 }