14360: Merge branch 'master'
[arvados.git] / services / crunch-run / crunchrun.go
index 53815cbe1c8222d4e6c9614ce889d649224af7e1..1c6c58009fac71bd5fa5015a4922821937e61d5a 100644 (file)
@@ -57,13 +57,15 @@ var ErrCancelled = errors.New("Cancelled")
 
 // IKeepClient is the minimal Keep API methods used by crunch-run.
 type IKeepClient interface {
-       PutHB(hash string, buf []byte) (string, int, error)
+       PutB(buf []byte) (string, int, error)
+       ReadAt(locator string, p []byte, off int) (int, error)
        ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
+       LocalLocator(locator string) (string, error)
        ClearBlockCache()
 }
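
The expanded interface replaces the hash-checking PutHB with PutB, and adds random-access reads (ReadAt) plus LocalLocator, which resolves a block locator (for example one carrying a remote +R hint) into one usable on the local cluster. A minimal sketch of a caller, purely illustrative (the helper name and error handling are assumptions, not part of this change):

    func fetchBlock(kc IKeepClient, locator string, size int) ([]byte, error) {
            // Resolve to a locally-usable locator before reading.
            local, err := kc.LocalLocator(locator)
            if err != nil {
                    return nil, err
            }
            buf := make([]byte, size)
            n, err := kc.ReadAt(local, buf, 0)
            if err != nil && err != io.EOF {
                    return nil, err
            }
            return buf[:n], nil
    }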
 
 // NewLogWriter is a factory function to create a new log writer.
-type NewLogWriter func(name string) io.WriteCloser
+type NewLogWriter func(name string) (io.WriteCloser, error)
 
 type RunArvMount func(args []string, tok string) (*exec.Cmd, error)
 
@@ -77,44 +79,72 @@ type ThinDockerClient interface {
        ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
        ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
        ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
+       ContainerInspect(ctx context.Context, id string) (dockertypes.ContainerJSON, error)
        ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
        ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
        ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
 }
 
+type PsProcess interface {
+       CmdlineSlice() ([]string, error)
+}
+
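PsProcess abstracts just enough of a process handle (the command line) for process-listing code and its tests; any type with a CmdlineSlice method satisfies it. A hedged stub implementation, purely illustrative:

    type stubProc struct{ args []string }

    func (p stubProc) CmdlineSlice() ([]string, error) { return p.args, nil }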
 // ContainerRunner is the main stateful struct used for a single execution of a
 // container.
 type ContainerRunner struct {
-       Docker    ThinDockerClient
-       ArvClient IArvadosClient
-       Kc        IKeepClient
-       arvados.Container
+       Docker ThinDockerClient
+
+       // Dispatcher client is initialized with the Dispatcher token.
+       // This is a privileged token used to manage container status
+       // and logs.
+       //
+       // We have both dispatcherClient and DispatcherArvClient
+       // because there are two different incompatible Arvados Go
+       // SDKs and we have to use both (hopefully this gets fixed in
+       // #14467).
+       dispatcherClient     *arvados.Client
+       DispatcherArvClient  IArvadosClient
+       DispatcherKeepClient IKeepClient
+
+       // Container client is initialized with the Container token.
+       // This token controls the permissions of the container, and
+       // must be used for operations such as reading collections.
+       //
+       // Same comment as above applies to
+       // containerClient/ContainerArvClient.
+       containerClient     *arvados.Client
+       ContainerArvClient  IArvadosClient
+       ContainerKeepClient IKeepClient
+
+       Container       arvados.Container
        ContainerConfig dockercontainer.Config
-       dockercontainer.HostConfig
-       token       string
-       ContainerID string
-       ExitCode    *int
-       NewLogWriter
-       loggingDone   chan bool
-       CrunchLog     *ThrottledLogger
-       Stdout        io.WriteCloser
-       Stderr        io.WriteCloser
-       LogCollection *CollectionWriter
-       LogsPDH       *string
-       RunArvMount
-       MkTempDir
-       ArvMount      *exec.Cmd
-       ArvMountPoint string
-       HostOutputDir string
-       Binds         []string
-       Volumes       map[string]struct{}
-       OutputPDH     *string
-       SigChan       chan os.Signal
-       ArvMountExit  chan error
-       SecretMounts  map[string]arvados.Mount
-       MkArvClient   func(token string) (IArvadosClient, error)
-       finalState    string
-       parentTemp    string
+       HostConfig      dockercontainer.HostConfig
+       token           string
+       ContainerID     string
+       ExitCode        *int
+       NewLogWriter    NewLogWriter
+       loggingDone     chan bool
+       CrunchLog       *ThrottledLogger
+       Stdout          io.WriteCloser
+       Stderr          io.WriteCloser
+       logUUID         string
+       logMtx          sync.Mutex
+       LogCollection   arvados.CollectionFileSystem
+       LogsPDH         *string
+       RunArvMount     RunArvMount
+       MkTempDir       MkTempDir
+       ArvMount        *exec.Cmd
+       ArvMountPoint   string
+       HostOutputDir   string
+       Binds           []string
+       Volumes         map[string]struct{}
+       OutputPDH       *string
+       SigChan         chan os.Signal
+       ArvMountExit    chan error
+       SecretMounts    map[string]arvados.Mount
+       MkArvClient     func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
+       finalState      string
+       parentTemp      string
 
        statLogger       io.WriteCloser
        statReporter     *crunchstat.Reporter
@@ -138,10 +168,13 @@ type ContainerRunner struct {
 
        cStateLock sync.Mutex
        cCancelled bool // StopContainer() invoked
+       cRemoved   bool // docker confirmed the container no longer exists
 
        enableNetwork string // one of "default" or "always"
        networkMode   string // passed through to HostConfig.NetworkMode
        arvMountLog   *ThrottledLogger
+
+       containerWatchdogInterval time.Duration
 }
 
 // setupSignals sets up signal handling to gracefully terminate the underlying
@@ -175,31 +208,39 @@ func (runner *ContainerRunner) stop(sig os.Signal) {
        if err != nil {
                runner.CrunchLog.Printf("error removing container: %s", err)
        }
+       if err == nil || strings.Contains(err.Error(), "No such container: "+runner.ContainerID) {
+               runner.cRemoved = true
+       }
 }
 
 var errorBlacklist = []string{
        "(?ms).*[Cc]annot connect to the Docker daemon.*",
        "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
+       "(?ms).*grpc: the connection is unavailable.*",
 }
 var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
 
+func (runner *ContainerRunner) runBrokenNodeHook() {
+       if *brokenNodeHook == "" {
+               runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
+       } else {
+               runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
+               // Run the operator-supplied "broken node" hook script.
+               c := exec.Command(*brokenNodeHook)
+               c.Stdout = runner.CrunchLog
+               c.Stderr = runner.CrunchLog
+               err := c.Run()
+               if err != nil {
+                       runner.CrunchLog.Printf("Error running broken node hook: %v", err)
+               }
+       }
+}
+
 func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
        for _, d := range errorBlacklist {
                if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
                        runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
-                       if *brokenNodeHook == "" {
-                               runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
-                       } else {
-                               runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
-                               // run killme script
-                               c := exec.Command(*brokenNodeHook)
-                               c.Stdout = runner.CrunchLog
-                               c.Stderr = runner.CrunchLog
-                               err := c.Run()
-                               if err != nil {
-                                       runner.CrunchLog.Printf("Error running broken node hook: %v", err)
-                               }
-                       }
+                       runner.runBrokenNodeHook()
                        return true
                }
        }
@@ -214,7 +255,7 @@ func (runner *ContainerRunner) LoadImage() (err error) {
        runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
 
        var collection arvados.Collection
-       err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
+       err = runner.ContainerArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
        if err != nil {
                return fmt.Errorf("While getting container image collection: %v", err)
        }
@@ -235,7 +276,7 @@ func (runner *ContainerRunner) LoadImage() (err error) {
                runner.CrunchLog.Print("Loading Docker image from keep")
 
                var readCloser io.ReadCloser
-               readCloser, err = runner.Kc.ManifestFileReader(manifest, img)
+               readCloser, err = runner.ContainerKeepClient.ManifestFileReader(manifest, img)
                if err != nil {
                        return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
                }
@@ -257,7 +298,7 @@ func (runner *ContainerRunner) LoadImage() (err error) {
 
        runner.ContainerConfig.Image = imageID
 
-       runner.Kc.ClearBlockCache()
+       runner.ContainerKeepClient.ClearBlockCache()
 
        return nil
 }
@@ -275,7 +316,11 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
        }
        c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
 
-       runner.arvMountLog = NewThrottledLogger(runner.NewLogWriter("arv-mount"))
+       w, err := runner.NewLogWriter("arv-mount")
+       if err != nil {
+               return nil, err
+       }
+       runner.arvMountLog = NewThrottledLogger(w)
        c.Stdout = runner.arvMountLog
        c.Stderr = runner.arvMountLog
 
@@ -554,7 +599,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        if err != nil {
                                return fmt.Errorf("creating temp dir: %v", err)
                        }
-                       err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
+                       err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
                        if err != nil {
                                return err
                        }
@@ -696,26 +741,37 @@ func (runner *ContainerRunner) stopHoststat() error {
        return nil
 }
 
-func (runner *ContainerRunner) startHoststat() {
-       runner.hoststatLogger = NewThrottledLogger(runner.NewLogWriter("hoststat"))
+func (runner *ContainerRunner) startHoststat() error {
+       w, err := runner.NewLogWriter("hoststat")
+       if err != nil {
+               return err
+       }
+       runner.hoststatLogger = NewThrottledLogger(w)
        runner.hoststatReporter = &crunchstat.Reporter{
                Logger:     log.New(runner.hoststatLogger, "", 0),
                CgroupRoot: runner.cgroupRoot,
                PollPeriod: runner.statInterval,
        }
        runner.hoststatReporter.Start()
+       return nil
 }
 
-func (runner *ContainerRunner) startCrunchstat() {
-       runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
+func (runner *ContainerRunner) startCrunchstat() error {
+       w, err := runner.NewLogWriter("crunchstat")
+       if err != nil {
+               return err
+       }
+       runner.statLogger = NewThrottledLogger(w)
        runner.statReporter = &crunchstat.Reporter{
                CID:          runner.ContainerID,
                Logger:       log.New(runner.statLogger, "", 0),
                CgroupParent: runner.expectCgroupParent,
                CgroupRoot:   runner.cgroupRoot,
                PollPeriod:   runner.statInterval,
+               TempDir:      runner.parentTemp,
        }
        runner.statReporter.Start()
+       return nil
 }
 
 type infoCommand struct {
@@ -729,7 +785,10 @@ type infoCommand struct {
 // might differ from what's described in the node record (see
 // LogNodeRecord).
 func (runner *ContainerRunner) LogHostInfo() (err error) {
-       w := runner.NewLogWriter("node-info")
+       w, err := runner.NewLogWriter("node-info")
+       if err != nil {
+               return
+       }
 
        commands := []infoCommand{
                {
@@ -802,14 +861,18 @@ func (runner *ContainerRunner) LogNodeRecord() error {
 }
 
 func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
+       writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
+       if err != nil {
+               return false, err
+       }
        w := &ArvLogWriter{
-               ArvClient:     runner.ArvClient,
+               ArvClient:     runner.DispatcherArvClient,
                UUID:          runner.Container.UUID,
                loggingStream: label,
-               writeCloser:   runner.LogCollection.Open(label + ".json"),
+               writeCloser:   writer,
        }
 
-       reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
+       reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
        if err != nil {
                return false, fmt.Errorf("error getting %s record: %v", label, err)
        }
@@ -859,12 +922,14 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                        if collId == "" {
                                collId = stdinMnt.PortableDataHash
                        }
-                       err = runner.ArvClient.Get("collections", collId, nil, &stdinColl)
+                       err = runner.ContainerArvClient.Get("collections", collId, nil, &stdinColl)
                        if err != nil {
-                               return fmt.Errorf("While getting stding collection: %v", err)
+                               return fmt.Errorf("While getting stdin collection: %v", err)
                        }
 
-                       stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path)
+                       stdinRdr, err = runner.ContainerKeepClient.ManifestFileReader(
+                               manifest.Manifest{Text: stdinColl.ManifestText},
+                               stdinMnt.Path)
                        if os.IsNotExist(err) {
                                return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
                        } else if err != nil {
@@ -893,8 +958,10 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                        return err
                }
                runner.Stdout = stdoutFile
+       } else if w, err := runner.NewLogWriter("stdout"); err != nil {
+               return err
        } else {
-               runner.Stdout = NewThrottledLogger(runner.NewLogWriter("stdout"))
+               runner.Stdout = NewThrottledLogger(w)
        }
 
        if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
@@ -903,8 +970,10 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                        return err
                }
                runner.Stderr = stderrFile
+       } else if w, err := runner.NewLogWriter("stderr"); err != nil {
+               return err
        } else {
-               runner.Stderr = NewThrottledLogger(runner.NewLogWriter("stderr"))
+               runner.Stderr = NewThrottledLogger(w)
        }
 
        if stdinRdr != nil {
@@ -974,6 +1043,10 @@ func (runner *ContainerRunner) CreateContainer() error {
        runner.ContainerConfig.Volumes = runner.Volumes
 
        maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
+       if maxRAM < 4*1024*1024 {
+               // Docker daemon won't let you set a limit less than 4 MiB
+               maxRAM = 4 * 1024 * 1024
+       }
        runner.HostConfig = dockercontainer.HostConfig{
                Binds: runner.Binds,
                LogConfig: dockercontainer.LogConfig{
@@ -1047,10 +1120,41 @@ func (runner *ContainerRunner) StartContainer() error {
 // WaitFinish waits for the container to terminate, capture the exit code, and
 // close the stdout/stderr logging.
 func (runner *ContainerRunner) WaitFinish() error {
+       var runTimeExceeded <-chan time.Time
        runner.CrunchLog.Print("Waiting for container to finish")
 
        waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
        arvMountExit := runner.ArvMountExit
+       if timeout := runner.Container.SchedulingParameters.MaxRunTime; timeout > 0 {
+               runTimeExceeded = time.After(time.Duration(timeout) * time.Second)
+       }
+
+       containerGone := make(chan struct{})
+       go func() {
+               defer close(containerGone)
+               if runner.containerWatchdogInterval < 1 {
+                       runner.containerWatchdogInterval = time.Minute
+               }
+               for range time.NewTicker(runner.containerWatchdogInterval).C {
+                       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(runner.containerWatchdogInterval))
+                       ctr, err := runner.Docker.ContainerInspect(ctx, runner.ContainerID)
+                       cancel()
+                       runner.cStateLock.Lock()
+                       done := runner.cRemoved || runner.ExitCode != nil
+                       runner.cStateLock.Unlock()
+                       if done {
+                               return
+                       } else if err != nil {
+                               runner.CrunchLog.Printf("Error inspecting container: %s", err)
+                               runner.checkBrokenNode(err)
+                               return
+                       } else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == "created") {
+                               runner.CrunchLog.Printf("Container is not running: State=%v", ctr.State)
+                               return
+                       }
+               }
+       }()
+
        for {
                select {
                case waitBody := <-waitOk:
@@ -1071,190 +1175,73 @@ func (runner *ContainerRunner) WaitFinish() error {
                        // arvMountExit will always be ready now that
                        // it's closed, but that doesn't interest us.
                        arvMountExit = nil
+
+               case <-runTimeExceeded:
+                       runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
+                       runner.stop(nil)
+                       runTimeExceeded = nil
+
+               case <-containerGone:
+                       return errors.New("docker client never returned status")
                }
        }
 }
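
The watchdog goroutine above guards against a failure mode where Docker's ContainerWait never returns even though the container is gone: it polls ContainerInspect every containerWatchdogInterval (defaulting to one minute) and turns a vanished container into the explicit "docker client never returned status" error. The same polling skeleton in isolation, a sketch with the inspect call abstracted behind a callback:

    func watchdog(interval time.Duration, stillRunning func() (bool, error)) <-chan struct{} {
            gone := make(chan struct{})
            go func() {
                    defer close(gone)
                    ticker := time.NewTicker(interval)
                    defer ticker.Stop()
                    for range ticker.C {
                            ok, err := stillRunning()
                            if err != nil || !ok {
                                    return // caller sees the channel close
                            }
                    }
            }()
            return gone
    }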
 
-var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")
-
-func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
-       // Follow symlinks if necessary
-       info = startinfo
-       tgt = path
-       readlinktgt = ""
-       nextlink := path
-       for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
-               if followed >= limitFollowSymlinks {
-                       // Got stuck in a loop or just a pathological number of links, give up.
-                       err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
-                       return
-               }
+func (runner *ContainerRunner) updateLogs() {
+       ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
+       defer ticker.Stop()
 
-               readlinktgt, err = os.Readlink(nextlink)
-               if err != nil {
-                       return
-               }
+       sigusr1 := make(chan os.Signal, 1)
+       signal.Notify(sigusr1, syscall.SIGUSR1)
+       defer signal.Stop(sigusr1)
 
-               tgt = readlinktgt
-               if !strings.HasPrefix(tgt, "/") {
-                       // Relative symlink, resolve it to host path
-                       tgt = filepath.Join(filepath.Dir(path), tgt)
-               }
-               if strings.HasPrefix(tgt, runner.Container.OutputPath+"/") && !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
-                       // Absolute symlink to container output path, adjust it to host output path.
-                       tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
-               }
-               if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
-                       // After dereferencing, symlink target must either be
-                       // within output directory, or must point to a
-                       // collection mount.
-                       err = ErrNotInOutputDir
-                       return
-               }
-
-               info, err = os.Lstat(tgt)
-               if err != nil {
-                       // tgt
-                       err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
-                               path[len(runner.HostOutputDir):], readlinktgt, err)
+       saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
+       saveAtSize := crunchLogUpdateSize
+       var savedSize int64
+       for {
+               select {
+               case <-ticker.C:
+               case <-sigusr1:
+                       saveAtTime = time.Now()
+               }
+               runner.logMtx.Lock()
+               done := runner.LogsPDH != nil
+               runner.logMtx.Unlock()
+               if done {
                        return
                }
-
-               nextlink = tgt
-       }
-
-       return
-}
-
-var limitFollowSymlinks = 10
-
-// UploadFile uploads files within the output directory, with special handling
-// for symlinks. If the symlink leads to a keep mount, copy the manifest text
-// from the keep mount into the output manifestText.  Ensure that whether
-// symlinks are relative or absolute, every symlink target (even targets that
-// are symlinks themselves) must point to a path in either the output directory
-// or a collection mount.
-//
-// Assumes initial value of "path" is absolute, and located within runner.HostOutputDir.
-func (runner *ContainerRunner) UploadOutputFile(
-       path string,
-       info os.FileInfo,
-       infoerr error,
-       binds []string,
-       walkUpload *WalkUpload,
-       relocateFrom string,
-       relocateTo string,
-       followed int) (manifestText string, err error) {
-
-       if infoerr != nil {
-               return "", infoerr
-       }
-
-       if info.Mode().IsDir() {
-               // if empty, need to create a .keep file
-               dir, direrr := os.Open(path)
-               if direrr != nil {
-                       return "", direrr
+               size := runner.LogCollection.Size()
+               if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
+                       continue
                }
-               defer dir.Close()
-               names, eof := dir.Readdirnames(1)
-               if len(names) == 0 && eof == io.EOF && path != runner.HostOutputDir {
-                       containerPath := runner.OutputPath + path[len(runner.HostOutputDir):]
-                       for _, bind := range binds {
-                               mnt := runner.Container.Mounts[bind]
-                               // Check if there is a bind for this
-                               // directory, in which case assume we don't need .keep
-                               if (containerPath == bind || strings.HasPrefix(containerPath, bind+"/")) && mnt.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
-                                       return
-                               }
-                       }
-                       outputSuffix := path[len(runner.HostOutputDir)+1:]
-                       return fmt.Sprintf("./%v d41d8cd98f00b204e9800998ecf8427e+0 0:0:.keep\n", outputSuffix), nil
+               saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
+               saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
+               saved, err := runner.saveLogCollection(false)
+               if err != nil {
+                       runner.CrunchLog.Printf("error updating log collection: %s", err)
+                       continue
                }
-               return
-       }
-
-       if followed >= limitFollowSymlinks {
-               // Got stuck in a loop or just a pathological number of
-               // directory links, give up.
-               err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
-               return
-       }
-
-       // "path" is the actual path we are visiting
-       // "tgt" is the target of "path" (a non-symlink) after following symlinks
-       // "relocated" is the path in the output manifest where the file should be placed,
-       // but has HostOutputDir as a prefix.
-
-       // The destination path in the output manifest may need to be
-       // logically relocated to some other path in order to appear
-       // in the correct location as a result of following a symlink.
-       // Remove the relocateFrom prefix and replace it with
-       // relocateTo.
-       relocated := relocateTo + path[len(relocateFrom):]
 
-       tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
-       if derefErr != nil && derefErr != ErrNotInOutputDir {
-               return "", derefErr
-       }
-
-       // go through mounts and try reverse map to collection reference
-       for _, bind := range binds {
-               mnt := runner.Container.Mounts[bind]
-               if (tgt == bind || strings.HasPrefix(tgt, bind+"/")) && !mnt.Writable {
-                       // get path relative to bind
-                       targetSuffix := tgt[len(bind):]
-
-                       // Copy mount and adjust the path to add path relative to the bind
-                       adjustedMount := mnt
-                       adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
-
-                       // Terminates in this keep mount, so add the
-                       // manifest text at appropriate location.
-                       outputSuffix := relocated[len(runner.HostOutputDir):]
-                       manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
-                       return
+               var updated arvados.Container
+               err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+                       "container": arvadosclient.Dict{"log": saved.PortableDataHash},
+               }, &updated)
+               if err != nil {
+                       runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
+                       continue
                }
-       }
-
-       // If target is not a collection mount, it must be located within the
-       // output directory, otherwise it is an error.
-       if derefErr == ErrNotInOutputDir {
-               err = fmt.Errorf("Symlink in output %q points to invalid location %q, must point to path within the output directory.",
-                       path[len(runner.HostOutputDir):], readlinktgt)
-               return
-       }
-
-       if info.Mode().IsRegular() {
-               return "", walkUpload.UploadFile(relocated, tgt)
-       }
 
-       if info.Mode().IsDir() {
-               // Symlink leads to directory.  Walk() doesn't follow
-               // directory symlinks, so we walk the target directory
-               // instead.  Within the walk, file paths are relocated
-               // so they appear under the original symlink path.
-               err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
-                       var m string
-                       m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
-                               binds, walkUpload, tgt, relocated, followed+1)
-                       if walkerr == nil {
-                               manifestText = manifestText + m
-                       }
-                       return walkerr
-               })
-               return
+               savedSize = size
        }
-
-       return
 }
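
updateLogs replaces the old one-shot upload with incremental saves: it wakes frequently (every crunchLogUpdatePeriod/360), but only writes a new log collection once the full period has elapsed or the collection has grown by crunchLogUpdateSize, and it also listens for SIGUSR1 to force an immediate save. A hedged sketch of triggering that flush from another process (the pid variable is hypothetical):

    if p, err := os.FindProcess(pid); err == nil {
            p.Signal(syscall.SIGUSR1) // prompt crunch-run to save logs now
    }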
 
-// HandleOutput sets the output, unmounts the FUSE mount, and deletes temporary directories
+// CaptureOutput saves data from the container's output directory if
+// needed, and updates the container output accordingly.
 func (runner *ContainerRunner) CaptureOutput() error {
        if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
                // Output may have been set directly by the container, so
                // refresh the container record to check.
-               err := runner.ArvClient.Get("containers", runner.Container.UUID,
+               err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
                        nil, &runner.Container)
                if err != nil {
                        return err
@@ -1266,163 +1253,47 @@ func (runner *ContainerRunner) CaptureOutput() error {
                }
        }
 
-       if runner.HostOutputDir == "" {
-               return nil
-       }
-
-       _, err := os.Stat(runner.HostOutputDir)
+       txt, err := (&copier{
+               client:        runner.containerClient,
+               arvClient:     runner.ContainerArvClient,
+               keepClient:    runner.ContainerKeepClient,
+               hostOutputDir: runner.HostOutputDir,
+               ctrOutputDir:  runner.Container.OutputPath,
+               binds:         runner.Binds,
+               mounts:        runner.Container.Mounts,
+               secretMounts:  runner.SecretMounts,
+               logger:        runner.CrunchLog,
+       }).Copy()
        if err != nil {
-               return fmt.Errorf("While checking host output path: %v", err)
+               return err
        }
-
-       // Pre-populate output from the configured mount points
-       var binds []string
-       for bind, mnt := range runner.Container.Mounts {
-               if mnt.Kind == "collection" {
-                       binds = append(binds, bind)
-               }
-       }
-       sort.Strings(binds)
-
-       // Delete secret mounts so they don't get saved to the output collection.
-       for bind := range runner.SecretMounts {
-               if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
-                       err = os.Remove(runner.HostOutputDir + bind[len(runner.Container.OutputPath):])
-                       if err != nil {
-                               return fmt.Errorf("Unable to remove secret mount: %v", err)
-                       }
-               }
-       }
-
-       var manifestText string
-
-       collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
-       _, err = os.Stat(collectionMetafile)
-       if err != nil {
-               // Regular directory
-
-               cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
-               walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)
-
-               var m string
-               err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
-                       m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
-                       if err == nil {
-                               manifestText = manifestText + m
-                       }
-                       return err
-               })
-
-               cw.EndUpload(walkUpload)
-
-               if err != nil {
-                       return fmt.Errorf("While uploading output files: %v", err)
-               }
-
-               m, err = cw.ManifestText()
-               manifestText = manifestText + m
+       if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
+               runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
+               fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
                if err != nil {
-                       return fmt.Errorf("While uploading output files: %v", err)
-               }
-       } else {
-               // FUSE mount directory
-               file, openerr := os.Open(collectionMetafile)
-               if openerr != nil {
-                       return fmt.Errorf("While opening FUSE metafile: %v", err)
-               }
-               defer file.Close()
-
-               var rec arvados.Collection
-               err = json.NewDecoder(file).Decode(&rec)
-               if err != nil {
-                       return fmt.Errorf("While reading FUSE metafile: %v", err)
-               }
-               manifestText = rec.ManifestText
-       }
-
-       for _, bind := range binds {
-               mnt := runner.Container.Mounts[bind]
-
-               bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath)
-
-               if bindSuffix == bind || len(bindSuffix) <= 0 {
-                       // either does not start with OutputPath or is OutputPath itself
-                       continue
-               }
-
-               if mnt.ExcludeFromOutput == true || mnt.Writable {
-                       continue
+                       return err
                }
-
-               // append to manifest_text
-               m, err := runner.getCollectionManifestForPath(mnt, bindSuffix)
+               txt, err = fs.MarshalManifest(".")
                if err != nil {
                        return err
                }
-
-               manifestText = manifestText + m
-       }
-
-       // Save output
-       var response arvados.Collection
-       manifest := manifest.Manifest{Text: manifestText}
-       manifestText = manifest.Extract(".", ".").Text
-       err = runner.ArvClient.Create("collections",
-               arvadosclient.Dict{
-                       "ensure_unique_name": true,
-                       "collection": arvadosclient.Dict{
-                               "is_trashed":    true,
-                               "name":          "output for " + runner.Container.UUID,
-                               "manifest_text": manifestText}},
-               &response)
+       }
+       var resp arvados.Collection
+       err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
+               "ensure_unique_name": true,
+               "collection": arvadosclient.Dict{
+                       "is_trashed":    true,
+                       "name":          "output for " + runner.Container.UUID,
+                       "manifest_text": txt,
+               },
+       }, &resp)
        if err != nil {
-               return fmt.Errorf("While creating output collection: %v", err)
+               return fmt.Errorf("error creating output collection: %v", err)
        }
-       runner.OutputPDH = &response.PortableDataHash
+       runner.OutputPDH = &resp.PortableDataHash
        return nil
 }
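
The regexp in CaptureOutput looks for block locators carrying a +R hint, which mark blocks that still live on a remote cluster; when any are found, round-tripping the manifest through a collection filesystem copies those blocks locally. A sketch of the detection on one manifest line (the hint format shown is an illustration, not taken from this change):

    txt := ". acbd18db4cc2f85cedef654fccc4a4d8+3+Rzzzzz-abcdefg 0:3:foo.txt\n"
    re := regexp.MustCompile(` [0-9a-f]+\+\S*\+R`)
    remote := len(re.FindAllStringIndex(txt, -1)) // 1 remote block here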
 
-var outputCollections = make(map[string]arvados.Collection)
-
-// Fetch the collection for the mnt.PortableDataHash
-// Return the manifest_text fragment corresponding to the specified mnt.Path
-//  after making any required updates.
-//  Ex:
-//    If mnt.Path is not specified,
-//      return the entire manifest_text after replacing any "." with bindSuffix
-//    If mnt.Path corresponds to one stream,
-//      return the manifest_text for that stream after replacing that stream name with bindSuffix
-//    Otherwise, check if a filename in any one stream is being sought. Return the manifest_text
-//      for that stream after replacing stream name with bindSuffix minus the last word
-//      and the file name with last word of the bindSuffix
-//  Allowed path examples:
-//    "path":"/"
-//    "path":"/subdir1"
-//    "path":"/subdir1/subdir2"
-//    "path":"/subdir/filename" etc
-func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) {
-       collection := outputCollections[mnt.PortableDataHash]
-       if collection.PortableDataHash == "" {
-               err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection)
-               if err != nil {
-                       return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err)
-               }
-               outputCollections[mnt.PortableDataHash] = collection
-       }
-
-       if collection.ManifestText == "" {
-               runner.CrunchLog.Printf("No manifest text for collection %v", collection.PortableDataHash)
-               return "", nil
-       }
-
-       mft := manifest.Manifest{Text: collection.ManifestText}
-       extracted := mft.Extract(mnt.Path, bindSuffix)
-       if extracted.Err != nil {
-               return "", fmt.Errorf("Error parsing manifest for %v: %v", mnt.PortableDataHash, extracted.Err.Error())
-       }
-       return extracted.Text, nil
-}
-
 func (runner *ContainerRunner) CleanupDirs() {
        if runner.ArvMount != nil {
                var delay int64 = 8
@@ -1495,8 +1366,12 @@ func (runner *ContainerRunner) CommitLogs() error {
                // point, but re-open crunch log with ArvClient in case there are any
                // other further errors (such as failing to write the log to Keep!)
                // while shutting down
-               runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
-                       UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
+               runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
+                       ArvClient:     runner.DispatcherArvClient,
+                       UUID:          runner.Container.UUID,
+                       loggingStream: "crunch-run",
+                       writeCloser:   nil,
+               })
                runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
        }()
 
@@ -1508,26 +1383,51 @@ func (runner *ContainerRunner) CommitLogs() error {
                // -- it exists only to send logs to other channels.
                return nil
        }
+       saved, err := runner.saveLogCollection(true)
+       if err != nil {
+               return fmt.Errorf("error saving log collection: %s", err)
+       }
+       runner.logMtx.Lock()
+       defer runner.logMtx.Unlock()
+       runner.LogsPDH = &saved.PortableDataHash
+       return nil
+}
 
-       mt, err := runner.LogCollection.ManifestText()
+func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
+       runner.logMtx.Lock()
+       defer runner.logMtx.Unlock()
+       if runner.LogsPDH != nil {
+               // Already finalized.
+               return
+       }
+       mt, err := runner.LogCollection.MarshalManifest(".")
        if err != nil {
-               return fmt.Errorf("While creating log manifest: %v", err)
-       }
-
-       var response arvados.Collection
-       err = runner.ArvClient.Create("collections",
-               arvadosclient.Dict{
-                       "ensure_unique_name": true,
-                       "collection": arvadosclient.Dict{
-                               "is_trashed":    true,
-                               "name":          "logs for " + runner.Container.UUID,
-                               "manifest_text": mt}},
-               &response)
+               err = fmt.Errorf("error creating log manifest: %v", err)
+               return
+       }
+       updates := arvadosclient.Dict{
+               "name":          "logs for " + runner.Container.UUID,
+               "manifest_text": mt,
+       }
+       if final {
+               updates["is_trashed"] = true
+       } else {
+               exp := time.Now().Add(crunchLogUpdatePeriod * 24)
+               updates["trash_at"] = exp
+               updates["delete_at"] = exp
+       }
+       reqBody := arvadosclient.Dict{"collection": updates}
+       if runner.logUUID == "" {
+               reqBody["ensure_unique_name"] = true
+               err = runner.DispatcherArvClient.Create("collections", reqBody, &response)
+       } else {
+               err = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
+       }
        if err != nil {
-               return fmt.Errorf("While creating log collection: %v", err)
+               return
        }
-       runner.LogsPDH = &response.PortableDataHash
-       return nil
+       runner.logUUID = response.UUID
+       return
 }
 
 // UpdateContainerRunning updates the container state to "Running"
@@ -1537,7 +1437,7 @@ func (runner *ContainerRunner) UpdateContainerRunning() error {
        if runner.cCancelled {
                return ErrCancelled
        }
-       return runner.ArvClient.Update("containers", runner.Container.UUID,
+       return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
                arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
 }
 
@@ -1549,11 +1449,11 @@ func (runner *ContainerRunner) ContainerToken() (string, error) {
        }
 
        var auth arvados.APIClientAuthorization
-       err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
+       err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
        if err != nil {
                return "", err
        }
-       runner.token = auth.APIToken
+       runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
        return runner.token, nil
 }
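
The container token is now returned in "v2" form, which carries the API client authorization UUID and the container UUID alongside the bare secret. A hedged sketch of taking one apart (field meanings follow the Sprintf composition above):

    parts := strings.SplitN(token, "/", 4)
    // parts[0] = "v2"
    // parts[1] = API client authorization UUID
    // parts[2] = token secret
    // parts[3] = container UUID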
 
@@ -1573,7 +1473,7 @@ func (runner *ContainerRunner) UpdateContainerFinal() error {
                        update["output"] = *runner.OutputPDH
                }
        }
-       return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
+       return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
 }
 
 // IsCancelled returns the value of Cancelled, with goroutine safety.
@@ -1584,12 +1484,17 @@ func (runner *ContainerRunner) IsCancelled() bool {
 }
 
 // NewArvLogWriter creates an ArvLogWriter
-func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
+func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
+       writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
+       if err != nil {
+               return nil, err
+       }
        return &ArvLogWriter{
-               ArvClient:     runner.ArvClient,
+               ArvClient:     runner.DispatcherArvClient,
                UUID:          runner.Container.UUID,
                loggingStream: name,
-               writeCloser:   runner.LogCollection.Open(name + ".txt")}
+               writeCloser:   writer,
+       }, nil
 }
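
Log files now live in an arvados.CollectionFileSystem instead of the old CollectionWriter, so a log writer is just a filesystem handle. A minimal sketch of the pattern, assuming fs is such a filesystem:

    f, err := fs.OpenFile("example.txt", os.O_CREATE|os.O_WRONLY, 0666)
    if err != nil {
            return err
    }
    fmt.Fprintln(f, "hello")
    f.Close()
    // fs.MarshalManifest(".") later serializes everything written so far.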
 
 // Run the full container lifecycle.
@@ -1619,11 +1524,11 @@ func (runner *ContainerRunner) Run() (err error) {
                // hasn't already been assigned when Run() returns,
                // this cleanup func will cause Run() to return the
                // first non-nil error that is passed to checkErr().
-               checkErr := func(e error) {
+               checkErr := func(errorIn string, e error) {
                        if e == nil {
                                return
                        }
-                       runner.CrunchLog.Print(e)
+                       runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
                        if err == nil {
                                err = e
                        }
@@ -1634,7 +1539,7 @@ func (runner *ContainerRunner) Run() (err error) {
                }
 
                // Log the error encountered in Run(), if any
-               checkErr(err)
+               checkErr("Run", err)
 
                if runner.finalState == "Queued" {
                        runner.UpdateContainerFinal()
@@ -1647,10 +1552,10 @@ func (runner *ContainerRunner) Run() (err error) {
                        // capture partial output and write logs
                }
 
-               checkErr(runner.CaptureOutput())
-               checkErr(runner.stopHoststat())
-               checkErr(runner.CommitLogs())
-               checkErr(runner.UpdateContainerFinal())
+               checkErr("CaptureOutput", runner.CaptureOutput())
+               checkErr("stopHoststat", runner.stopHoststat())
+               checkErr("CommitLogs", runner.CommitLogs())
+               checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
        }()
 
        err = runner.fetchContainerRecord()
@@ -1658,7 +1563,10 @@ func (runner *ContainerRunner) Run() (err error) {
                return
        }
        runner.setupSignals()
-       runner.startHoststat()
+       err = runner.startHoststat()
+       if err != nil {
+               return
+       }
 
        // check for and/or load image
        err = runner.LoadImage()
@@ -1707,7 +1615,10 @@ func (runner *ContainerRunner) Run() (err error) {
        }
        runner.finalState = "Cancelled"
 
-       runner.startCrunchstat()
+       err = runner.startCrunchstat()
+       if err != nil {
+               return
+       }
 
        err = runner.StartContainer()
        if err != nil {
@@ -1725,7 +1636,7 @@ func (runner *ContainerRunner) Run() (err error) {
 // Fetch the current container record (uuid = runner.Container.UUID)
 // into runner.Container.
 func (runner *ContainerRunner) fetchContainerRecord() error {
-       reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+       reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
        if err != nil {
                return fmt.Errorf("error fetching container record: %v", err)
        }
@@ -1747,12 +1658,13 @@ func (runner *ContainerRunner) fetchContainerRecord() error {
                return fmt.Errorf("error getting container token: %v", err)
        }
 
-       containerClient, err := runner.MkArvClient(containerToken)
+       runner.ContainerArvClient, runner.ContainerKeepClient,
+               runner.containerClient, err = runner.MkArvClient(containerToken)
        if err != nil {
                return fmt.Errorf("error creating container API client: %v", err)
        }
 
-       err = containerClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
+       err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
        if err != nil {
                if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
                        return fmt.Errorf("error fetching secret_mounts: %v", err)
@@ -1766,31 +1678,52 @@ func (runner *ContainerRunner) fetchContainerRecord() error {
 }
 
 // NewContainerRunner creates a new container runner.
-func NewContainerRunner(api IArvadosClient,
-       kc IKeepClient,
+func NewContainerRunner(dispatcherClient *arvados.Client,
+       dispatcherArvClient IArvadosClient,
+       dispatcherKeepClient IKeepClient,
        docker ThinDockerClient,
-       containerUUID string) *ContainerRunner {
+       containerUUID string) (*ContainerRunner, error) {
 
-       cr := &ContainerRunner{ArvClient: api, Kc: kc, Docker: docker}
+       cr := &ContainerRunner{
+               dispatcherClient:     dispatcherClient,
+               DispatcherArvClient:  dispatcherArvClient,
+               DispatcherKeepClient: dispatcherKeepClient,
+               Docker:               docker,
+       }
        cr.NewLogWriter = cr.NewArvLogWriter
        cr.RunArvMount = cr.ArvMountCmd
        cr.MkTempDir = ioutil.TempDir
-       cr.MkArvClient = func(token string) (IArvadosClient, error) {
+       cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
                cl, err := arvadosclient.MakeArvadosClient()
                if err != nil {
-                       return nil, err
+                       return nil, nil, nil, err
                }
                cl.ApiToken = token
-               return cl, nil
+               kc, err := keepclient.MakeKeepClient(cl)
+               if err != nil {
+                       return nil, nil, nil, err
+               }
+               c2 := arvados.NewClientFromEnv()
+               c2.AuthToken = token
+               return cl, kc, c2, nil
+       }
+       var err error
+       cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
+       if err != nil {
+               return nil, err
        }
-       cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
        cr.Container.UUID = containerUUID
-       cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
+       w, err := cr.NewLogWriter("crunch-run")
+       if err != nil {
+               return nil, err
+       }
+       cr.CrunchLog = NewThrottledLogger(w)
        cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
 
-       loadLogThrottleParams(api)
+       loadLogThrottleParams(dispatcherArvClient)
+       go cr.updateLogs()
 
-       return cr
+       return cr, nil
 }
 
 func main() {
@@ -1799,6 +1732,10 @@ func main() {
        cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
        cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
        caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
+       detach := flag.Bool("detach", false, "Detach from parent process and run in the background")
+       sleep := flag.Duration("sleep", 0, "Delay before starting (testing use only)")
+       kill := flag.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
+       list := flag.Bool("list", false, "List UUIDs of existing crunch-run processes")
        enableNetwork := flag.String("container-enable-networking", "default",
                `Specify if networking should be enabled for container.  One of 'default', 'always':
        default: only enable networking if container requests it.
@@ -1809,8 +1746,31 @@ func main() {
        `)
        memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
        getVersion := flag.Bool("version", false, "Print version information and exit.")
+       flag.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
+
+       detached := false
+       if len(os.Args) > 1 && os.Args[1] == "-detached" {
+               // This process was invoked by a parent process, which
+               // has passed along its own arguments, including
+               // -detach, after the leading -detached flag.  Strip
+               // the leading -detached flag (it's not recognized by
+               // flag.Parse()) ... and remember not to detach all
+               // over again in this process.
+               os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
+               detached = true
+       }
+
        flag.Parse()
 
+       switch {
+       case *detach && !detached:
+               os.Exit(Detach(flag.Arg(0), os.Args, os.Stdout, os.Stderr))
+       case *kill >= 0:
+               os.Exit(KillProcess(flag.Arg(0), syscall.Signal(*kill), os.Stdout, os.Stderr))
+       case *list:
+               os.Exit(ListProcesses(os.Stdout, os.Stderr))
+       }
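
Together these flags make crunch-run its own lightweight process manager. Illustrative invocations (the container UUID is a made-up placeholder):

    crunch-run -detach zzzzz-dz642-xxxxxxxxxxxxxxx    # run in background
    crunch-run -kill 15 zzzzz-dz642-xxxxxxxxxxxxxxx   # send SIGTERM to that container's process
    crunch-run -list                                  # list UUIDs of running crunch-run processes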
+
        // Print version information if requested
        if *getVersion {
                fmt.Printf("crunch-run %s\n", version)
@@ -1818,6 +1778,7 @@ func main() {
        }
 
        log.Printf("crunch-run %s started", version)
+       time.Sleep(*sleep)
 
        containerId := flag.Arg(0)
 
@@ -1842,7 +1803,10 @@ func main() {
        // minimum version we want to support.
        docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
 
-       cr := NewContainerRunner(api, kc, docker, containerId)
+       cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, docker, containerId)
+       if err != nil {
+               log.Fatal(err)
+       }
        if dockererr != nil {
                cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
                cr.checkBrokenNode(dockererr)
@@ -1872,15 +1836,15 @@ func main() {
        if *memprofile != "" {
                f, err := os.Create(*memprofile)
                if err != nil {
-                       log.Printf("could not create memory profile: ", err)
+                       log.Printf("could not create memory profile: %s", err)
                }
                runtime.GC() // get up-to-date statistics
                if err := pprof.WriteHeapProfile(f); err != nil {
-                       log.Printf("could not write memory profile: ", err)
+                       log.Printf("could not write memory profile: %s", err)
                }
                closeerr := f.Close()
                if closeerr != nil {
-                       log.Printf("closing memprofile file: ", err)
+                       log.Printf("closing memprofile file: %s", err)
                }
        }