9397: if mount.Path ends with "/", trim it.
[arvados.git] / services / crunch-run / crunchrun.go
index c4ea92938af5e7d374bade3f8fa0d2fd500caa39..bd4c5cc5b4551337f09014858276feaaea2185a6 100644 (file)
@@ -5,6 +5,7 @@ import (
        "errors"
        "flag"
        "fmt"
+       "git.curoverse.com/arvados.git/lib/crunchstat"
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
@@ -17,6 +18,8 @@ import (
        "os/exec"
        "os/signal"
        "path"
+       "path/filepath"
+       "sort"
        "strings"
        "sync"
        "syscall"
@@ -27,8 +30,9 @@ import (
 type IArvadosClient interface {
        Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
        Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
-       Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error)
-       Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) (err error)
+       Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
+       Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
+       Discovery(key string) (interface{}, error)
 }
 
 // ErrCancelled is the error returned when the container is cancelled.
@@ -37,13 +41,7 @@ var ErrCancelled = errors.New("Cancelled")
 // IKeepClient is the minimal Keep API methods used by crunch-run.
 type IKeepClient interface {
        PutHB(hash string, buf []byte) (string, int, error)
-       ManifestFileReader(m manifest.Manifest, filename string) (keepclient.ReadCloserWithLen, error)
-}
-
-// Collection record returned by the API server.
-type CollectionRecord struct {
-       ManifestText     string `json:"manifest_text"`
-       PortableDataHash string `json:"portable_data_hash"`
+       ManifestFileReader(m manifest.Manifest, filename string) (keepclient.Reader, error)
 }
 
 // NewLogWriter is a factory function to create a new log writer.
@@ -97,6 +95,25 @@ type ContainerRunner struct {
        SigChan        chan os.Signal
        ArvMountExit   chan error
        finalState     string
+       trashLifetime  time.Duration
+
+       statLogger   io.WriteCloser
+       statReporter *crunchstat.Reporter
+       statInterval time.Duration
+       cgroupRoot   string
+       // What we expect the container's cgroup parent to be.
+       expectCgroupParent string
+       // What we tell docker to use as the container's cgroup
+       // parent. Note: Ideally we would use the same field for both
+       // expectCgroupParent and setCgroupParent, and just make it
+       // default to "docker". However, when using docker < 1.10 with
+       // systemd, specifying a non-empty cgroup parent (even the
+       // default value "docker") hits a docker bug
+       // (https://github.com/docker/docker/issues/17126). Using two
+       // separate fields makes it possible to use the "expect cgroup
+       // parent to be X" feature even on sites where the "specify
+       // cgroup parent" feature breaks.
+       setCgroupParent string
 }
 
 // SetupSignals sets up signal handling to gracefully terminate the underlying
@@ -108,7 +125,7 @@ func (runner *ContainerRunner) SetupSignals() {
        signal.Notify(runner.SigChan, syscall.SIGQUIT)
 
        go func(sig <-chan os.Signal) {
-               for _ = range sig {
+               for range sig {
                        if !runner.Cancelled {
                                runner.CancelLock.Lock()
                                runner.Cancelled = true
@@ -128,7 +145,7 @@ func (runner *ContainerRunner) LoadImage() (err error) {
 
        runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
 
-       var collection CollectionRecord
+       var collection arvados.Collection
        err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
        if err != nil {
                return fmt.Errorf("While getting container image collection: %v", err)
@@ -223,8 +240,17 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
        return c, nil
 }
 
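+// tmpBackedOutputDir records whether the output directory is backed by a
+// host temp dir (a "tmp" mount at the output path) rather than a
+// collection mount.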
+var tmpBackedOutputDir = false
+
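+// SetupArvMountPoint creates a temp dir to use as the arv-mount mount
+// point, unless one has already been set.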
+func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
+       if runner.ArvMountPoint == "" {
+               runner.ArvMountPoint, err = runner.MkTempDir("", prefix)
+       }
+       return
+}
+
 func (runner *ContainerRunner) SetupMounts() (err error) {
-       runner.ArvMountPoint, err = runner.MkTempDir("", "keep")
+       err = runner.SetupArvMountPoint("keep")
        if err != nil {
                return fmt.Errorf("While creating keep mount temp dir: %v", err)
        }
@@ -234,10 +260,23 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
        pdhOnly := true
        tmpcount := 0
        arvMountCmd := []string{"--foreground", "--allow-other", "--read-write"}
+
+       if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
+               arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
+       }
+
        collectionPaths := []string{}
        runner.Binds = nil
+       needCertMount := true
 
-       for bind, mnt := range runner.Container.Mounts {
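+       // Process the mount targets in sorted order to get deterministic
+       // behavior (map iteration order is randomized).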
+       var binds []string
+       for bind := range runner.Container.Mounts {
+               binds = append(binds, bind)
+       }
+       sort.Strings(binds)
+
+       for _, bind := range binds {
+               mnt := runner.Container.Mounts[bind]
                if bind == "stdout" {
                        // Is it a "file" mount kind?
                        if mnt.Kind != "file" {
@@ -254,7 +293,18 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        }
                }
 
-               if mnt.Kind == "collection" {
+               if bind == "/etc/arvados/ca-certificates.crt" {
+                       needCertMount = false
+               }
+
+               if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
+                       if mnt.Kind != "collection" {
+                               return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind)
+                       }
+               }
+
+               switch {
+               case mnt.Kind == "collection":
                        var src string
                        if mnt.UUID != "" && mnt.PortableDataHash != "" {
                                return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
@@ -279,31 +329,56 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        if mnt.Writable {
                                if bind == runner.Container.OutputPath {
                                        runner.HostOutputDir = src
+                               } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+                                       return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
                                }
                                runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
                        } else {
                                runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
                        }
                        collectionPaths = append(collectionPaths, src)
-               } else if mnt.Kind == "tmp" {
-                       if bind == runner.Container.OutputPath {
-                               runner.HostOutputDir, err = runner.MkTempDir("", "")
-                               if err != nil {
-                                       return fmt.Errorf("While creating mount temp dir: %v", err)
-                               }
-                               st, staterr := os.Stat(runner.HostOutputDir)
-                               if staterr != nil {
-                                       return fmt.Errorf("While Stat on temp dir: %v", staterr)
-                               }
-                               err = os.Chmod(runner.HostOutputDir, st.Mode()|os.ModeSetgid|0777)
-                               if staterr != nil {
-                                       return fmt.Errorf("While Chmod temp dir: %v", err)
-                               }
-                               runner.CleanupTempDir = append(runner.CleanupTempDir, runner.HostOutputDir)
-                               runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", runner.HostOutputDir, bind))
-                       } else {
-                               runner.Binds = append(runner.Binds, bind)
+
+               case mnt.Kind == "tmp" && bind == runner.Container.OutputPath:
+                       runner.HostOutputDir, err = runner.MkTempDir("", "")
+                       if err != nil {
+                               return fmt.Errorf("While creating mount temp dir: %v", err)
+                       }
+                       st, staterr := os.Stat(runner.HostOutputDir)
+                       if staterr != nil {
+                               return fmt.Errorf("While Stat on temp dir: %v", staterr)
+                       }
+                       err = os.Chmod(runner.HostOutputDir, st.Mode()|os.ModeSetgid|0777)
+                       if err != nil {
+                               return fmt.Errorf("While Chmod temp dir: %v", err)
                        }
+                       runner.CleanupTempDir = append(runner.CleanupTempDir, runner.HostOutputDir)
+                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", runner.HostOutputDir, bind))
+                       tmpBackedOutputDir = true
+
+               case mnt.Kind == "tmp":
+                       runner.Binds = append(runner.Binds, bind)
+
+               case mnt.Kind == "json":
+                       jsondata, err := json.Marshal(mnt.Content)
+                       if err != nil {
+                               return fmt.Errorf("encoding json data: %v", err)
+                       }
+                       // Create a tempdir with a single file
+                       // (instead of just a tempfile): this way we
+                       // can ensure the file is world-readable
+                       // inside the container, without having to
+                       // make it world-readable on the docker host.
+                       tmpdir, err := runner.MkTempDir("", "")
+                       if err != nil {
+                               return fmt.Errorf("creating temp dir: %v", err)
+                       }
+                       runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
+                       tmpfn := filepath.Join(tmpdir, "mountdata.json")
+                       err = ioutil.WriteFile(tmpfn, jsondata, 0644)
+                       if err != nil {
+                               return fmt.Errorf("writing temp file: %v", err)
+                       }
+                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
                }
        }
 
@@ -311,6 +386,16 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                return fmt.Errorf("Output path does not correspond to a writable mount point")
        }
 
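+       // If the container will access the API server and does not mount its
+       // own CA bundle, bind-mount the first root certificate bundle found on
+       // the host at /etc/arvados/ca-certificates.crt inside the container.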
+       if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
+               for _, certfile := range arvadosclient.CertFiles {
+                       _, err := os.Stat(certfile)
+                       if err == nil {
+                               runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
+                               break
+                       }
+               }
+       }
+
        if pdhOnly {
                arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
        } else {
@@ -372,6 +457,14 @@ func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
                                runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr)
                        }
 
+                       if runner.statReporter != nil {
+                               runner.statReporter.Stop()
+                               closeerr = runner.statLogger.Close()
+                               if closeerr != nil {
+                                       runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr)
+                               }
+                       }
+
                        runner.loggingDone <- true
                        close(runner.loggingDone)
                        return
@@ -379,6 +472,18 @@ func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
        }
 }
 
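+// StartCrunchstat starts a crunchstat.Reporter that periodically logs the
+// container's resource usage (read from its cgroup) to the "crunchstat"
+// log stream.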
+func (runner *ContainerRunner) StartCrunchstat() {
+       runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
+       runner.statReporter = &crunchstat.Reporter{
+               CID:          runner.ContainerID,
+               Logger:       log.New(runner.statLogger, "", 0),
+               CgroupParent: runner.expectCgroupParent,
+               CgroupRoot:   runner.cgroupRoot,
+               PollPeriod:   runner.statInterval,
+       }
+       runner.statReporter.Start()
+}
+
 // AttachLogs connects the docker container stdout and stderr logs to the
 // Arvados logger which logs to Keep and the API server logs table.
 func (runner *ContainerRunner) AttachStreams() (err error) {
@@ -459,8 +564,13 @@ func (runner *ContainerRunner) CreateContainer() error {
                return fmt.Errorf("While creating container: %v", err)
        }
 
-       runner.HostConfig = dockerclient.HostConfig{Binds: runner.Binds,
-               LogConfig: dockerclient.LogConfig{Type: "none"}}
+       runner.HostConfig = dockerclient.HostConfig{
+               Binds:        runner.Binds,
+               CgroupParent: runner.setCgroupParent,
+               LogConfig: dockerclient.LogConfig{
+                       Type: "none",
+               },
+       }
 
        return runner.AttachStreams()
 }
@@ -499,6 +609,21 @@ func (runner *ContainerRunner) CaptureOutput() error {
                return nil
        }
 
+       if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
+               // Output may have been set directly by the container, so
+               // refresh the container record to check.
+               err := runner.ArvClient.Get("containers", runner.Container.UUID,
+                       nil, &runner.Container)
+               if err != nil {
+                       return err
+               }
+               if runner.Container.Output != "" {
+                       // Container output is already set.
+                       runner.OutputPDH = &runner.Container.Output
+                       return nil
+               }
+       }
+
        if runner.HostOutputDir == "" {
                return nil
        }
@@ -527,7 +652,7 @@ func (runner *ContainerRunner) CaptureOutput() error {
                }
                defer file.Close()
 
-               rec := CollectionRecord{}
+               var rec arvados.Collection
                err = json.NewDecoder(file).Decode(&rec)
                if err != nil {
                        return fmt.Errorf("While reading FUSE metafile: %v", err)
@@ -535,20 +660,162 @@ func (runner *ContainerRunner) CaptureOutput() error {
                manifestText = rec.ManifestText
        }
 
-       var response CollectionRecord
+       // Pre-populate output from the configured mount points
+       var binds []string
+       for bind := range runner.Container.Mounts {
+               binds = append(binds, bind)
+       }
+       sort.Strings(binds)
+
+       for _, bind := range binds {
+               mnt := runner.Container.Mounts[bind]
+
+               bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath)
+
+               if bindSuffix == bind || bindSuffix == "" {
+                       // either does not start with OutputPath or is OutputPath itself
+                       continue
+               }
+
+               if !strings.HasPrefix(bindSuffix, "/") {
+                       return fmt.Errorf("Expected bind to be of the format '%v/*' but found: %v", runner.Container.OutputPath, bind)
+               }
+
+               jsondata, err := json.Marshal(mnt.Content)
+               if err != nil {
+                       return fmt.Errorf("While marshaling mount content: %v", err)
+               }
+               var content map[string]interface{}
+               err = json.Unmarshal(jsondata, &content)
+               if err != nil {
+                       return fmt.Errorf("While unmarshaling mount content: %v", err)
+               }
+
+               if content["exclude_from_output"] == true {
+                       continue
+               }
+
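+               // A portable_data_hash given as "hash/path" names a path inside
+               // the collection; split it into hash and path components here.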
+               idx := strings.Index(mnt.PortableDataHash, "/")
+               if idx > 0 {
+                       mnt.Path = mnt.PortableDataHash[idx:]
+                       mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
+               }
+
+               // append to manifest_text
+               m, err := runner.getCollectionManifestForPath(mnt, bindSuffix)
+               if err != nil {
+                       return err
+               }
+
+               manifestText = manifestText + m
+       }
+
+       // Save output
+       var response arvados.Collection
        err = runner.ArvClient.Create("collections",
                arvadosclient.Dict{
                        "collection": arvadosclient.Dict{
+                               "trash_at":      time.Now().Add(runner.trashLifetime).Format(time.RFC3339),
+                               "name":          "output for " + runner.Container.UUID,
                                "manifest_text": manifestText}},
                &response)
        if err != nil {
                return fmt.Errorf("While creating output collection: %v", err)
        }
+       runner.OutputPDH = &response.PortableDataHash
+       return nil
+}
+
+// getCollectionManifestForPath fetches the collection identified by
+// mnt.PortableDataHash and returns the manifest_text fragment that
+// corresponds to mnt.Path, rewritten so that it appears under bindSuffix:
+//   If mnt.Path is empty or "/", return the entire manifest_text with
+//     bindSuffix inserted after the leading "." of every stream name.
+//   If mnt.Path names a single stream, return that stream's manifest_text
+//     with the stream name replaced by "." + bindSuffix.
+//   Otherwise, look for a file with that name in one of the streams and
+//     return its manifest_text with the stream name replaced by bindSuffix
+//     minus its last path component, and the file name replaced by the
+//     last component of bindSuffix.
+// Allowed path examples:
+//   "path":"/"
+//   "path":"/subdir1"
+//   "path":"/subdir1/subdir2"
+//   "path":"/subdir/filename"
+func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) {
+       var collection arvados.Collection
+       err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection)
+       if err != nil {
+               return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err)
+       }
 
-       runner.OutputPDH = new(string)
-       *runner.OutputPDH = response.PortableDataHash
+       manifestText := ""
+       if mnt.Path == "" || mnt.Path == "/" {
+               // no path specified; return the entire manifest text
+               manifestText = collection.ManifestText
+               manifestText = strings.Replace(manifestText, "./", "."+bindSuffix+"/", -1)
+               manifestText = strings.Replace(manifestText, ". ", "."+bindSuffix+" ", -1)
+       } else {
+               // either a single stream or file from a stream is being sought
+               bindIdx := strings.LastIndex(bindSuffix, "/")
+               var bindSubdir, bindFileName string
+               if bindIdx >= 0 {
+                       bindSubdir = "." + bindSuffix[0:bindIdx]
+                       bindFileName = bindSuffix[bindIdx+1:]
+               }
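+               // mnt.Path may have a trailing "/"; trim it so it compares
+               // equal to the stream names below.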
+               mntPath := strings.TrimSuffix(mnt.Path, "/")
+               pathIdx := strings.LastIndex(mntPath, "/")
+               var pathSubdir, pathFileName string
+               if pathIdx >= 0 {
+                       pathSubdir = "." + mntPath[0:pathIdx]
+                       pathFileName = mntPath[pathIdx+1:]
+               }
+               streams := strings.Split(collection.ManifestText, "\n")
+               for _, stream := range streams {
+                       tokens := strings.Split(stream, " ")
+                       if tokens[0] == "."+mntPath {
+                               // path refers to this complete stream
+                               adjustedStream := strings.Replace(stream, "."+mntPath, "."+bindSuffix, -1)
+                               manifestText = adjustedStream + "\n"
+                               break
+                       } else {
+                               // look for a matching file in this stream
+                               if tokens[0] == pathSubdir {
+                                       // path refers to a file in this stream
+                                       for _, token := range tokens {
+                                               if strings.Index(token, ":"+pathFileName) > 0 {
+                                                       // found the file in the stream; discard all other file tokens
+                                                       for _, t := range tokens {
+                                                               if strings.Index(t, ":") == -1 {
+                                                                       manifestText += (" " + t)
+                                                               } else {
+                                                                       break // done reading all non-file tokens
+                                                               }
+                                                       }
+                                                       manifestText = strings.Trim(manifestText, " ")
+                                                       token = strings.Replace(token, ":"+pathFileName, ":"+bindFileName, -1)
+                                                       manifestText += (" " + token + "\n")
+                                                       manifestText = strings.Replace(manifestText, pathSubdir, bindSubdir, -1)
+                                                       break
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
 
-       return nil
+       return manifestText, nil
+}
+
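+// loadDiscoveryVars loads the API discovery document values used by the
+// runner (currently just defaultTrashLifetime).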
+func (runner *ContainerRunner) loadDiscoveryVars() {
+       tl, err := runner.ArvClient.Discovery("defaultTrashLifetime")
+       if err != nil {
+               log.Fatalf("getting defaultTrashLifetime from discovery document: %s", err)
+       }
+       runner.trashLifetime = time.Duration(tl.(float64)) * time.Second
 }
 
 func (runner *ContainerRunner) CleanupDirs() {
@@ -599,19 +866,18 @@ func (runner *ContainerRunner) CommitLogs() error {
                return fmt.Errorf("While creating log manifest: %v", err)
        }
 
-       var response CollectionRecord
+       var response arvados.Collection
        err = runner.ArvClient.Create("collections",
                arvadosclient.Dict{
                        "collection": arvadosclient.Dict{
+                               "trash_at":      time.Now().Add(runner.trashLifetime).Format(time.RFC3339),
                                "name":          "logs for " + runner.Container.UUID,
                                "manifest_text": mt}},
                &response)
        if err != nil {
                return fmt.Errorf("While creating log collection: %v", err)
        }
-
        runner.LogsPDH = &response.PortableDataHash
-
        return nil
 }
 
@@ -647,10 +913,10 @@ func (runner *ContainerRunner) ContainerToken() (string, error) {
 func (runner *ContainerRunner) UpdateContainerFinal() error {
        update := arvadosclient.Dict{}
        update["state"] = runner.finalState
+       if runner.LogsPDH != nil {
+               update["log"] = *runner.LogsPDH
+       }
        if runner.finalState == "Complete" {
-               if runner.LogsPDH != nil {
-                       update["log"] = *runner.LogsPDH
-               }
                if runner.ExitCode != nil {
                        update["exit_code"] = *runner.ExitCode
                }
@@ -710,6 +976,7 @@ func (runner *ContainerRunner) Run() (err error) {
                checkErr(err)
 
                if runner.finalState == "Queued" {
+                       runner.CrunchLog.Close()
                        runner.UpdateContainerFinal()
                        return
                }
@@ -742,6 +1009,7 @@ func (runner *ContainerRunner) Run() (err error) {
        // check for and/or load image
        err = runner.LoadImage()
        if err != nil {
+               runner.finalState = "Cancelled"
                err = fmt.Errorf("While loading container image: %v", err)
                return
        }
@@ -749,6 +1017,7 @@ func (runner *ContainerRunner) Run() (err error) {
        // set up FUSE mount and binds
        err = runner.SetupMounts()
        if err != nil {
+               runner.finalState = "Cancelled"
                err = fmt.Errorf("While setting up mounts: %v", err)
                return
        }
@@ -758,6 +1027,8 @@ func (runner *ContainerRunner) Run() (err error) {
                return
        }
 
+       runner.StartCrunchstat()
+
        if runner.IsCancelled() {
                return
        }
@@ -794,14 +1065,24 @@ func NewContainerRunner(api IArvadosClient,
        cr.Container.UUID = containerUUID
        cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
        cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
+       cr.loadDiscoveryVars()
        return cr
 }
 
 func main() {
+       statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
+       cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
+       cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
+       cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
+       caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
        flag.Parse()
 
        containerId := flag.Arg(0)
 
+       if *caCertsPath != "" {
+               arvadosclient.CertFiles = []string{*caCertsPath}
+       }
+
        api, err := arvadosclient.MakeArvadosClient()
        if err != nil {
                log.Fatalf("%s: %v", containerId, err)
@@ -809,7 +1090,7 @@ func main() {
        api.Retries = 8
 
        var kc *keepclient.KeepClient
-       kc, err = keepclient.MakeKeepClient(&api)
+       kc, err = keepclient.MakeKeepClient(api)
        if err != nil {
                log.Fatalf("%s: %v", containerId, err)
        }
@@ -822,6 +1103,14 @@ func main() {
        }
 
        cr := NewContainerRunner(api, kc, docker, containerId)
+       cr.statInterval = *statInterval
+       cr.cgroupRoot = *cgroupRoot
+       cr.expectCgroupParent = *cgroupParent
+       if *cgroupParentSubsystem != "" {
+               p := findCgroup(*cgroupParentSubsystem)
+               cr.setCgroupParent = p
+               cr.expectCgroupParent = p
+       }
 
        err = cr.Run()
        if err != nil {