X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/19ad5dbdf1dc18d46f7fad9ca30b69126b224c96..a54e88868ac259443e2cd8d5f6fddb4b8154acb9:/services/crunch-run/crunchrun.go diff --git a/services/crunch-run/crunchrun.go b/services/crunch-run/crunchrun.go index c4ea92938a..e0d707a5a5 100644 --- a/services/crunch-run/crunchrun.go +++ b/services/crunch-run/crunchrun.go @@ -5,11 +5,6 @@ import ( "errors" "flag" "fmt" - "git.curoverse.com/arvados.git/sdk/go/arvados" - "git.curoverse.com/arvados.git/sdk/go/arvadosclient" - "git.curoverse.com/arvados.git/sdk/go/keepclient" - "git.curoverse.com/arvados.git/sdk/go/manifest" - "github.com/curoverse/dockerclient" "io" "io/ioutil" "log" @@ -17,18 +12,29 @@ import ( "os/exec" "os/signal" "path" + "path/filepath" + "sort" "strings" "sync" "syscall" "time" + + "git.curoverse.com/arvados.git/lib/crunchstat" + "git.curoverse.com/arvados.git/sdk/go/arvados" + "git.curoverse.com/arvados.git/sdk/go/arvadosclient" + "git.curoverse.com/arvados.git/sdk/go/keepclient" + "git.curoverse.com/arvados.git/sdk/go/manifest" + "github.com/curoverse/dockerclient" ) // IArvadosClient is the minimal Arvados API methods used by crunch-run. type IArvadosClient interface { Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error - Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) - Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) (err error) + Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error + Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error + CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error) + Discovery(key string) (interface{}, error) } // ErrCancelled is the error returned when the container is cancelled. @@ -37,13 +43,7 @@ var ErrCancelled = errors.New("Cancelled") // IKeepClient is the minimal Keep API methods used by crunch-run. type IKeepClient interface { PutHB(hash string, buf []byte) (string, int, error) - ManifestFileReader(m manifest.Manifest, filename string) (keepclient.ReadCloserWithLen, error) -} - -// Collection record returned by the API server. -type CollectionRecord struct { - ManifestText string `json:"manifest_text"` - PortableDataHash string `json:"portable_data_hash"` + ManifestFileReader(m manifest.Manifest, filename string) (keepclient.Reader, error) } // NewLogWriter is a factory function to create a new log writer. @@ -92,11 +92,31 @@ type ContainerRunner struct { CleanupTempDir []string Binds []string OutputPDH *string - CancelLock sync.Mutex - Cancelled bool SigChan chan os.Signal ArvMountExit chan error finalState string + + statLogger io.WriteCloser + statReporter *crunchstat.Reporter + statInterval time.Duration + cgroupRoot string + // What we expect the container's cgroup parent to be. + expectCgroupParent string + // What we tell docker to use as the container's cgroup + // parent. Note: Ideally we would use the same field for both + // expectCgroupParent and setCgroupParent, and just make it + // default to "docker". However, when using docker < 1.10 with + // systemd, specifying a non-empty cgroup parent (even the + // default value "docker") hits a docker bug + // (https://github.com/docker/docker/issues/17126). 
Using two + // separate fields makes it possible to use the "expect cgroup + // parent to be X" feature even on sites where the "specify + // cgroup parent" feature breaks. + setCgroupParent string + + cStateLock sync.Mutex + cStarted bool // StartContainer() succeeded + cCancelled bool // StopContainer() invoked } // SetupSignals sets up signal handling to gracefully terminate the underlying @@ -107,20 +127,29 @@ func (runner *ContainerRunner) SetupSignals() { signal.Notify(runner.SigChan, syscall.SIGINT) signal.Notify(runner.SigChan, syscall.SIGQUIT) - go func(sig <-chan os.Signal) { - for _ = range sig { - if !runner.Cancelled { - runner.CancelLock.Lock() - runner.Cancelled = true - if runner.ContainerID != "" { - runner.Docker.StopContainer(runner.ContainerID, 10) - } - runner.CancelLock.Unlock() - } - } + go func(sig chan os.Signal) { + <-sig + runner.stop() + signal.Stop(sig) }(runner.SigChan) } +// stop the underlying Docker container. +func (runner *ContainerRunner) stop() { + runner.cStateLock.Lock() + defer runner.cStateLock.Unlock() + if runner.cCancelled { + return + } + runner.cCancelled = true + if runner.cStarted { + err := runner.Docker.StopContainer(runner.ContainerID, 10) + if err != nil { + log.Printf("StopContainer failed: %s", err) + } + } +} + // LoadImage determines the docker image id from the container record and // checks if it is available in the local Docker image store. If not, it loads // the image from Keep. @@ -128,7 +157,7 @@ func (runner *ContainerRunner) LoadImage() (err error) { runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage) - var collection CollectionRecord + var collection arvados.Collection err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection) if err != nil { return fmt.Errorf("While getting container image collection: %v", err) @@ -223,8 +252,15 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) ( return c, nil } +func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) { + if runner.ArvMountPoint == "" { + runner.ArvMountPoint, err = runner.MkTempDir("", prefix) + } + return +} + func (runner *ContainerRunner) SetupMounts() (err error) { - runner.ArvMountPoint, err = runner.MkTempDir("", "keep") + err = runner.SetupArvMountPoint("keep") if err != nil { return fmt.Errorf("While creating keep mount temp dir: %v", err) } @@ -234,10 +270,23 @@ func (runner *ContainerRunner) SetupMounts() (err error) { pdhOnly := true tmpcount := 0 arvMountCmd := []string{"--foreground", "--allow-other", "--read-write"} + + if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 { + arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM)) + } + collectionPaths := []string{} runner.Binds = nil + needCertMount := true - for bind, mnt := range runner.Container.Mounts { + var binds []string + for bind, _ := range runner.Container.Mounts { + binds = append(binds, bind) + } + sort.Strings(binds) + + for _, bind := range binds { + mnt := runner.Container.Mounts[bind] if bind == "stdout" { // Is it a "file" mount kind? 
if mnt.Kind != "file" { @@ -254,7 +303,18 @@ func (runner *ContainerRunner) SetupMounts() (err error) { } } - if mnt.Kind == "collection" { + if bind == "/etc/arvados/ca-certificates.crt" { + needCertMount = false + } + + if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" { + if mnt.Kind != "collection" { + return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind) + } + } + + switch { + case mnt.Kind == "collection": var src string if mnt.UUID != "" && mnt.PortableDataHash != "" { return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount") @@ -269,7 +329,21 @@ func (runner *ContainerRunner) SetupMounts() (err error) { if mnt.Writable { return fmt.Errorf("Can never write to a collection specified by portable data hash") } + idx := strings.Index(mnt.PortableDataHash, "/") + if idx > 0 { + mnt.Path = path.Clean(mnt.PortableDataHash[idx:]) + mnt.PortableDataHash = mnt.PortableDataHash[0:idx] + runner.Container.Mounts[bind] = mnt + } src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash) + if mnt.Path != "" && mnt.Path != "." { + if strings.HasPrefix(mnt.Path, "./") { + mnt.Path = mnt.Path[2:] + } else if strings.HasPrefix(mnt.Path, "/") { + mnt.Path = mnt.Path[1:] + } + src += "/" + mnt.Path + } } else { src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount) arvMountCmd = append(arvMountCmd, "--mount-tmp") @@ -279,31 +353,55 @@ func (runner *ContainerRunner) SetupMounts() (err error) { if mnt.Writable { if bind == runner.Container.OutputPath { runner.HostOutputDir = src + } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") { + return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind) } runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind)) } else { runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind)) } collectionPaths = append(collectionPaths, src) - } else if mnt.Kind == "tmp" { - if bind == runner.Container.OutputPath { - runner.HostOutputDir, err = runner.MkTempDir("", "") - if err != nil { - return fmt.Errorf("While creating mount temp dir: %v", err) - } - st, staterr := os.Stat(runner.HostOutputDir) - if staterr != nil { - return fmt.Errorf("While Stat on temp dir: %v", staterr) - } - err = os.Chmod(runner.HostOutputDir, st.Mode()|os.ModeSetgid|0777) - if staterr != nil { - return fmt.Errorf("While Chmod temp dir: %v", err) - } - runner.CleanupTempDir = append(runner.CleanupTempDir, runner.HostOutputDir) - runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", runner.HostOutputDir, bind)) - } else { - runner.Binds = append(runner.Binds, bind) + + case mnt.Kind == "tmp" && bind == runner.Container.OutputPath: + runner.HostOutputDir, err = runner.MkTempDir("", "") + if err != nil { + return fmt.Errorf("While creating mount temp dir: %v", err) + } + st, staterr := os.Stat(runner.HostOutputDir) + if staterr != nil { + return fmt.Errorf("While Stat on temp dir: %v", staterr) + } + err = os.Chmod(runner.HostOutputDir, st.Mode()|os.ModeSetgid|0777) + if staterr != nil { + return fmt.Errorf("While Chmod temp dir: %v", err) } + runner.CleanupTempDir = append(runner.CleanupTempDir, runner.HostOutputDir) + runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", runner.HostOutputDir, bind)) + + case mnt.Kind == "tmp": + runner.Binds = append(runner.Binds, bind) + + case mnt.Kind == "json": + jsondata, err := json.Marshal(mnt.Content) + if err 
!= nil { + return fmt.Errorf("encoding json data: %v", err) + } + // Create a tempdir with a single file + // (instead of just a tempfile): this way we + // can ensure the file is world-readable + // inside the container, without having to + // make it world-readable on the docker host. + tmpdir, err := runner.MkTempDir("", "") + if err != nil { + return fmt.Errorf("creating temp dir: %v", err) + } + runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir) + tmpfn := filepath.Join(tmpdir, "mountdata.json") + err = ioutil.WriteFile(tmpfn, jsondata, 0644) + if err != nil { + return fmt.Errorf("writing temp file: %v", err) + } + runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind)) } } @@ -311,6 +409,16 @@ func (runner *ContainerRunner) SetupMounts() (err error) { return fmt.Errorf("Output path does not correspond to a writable mount point") } + if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI { + for _, certfile := range arvadosclient.CertFiles { + _, err := os.Stat(certfile) + if err == nil { + runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile)) + break + } + } + } + if pdhOnly { arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id") } else { @@ -372,6 +480,14 @@ func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) { runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr) } + if runner.statReporter != nil { + runner.statReporter.Stop() + closeerr = runner.statLogger.Close() + if closeerr != nil { + runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr) + } + } + runner.loggingDone <- true close(runner.loggingDone) return @@ -379,6 +495,117 @@ func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) { } } +func (runner *ContainerRunner) StartCrunchstat() { + runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat")) + runner.statReporter = &crunchstat.Reporter{ + CID: runner.ContainerID, + Logger: log.New(runner.statLogger, "", 0), + CgroupParent: runner.expectCgroupParent, + CgroupRoot: runner.cgroupRoot, + PollPeriod: runner.statInterval, + } + runner.statReporter.Start() +} + +type infoCommand struct { + label string + cmd []string +} + +// Gather node information and store it on the log for debugging +// purposes. +func (runner *ContainerRunner) LogNodeInfo() (err error) { + w := runner.NewLogWriter("node-info") + logger := log.New(w, "node-info", 0) + + commands := []infoCommand{ + infoCommand{ + label: "Host Information", + cmd: []string{"uname", "-a"}, + }, + infoCommand{ + label: "CPU Information", + cmd: []string{"cat", "/proc/cpuinfo"}, + }, + infoCommand{ + label: "Memory Information", + cmd: []string{"cat", "/proc/meminfo"}, + }, + infoCommand{ + label: "Disk Space", + cmd: []string{"df", "-m", "/"}, + }, + infoCommand{ + label: "Disk Space", + cmd: []string{"df", "-m", os.TempDir()}, + }, + infoCommand{ + label: "Disk INodes", + cmd: []string{"df", "-i", "/"}, + }, + infoCommand{ + label: "Disk INodes", + cmd: []string{"df", "-i", os.TempDir()}, + }, + } + + // Run commands with informational output to be logged. 
+ var out []byte + for _, command := range commands { + out, err = exec.Command(command.cmd[0], command.cmd[1:]...).CombinedOutput() + if err != nil { + return fmt.Errorf("While running command %q: %v", + command.cmd, err) + } + logger.Println(command.label) + for _, line := range strings.Split(string(out), "\n") { + logger.Println(" ", line) + } + } + + err = w.Close() + if err != nil { + return fmt.Errorf("While closing node-info logs: %v", err) + } + return nil +} + +// Get and save the raw JSON container record from the API server +func (runner *ContainerRunner) LogContainerRecord() (err error) { + w := &ArvLogWriter{ + runner.ArvClient, + runner.Container.UUID, + "container", + runner.LogCollection.Open("container.json"), + } + // Get Container record JSON from the API Server + reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil) + if err != nil { + return fmt.Errorf("While retrieving container record from the API server: %v", err) + } + // Read the API server response as []byte + json_bytes, err := ioutil.ReadAll(reader) + if err != nil { + return fmt.Errorf("While reading container record API server response: %v", err) + } + // Decode the JSON []byte + var cr map[string]interface{} + if err = json.Unmarshal(json_bytes, &cr); err != nil { + return fmt.Errorf("While decoding the container record JSON response: %v", err) + } + // Re-encode it using indentation to improve readability + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + if err = enc.Encode(cr); err != nil { + return fmt.Errorf("While logging the JSON container record: %v", err) + } + err = w.Close() + if err != nil { + return fmt.Errorf("While closing container.json log: %v", err) + } + return nil +} + // AttachLogs connects the docker container stdout and stderr logs to the // Arvados logger which logs to Keep and the API server logs table. func (runner *ContainerRunner) AttachStreams() (err error) { @@ -459,8 +686,13 @@ func (runner *ContainerRunner) CreateContainer() error { return fmt.Errorf("While creating container: %v", err) } - runner.HostConfig = dockerclient.HostConfig{Binds: runner.Binds, - LogConfig: dockerclient.LogConfig{Type: "none"}} + runner.HostConfig = dockerclient.HostConfig{ + Binds: runner.Binds, + CgroupParent: runner.setCgroupParent, + LogConfig: dockerclient.LogConfig{ + Type: "none", + }, + } return runner.AttachStreams() } @@ -468,10 +700,16 @@ func (runner *ContainerRunner) CreateContainer() error { // StartContainer starts the docker container created by CreateContainer. 
func (runner *ContainerRunner) StartContainer() error { runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID) + runner.cStateLock.Lock() + defer runner.cStateLock.Unlock() + if runner.cCancelled { + return ErrCancelled + } err := runner.Docker.StartContainer(runner.ContainerID, &runner.HostConfig) if err != nil { return fmt.Errorf("could not start container: %v", err) } + runner.cStarted = true return nil } @@ -480,12 +718,22 @@ func (runner *ContainerRunner) StartContainer() error { func (runner *ContainerRunner) WaitFinish() error { runner.CrunchLog.Print("Waiting for container to finish") - result := runner.Docker.Wait(runner.ContainerID) - wr := <-result - if wr.Error != nil { - return fmt.Errorf("While waiting for container to finish: %v", wr.Error) + waitDocker := runner.Docker.Wait(runner.ContainerID) + waitMount := runner.ArvMountExit + for waitDocker != nil { + select { + case err := <-waitMount: + runner.CrunchLog.Printf("arv-mount exited before container finished: %v", err) + waitMount = nil + runner.stop() + case wr := <-waitDocker: + if wr.Error != nil { + return fmt.Errorf("While waiting for container to finish: %v", wr.Error) + } + runner.ExitCode = &wr.ExitCode + waitDocker = nil + } } - runner.ExitCode = &wr.ExitCode // wait for stdout/stderr to complete <-runner.loggingDone @@ -499,6 +747,21 @@ func (runner *ContainerRunner) CaptureOutput() error { return nil } + if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI { + // Output may have been set directly by the container, so + // refresh the container record to check. + err := runner.ArvClient.Get("containers", runner.Container.UUID, + nil, &runner.Container) + if err != nil { + return err + } + if runner.Container.Output != "" { + // Container output is already set. 
+ runner.OutputPDH = &runner.Container.Output + return nil + } + } + if runner.HostOutputDir == "" { return nil } @@ -514,7 +777,7 @@ func (runner *ContainerRunner) CaptureOutput() error { _, err = os.Stat(collectionMetafile) if err != nil { // Regular directory - cw := CollectionWriter{runner.Kc, nil, sync.Mutex{}} + cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}} manifestText, err = cw.WriteTree(runner.HostOutputDir, runner.CrunchLog.Logger) if err != nil { return fmt.Errorf("While uploading output files: %v", err) @@ -527,7 +790,7 @@ func (runner *ContainerRunner) CaptureOutput() error { } defer file.Close() - rec := CollectionRecord{} + var rec arvados.Collection err = json.NewDecoder(file).Decode(&rec) if err != nil { return fmt.Errorf("While reading FUSE metafile: %v", err) @@ -535,20 +798,94 @@ func (runner *ContainerRunner) CaptureOutput() error { manifestText = rec.ManifestText } - var response CollectionRecord + // Pre-populate output from the configured mount points + var binds []string + for bind, _ := range runner.Container.Mounts { + binds = append(binds, bind) + } + sort.Strings(binds) + + for _, bind := range binds { + mnt := runner.Container.Mounts[bind] + + bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath) + + if bindSuffix == bind || len(bindSuffix) <= 0 { + // either does not start with OutputPath or is OutputPath itself + continue + } + + if mnt.ExcludeFromOutput == true { + continue + } + + // append to manifest_text + m, err := runner.getCollectionManifestForPath(mnt, bindSuffix) + if err != nil { + return err + } + + manifestText = manifestText + m + } + + // Save output + var response arvados.Collection + manifest := manifest.Manifest{Text: manifestText} + manifestText = manifest.Extract(".", ".").Text err = runner.ArvClient.Create("collections", arvadosclient.Dict{ + "ensure_unique_name": true, "collection": arvadosclient.Dict{ + "is_trashed": true, + "name": "output for " + runner.Container.UUID, "manifest_text": manifestText}}, &response) if err != nil { return fmt.Errorf("While creating output collection: %v", err) } + runner.OutputPDH = &response.PortableDataHash + return nil +} - runner.OutputPDH = new(string) - *runner.OutputPDH = response.PortableDataHash +var outputCollections = make(map[string]arvados.Collection) + +// Fetch the collection for the mnt.PortableDataHash +// Return the manifest_text fragment corresponding to the specified mnt.Path +// after making any required updates. +// Ex: +// If mnt.Path is not specified, +// return the entire manifest_text after replacing any "." with bindSuffix +// If mnt.Path corresponds to one stream, +// return the manifest_text for that stream after replacing that stream name with bindSuffix +// Otherwise, check if a filename in any one stream is being sought. 
Return the manifest_text +// for that stream after replacing stream name with bindSuffix minus the last word +// and the file name with last word of the bindSuffix +// Allowed path examples: +// "path":"/" +// "path":"/subdir1" +// "path":"/subdir1/subdir2" +// "path":"/subdir/filename" etc +func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) { + collection := outputCollections[mnt.PortableDataHash] + if collection.PortableDataHash == "" { + err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection) + if err != nil { + return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err) + } + outputCollections[mnt.PortableDataHash] = collection + } - return nil + if collection.ManifestText == "" { + runner.CrunchLog.Printf("No manifest text for collection %v", collection.PortableDataHash) + return "", nil + } + + mft := manifest.Manifest{Text: collection.ManifestText} + extracted := mft.Extract(mnt.Path, bindSuffix) + if extracted.Err != nil { + return "", fmt.Errorf("Error parsing manifest for %v: %v", mnt.PortableDataHash, extracted.Err.Error()) + } + return extracted.Text, nil } func (runner *ContainerRunner) CleanupDirs() { @@ -599,27 +936,27 @@ func (runner *ContainerRunner) CommitLogs() error { return fmt.Errorf("While creating log manifest: %v", err) } - var response CollectionRecord + var response arvados.Collection err = runner.ArvClient.Create("collections", arvadosclient.Dict{ + "ensure_unique_name": true, "collection": arvadosclient.Dict{ + "is_trashed": true, "name": "logs for " + runner.Container.UUID, "manifest_text": mt}}, &response) if err != nil { return fmt.Errorf("While creating log collection: %v", err) } - runner.LogsPDH = &response.PortableDataHash - return nil } // UpdateContainerRunning updates the container state to "Running" func (runner *ContainerRunner) UpdateContainerRunning() error { - runner.CancelLock.Lock() - defer runner.CancelLock.Unlock() - if runner.Cancelled { + runner.cStateLock.Lock() + defer runner.cStateLock.Unlock() + if runner.cCancelled { return ErrCancelled } return runner.ArvClient.Update("containers", runner.Container.UUID, @@ -647,10 +984,10 @@ func (runner *ContainerRunner) ContainerToken() (string, error) { func (runner *ContainerRunner) UpdateContainerFinal() error { update := arvadosclient.Dict{} update["state"] = runner.finalState + if runner.LogsPDH != nil { + update["log"] = *runner.LogsPDH + } if runner.finalState == "Complete" { - if runner.LogsPDH != nil { - update["log"] = *runner.LogsPDH - } if runner.ExitCode != nil { update["exit_code"] = *runner.ExitCode } @@ -663,9 +1000,9 @@ func (runner *ContainerRunner) UpdateContainerFinal() error { // IsCancelled returns the value of Cancelled, with goroutine safety. 
func (runner *ContainerRunner) IsCancelled() bool { - runner.CancelLock.Lock() - defer runner.CancelLock.Unlock() - return runner.Cancelled + runner.cStateLock.Lock() + defer runner.cStateLock.Unlock() + return runner.cCancelled } // NewArvLogWriter creates an ArvLogWriter @@ -710,6 +1047,7 @@ func (runner *ContainerRunner) Run() (err error) { checkErr(err) if runner.finalState == "Queued" { + runner.CrunchLog.Close() runner.UpdateContainerFinal() return } @@ -742,6 +1080,7 @@ func (runner *ContainerRunner) Run() (err error) { // check for and/or load image err = runner.LoadImage() if err != nil { + runner.finalState = "Cancelled" err = fmt.Errorf("While loading container image: %v", err) return } @@ -749,6 +1088,7 @@ func (runner *ContainerRunner) Run() (err error) { // set up FUSE mount and binds err = runner.SetupMounts() if err != nil { + runner.finalState = "Cancelled" err = fmt.Errorf("While setting up mounts: %v", err) return } @@ -758,6 +1098,19 @@ func (runner *ContainerRunner) Run() (err error) { return } + // Gather and record node information + err = runner.LogNodeInfo() + if err != nil { + return + } + // Save container.json record on log collection + err = runner.LogContainerRecord() + if err != nil { + return + } + + runner.StartCrunchstat() + if runner.IsCancelled() { return } @@ -790,7 +1143,7 @@ func NewContainerRunner(api IArvadosClient, cr.NewLogWriter = cr.NewArvLogWriter cr.RunArvMount = cr.ArvMountCmd cr.MkTempDir = ioutil.TempDir - cr.LogCollection = &CollectionWriter{kc, nil, sync.Mutex{}} + cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}} cr.Container.UUID = containerUUID cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run")) cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0) @@ -798,10 +1151,19 @@ func NewContainerRunner(api IArvadosClient, } func main() { + statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting") + cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree") + cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)") + cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container") + caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates") flag.Parse() containerId := flag.Arg(0) + if *caCertsPath != "" { + arvadosclient.CertFiles = []string{*caCertsPath} + } + api, err := arvadosclient.MakeArvadosClient() if err != nil { log.Fatalf("%s: %v", containerId, err) @@ -809,7 +1171,7 @@ func main() { api.Retries = 8 var kc *keepclient.KeepClient - kc, err = keepclient.MakeKeepClient(&api) + kc, err = keepclient.MakeKeepClient(api) if err != nil { log.Fatalf("%s: %v", containerId, err) } @@ -822,6 +1184,14 @@ func main() { } cr := NewContainerRunner(api, kc, docker, containerId) + cr.statInterval = *statInterval + cr.cgroupRoot = *cgroupRoot + cr.expectCgroupParent = *cgroupParent + if *cgroupParentSubsystem != "" { + p := findCgroup(*cgroupParentSubsystem) + cr.setCgroupParent = p + cr.expectCgroupParent = p + } err = cr.Run() if err != nil {
		log.Fatalf("%s: %v", containerId, err)
	}
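
The sketches below illustrate a few of the patterns this patch introduces. They are paraphrases under stated assumptions, not code from the patch or the Arvados SDK.

Both SetupMounts and CaptureOutput now copy the mount-map keys into a slice and sort it before iterating. Go randomizes map iteration order, so without the sort the docker bind list (and which validation error surfaces first) could change from run to run; sorting makes runs reproducible. The pattern in miniature, with a hypothetical mounts map:

	package main

	import (
		"fmt"
		"sort"
	)

	func main() {
		// Hypothetical mounts; only the bind paths matter here.
		mounts := map[string]string{
			"stdout":         "file",
			"/var/spool/cwl": "tmp",
			"/keep/in":       "collection",
		}
		binds := make([]string, 0, len(mounts))
		for bind := range mounts {
			binds = append(binds, bind)
		}
		sort.Strings(binds)
		for _, bind := range binds {
			fmt.Println(bind, "->", mounts[bind]) // stable order on every run
		}
	}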
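
The old CancelLock/Cancelled pair becomes cStateLock/cStarted/cCancelled, and every transition goes through the one lock: stop() only issues StopContainer once the container has actually started, and StartContainer refuses to run once cancellation has been recorded, narrowing the race where a signal arrives between container creation and start. A minimal standalone sketch of the same state machine, with the docker calls abstracted into callbacks:

	package main

	import (
		"errors"
		"fmt"
		"sync"
	)

	type lifecycle struct {
		mu        sync.Mutex
		started   bool
		cancelled bool
	}

	// start runs the container unless cancel() already won the race.
	func (l *lifecycle) start(run func() error) error {
		l.mu.Lock()
		defer l.mu.Unlock()
		if l.cancelled {
			return errors.New("Cancelled")
		}
		if err := run(); err != nil {
			return err
		}
		l.started = true
		return nil
	}

	// cancel is idempotent and only stops a container that was started.
	func (l *lifecycle) cancel(stop func()) {
		l.mu.Lock()
		defer l.mu.Unlock()
		if l.cancelled {
			return
		}
		l.cancelled = true
		if l.started {
			stop()
		}
	}

	func main() {
		var l lifecycle
		l.cancel(func() { fmt.Println("stop") }) // no-op: nothing started yet
		err := l.start(func() error { return nil })
		fmt.Println(err) // "Cancelled": start refused after cancel
	}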
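
WaitFinish used to block solely on the docker wait channel; it now selects on that channel and on ArvMountExit, so if arv-mount dies first the container is stopped instead of being left wedged against a dead FUSE mount. Setting a received-from channel to nil is what retires its case from the select (a receive on a nil channel blocks forever). Simplified, with a plain int standing in for dockerclient's wait result:

	package main

	import (
		"fmt"
		"log"
	)

	func waitFinish(waitDocker <-chan int, waitMount <-chan error, stop func()) int {
		exitCode := -1
		for waitDocker != nil {
			select {
			case err := <-waitMount:
				log.Printf("arv-mount exited before container finished: %v", err)
				waitMount = nil // retire this case
				stop()          // the docker wait will now complete
			case code := <-waitDocker:
				exitCode = code
				waitDocker = nil // leave the loop
			}
		}
		return exitCode
	}

	func main() {
		dockerDone := make(chan int, 1)
		mountDone := make(chan error, 1)
		mountDone <- fmt.Errorf("fuse: exited") // simulate arv-mount crashing first
		code := waitFinish(dockerDone, mountDone, func() { dockerDone <- 137 })
		fmt.Println("exit code:", code) // 137
	}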
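
On the two cgroup fields: -cgroup-parent feeds only expectCgroupParent (what crunchstat should look for), while -cgroup-parent-subsystem routes through findCgroup and sets both fields, i.e. the container is explicitly placed under the cgroup the current process already occupies. findCgroup is defined elsewhere in this package and, judging from its use above, returns the cgroup path as a plain string; a rough sketch of the idea under the usual /proc/self/cgroup line format ("4:memory:/docker/abc123"), with a different, hypothetical signature:

	package main

	import (
		"fmt"
		"io/ioutil"
		"strings"
	)

	// findCgroupSketch is a hypothetical stand-in for findCgroup: it looks
	// up the current process's cgroup path for one subsystem.
	func findCgroupSketch(subsystem string) (string, error) {
		data, err := ioutil.ReadFile("/proc/self/cgroup")
		if err != nil {
			return "", err
		}
		for _, line := range strings.Split(string(data), "\n") {
			fields := strings.SplitN(line, ":", 3)
			if len(fields) != 3 {
				continue
			}
			// The middle field can name several subsystems, e.g. "cpu,cpuacct".
			for _, sub := range strings.Split(fields[1], ",") {
				if sub == subsystem {
					return fields[2], nil
				}
			}
		}
		return "", fmt.Errorf("subsystem %q not found in /proc/self/cgroup", subsystem)
	}

	func main() {
		p, err := findCgroupSketch("memory")
		fmt.Println(p, err)
	}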
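
LogContainerRecord fetches the raw container record through the new CallRaw interface method and logs it as indented JSON by round-tripping through json.Unmarshal and an Encoder with SetIndent. The same round-trip in miniature (the UUID below is a made-up placeholder):

	package main

	import (
		"encoding/json"
		"log"
		"os"
	)

	func main() {
		raw := []byte(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxxx","state":"Running","priority":1}`)
		var rec map[string]interface{}
		if err := json.Unmarshal(raw, &rec); err != nil {
			log.Fatal(err)
		}
		// Re-encode with indentation, as LogContainerRecord does for the
		// container.json log.
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", "    ")
		if err := enc.Encode(rec); err != nil {
			log.Fatal(err)
		}
	}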