"os/exec"
"os/signal"
"path"
+ "path/filepath"
+ "sort"
"strings"
"sync"
"syscall"
type IArvadosClient interface {
Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
- Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error)
- Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) (err error)
+ Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
+ Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
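+ // Discovery returns the value of the given key from the API server's discovery document.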
+ Discovery(key string) (interface{}, error)
}
// ErrCancelled is the error returned when the container is cancelled.
// IKeepClient defines the minimal Keep API methods used by crunch-run.
type IKeepClient interface {
PutHB(hash string, buf []byte) (string, int, error)
- ManifestFileReader(m manifest.Manifest, filename string) (keepclient.ReadCloserWithLen, error)
+ ManifestFileReader(m manifest.Manifest, filename string) (keepclient.Reader, error)
}
// NewLogWriter is a factory function to create a new log writer.
SigChan chan os.Signal
ArvMountExit chan error
finalState string
+ trashLifetime time.Duration
statLogger io.WriteCloser
statReporter *crunchstat.Reporter
statInterval time.Duration
cgroupRoot string
- cgroupParent string
+ // What we expect the container's cgroup parent to be.
+ expectCgroupParent string
+ // What we tell docker to use as the container's cgroup
+ // parent. Note: Ideally we would use the same field for both
+ // expectCgroupParent and setCgroupParent, and just make it
+ // default to "docker". However, when using docker < 1.10 with
+ // systemd, specifying a non-empty cgroup parent (even the
+ // default value "docker") hits a docker bug
+ // (https://github.com/docker/docker/issues/17126). Using two
+ // separate fields makes it possible to use the "expect cgroup
+ // parent to be X" feature even on sites where the "specify
+ // cgroup parent" feature breaks.
+ setCgroupParent string
}
// SetupSignals sets up signal handling to gracefully terminate the underlying
return c, nil
}
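+// tmpBackedOutputDir is true when the output directory is backed by a local
+// temp dir (mount kind "tmp") rather than a writable collection mount.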
+var tmpBackedOutputDir = false
+
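+// SetupArvMountPoint creates a temp directory to use as the arv-mount mount
+// point, unless one has already been set.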
+func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
+ if runner.ArvMountPoint == "" {
+ runner.ArvMountPoint, err = runner.MkTempDir("", prefix)
+ }
+ return
+}
+
func (runner *ContainerRunner) SetupMounts() (err error) {
- runner.ArvMountPoint, err = runner.MkTempDir("", "keep")
+ err = runner.SetupArvMountPoint("keep")
if err != nil {
return fmt.Errorf("While creating keep mount temp dir: %v", err)
}
pdhOnly := true
tmpcount := 0
arvMountCmd := []string{"--foreground", "--allow-other", "--read-write"}
+
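+ // Pass the keep_cache_ram runtime constraint to arv-mount as its --file-cache size.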
+ if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
+ arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
+ }
+
collectionPaths := []string{}
runner.Binds = nil
+ needCertMount := true
+
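+ // Go randomizes map iteration order, so collect and sort the mount targets
+ // to process them in a deterministic order.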
+ var binds []string
+ for bind := range runner.Container.Mounts {
+ binds = append(binds, bind)
+ }
+ sort.Strings(binds)
- for bind, mnt := range runner.Container.Mounts {
+ for _, bind := range binds {
+ mnt := runner.Container.Mounts[bind]
if bind == "stdout" {
// Is it a "file" mount kind?
if mnt.Kind != "file" {
}
}
- if mnt.Kind == "collection" {
+ if bind == "/etc/arvados/ca-certificates.crt" {
+ needCertMount = false
+ }
+
+ if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
+ if mnt.Kind != "collection" {
+ return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind)
+ }
+ }
+
+ switch {
+ case mnt.Kind == "collection":
var src string
if mnt.UUID != "" && mnt.PortableDataHash != "" {
return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
if mnt.Writable {
if bind == runner.Container.OutputPath {
runner.HostOutputDir = src
+ } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+ return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
}
runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
} else {
runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
}
collectionPaths = append(collectionPaths, src)
- } else if mnt.Kind == "tmp" {
- if bind == runner.Container.OutputPath {
- runner.HostOutputDir, err = runner.MkTempDir("", "")
- if err != nil {
- return fmt.Errorf("While creating mount temp dir: %v", err)
- }
- st, staterr := os.Stat(runner.HostOutputDir)
- if staterr != nil {
- return fmt.Errorf("While Stat on temp dir: %v", staterr)
- }
- err = os.Chmod(runner.HostOutputDir, st.Mode()|os.ModeSetgid|0777)
- if staterr != nil {
- return fmt.Errorf("While Chmod temp dir: %v", err)
- }
- runner.CleanupTempDir = append(runner.CleanupTempDir, runner.HostOutputDir)
- runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", runner.HostOutputDir, bind))
- } else {
- runner.Binds = append(runner.Binds, bind)
+
+ case mnt.Kind == "tmp" && bind == runner.Container.OutputPath:
+ runner.HostOutputDir, err = runner.MkTempDir("", "")
+ if err != nil {
+ return fmt.Errorf("While creating mount temp dir: %v", err)
+ }
+ st, staterr := os.Stat(runner.HostOutputDir)
+ if staterr != nil {
+ return fmt.Errorf("While Stat on temp dir: %v", staterr)
+ }
+ err = os.Chmod(runner.HostOutputDir, st.Mode()|os.ModeSetgid|0777)
+ if err != nil {
+ return fmt.Errorf("While Chmod temp dir: %v", err)
}
+ runner.CleanupTempDir = append(runner.CleanupTempDir, runner.HostOutputDir)
+ runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", runner.HostOutputDir, bind))
+ tmpBackedOutputDir = true
+
+ case mnt.Kind == "tmp":
+ runner.Binds = append(runner.Binds, bind)
+
+ case mnt.Kind == "json":
+ jsondata, err := json.Marshal(mnt.Content)
+ if err != nil {
+ return fmt.Errorf("encoding json data: %v", err)
+ }
+ // Create a tempdir with a single file
+ // (instead of just a tempfile): this way we
+ // can ensure the file is world-readable
+ // inside the container, without having to
+ // make it world-readable on the docker host.
+ tmpdir, err := runner.MkTempDir("", "")
+ if err != nil {
+ return fmt.Errorf("creating temp dir: %v", err)
+ }
+ runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
+ tmpfn := filepath.Join(tmpdir, "mountdata.json")
+ err = ioutil.WriteFile(tmpfn, jsondata, 0644)
+ if err != nil {
+ return fmt.Errorf("writing temp file: %v", err)
+ }
+ runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
}
}
return fmt.Errorf("Output path does not correspond to a writable mount point")
}
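+ // If the container wants API access and has not mounted its own CA bundle at
+ // /etc/arvados/ca-certificates.crt, bind the first root certificate file found
+ // on the host into the container read-only.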
+ if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
+ for _, certfile := range arvadosclient.CertFiles {
+ _, err := os.Stat(certfile)
+ if err == nil {
+ runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
+ break
+ }
+ }
+ }
+
if pdhOnly {
arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
} else {
runner.statReporter = &crunchstat.Reporter{
CID: runner.ContainerID,
Logger: log.New(runner.statLogger, "", 0),
- CgroupParent: runner.cgroupParent,
+ CgroupParent: runner.expectCgroupParent,
CgroupRoot: runner.cgroupRoot,
PollPeriod: runner.statInterval,
}
return fmt.Errorf("While creating container: %v", err)
}
- runner.HostConfig = dockerclient.HostConfig{Binds: runner.Binds,
- LogConfig: dockerclient.LogConfig{Type: "none"}}
+ runner.HostConfig = dockerclient.HostConfig{
+ Binds: runner.Binds,
+ CgroupParent: runner.setCgroupParent,
+ LogConfig: dockerclient.LogConfig{
+ Type: "none",
+ },
+ }
return runner.AttachStreams()
}
return nil
}
+ if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
+ // Output may have been set directly by the container, so
+ // refresh the container record to check.
+ err := runner.ArvClient.Get("containers", runner.Container.UUID,
+ nil, &runner.Container)
+ if err != nil {
+ return err
+ }
+ if runner.Container.Output != "" {
+ // Container output is already set.
+ runner.OutputPDH = &runner.Container.Output
+ return nil
+ }
+ }
+
if runner.HostOutputDir == "" {
return nil
}
manifestText = rec.ManifestText
}
+ // Pre-populate output from the configured mount points
+ var binds []string
+ for bind := range runner.Container.Mounts {
+ binds = append(binds, bind)
+ }
+ sort.Strings(binds)
+
+ for _, bind := range binds {
+ mnt := runner.Container.Mounts[bind]
+
+ bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath)
+
+ if bindSuffix == bind || bindSuffix == "" {
+ // either does not start with OutputPath or is OutputPath itself
+ continue
+ }
+
+ if !strings.HasPrefix(bindSuffix, "/") {
+ return fmt.Errorf("Expected bind to be of the format '%v/*' but found: %v", runner.Container.OutputPath, bind)
+ }
+
+ jsondata, err := json.Marshal(mnt.Content)
+ if err != nil {
+ return fmt.Errorf("While marshal of mount content: %v", err)
+ }
+ var content map[string]interface{}
+ err = json.Unmarshal(jsondata, &content)
+ if err != nil {
+ return fmt.Errorf("While unmarshal of mount content: %v", err)
+ }
+
+ if content["exclude_from_output"] == true {
+ continue
+ }
+
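+ // A portable_data_hash of the form "hash/path" embeds a path inside the
+ // collection; split it into the hash and the path before resolving the manifest.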
+ idx := strings.Index(mnt.PortableDataHash, "/")
+ if idx > 0 {
+ mnt.Path = mnt.PortableDataHash[idx:]
+ mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
+ }
+
+ // append to manifest_text
+ m, err := runner.getCollectionManifestForPath(mnt, bindSuffix)
+ if err != nil {
+ return err
+ }
+
+ manifestText = manifestText + m
+ }
+
+ // Save output
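+ // The new collection is given a trash_at time (now + defaultTrashLifetime)
+ // so it is eventually cleaned up if nothing else claims it.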
var response arvados.Collection
err = runner.ArvClient.Create("collections",
arvadosclient.Dict{
"collection": arvadosclient.Dict{
+ "trash_at": time.Now().Add(runner.trashLifetime).Format(time.RFC3339),
+ "name": "output for " + runner.Container.UUID,
"manifest_text": manifestText}},
&response)
if err != nil {
return fmt.Errorf("While creating output collection: %v", err)
}
+ runner.OutputPDH = &response.PortableDataHash
+ return nil
+}
+
+// getCollectionManifestForPath fetches the collection identified by
+// mnt.PortableDataHash and returns the manifest_text fragment that
+// corresponds to mnt.Path, rewritten so it mounts at bindSuffix.
+// Ex:
+// If mnt.Path is empty or "/",
+// return the entire manifest_text after replacing every stream name "." with "."+bindSuffix
+// If mnt.Path names one complete stream,
+// return the manifest_text for that stream after replacing its stream name with "."+bindSuffix
+// Otherwise, mnt.Path names a single file within a stream. Return the manifest_text
+// for that stream after replacing the stream name with bindSuffix minus its last path
+// component, and the file name with the last component of bindSuffix.
+// Allowed path examples:
+// "path":"/"
+// "path":"/subdir1"
+// "path":"/subdir1/subdir2"
+// "path":"/subdir/filename" etc
+func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) {
+ var collection arvados.Collection
+ err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection)
+ if err != nil {
+ return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err)
+ }
- runner.OutputPDH = new(string)
- *runner.OutputPDH = response.PortableDataHash
+ manifestText := ""
+ if mnt.Path == "" || mnt.Path == "/" {
+ // no path specified; return the entire manifest text
+ manifestText = collection.ManifestText
+ manifestText = strings.Replace(manifestText, "./", "."+bindSuffix+"/", -1)
+ manifestText = strings.Replace(manifestText, ". ", "."+bindSuffix+" ", -1)
+ } else {
+ // either a single stream or file from a stream is being sought
+ bindIdx := strings.LastIndex(bindSuffix, "/")
+ var bindSubdir, bindFileName string
+ if bindIdx >= 0 {
+ bindSubdir = "." + bindSuffix[0:bindIdx]
+ bindFileName = bindSuffix[bindIdx+1:]
+ }
+ mntPath := mnt.Path
+ if strings.HasSuffix(mntPath, "/") {
+ mntPath = mntPath[0 : len(mntPath)-1]
+ }
+ pathIdx := strings.LastIndex(mntPath, "/")
+ var pathSubdir, pathFileName string
+ if pathIdx >= 0 {
+ pathSubdir = "." + mntPath[0:pathIdx]
+ pathFileName = mntPath[pathIdx+1:]
+ }
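+ // Walk the manifest stream by stream, looking for either the whole stream
+ // named by mnt.Path or a single file within it.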
+ streams := strings.Split(collection.ManifestText, "\n")
+ for _, stream := range streams {
+ tokens := strings.Split(stream, " ")
+ if tokens[0] == "."+mntPath {
+ // path refers to this complete stream
+ adjustedStream := strings.Replace(stream, "."+mntPath, "."+bindSuffix, -1)
+ manifestText = adjustedStream + "\n"
+ break
+ } else {
+ // look for a matching file in this stream
+ if tokens[0] == pathSubdir {
+ // path refers to a file in this stream
+ for _, token := range tokens {
+ if strings.Index(token, ":"+pathFileName) > 0 {
+ // found the file in the stream; discard all other file tokens
+ for _, t := range tokens {
+ if strings.Index(t, ":") == -1 {
+ manifestText += (" " + t)
+ } else {
+ break // done reading all non-file tokens
+ }
+ }
+ manifestText = strings.Trim(manifestText, " ")
+ token = strings.Replace(token, ":"+pathFileName, ":"+bindFileName, -1)
+ manifestText += (" " + token + "\n")
+ manifestText = strings.Replace(manifestText, pathSubdir, bindSubdir, -1)
+ break
+ }
+ }
+ }
+ }
+ }
+ }
- return nil
+ return manifestText, nil
+}
+
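+// loadDiscoveryVars reads cluster-wide settings needed by the runner
+// (currently just defaultTrashLifetime) from the API server's discovery document.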
+func (runner *ContainerRunner) loadDiscoveryVars() {
+ tl, err := runner.ArvClient.Discovery("defaultTrashLifetime")
+ if err != nil {
+ log.Fatalf("getting defaultTrashLifetime from discovery document: %s", err)
+ }
+ runner.trashLifetime = time.Duration(tl.(float64)) * time.Second
}
func (runner *ContainerRunner) CleanupDirs() {
err = runner.ArvClient.Create("collections",
arvadosclient.Dict{
"collection": arvadosclient.Dict{
+ "trash_at": time.Now().Add(runner.trashLifetime).Format(time.RFC3339),
"name": "logs for " + runner.Container.UUID,
"manifest_text": mt}},
&response)
if err != nil {
return fmt.Errorf("While creating log collection: %v", err)
}
-
runner.LogsPDH = &response.PortableDataHash
-
return nil
}
func (runner *ContainerRunner) UpdateContainerFinal() error {
update := arvadosclient.Dict{}
update["state"] = runner.finalState
+ if runner.LogsPDH != nil {
+ update["log"] = *runner.LogsPDH
+ }
if runner.finalState == "Complete" {
- if runner.LogsPDH != nil {
- update["log"] = *runner.LogsPDH
- }
if runner.ExitCode != nil {
update["exit_code"] = *runner.ExitCode
}
checkErr(err)
if runner.finalState == "Queued" {
+ runner.CrunchLog.Close()
runner.UpdateContainerFinal()
return
}
// check for and/or load image
err = runner.LoadImage()
if err != nil {
+ runner.finalState = "Cancelled"
err = fmt.Errorf("While loading container image: %v", err)
return
}
// set up FUSE mount and binds
err = runner.SetupMounts()
if err != nil {
+ runner.finalState = "Cancelled"
err = fmt.Errorf("While setting up mounts: %v", err)
return
}
cr.Container.UUID = containerUUID
cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
+ cr.loadDiscoveryVars()
return cr
}
func main() {
statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
- cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup")
+ cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
+ cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
+ caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
flag.Parse()
containerId := flag.Arg(0)
+ if *caCertsPath != "" {
+ arvadosclient.CertFiles = []string{*caCertsPath}
+ }
+
api, err := arvadosclient.MakeArvadosClient()
if err != nil {
log.Fatalf("%s: %v", containerId, err)
api.Retries = 8
var kc *keepclient.KeepClient
- kc, err = keepclient.MakeKeepClient(&api)
+ kc, err = keepclient.MakeKeepClient(api)
if err != nil {
log.Fatalf("%s: %v", containerId, err)
}
cr := NewContainerRunner(api, kc, docker, containerId)
cr.statInterval = *statInterval
cr.cgroupRoot = *cgroupRoot
- cr.cgroupParent = *cgroupParent
+ cr.expectCgroupParent = *cgroupParent
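+ // When -cgroup-parent-subsystem is given, use this process's current cgroup for
+ // that subsystem both as the cgroup parent passed to docker and as the parent
+ // crunchstat expects to find.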
+ if *cgroupParentSubsystem != "" {
+ p := findCgroup(*cgroupParentSubsystem)
+ cr.setCgroupParent = p
+ cr.expectCgroupParent = p
+ }
err = cr.Run()
if err != nil {