X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/827879be023e90d58eb681b3c930154739a0b27f..5d00ecb0932f86e4d2aced3d9258b96522ef38bd:/services/crunch-run/crunchrun.go diff --git a/services/crunch-run/crunchrun.go b/services/crunch-run/crunchrun.go index 0e979c1a4a..812525db69 100644 --- a/services/crunch-run/crunchrun.go +++ b/services/crunch-run/crunchrun.go @@ -1,16 +1,12 @@ package main import ( + "bytes" + "context" "encoding/json" "errors" "flag" "fmt" - "git.curoverse.com/arvados.git/lib/crunchstat" - "git.curoverse.com/arvados.git/sdk/go/arvados" - "git.curoverse.com/arvados.git/sdk/go/arvadosclient" - "git.curoverse.com/arvados.git/sdk/go/keepclient" - "git.curoverse.com/arvados.git/sdk/go/manifest" - "github.com/curoverse/dockerclient" "io" "io/ioutil" "log" @@ -19,10 +15,22 @@ import ( "os/signal" "path" "path/filepath" + "sort" "strings" "sync" "syscall" "time" + + "git.curoverse.com/arvados.git/lib/crunchstat" + "git.curoverse.com/arvados.git/sdk/go/arvados" + "git.curoverse.com/arvados.git/sdk/go/arvadosclient" + "git.curoverse.com/arvados.git/sdk/go/keepclient" + "git.curoverse.com/arvados.git/sdk/go/manifest" + + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + dockernetwork "github.com/docker/docker/api/types/network" + dockerclient "github.com/docker/docker/client" ) // IArvadosClient is the minimal set of Arvados API methods used by crunch-run. @@ -31,6 +39,7 @@ type IArvadosClient interface { Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error + CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error) Discovery(key string) (interface{}, error) } @@ -52,14 +61,62 @@ type MkTempDir func(string, string) (string, error) // ThinDockerClient is the minimal Docker client interface used by crunch-run.
type ThinDockerClient interface { - StopContainer(id string, timeout int) error - InspectImage(id string) (*dockerclient.ImageInfo, error) - LoadImage(reader io.Reader) error - CreateContainer(config *dockerclient.ContainerConfig, name string, authConfig *dockerclient.AuthConfig) (string, error) - StartContainer(id string, config *dockerclient.HostConfig) error - AttachContainer(id string, options *dockerclient.AttachOptions) (io.ReadCloser, error) - Wait(id string) <-chan dockerclient.WaitResult - RemoveImage(name string, force bool) ([]*dockerclient.ImageDelete, error) + ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) + ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, + networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) + ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerWait(ctx context.Context, container string) (int64, error) + ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) + ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) +} + +// ThinDockerClientProxy is a proxy implementation of ThinDockerClient +// that executes the docker requests on dockerclient.Client +type ThinDockerClientProxy struct { + Docker *dockerclient.Client +} + +// ContainerAttach invokes dockerclient.Client.ContainerAttach +func (proxy ThinDockerClientProxy) ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) { + return proxy.Docker.ContainerAttach(ctx, container, options) +} + +// ContainerCreate invokes dockerclient.Client.ContainerCreate +func (proxy ThinDockerClientProxy) ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, + networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) { + return proxy.Docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName) +} + +// ContainerStart invokes dockerclient.Client.ContainerStart +func (proxy ThinDockerClientProxy) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error { + return proxy.Docker.ContainerStart(ctx, container, options) +} + +// ContainerStop invokes dockerclient.Client.ContainerStop +func (proxy ThinDockerClientProxy) ContainerStop(ctx context.Context, container string, timeout *time.Duration) error { + return proxy.Docker.ContainerStop(ctx, container, timeout) +} + +// ContainerWait invokes dockerclient.Client.ContainerWait +func (proxy ThinDockerClientProxy) ContainerWait(ctx context.Context, container string) (int64, error) { + return proxy.Docker.ContainerWait(ctx, container) +} + +// ImageInspectWithRaw invokes dockerclient.Client.ImageInspectWithRaw +func (proxy ThinDockerClientProxy) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) { + return proxy.Docker.ImageInspectWithRaw(ctx, image) +} + +// ImageLoad invokes 
dockerclient.Client.ImageLoad +func (proxy ThinDockerClientProxy) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) { + return proxy.Docker.ImageLoad(ctx, input, quiet) +} + +// ImageRemove invokes dockerclient.Client.ImageRemove +func (proxy ThinDockerClientProxy) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) { + return proxy.Docker.ImageRemove(ctx, image, options) } // ContainerRunner is the main stateful struct used for a single execution of a @@ -69,8 +126,8 @@ type ContainerRunner struct { ArvClient IArvadosClient Kc IKeepClient arvados.Container - dockerclient.ContainerConfig - dockerclient.HostConfig + ContainerConfig dockercontainer.Config + dockercontainer.HostConfig token string ContainerID string ExitCode *int @@ -78,7 +135,7 @@ loggingDone chan bool CrunchLog *ThrottledLogger Stdout io.WriteCloser - Stderr *ThrottledLogger + Stderr io.WriteCloser LogCollection *CollectionWriter LogsPDH *string RunArvMount @@ -88,13 +145,11 @@ HostOutputDir string CleanupTempDir []string Binds []string + Volumes map[string]struct{} OutputPDH *string - CancelLock sync.Mutex - Cancelled bool SigChan chan os.Signal ArvMountExit chan error finalState string - trashLifetime time.Duration statLogger io.WriteCloser statReporter *crunchstat.Reporter @@ -113,6 +168,13 @@ // parent to be X" feature even on sites where the "specify // cgroup parent" feature breaks. setCgroupParent string + + cStateLock sync.Mutex + cStarted bool // StartContainer() succeeded + cCancelled bool // StopContainer() invoked + + enableNetwork string // one of "default" or "always" + networkMode string // passed through to HostConfig.NetworkMode } // SetupSignals sets up signal handling to gracefully terminate the underlying @@ -123,20 +185,30 @@ func (runner *ContainerRunner) SetupSignals() { signal.Notify(runner.SigChan, syscall.SIGINT) signal.Notify(runner.SigChan, syscall.SIGQUIT) - go func(sig <-chan os.Signal) { - for range sig { - if !runner.Cancelled { - runner.CancelLock.Lock() - runner.Cancelled = true - if runner.ContainerID != "" { - runner.Docker.StopContainer(runner.ContainerID, 10) - } - runner.CancelLock.Unlock() - } - } + go func(sig chan os.Signal) { + <-sig + runner.stop() + signal.Stop(sig) }(runner.SigChan) } +// stop the underlying Docker container. +func (runner *ContainerRunner) stop() { + runner.cStateLock.Lock() + defer runner.cStateLock.Unlock() + if runner.cCancelled { + return + } + runner.cCancelled = true + if runner.cStarted { + timeout := 10 * time.Second + err := runner.Docker.ContainerStop(context.TODO(), runner.ContainerID, &(timeout)) + if err != nil { + log.Printf("ContainerStop failed: %s", err) + } + } +} + // LoadImage determines the docker image id from the container record and // checks if it is available in the local Docker image store. If not, it loads // the image from Keep.
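As an aside, the ThinDockerClient indirection above exists so the Docker dependency can be swapped out in tests. A minimal sketch of such a test double — hypothetical, not part of this commit; fakeDocker and its canned return values are invented, and the imports (context, dockertypes) are the ones already used in this file:

// fakeDocker is a hypothetical test double for ThinDockerClient.
// Embedding the interface satisfies all of its methods, so a test only
// overrides the calls it expects; invoking anything else panics on the
// nil embedded value, which quickly flags unexpected Docker use.
type fakeDocker struct {
	ThinDockerClient
}

// ContainerStart pretends the container started successfully.
func (f fakeDocker) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
	return nil
}

// ContainerWait pretends the container exited immediately with status 0.
func (f fakeDocker) ContainerWait(ctx context.Context, container string) (int64, error) {
	return 0, nil
}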
@@ -161,7 +233,7 @@ func (runner *ContainerRunner) LoadImage() (err error) { runner.CrunchLog.Printf("Using Docker image id '%s'", imageID) - _, err = runner.Docker.InspectImage(imageID) + _, _, err = runner.Docker.ImageInspectWithRaw(context.TODO(), imageID) if err != nil { runner.CrunchLog.Print("Loading Docker image from keep") @@ -171,10 +243,11 @@ func (runner *ContainerRunner) LoadImage() (err error) { return fmt.Errorf("While creating ManifestFileReader for container image: %v", err) } - err = runner.Docker.LoadImage(readCloser) + response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, false) if err != nil { return fmt.Errorf("While loading container image into Docker: %v", err) } + response.Body.Close() } else { runner.CrunchLog.Print("Docker image is available") } @@ -239,8 +312,15 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) ( return c, nil } +func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) { + if runner.ArvMountPoint == "" { + runner.ArvMountPoint, err = runner.MkTempDir("", prefix) + } + return +} + func (runner *ContainerRunner) SetupMounts() (err error) { - runner.ArvMountPoint, err = runner.MkTempDir("", "keep") + err = runner.SetupArvMountPoint("keep") if err != nil { return fmt.Errorf("While creating keep mount temp dir: %v", err) } @@ -257,13 +337,21 @@ func (runner *ContainerRunner) SetupMounts() (err error) { collectionPaths := []string{} runner.Binds = nil + runner.Volumes = make(map[string]struct{}) needCertMount := true - for bind, mnt := range runner.Container.Mounts { - if bind == "stdout" { + var binds []string + for bind, _ := range runner.Container.Mounts { + binds = append(binds, bind) + } + sort.Strings(binds) + + for _, bind := range binds { + mnt := runner.Container.Mounts[bind] + if bind == "stdout" || bind == "stderr" { // Is it a "file" mount kind? if mnt.Kind != "file" { - return fmt.Errorf("Unsupported mount kind '%s' for stdout. Only 'file' is supported.", mnt.Kind) + return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind) } // Does path start with OutputPath? @@ -272,15 +360,29 @@ func (runner *ContainerRunner) SetupMounts() (err error) { prefix += "/" } if !strings.HasPrefix(mnt.Path, prefix) { - return fmt.Errorf("Stdout path does not start with OutputPath: %s, %s", mnt.Path, prefix) + return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix) + } + } + + if bind == "stdin" { + // Is it a "collection" mount kind? + if mnt.Kind != "collection" && mnt.Kind != "json" { + return fmt.Errorf("Unsupported mount kind '%s' for stdin. 
Only 'collection' or 'json' are supported.", mnt.Kind) } } + if bind == "/etc/arvados/ca-certificates.crt" { needCertMount = false } + if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" { + if mnt.Kind != "collection" { + return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind) + } + } + switch { - case mnt.Kind == "collection": + case mnt.Kind == "collection" && bind != "stdin": var src string if mnt.UUID != "" && mnt.PortableDataHash != "" { return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount") @@ -295,7 +397,21 @@ func (runner *ContainerRunner) SetupMounts() (err error) { if mnt.Writable { return fmt.Errorf("Can never write to a collection specified by portable data hash") } + idx := strings.Index(mnt.PortableDataHash, "/") + if idx > 0 { + mnt.Path = path.Clean(mnt.PortableDataHash[idx:]) + mnt.PortableDataHash = mnt.PortableDataHash[0:idx] + runner.Container.Mounts[bind] = mnt + } src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash) + if mnt.Path != "" && mnt.Path != "." { + if strings.HasPrefix(mnt.Path, "./") { + mnt.Path = mnt.Path[2:] + } else if strings.HasPrefix(mnt.Path, "/") { + mnt.Path = mnt.Path[1:] + } + src += "/" + mnt.Path + } } else { src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount) arvMountCmd = append(arvMountCmd, "--mount-tmp") @@ -305,6 +421,8 @@ func (runner *ContainerRunner) SetupMounts() (err error) { if mnt.Writable { if bind == runner.Container.OutputPath { runner.HostOutputDir = src + } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") { + return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind) } runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind)) } else { @@ -329,7 +447,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) { runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", runner.HostOutputDir, bind)) case mnt.Kind == "tmp": - runner.Binds = append(runner.Binds, bind) + runner.Volumes[bind] = struct{}{} case mnt.Kind == "json": jsondata, err := json.Marshal(mnt.Content) @@ -457,15 +575,137 @@ func (runner *ContainerRunner) StartCrunchstat() { runner.statReporter.Start() } -// AttachLogs connects the docker container stdout and stderr logs to the -// Arvados logger which logs to Keep and the API server logs table. +type infoCommand struct { + label string + cmd []string +} + +// Gather node information and store it on the log for debugging +// purposes. +func (runner *ContainerRunner) LogNodeInfo() (err error) { + w := runner.NewLogWriter("node-info") + logger := log.New(w, "node-info", 0) + + commands := []infoCommand{ + infoCommand{ + label: "Host Information", + cmd: []string{"uname", "-a"}, + }, + infoCommand{ + label: "CPU Information", + cmd: []string{"cat", "/proc/cpuinfo"}, + }, + infoCommand{ + label: "Memory Information", + cmd: []string{"cat", "/proc/meminfo"}, + }, + infoCommand{ + label: "Disk Space", + cmd: []string{"df", "-m", "/", os.TempDir()}, + }, + infoCommand{ + label: "Disk INodes", + cmd: []string{"df", "-i", "/", os.TempDir()}, + }, + } + + // Run commands with informational output to be logged. 
+ var out []byte + for _, command := range commands { + out, err = exec.Command(command.cmd[0], command.cmd[1:]...).CombinedOutput() + if err != nil { + return fmt.Errorf("While running command %q: %v", + command.cmd, err) + } + logger.Println(command.label) + for _, line := range strings.Split(string(out), "\n") { + logger.Println(" ", line) + } + } + + err = w.Close() + if err != nil { + return fmt.Errorf("While closing node-info logs: %v", err) + } + return nil +} + +// Get and save the raw JSON container record from the API server +func (runner *ContainerRunner) LogContainerRecord() (err error) { + w := &ArvLogWriter{ + ArvClient: runner.ArvClient, + UUID: runner.Container.UUID, + loggingStream: "container", + writeCloser: runner.LogCollection.Open("container.json"), + } + + // Get Container record JSON from the API Server + reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil) + if err != nil { + return fmt.Errorf("While retrieving container record from the API server: %v", err) + } + defer reader.Close() + // Read the API server response as []byte + json_bytes, err := ioutil.ReadAll(reader) + if err != nil { + return fmt.Errorf("While reading container record API server response: %v", err) + } + // Decode the JSON []byte + var cr map[string]interface{} + if err = json.Unmarshal(json_bytes, &cr); err != nil { + return fmt.Errorf("While decoding the container record JSON response: %v", err) + } + // Re-encode it using indentation to improve readability + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + if err = enc.Encode(cr); err != nil { + return fmt.Errorf("While logging the JSON container record: %v", err) + } + err = w.Close() + if err != nil { + return fmt.Errorf("While closing container.json log: %v", err) + } + return nil +}
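The re-encoding step above can be isolated into a small helper; a sketch (illustrative only, not part of this commit; uses only the standard library, with encoding/json and io as already imported in this file):

// prettyPrintJSON decodes a raw JSON document into a generic map and
// re-encodes it with indentation, mirroring what LogContainerRecord
// does with the container record fetched via CallRaw.
func prettyPrintJSON(raw []byte, w io.Writer) error {
	var doc map[string]interface{}
	if err := json.Unmarshal(raw, &doc); err != nil {
		return err
	}
	enc := json.NewEncoder(w)
	enc.SetIndent("", "    ")
	return enc.Encode(doc)
}

For example, prettyPrintJSON([]byte(`{"state":"Locked","priority":1}`), os.Stdout) prints the two fields on separate indented lines, which is what makes the logged container.json readable.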
+ +// AttachStreams connects the docker container stdin, stdout and stderr logs +// to the Arvados logger which logs to Keep and the API server logs table. func (runner *ContainerRunner) AttachStreams() (err error) { runner.CrunchLog.Print("Attaching container streams") - var containerReader io.Reader - containerReader, err = runner.Docker.AttachContainer(runner.ContainerID, - &dockerclient.AttachOptions{Stream: true, Stdout: true, Stderr: true}) + // If stdin mount is provided, attach it to the docker container + var stdinRdr keepclient.Reader + var stdinJson []byte + if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok { + if stdinMnt.Kind == "collection" { + var stdinColl arvados.Collection + collId := stdinMnt.UUID + if collId == "" { + collId = stdinMnt.PortableDataHash + } + err = runner.ArvClient.Get("collections", collId, nil, &stdinColl) + if err != nil { + return fmt.Errorf("While getting stdin collection: %v", err) + } + + stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path) + if os.IsNotExist(err) { + return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path) + } else if err != nil { + return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err) + } + } else if stdinMnt.Kind == "json" { + stdinJson, err = json.Marshal(stdinMnt.Content) + if err != nil { + return fmt.Errorf("While encoding stdin json data: %v", err) + } + } + } + + stdinUsed := stdinRdr != nil || len(stdinJson) != 0 + response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID, + dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true}) if err != nil { return fmt.Errorf("While attaching container stdout/stderr streams: %v", err) } @@ -473,37 +713,76 @@ runner.loggingDone = make(chan bool) if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok { - stdoutPath := stdoutMnt.Path[len(runner.Container.OutputPath):] - index := strings.LastIndex(stdoutPath, "/") - if index > 0 { - subdirs := stdoutPath[:index] - if subdirs != "" { - st, err := os.Stat(runner.HostOutputDir) - if err != nil { - return fmt.Errorf("While Stat on temp dir: %v", err) - } - stdoutPath := path.Join(runner.HostOutputDir, subdirs) - err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777) - if err != nil { - return fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err) - } - } - } - stdoutFile, err := os.Create(path.Join(runner.HostOutputDir, stdoutPath)) + stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path) if err != nil { - return fmt.Errorf("While creating stdout file: %v", err) + return err } runner.Stdout = stdoutFile } else { runner.Stdout = NewThrottledLogger(runner.NewLogWriter("stdout")) } - runner.Stderr = NewThrottledLogger(runner.NewLogWriter("stderr")) - go runner.ProcessDockerAttach(containerReader) + if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok { + stderrFile, err := runner.getStdoutFile(stderrMnt.Path) + if err != nil { + return err + } + runner.Stderr = stderrFile + } else { + runner.Stderr = NewThrottledLogger(runner.NewLogWriter("stderr")) + } + + if stdinRdr != nil { + go func() { + _, err := io.Copy(response.Conn, stdinRdr) + if err != nil { + runner.CrunchLog.Printf("While writing stdin collection to docker container %q", err) + runner.stop() + } + stdinRdr.Close() + response.CloseWrite() + }() + } else if len(stdinJson) != 0 { + go func() { + _, err := io.Copy(response.Conn, bytes.NewReader(stdinJson)) + if err != nil { + runner.CrunchLog.Printf("While writing stdin json to docker container %q", err) + runner.stop() + } + response.CloseWrite() + 
}() + } + + go runner.ProcessDockerAttach(response.Reader) return nil } +func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) { + stdoutPath := mntPath[len(runner.Container.OutputPath):] + index := strings.LastIndex(stdoutPath, "/") + if index > 0 { + subdirs := stdoutPath[:index] + if subdirs != "" { + st, err := os.Stat(runner.HostOutputDir) + if err != nil { + return nil, fmt.Errorf("While Stat on temp dir: %v", err) + } + stdoutPath := path.Join(runner.HostOutputDir, subdirs) + err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777) + if err != nil { + return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err) + } + } + } + stdoutFile, err := os.Create(path.Join(runner.HostOutputDir, stdoutPath)) + if err != nil { + return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err) + } + + return stdoutFile, nil +} + // CreateContainer creates the docker container. func (runner *ContainerRunner) CreateContainer() error { runner.CrunchLog.Print("Creating Docker container") @@ -516,6 +795,17 @@ func (runner *ContainerRunner) CreateContainer() error { for k, v := range runner.Container.Environment { runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v) } + + runner.ContainerConfig.Volumes = runner.Volumes + + runner.HostConfig = dockercontainer.HostConfig{ + Binds: runner.Binds, + Cgroup: dockercontainer.CgroupSpec(runner.setCgroupParent), + LogConfig: dockercontainer.LogConfig{ + Type: "none", + }, + } + if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI { tok, err := runner.ContainerToken() if err != nil { @@ -526,24 +816,28 @@ func (runner *ContainerRunner) CreateContainer() error { "ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"), "ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"), ) - runner.ContainerConfig.NetworkDisabled = false + runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode) } else { - runner.ContainerConfig.NetworkDisabled = true + if runner.enableNetwork == "always" { + runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode) + } else { + runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none") + } } - var err error - runner.ContainerID, err = runner.Docker.CreateContainer(&runner.ContainerConfig, "", nil) + _, stdinUsed := runner.Container.Mounts["stdin"] + runner.ContainerConfig.OpenStdin = stdinUsed + runner.ContainerConfig.StdinOnce = stdinUsed + runner.ContainerConfig.AttachStdin = stdinUsed + runner.ContainerConfig.AttachStdout = true + runner.ContainerConfig.AttachStderr = true + + createdBody, err := runner.Docker.ContainerCreate(context.TODO(), &runner.ContainerConfig, &runner.HostConfig, nil, runner.Container.UUID) if err != nil { return fmt.Errorf("While creating container: %v", err) } - runner.HostConfig = dockerclient.HostConfig{ - Binds: runner.Binds, - CgroupParent: runner.setCgroupParent, - LogConfig: dockerclient.LogConfig{ - Type: "none", - }, - } + runner.ContainerID = createdBody.ID return runner.AttachStreams() } @@ -551,10 +845,17 @@ func (runner *ContainerRunner) CreateContainer() error { // StartContainer starts the docker container created by CreateContainer. 
func (runner *ContainerRunner) StartContainer() error { runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID) - err := runner.Docker.StartContainer(runner.ContainerID, &runner.HostConfig) + runner.cStateLock.Lock() + defer runner.cStateLock.Unlock() + if runner.cCancelled { + return ErrCancelled + } + err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID, + dockertypes.ContainerStartOptions{}) if err != nil { return fmt.Errorf("could not start container: %v", err) } + runner.cStarted = true return nil } @@ -563,12 +864,23 @@ func (runner *ContainerRunner) StartContainer() error { func (runner *ContainerRunner) WaitFinish() error { runner.CrunchLog.Print("Waiting for container to finish") - result := runner.Docker.Wait(runner.ContainerID) - wr := <-result - if wr.Error != nil { - return fmt.Errorf("While waiting for container to finish: %v", wr.Error) + waitDocker, err := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID) + if err != nil { + return fmt.Errorf("container wait: %v", err) + } + + runner.CrunchLog.Printf("Container exited with code: %v", waitDocker) + code := int(waitDocker) + runner.ExitCode = &code + + waitMount := runner.ArvMountExit + select { + case err := <-waitMount: + runner.CrunchLog.Printf("arv-mount exited before container finished: %v", err) + waitMount = nil + runner.stop() + default: } - runner.ExitCode = &wr.ExitCode // wait for stdout/stderr to complete <-runner.loggingDone @@ -612,7 +924,7 @@ func (runner *ContainerRunner) CaptureOutput() error { _, err = os.Stat(collectionMetafile) if err != nil { // Regular directory - cw := CollectionWriter{runner.Kc, nil, sync.Mutex{}} + cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}} manifestText, err = cw.WriteTree(runner.HostOutputDir, runner.CrunchLog.Logger) if err != nil { return fmt.Errorf("While uploading output files: %v", err) @@ -633,11 +945,45 @@ func (runner *ContainerRunner) CaptureOutput() error { manifestText = rec.ManifestText } + // Pre-populate output from the configured mount points + var binds []string + for bind, _ := range runner.Container.Mounts { + binds = append(binds, bind) + } + sort.Strings(binds) + + for _, bind := range binds { + mnt := runner.Container.Mounts[bind] + + bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath) + + if bindSuffix == bind || len(bindSuffix) <= 0 { + // either does not start with OutputPath or is OutputPath itself + continue + } + + if mnt.ExcludeFromOutput == true { + continue + } + + // append to manifest_text + m, err := runner.getCollectionManifestForPath(mnt, bindSuffix) + if err != nil { + return err + } + + manifestText = manifestText + m + } + + // Save output var response arvados.Collection + manifest := manifest.Manifest{Text: manifestText} + manifestText = manifest.Extract(".", ".").Text err = runner.ArvClient.Create("collections", arvadosclient.Dict{ + "ensure_unique_name": true, "collection": arvadosclient.Dict{ - "trash_at": time.Now().Add(runner.trashLifetime).Format(time.RFC3339), + "is_trashed": true, "name": "output for " + runner.Container.UUID, "manifest_text": manifestText}}, &response) @@ -648,12 +994,45 @@ func (runner *ContainerRunner) CaptureOutput() error { return nil } -func (runner *ContainerRunner) loadDiscoveryVars() { - tl, err := runner.ArvClient.Discovery("defaultTrashLifetime") - if err != nil { - log.Fatalf("getting defaultTrashLifetime from discovery document: %s", err) +var outputCollections = make(map[string]arvados.Collection) + +// Fetch the 
collection for the mnt.PortableDataHash +// Return the manifest_text fragment corresponding to the specified mnt.Path +// after making any required updates. +// Ex: +// If mnt.Path is not specified, +// return the entire manifest_text after replacing any "." with bindSuffix +// If mnt.Path corresponds to one stream, +// return the manifest_text for that stream after replacing that stream name with bindSuffix +// Otherwise, check if a filename in any one stream is being sought. Return the manifest_text +// for that stream after replacing stream name with bindSuffix minus the last word +// and the file name with last word of the bindSuffix +// Allowed path examples: +// "path":"/" +// "path":"/subdir1" +// "path":"/subdir1/subdir2" +// "path":"/subdir/filename" etc +func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) { + collection := outputCollections[mnt.PortableDataHash] + if collection.PortableDataHash == "" { + err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection) + if err != nil { + return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err) + } + outputCollections[mnt.PortableDataHash] = collection + } + + if collection.ManifestText == "" { + runner.CrunchLog.Printf("No manifest text for collection %v", collection.PortableDataHash) + return "", nil + } + + mft := manifest.Manifest{Text: collection.ManifestText} + extracted := mft.Extract(mnt.Path, bindSuffix) + if extracted.Err != nil { + return "", fmt.Errorf("Error parsing manifest for %v: %v", mnt.PortableDataHash, extracted.Err.Error()) } - runner.trashLifetime = time.Duration(tl.(float64)) * time.Second + return extracted.Text, nil } func (runner *ContainerRunner) CleanupDirs() { @@ -687,8 +1066,8 @@ // point, but re-open crunch log with ArvClient in case there are any // further errors (such as failing to write the log to Keep!) while // shutting down - runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{runner.ArvClient, runner.Container.UUID, - "crunch-run", nil}) + runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient, + UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil}) if runner.LogsPDH != nil { // If we have already assigned something to LogsPDH, @@ -707,8 +1086,9 @@ var response arvados.Collection err = runner.ArvClient.Create("collections", arvadosclient.Dict{ + "ensure_unique_name": true, "collection": arvadosclient.Dict{ - "trash_at": time.Now().Add(runner.trashLifetime).Format(time.RFC3339), + "is_trashed": true, "name": "logs for " + runner.Container.UUID, "manifest_text": mt}}, &response) @@ -721,9 +1101,9 @@ // UpdateContainerRunning updates the container state to "Running" func (runner *ContainerRunner) UpdateContainerRunning() error { - runner.CancelLock.Lock() - defer runner.CancelLock.Unlock() - if runner.Cancelled { + runner.cStateLock.Lock() + defer runner.cStateLock.Unlock() + if runner.cCancelled { return ErrCancelled } return runner.ArvClient.Update("containers", runner.Container.UUID, @@ -767,14 +1147,15 @@ // IsCancelled returns the value of Cancelled, with goroutine safety.
func (runner *ContainerRunner) IsCancelled() bool { - runner.CancelLock.Lock() - defer runner.CancelLock.Unlock() - return runner.Cancelled + runner.cStateLock.Lock() + defer runner.cStateLock.Unlock() + return runner.cCancelled } // NewArvLogWriter creates an ArvLogWriter func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser { - return &ArvLogWriter{runner.ArvClient, runner.Container.UUID, name, runner.LogCollection.Open(name + ".txt")} + return &ArvLogWriter{ArvClient: runner.ArvClient, UUID: runner.Container.UUID, loggingStream: name, + writeCloser: runner.LogCollection.Open(name + ".txt")} } // Run the full container lifecycle. @@ -865,6 +1246,17 @@ func (runner *ContainerRunner) Run() (err error) { return } + // Gather and record node information + err = runner.LogNodeInfo() + if err != nil { + return + } + // Save container.json record on log collection + err = runner.LogContainerRecord() + if err != nil { + return + } + runner.StartCrunchstat() if runner.IsCancelled() { @@ -899,11 +1291,13 @@ func NewContainerRunner(api IArvadosClient, cr.NewLogWriter = cr.NewArvLogWriter cr.RunArvMount = cr.ArvMountCmd cr.MkTempDir = ioutil.TempDir - cr.LogCollection = &CollectionWriter{kc, nil, sync.Mutex{}} + cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}} cr.Container.UUID = containerUUID cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run")) cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0) - cr.loadDiscoveryVars() + + loadLogThrottleParams(api) + return cr } @@ -913,6 +1307,14 @@ func main() { cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)") cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container") caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates") + enableNetwork := flag.String("container-enable-networking", "default", + `Specify if networking should be enabled for container. One of 'default', 'always': + default: only enable networking if container requests it. + always: containers always have networking enabled + `) + networkMode := flag.String("container-network-mode", "default", + `Set networking mode for container. Corresponds to Docker network mode (--net). + `) flag.Parse() containerId := flag.Arg(0) @@ -934,16 +1336,22 @@ func main() { } kc.Retries = 4 - var docker *dockerclient.DockerClient - docker, err = dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil) + var docker *dockerclient.Client + // API version 1.21 corresponds to Docker 1.9, which is currently the + // minimum version we want to support. + docker, err = dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil) if err != nil { log.Fatalf("%s: %v", containerId, err) } - cr := NewContainerRunner(api, kc, docker, containerId) + dockerClientProxy := ThinDockerClientProxy{Docker: docker} + + cr := NewContainerRunner(api, kc, dockerClientProxy, containerId) cr.statInterval = *statInterval cr.cgroupRoot = *cgroupRoot cr.expectCgroupParent = *cgroupParent + cr.enableNetwork = *enableNetwork + cr.networkMode = *networkMode if *cgroupParentSubsystem != "" { p := findCgroup(*cgroupParentSubsystem) cr.setCgroupParent = p
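To illustrate the stdin/stdout/stderr mount support added above: with this commit a container record can route the standard streams through its mounts table. A hypothetical example follows — field names match arvados.Mount as used in SetupMounts and AttachStreams, while the portable data hash, file names, and the assumed OutputPath of "/out" are placeholders:

// Example mounts exercising the new stdin and stderr handling.
// "stdin" may be a "collection" mount (read from Keep) or a "json"
// mount (Content is marshalled and piped in); "stdout" and "stderr"
// must be "file" mounts whose paths fall under the container's
// OutputPath ("/out" in this sketch).
var exampleMounts = map[string]arvados.Mount{
	"stdin": {
		Kind:             "collection",
		PortableDataHash: "99999999999999999999999999999999+99", // placeholder PDH
		Path:             "input.txt",                           // file within the collection
	},
	"stdout": {
		Kind: "file",
		Path: "/out/stdout.txt",
	},
	"stderr": {
		Kind: "file",
		Path: "/out/stderr.txt",
	},
}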