X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/0b0b4c7b23e96a6efb3cfd88b0ba7224158e9544..7000c1ebd170001e10807b583a29e9e7e9570b23:/lib/crunchrun/docker.go

diff --git a/lib/crunchrun/docker.go b/lib/crunchrun/docker.go
index a39b754b3d..e62f2a39ba 100644
--- a/lib/crunchrun/docker.go
+++ b/lib/crunchrun/docker.go
@@ -11,6 +11,7 @@ import (
 	"strings"
 	"time"
 
+	"git.arvados.org/arvados.git/sdk/go/arvados"
 	dockertypes "github.com/docker/docker/api/types"
 	dockercontainer "github.com/docker/docker/api/types/container"
 	dockerclient "github.com/docker/docker/client"
@@ -45,13 +46,17 @@ func newDockerExecutor(containerUUID string, logf func(string, ...interface{}),
 	}, err
 }
 
-func (e *dockerExecutor) ImageLoaded(imageID string) bool {
+func (e *dockerExecutor) Runtime() string { return "docker" }
+
+func (e *dockerExecutor) LoadImage(imageID string, imageTarballPath string, container arvados.Container, arvMountPoint string,
+	containerClient *arvados.Client) error {
 	_, _, err := e.dockerclient.ImageInspectWithRaw(context.TODO(), imageID)
-	return err == nil
-}
+	if err == nil {
+		// already loaded
+		return nil
+	}
 
-func (e *dockerExecutor) LoadImage(filename string) error {
-	f, err := os.Open(filename)
+	f, err := os.Open(imageTarballPath)
 	if err != nil {
 		return err
 	}
@@ -66,7 +71,7 @@ func (e *dockerExecutor) LoadImage(filename string) error {
 	return nil
 }
 
-func (e *dockerExecutor) Create(spec containerSpec) error {
+func (e *dockerExecutor) config(spec containerSpec) (dockercontainer.Config, dockercontainer.HostConfig) {
 	e.logf("Creating Docker container")
 	cfg := dockercontainer.Config{
 		Image:        spec.Image,
@@ -101,6 +106,43 @@ func (e *dockerExecutor) Create(spec containerSpec) error {
 			KernelMemory: spec.RAM, // kernel portion
 		},
 	}
+	if spec.CUDADeviceCount != 0 {
+		var deviceIds []string
+		if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
+			// If a resource manager such as slurm or LSF told
+			// us to select specific devices we need to propagate that.
+			deviceIds = strings.Split(cudaVisibleDevices, ",")
+		}
+
+		deviceCount := spec.CUDADeviceCount
+		if len(deviceIds) > 0 {
+			// Docker won't accept both non-empty
+			// DeviceIDs and a non-zero Count
+			//
+			// (it turns out "Count" is a dumb fallback
+			// that just allocates device 0, 1, 2, ...,
+			// Count-1)
+			deviceCount = 0
+		}
+
+		// Capabilities are confusing. The driver has generic
+		// capabilities "gpu" and "nvidia" but then there's
+		// additional capabilities "compute" and "utility"
+		// that are passed to nvidia-container-cli.
+		//
+		// "compute" means include the CUDA libraries and
+		// "utility" means include the CUDA utility programs
+		// (like nvidia-smi).
+		//
+		// https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/daemon/nvidia_linux.go#L37
+		// https://github.com/containerd/containerd/blob/main/contrib/nvidia/nvidia.go
+		hostCfg.Resources.DeviceRequests = append(hostCfg.Resources.DeviceRequests, dockercontainer.DeviceRequest{
+			Driver:       "nvidia",
+			Count:        deviceCount,
+			DeviceIDs:    deviceIds,
+			Capabilities: [][]string{[]string{"gpu", "nvidia", "compute", "utility"}},
+		})
+	}
 	for path, mount := range spec.BindMounts {
 		bind := mount.HostPath + ":" + path
 		if mount.ReadOnly {
@@ -111,7 +153,11 @@ func (e *dockerExecutor) Create(spec containerSpec) error {
 	if spec.EnableNetwork {
 		hostCfg.NetworkMode = dockercontainer.NetworkMode(spec.NetworkMode)
 	}
+	return cfg, hostCfg
+}
 
+func (e *dockerExecutor) Create(spec containerSpec) error {
+	cfg, hostCfg := e.config(spec)
 	created, err := e.dockerclient.ContainerCreate(context.TODO(), &cfg, &hostCfg, nil, e.containerUUID)
 	if err != nil {
 		return fmt.Errorf("While creating container: %v", err)
@@ -169,7 +215,6 @@ func (e *dockerExecutor) Wait(ctx context.Context) (int, error) {
 	for {
 		select {
 		case waitBody := <-waitOk:
-			e.logf("Container exited with code: %v", waitBody.StatusCode)
 			// wait for stdout/stderr to complete
 			<-e.doneIO
 			return int(waitBody.StatusCode), nil
@@ -186,7 +231,7 @@ func (e *dockerExecutor) Wait(ctx context.Context) (int, error) {
 	}
 }
 
-func (e *dockerExecutor) startIO(stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
+func (e *dockerExecutor) startIO(stdin io.Reader, stdout, stderr io.Writer) error {
 	resp, err := e.dockerclient.ContainerAttach(context.TODO(), e.containerID, dockertypes.ContainerAttachOptions{
 		Stream: true,
 		Stdin:  stdin != nil,
@@ -213,8 +258,7 @@ func (e *dockerExecutor) startIO(stdin io.ReadCloser, stdout, stderr io.WriteClo
 	return nil
 }
 
-func (e *dockerExecutor) handleStdin(stdin io.ReadCloser, conn io.Writer, closeConn func() error) error {
-	defer stdin.Close()
+func (e *dockerExecutor) handleStdin(stdin io.Reader, conn io.Writer, closeConn func() error) error {
 	defer closeConn()
 	_, err := io.Copy(conn, stdin)
 	if err != nil {
@@ -225,7 +269,7 @@ func (e *dockerExecutor) handleStdin(stdin io.ReadCloser, conn io.Writer, closeC
 
 // Handle docker log protocol; see
 // https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
-func (e *dockerExecutor) handleStdoutStderr(stdout, stderr io.WriteCloser, reader io.Reader) error {
+func (e *dockerExecutor) handleStdoutStderr(stdout, stderr io.Writer, reader io.Reader) error {
 	header := make([]byte, 8)
 	var err error
 	for err == nil {
@@ -247,14 +291,6 @@ func (e *dockerExecutor) handleStdoutStderr(stdout, stderr io.WriteCloser, reade
 	if err != nil {
 		return fmt.Errorf("error copying stdout/stderr from docker: %v", err)
 	}
-	err = stdout.Close()
-	if err != nil {
-		return fmt.Errorf("error writing stdout: close: %v", err)
-	}
-	err = stderr.Close()
-	if err != nil {
-		return fmt.Errorf("error writing stderr: close: %v", err)
-	}
 	return nil
 }
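A note on the GPU hunk: as the in-diff comment says, Docker rejects a DeviceRequest carrying both a non-empty DeviceIDs and a non-zero Count, which is why config zeroes the count whenever CUDA_VISIBLE_DEVICES names specific devices. A minimal standalone sketch of that selection rule (gpuRequest is a hypothetical helper for illustration, not part of this change):

package main

import (
	"fmt"
	"os"
	"strings"

	dockercontainer "github.com/docker/docker/api/types/container"
)

// gpuRequest builds an nvidia device request the way config does:
// if a scheduler exported CUDA_VISIBLE_DEVICES, propagate those
// exact device IDs; otherwise fall back to a plain device count.
func gpuRequest(want int) dockercontainer.DeviceRequest {
	req := dockercontainer.DeviceRequest{
		Driver:       "nvidia",
		Capabilities: [][]string{{"gpu", "nvidia", "compute", "utility"}},
	}
	if env := os.Getenv("CUDA_VISIBLE_DEVICES"); env != "" {
		// Docker won't accept both DeviceIDs and a non-zero
		// Count, so Count stays zero on this branch.
		req.DeviceIDs = strings.Split(env, ",")
	} else {
		req.Count = want
	}
	return req
}

func main() {
	fmt.Printf("%+v\n", gpuRequest(2))
}

With CUDA_VISIBLE_DEVICES=0,3 in the environment this yields DeviceIDs [0 3] and Count 0; with the variable unset it yields Count 2.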
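On the I/O hunks: narrowing stdin/stdout/stderr from ReadCloser/WriteCloser to io.Reader/io.Writer moves close responsibility to the caller, which is why handleStdin and handleStdoutStderr no longer close their streams. handleStdoutStderr itself parses the multiplexed attach-stream framing referenced in its comment: each frame starts with an 8-byte header whose first byte names the stream (1 = stdout, 2 = stderr) and whose last four bytes give the payload length as a big-endian uint32. A self-contained sketch of a demultiplexer for that framing (demux is a hypothetical name, not this file's implementation):

package main

import (
	"bytes"
	"encoding/binary"
	"io"
	"os"
)

// demux splits a multiplexed attach stream into stdout and stderr,
// frame by frame, until the stream ends.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		// Each frame begins with a fixed 8-byte header.
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		// Byte 0 selects the destination stream.
		dst := stdout
		if header[0] == 2 {
			dst = stderr
		}
		// Bytes 4-7 hold the big-endian payload size.
		n := int64(binary.BigEndian.Uint32(header[4:]))
		if _, err := io.CopyN(dst, r, n); err != nil {
			return err
		}
	}
}

func main() {
	// One stdout frame carrying the 3-byte payload "hi\n".
	frame := []byte{1, 0, 0, 0, 0, 0, 0, 3, 'h', 'i', '\n'}
	if err := demux(bytes.NewReader(frame), os.Stdout, os.Stderr); err != nil {
		panic(err)
	}
}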