X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/d7c8ef4e435b88e9a45e5cd9fc2365fb82c9ab36..844ff7cc1dc1c93a29b7ad8eca2987b987cf89e6:/lib/crunchrun/docker.go

diff --git a/lib/crunchrun/docker.go b/lib/crunchrun/docker.go
index ab00273ef3..f3808cb357 100644
--- a/lib/crunchrun/docker.go
+++ b/lib/crunchrun/docker.go
@@ -46,7 +46,20 @@ func newDockerExecutor(containerUUID string, logf func(string, ...interface{}),
 	}, err
 }
 
-func (e *dockerExecutor) Runtime() string { return "docker" }
+func (e *dockerExecutor) Runtime() string {
+	v, _ := e.dockerclient.ServerVersion(context.Background())
+	info := ""
+	for _, cv := range v.Components {
+		if info != "" {
+			info += ", "
+		}
+		info += cv.Name + " " + cv.Version
+	}
+	if info == "" {
+		info = "(unknown version)"
+	}
+	return "docker " + info
+}
 
 func (e *dockerExecutor) LoadImage(imageID string, imageTarballPath string, container arvados.Container,
 	arvMountPoint string, containerClient *arvados.Client) error {
@@ -107,10 +120,40 @@ func (e *dockerExecutor) config(spec containerSpec) (dockercontainer.Config, doc
 		},
 	}
 	if spec.CUDADeviceCount != 0 {
+		var deviceIds []string
+		if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
+			// If a resource manager such as Slurm or LSF told
+			// us to select specific devices, we need to propagate that.
+			deviceIds = strings.Split(cudaVisibleDevices, ",")
+		}
+
+		deviceCount := spec.CUDADeviceCount
+		if len(deviceIds) > 0 {
+			// Docker won't accept both non-empty
+			// DeviceIDs and a non-zero Count
+			//
+			// (it turns out "Count" is a dumb fallback
+			// that just allocates device 0, 1, 2, ...,
+			// Count-1)
+			deviceCount = 0
+		}
+
+		// Capabilities are confusing. The driver has generic
+		// capabilities "gpu" and "nvidia" but then there's
+		// additional capabilities "compute" and "utility"
+		// that are passed to nvidia-container-cli.
+		//
+		// "compute" means include the CUDA libraries and
+		// "utility" means include the CUDA utility programs
+		// (like nvidia-smi).
+		//
+		// https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/daemon/nvidia_linux.go#L37
+		// https://github.com/containerd/containerd/blob/main/contrib/nvidia/nvidia.go
 		hostCfg.Resources.DeviceRequests = append(hostCfg.Resources.DeviceRequests, dockercontainer.DeviceRequest{
 			Driver:       "nvidia",
-			Count:        spec.CUDADeviceCount,
-			Capabilities: [][]string{[]string{"gpu", "nvidia", "compute"}},
+			Count:        deviceCount,
+			DeviceIDs:    deviceIds,
+			Capabilities: [][]string{[]string{"gpu", "nvidia", "compute", "utility"}},
 		})
 	}
 	for path, mount := range spec.BindMounts {
@@ -185,7 +228,6 @@ func (e *dockerExecutor) Wait(ctx context.Context) (int, error) {
 	for {
 		select {
 		case waitBody := <-waitOk:
-			e.logf("Container exited with code: %v", waitBody.StatusCode)
 			// wait for stdout/stderr to complete
 			<-e.doneIO
 			return int(waitBody.StatusCode), nil
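
For context, the rewritten Runtime() in the first hunk reports the daemon's component versions (Engine, containerd, runc, and so on) instead of the bare string "docker". The following is a minimal standalone sketch of the same formatting, assuming the stock Docker Go SDK client (github.com/docker/docker/client) rather than crunchrun's wrapped dockerclient field; client setup and error handling here are illustrative, not copied from the Arvados source.

package main

import (
	"context"
	"fmt"

	dockerclient "github.com/docker/docker/client"
)

func main() {
	cli, err := dockerclient.NewClientWithOpts(dockerclient.FromEnv)
	if err != nil {
		panic(err)
	}
	// ServerVersion returns a types.Version whose Components slice
	// lists each piece of the daemon ("Engine", "containerd", ...).
	v, _ := cli.ServerVersion(context.Background())
	info := ""
	for _, cv := range v.Components {
		if info != "" {
			info += ", "
		}
		info += cv.Name + " " + cv.Version
	}
	if info == "" {
		// e.g. the version query failed; same fallback as the patch
		info = "(unknown version)"
	}
	// Prints something like:
	// docker Engine 20.10.7, containerd 1.4.6, runc 1.0.0-rc95
	fmt.Println("docker " + info)
}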
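
The device-selection logic in the second hunk can be exercised on its own. Below is a minimal sketch under the same assumptions (CUDA_VISIBLE_DEVICES is a comma-separated device list set by a scheduler such as Slurm or LSF); the helper name cudaDeviceRequest is hypothetical and not part of the Arvados source, but the DeviceRequest fields are the real Docker API types.

package main

import (
	"fmt"
	"os"
	"strings"

	dockercontainer "github.com/docker/docker/api/types/container"
)

// cudaDeviceRequest builds a DeviceRequest the same way the patched
// config() does: prefer the specific devices a resource manager listed
// in CUDA_VISIBLE_DEVICES, and fall back to a plain count otherwise.
func cudaDeviceRequest(wantCount int) dockercontainer.DeviceRequest {
	var deviceIds []string
	if cvd := os.Getenv("CUDA_VISIBLE_DEVICES"); cvd != "" {
		deviceIds = strings.Split(cvd, ",")
	}
	count := wantCount
	if len(deviceIds) > 0 {
		// Docker rejects a request that sets both DeviceIDs and a
		// non-zero Count, so zero out Count when IDs are given.
		count = 0
	}
	return dockercontainer.DeviceRequest{
		Driver:       "nvidia",
		Count:        count,
		DeviceIDs:    deviceIds,
		Capabilities: [][]string{{"gpu", "nvidia", "compute", "utility"}},
	}
}

func main() {
	// Simulate a scheduler assigning devices 1 and 3.
	os.Setenv("CUDA_VISIBLE_DEVICES", "1,3")
	req := cudaDeviceRequest(2)
	fmt.Printf("Count=%d DeviceIDs=%v\n", req.Count, req.DeviceIDs)
	// Output: Count=0 DeviceIDs=[1 3]
}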