19094: Note docker/singularity/arv-mount versions in container log.
[arvados.git] / lib / crunchrun / docker.go
index f437d6a0c39d6c25d42fec9b5d312ca73a01d5d9..f3808cb357aa5778e8c92c27a25b02edc6ee3419 100644 (file)
@@ -46,7 +46,24 @@ func newDockerExecutor(containerUUID string, logf func(string, ...interface{}),
        }, err
 }
 
-func (e *dockerExecutor) Runtime() string { return "docker" }
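+// Runtime returns "docker" plus the version information reported by
+// the daemon, so the versions can be noted in the container log.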
+func (e *dockerExecutor) Runtime() string {
+       v, _ := e.dockerclient.ServerVersion(context.Background())
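+       // The error from ServerVersion is ignored here: if the daemon
+       // cannot be queried, the "(unknown version)" fallback below is used.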
+       info := ""
+       for _, cv := range v.Components {
+               if info != "" {
+                       info += ", "
+               }
+               info += cv.Name + " " + cv.Version
+       }
+       if info == "" {
+               info = "(unknown version)"
+       }
+       return "docker " + info
+}
 
 func (e *dockerExecutor) LoadImage(imageID string, imageTarballPath string, container arvados.Container, arvMountPoint string,
        containerClient *arvados.Client) error {
@@ -108,25 +121,39 @@ func (e *dockerExecutor) config(spec containerSpec) (dockercontainer.Config, doc
        }
        if spec.CUDADeviceCount != 0 {
                var deviceIds []string
-               for _, s := range os.Environ() {
+               if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
                        // If a resource manager such as slurm or LSF told
                        // us to select specific devices we need to propagate that.
-                       if strings.HasPrefix(s, "CUDA_VISIBLE_DEVICES=") {
-                               deviceIds = strings.SplitN(strings.SplitN(s, "=", 2)[1], ",", -1)
-                       }
+                       deviceIds = strings.Split(cudaVisibleDevices, ",")
                }
+
                deviceCount := spec.CUDADeviceCount
                if len(deviceIds) > 0 {
                        // Docker won't accept both non-empty
                        // DeviceIDs and a non-zero Count
+                       //
+                       // (it turns out "Count" is a dumb fallback
+                       // that just allocates device 0, 1, 2, ...,
+                       // Count-1)
                        deviceCount = 0
                }
 
+               // Capabilities are confusing.  The driver has generic
+               // capabilities "gpu" and "nvidia" but then there's
+               // additional capabilities "compute" and "utility"
+               // that are passed to nvidia-container-cli.
+               //
+               // "compute" means include the CUDA libraries and
+               // "utility" means include the CUDA utility programs
+               // (like nvidia-smi).
+               //
+               // https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/daemon/nvidia_linux.go#L37
+               // https://github.com/containerd/containerd/blob/main/contrib/nvidia/nvidia.go
                hostCfg.Resources.DeviceRequests = append(hostCfg.Resources.DeviceRequests, dockercontainer.DeviceRequest{
                        Driver:       "nvidia",
                        Count:        deviceCount,
                        DeviceIDs:    deviceIds,
-                       Capabilities: [][]string{[]string{"gpu", "nvidia"}},
+                       Capabilities: [][]string{[]string{"gpu", "nvidia", "compute", "utility"}},
                })
        }
        for path, mount := range spec.BindMounts {
@@ -201,7 +228,6 @@ func (e *dockerExecutor) Wait(ctx context.Context) (int, error) {
        for {
                select {
                case waitBody := <-waitOk:
-                       e.logf("Container exited with code: %v", waitBody.StatusCode)
                        // wait for stdout/stderr to complete
                        <-e.doneIO
                        return int(waitBody.StatusCode), nil