}, err
}
-func (e *dockerExecutor) Runtime() string { return "docker" }
+// Runtime reports the container runtime in use, including component
+// version information from the Docker daemon when available, e.g.
+// "docker Engine 20.10.7, containerd 1.4.6".  If the daemon cannot be
+// queried or reports no components, it returns "docker (unknown
+// version)".
+func (e *dockerExecutor) Runtime() string {
+	v, err := e.dockerclient.ServerVersion(context.Background())
+	if err != nil {
+		// Version info is cosmetic (used in logs); don't let a
+		// daemon hiccup turn into a failure here.
+		return "docker (unknown version)"
+	}
+	parts := make([]string, 0, len(v.Components))
+	for _, cv := range v.Components {
+		parts = append(parts, cv.Name+" "+cv.Version)
+	}
+	if len(parts) == 0 {
+		return "docker (unknown version)"
+	}
+	return "docker " + strings.Join(parts, ", ")
+}
func (e *dockerExecutor) LoadImage(imageID string, imageTarballPath string, container arvados.Container, arvMountPoint string,
containerClient *arvados.Client) error {
}
if spec.CUDADeviceCount != 0 {
var deviceIds []string
- for _, s := range os.Environ() {
+ if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
// If a resource manager such as slurm or LSF told
// us to select specific devices we need to propagate that.
- if strings.HasPrefix(s, "CUDA_VISIBLE_DEVICES=") {
- deviceIds = strings.SplitN(strings.SplitN(s, "=", 2)[1], ",")
- }
+ deviceIds = strings.Split(cudaVisibleDevices, ",")
}
+
deviceCount := spec.CUDADeviceCount
if len(deviceIds) > 0 {
// Docker won't accept both non-empty
// DeviceIDs and a non-zero Count
+ //
+ // (it turns out "Count" is a dumb fallback
+ // that just allocates device 0, 1, 2, ...,
+ // Count-1)
deviceCount = 0
}
+ // Capabilities are confusing. The driver has generic
+ // capabilities "gpu" and "nvidia" but then there's
+ // additional capabilities "compute" and "utility"
+ // that are passed to nvidia-container-cli.
+ //
+ // "compute" means include the CUDA libraries and
+ // "utility" means include the CUDA utility programs
+ // (like nvidia-smi).
+ //
+ // https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/daemon/nvidia_linux.go#L37
+ // https://github.com/containerd/containerd/blob/main/contrib/nvidia/nvidia.go
hostCfg.Resources.DeviceRequests = append(hostCfg.Resources.DeviceRequests, dockercontainer.DeviceRequest{
Driver: "nvidia",
Count: deviceCount,
DeviceIDs: deviceIds,
- Capabilities: [][]string{[]string{"gpu", "nvidia"}},
+ Capabilities: [][]string{[]string{"gpu", "nvidia", "compute", "utility"}},
})
}
for path, mount := range spec.BindMounts {
for {
select {
case waitBody := <-waitOk:
- e.logf("Container exited with code: %v", waitBody.StatusCode)
// wait for stdout/stderr to complete
<-e.doneIO
return int(waitBody.StatusCode), nil