"strings"
"time"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerclient "github.com/docker/docker/client"
}, err
}
-func (e *dockerExecutor) ImageLoaded(imageID string) bool {
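+// Runtime returns the name of the container runtime used by this executor.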
+func (e *dockerExecutor) Runtime() string { return "docker" }
+
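+// LoadImage ensures imageID is available in the local Docker daemon,
+// loading it from the tarball at imageTarballPath if it isn't already
+// present. The container, arvMountPoint, and containerClient arguments
+// are unused here; presumably they exist to satisfy a shared executor
+// interface used by other runtime backends.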
+func (e *dockerExecutor) LoadImage(imageID string, imageTarballPath string, container arvados.Container, arvMountPoint string,
+ containerClient *arvados.Client) error {
_, _, err := e.dockerclient.ImageInspectWithRaw(context.TODO(), imageID)
- return err == nil
-}
+ if err == nil {
+ // already loaded
+ return nil
+ }
-func (e *dockerExecutor) LoadImage(filename string) error {
- f, err := os.Open(filename)
+ f, err := os.Open(imageTarballPath)
if err != nil {
return err
}
return nil
}
-func (e *dockerExecutor) Create(spec containerSpec) error {
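+// config converts spec into the Docker-specific container and host
+// configuration structs that ContainerCreate consumes.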
+func (e *dockerExecutor) config(spec containerSpec) (dockercontainer.Config, dockercontainer.HostConfig) {
e.logf("Creating Docker container")
cfg := dockercontainer.Config{
Image: spec.Image,
KernelMemory: spec.RAM, // kernel portion
},
}
+ if spec.CUDADeviceCount != 0 {
+ var deviceIds []string
+ if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
+ // If a resource manager such as Slurm or LSF told
+ // us to select specific devices, we need to propagate that.
+ deviceIds = strings.Split(cudaVisibleDevices, ",")
+ }
+
+ deviceCount := spec.CUDADeviceCount
+ if len(deviceIds) > 0 {
+ // Docker won't accept both non-empty
+ // DeviceIDs and a non-zero Count
+ //
+ // (it turns out "Count" is a dumb fallback
+ // that just allocates device 0, 1, 2, ...,
+ // Count-1)
+ deviceCount = 0
+ }
+
+ // Capabilities are confusing. The driver has the generic
+ // capabilities "gpu" and "nvidia", but there are also
+ // additional capabilities, "compute" and "utility",
+ // that are passed to nvidia-container-cli.
+ //
+ // "compute" means include the CUDA libraries and
+ // "utility" means include the CUDA utility programs
+ // (like nvidia-smi).
+ //
+ // https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/daemon/nvidia_linux.go#L37
+ // https://github.com/containerd/containerd/blob/main/contrib/nvidia/nvidia.go
+ hostCfg.Resources.DeviceRequests = append(hostCfg.Resources.DeviceRequests, dockercontainer.DeviceRequest{
+ Driver: "nvidia",
+ Count: deviceCount,
+ DeviceIDs: deviceIds,
+ Capabilities: [][]string{{"gpu", "nvidia", "compute", "utility"}},
+ })
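+
+ // Note: this DeviceRequest is roughly what the Docker CLI
+ // builds for `--gpus '"device=0,1"'` (DeviceIDs) or
+ // `--gpus 2` (Count), plus the extra capabilities noted
+ // above.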
+ }
for path, mount := range spec.BindMounts {
bind := mount.HostPath + ":" + path
if mount.ReadOnly {
if spec.EnableNetwork {
hostCfg.NetworkMode = dockercontainer.NetworkMode(spec.NetworkMode)
}
+ return cfg, hostCfg
+}
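+
+// Create builds the container and host configuration for spec, then
+// asks the Docker daemon to create, but not start, a container named
+// after the Arvados container UUID.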
+func (e *dockerExecutor) Create(spec containerSpec) error {
+ cfg, hostCfg := e.config(spec)
created, err := e.dockerclient.ContainerCreate(context.TODO(), &cfg, &hostCfg, nil, e.containerUUID)
if err != nil {
return fmt.Errorf("While creating container: %v", err)
for {
select {
case waitBody := <-waitOk:
- e.logf("Container exited with code: %v", waitBody.StatusCode)
// wait for stdout/stderr to complete
<-e.doneIO
return int(waitBody.StatusCode), nil