KernelMemory: spec.RAM, // kernel portion
},
}
+ if spec.EnableCUDA {
+ // Ask the Docker daemon (via the nvidia runtime) to expose
+ // host GPUs to the container.
+ hostCfg.Resources.DeviceRequests = append(hostCfg.Resources.DeviceRequests, dockercontainer.DeviceRequest{
+ Driver: "nvidia",
+ Count: -1, // -1 means "all available devices"
+ Capabilities: [][]string{{"gpu", "nvidia", "compute"}},
+ })
+ }
for path, mount := range spec.BindMounts {
bind := mount.HostPath + ":" + path
if mount.ReadOnly {
BindMounts map[string]bindmount
Command []string
EnableNetwork bool
+ EnableCUDA bool // if true, expose host NVIDIA GPUs to the container
NetworkMode string // docker network mode, normally "default"
CgroupParent string
Stdin io.Reader
if !e.spec.EnableNetwork {
args = append(args, "--net", "--network=none")
}
+
+ // Expose NVIDIA GPUs inside the container (singularity --nv flag).
+ if e.spec.EnableCUDA {
+ args = append(args, "--nv")
+ }
+
readonlyflag := map[bool]string{
false: "rw",
true: "ro",
// RuntimeConstraints specify a container's compute resources (RAM,
// CPU) and network connectivity.
type RuntimeConstraints struct {
- API bool `json:"API"`
- RAM int64 `json:"ram"`
- VCPUs int `json:"vcpus"`
- KeepCacheRAM int64 `json:"keep_cache_ram"`
+ API bool `json:"API"`
+ RAM int64 `json:"ram"`
+ VCPUs int `json:"vcpus"`
+ KeepCacheRAM int64 `json:"keep_cache_ram"`
+ // CUDADriverVersion is the minimum NVIDIA driver version required.
+ CUDADriverVersion string `json:"cuda_driver_version"`
+ // CUDACubinHardwareCapability lists GPU hardware capability
+ // versions targeted by the container's precompiled (cubin) code.
+ CUDACubinHardwareCapability []string `json:"cuda_cubin_hardware_capability"`
+ // CUDAPTXHardwareCapability is the hardware capability needed to
+ // JIT the container's PTX code. (NOTE: field renamed from the
+ // misspelled "CUDAPTXHardwardCapability"; JSON tag is unchanged.)
+ CUDAPTXHardwareCapability string `json:"cuda_ptx_hardware_capability"`
+ // CUDADeviceCount is the number of GPU devices to allocate.
+ CUDADeviceCount int `json:"cuda_device_count"`
}
// SchedulingParameters specify a container's scheduling parameters