"os"
"os/exec"
"sort"
+ "strings"
"syscall"
"time"
}, nil
}
-func (e *singularityExecutor) Runtime() string { return "singularity" }
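+// Runtime returns the version string reported by "singularity --version",
+// or a placeholder if the version cannot be determined.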
+func (e *singularityExecutor) Runtime() string {
+ buf, err := exec.Command("singularity", "--version").CombinedOutput()
+ if err != nil {
+ return "singularity (unknown version)"
+ }
+ return strings.TrimSuffix(string(buf), "\n")
+}
func (e *singularityExecutor) getOrCreateProject(ownerUuid string, name string, containerClient *arvados.Client) (*arvados.Group, error) {
var gp arvados.GroupList
return nil
}
-func (e *singularityExecutor) Start() error {
- args := []string{"singularity", "exec", "--containall", "--cleanenv", "--pwd", e.spec.WorkingDir}
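+// execCmd assembles the exec.Cmd that will run the container using the
+// singularity executable at the given path; it does not start the process.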
+func (e *singularityExecutor) execCmd(path string) *exec.Cmd {
+ args := []string{path, "exec", "--containall", "--cleanenv", "--pwd", e.spec.WorkingDir}
if !e.spec.EnableNetwork {
args = append(args, "--net", "--network=none")
}
- if e.spec.EnableCUDA {
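+ // --nv exposes nvidia GPU support inside the container when CUDA devices are requested.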
+ if e.spec.CUDADeviceCount != 0 {
args = append(args, "--nv")
}
env = append(env, "SINGULARITYENV_"+k+"="+v)
}
+ // Singularity always makes all nvidia devices visible to the
+ // container. If a resource manager such as slurm or LSF told
+ // us to select specific devices, we need to propagate that.
+ if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
+ env = append(env, "SINGULARITYENV_CUDA_VISIBLE_DEVICES="+cudaVisibleDevices)
+ }
+
args = append(args, e.imageFilename)
args = append(args, e.spec.Command...)
- path, err := exec.LookPath(args[0])
- if err != nil {
- return err
- }
- child := &exec.Cmd{
+ return &exec.Cmd{
Path: path,
Args: args,
Env: env,
Stdout: e.spec.Stdout,
Stderr: e.spec.Stderr,
}
+}
+
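+// Start resolves the singularity executable on $PATH and launches the
+// container process without waiting for it to finish.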
+func (e *singularityExecutor) Start() error {
+ path, err := exec.LookPath("singularity")
+ if err != nil {
+ return err
+ }
+ child := e.execCmd(path)
err = child.Start()
if err != nil {
return err