X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/4e7f7c1155803dd3ca16185fdcea529f2f9153bc..7ebe828a435dcaa1b5668b72adbaad495059f211:/lib/crunchrun/singularity.go

diff --git a/lib/crunchrun/singularity.go b/lib/crunchrun/singularity.go
index 70ad653b7d..6ba65200d2 100644
--- a/lib/crunchrun/singularity.go
+++ b/lib/crunchrun/singularity.go
@@ -5,6 +5,7 @@
 package crunchrun
 
 import (
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -160,7 +161,22 @@ func (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath s
 		return err
 	}
 
+	// Set up a cache and tmp dir for singularity build
+	err = os.Mkdir(e.tmpdir+"/cache", 0700)
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(e.tmpdir + "/cache")
+	err = os.Mkdir(e.tmpdir+"/tmp", 0700)
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(e.tmpdir + "/tmp")
+
 	build := exec.Command("singularity", "build", imageFilename, "docker-archive://"+e.tmpdir+"/image.tar")
+	build.Env = os.Environ()
+	build.Env = append(build.Env, "SINGULARITY_CACHEDIR="+e.tmpdir+"/cache")
+	build.Env = append(build.Env, "SINGULARITY_TMPDIR="+e.tmpdir+"/tmp")
 	e.logf("%v", build.Args)
 	out, err := build.CombinedOutput()
 	// INFO: Starting build...
@@ -226,11 +242,16 @@ func (e *singularityExecutor) Create(spec containerSpec) error {
 	return nil
 }
 
-func (e *singularityExecutor) Start() error {
-	args := []string{"singularity", "exec", "--containall", "--cleanenv", "--pwd", e.spec.WorkingDir}
+func (e *singularityExecutor) execCmd(path string) *exec.Cmd {
+	args := []string{path, "exec", "--containall", "--cleanenv", "--pwd", e.spec.WorkingDir}
 	if !e.spec.EnableNetwork {
 		args = append(args, "--net", "--network=none")
 	}
+
+	if e.spec.CUDADeviceCount != 0 {
+		args = append(args, "--nv")
+	}
+
 	readonlyflag := map[bool]string{
 		false: "rw",
 		true:  "ro",
@@ -264,14 +285,19 @@ func (e *singularityExecutor) Start() error {
 		env = append(env, "SINGULARITYENV_"+k+"="+v)
 	}
 
+	// Singularity always makes all nvidia devices visible to the
+	// container. If a resource manager such as slurm or LSF told
+	// us to select specific devices we need to propagate that.
+	if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
+		// If a resource manager such as slurm or LSF told
+		// us to select specific devices we need to propagate that.
+		env = append(env, "SINGULARITYENV_CUDA_VISIBLE_DEVICES="+cudaVisibleDevices)
+	}
+
 	args = append(args, e.imageFilename)
 	args = append(args, e.spec.Command...)
 
-	path, err := exec.LookPath(args[0])
-	if err != nil {
-		return err
-	}
-	child := &exec.Cmd{
+	return &exec.Cmd{
 		Path:   path,
 		Args:   args,
 		Env:    env,
@@ -279,6 +305,14 @@
 		Stdout: e.spec.Stdout,
 		Stderr: e.spec.Stderr,
 	}
+}
+
+func (e *singularityExecutor) Start() error {
+	path, err := exec.LookPath("singularity")
+	if err != nil {
+		return err
+	}
+	child := e.execCmd(path)
 	err = child.Start()
 	if err != nil {
 		return err
@@ -316,3 +350,11 @@ func (e *singularityExecutor) Close() {
 		e.logf("error removing temp dir: %s", err)
 	}
 }
+
+func (e *singularityExecutor) InjectCommand(ctx context.Context, detachKeys, username string, usingTTY bool, injectcmd []string) (*exec.Cmd, error) {
+	return nil, errors.New("unimplemented")
+}
+
+func (e *singularityExecutor) IPAddress() (string, error) {
+	return "", errors.New("unimplemented")
+}
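
For orientation, the core of this diff is the execCmd refactor: command-line construction moves out of Start so the same code can build the exec.Cmd for future entry points, with container environment variables passed through --cleanenv via the SINGULARITYENV_ prefix, and a scheduler's GPU selection propagated through CUDA_VISIBLE_DEVICES. The standalone Go sketch below mirrors that pattern outside crunchrun; minimalSpec and buildSingularityCmd are hypothetical names for illustration, not part of the Arvados source.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// minimalSpec stands in for the handful of containerSpec fields the diff touches.
type minimalSpec struct {
	WorkingDir      string
	EnableNetwork   bool
	CUDADeviceCount int
	Env             map[string]string
	Command         []string
}

// buildSingularityCmd assembles a "singularity exec" invocation the same way
// the execCmd refactor does: flags first, then the image, then the user
// command, with the container environment smuggled past --cleanenv through
// SINGULARITYENV_* variables.
func buildSingularityCmd(path, imageFilename string, spec minimalSpec) *exec.Cmd {
	args := []string{path, "exec", "--containall", "--cleanenv", "--pwd", spec.WorkingDir}
	if !spec.EnableNetwork {
		args = append(args, "--net", "--network=none")
	}
	if spec.CUDADeviceCount != 0 {
		// --nv binds the host's NVIDIA driver stack into the container.
		args = append(args, "--nv")
	}
	args = append(args, imageFilename)
	args = append(args, spec.Command...)

	env := []string{}
	for k, v := range spec.Env {
		env = append(env, "SINGULARITYENV_"+k+"="+v)
	}
	if cvd := os.Getenv("CUDA_VISIBLE_DEVICES"); cvd != "" {
		// Propagate a resource manager's GPU selection (e.g. slurm or LSF).
		env = append(env, "SINGULARITYENV_CUDA_VISIBLE_DEVICES="+cvd)
	}

	return &exec.Cmd{Path: path, Args: args, Env: env, Stdout: os.Stdout, Stderr: os.Stderr}
}

func main() {
	cmd := buildSingularityCmd("/usr/bin/singularity", "image.sif", minimalSpec{
		WorkingDir:      "/tmp",
		CUDADeviceCount: 1,
		Env:             map[string]string{"TMPDIR": "/tmp"},
		Command:         []string{"nvidia-smi"},
	})
	fmt.Println(cmd.Args)
}

Running this only prints the argv that would be executed, so the flag and environment handling can be inspected without a Singularity installation.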