Merge branch '19081-singularity-no-eval'
[arvados.git] / lib / crunchrun / singularity.go
index f9ae073dc12c82b7b848020ce8e270ed4480cdef..1af0d420e4ca814c0da717b9f21ed1a432a59161 100644 (file)
@@ -10,6 +10,7 @@ import (
        "os"
        "os/exec"
        "sort"
+       "strings"
        "syscall"
        "time"
 
@@ -36,6 +37,14 @@ func newSingularityExecutor(logf func(string, ...interface{})) (*singularityExec
        }, nil
 }
 
+func (e *singularityExecutor) Runtime() string {
+       buf, err := exec.Command("singularity", "--version").CombinedOutput()
+       if err != nil {
+               return "singularity (unknown version)"
+       }
+       return strings.TrimSuffix(string(buf), "\n")
+}
+
 func (e *singularityExecutor) getOrCreateProject(ownerUuid string, name string, containerClient *arvados.Client) (*arvados.Group, error) {
        var gp arvados.GroupList
        err := containerClient.RequestAndDecode(&gp,
@@ -72,16 +81,16 @@ func (e *singularityExecutor) getOrCreateProject(ownerUuid string, name string,
 }
 
 func (e *singularityExecutor) checkImageCache(dockerImageID string, container arvados.Container, arvMountPoint string,
-       containerClient *arvados.Client) (collectionUuid string, err error) {
+       containerClient *arvados.Client) (collection *arvados.Collection, err error) {
 
        // Cache the image to keep
        cacheGroup, err := e.getOrCreateProject(container.RuntimeUserUUID, ".cache", containerClient)
        if err != nil {
-               return "", fmt.Errorf("error getting '.cache' project: %v", err)
+               return nil, fmt.Errorf("error getting '.cache' project: %v", err)
        }
        imageGroup, err := e.getOrCreateProject(cacheGroup.UUID, "auto-generated singularity images", containerClient)
        if err != nil {
-               return "", fmt.Errorf("error getting 'auto-generated singularity images' project: %s", err)
+               return nil, fmt.Errorf("error getting 'auto-generated singularity images' project: %s", err)
        }
 
        collectionName := fmt.Sprintf("singularity image for %v", dockerImageID)
@@ -95,14 +104,14 @@ func (e *singularityExecutor) checkImageCache(dockerImageID string, container ar
                },
                        Limit: 1})
        if err != nil {
-               return "", fmt.Errorf("error querying for collection '%v': %v", collectionName, err)
+               return nil, fmt.Errorf("error querying for collection '%v': %v", collectionName, err)
        }
        var imageCollection arvados.Collection
        if len(cl.Items) == 1 {
                imageCollection = cl.Items[0]
        } else {
-               collectionName := collectionName + " " + time.Now().UTC().Format(time.RFC3339)
-
+               collectionName := "converting " + collectionName
+               exp := time.Now().Add(24 * 7 * 2 * time.Hour)
                err = containerClient.RequestAndDecode(&imageCollection,
                        arvados.EndpointCollectionCreate.Method,
                        arvados.EndpointCollectionCreate.Path,
@@ -110,15 +119,17 @@ func (e *singularityExecutor) checkImageCache(dockerImageID string, container ar
                                "collection": map[string]string{
                                        "owner_uuid": imageGroup.UUID,
                                        "name":       collectionName,
+                                       "trash_at":   exp.UTC().Format(time.RFC3339),
                                },
+                               "ensure_unique_name": true,
                        })
                if err != nil {
-                       e.logf("error creating '%v' collection: %s", collectionName, err)
+                       return nil, fmt.Errorf("error creating '%v' collection: %s", collectionName, err)
                }
 
        }
 
-       return imageCollection.UUID, nil
+       return &imageCollection, nil
 }
 
 // LoadImage will satisfy ContainerExecuter interface transforming
@@ -126,20 +137,25 @@ func (e *singularityExecutor) checkImageCache(dockerImageID string, container ar
 func (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath string, container arvados.Container, arvMountPoint string,
        containerClient *arvados.Client) error {
 
-       var sifCollectionUUID string
        var imageFilename string
+       var sifCollection *arvados.Collection
+       var err error
        if containerClient != nil {
-               sifCollectionUUID, err := e.checkImageCache(dockerImageID, container, arvMountPoint, containerClient)
+               sifCollection, err = e.checkImageCache(dockerImageID, container, arvMountPoint, containerClient)
                if err != nil {
                        return err
                }
-               imageFilename = fmt.Sprintf("%s/by_uuid/%s/image.sif", arvMountPoint, sifCollectionUUID)
+               imageFilename = fmt.Sprintf("%s/by_uuid/%s/image.sif", arvMountPoint, sifCollection.UUID)
        } else {
                imageFilename = e.tmpdir + "/image.sif"
        }
 
        if _, err := os.Stat(imageFilename); os.IsNotExist(err) {
-               exec.Command("find", arvMountPoint+"/by_id/").Run()
+               // Make sure the docker image is readable, and error
+               // out if not.
+               if _, err := os.Stat(imageTarballPath); err != nil {
+                       return err
+               }
 
                e.logf("building singularity image")
                // "singularity build" does not accept a
@@ -151,7 +167,22 @@ func (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath s
                        return err
                }
 
+               // Set up a cache and tmp dir for singularity build
+               err = os.Mkdir(e.tmpdir+"/cache", 0700)
+               if err != nil {
+                       return err
+               }
+               defer os.RemoveAll(e.tmpdir + "/cache")
+               err = os.Mkdir(e.tmpdir+"/tmp", 0700)
+               if err != nil {
+                       return err
+               }
+               defer os.RemoveAll(e.tmpdir + "/tmp")
+
                build := exec.Command("singularity", "build", imageFilename, "docker-archive://"+e.tmpdir+"/image.tar")
+               build.Env = os.Environ()
+               build.Env = append(build.Env, "SINGULARITY_CACHEDIR="+e.tmpdir+"/cache")
+               build.Env = append(build.Env, "SINGULARITY_TMPDIR="+e.tmpdir+"/tmp")
                e.logf("%v", build.Args)
                out, err := build.CombinedOutput()
                // INFO:    Starting build...
@@ -177,7 +208,7 @@ func (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath s
        // update TTL to now + two weeks
        exp := time.Now().Add(24 * 7 * 2 * time.Hour)
 
-       uuidPath, err := containerClient.PathForUUID("update", sifCollectionUUID)
+       uuidPath, err := containerClient.PathForUUID("update", sifCollection.UUID)
        if err != nil {
                e.logf("error PathForUUID: %v", err)
                return nil
@@ -192,10 +223,22 @@ func (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath s
                                "trash_at": exp.UTC().Format(time.RFC3339),
                        },
                })
+       if err == nil {
+               // If we just wrote the image to the cache, the
+               // response also returns the updated PDH
+               e.imageFilename = fmt.Sprintf("%s/by_id/%s/image.sif", arvMountPoint, imageCollection.PortableDataHash)
+               return nil
+       }
+
+       e.logf("error updating/renaming collection for cached sif image: %v", err)
+       // Failed to update but maybe it lost a race and there is
+       // another cached collection in the same place, so check the cache
+       // again
+       sifCollection, err = e.checkImageCache(dockerImageID, container, arvMountPoint, containerClient)
        if err != nil {
-               e.logf("error updating collection trash_at: %v", err)
+               return err
        }
-       e.imageFilename = fmt.Sprintf("%s/by_id/%s/image.sif", arvMountPoint, imageCollection.PortableDataHash)
+       e.imageFilename = fmt.Sprintf("%s/by_id/%s/image.sif", arvMountPoint, sifCollection.PortableDataHash)
 
        return nil
 }
@@ -205,11 +248,16 @@ func (e *singularityExecutor) Create(spec containerSpec) error {
        return nil
 }
 
-func (e *singularityExecutor) Start() error {
-       args := []string{"singularity", "exec", "--containall", "--no-home", "--cleanenv", "--pwd", e.spec.WorkingDir}
+func (e *singularityExecutor) execCmd(path string) *exec.Cmd {
+       args := []string{path, "exec", "--containall", "--cleanenv", "--pwd", e.spec.WorkingDir}
        if !e.spec.EnableNetwork {
                args = append(args, "--net", "--network=none")
        }
+
+       if e.spec.CUDADeviceCount != 0 {
+               args = append(args, "--nv")
+       }
+
        readonlyflag := map[bool]string{
                false: "rw",
                true:  "ro",
@@ -221,7 +269,12 @@ func (e *singularityExecutor) Start() error {
        sort.Strings(binds)
        for _, path := range binds {
                mount := e.spec.BindMounts[path]
-               args = append(args, "--bind", mount.HostPath+":"+path+":"+readonlyflag[mount.ReadOnly])
+               if path == e.spec.Env["HOME"] {
+                       // Singularity treates $HOME as special case
+                       args = append(args, "--home", mount.HostPath+":"+path)
+               } else {
+                       args = append(args, "--bind", mount.HostPath+":"+path+":"+readonlyflag[mount.ReadOnly])
+               }
        }
 
        // This is for singularity 3.5.2. There are some behaviors
@@ -231,21 +284,34 @@ func (e *singularityExecutor) Start() error {
        env := make([]string, 0, len(e.spec.Env))
        for k, v := range e.spec.Env {
                if k == "HOME" {
-                       // $HOME is a special case
-                       args = append(args, "--home="+v)
-               } else {
-                       env = append(env, "SINGULARITYENV_"+k+"="+v)
+                       // Singularity treats $HOME as special case, this is handled
+                       // with --home above
+                       continue
                }
+               env = append(env, "SINGULARITYENV_"+k+"="+v)
+       }
+
+       // Singularity always makes all nvidia devices visible to the
+       // container.  If a resource manager such as slurm or LSF told
+       // us to select specific devices we need to propagate that.
+       if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
+               // If a resource manager such as slurm or LSF told
+               // us to select specific devices we need to propagate that.
+               env = append(env, "SINGULARITYENV_CUDA_VISIBLE_DEVICES="+cudaVisibleDevices)
        }
+       // Singularity's default behavior is to evaluate each
+       // SINGULARITYENV_* env var with a shell as a double-quoted
+       // string and pass the result to the contained
+       // process. Singularity 3.10+ has an option to pass env vars
+       // through literally without evaluating, which is what we
+       // want. See https://github.com/sylabs/singularity/pull/704
+       // and https://dev.arvados.org/issues/19081
+       env = append(env, "SINGULARITY_NO_EVAL=1")
 
        args = append(args, e.imageFilename)
        args = append(args, e.spec.Command...)
 
-       path, err := exec.LookPath(args[0])
-       if err != nil {
-               return err
-       }
-       child := &exec.Cmd{
+       return &exec.Cmd{
                Path:   path,
                Args:   args,
                Env:    env,
@@ -253,6 +319,14 @@ func (e *singularityExecutor) Start() error {
                Stdout: e.spec.Stdout,
                Stderr: e.spec.Stderr,
        }
+}
+
+func (e *singularityExecutor) Start() error {
+       path, err := exec.LookPath("singularity")
+       if err != nil {
+               return err
+       }
+       child := e.execCmd(path)
        err = child.Start()
        if err != nil {
                return err