package crunchrun
import (
+ "bytes"
+ "errors"
"fmt"
- "io"
"io/ioutil"
+ "net"
"os"
"os/exec"
+ "os/user"
+ "regexp"
"sort"
+ "strconv"
"strings"
"syscall"
+ "time"
"git.arvados.org/arvados.git/sdk/go/arvados"
"golang.org/x/net/context"
)
type singularityExecutor struct {
- logf func(string, ...interface{})
- spec containerSpec
- tmpdir string
- child *exec.Cmd
- imageFilename string // "sif" image
- containerClient *arvados.Client
- container arvados.Container
- keepClient IKeepClient
- keepMount string
+ logf func(string, ...interface{})
+ fakeroot bool // use --fakeroot flag, allow --network=bridge when non-root (currently only used by tests)
+ spec containerSpec
+ tmpdir string
+ child *exec.Cmd
+ imageFilename string // "sif" image
}
func newSingularityExecutor(logf func(string, ...interface{})) (*singularityExecutor, error) {
}, nil
}
-func (e *singularityExecutor) getOrCreateProject(ownerUuid string, name string, create bool) (*arvados.Group, error) {
+// Runtime reports the container runtime version for logging/metadata,
+// taken from the output of "singularity --version" (trailing newline
+// stripped). If the command cannot be run, a placeholder string is
+// returned instead of an error.
+func (e *singularityExecutor) Runtime() string {
+ buf, err := exec.Command("singularity", "--version").CombinedOutput()
+ if err != nil {
+ return "singularity (unknown version)"
+ }
+ return strings.TrimSuffix(string(buf), "\n")
+}
+
+func (e *singularityExecutor) getOrCreateProject(ownerUuid string, name string, containerClient *arvados.Client) (*arvados.Group, error) {
var gp arvados.GroupList
- err := e.containerClient.RequestAndDecode(&gp,
+ err := containerClient.RequestAndDecode(&gp,
arvados.EndpointGroupList.Method,
arvados.EndpointGroupList.Path,
nil, arvados.ListOptions{Filters: []arvados.Filter{
if len(gp.Items) == 1 {
return &gp.Items[0], nil
}
- if !create {
- return nil, nil
- }
+
var rgroup arvados.Group
- err = e.containerClient.RequestAndDecode(&rgroup,
+ err = containerClient.RequestAndDecode(&rgroup,
arvados.EndpointGroupCreate.Method,
arvados.EndpointGroupCreate.Path,
nil, map[string]interface{}{
return &rgroup, nil
}
-func (e *singularityExecutor) ImageLoaded(imageId string) bool {
- // Check if docker image is cached in keep & if so set imageFilename
+func (e *singularityExecutor) checkImageCache(dockerImageID string, container arvados.Container, arvMountPoint string,
+ containerClient *arvados.Client) (collection *arvados.Collection, err error) {
// Cache the image to keep
- cacheGroup, err := e.getOrCreateProject(e.container.RuntimeUserUUID, ".cache", false)
+ cacheGroup, err := e.getOrCreateProject(container.RuntimeUserUUID, ".cache", containerClient)
if err != nil {
- e.logf("error getting '.cache' project: %v", err)
- return false
+ return nil, fmt.Errorf("error getting '.cache' project: %v", err)
}
- imageGroup, err := e.getOrCreateProject(cacheGroup.UUID, "auto-generated singularity images", false)
+ imageGroup, err := e.getOrCreateProject(cacheGroup.UUID, "auto-generated singularity images", containerClient)
if err != nil {
- e.logf("error getting 'auto-generated singularity images' project: %s", err)
- return false
+ return nil, fmt.Errorf("error getting 'auto-generated singularity images' project: %s", err)
}
- collectionName := fmt.Sprintf("singularity image for %v", imageId)
+ collectionName := fmt.Sprintf("singularity image for %v", dockerImageID)
var cl arvados.CollectionList
- err = e.containerClient.RequestAndDecode(&cl,
+ err = containerClient.RequestAndDecode(&cl,
arvados.EndpointCollectionList.Method,
arvados.EndpointCollectionList.Path,
nil, arvados.ListOptions{Filters: []arvados.Filter{
},
Limit: 1})
if err != nil {
- e.logf("error getting collection '%v' project: %v", err)
- return false
- }
- if len(cl.Items) == 0 {
- e.logf("no cached image '%v' found", collectionName)
- return false
+ return nil, fmt.Errorf("error querying for collection '%v': %v", collectionName, err)
}
+ var imageCollection arvados.Collection
+ if len(cl.Items) == 1 {
+ imageCollection = cl.Items[0]
+ } else {
+ collectionName := "converting " + collectionName
+ exp := time.Now().Add(24 * 7 * 2 * time.Hour)
+ err = containerClient.RequestAndDecode(&imageCollection,
+ arvados.EndpointCollectionCreate.Method,
+ arvados.EndpointCollectionCreate.Path,
+ nil, map[string]interface{}{
+ "collection": map[string]string{
+ "owner_uuid": imageGroup.UUID,
+ "name": collectionName,
+ "trash_at": exp.UTC().Format(time.RFC3339),
+ },
+ "ensure_unique_name": true,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error creating '%v' collection: %s", collectionName, err)
+ }
- path := fmt.Sprintf("%s/by_id/%s/image.sif", e.keepMount, cl.Items[0].PortableDataHash)
- e.logf("Looking for %v", path)
- if _, err = os.Stat(path); os.IsNotExist(err) {
- return false
}
- e.imageFilename = path
- return true
+ return &imageCollection, nil
}
// LoadImage will satisfy ContainerExecuter interface transforming
// containerImage into a sif file for later use.
-func (e *singularityExecutor) LoadImage(imageTarballPath string) error {
- if e.imageFilename != "" {
- e.logf("using singularity image %v", e.imageFilename)
+func (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath string, container arvados.Container, arvMountPoint string,
+ containerClient *arvados.Client) error {
- // was set by ImageLoaded
- return nil
+ var imageFilename string
+ var sifCollection *arvados.Collection
+ var err error
+ if containerClient != nil {
+ sifCollection, err = e.checkImageCache(dockerImageID, container, arvMountPoint, containerClient)
+ if err != nil {
+ return err
+ }
+ imageFilename = fmt.Sprintf("%s/by_uuid/%s/image.sif", arvMountPoint, sifCollection.UUID)
+ } else {
+ imageFilename = e.tmpdir + "/image.sif"
}
- e.logf("building singularity image")
- // "singularity build" does not accept a
- // docker-archive://... filename containing a ":" character,
- // as in "/path/to/sha256:abcd...1234.tar". Workaround: make a
- // symlink that doesn't have ":" chars.
- err := os.Symlink(imageTarballPath, e.tmpdir+"/image.tar")
- if err != nil {
- return err
- }
- e.imageFilename = e.tmpdir + "/image.sif"
- build := exec.Command("singularity", "build", e.imageFilename, "docker-archive://"+e.tmpdir+"/image.tar")
- e.logf("%v", build.Args)
- out, err := build.CombinedOutput()
- // INFO: Starting build...
- // Getting image source signatures
- // Copying blob ab15617702de done
- // Copying config 651e02b8a2 done
- // Writing manifest to image destination
- // Storing signatures
- // 2021/04/22 14:42:14 info unpack layer: sha256:21cbfd3a344c52b197b9fa36091e66d9cbe52232703ff78d44734f85abb7ccd3
- // INFO: Creating SIF file...
- // INFO: Build complete: arvados-jobs.latest.sif
- e.logf("%s", out)
- if err != nil {
- return err
- }
+ if _, err := os.Stat(imageFilename); os.IsNotExist(err) {
+ // Make sure the docker image is readable, and error
+ // out if not.
+ if _, err := os.Stat(imageTarballPath); err != nil {
+ return err
+ }
- // Cache the image to keep
- cacheGroup, err := e.getOrCreateProject(e.container.RuntimeUserUUID, ".cache", true)
- if err != nil {
- e.logf("error getting '.cache' project: %v", err)
- return nil
- }
- imageGroup, err := e.getOrCreateProject(cacheGroup.UUID, "auto-generated singularity images", true)
- if err != nil {
- e.logf("error getting 'auto-generated singularity images' project: %v", err)
- return nil
- }
-
- parts := strings.Split(imageTarballPath, "/")
- imageId := parts[len(parts)-1]
- if strings.HasSuffix(imageId, ".tar") {
- imageId = imageId[0 : len(imageId)-4]
- }
+ e.logf("building singularity image")
+ // "singularity build" does not accept a
+ // docker-archive://... filename containing a ":" character,
+ // as in "/path/to/sha256:abcd...1234.tar". Workaround: make a
+ // symlink that doesn't have ":" chars.
+ err := os.Symlink(imageTarballPath, e.tmpdir+"/image.tar")
+ if err != nil {
+ return err
+ }
- fs, err := (&arvados.Collection{ManifestText: ""}).FileSystem(e.containerClient, e.keepClient)
- if err != nil {
- e.logf("error creating FileSystem: %s", err)
- }
+ // Set up a cache and tmp dir for singularity build
+ err = os.Mkdir(e.tmpdir+"/cache", 0700)
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(e.tmpdir + "/cache")
+ err = os.Mkdir(e.tmpdir+"/tmp", 0700)
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(e.tmpdir + "/tmp")
- dst, err := fs.OpenFile("image.sif", os.O_CREATE|os.O_WRONLY, 0666)
- if err != nil {
- e.logf("error creating opening collection file for writing: %s", err)
+ build := exec.Command("singularity", "build", imageFilename, "docker-archive://"+e.tmpdir+"/image.tar")
+ build.Env = os.Environ()
+ build.Env = append(build.Env, "SINGULARITY_CACHEDIR="+e.tmpdir+"/cache")
+ build.Env = append(build.Env, "SINGULARITY_TMPDIR="+e.tmpdir+"/tmp")
+ e.logf("%v", build.Args)
+ out, err := build.CombinedOutput()
+ // INFO: Starting build...
+ // Getting image source signatures
+ // Copying blob ab15617702de done
+ // Copying config 651e02b8a2 done
+ // Writing manifest to image destination
+ // Storing signatures
+ // 2021/04/22 14:42:14 info unpack layer: sha256:21cbfd3a344c52b197b9fa36091e66d9cbe52232703ff78d44734f85abb7ccd3
+ // INFO: Creating SIF file...
+ // INFO: Build complete: arvados-jobs.latest.sif
+ e.logf("%s", out)
+ if err != nil {
+ return err
+ }
}
- src, err := os.Open(e.imageFilename)
- if err != nil {
- dst.Close()
- return nil
- }
- defer src.Close()
- _, err = io.Copy(dst, src)
- if err != nil {
- dst.Close()
+ if containerClient == nil {
+ e.imageFilename = imageFilename
return nil
}
- manifestText, err := fs.MarshalManifest(".")
+ // update TTL to now + two weeks
+ exp := time.Now().Add(24 * 7 * 2 * time.Hour)
+
+ uuidPath, err := containerClient.PathForUUID("update", sifCollection.UUID)
if err != nil {
- e.logf("error creating manifest text: %s", err)
+ e.logf("error PathForUUID: %v", err)
+ return nil
}
-
var imageCollection arvados.Collection
- collectionName := fmt.Sprintf("singularity image for %s", imageId)
- err = e.containerClient.RequestAndDecode(&imageCollection,
- arvados.EndpointCollectionCreate.Method,
- arvados.EndpointCollectionCreate.Path,
+ err = containerClient.RequestAndDecode(&imageCollection,
+ arvados.EndpointCollectionUpdate.Method,
+ uuidPath,
nil, map[string]interface{}{
"collection": map[string]string{
- "owner_uuid": imageGroup.UUID,
- "name": collectionName,
- "manifest_text": manifestText,
+ "name": fmt.Sprintf("singularity image for %v", dockerImageID),
+ "trash_at": exp.UTC().Format(time.RFC3339),
},
})
+ if err == nil {
+ // If we just wrote the image to the cache, the
+ // response also returns the updated PDH
+ e.imageFilename = fmt.Sprintf("%s/by_id/%s/image.sif", arvMountPoint, imageCollection.PortableDataHash)
+ return nil
+ }
+
+ e.logf("error updating/renaming collection for cached sif image: %v", err)
+ // Failed to update but maybe it lost a race and there is
+ // another cached collection in the same place, so check the cache
+ // again
+ sifCollection, err = e.checkImageCache(dockerImageID, container, arvMountPoint, containerClient)
if err != nil {
- e.logf("error creating '%v' collection: %s", collectionName, err)
+ return err
}
+ e.imageFilename = fmt.Sprintf("%s/by_id/%s/image.sif", arvMountPoint, sifCollection.PortableDataHash)
return nil
}
return nil
}
-func (e *singularityExecutor) Start() error {
- args := []string{"singularity", "exec", "--containall", "--no-home", "--cleanenv", "--pwd", e.spec.WorkingDir}
+func (e *singularityExecutor) execCmd(path string) *exec.Cmd {
+ args := []string{path, "exec", "--containall", "--cleanenv", "--pwd=" + e.spec.WorkingDir}
+ if e.fakeroot {
+ args = append(args, "--fakeroot")
+ }
if !e.spec.EnableNetwork {
args = append(args, "--net", "--network=none")
+ } else if u, err := user.Current(); err == nil && u.Uid == "0" || e.fakeroot {
+ // Specifying --network=bridge fails unless (a) we are
+ // root, (b) we are using --fakeroot, or (c)
+ // singularity has been configured to allow our
+ // uid/gid to use it like so:
+ //
+ // singularity config global --set 'allow net networks' bridge
+ // singularity config global --set 'allow net groups' mygroup
+ args = append(args, "--net", "--network=bridge")
+ }
+ if e.spec.CUDADeviceCount != 0 {
+ args = append(args, "--nv")
}
+
readonlyflag := map[bool]string{
false: "rw",
true: "ro",
sort.Strings(binds)
for _, path := range binds {
mount := e.spec.BindMounts[path]
- args = append(args, "--bind", mount.HostPath+":"+path+":"+readonlyflag[mount.ReadOnly])
+ if path == e.spec.Env["HOME"] {
+ // Singularity treats $HOME as special case
+ args = append(args, "--home", mount.HostPath+":"+path)
+ } else {
+ args = append(args, "--bind", mount.HostPath+":"+path+":"+readonlyflag[mount.ReadOnly])
+ }
}
- args = append(args, e.imageFilename)
- args = append(args, e.spec.Command...)
// This is for singularity 3.5.2. There are some behaviors
// that will change in singularity 3.6, please see:
// https://sylabs.io/guides/3.5/user-guide/environment_and_metadata.html
env := make([]string, 0, len(e.spec.Env))
for k, v := range e.spec.Env {
+ if k == "HOME" {
+ // Singularity treats $HOME as special case,
+ // this is handled with --home above
+ continue
+ }
env = append(env, "SINGULARITYENV_"+k+"="+v)
}
- path, err := exec.LookPath(args[0])
- if err != nil {
- return err
- }
- child := &exec.Cmd{
+ // Singularity always makes all nvidia devices visible to the
+ // container. If a resource manager such as slurm or LSF told
+ // us to select specific devices we need to propagate that.
+ if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
+ // If a resource manager such as slurm or LSF told
+ // us to select specific devices we need to propagate that.
+ env = append(env, "SINGULARITYENV_CUDA_VISIBLE_DEVICES="+cudaVisibleDevices)
+ }
+ // Singularity's default behavior is to evaluate each
+ // SINGULARITYENV_* env var with a shell as a double-quoted
+ // string and pass the result to the contained
+ // process. Singularity 3.10+ has an option to pass env vars
+ // through literally without evaluating, which is what we
+ // want. See https://github.com/sylabs/singularity/pull/704
+ // and https://dev.arvados.org/issues/19081
+ env = append(env, "SINGULARITY_NO_EVAL=1")
+
+ args = append(args, e.imageFilename)
+ args = append(args, e.spec.Command...)
+
+ return &exec.Cmd{
Path: path,
Args: args,
Env: env,
Stdout: e.spec.Stdout,
Stderr: e.spec.Stderr,
}
+}
+
+func (e *singularityExecutor) Start() error {
+ path, err := exec.LookPath("singularity")
+ if err != nil {
+ return err
+ }
+ child := e.execCmd(path)
err = child.Start()
if err != nil {
return err
}
}
-func (e *singularityExecutor) SetArvadoClient(containerClient *arvados.Client, keepClient IKeepClient, container arvados.Container, keepMount string) {
- e.containerClient = containerClient
- e.container = container
- e.keepClient = keepClient
- e.keepMount = keepMount
+// InjectCommand returns an exec.Cmd that, when run, executes injectcmd
+// inside the running container by joining all namespaces ("--all") of a
+// process found inside it via nsenter.
+//
+// NOTE(review): detachKeys, username, and usingTTY are accepted
+// (presumably for executor-interface compatibility) but are not used by
+// this implementation — confirm that is intentional for singularity.
+func (e *singularityExecutor) InjectCommand(ctx context.Context, detachKeys, username string, usingTTY bool, injectcmd []string) (*exec.Cmd, error) {
+ target, err := e.containedProcess()
+ if err != nil {
+ return nil, err
+ }
+ return exec.CommandContext(ctx, "nsenter", append([]string{fmt.Sprintf("--target=%d", target), "--all"}, injectcmd...)...), nil
+}
+
+var (
+ errContainerHasNoIPAddress = errors.New("container has no IP address distinct from host")
+)
+
+// IPAddress returns an IPv4 address that is visible to a process inside
+// the container but not to this (host-side) process, i.e. an address
+// belonging to the container's own network namespace. It returns
+// errContainerHasNoIPAddress if every address seen by the container is
+// also a local address of the host process (e.g. when the container
+// shares the host network namespace).
+func (e *singularityExecutor) IPAddress() (string, error) {
+ target, err := e.containedProcess()
+ if err != nil {
+ return "", err
+ }
+ targetIPs, err := processIPs(target)
+ if err != nil {
+ return "", err
+ }
+ selfIPs, err := processIPs(os.Getpid())
+ if err != nil {
+ return "", err
+ }
+ // Return the first container-local address that is not also one
+ // of our own addresses. Map iteration order is random, so if the
+ // container has several distinct addresses, an arbitrary one is
+ // returned.
+ for ip := range targetIPs {
+ if !selfIPs[ip] {
+ return ip, nil
+ }
+ }
+ return "", errContainerHasNoIPAddress
+}
+
+// processIPs returns the set of local IPv4 addresses visible to the
+// given process, by parsing /proc/<pid>/net/fib_trie (which reflects
+// that process's network namespace). Non-IPv4 entries are skipped.
+func processIPs(pid int) (map[string]bool, error) {
+ fibtrie, err := os.ReadFile(fmt.Sprintf("/proc/%d/net/fib_trie", pid))
+ if err != nil {
+ return nil, err
+ }
+
+ addrs := map[string]bool{}
+ // When we see a pair of lines like this:
+ //
+ // |-- 10.1.2.3
+ // /32 host LOCAL
+ //
+ // ...we set addrs["10.1.2.3"] = true
+ lines := bytes.Split(fibtrie, []byte{'\n'})
+ for linenumber, line := range lines {
+ if !bytes.HasSuffix(line, []byte("/32 host LOCAL")) {
+ continue
+ }
+ // A "/32 host LOCAL" line on the very first line has no
+ // preceding address line to pair with.
+ if linenumber < 1 {
+ continue
+ }
+ // The address is the last space-separated token of the
+ // previous line.
+ // NOTE(review): the upper bound compares i against
+ // len(line)-7 — the length of the *current* line, not the
+ // previous one. This looks like a sanity check on tree
+ // indentation depth; confirm the intent, since at first
+ // glance one would expect len(lines[linenumber-1]).
+ i := bytes.LastIndexByte(lines[linenumber-1], ' ')
+ if i < 0 || i >= len(line)-7 {
+ continue
+ }
+ addr := string(lines[linenumber-1][i+1:])
+ if net.ParseIP(addr).To4() != nil {
+ addrs[addr] = true
+ }
+ }
+ return addrs, nil
+}
+
+var (
+ errContainerNotStarted = errors.New("container has not started yet")
+ errCannotFindChild = errors.New("failed to find any process inside the container")
+ // Matches the "PPid:\t<pid>" line in /proc/<pid>/status.
+ reProcStatusPPid = regexp.MustCompile(`\nPPid:\t(\d+)\n`)
+)
+
+// Return the PID of a process that is inside the container (not
+// necessarily the topmost/pid=1 process in the container).
+//
+// Strategy: list all pid namespaces with lsns, then for each namespace
+// leader, walk its parent chain (via the PPid field of
+// /proc/<pid>/status); if the chain passes through our child process
+// (the "singularity exec" we started), that leader is inside our
+// container.
+func (e *singularityExecutor) containedProcess() (int, error) {
+ if e.child == nil || e.child.Process == nil {
+ return 0, errContainerNotStarted
+ }
+ lsns, err := exec.Command("lsns").CombinedOutput()
+ if err != nil {
+ return 0, fmt.Errorf("lsns: %w", err)
+ }
+ for _, line := range bytes.Split(lsns, []byte{'\n'}) {
+ fields := bytes.Fields(line)
+ if len(fields) < 4 {
+ continue
+ }
+ // NOTE(review): assumes default lsns column layout where
+ // field 1 is TYPE and field 3 is PID — verify against the
+ // lsns versions we support.
+ if !bytes.Equal(fields[1], []byte("pid")) {
+ continue
+ }
+ pid, err := strconv.ParseInt(string(fields[3]), 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("error parsing PID field in lsns output: %q", fields[3])
+ }
+ // Walk up the ancestry of pid; stop (and move on to the
+ // next lsns entry) on any read/parse failure.
+ for parent := pid; ; {
+ procstatus, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", parent))
+ if err != nil {
+ break
+ }
+ m := reProcStatusPPid.FindSubmatch(procstatus)
+ if m == nil {
+ break
+ }
+ parent, err = strconv.ParseInt(string(m[1]), 10, 64)
+ if err != nil {
+ break
+ }
+ if int(parent) == e.child.Process.Pid {
+ return int(pid), nil
+ }
+ }
+ }
+ return 0, errCannotFindChild
+}