package crunchrun
import (
+ "bytes"
+ "context"
+ "errors"
"fmt"
"io/ioutil"
+ "net"
"os"
"os/exec"
+ "os/user"
+ "regexp"
"sort"
+ "strconv"
+ "strings"
"syscall"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "golang.org/x/net/context"
)
type singularityExecutor struct {
logf func(string, ...interface{})
+ fakeroot bool // use --fakeroot flag, allow --network=bridge when non-root (currently only used by tests)
spec containerSpec
tmpdir string
child *exec.Cmd
}, nil
}
-func (e *singularityExecutor) Runtime() string { return "singularity" }
+func (e *singularityExecutor) Runtime() string {
+ buf, err := exec.Command("singularity", "--version").CombinedOutput()
+ if err != nil {
+ return "singularity (unknown version)"
+ }
+ return strings.TrimSuffix(string(buf), "\n")
+}
func (e *singularityExecutor) getOrCreateProject(ownerUuid string, name string, containerClient *arvados.Client) (*arvados.Group, error) {
var gp arvados.GroupList
return nil
}
-func (e *singularityExecutor) Start() error {
- args := []string{"singularity", "exec", "--containall", "--cleanenv", "--pwd", e.spec.WorkingDir}
+func (e *singularityExecutor) execCmd(path string) *exec.Cmd {
+ args := []string{path, "exec", "--containall", "--cleanenv", "--pwd=" + e.spec.WorkingDir}
+ if e.fakeroot {
+ args = append(args, "--fakeroot")
+ }
if !e.spec.EnableNetwork {
args = append(args, "--net", "--network=none")
+ } else if u, err := user.Current(); err == nil && u.Uid == "0" || e.fakeroot {
+ // Specifying --network=bridge fails unless (a) we are
+ // root, (b) we are using --fakeroot, or (c)
+ // singularity has been configured to allow our
+ // uid/gid to use it like so:
+ //
+ // singularity config global --set 'allow net networks' bridge
+ // singularity config global --set 'allow net groups' mygroup
+ args = append(args, "--net", "--network=bridge")
+ }
+ if e.spec.CUDADeviceCount != 0 {
+ args = append(args, "--nv")
+ }
+
+ // If we ask for resource limits that aren't supported,
+ // singularity will not run the container at all. So we probe
+ // for support first, and only apply the limits that appear to
+ // be supported.
+ //
+ // Default debian configuration lets non-root users set memory
+ // limits but not CPU limits, so we enable/disable those
+ // limits independently.
+ //
+ // https://rootlesscontaine.rs/getting-started/common/cgroup2/
+ checkCgroupSupport(e.logf)
+ if e.spec.VCPUs > 0 {
+ if cgroupSupport["cpu"] {
+ args = append(args, "--cpus", fmt.Sprintf("%d", e.spec.VCPUs))
+ } else {
+ e.logf("cpu limits are not supported by current systemd/cgroup configuration, not setting --cpu %d", e.spec.VCPUs)
+ }
}
+ if e.spec.RAM > 0 {
+ if cgroupSupport["memory"] {
+ args = append(args, "--memory", fmt.Sprintf("%d", e.spec.RAM))
+ } else {
+ e.logf("memory limits are not supported by current systemd/cgroup configuration, not setting --memory %d", e.spec.RAM)
+ }
+ }
+
readonlyflag := map[bool]string{
false: "rw",
true: "ro",
for _, path := range binds {
mount := e.spec.BindMounts[path]
if path == e.spec.Env["HOME"] {
- // Singularity treates $HOME as special case
+ // Singularity treats $HOME as special case
args = append(args, "--home", mount.HostPath+":"+path)
} else {
args = append(args, "--bind", mount.HostPath+":"+path+":"+readonlyflag[mount.ReadOnly])
env := make([]string, 0, len(e.spec.Env))
for k, v := range e.spec.Env {
if k == "HOME" {
- // Singularity treates $HOME as special case, this is handled
- // with --home above
+ // Singularity treats $HOME as special case,
+ // this is handled with --home above
continue
}
env = append(env, "SINGULARITYENV_"+k+"="+v)
}
+ // Singularity always makes all nvidia devices visible to the
+ // container. If a resource manager such as slurm or LSF told
+ // us to select specific devices we need to propagate that.
+ if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
+ // If a resource manager such as slurm or LSF told
+ // us to select specific devices we need to propagate that.
+ env = append(env, "SINGULARITYENV_CUDA_VISIBLE_DEVICES="+cudaVisibleDevices)
+ }
+ // Singularity's default behavior is to evaluate each
+ // SINGULARITYENV_* env var with a shell as a double-quoted
+ // string and pass the result to the contained
+ // process. Singularity 3.10+ has an option to pass env vars
+ // through literally without evaluating, which is what we
+ // want. See https://github.com/sylabs/singularity/pull/704
+ // and https://dev.arvados.org/issues/19081
+ env = append(env, "SINGULARITY_NO_EVAL=1")
+
+ // If we don't propagate XDG_RUNTIME_DIR and
+ // DBUS_SESSION_BUS_ADDRESS, singularity resource limits fail
+ // with "FATAL: container creation failed: while applying
+ // cgroups config: system configuration does not support
+ // cgroup management" or "FATAL: container creation failed:
+ // while applying cgroups config: rootless cgroups require a
+ // D-Bus session - check that XDG_RUNTIME_DIR and
+ // DBUS_SESSION_BUS_ADDRESS are set".
+ env = append(env, "XDG_RUNTIME_DIR="+os.Getenv("XDG_RUNTIME_DIR"))
+ env = append(env, "DBUS_SESSION_BUS_ADDRESS="+os.Getenv("DBUS_SESSION_BUS_ADDRESS"))
+
args = append(args, e.imageFilename)
args = append(args, e.spec.Command...)
- path, err := exec.LookPath(args[0])
- if err != nil {
- return err
- }
- child := &exec.Cmd{
+ return &exec.Cmd{
Path: path,
Args: args,
Env: env,
Stdout: e.spec.Stdout,
Stderr: e.spec.Stderr,
}
+}
+
+func (e *singularityExecutor) Start() error {
+ path, err := exec.LookPath("singularity")
+ if err != nil {
+ return err
+ }
+ child := e.execCmd(path)
err = child.Start()
if err != nil {
return err
return nil
}
-func (e *singularityExecutor) CgroupID() string {
- return ""
+func (e *singularityExecutor) Pid() int {
+ childproc, err := e.containedProcess()
+ if err != nil {
+ return 0
+ }
+ return childproc
}
func (e *singularityExecutor) Stop() error {
+ if e.child == nil || e.child.Process == nil {
+ // no process started, or Wait already called
+ return nil
+ }
if err := e.child.Process.Signal(syscall.Signal(0)); err != nil {
// process already exited
return nil
e.logf("error removing temp dir: %s", err)
}
}
+
+func (e *singularityExecutor) InjectCommand(ctx context.Context, detachKeys, username string, usingTTY bool, injectcmd []string) (*exec.Cmd, error) {
+ target, err := e.containedProcess()
+ if err != nil {
+ return nil, err
+ }
+ return exec.CommandContext(ctx, "nsenter", append([]string{fmt.Sprintf("--target=%d", target), "--all"}, injectcmd...)...), nil
+}
+
var (
	// errContainerHasNoIPAddress is returned by IPAddress when
	// every IP address visible to the contained process is also an
	// address of the host process.
	errContainerHasNoIPAddress = errors.New("container has no IP address distinct from host")
)
+
+func (e *singularityExecutor) IPAddress() (string, error) {
+ target, err := e.containedProcess()
+ if err != nil {
+ return "", err
+ }
+ targetIPs, err := processIPs(target)
+ if err != nil {
+ return "", err
+ }
+ selfIPs, err := processIPs(os.Getpid())
+ if err != nil {
+ return "", err
+ }
+ for ip := range targetIPs {
+ if !selfIPs[ip] {
+ return ip, nil
+ }
+ }
+ return "", errContainerHasNoIPAddress
+}
+
// processIPs returns the set of local IPv4 addresses in the network
// namespace of the given process, as reported by
// /proc/<pid>/net/fib_trie.
func processIPs(pid int) (map[string]bool, error) {
	raw, err := os.ReadFile(fmt.Sprintf("/proc/%d/net/fib_trie", pid))
	if err != nil {
		return nil, err
	}

	found := map[string]bool{}
	// A local address appears in fib_trie as a pair of lines:
	//
	//    |-- 10.1.2.3
	//         /32 host LOCAL
	//
	// When the second line is seen, extract the address from the
	// line before it.
	lines := bytes.Split(raw, []byte{'\n'})
	for n, line := range lines {
		if !bytes.HasSuffix(line, []byte("/32 host LOCAL")) || n < 1 {
			continue
		}
		prev := lines[n-1]
		i := bytes.LastIndexByte(prev, ' ')
		// NOTE(review): the upper bound compares an index into
		// the previous line against len(line)-7 of the current
		// line — looks odd; preserved as-is, confirm intent.
		if i < 0 || i >= len(line)-7 {
			continue
		}
		addr := string(prev[i+1:])
		if net.ParseIP(addr).To4() != nil {
			found[addr] = true
		}
	}
	return found, nil
}
+
var (
	// errContainerNotStarted: Start has not been called, or the
	// child process is gone (Wait already called).
	errContainerNotStarted = errors.New("container has not started yet")
	// errCannotFindChild: no pid namespace descended from our
	// child process could be found.
	errCannotFindChild = errors.New("failed to find any process inside the container")
	// reProcStatusPPid extracts the parent PID from the "PPid:"
	// line of /proc/<pid>/status.
	reProcStatusPPid = regexp.MustCompile(`\nPPid:\t(\d+)\n`)
)
+
+// Return the PID of a process that is inside the container (not
+// necessarily the topmost/pid=1 process in the container).
+func (e *singularityExecutor) containedProcess() (int, error) {
+ if e.child == nil || e.child.Process == nil {
+ return 0, errContainerNotStarted
+ }
+ lsns, err := exec.Command("lsns").CombinedOutput()
+ if err != nil {
+ return 0, fmt.Errorf("lsns: %w", err)
+ }
+ for _, line := range bytes.Split(lsns, []byte{'\n'}) {
+ fields := bytes.Fields(line)
+ if len(fields) < 4 {
+ continue
+ }
+ if !bytes.Equal(fields[1], []byte("pid")) {
+ continue
+ }
+ pid, err := strconv.ParseInt(string(fields[3]), 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("error parsing PID field in lsns output: %q", fields[3])
+ }
+ for parent := pid; ; {
+ procstatus, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", parent))
+ if err != nil {
+ break
+ }
+ m := reProcStatusPPid.FindSubmatch(procstatus)
+ if m == nil {
+ break
+ }
+ parent, err = strconv.ParseInt(string(m[1]), 10, 64)
+ if err != nil {
+ break
+ }
+ if int(parent) == e.child.Process.Pid {
+ return int(pid), nil
+ }
+ }
+ }
+ return 0, errCannotFindChild
+}