1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
24 "git.arvados.org/arvados.git/sdk/go/arvados"
// singularityExecutor runs containers via the "singularity"
// command-line tool, converting Docker image tarballs to SIF format
// as needed (see LoadImage).
// NOTE(review): the field list is truncated in this view; methods
// below also reference e.tmpdir, e.spec, and e.child — confirm the
// full struct definition in the original source.
27 type singularityExecutor struct {
28 logf func(string, ...interface{}) // printf-style logger supplied by the caller
29 sudo bool // use sudo to run singularity (only used by tests)
33 imageFilename string // "sif" image
// newSingularityExecutor returns a singularityExecutor that logs via
// logf and uses a fresh "crunch-run-singularity-" temporary directory
// for scratch space (image symlink, build cache, locally built SIF).
// NOTE(review): the TempDir error check and the rest of the struct
// literal are elided in this view.
36 func newSingularityExecutor(logf func(string, ...interface{})) (*singularityExecutor, error) {
37 tmpdir, err := ioutil.TempDir("", "crunch-run-singularity-")
41 return &singularityExecutor{
// Runtime returns the output of "singularity --version" (trailing
// newline stripped) for logging purposes, or a placeholder string if
// the command cannot be run.
47 func (e *singularityExecutor) Runtime() string {
48 buf, err := exec.Command("singularity", "--version").CombinedOutput()
50 return "singularity (unknown version)"
52 return strings.TrimSuffix(string(buf), "\n")
// getOrCreateProject returns the project (group with
// group_class="project") owned by ownerUuid with the given name,
// creating it if it does not already exist.
55 func (e *singularityExecutor) getOrCreateProject(ownerUuid string, name string, containerClient *arvados.Client) (*arvados.Group, error) {
// Look for an existing project matching owner, name, and class.
56 var gp arvados.GroupList
57 err := containerClient.RequestAndDecode(&gp,
58 arvados.EndpointGroupList.Method,
59 arvados.EndpointGroupList.Path,
60 nil, arvados.ListOptions{Filters: []arvados.Filter{
61 arvados.Filter{"owner_uuid", "=", ownerUuid},
62 arvados.Filter{"name", "=", name},
63 arvados.Filter{"group_class", "=", "project"},
// Exactly one match: reuse it. Any other count falls through to
// creation below.
69 if len(gp.Items) == 1 {
70 return &gp.Items[0], nil
// No existing project found — create one with the same owner/name.
73 var rgroup arvados.Group
74 err = containerClient.RequestAndDecode(&rgroup,
75 arvados.EndpointGroupCreate.Method,
76 arvados.EndpointGroupCreate.Path,
77 nil, map[string]interface{}{
78 "group": map[string]string{
79 "owner_uuid": ownerUuid,
81 "group_class": "project",
// checkImageCache locates (or creates) the Keep collection that
// caches the SIF image for dockerImageID, under the container user's
// ".cache" -> "auto-generated singularity images" project hierarchy.
// It returns the collection record; the image file inside it may not
// exist yet (LoadImage builds it if missing).
90 func (e *singularityExecutor) checkImageCache(dockerImageID string, container arvados.Container, arvMountPoint string,
91 containerClient *arvados.Client) (collection *arvados.Collection, err error) {
93 // Cache the image to keep
94 cacheGroup, err := e.getOrCreateProject(container.RuntimeUserUUID, ".cache", containerClient)
96 return nil, fmt.Errorf("error getting '.cache' project: %v", err)
98 imageGroup, err := e.getOrCreateProject(cacheGroup.UUID, "auto-generated singularity images", containerClient)
100 return nil, fmt.Errorf("error getting 'auto-generated singularity images' project: %s", err)
// Look for an existing cached-image collection by name.
103 collectionName := fmt.Sprintf("singularity image for %v", dockerImageID)
104 var cl arvados.CollectionList
105 err = containerClient.RequestAndDecode(&cl,
106 arvados.EndpointCollectionList.Method,
107 arvados.EndpointCollectionList.Path,
108 nil, arvados.ListOptions{Filters: []arvados.Filter{
109 arvados.Filter{"owner_uuid", "=", imageGroup.UUID},
110 arvados.Filter{"name", "=", collectionName},
114 return nil, fmt.Errorf("error querying for collection '%v': %v", collectionName, err)
116 var imageCollection arvados.Collection
117 if len(cl.Items) == 1 {
118 imageCollection = cl.Items[0]
// No cached image yet: create a placeholder collection named
// "converting <name>" that expires in two weeks; LoadImage renames
// it to the final name once the SIF is built. ensure_unique_name
// tolerates concurrent builders racing on the same image.
120 collectionName := "converting " + collectionName
121 exp := time.Now().Add(24 * 7 * 2 * time.Hour)
122 err = containerClient.RequestAndDecode(&imageCollection,
123 arvados.EndpointCollectionCreate.Method,
124 arvados.EndpointCollectionCreate.Path,
125 nil, map[string]interface{}{
126 "collection": map[string]string{
127 "owner_uuid": imageGroup.UUID,
128 "name": collectionName,
129 "trash_at": exp.UTC().Format(time.RFC3339),
131 "ensure_unique_name": true,
134 return nil, fmt.Errorf("error creating '%v' collection: %s", collectionName, err)
138 return &imageCollection, nil
141 // LoadImage will satisfy ContainerExecuter interface transforming
142 // containerImage into a sif file for later use.
//
// When containerClient is non-nil the SIF is built into (and later
// read from) a cached Keep collection via checkImageCache; otherwise
// it is built into the executor's local tmpdir. On success
// e.imageFilename points at the usable SIF.
// NOTE(review): many error-handling branches are elided in this view
// — confirm against the full source before relying on control flow.
143 func (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath string, container arvados.Container, arvMountPoint string,
144 containerClient *arvados.Client) error {
146 var imageFilename string
147 var sifCollection *arvados.Collection
// With an API client, build/read the SIF inside the cache
// collection (addressed by UUID through the arv-mount by_uuid dir);
// without one, fall back to a throwaway file in tmpdir.
149 if containerClient != nil {
150 sifCollection, err = e.checkImageCache(dockerImageID, container, arvMountPoint, containerClient)
154 imageFilename = fmt.Sprintf("%s/by_uuid/%s/image.sif", arvMountPoint, sifCollection.UUID)
156 imageFilename = e.tmpdir + "/image.sif"
// Only build if the SIF does not already exist (i.e. cache miss).
159 if _, err := os.Stat(imageFilename); os.IsNotExist(err) {
160 // Make sure the docker image is readable, and error
162 if _, err := os.Stat(imageTarballPath); err != nil {
166 e.logf("building singularity image")
167 // "singularity build" does not accept a
168 // docker-archive://... filename containing a ":" character,
169 // as in "/path/to/sha256:abcd...1234.tar". Workaround: make a
170 // symlink that doesn't have ":" chars.
171 err := os.Symlink(imageTarballPath, e.tmpdir+"/image.tar")
176 // Set up a cache and tmp dir for singularity build
177 err = os.Mkdir(e.tmpdir+"/cache", 0700)
181 defer os.RemoveAll(e.tmpdir + "/cache")
182 err = os.Mkdir(e.tmpdir+"/tmp", 0700)
186 defer os.RemoveAll(e.tmpdir + "/tmp")
188 build := exec.Command("singularity", "build", imageFilename, "docker-archive://"+e.tmpdir+"/image.tar")
189 build.Env = os.Environ()
190 build.Env = append(build.Env, "SINGULARITY_CACHEDIR="+e.tmpdir+"/cache")
191 build.Env = append(build.Env, "SINGULARITY_TMPDIR="+e.tmpdir+"/tmp")
192 e.logf("%v", build.Args)
193 out, err := build.CombinedOutput()
// Example "singularity build" output, for reference:
194 // INFO: Starting build...
195 // Getting image source signatures
196 // Copying blob ab15617702de done
197 // Copying config 651e02b8a2 done
198 // Writing manifest to image destination
199 // Storing signatures
200 // 2021/04/22 14:42:14 info unpack layer: sha256:21cbfd3a344c52b197b9fa36091e66d9cbe52232703ff78d44734f85abb7ccd3
201 // INFO: Creating SIF file...
202 // INFO: Build complete: arvados-jobs.latest.sif
// Local (uncached) build: the tmpdir file is the final image.
209 if containerClient == nil {
210 e.imageFilename = imageFilename
214 // update TTL to now + two weeks
215 exp := time.Now().Add(24 * 7 * 2 * time.Hour)
217 uuidPath, err := containerClient.PathForUUID("update", sifCollection.UUID)
219 e.logf("error PathForUUID: %v", err)
// Rename the cache collection to its final "singularity image for
// ..." name and refresh its trash_at timestamp.
222 var imageCollection arvados.Collection
223 err = containerClient.RequestAndDecode(&imageCollection,
224 arvados.EndpointCollectionUpdate.Method,
226 nil, map[string]interface{}{
227 "collection": map[string]string{
228 "name": fmt.Sprintf("singularity image for %v", dockerImageID),
229 "trash_at": exp.UTC().Format(time.RFC3339),
233 // If we just wrote the image to the cache, the
234 // response also returns the updated PDH
235 e.imageFilename = fmt.Sprintf("%s/by_id/%s/image.sif", arvMountPoint, imageCollection.PortableDataHash)
239 e.logf("error updating/renaming collection for cached sif image: %v", err)
240 // Failed to update but maybe it lost a race and there is
241 // another cached collection in the same place, so check the cache
243 sifCollection, err = e.checkImageCache(dockerImageID, container, arvMountPoint, containerClient)
247 e.imageFilename = fmt.Sprintf("%s/by_id/%s/image.sif", arvMountPoint, sifCollection.PortableDataHash)
// Create accepts the container spec to run.
// NOTE(review): the method body is not visible here; execCmd later
// reads e.spec, so presumably Create stores spec — confirm against
// the full source.
252 func (e *singularityExecutor) Create(spec containerSpec) error {
// execCmd translates the stored containerSpec into a "singularity
// exec" invocation: network mode, GPU passthrough, cgroup resource
// limits (probed first), bind mounts, and SINGULARITYENV_* variables.
// path is the singularity executable to run.
257 func (e *singularityExecutor) execCmd(path string) *exec.Cmd {
258 args := []string{path, "exec", "--containall", "--cleanenv", "--pwd=" + e.spec.WorkingDir}
259 if !e.spec.EnableNetwork {
260 args = append(args, "--net", "--network=none")
261 } else if u, err := user.Current(); err == nil && u.Uid == "0" || e.sudo {
262 // Specifying --network=bridge fails unless
263 // singularity is running as root.
265 // Note this used to be possible with --fakeroot, or
266 // configuring singularity like so:
268 // singularity config global --set 'allow net networks' bridge
269 // singularity config global --set 'allow net groups' mygroup
271 // However, these options no longer work (as of debian
272 // bookworm) because iptables now refuses to run in a
273 // setuid environment.
274 args = append(args, "--net", "--network=bridge")
276 // If we don't pass a --net argument at all, the
277 // container will be in the same network namespace as
280 // Note this allows the container to listen on the
281 // host's external ports.
// GPU support: --nv exposes nvidia devices to the container.
283 if e.spec.CUDADeviceCount != 0 {
284 args = append(args, "--nv")
287 // If we ask for resource limits that aren't supported,
288 // singularity will not run the container at all. So we probe
289 // for support first, and only apply the limits that appear to
292 // Default debian configuration lets non-root users set memory
293 // limits but not CPU limits, so we enable/disable those
294 // limits independently.
296 // https://rootlesscontaine.rs/getting-started/common/cgroup2/
297 checkCgroupSupport(e.logf)
298 if e.spec.VCPUs > 0 {
299 if cgroupSupport["cpu"] {
300 args = append(args, "--cpus", fmt.Sprintf("%d", e.spec.VCPUs))
302 e.logf("cpu limits are not supported by current systemd/cgroup configuration, not setting --cpu %d", e.spec.VCPUs)
306 if cgroupSupport["memory"] {
307 args = append(args, "--memory", fmt.Sprintf("%d", e.spec.RAM))
309 e.logf("memory limits are not supported by current systemd/cgroup configuration, not setting --memory %d", e.spec.RAM)
// Maps mount.ReadOnly to the ":ro"/":rw"-style suffix appended to
// each --bind argument below (map values elided in this view).
313 readonlyflag := map[bool]string{
// Collect bind mount target paths for iteration.
// NOTE(review): a sorting step between these loops is presumably
// elided — confirm iteration order in the full source.
318 for path, _ := range e.spec.BindMounts {
319 binds = append(binds, path)
322 for _, path := range binds {
323 mount := e.spec.BindMounts[path]
324 if path == e.spec.Env["HOME"] {
325 // Singularity treats $HOME as special case
326 args = append(args, "--home", mount.HostPath+":"+path)
328 args = append(args, "--bind", mount.HostPath+":"+path+":"+readonlyflag[mount.ReadOnly])
332 // This is for singularity 3.5.2. There are some behaviors
333 // that will change in singularity 3.6, please see:
334 // https://sylabs.io/guides/3.7/user-guide/environment_and_metadata.html
335 // https://sylabs.io/guides/3.5/user-guide/environment_and_metadata.html
336 env := make([]string, 0, len(e.spec.Env))
337 for k, v := range e.spec.Env {
339 // Singularity treats $HOME as special case,
340 // this is handled with --home above
343 env = append(env, "SINGULARITYENV_"+k+"="+v)
346 // Singularity always makes all nvidia devices visible to the
347 // container. If a resource manager such as slurm or LSF told
348 // us to select specific devices we need to propagate that.
349 if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
350 // If a resource manager such as slurm or LSF told
351 // us to select specific devices we need to propagate that.
352 env = append(env, "SINGULARITYENV_CUDA_VISIBLE_DEVICES="+cudaVisibleDevices)
354 // Singularity's default behavior is to evaluate each
355 // SINGULARITYENV_* env var with a shell as a double-quoted
356 // string and pass the result to the contained
357 // process. Singularity 3.10+ has an option to pass env vars
358 // through literally without evaluating, which is what we
359 // want. See https://github.com/sylabs/singularity/pull/704
360 // and https://dev.arvados.org/issues/19081
361 env = append(env, "SINGULARITY_NO_EVAL=1")
363 // If we don't propagate XDG_RUNTIME_DIR and
364 // DBUS_SESSION_BUS_ADDRESS, singularity resource limits fail
365 // with "FATAL: container creation failed: while applying
366 // cgroups config: system configuration does not support
367 // cgroup management" or "FATAL: container creation failed:
368 // while applying cgroups config: rootless cgroups require a
369 // D-Bus session - check that XDG_RUNTIME_DIR and
370 // DBUS_SESSION_BUS_ADDRESS are set".
371 env = append(env, "XDG_RUNTIME_DIR="+os.Getenv("XDG_RUNTIME_DIR"))
372 env = append(env, "DBUS_SESSION_BUS_ADDRESS="+os.Getenv("DBUS_SESSION_BUS_ADDRESS"))
// The image file and the command to run come last on the command
// line; stdout/stderr are wired straight to the spec's streams.
374 args = append(args, e.imageFilename)
375 args = append(args, e.spec.Command...)
382 Stdout: e.spec.Stdout,
383 Stderr: e.spec.Stderr,
// Start locates the singularity binary, builds the child command via
// execCmd, and starts it. The Path/Args rewrite below prepends sudo
// so the original command runs under it.
// NOTE(review): the rewrite is presumably guarded by e.sudo and the
// child is stored in e.child — guard and Start() call are elided in
// this view; confirm against the full source.
387 func (e *singularityExecutor) Start() error {
388 path, err := exec.LookPath("singularity")
392 child := e.execCmd(path)
394 child.Args = append([]string{child.Path}, child.Args...)
395 child.Path, err = exec.LookPath("sudo")
// Pid returns the PID of a process inside the container, as found by
// containedProcess.
// NOTE(review): error-handling/return lines are elided here —
// presumably returns 0 when containedProcess fails; confirm.
408 func (e *singularityExecutor) Pid() int {
409 childproc, err := e.containedProcess()
// Stop kills the container's child process with SIGKILL. It is a
// no-op (visible early-out branches, return lines elided) when the
// process was never started or has already exited — signal 0 is the
// standard liveness probe.
416 func (e *singularityExecutor) Stop() error {
417 if e.child == nil || e.child.Process == nil {
418 // no process started, or Wait already called
421 if err := e.child.Process.Signal(syscall.Signal(0)); err != nil {
422 // process already exited
425 return e.child.Process.Signal(syscall.SIGKILL)
// Wait waits for the child process to exit and returns its exit
// code. A non-zero exit (*exec.ExitError) is translated into its
// exit code with a nil error, so callers see exit status rather than
// an error for normal container failures.
// NOTE(review): handling of other (non-ExitError) errors is elided
// in this view; the ctx parameter is unused in the visible code.
428 func (e *singularityExecutor) Wait(context.Context) (int, error) {
429 err := e.child.Wait()
430 if err, ok := err.(*exec.ExitError); ok {
431 return err.ProcessState.ExitCode(), nil
436 return e.child.ProcessState.ExitCode(), nil
// Close removes the executor's temporary directory. A removal error
// is logged but deliberately not returned — cleanup is best-effort.
439 func (e *singularityExecutor) Close() {
440 err := os.RemoveAll(e.tmpdir)
442 e.logf("error removing temp dir: %s", err)
// InjectCommand returns a command that runs injectcmd inside the
// container's namespaces, using nsenter targeted at a PID found
// inside the container (--all enters every namespace of the target).
// detachKeys, username, and usingTTY are not used in the code visible
// here; they exist for interface compatibility.
446 func (e *singularityExecutor) InjectCommand(ctx context.Context, detachKeys, username string, usingTTY bool, injectcmd []string) (*exec.Cmd, error) {
447 target, err := e.containedProcess()
451 return exec.CommandContext(ctx, "nsenter", append([]string{fmt.Sprintf("--target=%d", target), "--all"}, injectcmd...)...), nil
// errContainerHasNoIPAddress is returned by IPAddress when every IP
// visible to the container is also an IP of the host.
455 errContainerHasNoIPAddress = errors.New("container has no IP address distinct from host")
// IPAddress returns an IP address belonging to the container but not
// to the host, by diffing the IP sets of a process inside the
// container and of this (host) process. Returns
// errContainerHasNoIPAddress when no such address exists (e.g. the
// container shares the host network namespace).
458 func (e *singularityExecutor) IPAddress() (string, error) {
459 target, err := e.containedProcess()
463 targetIPs, err := processIPs(target)
467 selfIPs, err := processIPs(os.Getpid())
// Return the first container IP not present in our own set
// (selection branch elided in this view).
471 for ip := range targetIPs {
476 return "", errContainerHasNoIPAddress
// processIPs returns the set of local IPv4 addresses visible to the
// given process, parsed from /proc/<pid>/net/fib_trie: each
// ".../32 host LOCAL" leaf's address appears at the end of the
// preceding line.
479 func processIPs(pid int) (map[string]bool, error) {
480 fibtrie, err := os.ReadFile(fmt.Sprintf("/proc/%d/net/fib_trie", pid))
485 addrs := map[string]bool{}
486 // When we see a pair of lines like this:
491 // ...we set addrs["10.1.2.3"] = true
492 lines := bytes.Split(fibtrie, []byte{'\n'})
493 for linenumber, line := range lines {
494 if !bytes.HasSuffix(line, []byte("/32 host LOCAL")) {
// The address is the last space-separated token of the previous
// line; sanity-check the index before slicing.
500 i := bytes.LastIndexByte(lines[linenumber-1], ' ')
501 if i < 0 || i >= len(line)-7 {
504 addr := string(lines[linenumber-1][i+1:])
// Keep only valid IPv4 addresses.
505 if net.ParseIP(addr).To4() != nil {
// errContainerNotStarted: containedProcess was called before Start.
513 errContainerNotStarted = errors.New("container has not started yet")
// errCannotFindChild: no lsns pid-namespace entry traced back to our
// child process.
514 errCannotFindChild = errors.New("failed to find any process inside the container")
// reProcStatusPPid extracts the parent PID from a /proc/<pid>/status
// "PPid:\t<n>" line.
515 reProcStatusPPid = regexp.MustCompile(`\nPPid:\t(\d+)\n`)
518 // Return the PID of a process that is inside the container (not
519 // necessarily the topmost/pid=1 process in the container).
//
// Strategy: list pid namespaces via lsns, then for each candidate
// PID walk its PPid chain in /proc/<pid>/status; a candidate whose
// ancestry reaches our child process is inside the container.
520 func (e *singularityExecutor) containedProcess() (int, error) {
521 if e.child == nil || e.child.Process == nil {
522 return 0, errContainerNotStarted
// When the container was started via sudo, lsns must also run under
// sudo to see the root-owned namespaces.
524 cmd := exec.Command("lsns")
526 cmd = exec.Command("sudo", "lsns")
528 lsns, err := cmd.CombinedOutput()
530 return 0, fmt.Errorf("lsns: %w", err)
532 for _, line := range bytes.Split(lsns, []byte{'\n'}) {
533 fields := bytes.Fields(line)
// Only consider "pid" namespace rows; fields[3] is the PID column.
537 if !bytes.Equal(fields[1], []byte("pid")) {
540 pid, err := strconv.ParseInt(string(fields[3]), 10, 64)
542 return 0, fmt.Errorf("error parsing PID field in lsns output: %q", fields[3])
// Walk up the parent chain; if we reach our own child PID, this
// lsns entry is a process inside our container.
544 for parent := pid; ; {
545 procstatus, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", parent))
549 m := reProcStatusPPid.FindSubmatch(procstatus)
553 parent, err = strconv.ParseInt(string(m[1]), 10, 64)
557 if int(parent) == e.child.Process.Pid {
562 return 0, errCannotFindChild