// Copyright (C) The Arvados Authors. All rights reserved.
// SPDX-License-Identifier: AGPL-3.0
	"git.arvados.org/arvados.git/sdk/go/arvados"
type singularityExecutor struct {
	logf func(string, ...interface{})
	sudo bool // use sudo to run singularity (only used by tests)
	imageFilename string // "sif" image
func newSingularityExecutor(logf func(string, ...interface{})) (*singularityExecutor, error) {
	tmpdir, err := ioutil.TempDir("", "crunch-run-singularity-")
	sudo := os.Getenv("ARVADOS_TEST_PRIVESC") == "sudo" && strings.Contains(os.Args[0], "crunch-run~")
	return &singularityExecutor{
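// Runtime returns the version string reported by "singularity
// --version", for logging purposes.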
func (e *singularityExecutor) Runtime() string {
	buf, err := exec.Command("singularity", "--version").CombinedOutput()
		return "singularity (unknown version)"
	return strings.TrimSuffix(string(buf), "\n")
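// getOrCreateProject returns the project with the given owner and
// name, creating it if it does not already exist.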
func (e *singularityExecutor) getOrCreateProject(ownerUuid string, name string, containerClient *arvados.Client) (*arvados.Group, error) {
	var gp arvados.GroupList
	err := containerClient.RequestAndDecode(&gp,
		arvados.EndpointGroupList.Method,
		arvados.EndpointGroupList.Path,
		nil, arvados.ListOptions{Filters: []arvados.Filter{
			arvados.Filter{"owner_uuid", "=", ownerUuid},
			arvados.Filter{"name", "=", name},
			arvados.Filter{"group_class", "=", "project"},
	if len(gp.Items) == 1 {
		return &gp.Items[0], nil
	var rgroup arvados.Group
	err = containerClient.RequestAndDecode(&rgroup,
		arvados.EndpointGroupCreate.Method,
		arvados.EndpointGroupCreate.Path,
		nil, map[string]interface{}{
			"group": map[string]string{
				"owner_uuid": ownerUuid,
				"group_class": "project",
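// getImageCacheProject returns the project used to cache converted
// singularity images: "auto-generated singularity images" inside the
// user's ".cache" project, creating either project if needed.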
func (e *singularityExecutor) getImageCacheProject(userUUID string, containerClient *arvados.Client) (*arvados.Group, error) {
	cacheProject, err := e.getOrCreateProject(userUUID, ".cache", containerClient)
		return nil, fmt.Errorf("error getting '.cache' project: %v", err)
	imageProject, err := e.getOrCreateProject(cacheProject.UUID, "auto-generated singularity images", containerClient)
		return nil, fmt.Errorf("error getting 'auto-generated singularity images' project: %s", err)
	return imageProject, nil
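// imageCacheExp returns the trash_at time to set on cached image
// collections: imageCacheTTL from now.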
func (e *singularityExecutor) imageCacheExp() time.Time {
	return time.Now().Add(e.imageCacheTTL()).UTC()
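// imageCacheTTL returns how long a cached image collection is
// retained after it was last used (two weeks).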
func (e *singularityExecutor) imageCacheTTL() time.Duration {
	return 24 * 7 * 2 * time.Hour
// getCacheCollection returns an existing collection with a cached
// singularity image with the given name, or nil if none exists.
// Note that if there is no existing collection, this is not
// considered an error -- all return values will be nil/empty.
func (e *singularityExecutor) getCacheCollection(collectionName string, containerClient *arvados.Client, cacheProject *arvados.Group, arvMountPoint string) (collection *arvados.Collection, imageFile string, err error) {
	var cl arvados.CollectionList
	err = containerClient.RequestAndDecode(&cl,
		arvados.EndpointCollectionList.Method,
		arvados.EndpointCollectionList.Path,
		nil, arvados.ListOptions{Filters: []arvados.Filter{
			arvados.Filter{"owner_uuid", "=", cacheProject.UUID},
			arvados.Filter{"name", "=", collectionName},
		return nil, "", fmt.Errorf("error querying for collection %q in project %s: %w", collectionName, cacheProject.UUID, err)
	if len(cl.Items) == 0 {
		// Successfully discovered that there's no cached
		// image collection.
	// Check that the collection actually contains an "image.sif"
	// file. If not, we can't use it, and trying to create a new
	// cache collection will probably fail too, so the caller
	// should not bother trying.
	sifFile := path.Join(arvMountPoint, "by_id", coll.PortableDataHash, "image.sif")
	_, err = os.Stat(sifFile)
		return nil, "", fmt.Errorf("found collection %s (%s), but it did not contain an image file: %s", coll.UUID, coll.PortableDataHash, err)
	if coll.TrashAt != nil && coll.TrashAt.Sub(time.Now()) < e.imageCacheTTL()*9/10 {
		// If the remaining TTL is less than 90% of our target
		// TTL, extend trash_at. This avoids prematurely
		// trashing and re-converting images that are being
		// used regularly.
		err = containerClient.RequestAndDecode(nil,
			arvados.EndpointCollectionUpdate.Method,
			"arvados/v1/collections/"+coll.UUID,
			nil, map[string]interface{}{
				"collection": map[string]string{
					"trash_at": e.imageCacheExp().Format(time.RFC3339),
			e.logf("could not update expiry time of cached image collection (proceeding anyway): %s", err)
	return &coll, sifFile, nil
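// createCacheCollection creates a new, uniquely named collection in
// the cache project, with trash_at set imageCacheTTL in the future.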
func (e *singularityExecutor) createCacheCollection(collectionName string, containerClient *arvados.Client, cacheProject *arvados.Group) (*arvados.Collection, error) {
	var coll arvados.Collection
	err := containerClient.RequestAndDecode(&coll,
		arvados.EndpointCollectionCreate.Method,
		arvados.EndpointCollectionCreate.Path,
		nil, map[string]interface{}{
			"collection": map[string]string{
				"owner_uuid": cacheProject.UUID,
				"name":       collectionName,
				"trash_at":   e.imageCacheExp().Format(time.RFC3339),
			"ensure_unique_name": true,
		return nil, fmt.Errorf("error creating '%v' collection: %s", collectionName, err)
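// convertDockerImage converts the docker image tarball at srcPath to
// a singularity image at dstPath by invoking "singularity build".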
func (e *singularityExecutor) convertDockerImage(srcPath, dstPath string) error {
	// Make sure the docker image is readable.
	if _, err := os.Stat(srcPath); err != nil {
	e.logf("building singularity image")
	// "singularity build" does not accept a
	// docker-archive://... filename containing a ":" character,
	// as in "/path/to/sha256:abcd...1234.tar". Workaround: make a
	// symlink that doesn't have ":" chars.
	err := os.Symlink(srcPath, e.tmpdir+"/image.tar")
	// Set up a cache and tmp dir for singularity build
	err = os.Mkdir(e.tmpdir+"/cache", 0700)
	defer os.RemoveAll(e.tmpdir + "/cache")
	err = os.Mkdir(e.tmpdir+"/tmp", 0700)
	defer os.RemoveAll(e.tmpdir + "/tmp")
	build := exec.Command("singularity", "build", dstPath, "docker-archive://"+e.tmpdir+"/image.tar")
	build.Env = os.Environ()
	build.Env = append(build.Env, "SINGULARITY_CACHEDIR="+e.tmpdir+"/cache")
	build.Env = append(build.Env, "SINGULARITY_TMPDIR="+e.tmpdir+"/tmp")
	e.logf("%v", build.Args)
	out, err := build.CombinedOutput()
	// INFO: Starting build...
	// Getting image source signatures
	// Copying blob ab15617702de done
	// Copying config 651e02b8a2 done
	// Writing manifest to image destination
	// Storing signatures
	// 2021/04/22 14:42:14 info unpack layer: sha256:21cbfd3a344c52b197b9fa36091e66d9cbe52232703ff78d44734f85abb7ccd3
	// INFO: Creating SIF file...
	// INFO: Build complete: arvados-jobs.latest.sif
// LoadImage converts the given docker image to a singularity
// image, and records the resulting file in e.imageFilename.
//
// If containerClient is not nil, LoadImage first tries to use an
// existing image (in Home -> .cache -> auto-generated singularity
// images) and, if none was found there and the image was converted on
// the fly, tries to save the converted image to the cache so it can
// be reused next time.
//
// If containerClient is nil or a cache project/collection cannot be
// found or created, LoadImage converts the image on the fly and
// writes it to the local filesystem instead.
func (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath string, container arvados.Container, arvMountPoint string, containerClient *arvados.Client) error {
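	// convertWithoutCache logs the given cache-setup error, if
	// any, and falls back to converting the image into e.tmpdir.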
	convertWithoutCache := func(err error) error {
			e.logf("cannot use singularity image cache: %s", err)
		e.imageFilename = path.Join(e.tmpdir, "image.sif")
		return e.convertDockerImage(imageTarballPath, e.imageFilename)
	if containerClient == nil {
		return convertWithoutCache(nil)
	cacheProject, err := e.getImageCacheProject(container.RuntimeUserUUID, containerClient)
		return convertWithoutCache(err)
	cacheCollectionName := fmt.Sprintf("singularity image for %s", dockerImageID)
	existingCollection, sifFile, err := e.getCacheCollection(cacheCollectionName, containerClient, cacheProject, arvMountPoint)
		return convertWithoutCache(err)
	if existingCollection != nil {
		e.imageFilename = sifFile
	newCollection, err := e.createCacheCollection("converting "+cacheCollectionName, containerClient, cacheProject)
		return convertWithoutCache(err)
	dstDir := path.Join(arvMountPoint, "by_uuid", newCollection.UUID)
	dstFile := path.Join(dstDir, "image.sif")
	err = e.convertDockerImage(imageTarballPath, dstFile)
	buf, err := os.ReadFile(path.Join(dstDir, ".arvados#collection"))
		return fmt.Errorf("could not sync image collection: %w", err)
	var synced arvados.Collection
	err = json.Unmarshal(buf, &synced)
		return fmt.Errorf("could not parse .arvados#collection: %w", err)
	e.logf("saved converted image in %s with PDH %s", newCollection.UUID, synced.PortableDataHash)
	e.imageFilename = path.Join(arvMountPoint, "by_id", synced.PortableDataHash, "image.sif")
	if errRename := containerClient.RequestAndDecode(nil,
		arvados.EndpointCollectionUpdate.Method,
		"arvados/v1/collections/"+newCollection.UUID,
		nil, map[string]interface{}{
			"collection": map[string]string{
				"name": cacheCollectionName,
		}); errRename != nil {
		// Error is probably a name collision caused by another
		// crunch-run process converting the same image
		// concurrently. In that case, we prefer to use the one
		// that won the race -- the resulting images should be
		// equivalent, but if they do differ at all, it's better
		// if all containers use the same image.
		if existingCollection, sifFile, err := e.getCacheCollection(cacheCollectionName, containerClient, cacheProject, arvMountPoint); err == nil && existingCollection != nil {
			e.logf("lost race -- abandoning our conversion in %s (%s) and using image from %s (%s) instead", newCollection.UUID, synced.PortableDataHash, existingCollection.UUID, existingCollection.PortableDataHash)
			e.imageFilename = sifFile
			e.logf("using newly converted image anyway, despite error renaming collection: %v", errRename)
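// Create stores the given container spec; the actual "singularity
// exec" command line is built from it later by execCmd.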
func (e *singularityExecutor) Create(spec containerSpec) error {
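// execCmd builds the "singularity exec" command line for the stored
// container spec.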
func (e *singularityExecutor) execCmd(path string) *exec.Cmd {
	args := []string{path, "exec", "--containall", "--cleanenv", "--pwd=" + e.spec.WorkingDir}
	if !e.spec.EnableNetwork {
		args = append(args, "--net", "--network=none")
	} else if u, err := user.Current(); (err == nil && u.Uid == "0") || e.sudo {
		// Specifying --network=bridge fails unless
		// singularity is running as root.
		//
		// Note this used to be possible with --fakeroot, or
		// configuring singularity like so:
		//
		// singularity config global --set 'allow net networks' bridge
		// singularity config global --set 'allow net groups' mygroup
		//
		// However, these options no longer work (as of debian
		// bookworm) because iptables now refuses to run in a
		// setuid environment.
		args = append(args, "--net", "--network=bridge")
		// If we don't pass a --net argument at all, the
		// container will be in the same network namespace as
		// the host.
		//
		// Note this allows the container to listen on the
		// host's external ports.
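	// Pass through requested GPUs: --nv for CUDA, --rocm for AMD.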
	if e.spec.GPUStack == "cuda" && e.spec.GPUDeviceCount > 0 {
		args = append(args, "--nv")
	if e.spec.GPUStack == "rocm" && e.spec.GPUDeviceCount > 0 {
		args = append(args, "--rocm")
	// If we ask for resource limits that aren't supported,
	// singularity will not run the container at all. So we probe
	// for support first, and only apply the limits that appear to
	// be supported.
	//
	// Default debian configuration lets non-root users set memory
	// limits but not CPU limits, so we enable/disable those
	// limits independently.
	//
	// https://rootlesscontaine.rs/getting-started/common/cgroup2/
	checkCgroupSupport(e.logf)
	if e.spec.VCPUs > 0 {
		if cgroupSupport["cpu"] {
			args = append(args, "--cpus", fmt.Sprintf("%d", e.spec.VCPUs))
			e.logf("cpu limits are not supported by current systemd/cgroup configuration, not setting --cpus %d", e.spec.VCPUs)
		if cgroupSupport["memory"] {
			args = append(args, "--memory", fmt.Sprintf("%d", e.spec.RAM))
			e.logf("memory limits are not supported by current systemd/cgroup configuration, not setting --memory %d", e.spec.RAM)
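	// Set up bind mounts: $HOME is passed via --home, everything
	// else via --bind with the appropriate ro/rw flag.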
	readonlyflag := map[bool]string{
	for path := range e.spec.BindMounts {
		binds = append(binds, path)
	for _, path := range binds {
		mount := e.spec.BindMounts[path]
		if path == e.spec.Env["HOME"] {
			// Singularity treats $HOME as a special case.
			args = append(args, "--home", mount.HostPath+":"+path)
			args = append(args, "--bind", mount.HostPath+":"+path+":"+readonlyflag[mount.ReadOnly])
	// This is for singularity 3.5.2. There are some behaviors
	// that will change in singularity 3.6; please see:
	// https://sylabs.io/guides/3.7/user-guide/environment_and_metadata.html
	// https://sylabs.io/guides/3.5/user-guide/environment_and_metadata.html
	env := make([]string, 0, len(e.spec.Env))
	for k, v := range e.spec.Env {
		// Singularity treats $HOME as a special case, which is
		// handled with --home above.
		env = append(env, "SINGULARITYENV_"+k+"="+v)
	// Singularity always makes all nvidia devices visible to the
	// container. If a resource manager such as slurm or LSF told
	// us to select specific devices we need to propagate that.
	if cudaVisibleDevices := os.Getenv("CUDA_VISIBLE_DEVICES"); cudaVisibleDevices != "" {
		env = append(env, "SINGULARITYENV_CUDA_VISIBLE_DEVICES="+cudaVisibleDevices)
	// Singularity's default behavior is to evaluate each
	// SINGULARITYENV_* env var with a shell as a double-quoted
	// string and pass the result to the contained
	// process. Singularity 3.10+ has an option to pass env vars
	// through literally without evaluating, which is what we
	// want. See https://github.com/sylabs/singularity/pull/704
	// and https://dev.arvados.org/issues/19081
	env = append(env, "SINGULARITY_NO_EVAL=1")
	// If we don't propagate XDG_RUNTIME_DIR and
	// DBUS_SESSION_BUS_ADDRESS, singularity resource limits fail
	// with "FATAL: container creation failed: while applying
	// cgroups config: system configuration does not support
	// cgroup management" or "FATAL: container creation failed:
	// while applying cgroups config: rootless cgroups require a
	// D-Bus session - check that XDG_RUNTIME_DIR and
	// DBUS_SESSION_BUS_ADDRESS are set".
	env = append(env, "XDG_RUNTIME_DIR="+os.Getenv("XDG_RUNTIME_DIR"))
	env = append(env, "DBUS_SESSION_BUS_ADDRESS="+os.Getenv("DBUS_SESSION_BUS_ADDRESS"))
	args = append(args, e.imageFilename)
	args = append(args, e.spec.Command...)
		Stdout: e.spec.Stdout,
		Stderr: e.spec.Stderr,
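// Start locates the singularity binary, builds the child command via
// execCmd (wrapping it in sudo when e.sudo is set), and starts it.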
func (e *singularityExecutor) Start() error {
	path, err := exec.LookPath("singularity")
	child := e.execCmd(path)
		child.Args = append([]string{child.Path}, child.Args...)
		child.Path, err = exec.LookPath("sudo")
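// Pid returns the PID of a process inside the container, as found by
// containedProcess.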
func (e *singularityExecutor) Pid() int {
	childproc, err := e.containedProcess()
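// Stop sends SIGKILL to the singularity child process if it is still
// running.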
func (e *singularityExecutor) Stop() error {
	if e.child == nil || e.child.Process == nil {
		// no process started, or Wait already called
	if err := e.child.Process.Signal(syscall.Signal(0)); err != nil {
		// process already exited
	return e.child.Process.Signal(syscall.SIGKILL)
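// Wait waits for the child process to exit and returns its exit code.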
func (e *singularityExecutor) Wait(context.Context) (int, error) {
	err := e.child.Wait()
	if err, ok := err.(*exec.ExitError); ok {
		return err.ProcessState.ExitCode(), nil
	return e.child.ProcessState.ExitCode(), nil
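// Close removes the executor's temporary directory.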
func (e *singularityExecutor) Close() {
	err := os.RemoveAll(e.tmpdir)
		e.logf("error removing temp dir: %s", err)
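// InjectCommand returns an exec.Cmd that will run injectcmd inside
// the container's namespaces using nsenter.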
func (e *singularityExecutor) InjectCommand(ctx context.Context, detachKeys, username string, usingTTY bool, injectcmd []string) (*exec.Cmd, error) {
	target, err := e.containedProcess()
	return exec.CommandContext(ctx, "nsenter", append([]string{fmt.Sprintf("--target=%d", target), "--all"}, injectcmd...)...), nil
	errContainerHasNoIPAddress = errors.New("container has no IP address distinct from host")
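// IPAddress returns an IP address of the container that is distinct
// from the host's own addresses, or an error if there is none.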
func (e *singularityExecutor) IPAddress() (string, error) {
	target, err := e.containedProcess()
	targetIPs, err := processIPs(target)
	selfIPs, err := processIPs(os.Getpid())
	for ip := range targetIPs {
	return "", errContainerHasNoIPAddress
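// processIPs returns the set of local IPv4 addresses in the network
// namespace of the given process, parsed from /proc/<pid>/net/fib_trie.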
func processIPs(pid int) (map[string]bool, error) {
	fibtrie, err := os.ReadFile(fmt.Sprintf("/proc/%d/net/fib_trie", pid))
	addrs := map[string]bool{}
	// When we see a pair of lines like this:
	// ...we set addrs["10.1.2.3"] = true
	lines := bytes.Split(fibtrie, []byte{'\n'})
	for linenumber, line := range lines {
		if !bytes.HasSuffix(line, []byte("/32 host LOCAL")) {
		i := bytes.LastIndexByte(lines[linenumber-1], ' ')
		if i < 0 || i >= len(line)-7 {
		addr := string(lines[linenumber-1][i+1:])
		if net.ParseIP(addr).To4() != nil {
	errContainerNotStarted = errors.New("container has not started yet")
	errCannotFindChild     = errors.New("failed to find any process inside the container")
	reProcStatusPPid       = regexp.MustCompile(`\nPPid:\t(\d+)\n`)
// Return the PID of a process that is inside the container (not
// necessarily the topmost/pid=1 process in the container).
func (e *singularityExecutor) containedProcess() (int, error) {
	if e.child == nil || e.child.Process == nil {
		return 0, errContainerNotStarted
	cmd := exec.Command("lsns")
		cmd = exec.Command("sudo", "lsns")
	lsns, err := cmd.CombinedOutput()
		return 0, fmt.Errorf("lsns: %w", err)
	for _, line := range bytes.Split(lsns, []byte{'\n'}) {
		fields := bytes.Fields(line)
		if !bytes.Equal(fields[1], []byte("pid")) {
		pid, err := strconv.ParseInt(string(fields[3]), 10, 64)
			return 0, fmt.Errorf("error parsing PID field in lsns output: %q", fields[3])
		for parent := pid; ; {
			procstatus, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", parent))
			m := reProcStatusPPid.FindSubmatch(procstatus)
			parent, err = strconv.ParseInt(string(m[1]), 10, 64)
			if int(parent) == e.child.Process.Pid {
	return 0, errCannotFindChild