import (
"errors"
- "os/exec"
+ "log"
+ "regexp"
"sort"
- "strings"
- "time"
+ "strconv"
"git.curoverse.com/arvados.git/sdk/go/arvados"
)
-var (
- ErrInstanceTypesNotConfigured = errors.New("site configuration does not list any instance types")
- discountConfiguredRAMPercent = 5
-)
+var ErrInstanceTypesNotConfigured = errors.New("site configuration does not list any instance types")
+
+var discountConfiguredRAMPercent = 5
// ConstraintsNotSatisfiableError includes a list of available instance types
// to be reported back to the user.
AvailableTypes []arvados.InstanceType
}
+ var pdhRegexp = regexp.MustCompile(`^[0-9a-f]{32}\+(\d+)$`)
+
+ // estimateDockerImageSize estimates how much disk space will be used
+ // by a Docker image, given the PDH of a collection containing a
+ // Docker image that was created by "arv-keepdocker". Returns
+ // estimated number of bytes of disk space that should be reserved.
+ // Returns 0 (after logging) when the PDH does not parse or the
+ // manifest is too short to describe a plausible image.
+ func estimateDockerImageSize(collectionPDH string) int64 {
+ m := pdhRegexp.FindStringSubmatch(collectionPDH)
+ if m == nil {
+ log.Printf("estimateDockerImageSize: '%v' did not match pdhRegexp, returning 0", collectionPDH)
+ return 0
+ }
+ // m[1] is the manifest size in bytes, captured from the "hash+size" PDH.
+ n, err := strconv.ParseInt(m[1], 10, 64)
+ if err != nil || n < 122 {
+ // NOTE(review): 122 is presumably the minimum length of a valid
+ // arv-keepdocker manifest — confirm against arv-keepdocker output.
+ log.Printf("estimateDockerImageSize: short manifest %v or error (%v), returning 0", n, err)
+ return 0
+ }
+ // To avoid having to fetch the collection, take advantage of
+ // the fact that the manifest storing a container image
+ // uploaded by arv-keepdocker has a predictable format, which
+ // allows us to estimate the size of the image based on just
+ // the size of the manifest.
+ //
+ // Use the following heuristic:
+ // - Start with the length of the manifest (n)
+ // - Subtract 80 characters for the filename and file segment
+ // - Divide by 42 to get the number of block identifiers ('hash\+size\ ' is 32+1+8+1)
+ // - Assume each block is full, multiply by 64 MiB
+ return ((n - 80) / 42) * (64 * 1024 * 1024)
+ }
+
+ // EstimateScratchSpace estimates how much available disk space (in
+ // bytes) is needed to run the container by summing the capacity
+ // requested by 'tmp' mounts plus disk space required to load the
+ // Docker image.
+ func EstimateScratchSpace(ctr *arvados.Container) (needScratch int64) {
+ // Sum the capacity requested by every "tmp" mount.
+ for _, m := range ctr.Mounts {
+ if m.Kind == "tmp" {
+ needScratch += m.Capacity
+ }
+ }
+
+ // Account for disk space usage by Docker, assumes the following behavior:
+ // - Layer tarballs are buffered to disk during "docker load".
+ // - Individual layer tarballs are extracted from buffered
+ // copy to the filesystem
+ // (dockerImageSize is 0 when the image size could not be estimated.)
+ dockerImageSize := estimateDockerImageSize(ctr.ContainerImage)
+
+ // The buffer is only needed during image load, so make sure
+ // the baseline scratch space at least covers dockerImageSize,
+ // and assume it will be released to the job afterwards.
+ if needScratch < dockerImageSize {
+ needScratch = dockerImageSize
+ }
+
+ // Now reserve space for the extracted image on disk.
+ needScratch += dockerImageSize
+
+ return
+ }
+
// ChooseInstanceType returns the cheapest available
// arvados.InstanceType big enough to run ctr.
func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvados.InstanceType, err error) {
return
}
- needScratch := int64(0)
- for _, m := range ctr.Mounts {
- if m.Kind == "tmp" {
- needScratch += m.Capacity
- }
- }
+ needScratch := EstimateScratchSpace(ctr)
needVCPUs := ctr.RuntimeConstraints.VCPUs
}
return
}
-
-// SlurmNodeTypeFeatureKludge ensures SLURM accepts every instance
-// type name as a valid feature name, even if no instances of that
-// type have appeared yet.
-//
-// It takes advantage of some SLURM peculiarities:
-//
-// (1) A feature is valid after it has been offered by a node, even if
-// it is no longer offered by any node. So, to make a feature name
-// valid, we can add it to a dummy node ("compute0"), then remove it.
-//
-// (2) To test whether a set of feature names are valid without
-// actually submitting a job, we can call srun --test-only with the
-// desired features.
-//
-// SlurmNodeTypeFeatureKludge does a test-and-fix operation
-// immediately, and then periodically, in case slurm restarts and
-// forgets the list of valid features. It never returns (unless there
-// are no node types configured, in which case it returns
-// immediately), so it should generally be invoked with "go".
-func SlurmNodeTypeFeatureKludge(cc *arvados.Cluster) {
- if len(cc.InstanceTypes) == 0 {
- return
- }
- var features []string
- for _, it := range cc.InstanceTypes {
- features = append(features, "instancetype="+it.Name)
- }
- for {
- slurmKludge(features)
- time.Sleep(2 * time.Second)
- }
-}
-
-const slurmDummyNode = "compute0"
-
-func slurmKludge(features []string) {
- allFeatures := strings.Join(features, ",")
-
- cmd := exec.Command("sinfo", "--nodes="+slurmDummyNode, "--format=%f", "--noheader")
- out, err := cmd.CombinedOutput()
- if err != nil {
- log.Printf("running %q %q: %s (output was %q)", cmd.Path, cmd.Args, err, out)
- return
- }
- if string(out) == allFeatures+"\n" {
- // Already configured correctly, nothing to do.
- return
- }
-
- log.Printf("configuring node %q with all node type features", slurmDummyNode)
- cmd = exec.Command("scontrol", "update", "NodeName="+slurmDummyNode, "Features="+allFeatures)
- log.Printf("running: %q %q", cmd.Path, cmd.Args)
- out, err = cmd.CombinedOutput()
- if err != nil {
- log.Printf("error: scontrol: %s (output was %q)", err, out)
- }
-}
runner.CrunchLog.Close()
}()
+ // Fetch the container record and verify it is in "Locked" state
+ // before proceeding; any other state indicates a dispatch error,
+ // so bail out early rather than running the container.
+ err = runner.fetchContainerRecord()
+ if err != nil {
+ return
+ }
+ if runner.Container.State != "Locked" {
+ return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
+ }
+
defer func() {
// checkErr prints e (unless it's nil) and sets err to
// e (unless err is already non-nil). Thus, if err
checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
}()
- err = runner.fetchContainerRecord()
- if err != nil {
- return
- }
runner.setupSignals()
err = runner.startHoststat()
if err != nil {
cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
+ detach := flag.Bool("detach", false, "Detach from parent process and run in the background")
+ sleep := flag.Duration("sleep", 0, "Delay before starting (testing use only)")
+ kill := flag.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
+ list := flag.Bool("list", false, "List UUIDs of existing crunch-run processes")
enableNetwork := flag.String("container-enable-networking", "default",
`Specify if networking should be enabled for container. One of 'default', 'always':
default: only enable networking if container requests it.
memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
getVersion := flag.Bool("version", false, "Print version information and exit.")
flag.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
+
+ ignoreDetachFlag := false
+ if len(os.Args) > 1 && os.Args[1] == "-no-detach" {
+ // This process was invoked by a parent process, which
+ // has passed along its own arguments, including
+ // -detach, after the leading -no-detach flag. Strip
+ // the leading -no-detach flag (it's not recognized by
+ // flag.Parse()) and ignore the -detach flag that
+ // comes later.
+ os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
+ ignoreDetachFlag = true
+ }
+
flag.Parse()
+ // Handle the alternate operating modes before any normal container
+ // setup: -detach runs in the background (unless this process was
+ // re-invoked with -no-detach), -kill signals an existing crunch-run
+ // process, -list prints UUIDs of existing processes. Each mode
+ // exits the process here.
+ switch {
+ case *detach && !ignoreDetachFlag:
+ os.Exit(Detach(flag.Arg(0), os.Args, os.Stdout, os.Stderr))
+ case *kill >= 0:
+ os.Exit(KillProcess(flag.Arg(0), syscall.Signal(*kill), os.Stdout, os.Stderr))
+ case *list:
+ os.Exit(ListProcesses(os.Stdout, os.Stderr))
+ }
+
// Print version information if requested
if *getVersion {
fmt.Printf("crunch-run %s\n", version)
}
log.Printf("crunch-run %s started", version)
+ time.Sleep(*sleep)
containerId := flag.Arg(0)