package dispatchcloud
import (
- "bytes"
"errors"
"log"
"os/exec"
+ "regexp"
+ "sort"
+ "strconv"
"strings"
"time"
)
var (
- ErrConstraintsNotSatisfiable = errors.New("constraints not satisfiable by any configured instance type")
ErrInstanceTypesNotConfigured = errors.New("site configuration does not list any instance types")
discountConfiguredRAMPercent = 5
)
+// ConstraintsNotSatisfiableError includes a list of available instance types
+// to be reported back to the user.
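+// Callers can detect it with a type assertion and use AvailableTypes
+// to tell the user which instance types are configured.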
+type ConstraintsNotSatisfiableError struct {
+ error
+ AvailableTypes []arvados.InstanceType
+}
+
+var pdhRegexp = regexp.MustCompile(`^[0-9a-f]{32}\+(\d+)$`)
+
+// estimateDockerImageSize estimates how much disk space will be used
+// by a Docker image, given the PDH of a collection containing a
+// Docker image that was created by "arv-keepdocker". Returns
+// the estimated number of bytes of disk space that should be reserved.
+func estimateDockerImageSize(collectionPDH string) int64 {
+ m := pdhRegexp.FindStringSubmatch(collectionPDH)
+ if m == nil {
+ log.Printf("estimateDockerImageSize: '%v' did not match pdhRegexp, returning 0", collectionPDH)
+ return 0
+ }
+ n, err := strconv.ParseInt(m[1], 10, 64)
+ if err != nil || n < 122 {
+ log.Printf("estimateDockerImageSize: short manifest %v or error (%v), returning 0", n, err)
+ return 0
+ }
+ // To avoid having to fetch the collection, take advantage of
+ // the fact that the manifest storing a container image
+ // uploaded by arv-keepdocker has a predictable format, which
+ // allows us to estimate the size of the image based on just
+ // the size of the manifest.
+ //
+ // Use the following heuristic:
+ // - Start with the length of the manifest (n)
+ // - Subtract 80 characters for the filename and file segment
+ // - Divide by 42 to get the number of block identifiers ("hash+size " is 32+1+8+1)
+ // - Assume each block is full, multiply by 64 MiB
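+ //
+ // For example (illustrative figures only): a manifest of
+ // n = 4280 bytes yields (4280-80)/42 = 100 block identifiers,
+ // for an estimate of 100 * 64 MiB = 6.25 GiB.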
+ return ((n - 80) / 42) * (64 * 1024 * 1024)
+}
+
+// EstimateScratchSpace estimates how much available disk space (in
+// bytes) is needed to run the container by summing the capacity
+// requested by 'tmp' mounts plus disk space required to load the
+// Docker image.
+func EstimateScratchSpace(ctr *arvados.Container) (needScratch int64) {
+ for _, m := range ctr.Mounts {
+ if m.Kind == "tmp" {
+ needScratch += m.Capacity
+ }
+ }
+
+ // Account for disk space used by Docker, assuming the following behavior:
+ // - Layer tarballs are buffered to disk during "docker load".
+ // - Individual layer tarballs are then extracted from the buffered
+ // copy to the filesystem.
+ dockerImageSize := estimateDockerImageSize(ctr.ContainerImage)
+
+ // The buffer is only needed during image load, so make sure
+ // the baseline scratch space at least covers dockerImageSize,
+ // and assume it will be released to the job afterwards.
+ if needScratch < dockerImageSize {
+ needScratch = dockerImageSize
+ }
+
+ // Now reserve space for the extracted image on disk.
+ needScratch += dockerImageSize
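+ // For example (illustrative): with 512 MiB of "tmp" mounts and a
+ // 1 GiB image estimate, the baseline is raised to 1 GiB for the
+ // load buffer and a further 1 GiB is added for the extracted
+ // image, giving needScratch = 2 GiB.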
+
+ return
+}
+
// ChooseInstanceType returns the cheapest available
// arvados.InstanceType big enough to run ctr.
func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvados.InstanceType, err error) {
- needVCPUs := ctr.RuntimeConstraints.VCPUs
- needRAM := ctr.RuntimeConstraints.RAM + ctr.RuntimeConstraints.KeepCacheRAM
-
- needRAM = (needRAM * 100) / int64(100-discountConfiguredRAMPercent)
-
if len(cc.InstanceTypes) == 0 {
err = ErrInstanceTypesNotConfigured
return
}
- err = ErrConstraintsNotSatisfiable
+ needScratch := EstimateScratchSpace(ctr)
+
+ needVCPUs := ctr.RuntimeConstraints.VCPUs
+
+ needRAM := ctr.RuntimeConstraints.RAM + ctr.RuntimeConstraints.KeepCacheRAM
+ needRAM = (needRAM * 100) / int64(100-discountConfiguredRAMPercent)
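+ // With discountConfiguredRAMPercent = 5, this inflates the
+ // requested RAM by 100/95 (about 5.3%), e.g. a 19 GiB request
+ // becomes 20 GiB, so the chosen type's configured RAM will
+ // exceed the container's actual request by a small margin.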
+
+ ok := false
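+ // Scan every configured type and keep the cheapest one that
+ // satisfies all constraints; a candidate only replaces the
+ // current best if it is cheaper, or the same price with specs
+ // that are at least as good.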
for _, it := range cc.InstanceTypes {
switch {
- case err == nil && it.Price > best.Price:
- case it.RAM < needRAM:
+ case ok && it.Price > best.Price:
+ case int64(it.Scratch) < needScratch:
+ case int64(it.RAM) < needRAM:
case it.VCPUs < needVCPUs:
+ case it.Preemptible != ctr.SchedulingParameters.Preemptible:
case it.Price == best.Price && (it.RAM < best.RAM || it.VCPUs < best.VCPUs):
// Equal price, but worse specs
default:
// Lower price || (same price && better specs)
best = it
- err = nil
+ ok = true
+ }
+ }
+ if !ok {
+ availableTypes := make([]arvados.InstanceType, 0, len(cc.InstanceTypes))
+ for _, t := range cc.InstanceTypes {
+ availableTypes = append(availableTypes, t)
+ }
+ sort.Slice(availableTypes, func(a, b int) bool {
+ return availableTypes[a].Price < availableTypes[b].Price
+ })
+ err = ConstraintsNotSatisfiableError{
+ errors.New("constraints not satisfiable by any configured instance type"),
+ availableTypes,
}
+ return
}
return
}
// it is no longer offered by any node. So, to make a feature name
// valid, we can add it to a dummy node ("compute0"), then remove it.
//
-// (2) when srun is given an invalid --gres argument and an invalid
-// --constraint argument, the error message mentions "Invalid feature
-// specification". So, to test whether a feature name is valid without
-// actually submitting a job, we can call srun with the feature name
-// and an invalid --gres argument.
+// (2) To test whether a set of feature names is valid without
+// actually submitting a job, we can call srun --test-only with the
+// desired features.
//
// SlurmNodeTypeFeatureKludge does a test-and-fix operation
// immediately, and then periodically, in case slurm restarts and
}
for {
slurmKludge(features)
- time.Sleep(time.Minute)
+ time.Sleep(2 * time.Second)
}
}
-var (
- slurmDummyNode = "compute0"
- slurmErrBadFeature = "Invalid feature"
- slurmErrBadGres = "Invalid generic resource"
-)
+const slurmDummyNode = "compute0"
func slurmKludge(features []string) {
- cmd := exec.Command("srun", "--gres=invalid-gres-specification", "--constraint="+strings.Join(features, "&"), "true")
- out, err := cmd.CombinedOutput()
- switch {
- case err == nil:
- log.Printf("warning: guaranteed-to-fail srun command did not fail: %q %q", cmd.Path, cmd.Args)
- log.Printf("output was: %q", out)
-
- case bytes.Contains(out, []byte(slurmErrBadFeature)):
- log.Printf("temporarily configuring node %q with all node type features", slurmDummyNode)
- for _, nodeFeatures := range []string{strings.Join(features, ","), ""} {
- cmd = exec.Command("scontrol", "update", "NodeName="+slurmDummyNode, "Features="+nodeFeatures)
- log.Printf("running: %q %q", cmd.Path, cmd.Args)
- out, err := cmd.CombinedOutput()
- if err != nil {
- log.Printf("error: scontrol: %s (output was %q)", err, out)
- }
- }
+ allFeatures := strings.Join(features, ",")
- case bytes.Contains(out, []byte(slurmErrBadGres)):
- // Evidently our node-type feature names are all valid.
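+ // Ask slurm (via sinfo) for the dummy node's current feature
+ // list; if it already matches the desired set, there is
+ // nothing to fix.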
+ cmd := exec.Command("sinfo", "--nodes="+slurmDummyNode, "--format=%f", "--noheader")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ log.Printf("running %q %q: %s (output was %q)", cmd.Path, cmd.Args, err, out)
+ return
+ }
+ if string(out) == allFeatures+"\n" {
+ // Already configured correctly, nothing to do.
+ return
+ }
- default:
- log.Printf("warning: expected srun error %q or %q, but output was %q", slurmErrBadFeature, slurmErrBadGres, out)
+ log.Printf("configuring node %q with all node type features", slurmDummyNode)
+ cmd = exec.Command("scontrol", "update", "NodeName="+slurmDummyNode, "Features="+allFeatures)
+ log.Printf("running: %q %q", cmd.Path, cmd.Args)
+ out, err = cmd.CombinedOutput()
+ if err != nil {
+ log.Printf("error: scontrol: %s (output was %q)", err, out)
}
}