1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
17 "git.curoverse.com/arvados.git/sdk/go/arvados"
21 ErrInstanceTypesNotConfigured = errors.New("site configuration does not list any instance types")
22 discountConfiguredRAMPercent = 5
25 // ConstraintsNotSatisfiableError includes a list of available instance types
26 // to be reported back to the user.
27 type ConstraintsNotSatisfiableError struct {
29 AvailableTypes []arvados.InstanceType
// pdhRegexp matches a portable data hash: 32 hex digits, "+", and the
// manifest size in bytes (captured as group 1).
var pdhRegexp = regexp.MustCompile(`^[0-9a-f]{32}\+(\d+)$`)

// estimateDockerImageSize estimates how much disk space will be used
// by a Docker image, given the PDH of a collection containing a
// Docker image that was created by "arv-keepdocker". Returns
// estimated number of bytes of disk space that should be reserved.
// Returns 0 when the argument is not a PDH or the manifest is too
// small to describe a valid image.
func estimateDockerImageSize(collectionPDH string) int64 {
	m := pdhRegexp.FindStringSubmatch(collectionPDH)
	if m == nil {
		// Not a PDH at all; nothing to estimate.
		return 0
	}
	n, err := strconv.ParseInt(m[1], 10, 64)
	if err != nil || n < 122 {
		// 122 is the smallest manifest that could plausibly describe
		// an image (shorter manifests can't contain even one block).
		return 0
	}

	// To avoid having to fetch the collection, take advantage of
	// the fact that the manifest storing a container image
	// uploaded by arv-keepdocker has a predictable format, which
	// allows us to estimate the size of the image based on just
	// the size of the manifest.
	//
	// Use the following heuristic:
	// - Start with the length of the manifest (n)
	// - Subtract 80 characters for the filename and file segment
	// - Divide by 42 to get the number of block identifiers ('hash\+size\ ' is 32+1+8+1)
	// - Assume each block is full, multiply by 64 MiB
	return ((n - 80) / 42) * (64 * 1024 * 1024)
}
61 // EstimateScratchSpace estimates how much available disk space (in
62 // bytes) is needed to run the container by summing the capacity
63 // requested by 'tmp' mounts plus disk space required to load the
65 func EstimateScratchSpace(ctr *arvados.Container) (needScratch int64) {
66 for _, m := range ctr.Mounts {
68 needScratch += m.Capacity
72 // Account for disk space usage by Docker, assumes the following behavior:
73 // - Layer tarballs are buffered to disk during "docker load".
74 // - Individual layer tarballs are extracted from buffered
75 // copy to the filesystem
76 dockerImageSize := estimateDockerImageSize(ctr.ContainerImage)
78 // The buffer is only needed during image load, so make sure
79 // the baseline scratch space at least covers dockerImageSize,
80 // and assume it will be released to the job afterwards.
81 if needScratch < dockerImageSize {
82 needScratch = dockerImageSize
85 // Now reserve space for the extracted image on disk.
86 needScratch += dockerImageSize
91 // ChooseInstanceType returns the cheapest available
92 // arvados.InstanceType big enough to run ctr.
93 func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvados.InstanceType, err error) {
94 if len(cc.InstanceTypes) == 0 {
95 err = ErrInstanceTypesNotConfigured
99 needScratch := EstimateScratchSpace(ctr)
101 needVCPUs := ctr.RuntimeConstraints.VCPUs
103 needRAM := ctr.RuntimeConstraints.RAM + ctr.RuntimeConstraints.KeepCacheRAM
104 needRAM = (needRAM * 100) / int64(100-discountConfiguredRAMPercent)
107 for _, it := range cc.InstanceTypes {
109 case ok && it.Price > best.Price:
110 case int64(it.Scratch) < needScratch:
111 case int64(it.RAM) < needRAM:
112 case it.VCPUs < needVCPUs:
113 case it.Preemptible != ctr.SchedulingParameters.Preemptible:
114 case it.Price == best.Price && (it.RAM < best.RAM || it.VCPUs < best.VCPUs):
115 // Equal price, but worse specs
117 // Lower price || (same price && better specs)
123 availableTypes := make([]arvados.InstanceType, 0, len(cc.InstanceTypes))
124 for _, t := range cc.InstanceTypes {
125 availableTypes = append(availableTypes, t)
127 sort.Slice(availableTypes, func(a, b int) bool {
128 return availableTypes[a].Price < availableTypes[b].Price
130 err = ConstraintsNotSatisfiableError{
131 errors.New("constraints not satisfiable by any configured instance type"),
139 // SlurmNodeTypeFeatureKludge ensures SLURM accepts every instance
140 // type name as a valid feature name, even if no instances of that
141 // type have appeared yet.
143 // It takes advantage of some SLURM peculiarities:
145 // (1) A feature is valid after it has been offered by a node, even if
146 // it is no longer offered by any node. So, to make a feature name
147 // valid, we can add it to a dummy node ("compute0"), then remove it.
149 // (2) To test whether a set of feature names are valid without
150 // actually submitting a job, we can call srun --test-only with the
153 // SlurmNodeTypeFeatureKludge does a test-and-fix operation
154 // immediately, and then periodically, in case slurm restarts and
155 // forgets the list of valid features. It never returns (unless there
156 // are no node types configured, in which case it returns
157 // immediately), so it should generally be invoked with "go".
158 func SlurmNodeTypeFeatureKludge(cc *arvados.Cluster) {
159 if len(cc.InstanceTypes) == 0 {
162 var features []string
163 for _, it := range cc.InstanceTypes {
164 features = append(features, "instancetype="+it.Name)
167 slurmKludge(features)
168 time.Sleep(2 * time.Second)
// slurmDummyNode is the node whose feature list is (ab)used to make
// every instance type name a valid SLURM feature name.
const slurmDummyNode = "compute0"

// slurmKludge checks whether slurmDummyNode already advertises all of
// the given features, and if not, updates the node via scontrol so it
// does. Errors are logged, not returned: this is a best-effort
// periodic fixup.
func slurmKludge(features []string) {
	allFeatures := strings.Join(features, ",")

	cmd := exec.Command("sinfo", "--nodes="+slurmDummyNode, "--format=%f", "--noheader")
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("running %q %q: %s (output was %q)", cmd.Path, cmd.Args, err, out)
		return
	}
	if string(out) == allFeatures+"\n" {
		// Already configured correctly, nothing to do.
		return
	}

	log.Printf("configuring node %q with all node type features", slurmDummyNode)
	cmd = exec.Command("scontrol", "update", "NodeName="+slurmDummyNode, "Features="+allFeatures)
	log.Printf("running: %q %q", cmd.Path, cmd.Args)
	out, err = cmd.CombinedOutput()
	if err != nil {
		log.Printf("error: scontrol: %s (output was %q)", err, out)
	}
}