17967: Read from volumes with high-priority storage classes first.
author     Tom Clegg <tom@curii.com>
           Thu, 5 Aug 2021 03:21:45 +0000 (23:21 -0400)
committer  Tom Clegg <tom@curii.com>
           Thu, 5 Aug 2021 03:21:45 +0000 (23:21 -0400)
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>

services/keepstore/handler_test.go
services/keepstore/volume.go
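
For context, the ordering rule this commit introduces can be sketched in isolation: a mount's effective priority is the highest Priority of any storage class it offers, reads try mounts highest-first, and ties fall back to UUID order. The following standalone sketch uses simplified stand-in types (mount, classPriority) rather than the real arvados.Volume / KeepMount API:

// Standalone sketch of the ordering rule introduced by this commit.
// mount and classPriority are simplified stand-ins for the real
// arvados.Volume / KeepMount types and the cluster StorageClasses config.
package main

import (
	"fmt"
	"sort"
)

type mount struct {
	UUID    string
	Classes []string
}

func main() {
	classPriority := map[string]int{"class1": 66, "class2": 33, "class3": 99}
	mounts := []mount{
		{UUID: "zzzzz-nyw5e-111111111111111", Classes: []string{"class1"}},
		{UUID: "zzzzz-nyw5e-222222222222222", Classes: []string{"class2", "class3"}},
	}
	// A mount's effective priority is the highest priority of any
	// storage class it offers.
	pri := func(m mount) int {
		best := 0
		for i, class := range m.Classes {
			if p := classPriority[class]; i == 0 || p > best {
				best = p
			}
		}
		return best
	}
	// Highest priority first; ties broken by UUID for a stable order.
	sort.Slice(mounts, func(i, j int) bool {
		if pi, pj := pri(mounts[i]), pri(mounts[j]); pi != pj {
			return pi > pj
		}
		return mounts[i].UUID < mounts[j].UUID
	})
	for _, m := range mounts {
		// Prints ...222 (priority 99) before ...111 (priority 66),
		// matching the {66, 33, 99} trial in the new test below.
		fmt.Printf("%s (priority %d)\n", m.UUID, pri(m))
	}
}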

diff --git a/services/keepstore/handler_test.go b/services/keepstore/handler_test.go
index db64449e48bed2234ae32a97fbc122b360a77576..00ef11b6efa5b18bf20895e0ef3caee3b5295754 100644
@@ -320,6 +320,54 @@ func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
        }
 }
 
+func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-111111111111111": {
+                       Driver:         "mock",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class1": true}},
+               "zzzzz-nyw5e-222222222222222": {
+                       Driver:         "mock",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class2": true, "class3": true}},
+       }
+
+       for _, trial := range []struct {
+               priority1 int // priority of class1, thus vol1
+               priority2 int // priority of class2
+               priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
+               get1      int // expected number of "get" ops on vol1
+               get2      int // expected number of "get" ops on vol2
+       }{
+               {100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
+               {100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
+               {66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
+               {66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
+       } {
+               c.Logf("%+v", trial)
+               s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+                       "class1": {Priority: trial.priority1},
+                       "class2": {Priority: trial.priority2},
+                       "class3": {Priority: trial.priority3},
+               }
+               c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+               IssueRequest(s.handler,
+                       &RequestTester{
+                               method:         "PUT",
+                               uri:            "/" + TestHash,
+                               requestBody:    TestBlock,
+                               storageClasses: "class1",
+                       })
+               IssueRequest(s.handler,
+                       &RequestTester{
+                               method: "GET",
+                               uri:    "/" + TestHash,
+                       })
+               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
+               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
+       }
+}
+
 // Test TOUCH requests.
 func (s *HandlerSuite) TestTouchHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
index 26e6b731828f9be0861044cb6a7c4e10d097d05f..9bfc6ca3e5191d2953ceac75f915a07cab19c69f 100644
@@ -10,6 +10,7 @@ import (
        "fmt"
        "io"
        "math/big"
+       "sort"
        "sync/atomic"
        "time"
 
@@ -343,6 +344,27 @@ func makeRRVolumeManager(logger logrus.FieldLogger, cluster *arvados.Cluster, my
                        vm.writables = append(vm.writables, mnt)
                }
        }
+       // pri(i): return highest priority of any storage class
+       // offered by vm.readables[i]
+       pri := func(i int) int {
+               any, best := false, 0
+               for class := range vm.readables[i].KeepMount.StorageClasses {
+                       if p := cluster.StorageClasses[class].Priority; !any || best < p {
+                               best = p
+                               any = true
+                       }
+               }
+               return best
+       }
+       // sort vm.readables, first by highest priority of any offered
+       // storage class (highest->lowest), then by volume UUID
+       sort.Slice(vm.readables, func(i, j int) bool {
+               if pi, pj := pri(i), pri(j); pi != pj {
+                       return pi > pj
+               } else {
+                       return vm.readables[i].KeepMount.UUID < vm.readables[j].KeepMount.UUID
+               }
+       })
        return vm, nil
 }
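
The payoff is in the read path: as the test's Get call counts show (try vol2, then vol1 when vol2's class has higher priority), a GET walks vm.readables in slice order and stops at the first successful read, so the sort above puts higher-priority mounts first in line. A minimal sketch of that consumption pattern, with a hypothetical read callback standing in for the real per-volume Get:

// Sketch only: getBlock tries mounts in their (already sorted) priority
// order and returns the first hit. The read callback is a hypothetical
// stand-in for the real per-volume Get call.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("block not found")

func getBlock(readables []string, hash string,
	read func(mountUUID, hash string) ([]byte, error)) ([]byte, error) {
	for _, uuid := range readables {
		if buf, err := read(uuid, hash); err == nil {
			return buf, nil
		}
	}
	return nil, errNotFound
}

func main() {
	// vol2 sorts ahead of vol1 when one of its classes has top priority.
	readables := []string{"zzzzz-nyw5e-222222222222222", "zzzzz-nyw5e-111111111111111"}
	stored := map[string][]byte{"zzzzz-nyw5e-111111111111111": []byte("block data")}
	buf, err := getBlock(readables, "somehash", func(uuid, _ string) ([]byte, error) {
		if b, ok := stored[uuid]; ok {
			return b, nil
		}
		return nil, errNotFound
	})
	// vol2 misses, vol1 serves: one read on each mount, just like the
	// get1=1, get2=1 trials in the test above.
	fmt.Println(string(buf), err)
}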