Merge branch '20318-disk-cache'
[arvados.git] / lib / dispatchcloud / dispatcher_test.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 package dispatchcloud
6
7 import (
8         "context"
9         "crypto/tls"
10         "encoding/json"
11         "io/ioutil"
12         "math/rand"
13         "net/http"
14         "net/http/httptest"
15         "net/url"
16         "os"
17         "sync"
18         "sync/atomic"
19         "time"
20
21         "git.arvados.org/arvados.git/lib/config"
22         "git.arvados.org/arvados.git/lib/dispatchcloud/test"
23         "git.arvados.org/arvados.git/sdk/go/arvados"
24         "git.arvados.org/arvados.git/sdk/go/arvadostest"
25         "git.arvados.org/arvados.git/sdk/go/ctxlog"
26         "github.com/prometheus/client_golang/prometheus"
27         "golang.org/x/crypto/ssh"
28         check "gopkg.in/check.v1"
29 )
30
var _ = check.Suite(&DispatcherSuite{})

// DispatcherSuite exercises the dispatcher against a stub cloud
// driver and a fake container queue.
type DispatcherSuite struct {
	ctx            context.Context
	cancel         context.CancelFunc // cancels ctx in TearDownTest to stop background goroutines
	cluster        *arvados.Cluster   // cluster config under test; cases may modify it before initialize()
	stubDriver     *test.StubDriver   // fake cloud provider, registered as Drivers["test"] by each test
	disp           *dispatcher        // the dispatcher instance under test
	error503Server *httptest.Server   // stub backend that always responds HTTP 503 (see arvClientProxy)
}
41
// SetUpTest builds a dispatcher wired to a stub cloud driver, a
// throwaway cluster config (PostgreSQL connection info is borrowed
// from the integration-test config), and a stub HTTP server that
// always replies 503 (used to exercise the client's Last503 path).
// It does not call initialize() or run(); individual test cases do
// that after adjusting s.cluster.
func (s *DispatcherSuite) SetUpTest(c *check.C) {
	s.ctx, s.cancel = context.WithCancel(context.Background())
	s.ctx = ctxlog.Context(s.ctx, ctxlog.TestLogger(c))
	dispatchpub, _ := test.LoadTestKey(c, "test/sshkey_dispatch")
	dispatchprivraw, err := ioutil.ReadFile("test/sshkey_dispatch")
	c.Assert(err, check.IsNil)

	_, hostpriv := test.LoadTestKey(c, "test/sshkey_vm")
	// Stub driver: inject ~10% create/destroy failures to exercise
	// retry paths, and cap instances at 10 to exercise quota
	// handling.
	s.stubDriver = &test.StubDriver{
		HostKey:                   hostpriv,
		AuthorizedKeys:            []ssh.PublicKey{dispatchpub},
		ErrorRateCreate:           0.1,
		ErrorRateDestroy:          0.1,
		MinTimeBetweenCreateCalls: time.Millisecond,
		QuotaMaxInstances:         10,
	}

	// We need the postgresql connection info from the integration
	// test config.
	cfg, err := config.NewLoader(nil, ctxlog.FromContext(s.ctx)).Load()
	c.Assert(err, check.IsNil)
	testcluster, err := cfg.GetCluster("")
	c.Assert(err, check.IsNil)

	// Millisecond-scale timeouts/intervals throughout, so a full
	// instance lifecycle (boot, probe, run, shutdown) plays out
	// quickly enough for the tests below.
	s.cluster = &arvados.Cluster{
		ManagementToken: "test-management-token",
		PostgreSQL:      testcluster.PostgreSQL,
		Containers: arvados.ContainersConfig{
			CrunchRunCommand:       "crunch-run",
			CrunchRunArgumentsList: []string{"--foo", "--extra='args'"},
			DispatchPrivateKey:     string(dispatchprivraw),
			StaleLockTimeout:       arvados.Duration(5 * time.Millisecond),
			RuntimeEngine:          "stub",
			MaxDispatchAttempts:    10,
			MaximumPriceFactor:     1.5,
			CloudVMs: arvados.CloudVMsConfig{
				Driver:               "test",
				SyncInterval:         arvados.Duration(10 * time.Millisecond),
				TimeoutIdle:          arvados.Duration(150 * time.Millisecond),
				TimeoutBooting:       arvados.Duration(150 * time.Millisecond),
				TimeoutProbe:         arvados.Duration(15 * time.Millisecond),
				TimeoutShutdown:      arvados.Duration(5 * time.Millisecond),
				MaxCloudOpsPerSecond: 500,
				InitialQuotaEstimate: 8,
				PollInterval:         arvados.Duration(5 * time.Millisecond),
				ProbeInterval:        arvados.Duration(5 * time.Millisecond),
				MaxProbesPerSecond:   1000,
				TimeoutSignal:        arvados.Duration(3 * time.Millisecond),
				TimeoutStaleRunLock:  arvados.Duration(3 * time.Millisecond),
				TimeoutTERM:          arvados.Duration(20 * time.Millisecond),
				ResourceTags:         map[string]string{"testtag": "test value"},
				TagKeyPrefix:         "test:",
			},
		},
		InstanceTypes: arvados.InstanceTypeMap{
			test.InstanceType(1).Name:  test.InstanceType(1),
			test.InstanceType(2).Name:  test.InstanceType(2),
			test.InstanceType(3).Name:  test.InstanceType(3),
			test.InstanceType(4).Name:  test.InstanceType(4),
			test.InstanceType(6).Name:  test.InstanceType(6),
			test.InstanceType(8).Name:  test.InstanceType(8),
			test.InstanceType(16).Name: test.InstanceType(16),
		},
	}
	arvadostest.SetServiceURL(&s.cluster.Services.DispatchCloud, "http://localhost:/")
	arvadostest.SetServiceURL(&s.cluster.Services.Controller, "https://"+os.Getenv("ARVADOS_API_HOST")+"/")

	arvClient, err := arvados.NewClientFromConfig(s.cluster)
	c.Assert(err, check.IsNil)
	// Disable auto-retry
	arvClient.Timeout = 0

	// Stub server that unconditionally replies 503; requests are
	// routed here by arvClientProxy when the request path is
	// "/503".
	s.error503Server = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		c.Logf("503 stub: returning 503")
		w.WriteHeader(http.StatusServiceUnavailable)
	}))
	// InsecureSkipVerify because error503Server uses a self-signed
	// test certificate.
	arvClient.Client = &http.Client{
		Transport: &http.Transport{
			Proxy: s.arvClientProxy(c),
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true}}}

	s.disp = &dispatcher{
		Cluster:   s.cluster,
		Context:   s.ctx,
		ArvClient: arvClient,
		AuthToken: arvadostest.AdminToken,
		Registry:  prometheus.NewRegistry(),
	}
	// Test cases can modify s.cluster before calling
	// initialize(), and then modify private state before calling
	// go run().
}
135
// TearDownTest stops the dispatcher's background goroutines (via
// context cancellation), closes the dispatcher, and shuts down the
// 503 stub server started in SetUpTest.
func (s *DispatcherSuite) TearDownTest(c *check.C) {
	s.cancel()
	s.disp.Close()
	s.error503Server.Close()
}
141
142 // Intercept outgoing API requests for "/503" and respond HTTP
143 // 503. This lets us force (*arvados.Client)Last503() to return
144 // something.
145 func (s *DispatcherSuite) arvClientProxy(c *check.C) func(*http.Request) (*url.URL, error) {
146         return func(req *http.Request) (*url.URL, error) {
147                 if req.URL.Path == "/503" {
148                         c.Logf("arvClientProxy: proxying to 503 stub")
149                         return url.Parse(s.error503Server.URL)
150                 } else {
151                         return nil, nil
152                 }
153         }
154 }
155
156 // DispatchToStubDriver checks that the dispatcher wires everything
157 // together effectively. It uses a real scheduler and worker pool with
158 // a fake queue and cloud driver. The fake cloud driver injects
159 // artificial errors in order to exercise a variety of code paths.
160 func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
161         Drivers["test"] = s.stubDriver
162         s.disp.setupOnce.Do(s.disp.initialize)
163         queue := &test.Queue{
164                 MaxDispatchAttempts: 5,
165                 ChooseType: func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
166                         return ChooseInstanceType(s.cluster, ctr)
167                 },
168                 Logger: ctxlog.TestLogger(c),
169         }
170         for i := 0; i < 200; i++ {
171                 queue.Containers = append(queue.Containers, arvados.Container{
172                         UUID:     test.ContainerUUID(i + 1),
173                         State:    arvados.ContainerStateQueued,
174                         Priority: int64(i%20 + 1),
175                         RuntimeConstraints: arvados.RuntimeConstraints{
176                                 RAM:   int64(i%3+1) << 30,
177                                 VCPUs: i%8 + 1,
178                         },
179                 })
180         }
181         s.disp.queue = queue
182
183         var mtx sync.Mutex
184         done := make(chan struct{})
185         waiting := map[string]struct{}{}
186         for _, ctr := range queue.Containers {
187                 waiting[ctr.UUID] = struct{}{}
188         }
189         finishContainer := func(ctr arvados.Container) {
190                 mtx.Lock()
191                 defer mtx.Unlock()
192                 if _, ok := waiting[ctr.UUID]; !ok {
193                         c.Errorf("container completed twice: %s", ctr.UUID)
194                         return
195                 }
196                 delete(waiting, ctr.UUID)
197                 if len(waiting) == 100 {
198                         // trigger scheduler maxConcurrency limit
199                         c.Logf("test: requesting 503 in order to trigger maxConcurrency limit")
200                         s.disp.ArvClient.RequestAndDecode(nil, "GET", "503", nil, nil)
201                 }
202                 if len(waiting) == 0 {
203                         close(done)
204                 }
205         }
206         executeContainer := func(ctr arvados.Container) int {
207                 finishContainer(ctr)
208                 return int(rand.Uint32() & 0x3)
209         }
210         var type4BrokenUntil time.Time
211         var countCapacityErrors int64
212         vmCount := int32(0)
213         s.stubDriver.Queue = queue
214         s.stubDriver.SetupVM = func(stubvm *test.StubVM) error {
215                 if pt := stubvm.Instance().ProviderType(); pt == test.InstanceType(6).ProviderType {
216                         c.Logf("test: returning capacity error for instance type %s", pt)
217                         atomic.AddInt64(&countCapacityErrors, 1)
218                         return test.CapacityError{InstanceTypeSpecific: true}
219                 }
220                 n := atomic.AddInt32(&vmCount, 1)
221                 c.Logf("SetupVM: instance %s n=%d", stubvm.Instance(), n)
222                 stubvm.Boot = time.Now().Add(time.Duration(rand.Int63n(int64(5 * time.Millisecond))))
223                 stubvm.CrunchRunDetachDelay = time.Duration(rand.Int63n(int64(10 * time.Millisecond)))
224                 stubvm.ExecuteContainer = executeContainer
225                 stubvm.CrashRunningContainer = finishContainer
226                 stubvm.ExtraCrunchRunArgs = "'--runtime-engine=stub' '--foo' '--extra='\\''args'\\'''"
227                 switch {
228                 case stubvm.Instance().ProviderType() == test.InstanceType(4).ProviderType &&
229                         (type4BrokenUntil.IsZero() || time.Now().Before(type4BrokenUntil)):
230                         // Initially (at least 2*TimeoutBooting), all
231                         // instances of this type are completely
232                         // broken. This ensures the
233                         // boot_outcomes{outcome="failure"} metric is
234                         // not zero.
235                         stubvm.Broken = time.Now()
236                         if type4BrokenUntil.IsZero() {
237                                 type4BrokenUntil = time.Now().Add(2 * s.cluster.Containers.CloudVMs.TimeoutBooting.Duration())
238                         }
239                 case n%7 == 0:
240                         // some instances start out OK but then stop
241                         // running any commands
242                         stubvm.Broken = time.Now().Add(time.Duration(rand.Int63n(90)) * time.Millisecond)
243                 case n%7 == 1:
244                         // some instances never pass a run-probe
245                         stubvm.CrunchRunMissing = true
246                 case n%7 == 2:
247                         // some instances start out OK but then start
248                         // reporting themselves as broken
249                         stubvm.ReportBroken = time.Now().Add(time.Duration(rand.Int63n(200)) * time.Millisecond)
250                 default:
251                         stubvm.CrunchRunCrashRate = 0.1
252                         stubvm.ArvMountDeadlockRate = 0.1
253                 }
254                 return nil
255         }
256         s.stubDriver.Bugf = c.Errorf
257
258         start := time.Now()
259         go s.disp.run()
260         err := s.disp.CheckHealth()
261         c.Check(err, check.IsNil)
262
263         for len(waiting) > 0 {
264                 waswaiting := len(waiting)
265                 select {
266                 case <-done:
267                         // loop will end because len(waiting)==0
268                 case <-time.After(5 * time.Second):
269                         if len(waiting) >= waswaiting {
270                                 c.Fatalf("timed out; no progress in 5 s while waiting for %d containers: %q", len(waiting), waiting)
271                         }
272                 }
273         }
274         c.Logf("containers finished (%s), waiting for instances to shutdown and queue to clear", time.Since(start))
275
276         deadline := time.Now().Add(5 * time.Second)
277         for range time.NewTicker(10 * time.Millisecond).C {
278                 insts, err := s.stubDriver.InstanceSets()[0].Instances(nil)
279                 c.Check(err, check.IsNil)
280                 queue.Update()
281                 ents, _ := queue.Entries()
282                 if len(ents) == 0 && len(insts) == 0 {
283                         break
284                 }
285                 if time.Now().After(deadline) {
286                         c.Fatalf("timed out with %d containers (%v), %d instances (%+v)", len(ents), ents, len(insts), insts)
287                 }
288         }
289
290         c.Check(countCapacityErrors, check.Not(check.Equals), int64(0))
291
292         req := httptest.NewRequest("GET", "/metrics", nil)
293         req.Header.Set("Authorization", "Bearer "+s.cluster.ManagementToken)
294         resp := httptest.NewRecorder()
295         s.disp.ServeHTTP(resp, req)
296         c.Check(resp.Code, check.Equals, http.StatusOK)
297         c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="Create"} [^0].*`)
298         c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="List"} [^0].*`)
299         c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="Destroy"} [^0].*`)
300         c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="1",operation="Create"} [^0].*`)
301         c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="1",operation="List"} 0\n.*`)
302         c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="aborted"} [0-9]+\n.*`)
303         c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="disappeared"} [^0].*`)
304         c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="failure"} [^0].*`)
305         c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="success"} [^0].*`)
306         c.Check(resp.Body.String(), check.Matches, `(?ms).*instances_disappeared{state="shutdown"} [^0].*`)
307         c.Check(resp.Body.String(), check.Matches, `(?ms).*instances_disappeared{state="unknown"} 0\n.*`)
308         c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds{quantile="0.95"} [0-9.]*`)
309         c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds_count [0-9]*`)
310         c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds_sum [0-9.]*`)
311         c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds{quantile="0.95"} [0-9.]*`)
312         c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds_count [0-9]*`)
313         c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds_sum [0-9.]*`)
314         c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_count [0-9]*`)
315         c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_sum [0-9.]*`)
316         c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_count [0-9]*`)
317         c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_sum [0-9e+.]*`)
318         c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="success"} [0-9]*`)
319         c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="success"} [0-9e+.]*`)
320         c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="fail"} [0-9]*`)
321         c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="fail"} [0-9e+.]*`)
322         c.Check(resp.Body.String(), check.Matches, `(?ms).*last_503_time [1-9][0-9e+.]*`)
323         c.Check(resp.Body.String(), check.Matches, `(?ms).*max_concurrent_containers [1-9][0-9e+.]*`)
324 }
325
326 func (s *DispatcherSuite) TestAPIPermissions(c *check.C) {
327         s.cluster.ManagementToken = "abcdefgh"
328         Drivers["test"] = s.stubDriver
329         s.disp.setupOnce.Do(s.disp.initialize)
330         s.disp.queue = &test.Queue{}
331         go s.disp.run()
332
333         for _, token := range []string{"abc", ""} {
334                 req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
335                 if token != "" {
336                         req.Header.Set("Authorization", "Bearer "+token)
337                 }
338                 resp := httptest.NewRecorder()
339                 s.disp.ServeHTTP(resp, req)
340                 if token == "" {
341                         c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
342                 } else {
343                         c.Check(resp.Code, check.Equals, http.StatusForbidden)
344                 }
345         }
346 }
347
348 func (s *DispatcherSuite) TestAPIDisabled(c *check.C) {
349         s.cluster.ManagementToken = ""
350         Drivers["test"] = s.stubDriver
351         s.disp.setupOnce.Do(s.disp.initialize)
352         s.disp.queue = &test.Queue{}
353         go s.disp.run()
354
355         for _, token := range []string{"abc", ""} {
356                 req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
357                 if token != "" {
358                         req.Header.Set("Authorization", "Bearer "+token)
359                 }
360                 resp := httptest.NewRecorder()
361                 s.disp.ServeHTTP(resp, req)
362                 c.Check(resp.Code, check.Equals, http.StatusForbidden)
363         }
364 }
365
// TestInstancesAPI verifies the /arvados/v1/dispatch/instances
// management endpoint: empty before any instance exists, and, after
// the pool creates one instance, a single item with the expected
// state, price, and type fields.
func (s *DispatcherSuite) TestInstancesAPI(c *check.C) {
	s.cluster.ManagementToken = "abcdefgh"
	s.cluster.Containers.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
	Drivers["test"] = s.stubDriver
	s.disp.setupOnce.Do(s.disp.initialize)
	s.disp.queue = &test.Queue{}
	go s.disp.run()

	// Local mirror of the response schema, to decode just the
	// fields checked below.
	type instance struct {
		Instance             string
		WorkerState          string `json:"worker_state"`
		Price                float64
		LastContainerUUID    string `json:"last_container_uuid"`
		ArvadosInstanceType  string `json:"arvados_instance_type"`
		ProviderInstanceType string `json:"provider_instance_type"`
	}
	type instancesResponse struct {
		Items []instance
	}
	// getInstances fetches and decodes the instances list using the
	// management token configured above.
	getInstances := func() instancesResponse {
		req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
		req.Header.Set("Authorization", "Bearer abcdefgh")
		resp := httptest.NewRecorder()
		s.disp.ServeHTTP(resp, req)
		var sr instancesResponse
		c.Check(resp.Code, check.Equals, http.StatusOK)
		err := json.Unmarshal(resp.Body.Bytes(), &sr)
		c.Check(err, check.IsNil)
		return sr
	}

	sr := getInstances()
	c.Check(len(sr.Items), check.Equals, 0)

	// Create one instance (with create errors disabled so it
	// succeeds), and wait for the pool to notify subscribers.
	s.stubDriver.ErrorRateCreate = 0
	ch := s.disp.pool.Subscribe()
	defer s.disp.pool.Unsubscribe(ch)
	ok := s.disp.pool.Create(test.InstanceType(1))
	c.Check(ok, check.Equals, true)
	<-ch

	// Poll (up to 1s) until the new instance appears in the API
	// response.
	for deadline := time.Now().Add(time.Second); time.Now().Before(deadline); {
		sr = getInstances()
		if len(sr.Items) > 0 {
			break
		}
		time.Sleep(time.Millisecond)
	}
	c.Assert(len(sr.Items), check.Equals, 1)
	c.Check(sr.Items[0].Instance, check.Matches, "inst.*")
	c.Check(sr.Items[0].WorkerState, check.Equals, "booting")
	c.Check(sr.Items[0].Price, check.Equals, 0.123)
	c.Check(sr.Items[0].LastContainerUUID, check.Equals, "")
	c.Check(sr.Items[0].ProviderInstanceType, check.Equals, test.InstanceType(1).ProviderType)
	c.Check(sr.Items[0].ArvadosInstanceType, check.Equals, test.InstanceType(1).Name)
}