// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package dispatchcloud

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"git.arvados.org/arvados.git/lib/config"
	"git.arvados.org/arvados.git/lib/dispatchcloud/test"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadostest"
	"git.arvados.org/arvados.git/sdk/go/ctxlog"
	"github.com/prometheus/client_golang/prometheus"
	"golang.org/x/crypto/ssh"
	check "gopkg.in/check.v1"
)

var _ = check.Suite(&DispatcherSuite{})
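
// This suite can be run on its own with gocheck's filter flag, e.g.:
//
//	go test ./lib/dispatchcloud -check.f DispatcherSuite
//
// It needs the integration-test environment: SetUpTest loads the
// test cluster config and reads the controller address from
// ARVADOS_API_HOST.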
type DispatcherSuite struct {
	ctx            context.Context
	cancel         context.CancelFunc
	cluster        *arvados.Cluster
	stubDriver     *test.StubDriver
	disp           *dispatcher
	error503Server *httptest.Server
}

func (s *DispatcherSuite) SetUpTest(c *check.C) {
	s.ctx, s.cancel = context.WithCancel(context.Background())
	s.ctx = ctxlog.Context(s.ctx, ctxlog.TestLogger(c))
	dispatchpub, _ := test.LoadTestKey(c, "test/sshkey_dispatch")
	dispatchprivraw, err := ioutil.ReadFile("test/sshkey_dispatch")
	c.Assert(err, check.IsNil)

	_, hostpriv := test.LoadTestKey(c, "test/sshkey_vm")
	s.stubDriver = &test.StubDriver{
		HostKey:                   hostpriv,
		AuthorizedKeys:            []ssh.PublicKey{dispatchpub},
		ErrorRateCreate:           0.1,
		ErrorRateDestroy:          0.1,
		MinTimeBetweenCreateCalls: time.Millisecond,
		QuotaMaxInstances:         10,
	}
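
	// The nonzero error rates above make the stub cloud fail a
	// fraction of create/destroy calls, exercising the dispatcher's
	// error handling and retry paths.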

	// We need the postgresql connection info from the integration
	// test config.
	cfg, err := config.NewLoader(nil, ctxlog.FromContext(s.ctx)).Load()
	c.Assert(err, check.IsNil)
	testcluster, err := cfg.GetCluster("")
	c.Assert(err, check.IsNil)

	s.cluster = &arvados.Cluster{
		ManagementToken: "test-management-token",
		PostgreSQL:      testcluster.PostgreSQL,
		Containers: arvados.ContainersConfig{
			CrunchRunCommand:       "crunch-run",
			CrunchRunArgumentsList: []string{"--foo", "--extra='args'"},
			DispatchPrivateKey:     string(dispatchprivraw),
			StaleLockTimeout:       arvados.Duration(5 * time.Millisecond),
			RuntimeEngine:          "stub",
			MaxDispatchAttempts:    10,
			MaximumPriceFactor:     1.5,
			CloudVMs: arvados.CloudVMsConfig{
				Driver:               "test",
				SyncInterval:         arvados.Duration(10 * time.Millisecond),
				TimeoutIdle:          arvados.Duration(150 * time.Millisecond),
				TimeoutBooting:       arvados.Duration(150 * time.Millisecond),
				TimeoutProbe:         arvados.Duration(15 * time.Millisecond),
				TimeoutShutdown:      arvados.Duration(5 * time.Millisecond),
				MaxCloudOpsPerSecond: 500,
				InitialQuotaEstimate: 8,
				PollInterval:         arvados.Duration(5 * time.Millisecond),
				ProbeInterval:        arvados.Duration(5 * time.Millisecond),
				MaxProbesPerSecond:   1000,
				TimeoutSignal:        arvados.Duration(3 * time.Millisecond),
				TimeoutStaleRunLock:  arvados.Duration(3 * time.Millisecond),
				TimeoutTERM:          arvados.Duration(20 * time.Millisecond),
				ResourceTags:         map[string]string{"testtag": "test value"},
				TagKeyPrefix:         "test:",
			},
		},
		InstanceTypes: arvados.InstanceTypeMap{
			test.InstanceType(1).Name:  test.InstanceType(1),
			test.InstanceType(2).Name:  test.InstanceType(2),
			test.InstanceType(3).Name:  test.InstanceType(3),
			test.InstanceType(4).Name:  test.InstanceType(4),
			test.InstanceType(6).Name:  test.InstanceType(6),
			test.InstanceType(8).Name:  test.InstanceType(8),
			test.InstanceType(16).Name: test.InstanceType(16),
		},
	}
	arvadostest.SetServiceURL(&s.cluster.Services.DispatchCloud, "http://localhost:/")
	arvadostest.SetServiceURL(&s.cluster.Services.Controller, "https://"+os.Getenv("ARVADOS_API_HOST")+"/")

	arvClient, err := arvados.NewClientFromConfig(s.cluster)
	c.Assert(err, check.IsNil)
	// Disable auto-retry
	arvClient.Timeout = 0

	s.error503Server = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		c.Logf("503 stub: returning 503")
		w.WriteHeader(http.StatusServiceUnavailable)
	}))
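	// Route the client through arvClientProxy (below) so requests
	// for "/503" are diverted to error503Server; TLS verification is
	// disabled because the httptest server uses a self-signed
	// certificate.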
	arvClient.Client = &http.Client{
		Transport: &http.Transport{
			Proxy: s.arvClientProxy(c),
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true}}}

	s.disp = &dispatcher{
		Cluster:   s.cluster,
		Context:   s.ctx,
		ArvClient: arvClient,
		AuthToken: arvadostest.AdminToken,
		Registry:  prometheus.NewRegistry(),
		// Providing a stub queue here prevents
		// disp.initialize() from making a real one that uses
		// the integration test servers/database.
		queue: &test.Queue{},
	}
	// Test cases can modify s.cluster before calling
	// initialize(), and then modify private state before calling
	// go().
}

func (s *DispatcherSuite) TearDownTest(c *check.C) {
	s.cancel()
	s.disp.Close()
	s.error503Server.Close()
}

// Intercept outgoing API requests for "/503" and respond HTTP
// 503. This lets us force (*arvados.Client)Last503() to return
// a non-zero time.
func (s *DispatcherSuite) arvClientProxy(c *check.C) func(*http.Request) (*url.URL, error) {
	return func(req *http.Request) (*url.URL, error) {
		if req.URL.Path == "/503" {
			c.Logf("arvClientProxy: proxying to 503 stub")
			return url.Parse(s.error503Server.URL)
		} else {
			return nil, nil
		}
	}
}

// DispatchToStubDriver checks that the dispatcher wires everything
// together effectively. It uses a real scheduler and worker pool with
// a fake queue and cloud driver. The fake cloud driver injects
// artificial errors in order to exercise a variety of code paths.
func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
	Drivers["test"] = s.stubDriver
	queue := &test.Queue{
		MaxDispatchAttempts: 5,
		ChooseType: func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
			return ChooseInstanceType(s.cluster, ctr)
		},
		Logger: ctxlog.TestLogger(c),
	}
	for i := 0; i < 200; i++ {
		queue.Containers = append(queue.Containers, arvados.Container{
			UUID:     test.ContainerUUID(i + 1),
			State:    arvados.ContainerStateQueued,
			Priority: int64(i%20 + 1),
			RuntimeConstraints: arvados.RuntimeConstraints{
				RAM:   int64(i%3+1) << 30,
				VCPUs: i%8 + 1,
			},
		})
	}
	s.disp.setupOnce.Do(s.disp.initialize)
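	// initialize() is done; install the populated fake queue before
	// the dispatcher's background goroutines start in run().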
	s.disp.queue = queue

	var mtx sync.Mutex
	done := make(chan struct{})
	waiting := map[string]struct{}{}
	for _, ctr := range queue.Containers {
		waiting[ctr.UUID] = struct{}{}
	}
	finishContainer := func(ctr arvados.Container) {
		mtx.Lock()
		defer mtx.Unlock()
		if _, ok := waiting[ctr.UUID]; !ok {
			c.Errorf("container completed twice: %s", ctr.UUID)
			return
		}
		delete(waiting, ctr.UUID)
		if len(waiting) == 100 {
			// trigger scheduler maxConcurrency limit
			c.Logf("test: requesting 503 in order to trigger maxConcurrency limit")
			s.disp.ArvClient.RequestAndDecode(nil, "GET", "503", nil, nil)
		}
		if len(waiting) == 0 {
			close(done)
		}
	}
	executeContainer := func(ctr arvados.Container) int {
		finishContainer(ctr)
		return int(rand.Uint32() & 0x3)
	}
	var type4BrokenUntil time.Time
	var countCapacityErrors int64
	var vmCount int32
	s.stubDriver.Queue = queue
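	// The stub driver calls SetupVM for each fake VM it boots (see
	// lib/dispatchcloud/test); use it here to give each instance a
	// different failure mode.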
	s.stubDriver.SetupVM = func(stubvm *test.StubVM) error {
		if pt := stubvm.Instance().ProviderType(); pt == test.InstanceType(6).ProviderType {
			c.Logf("test: returning capacity error for instance type %s", pt)
			atomic.AddInt64(&countCapacityErrors, 1)
			return test.CapacityError{InstanceTypeSpecific: true}
		}
		n := atomic.AddInt32(&vmCount, 1)
		c.Logf("SetupVM: instance %s n=%d", stubvm.Instance(), n)
		stubvm.Boot = time.Now().Add(time.Duration(rand.Int63n(int64(5 * time.Millisecond))))
		stubvm.CrunchRunDetachDelay = time.Duration(rand.Int63n(int64(10 * time.Millisecond)))
		stubvm.ExecuteContainer = executeContainer
		stubvm.CrashRunningContainer = finishContainer
		stubvm.ExtraCrunchRunArgs = "'--runtime-engine=stub' '--foo' '--extra='\\''args'\\'''"
		switch {
		case stubvm.Instance().ProviderType() == test.InstanceType(4).ProviderType &&
			(type4BrokenUntil.IsZero() || time.Now().Before(type4BrokenUntil)):
			// Initially (at least 2*TimeoutBooting), all
			// instances of this type are completely
			// broken. This ensures the
			// boot_outcomes{outcome="failure"} metric is
			// not zero.
			stubvm.Broken = time.Now()
			if type4BrokenUntil.IsZero() {
				type4BrokenUntil = time.Now().Add(2 * s.cluster.Containers.CloudVMs.TimeoutBooting.Duration())
			}
		case n%7 == 0:
			// some instances start out OK but then stop
			// running any commands
			stubvm.Broken = time.Now().Add(time.Duration(rand.Int63n(90)) * time.Millisecond)
		case n%7 == 1:
			// some instances never pass a run-probe
			stubvm.CrunchRunMissing = true
		case n%7 == 2:
			// some instances start out OK but then start
			// reporting themselves as broken
			stubvm.ReportBroken = time.Now().Add(time.Duration(rand.Int63n(200)) * time.Millisecond)
		default:
			stubvm.CrunchRunCrashRate = 0.1
			stubvm.ArvMountDeadlockRate = 0.1
		}
		return nil
	}
	s.stubDriver.Bugf = c.Errorf
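
	// Run the dispatcher until all 200 containers have finished,
	// failing the test if progress stalls for more than 5 seconds.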
	start := time.Now()
	go s.disp.run()
	err := s.disp.CheckHealth()
	c.Check(err, check.IsNil)

	for len(waiting) > 0 {
		waswaiting := len(waiting)
		select {
		case <-done:
			// loop will end because len(waiting)==0
		case <-time.After(5 * time.Second):
			if len(waiting) >= waswaiting {
				c.Fatalf("timed out; no progress in 5 s while waiting for %d containers: %q", len(waiting), waiting)
			}
		}
	}
	c.Logf("containers finished (%s), waiting for instances to shutdown and queue to clear", time.Since(start))

	deadline := time.Now().Add(5 * time.Second)
	for range time.NewTicker(10 * time.Millisecond).C {
		insts, err := s.stubDriver.InstanceSets()[0].Instances(nil)
		c.Check(err, check.IsNil)
		queue.Update()
		ents, _ := queue.Entries()
		if len(ents) == 0 && len(insts) == 0 {
			break
		}
		if time.Now().After(deadline) {
			c.Fatalf("timed out with %d containers (%v), %d instances (%+v)", len(ents), ents, len(insts), insts)
		}
	}

	c.Check(countCapacityErrors, check.Not(check.Equals), int64(0))
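
	// Scrape the management /metrics endpoint and spot-check
	// counters that the scenario above should have driven away from
	// (or kept at) zero.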
	req := httptest.NewRequest("GET", "/metrics", nil)
	req.Header.Set("Authorization", "Bearer "+s.cluster.ManagementToken)
	resp := httptest.NewRecorder()
	s.disp.ServeHTTP(resp, req)
	c.Check(resp.Code, check.Equals, http.StatusOK)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="Create"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="List"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="Destroy"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="1",operation="Create"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="1",operation="List"} 0\n.*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="aborted"} [0-9]+\n.*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="disappeared"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="failure"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="success"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*instances_disappeared{state="shutdown"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*instances_disappeared{state="unknown"} 0\n.*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds{quantile="0.95"} [0-9.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds_count [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds_sum [0-9.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds{quantile="0.95"} [0-9.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds_count [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds_sum [0-9.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_count [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_sum [0-9.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_count [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_sum [0-9e+.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="success"} [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="success"} [0-9e+.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="fail"} [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="fail"} [0-9e+.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*last_503_time [1-9][0-9e+.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*max_concurrent_containers [1-9][0-9e+.]*`)
}
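
// TestManagementAPI_Permissions checks that management API requests
// are rejected with 401 when no token is provided, and with 403 when
// the token does not match the configured ManagementToken.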
func (s *DispatcherSuite) TestManagementAPI_Permissions(c *check.C) {
	s.cluster.ManagementToken = "abcdefgh"
	Drivers["test"] = s.stubDriver
	s.disp.setupOnce.Do(s.disp.initialize)
	go s.disp.run()

	for _, token := range []string{"abc", ""} {
		req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
		if token != "" {
			req.Header.Set("Authorization", "Bearer "+token)
		}
		resp := httptest.NewRecorder()
		s.disp.ServeHTTP(resp, req)
		if token == "" {
			c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
		} else {
			c.Check(resp.Code, check.Equals, http.StatusForbidden)
		}
	}
}
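
// TestManagementAPI_Disabled checks that when no ManagementToken is
// configured, the management API is disabled and every request is
// rejected, with or without a token.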
func (s *DispatcherSuite) TestManagementAPI_Disabled(c *check.C) {
	s.cluster.ManagementToken = ""
	Drivers["test"] = s.stubDriver
	s.disp.setupOnce.Do(s.disp.initialize)
	go s.disp.run()

	for _, token := range []string{"abc", ""} {
		req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
		if token != "" {
			req.Header.Set("Authorization", "Bearer "+token)
		}
		resp := httptest.NewRecorder()
		s.disp.ServeHTTP(resp, req)
		c.Check(resp.Code, check.Equals, http.StatusForbidden)
	}
}

func (s *DispatcherSuite) TestManagementAPI_Containers(c *check.C) {
	s.cluster.ManagementToken = "abcdefgh"
	s.cluster.Containers.CloudVMs.InitialQuotaEstimate = 4
	Drivers["test"] = s.stubDriver
	queue := &test.Queue{
		MaxDispatchAttempts: 5,
		ChooseType: func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
			return ChooseInstanceType(s.cluster, ctr)
		},
		Logger: ctxlog.TestLogger(c),
	}
	s.stubDriver.Queue = queue
	s.stubDriver.QuotaMaxInstances = 4
	s.stubDriver.SetupVM = func(stubvm *test.StubVM) error {
		if stubvm.Instance().ProviderType() >= test.InstanceType(4).ProviderType {
			return test.CapacityError{InstanceTypeSpecific: true}
		}
		stubvm.ExecuteContainer = func(ctr arvados.Container) int {
			time.Sleep(5 * time.Second)
			return 0
		}
		return nil
	}
	s.disp.setupOnce.Do(s.disp.initialize)
	s.disp.queue = queue
	go s.disp.run()

	type queueEnt struct {
		Container        arvados.Container
		InstanceType     arvados.InstanceType `json:"instance_type"`
		SchedulingStatus string               `json:"scheduling_status"`
	}
	type containersResponse struct {
		Items []queueEnt
	}
	getContainers := func() containersResponse {
		schedQueueRefresh = time.Millisecond
		req := httptest.NewRequest("GET", "/arvados/v1/dispatch/containers", nil)
		req.Header.Set("Authorization", "Bearer abcdefgh")
		resp := httptest.NewRecorder()
		s.disp.ServeHTTP(resp, req)
		var cresp containersResponse
		c.Check(resp.Code, check.Equals, http.StatusOK)
		err := json.Unmarshal(resp.Body.Bytes(), &cresp)
		c.Check(err, check.IsNil)
		return cresp
	}

	c.Check(getContainers().Items, check.HasLen, 0)
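
	// Add 20 containers. With only 4 instances allowed and capacity
	// errors for the larger instance types, just a few can run at
	// once; the rest should appear in the response with a
	// scheduling_status message explaining what they are waiting
	// for.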
	for i := 0; i < 20; i++ {
		queue.Containers = append(queue.Containers, arvados.Container{
			UUID:     test.ContainerUUID(i),
			State:    arvados.ContainerStateQueued,
			Priority: int64(100 - i),
			RuntimeConstraints: arvados.RuntimeConstraints{
				RAM: int64(i%3+1) << 30,
			},
		})
	}
	queue.Update()

	expect := `
 0 zzzzz-dz642-000000000000000 (Running) ""
 1 zzzzz-dz642-000000000000001 (Running) ""
 2 zzzzz-dz642-000000000000002 (Locked) "waiting for suitable instance type to become available: queue position 1"
 3 zzzzz-dz642-000000000000003 (Locked) "waiting for suitable instance type to become available: queue position 2"
 4 zzzzz-dz642-000000000000004 (Queued) "waiting while cluster is running at capacity: queue position 3"
 5 zzzzz-dz642-000000000000005 (Queued) "waiting while cluster is running at capacity: queue position 4"
 6 zzzzz-dz642-000000000000006 (Queued) "waiting while cluster is running at capacity: queue position 5"
 7 zzzzz-dz642-000000000000007 (Queued) "waiting while cluster is running at capacity: queue position 6"
 8 zzzzz-dz642-000000000000008 (Queued) "waiting while cluster is running at capacity: queue position 7"
 9 zzzzz-dz642-000000000000009 (Queued) "waiting while cluster is running at capacity: queue position 8"
10 zzzzz-dz642-000000000000010 (Queued) "waiting while cluster is running at capacity: queue position 9"
11 zzzzz-dz642-000000000000011 (Queued) "waiting while cluster is running at capacity: queue position 10"
12 zzzzz-dz642-000000000000012 (Queued) "waiting while cluster is running at capacity: queue position 11"
13 zzzzz-dz642-000000000000013 (Queued) "waiting while cluster is running at capacity: queue position 12"
14 zzzzz-dz642-000000000000014 (Queued) "waiting while cluster is running at capacity: queue position 13"
15 zzzzz-dz642-000000000000015 (Queued) "waiting while cluster is running at capacity: queue position 14"
16 zzzzz-dz642-000000000000016 (Queued) "waiting while cluster is running at capacity: queue position 15"
17 zzzzz-dz642-000000000000017 (Queued) "waiting while cluster is running at capacity: queue position 16"
18 zzzzz-dz642-000000000000018 (Queued) "waiting while cluster is running at capacity: queue position 17"
19 zzzzz-dz642-000000000000019 (Queued) "waiting while cluster is running at capacity: queue position 18"
`
	sequence := make(map[string][]string)
	var summary string
	for deadline := time.Now().Add(time.Second); time.Now().Before(deadline); time.Sleep(time.Millisecond) {
		cresp := getContainers()
		summary = "\n"
		for i, ent := range cresp.Items {
			summary += fmt.Sprintf("% 2d %s (%s) %q\n", i, ent.Container.UUID, ent.Container.State, ent.SchedulingStatus)
			s := sequence[ent.Container.UUID]
			if len(s) == 0 || s[len(s)-1] != ent.SchedulingStatus {
				sequence[ent.Container.UUID] = append(s, ent.SchedulingStatus)
			}
		}
		if summary == expect {
			break
		}
	}
	c.Check(summary, check.Equals, expect)
	for i := 0; i < 5; i++ {
		c.Logf("sequence for container %d:\n... %s", i, strings.Join(sequence[test.ContainerUUID(i)], "\n... "))
	}
}

func (s *DispatcherSuite) TestManagementAPI_Instances(c *check.C) {
	s.cluster.ManagementToken = "abcdefgh"
	s.cluster.Containers.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
	Drivers["test"] = s.stubDriver
	s.disp.setupOnce.Do(s.disp.initialize)
	go s.disp.run()

	type instance struct {
		Instance             string
		WorkerState          string `json:"worker_state"`
		Price                float64
		LastContainerUUID    string `json:"last_container_uuid"`
		ArvadosInstanceType  string `json:"arvados_instance_type"`
		ProviderInstanceType string `json:"provider_instance_type"`
	}
	type instancesResponse struct {
		Items []instance
	}
	getInstances := func() instancesResponse {
		req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
		req.Header.Set("Authorization", "Bearer abcdefgh")
		resp := httptest.NewRecorder()
		s.disp.ServeHTTP(resp, req)
		var sr instancesResponse
		c.Check(resp.Code, check.Equals, http.StatusOK)
		err := json.Unmarshal(resp.Body.Bytes(), &sr)
		c.Check(err, check.IsNil)
		return sr
	}

	sr := getInstances()
	c.Check(len(sr.Items), check.Equals, 0)
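
	// Create one instance through the worker pool and wait for it to
	// appear in the instances API response.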
	s.stubDriver.ErrorRateCreate = 0
	ch := s.disp.pool.Subscribe()
	defer s.disp.pool.Unsubscribe(ch)
	ok := s.disp.pool.Create(test.InstanceType(1))
	c.Check(ok, check.Equals, true)
	<-ch

	for deadline := time.Now().Add(time.Second); time.Now().Before(deadline); {
		sr = getInstances()
		if len(sr.Items) > 0 {
			break
		}
		time.Sleep(time.Millisecond)
	}
	c.Assert(len(sr.Items), check.Equals, 1)
	c.Check(sr.Items[0].Instance, check.Matches, "inst.*")
	c.Check(sr.Items[0].WorkerState, check.Equals, "booting")
	c.Check(sr.Items[0].Price, check.Equals, 0.123)
	c.Check(sr.Items[0].LastContainerUUID, check.Equals, "")
	c.Check(sr.Items[0].ProviderInstanceType, check.Equals, test.InstanceType(1).ProviderType)
	c.Check(sr.Items[0].ArvadosInstanceType, check.Equals, test.InstanceType(1).Name)
}