lib/dispatchcloud/dispatcher_test.go
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package dispatchcloud

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"git.arvados.org/arvados.git/lib/config"
	"git.arvados.org/arvados.git/lib/dispatchcloud/test"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadostest"
	"git.arvados.org/arvados.git/sdk/go/ctxlog"
	"github.com/prometheus/client_golang/prometheus"
	"golang.org/x/crypto/ssh"
	check "gopkg.in/check.v1"
)

var _ = check.Suite(&DispatcherSuite{})

type DispatcherSuite struct {
	ctx            context.Context
	cancel         context.CancelFunc
	cluster        *arvados.Cluster
	stubDriver     *test.StubDriver
	disp           *dispatcher
	error503Server *httptest.Server
}

func (s *DispatcherSuite) SetUpTest(c *check.C) {
	s.ctx, s.cancel = context.WithCancel(context.Background())
	s.ctx = ctxlog.Context(s.ctx, ctxlog.TestLogger(c))
	dispatchpub, _ := test.LoadTestKey(c, "test/sshkey_dispatch")
	dispatchprivraw, err := ioutil.ReadFile("test/sshkey_dispatch")
	c.Assert(err, check.IsNil)

	_, hostpriv := test.LoadTestKey(c, "test/sshkey_vm")
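	// Configure the stub cloud driver with deliberately nonzero
	// create/destroy error rates so the dispatcher's retry and
	// error-handling paths get exercised.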
	s.stubDriver = &test.StubDriver{
		HostKey:                   hostpriv,
		AuthorizedKeys:            []ssh.PublicKey{dispatchpub},
		ErrorRateCreate:           0.1,
		ErrorRateDestroy:          0.1,
		MinTimeBetweenCreateCalls: time.Millisecond,
		QuotaMaxInstances:         10,
	}

	// We need the postgresql connection info from the integration
	// test config.
	cfg, err := config.NewLoader(nil, ctxlog.FromContext(s.ctx)).Load()
	c.Assert(err, check.IsNil)
	testcluster, err := cfg.GetCluster("")
	c.Assert(err, check.IsNil)

	s.cluster = &arvados.Cluster{
		ManagementToken: "test-management-token",
		PostgreSQL:      testcluster.PostgreSQL,
		Containers: arvados.ContainersConfig{
			CrunchRunCommand:       "crunch-run",
			CrunchRunArgumentsList: []string{"--foo", "--extra='args'"},
			DispatchPrivateKey:     string(dispatchprivraw),
			StaleLockTimeout:       arvados.Duration(5 * time.Millisecond),
			RuntimeEngine:          "stub",
			MaxDispatchAttempts:    10,
			MaximumPriceFactor:     1.5,
			CloudVMs: arvados.CloudVMsConfig{
				Driver:               "test",
				SyncInterval:         arvados.Duration(10 * time.Millisecond),
				TimeoutIdle:          arvados.Duration(150 * time.Millisecond),
				TimeoutBooting:       arvados.Duration(150 * time.Millisecond),
				TimeoutProbe:         arvados.Duration(15 * time.Millisecond),
				TimeoutShutdown:      arvados.Duration(5 * time.Millisecond),
				MaxCloudOpsPerSecond: 500,
				InitialQuotaEstimate: 8,
				PollInterval:         arvados.Duration(5 * time.Millisecond),
				ProbeInterval:        arvados.Duration(5 * time.Millisecond),
				MaxProbesPerSecond:   1000,
				TimeoutSignal:        arvados.Duration(3 * time.Millisecond),
				TimeoutStaleRunLock:  arvados.Duration(3 * time.Millisecond),
				TimeoutTERM:          arvados.Duration(20 * time.Millisecond),
				ResourceTags:         map[string]string{"testtag": "test value"},
				TagKeyPrefix:         "test:",
			},
		},
		InstanceTypes: arvados.InstanceTypeMap{
			test.InstanceType(1).Name:  test.InstanceType(1),
			test.InstanceType(2).Name:  test.InstanceType(2),
			test.InstanceType(3).Name:  test.InstanceType(3),
			test.InstanceType(4).Name:  test.InstanceType(4),
			test.InstanceType(6).Name:  test.InstanceType(6),
			test.InstanceType(8).Name:  test.InstanceType(8),
			test.InstanceType(16).Name: test.InstanceType(16),
		},
	}
	arvadostest.SetServiceURL(&s.cluster.Services.DispatchCloud, "http://localhost:/")
	arvadostest.SetServiceURL(&s.cluster.Services.Controller, "https://"+os.Getenv("ARVADOS_API_HOST")+"/")

	arvClient, err := arvados.NewClientFromConfig(s.cluster)
	c.Assert(err, check.IsNil)
	// Disable auto-retry
	arvClient.Timeout = 0

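	// Stub server that always responds 503. Requests to "/503" are
	// proxied here (see arvClientProxy) so a test can force the API
	// client's Last503() timestamp to update.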
	s.error503Server = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		c.Logf("503 stub: returning 503")
		w.WriteHeader(http.StatusServiceUnavailable)
	}))
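	// Use a transport that routes "/503" requests to the stub
	// server and accepts its self-signed TLS certificate.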
	arvClient.Client = &http.Client{
		Transport: &http.Transport{
			Proxy: s.arvClientProxy(c),
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true}}}

	s.disp = &dispatcher{
		Cluster:   s.cluster,
		Context:   s.ctx,
		ArvClient: arvClient,
		AuthToken: arvadostest.AdminToken,
		Registry:  prometheus.NewRegistry(),
	}
	// Test cases can modify s.cluster before calling
	// initialize(), and then modify private state before calling
	// go run().
}

func (s *DispatcherSuite) TearDownTest(c *check.C) {
	s.cancel()
	s.disp.Close()
	s.error503Server.Close()
}

// Intercept outgoing API requests for "/503" and respond HTTP
// 503. This lets us force (*arvados.Client)Last503() to return
// something.
func (s *DispatcherSuite) arvClientProxy(c *check.C) func(*http.Request) (*url.URL, error) {
	return func(req *http.Request) (*url.URL, error) {
		if req.URL.Path == "/503" {
			c.Logf("arvClientProxy: proxying to 503 stub")
			return url.Parse(s.error503Server.URL)
		} else {
			return nil, nil
		}
	}
}

// DispatchToStubDriver checks that the dispatcher wires everything
// together effectively. It uses a real scheduler and worker pool with
// a fake queue and cloud driver. The fake cloud driver injects
// artificial errors in order to exercise a variety of code paths.
func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
	Drivers["test"] = s.stubDriver
	queue := &test.Queue{
		MaxDispatchAttempts: 5,
		ChooseType: func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
			return ChooseInstanceType(s.cluster, ctr)
		},
		Logger: ctxlog.TestLogger(c),
	}
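	// Fill the fake queue with 200 containers with varying
	// priorities and resource requests, so several different
	// instance types get used.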
	for i := 0; i < 200; i++ {
		queue.Containers = append(queue.Containers, arvados.Container{
			UUID:     test.ContainerUUID(i + 1),
			State:    arvados.ContainerStateQueued,
			Priority: int64(i%20 + 1),
			RuntimeConstraints: arvados.RuntimeConstraints{
				RAM:   int64(i%3+1) << 30,
				VCPUs: i%8 + 1,
			},
		})
	}
	s.disp.queue = queue
	s.disp.setupOnce.Do(s.disp.initialize)

	var mtx sync.Mutex
	done := make(chan struct{})
	waiting := map[string]struct{}{}
	for _, ctr := range queue.Containers {
		waiting[ctr.UUID] = struct{}{}
	}
	finishContainer := func(ctr arvados.Container) {
		mtx.Lock()
		defer mtx.Unlock()
		if _, ok := waiting[ctr.UUID]; !ok {
			c.Errorf("container completed twice: %s", ctr.UUID)
			return
		}
		delete(waiting, ctr.UUID)
		if len(waiting) == 100 {
			// trigger scheduler maxConcurrency limit
			c.Logf("test: requesting 503 in order to trigger maxConcurrency limit")
			s.disp.ArvClient.RequestAndDecode(nil, "GET", "503", nil, nil)
		}
		if len(waiting) == 0 {
			close(done)
		}
	}
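	// executeContainer marks the container finished and returns a
	// pseudo-random exit code in 0..3.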
	executeContainer := func(ctr arvados.Container) int {
		finishContainer(ctr)
		return int(rand.Uint32() & 0x3)
	}
	var type4BrokenUntil time.Time
	var countCapacityErrors int64
	vmCount := int32(0)
	s.stubDriver.Queue = queue
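	// SetupVM configures each stub VM as it comes up: instance
	// type 6 always fails with a capacity error, and the remaining
	// VMs get a mix of boot delays, crashes, and probe failures
	// (see the switch cases below).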
	s.stubDriver.SetupVM = func(stubvm *test.StubVM) error {
		if pt := stubvm.Instance().ProviderType(); pt == test.InstanceType(6).ProviderType {
			c.Logf("test: returning capacity error for instance type %s", pt)
			atomic.AddInt64(&countCapacityErrors, 1)
			return test.CapacityError{InstanceTypeSpecific: true}
		}
		n := atomic.AddInt32(&vmCount, 1)
		c.Logf("SetupVM: instance %s n=%d", stubvm.Instance(), n)
		stubvm.Boot = time.Now().Add(time.Duration(rand.Int63n(int64(5 * time.Millisecond))))
		stubvm.CrunchRunDetachDelay = time.Duration(rand.Int63n(int64(10 * time.Millisecond)))
		stubvm.ExecuteContainer = executeContainer
		stubvm.CrashRunningContainer = finishContainer
		stubvm.ExtraCrunchRunArgs = "'--runtime-engine=stub' '--foo' '--extra='\\''args'\\'''"
		switch {
		case stubvm.Instance().ProviderType() == test.InstanceType(4).ProviderType &&
			(type4BrokenUntil.IsZero() || time.Now().Before(type4BrokenUntil)):
			// Initially (at least 2*TimeoutBooting), all
			// instances of this type are completely
			// broken. This ensures the
			// boot_outcomes{outcome="failure"} metric is
			// not zero.
			stubvm.Broken = time.Now()
			if type4BrokenUntil.IsZero() {
				type4BrokenUntil = time.Now().Add(2 * s.cluster.Containers.CloudVMs.TimeoutBooting.Duration())
			}
		case n%7 == 0:
			// some instances start out OK but then stop
			// running any commands
			stubvm.Broken = time.Now().Add(time.Duration(rand.Int63n(90)) * time.Millisecond)
		case n%7 == 1:
			// some instances never pass a run-probe
			stubvm.CrunchRunMissing = true
		case n%7 == 2:
			// some instances start out OK but then start
			// reporting themselves as broken
			stubvm.ReportBroken = time.Now().Add(time.Duration(rand.Int63n(200)) * time.Millisecond)
		default:
			stubvm.CrunchRunCrashRate = 0.1
			stubvm.ArvMountDeadlockRate = 0.1
		}
		return nil
	}
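	// Bugs detected by the stub driver are reported as test
	// failures.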
	s.stubDriver.Bugf = c.Errorf

	start := time.Now()
	go s.disp.run()
	err := s.disp.CheckHealth()
	c.Check(err, check.IsNil)

	for len(waiting) > 0 {
		waswaiting := len(waiting)
		select {
		case <-done:
			// loop will end because len(waiting)==0
		case <-time.After(5 * time.Second):
			if len(waiting) >= waswaiting {
				c.Fatalf("timed out; no progress in 5 s while waiting for %d containers: %q", len(waiting), waiting)
			}
		}
	}
	c.Logf("containers finished (%s), waiting for instances to shutdown and queue to clear", time.Since(start))

	deadline := time.Now().Add(5 * time.Second)
	for range time.NewTicker(10 * time.Millisecond).C {
		insts, err := s.stubDriver.InstanceSets()[0].Instances(nil)
		c.Check(err, check.IsNil)
		queue.Update()
		ents, _ := queue.Entries()
		if len(ents) == 0 && len(insts) == 0 {
			break
		}
		if time.Now().After(deadline) {
			c.Fatalf("timed out with %d containers (%v), %d instances (%+v)", len(ents), ents, len(insts), insts)
		}
	}

	c.Check(countCapacityErrors, check.Not(check.Equals), int64(0))

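	// Scrape the dispatcher's own /metrics endpoint (using the
	// management token) and check that the expected driver,
	// boot-outcome, and timing metrics were reported.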
	req := httptest.NewRequest("GET", "/metrics", nil)
	req.Header.Set("Authorization", "Bearer "+s.cluster.ManagementToken)
	resp := httptest.NewRecorder()
	s.disp.ServeHTTP(resp, req)
	c.Check(resp.Code, check.Equals, http.StatusOK)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="Create"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="List"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="Destroy"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="1",operation="Create"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="1",operation="List"} 0\n.*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="aborted"} [0-9]+\n.*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="disappeared"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="failure"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="success"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*instances_disappeared{state="shutdown"} [^0].*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*instances_disappeared{state="unknown"} 0\n.*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds{quantile="0.95"} [0-9.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds_count [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds_sum [0-9.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds{quantile="0.95"} [0-9.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds_count [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds_sum [0-9.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_count [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_sum [0-9.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_count [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_sum [0-9e+.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="success"} [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="success"} [0-9e+.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="fail"} [0-9]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="fail"} [0-9e+.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*last_503_time [1-9][0-9e+.]*`)
	c.Check(resp.Body.String(), check.Matches, `(?ms).*max_concurrent_containers [1-9][0-9e+.]*`)
}

func (s *DispatcherSuite) TestManagementAPI_Permissions(c *check.C) {
	s.cluster.ManagementToken = "abcdefgh"
	Drivers["test"] = s.stubDriver
	s.disp.setupOnce.Do(s.disp.initialize)
	s.disp.queue = &test.Queue{}
	go s.disp.run()

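	// A request with no token must get 401; a request with the
	// wrong token must get 403.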
	for _, token := range []string{"abc", ""} {
		req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
		if token != "" {
			req.Header.Set("Authorization", "Bearer "+token)
		}
		resp := httptest.NewRecorder()
		s.disp.ServeHTTP(resp, req)
		if token == "" {
			c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
		} else {
			c.Check(resp.Code, check.Equals, http.StatusForbidden)
		}
	}
}

func (s *DispatcherSuite) TestManagementAPI_Disabled(c *check.C) {
	s.cluster.ManagementToken = ""
	Drivers["test"] = s.stubDriver
	s.disp.setupOnce.Do(s.disp.initialize)
	s.disp.queue = &test.Queue{}
	go s.disp.run()

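	// With no management token configured, the management API
	// rejects every request, regardless of the token presented.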
	for _, token := range []string{"abc", ""} {
		req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
		if token != "" {
			req.Header.Set("Authorization", "Bearer "+token)
		}
		resp := httptest.NewRecorder()
		s.disp.ServeHTTP(resp, req)
		c.Check(resp.Code, check.Equals, http.StatusForbidden)
	}
}

func (s *DispatcherSuite) TestManagementAPI_Containers(c *check.C) {
	s.cluster.ManagementToken = "abcdefgh"
	s.cluster.Containers.CloudVMs.InitialQuotaEstimate = 4
	Drivers["test"] = s.stubDriver
	queue := &test.Queue{
		MaxDispatchAttempts: 5,
		ChooseType: func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
			return ChooseInstanceType(s.cluster, ctr)
		},
		Logger: ctxlog.TestLogger(c),
	}
	s.stubDriver.Queue = queue
	s.stubDriver.QuotaMaxInstances = 4
	s.stubDriver.SetupVM = func(stubvm *test.StubVM) error {
		if stubvm.Instance().ProviderType() >= test.InstanceType(4).ProviderType {
			return test.CapacityError{InstanceTypeSpecific: true}
		}
		stubvm.ExecuteContainer = func(ctr arvados.Container) int {
			time.Sleep(5 * time.Second)
			return 0
		}
		return nil
	}
	s.disp.queue = queue
	s.disp.setupOnce.Do(s.disp.initialize)

	go s.disp.run()

	type queueEnt struct {
		Container        arvados.Container
		InstanceType     arvados.InstanceType `json:"instance_type"`
		SchedulingStatus string               `json:"scheduling_status"`
	}
	type containersResponse struct {
		Items []queueEnt
	}
	getContainers := func() containersResponse {
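		// Use a very short refresh interval so each request
		// reflects recent queue changes (inferred from the
		// sQueueRefresh name).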
		sQueueRefresh = time.Millisecond
		req := httptest.NewRequest("GET", "/arvados/v1/dispatch/containers", nil)
		req.Header.Set("Authorization", "Bearer abcdefgh")
		resp := httptest.NewRecorder()
		s.disp.ServeHTTP(resp, req)
		var cresp containersResponse
		c.Check(resp.Code, check.Equals, http.StatusOK)
		err := json.Unmarshal(resp.Body.Bytes(), &cresp)
		c.Check(err, check.IsNil)
		return cresp
	}

	c.Check(getContainers().Items, check.HasLen, 0)

	for i := 0; i < 20; i++ {
		queue.Containers = append(queue.Containers, arvados.Container{
			UUID:     test.ContainerUUID(i),
			State:    arvados.ContainerStateQueued,
			Priority: int64(100 - i),
			RuntimeConstraints: arvados.RuntimeConstraints{
				RAM:   int64(i%3+1) << 30,
				VCPUs: i%8 + 1,
			},
		})
	}
	queue.Update()

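	// Expected steady state: two containers running, two locked
	// and waiting for a suitable instance type, and the rest
	// queued behind the capacity limit in priority order.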
	expect := `
 0 zzzzz-dz642-000000000000000 (Running) ""
 1 zzzzz-dz642-000000000000001 (Running) ""
 2 zzzzz-dz642-000000000000002 (Locked) "waiting for suitable instance type to become available: queue position 1"
 3 zzzzz-dz642-000000000000003 (Locked) "waiting for suitable instance type to become available: queue position 2"
 4 zzzzz-dz642-000000000000004 (Queued) "waiting while cluster is running at capacity: queue position 3"
 5 zzzzz-dz642-000000000000005 (Queued) "waiting while cluster is running at capacity: queue position 4"
 6 zzzzz-dz642-000000000000006 (Queued) "waiting while cluster is running at capacity: queue position 5"
 7 zzzzz-dz642-000000000000007 (Queued) "waiting while cluster is running at capacity: queue position 6"
 8 zzzzz-dz642-000000000000008 (Queued) "waiting while cluster is running at capacity: queue position 7"
 9 zzzzz-dz642-000000000000009 (Queued) "waiting while cluster is running at capacity: queue position 8"
 10 zzzzz-dz642-000000000000010 (Queued) "waiting while cluster is running at capacity: queue position 9"
 11 zzzzz-dz642-000000000000011 (Queued) "waiting while cluster is running at capacity: queue position 10"
 12 zzzzz-dz642-000000000000012 (Queued) "waiting while cluster is running at capacity: queue position 11"
 13 zzzzz-dz642-000000000000013 (Queued) "waiting while cluster is running at capacity: queue position 12"
 14 zzzzz-dz642-000000000000014 (Queued) "waiting while cluster is running at capacity: queue position 13"
 15 zzzzz-dz642-000000000000015 (Queued) "waiting while cluster is running at capacity: queue position 14"
 16 zzzzz-dz642-000000000000016 (Queued) "waiting while cluster is running at capacity: queue position 15"
 17 zzzzz-dz642-000000000000017 (Queued) "waiting while cluster is running at capacity: queue position 16"
 18 zzzzz-dz642-000000000000018 (Queued) "waiting while cluster is running at capacity: queue position 17"
 19 zzzzz-dz642-000000000000019 (Queued) "waiting while cluster is running at capacity: queue position 18"
`
	sequence := make(map[string][]string)
	var summary string
	for deadline := time.Now().Add(time.Second); time.Now().Before(deadline); time.Sleep(time.Millisecond) {
		cresp := getContainers()
		summary = "\n"
		for i, ent := range cresp.Items {
			summary += fmt.Sprintf("% 2d %s (%s) %q\n", i, ent.Container.UUID, ent.Container.State, ent.SchedulingStatus)
			s := sequence[ent.Container.UUID]
			if len(s) == 0 || s[len(s)-1] != ent.SchedulingStatus {
				sequence[ent.Container.UUID] = append(s, ent.SchedulingStatus)
			}
		}
		if summary == expect {
			break
		}
	}
	c.Check(summary, check.Equals, expect)
	for i := 0; i < 5; i++ {
		c.Logf("sequence for container %d:\n... %s", i, strings.Join(sequence[test.ContainerUUID(i)], "\n... "))
	}
}

func (s *DispatcherSuite) TestManagementAPI_Instances(c *check.C) {
	s.cluster.ManagementToken = "abcdefgh"
	s.cluster.Containers.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
	Drivers["test"] = s.stubDriver
	s.disp.setupOnce.Do(s.disp.initialize)
	s.disp.queue = &test.Queue{}
	go s.disp.run()
	defer s.disp.Close()

	type instance struct {
		Instance             string
		WorkerState          string `json:"worker_state"`
		Price                float64
		LastContainerUUID    string `json:"last_container_uuid"`
		ArvadosInstanceType  string `json:"arvados_instance_type"`
		ProviderInstanceType string `json:"provider_instance_type"`
	}
	type instancesResponse struct {
		Items []instance
	}
	getInstances := func() instancesResponse {
		req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
		req.Header.Set("Authorization", "Bearer abcdefgh")
		resp := httptest.NewRecorder()
		s.disp.ServeHTTP(resp, req)
		var sr instancesResponse
		c.Check(resp.Code, check.Equals, http.StatusOK)
		err := json.Unmarshal(resp.Body.Bytes(), &sr)
		c.Check(err, check.IsNil)
		return sr
	}

	sr := getInstances()
	c.Check(len(sr.Items), check.Equals, 0)

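	// Disable create-error injection, bring up one instance of
	// type 1, and wait for the worker pool to announce a change
	// before polling the instances endpoint.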
	s.stubDriver.ErrorRateCreate = 0
	ch := s.disp.pool.Subscribe()
	defer s.disp.pool.Unsubscribe(ch)
	ok := s.disp.pool.Create(test.InstanceType(1))
	c.Check(ok, check.Equals, true)
	<-ch

	for deadline := time.Now().Add(time.Second); time.Now().Before(deadline); {
		sr = getInstances()
		if len(sr.Items) > 0 {
			break
		}
		time.Sleep(time.Millisecond)
	}
	c.Assert(len(sr.Items), check.Equals, 1)
	c.Check(sr.Items[0].Instance, check.Matches, "inst.*")
	c.Check(sr.Items[0].WorkerState, check.Equals, "booting")
	c.Check(sr.Items[0].Price, check.Equals, 0.123)
	c.Check(sr.Items[0].LastContainerUUID, check.Equals, "")
	c.Check(sr.Items[0].ProviderInstanceType, check.Equals, test.InstanceType(1).ProviderType)
	c.Check(sr.Items[0].ArvadosInstanceType, check.Equals, test.InstanceType(1).Name)
}