13647: Use cluster config instead of custom keepstore config.
[arvados.git] / lib / config / deprecated_keepstore_test.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 package config
6
7 import (
8         "bytes"
9         "encoding/json"
10         "fmt"
11         "io"
12         "io/ioutil"
13         "os"
14         "sort"
15         "strconv"
16         "strings"
17         "text/template"
18         "time"
19
20         "git.curoverse.com/arvados.git/sdk/go/arvados"
21         "git.curoverse.com/arvados.git/sdk/go/arvadostest"
22         check "gopkg.in/check.v1"
23 )
24
// KeepstoreMigrationSuite tests how legacy (deprecated) keepstore
// config files are loaded and merged into the cluster config.
type KeepstoreMigrationSuite struct {
	hostname string // blank = use test system's hostname
	// ksByPort maps service_port to the fake keep_services records
	// created in SetUpSuite (service_host is always "localhost").
	ksByPort map[int]arvados.KeepService
}

var _ = check.Suite(&KeepstoreMigrationSuite{})
31
32 func (s *KeepstoreMigrationSuite) SetUpSuite(c *check.C) {
33         // We don't need the keepstore servers, but we do need
34         // keep_services listings that point to localhost, rather than
35         // the apiserver fixtures that point to fictional hosts
36         // keep*.zzzzz.arvadosapi.com.
37
38         client := arvados.NewClientFromEnv()
39
40         // Delete existing non-proxy listings.
41         var svcList arvados.KeepServiceList
42         err := client.RequestAndDecode(&svcList, "GET", "arvados/v1/keep_services", nil, nil)
43         c.Assert(err, check.IsNil)
44         for _, ks := range svcList.Items {
45                 if ks.ServiceType != "proxy" {
46                         err = client.RequestAndDecode(new(struct{}), "DELETE", "arvados/v1/keep_services/"+ks.UUID, nil, nil)
47                         c.Assert(err, check.IsNil)
48                 }
49         }
50         // Add new fake listings.
51         s.ksByPort = map[int]arvados.KeepService{}
52         for _, port := range []int{25107, 25108} {
53                 var ks arvados.KeepService
54                 err = client.RequestAndDecode(&ks, "POST", "arvados/v1/keep_services", nil, map[string]interface{}{
55                         "keep_service": map[string]interface{}{
56                                 "service_type": "disk",
57                                 "service_host": "localhost",
58                                 "service_port": port,
59                         },
60                 })
61                 c.Assert(err, check.IsNil)
62                 s.ksByPort[port] = ks
63         }
64 }
65
66 func (s *KeepstoreMigrationSuite) checkEquivalentWithKeepstoreConfig(c *check.C, keepstoreconfig, clusterconfig, expectedconfig string) {
67         keepstorefile, err := ioutil.TempFile("", "")
68         c.Assert(err, check.IsNil)
69         defer os.Remove(keepstorefile.Name())
70         _, err = io.WriteString(keepstorefile, keepstoreconfig)
71         c.Assert(err, check.IsNil)
72         err = keepstorefile.Close()
73         c.Assert(err, check.IsNil)
74
75         gotldr := testLoader(c, clusterconfig, nil)
76         gotldr.KeepstorePath = keepstorefile.Name()
77         expectedldr := testLoader(c, expectedconfig, nil)
78         checkEquivalentLoaders(c, gotldr, expectedldr)
79 }
80
81 func (s *KeepstoreMigrationSuite) TestDeprecatedKeepstoreConfig(c *check.C) {
82         keyfile, err := ioutil.TempFile("", "")
83         c.Assert(err, check.IsNil)
84         defer os.Remove(keyfile.Name())
85         io.WriteString(keyfile, "blobsigningkey\n")
86
87         hostname, err := os.Hostname()
88         c.Assert(err, check.IsNil)
89
90         s.checkEquivalentWithKeepstoreConfig(c, `
91 Listen: ":12345"
92 Debug: true
93 LogFormat: text
94 MaxBuffers: 1234
95 MaxRequests: 2345
96 BlobSignatureTTL: 123m
97 BlobSigningKeyFile: `+keyfile.Name()+`
98 `, `
99 Clusters:
100   z1111:
101     {}
102 `, `
103 Clusters:
104   z1111:
105     Services:
106       Keepstore:
107         InternalURLs:
108           "http://`+hostname+`:12345": {}
109     SystemLogs:
110       Format: text
111       LogLevel: debug
112     API:
113       MaxKeepBlockBuffers: 1234
114       MaxConcurrentRequests: 2345
115     Collections:
116       BlobSigningTTL: 123m
117       BlobSigningKey: blobsigningkey
118 `)
119 }
120
121 func (s *KeepstoreMigrationSuite) TestDiscoverLocalVolumes(c *check.C) {
122         tmpd, err := ioutil.TempDir("", "")
123         c.Assert(err, check.IsNil)
124         defer os.RemoveAll(tmpd)
125         err = os.Mkdir(tmpd+"/keep", 0777)
126         c.Assert(err, check.IsNil)
127
128         tmpf, err := ioutil.TempFile("", "")
129         c.Assert(err, check.IsNil)
130         defer os.Remove(tmpf.Name())
131
132         // read/write
133         _, err = fmt.Fprintf(tmpf, "/dev/xvdb %s ext4 rw,noexec 0 0\n", tmpd)
134         c.Assert(err, check.IsNil)
135
136         s.testDeprecatedVolume(c, "DiscoverVolumesFromMountsFile: "+tmpf.Name(), arvados.Volume{
137                 Driver:      "Directory",
138                 ReadOnly:    false,
139                 Replication: 1,
140         }, &arvados.DirectoryVolumeDriverParameters{
141                 Root:      tmpd + "/keep",
142                 Serialize: false,
143         }, &arvados.DirectoryVolumeDriverParameters{})
144
145         // read-only
146         tmpf.Seek(0, os.SEEK_SET)
147         tmpf.Truncate(0)
148         _, err = fmt.Fprintf(tmpf, "/dev/xvdb %s ext4 ro,noexec 0 0\n", tmpd)
149         c.Assert(err, check.IsNil)
150
151         s.testDeprecatedVolume(c, "DiscoverVolumesFromMountsFile: "+tmpf.Name(), arvados.Volume{
152                 Driver:      "Directory",
153                 ReadOnly:    true,
154                 Replication: 1,
155         }, &arvados.DirectoryVolumeDriverParameters{
156                 Root:      tmpd + "/keep",
157                 Serialize: false,
158         }, &arvados.DirectoryVolumeDriverParameters{})
159 }
160
161 func (s *KeepstoreMigrationSuite) TestDeprecatedVolumes(c *check.C) {
162         accesskeyfile, err := ioutil.TempFile("", "")
163         c.Assert(err, check.IsNil)
164         defer os.Remove(accesskeyfile.Name())
165         io.WriteString(accesskeyfile, "accesskeydata\n")
166
167         secretkeyfile, err := ioutil.TempFile("", "")
168         c.Assert(err, check.IsNil)
169         defer os.Remove(secretkeyfile.Name())
170         io.WriteString(secretkeyfile, "secretkeydata\n")
171
172         // s3, empty/default
173         s.testDeprecatedVolume(c, `
174 Volumes:
175 - Type: S3
176 `, arvados.Volume{
177                 Driver:      "S3",
178                 Replication: 1,
179         }, &arvados.S3VolumeDriverParameters{}, &arvados.S3VolumeDriverParameters{})
180
181         // s3, fully configured
182         s.testDeprecatedVolume(c, `
183 Volumes:
184 - Type: S3
185   AccessKeyFile: `+accesskeyfile.Name()+`
186   SecretKeyFile: `+secretkeyfile.Name()+`
187   Endpoint: https://storage.googleapis.com
188   Region: us-east-1z
189   Bucket: testbucket
190   LocationConstraint: true
191   IndexPageSize: 1234
192   S3Replication: 4
193   ConnectTimeout: 3m
194   ReadTimeout: 4m
195   RaceWindow: 5m
196   UnsafeDelete: true
197 `, arvados.Volume{
198                 Driver:      "S3",
199                 Replication: 4,
200         }, &arvados.S3VolumeDriverParameters{
201                 AccessKey:          "accesskeydata",
202                 SecretKey:          "secretkeydata",
203                 Endpoint:           "https://storage.googleapis.com",
204                 Region:             "us-east-1z",
205                 Bucket:             "testbucket",
206                 LocationConstraint: true,
207                 IndexPageSize:      1234,
208                 ConnectTimeout:     arvados.Duration(time.Minute * 3),
209                 ReadTimeout:        arvados.Duration(time.Minute * 4),
210                 RaceWindow:         arvados.Duration(time.Minute * 5),
211                 UnsafeDelete:       true,
212         }, &arvados.S3VolumeDriverParameters{})
213
214         // azure, empty/default
215         s.testDeprecatedVolume(c, `
216 Volumes:
217 - Type: Azure
218 `, arvados.Volume{
219                 Driver:      "Azure",
220                 Replication: 1,
221         }, &arvados.AzureVolumeDriverParameters{}, &arvados.AzureVolumeDriverParameters{})
222
223         // azure, fully configured
224         s.testDeprecatedVolume(c, `
225 Volumes:
226 - Type: Azure
227   ReadOnly: true
228   StorageAccountName: storageacctname
229   StorageAccountKeyFile: `+secretkeyfile.Name()+`
230   StorageBaseURL: https://example.example
231   ContainerName: testctr
232   LocationConstraint: true
233   AzureReplication: 4
234   RequestTimeout: 3m
235   ListBlobsRetryDelay: 4m
236   ListBlobsMaxAttempts: 5
237 `, arvados.Volume{
238                 Driver:      "Azure",
239                 ReadOnly:    true,
240                 Replication: 4,
241         }, &arvados.AzureVolumeDriverParameters{
242                 StorageAccountName:   "storageacctname",
243                 StorageAccountKey:    "secretkeydata",
244                 StorageBaseURL:       "https://example.example",
245                 ContainerName:        "testctr",
246                 RequestTimeout:       arvados.Duration(time.Minute * 3),
247                 ListBlobsRetryDelay:  arvados.Duration(time.Minute * 4),
248                 ListBlobsMaxAttempts: 5,
249         }, &arvados.AzureVolumeDriverParameters{})
250
251         // directory, empty/default
252         s.testDeprecatedVolume(c, `
253 Volumes:
254 - Type: Directory
255   Root: /tmp/xyzzy
256 `, arvados.Volume{
257                 Driver:      "Directory",
258                 Replication: 1,
259         }, &arvados.DirectoryVolumeDriverParameters{
260                 Root: "/tmp/xyzzy",
261         }, &arvados.DirectoryVolumeDriverParameters{})
262
263         // directory, fully configured
264         s.testDeprecatedVolume(c, `
265 Volumes:
266 - Type: Directory
267   ReadOnly: true
268   Root: /tmp/xyzzy
269   DirectoryReplication: 4
270   Serialize: true
271 `, arvados.Volume{
272                 Driver:      "Directory",
273                 ReadOnly:    true,
274                 Replication: 4,
275         }, &arvados.DirectoryVolumeDriverParameters{
276                 Root:      "/tmp/xyzzy",
277                 Serialize: true,
278         }, &arvados.DirectoryVolumeDriverParameters{})
279 }
280
281 func (s *KeepstoreMigrationSuite) testDeprecatedVolume(c *check.C, oldconfigdata string, expectvol arvados.Volume, expectparams interface{}, paramsdst interface{}) {
282         hostname := s.hostname
283         if hostname == "" {
284                 h, err := os.Hostname()
285                 c.Assert(err, check.IsNil)
286                 hostname = h
287         }
288
289         oldconfig, err := ioutil.TempFile("", "")
290         c.Assert(err, check.IsNil)
291         defer os.Remove(oldconfig.Name())
292         io.WriteString(oldconfig, "Listen: :12345\n"+oldconfigdata)
293         if !strings.Contains(oldconfigdata, "DiscoverVolumesFromMountsFile") {
294                 // Prevent tests from looking at the real /proc/mounts on the test host.
295                 io.WriteString(oldconfig, "\nDiscoverVolumesFromMountsFile: /dev/null\n")
296         }
297
298         ldr := testLoader(c, "Clusters: {z1111: {}}", nil)
299         ldr.KeepstorePath = oldconfig.Name()
300         cfg, err := ldr.Load()
301         c.Assert(err, check.IsNil)
302         cc := cfg.Clusters["z1111"]
303         c.Check(cc.Volumes, check.HasLen, 1)
304         for uuid, v := range cc.Volumes {
305                 c.Check(uuid, check.HasLen, 27)
306                 c.Check(v.Driver, check.Equals, expectvol.Driver)
307                 c.Check(v.Replication, check.Equals, expectvol.Replication)
308
309                 avh, ok := v.AccessViaHosts[arvados.URL{Scheme: "http", Host: hostname + ":12345"}]
310                 c.Check(ok, check.Equals, true)
311                 c.Check(avh.ReadOnly, check.Equals, expectvol.ReadOnly)
312
313                 err := json.Unmarshal(v.DriverParameters, paramsdst)
314                 c.Check(err, check.IsNil)
315                 c.Check(paramsdst, check.DeepEquals, expectparams)
316         }
317 }
318
319 // How we handle a volume from a legacy keepstore config file depends
320 // on whether it's writable, whether a volume using the same cloud
321 // backend already exists in the cluster config, and (if so) whether
322 // it already has an AccessViaHosts entry for this host.
323 //
324 // In all cases, we should end up with an AccessViaHosts entry for
325 // this host, to indicate that the current host's volumes have been
326 // migrated.
327
328 // Same backend already referenced in cluster config, this host
329 // already listed in AccessViaHosts --> no change, except possibly
330 // updating the ReadOnly flag on the AccessViaHosts entry.
331 func (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_AlreadyMigrated(c *check.C) {
332         before, after := s.loadWithKeepstoreConfig(c, `
333 Listen: :12345
334 Volumes:
335 - Type: S3
336   Endpoint: https://storage.googleapis.com
337   Region: us-east-1z
338   Bucket: alreadymigrated
339   S3Replication: 3
340 `)
341         checkEqualYAML(c, after, before)
342 }
343
344 // Writable volume, same cloud backend already referenced in cluster
345 // config --> change UUID to match this keepstore's UUID.
346 func (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_UpdateUUID(c *check.C) {
347         port, expectUUID := s.getTestKeepstorePortAndMatchingVolumeUUID(c)
348
349         before, after := s.loadWithKeepstoreConfig(c, `
350 Listen: :`+strconv.Itoa(port)+`
351 Volumes:
352 - Type: S3
353   Endpoint: https://storage.googleapis.com
354   Region: us-east-1z
355   Bucket: readonlyonother
356   S3Replication: 3
357 `)
358         c.Check(after, check.HasLen, len(before))
359         newuuids := s.findAddedVolumes(c, before, after, 1)
360         newvol := after[newuuids[0]]
361
362         var params arvados.S3VolumeDriverParameters
363         json.Unmarshal(newvol.DriverParameters, &params)
364         c.Check(params.Bucket, check.Equals, "readonlyonother")
365         c.Check(newuuids[0], check.Equals, expectUUID)
366 }
367
368 // Writable volume, same cloud backend not yet referenced --> add a
369 // new volume, with UUID to match this keepstore's UUID.
370 func (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_AddCloudVolume(c *check.C) {
371         port, expectUUID := s.getTestKeepstorePortAndMatchingVolumeUUID(c)
372
373         before, after := s.loadWithKeepstoreConfig(c, `
374 Listen: :`+strconv.Itoa(port)+`
375 Volumes:
376 - Type: S3
377   Endpoint: https://storage.googleapis.com
378   Region: us-east-1z
379   Bucket: bucket-to-migrate
380   S3Replication: 3
381 `)
382         newuuids := s.findAddedVolumes(c, before, after, 1)
383         newvol := after[newuuids[0]]
384
385         var params arvados.S3VolumeDriverParameters
386         json.Unmarshal(newvol.DriverParameters, &params)
387         c.Check(params.Bucket, check.Equals, "bucket-to-migrate")
388         c.Check(newvol.Replication, check.Equals, 3)
389
390         c.Check(newuuids[0], check.Equals, expectUUID)
391 }
392
393 // Writable volume, same filesystem backend already referenced in
394 // cluster config, but this host isn't in AccessViaHosts --> add a new
395 // volume, with UUID to match this keepstore's UUID (filesystem-backed
396 // volumes are assumed to be different on different hosts, even if
397 // paths are the same).
398 func (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_AddLocalVolume(c *check.C) {
399         before, after := s.loadWithKeepstoreConfig(c, `
400 Listen: :12345
401 Volumes:
402 - Type: Directory
403   Root: /data/sdd
404   DirectoryReplication: 2
405 `)
406         newuuids := s.findAddedVolumes(c, before, after, 1)
407         newvol := after[newuuids[0]]
408
409         var params arvados.DirectoryVolumeDriverParameters
410         json.Unmarshal(newvol.DriverParameters, &params)
411         c.Check(params.Root, check.Equals, "/data/sdd")
412         c.Check(newvol.Replication, check.Equals, 2)
413 }
414
415 // Writable volume, same filesystem backend already referenced in
416 // cluster config, and this host is already listed in AccessViaHosts
417 // --> already migrated, don't change anything.
418 func (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_LocalVolumeAlreadyMigrated(c *check.C) {
419         before, after := s.loadWithKeepstoreConfig(c, `
420 Listen: :12345
421 Volumes:
422 - Type: Directory
423   Root: /data/sde
424   DirectoryReplication: 2
425 `)
426         checkEqualYAML(c, after, before)
427 }
428
429 // Multiple writable cloud-backed volumes --> one of them will get a
430 // UUID matching this keepstore's UUID.
431 func (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_AddMultipleCloudVolumes(c *check.C) {
432         port, expectUUID := s.getTestKeepstorePortAndMatchingVolumeUUID(c)
433
434         before, after := s.loadWithKeepstoreConfig(c, `
435 Listen: :`+strconv.Itoa(port)+`
436 Volumes:
437 - Type: S3
438   Endpoint: https://storage.googleapis.com
439   Region: us-east-1z
440   Bucket: first-bucket-to-migrate
441   S3Replication: 3
442 - Type: S3
443   Endpoint: https://storage.googleapis.com
444   Region: us-east-1z
445   Bucket: second-bucket-to-migrate
446   S3Replication: 3
447 `)
448         newuuids := s.findAddedVolumes(c, before, after, 2)
449         // Sort by bucket name (so "first" comes before "second")
450         params := map[string]arvados.S3VolumeDriverParameters{}
451         for _, uuid := range newuuids {
452                 var p arvados.S3VolumeDriverParameters
453                 json.Unmarshal(after[uuid].DriverParameters, &p)
454                 params[uuid] = p
455         }
456         sort.Slice(newuuids, func(i, j int) bool { return params[newuuids[i]].Bucket < params[newuuids[j]].Bucket })
457         newvol0, newvol1 := after[newuuids[0]], after[newuuids[1]]
458         params0, params1 := params[newuuids[0]], params[newuuids[1]]
459
460         c.Check(params0.Bucket, check.Equals, "first-bucket-to-migrate")
461         c.Check(newvol0.Replication, check.Equals, 3)
462
463         c.Check(params1.Bucket, check.Equals, "second-bucket-to-migrate")
464         c.Check(newvol1.Replication, check.Equals, 3)
465
466         // Don't care which one gets the special UUID
467         if newuuids[0] != expectUUID {
468                 c.Check(newuuids[1], check.Equals, expectUUID)
469         }
470 }
471
472 // Non-writable volume, same cloud backend already referenced in
473 // cluster config --> add this host to AccessViaHosts with
474 // ReadOnly==true
475 func (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_UpdateWithReadOnly(c *check.C) {
476         port, _ := s.getTestKeepstorePortAndMatchingVolumeUUID(c)
477         before, after := s.loadWithKeepstoreConfig(c, `
478 Listen: :`+strconv.Itoa(port)+`
479 Volumes:
480 - Type: S3
481   Endpoint: https://storage.googleapis.com
482   Region: us-east-1z
483   Bucket: readonlyonother
484   S3Replication: 3
485   ReadOnly: true
486 `)
487         hostname, err := os.Hostname()
488         c.Assert(err, check.IsNil)
489         url := arvados.URL{
490                 Scheme: "http",
491                 Host:   fmt.Sprintf("%s:%d", hostname, port),
492         }
493         _, ok := before["zzzzz-nyw5e-readonlyonother"].AccessViaHosts[url]
494         c.Check(ok, check.Equals, false)
495         _, ok = after["zzzzz-nyw5e-readonlyonother"].AccessViaHosts[url]
496         c.Check(ok, check.Equals, true)
497 }
498
499 // Writable volume, same cloud backend already writable by another
500 // keepstore server --> add this host to AccessViaHosts with
501 // ReadOnly==true
502 func (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_UpdateAlreadyWritable(c *check.C) {
503         port, _ := s.getTestKeepstorePortAndMatchingVolumeUUID(c)
504         before, after := s.loadWithKeepstoreConfig(c, `
505 Listen: :`+strconv.Itoa(port)+`
506 Volumes:
507 - Type: S3
508   Endpoint: https://storage.googleapis.com
509   Region: us-east-1z
510   Bucket: writableonother
511   S3Replication: 3
512   ReadOnly: false
513 `)
514         hostname, err := os.Hostname()
515         c.Assert(err, check.IsNil)
516         url := arvados.URL{
517                 Scheme: "http",
518                 Host:   fmt.Sprintf("%s:%d", hostname, port),
519         }
520         _, ok := before["zzzzz-nyw5e-writableonother"].AccessViaHosts[url]
521         c.Check(ok, check.Equals, false)
522         _, ok = after["zzzzz-nyw5e-writableonother"].AccessViaHosts[url]
523         c.Check(ok, check.Equals, true)
524 }
525
526 // Non-writable volume, same cloud backend not already referenced in
527 // cluster config --> assign a new random volume UUID.
528 func (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_AddReadOnly(c *check.C) {
529         port, _ := s.getTestKeepstorePortAndMatchingVolumeUUID(c)
530         before, after := s.loadWithKeepstoreConfig(c, `
531 Listen: :`+strconv.Itoa(port)+`
532 Volumes:
533 - Type: S3
534   Endpoint: https://storage.googleapis.com
535   Region: us-east-1z
536   Bucket: differentbucket
537   S3Replication: 3
538 `)
539         newuuids := s.findAddedVolumes(c, before, after, 1)
540         newvol := after[newuuids[0]]
541
542         var params arvados.S3VolumeDriverParameters
543         json.Unmarshal(newvol.DriverParameters, &params)
544         c.Check(params.Bucket, check.Equals, "differentbucket")
545
546         hostname, err := os.Hostname()
547         c.Assert(err, check.IsNil)
548         _, ok := newvol.AccessViaHosts[arvados.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", hostname, port)}]
549         c.Check(ok, check.Equals, true)
550 }
551
// clusterConfigForKeepstoreMigrationTest is a cluster config template
// used by loadWithKeepstoreConfig. The {{.hostname}} and
// {{.controller}} placeholders are filled in by clusterConfigYAML.
// Its Volumes cover the migration scenarios exercised above: a cloud
// volume already migrated for this host, cloud volumes accessible
// only via another host (read-only and writable), and local
// filesystem volumes (unmigrated and already migrated).
const clusterConfigForKeepstoreMigrationTest = `
Clusters:
  zzzzz:
    SystemRootToken: ` + arvadostest.AdminToken + `
    Services:
      Keepstore:
        InternalURLs:
          "http://{{.hostname}}:12345": {}
      Controller:
        ExternalURL: "https://{{.controller}}"
    TLS:
      Insecure: true
    Volumes:

      zzzzz-nyw5e-alreadymigrated:
        AccessViaHosts:
          "http://{{.hostname}}:12345": {}
        Driver: S3
        DriverParameters:
          Endpoint: https://storage.googleapis.com
          Region: us-east-1z
          Bucket: alreadymigrated
        Replication: 3

      zzzzz-nyw5e-readonlyonother:
        AccessViaHosts:
          "http://other.host.example:12345": {ReadOnly: true}
        Driver: S3
        DriverParameters:
          Endpoint: https://storage.googleapis.com
          Region: us-east-1z
          Bucket: readonlyonother
        Replication: 3

      zzzzz-nyw5e-writableonother:
        AccessViaHosts:
          "http://other.host.example:12345": {}
        Driver: S3
        DriverParameters:
          Endpoint: https://storage.googleapis.com
          Region: us-east-1z
          Bucket: writableonother
        Replication: 3

      zzzzz-nyw5e-localfilesystem:
        Driver: Directory
        DriverParameters:
          Root: /data/sdd
        Replication: 1

      zzzzz-nyw5e-localismigrated:
        AccessViaHosts:
          "http://{{.hostname}}:12345": {}
        Driver: Directory
        DriverParameters:
          Root: /data/sde
        Replication: 1
`
610
611 // Determine the effect of combining the given legacy keepstore config
612 // YAML (just the "Volumes" entries of an old keepstore config file)
613 // with the example clusterConfigForKeepstoreMigrationTest config.
614 //
615 // Return two Volumes configs -- one without loading
616 // keepstoreconfigdata ("before") and one with ("after") -- for the
617 // caller to compare.
618 func (s *KeepstoreMigrationSuite) loadWithKeepstoreConfig(c *check.C, keepstoreVolumesYAML string) (before, after map[string]arvados.Volume) {
619         ldr := testLoader(c, s.clusterConfigYAML(c), nil)
620         cBefore, err := ldr.Load()
621         c.Assert(err, check.IsNil)
622
623         keepstoreconfig, err := ioutil.TempFile("", "")
624         c.Assert(err, check.IsNil)
625         defer os.Remove(keepstoreconfig.Name())
626         io.WriteString(keepstoreconfig, keepstoreVolumesYAML)
627
628         ldr = testLoader(c, s.clusterConfigYAML(c), nil)
629         ldr.KeepstorePath = keepstoreconfig.Name()
630         cAfter, err := ldr.Load()
631         c.Assert(err, check.IsNil)
632
633         return cBefore.Clusters["zzzzz"].Volumes, cAfter.Clusters["zzzzz"].Volumes
634 }
635
636 func (s *KeepstoreMigrationSuite) clusterConfigYAML(c *check.C) string {
637         hostname, err := os.Hostname()
638         c.Assert(err, check.IsNil)
639
640         tmpl := template.Must(template.New("config").Parse(clusterConfigForKeepstoreMigrationTest))
641
642         var clusterconfigdata bytes.Buffer
643         err = tmpl.Execute(&clusterconfigdata, map[string]interface{}{
644                 "hostname":   hostname,
645                 "controller": os.Getenv("ARVADOS_API_HOST"),
646         })
647         c.Assert(err, check.IsNil)
648
649         return clusterconfigdata.String()
650 }
651
652 // Return the uuids of volumes that appear in "after" but not
653 // "before".
654 //
655 // Assert the returned slice has at least minAdded entries.
656 func (s *KeepstoreMigrationSuite) findAddedVolumes(c *check.C, before, after map[string]arvados.Volume, minAdded int) (uuids []string) {
657         for uuid := range after {
658                 if _, ok := before[uuid]; !ok {
659                         uuids = append(uuids, uuid)
660                 }
661         }
662         if len(uuids) < minAdded {
663                 c.Assert(uuids, check.HasLen, minAdded)
664         }
665         return
666 }
667
668 func (s *KeepstoreMigrationSuite) getTestKeepstorePortAndMatchingVolumeUUID(c *check.C) (int, string) {
669         for port, ks := range s.ksByPort {
670                 c.Assert(ks.UUID, check.HasLen, 27)
671                 return port, "zzzzz-nyw5e-" + ks.UUID[12:]
672         }
673         c.Fatal("s.ksByPort is empty")
674         return 0, ""
675 }
676
677 func (s *KeepstoreMigrationSuite) TestKeepServiceIsMe(c *check.C) {
678         for i, trial := range []struct {
679                 match       bool
680                 hostname    string
681                 listen      string
682                 serviceHost string
683                 servicePort int
684         }{
685                 {true, "keep0", "keep0", "keep0", 80},
686                 {true, "keep0", "[::1]:http", "keep0", 80},
687                 {true, "keep0", "[::]:http", "keep0", 80},
688                 {true, "keep0", "keep0:25107", "keep0", 25107},
689                 {true, "keep0", ":25107", "keep0", 25107},
690                 {true, "keep0.domain", ":25107", "keep0.domain.example", 25107},
691                 {true, "keep0.domain.example", ":25107", "keep0.domain.example", 25107},
692                 {true, "keep0", ":25107", "keep0.domain.example", 25107},
693                 {true, "keep0", ":25107", "Keep0.domain.example", 25107},
694                 {true, "keep0", ":http", "keep0.domain.example", 80},
695                 {true, "keep0", ":25107", "localhost", 25107},
696                 {true, "keep0", ":25107", "::1", 25107},
697                 {false, "keep0", ":25107", "keep0", 1111},              // different port
698                 {false, "keep0", ":25107", "localhost", 1111},          // different port
699                 {false, "keep0", ":http", "keep0.domain.example", 443}, // different port
700                 {false, "keep0", ":bogussss", "keep0", 25107},          // unresolvable port
701                 {false, "keep0", ":25107", "keep1", 25107},             // different hostname
702                 {false, "keep1", ":25107", "keep10", 25107},            // different hostname (prefix, but not on a "." boundary)
703         } {
704                 c.Check(keepServiceIsMe(arvados.KeepService{ServiceHost: trial.serviceHost, ServicePort: trial.servicePort}, trial.hostname, trial.listen), check.Equals, trial.match, check.Commentf("trial #%d: %#v", i, trial))
705         }
706 }