19164: add a few flags to the compute image builder script.
[arvados.git] / tools / keep-exercise / keep-exercise.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 // Testing tool for Keep services.
6 //
7 // keepexercise helps measure throughput and test reliability under
8 // various usage patterns.
9 //
10 // By default, it reads and writes blocks containing 2^26 NUL
11 // bytes. This generates network traffic without consuming much disk
12 // space.
13 //
14 // For a more realistic test, enable -vary-request. Warning: this will
15 // fill your storage volumes with random data if you leave it running,
16 // which can cost you money or leave you with too little room for
17 // useful data.
18 //
19 package main
20
21 import (
22         "bufio"
23         "context"
24         "crypto/rand"
25         "encoding/binary"
26         "flag"
27         "fmt"
28         "io"
29         "io/ioutil"
30         "log"
31         mathRand "math/rand"
32         "net/http"
33         "os"
34         "os/signal"
35         "strings"
36         "sync/atomic"
37         "syscall"
38         "time"
39
40         "git.arvados.org/arvados.git/lib/cmd"
41         "git.arvados.org/arvados.git/lib/config"
42         "git.arvados.org/arvados.git/sdk/go/arvados"
43         "git.arvados.org/arvados.git/sdk/go/arvadosclient"
44         "git.arvados.org/arvados.git/sdk/go/keepclient"
45 )
46
// version is overridden at build time via -ldflags (see -version flag).
var version = "dev"

// Command line config knobs
var (
	BlockSize     = flag.Int("block-size", keepclient.BLOCKSIZE, "bytes per read/write op")
	ReadThreads   = flag.Int("rthreads", 1, "number of concurrent readers")
	WriteThreads  = flag.Int("wthreads", 1, "number of concurrent writers")
	VaryRequest   = flag.Bool("vary-request", false, "vary the data for each request: consumes disk space, exercises write behavior")
	VaryThread    = flag.Bool("vary-thread", false, "use -wthreads different data blocks")
	Replicas      = flag.Int("replicas", 1, "replication level for writing")
	StatsInterval = flag.Duration("stats-interval", time.Second, "time interval between IO stats reports, or 0 to disable")
	ServiceURL    = flag.String("url", "", "specify scheme://host of a single keep service to exercise (instead of using all advertised services like normal clients)")
	ServiceUUID   = flag.String("uuid", "", "specify UUID of a single advertised keep service to exercise")
	getVersion    = flag.Bool("version", false, "Print version information and exit.")
	RunTime       = flag.Duration("run-time", 0, "time to run (e.g. 60s), or 0 to run indefinitely (default)")
	Repeat        = flag.Int("repeat", 1, "number of times to repeat the experiment (default 1)")
	UseIndex      = flag.Bool("use-index", false, "use the GetIndex call to get a list of blocks to read. Requires the SystemRoot token. Use this to rule out caching effects when reading.")
)
65
66 func createKeepClient(lgr *log.Logger) (kc *keepclient.KeepClient) {
67         arv, err := arvadosclient.MakeArvadosClient()
68         if err != nil {
69                 lgr.Fatal(err)
70         }
71         kc, err = keepclient.MakeKeepClient(arv)
72         if err != nil {
73                 lgr.Fatal(err)
74         }
75         kc.Want_replicas = *Replicas
76
77         kc.HTTPClient = &http.Client{
78                 Timeout: 10 * time.Minute,
79                 // It's not safe to copy *http.DefaultTransport
80                 // because it has a mutex (which might be locked)
81                 // protecting a private map (which might not be nil).
82                 // So we build our own, using the Go 1.12 default
83                 // values.
84                 Transport: &http.Transport{
85                         TLSClientConfig: arvadosclient.MakeTLSConfig(arv.ApiInsecure),
86                 },
87         }
88         overrideServices(kc, lgr)
89         return kc
90 }
91
// main parses flags, validates the requested thread configuration, wires
// up signal-driven cancellation, and runs -repeat experiments, printing
// a cumulative CSV summary at the end.
func main() {
	if ok, code := cmd.ParseFlags(flag.CommandLine, os.Args[0], os.Args[1:], "", os.Stderr); !ok {
		os.Exit(code)
	} else if *getVersion {
		fmt.Printf("%s %s\n", os.Args[0], version)
		return
	}

	lgr := log.New(os.Stderr, "", log.LstdFlags)

	// Without -use-index, readers can only read blocks written by this
	// same process, so reading requires at least one writer.
	if *ReadThreads > 0 && *WriteThreads == 0 && !*UseIndex {
		lgr.Fatal("At least one write thread is required if rthreads is non-zero and -use-index is not enabled")
	}

	if *ReadThreads == 0 && *WriteThreads == 0 {
		lgr.Fatal("Nothing to do!")
	}

	kc := createKeepClient(lgr)

	// When UseIndex is set, we need a KeepClient with SystemRoot powers to get
	// the block index from the Keepstore. We use the SystemRootToken from
	// the Arvados config.yml for that.
	var cluster *arvados.Cluster
	if *ReadThreads > 0 && *UseIndex {
		cluster = loadConfig(lgr)
		kc.Arvados.ApiToken = cluster.SystemRootToken
	}

	// Cancel the experiment context on SIGINT/SIGTERM so worker
	// goroutines wind down and the final stats line still prints.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-sigChan
		// FIXME
		//fmt.Print("\r") // Suppress the ^C print
		cancel()
	}()

	csvHeader := "Timestamp,Elapsed,Read (bytes),Avg Read Speed (MiB/s),Peak Read Speed (MiB/s),Written (bytes),Avg Write Speed (MiB/s),Peak Write Speed (MiB/s),Errors,ReadThreads,WriteThreads,VaryRequest,VaryThread,BlockSize,Replicas,StatsInterval,ServiceURL,ServiceUUID,UseIndex,RunTime,Repeat"
	var summary string

	// One buffer-producer goroutine per write thread; each feeds its
	// writer through a capacity-1 channel so the next block is prepared
	// while the current one is being uploaded.
	var nextBufs []chan []byte
	for i := 0; i < *WriteThreads; i++ {
		nextBuf := make(chan []byte, 1)
		nextBufs = append(nextBufs, nextBuf)
		go makeBufs(nextBuf, i, lgr)
	}

	// Each experiment appends one CSV line (plus the experiment number)
	// to summary; stop early if the context has been canceled.
	for i := 0; i < *Repeat && ctx.Err() == nil; i++ {
		summary = runExperiment(ctx, cluster, kc, nextBufs, summary, csvHeader, lgr)
		lgr.Printf("*************************** experiment %d complete ******************************\n", i)
		summary += fmt.Sprintf(",%d\n", i)
	}

	lgr.Println("Summary:")
	lgr.Println()
	fmt.Println()
	fmt.Println(csvHeader + ",Experiment")
	fmt.Println(summary)
}
154
155 func runExperiment(ctx context.Context, cluster *arvados.Cluster, kc *keepclient.KeepClient, nextBufs []chan []byte, summary string, csvHeader string, lgr *log.Logger) (newSummary string) {
156         // Send 1234 to bytesInChan when we receive 1234 bytes from keepstore.
157         var bytesInChan = make(chan uint64)
158         var bytesOutChan = make(chan uint64)
159         // Send struct{}{} to errorsChan when an error happens.
160         var errorsChan = make(chan struct{})
161
162         var nextLocator atomic.Value
163         // when UseIndex is set, this channel is used instead of nextLocator
164         var indexLocatorChan = make(chan string, 2)
165
166         newSummary = summary
167
168         // Start warmup
169         ready := make(chan struct{})
170         var warmup bool
171         if *ReadThreads > 0 {
172                 warmup = true
173                 if !*UseIndex {
174                         lgr.Printf("Start warmup phase, waiting for 1 available block before reading starts\n")
175                 } else {
176                         lgr.Printf("Start warmup phase, waiting for block index before reading starts\n")
177                 }
178         }
179         if warmup && !*UseIndex {
180                 go func() {
181                         locator, _, err := kc.PutB(<-nextBufs[0])
182                         if err != nil {
183                                 lgr.Print(err)
184                                 errorsChan <- struct{}{}
185                         }
186                         nextLocator.Store(locator)
187                         lgr.Println("Warmup complete!")
188                         close(ready)
189                 }()
190         } else if warmup && *UseIndex {
191                 // Get list of blocks to read
192                 go getIndexLocators(ctx, cluster, kc, indexLocatorChan, lgr)
193                 select {
194                 case <-ctx.Done():
195                         return
196                 case <-indexLocatorChan:
197                         lgr.Println("Warmup complete!")
198                         close(ready)
199                 }
200         } else {
201                 close(ready)
202         }
203         select {
204         case <-ctx.Done():
205                 return
206         case <-ready:
207         }
208
209         // Warmup complete
210         ctx, cancel := context.WithDeadline(ctx, time.Now().Add(*RunTime))
211         defer cancel()
212
213         for i := 0; i < *WriteThreads; i++ {
214                 go doWrites(ctx, kc, nextBufs[i], &nextLocator, bytesOutChan, errorsChan, lgr)
215         }
216         if *UseIndex {
217                 for i := 0; i < *ReadThreads; i++ {
218                         go doReads(ctx, kc, nil, indexLocatorChan, bytesInChan, errorsChan, lgr)
219                 }
220         } else {
221                 for i := 0; i < *ReadThreads; i++ {
222                         go doReads(ctx, kc, &nextLocator, nil, bytesInChan, errorsChan, lgr)
223                 }
224         }
225
226         t0 := time.Now()
227         var tickChan <-chan time.Time
228         if *StatsInterval > 0 {
229                 tickChan = time.NewTicker(*StatsInterval).C
230         }
231         var bytesIn uint64
232         var bytesOut uint64
233         var errors uint64
234         var rateIn, rateOut float64
235         var maxRateIn, maxRateOut float64
236         var exit, printCsv bool
237         csv := log.New(os.Stdout, "", 0)
238         csv.Println()
239         csv.Println(csvHeader)
240         for {
241                 select {
242                 case <-ctx.Done():
243                         printCsv = true
244                         exit = true
245                 case <-tickChan:
246                         printCsv = true
247                 case i := <-bytesInChan:
248                         bytesIn += i
249                 case o := <-bytesOutChan:
250                         bytesOut += o
251                 case <-errorsChan:
252                         errors++
253                 }
254                 if printCsv {
255                         elapsed := time.Since(t0)
256                         rateIn = float64(bytesIn) / elapsed.Seconds() / 1048576
257                         if rateIn > maxRateIn {
258                                 maxRateIn = rateIn
259                         }
260                         rateOut = float64(bytesOut) / elapsed.Seconds() / 1048576
261                         if rateOut > maxRateOut {
262                                 maxRateOut = rateOut
263                         }
264                         line := fmt.Sprintf("%v,%v,%v,%.1f,%.1f,%v,%.1f,%.1f,%d,%d,%d,%t,%t,%d,%d,%s,%s,%s,%t,%s,%d",
265                                 time.Now().Format("2006/01/02 15:04:05"),
266                                 elapsed,
267                                 bytesIn, rateIn, maxRateIn,
268                                 bytesOut, rateOut, maxRateOut,
269                                 errors,
270                                 *ReadThreads,
271                                 *WriteThreads,
272                                 *VaryRequest,
273                                 *VaryThread,
274                                 *BlockSize,
275                                 *Replicas,
276                                 *StatsInterval,
277                                 *ServiceURL,
278                                 *ServiceUUID,
279                                 *UseIndex,
280                                 *RunTime,
281                                 *Repeat,
282                         )
283                         csv.Println(line)
284                         if exit {
285                                 newSummary += line
286                                 return
287                         }
288                         printCsv = false
289                 }
290         }
291 }
292
293 func makeBufs(nextBuf chan<- []byte, threadID int, lgr *log.Logger) {
294         buf := make([]byte, *BlockSize)
295         if *VaryThread {
296                 binary.PutVarint(buf, int64(threadID))
297         }
298         randSize := 524288
299         if randSize > *BlockSize {
300                 randSize = *BlockSize
301         }
302         for {
303                 if *VaryRequest {
304                         rnd := make([]byte, randSize)
305                         if _, err := io.ReadFull(rand.Reader, rnd); err != nil {
306                                 lgr.Fatal(err)
307                         }
308                         buf = append(rnd, buf[randSize:]...)
309                 }
310                 nextBuf <- buf
311         }
312 }
313
314 func doWrites(ctx context.Context, kc *keepclient.KeepClient, nextBuf <-chan []byte, nextLocator *atomic.Value, bytesOutChan chan<- uint64, errorsChan chan<- struct{}, lgr *log.Logger) {
315         for ctx.Err() == nil {
316                 //lgr.Printf("%s nextbuf %s, waiting for nextBuf\n",nextBuf,time.Now())
317                 buf := <-nextBuf
318                 //lgr.Printf("%s nextbuf %s, done waiting for nextBuf\n",nextBuf,time.Now())
319                 locator, _, err := kc.PutB(buf)
320                 if err != nil {
321                         lgr.Print(err)
322                         errorsChan <- struct{}{}
323                         continue
324                 }
325                 bytesOutChan <- uint64(len(buf))
326                 nextLocator.Store(locator)
327         }
328 }
329
330 func getIndexLocators(ctx context.Context, cluster *arvados.Cluster, kc *keepclient.KeepClient, indexLocatorChan chan<- string, lgr *log.Logger) {
331         if ctx.Err() != nil {
332                 return
333         }
334         locatorsMap := make(map[string]bool)
335         var locators []string
336         var count int64
337         for uuid := range kc.LocalRoots() {
338                 reader, err := kc.GetIndex(uuid, "")
339                 if err != nil {
340                         lgr.Fatalf("Error getting index: %s\n", err)
341                 }
342                 scanner := bufio.NewScanner(reader)
343                 for scanner.Scan() {
344                         locatorsMap[strings.Split(scanner.Text(), " ")[0]] = true
345                         count++
346                 }
347         }
348         for l := range locatorsMap {
349                 locators = append(locators, l)
350         }
351         lgr.Printf("Found %d locators\n", count)
352         lgr.Printf("Found %d locators (deduplicated)\n", len(locators))
353         if len(locators) < 1 {
354                 lgr.Fatal("Error: no locators found. The keepstores do not seem to contain any data. Remove the -use-index cli argument.")
355         }
356
357         mathRand.Seed(time.Now().UnixNano())
358         mathRand.Shuffle(len(locators), func(i, j int) { locators[i], locators[j] = locators[j], locators[i] })
359
360         for _, locator := range locators {
361                 // We need the Collections.BlobSigningKey to sign our block requests. This requires access to /etc/arvados/config.yml
362                 signedLocator := arvados.SignLocator(locator, kc.Arvados.ApiToken, time.Now().Local().Add(1*time.Hour), cluster.Collections.BlobSigningTTL.Duration(), []byte(cluster.Collections.BlobSigningKey))
363                 select {
364                 case <-ctx.Done():
365                         return
366                 case indexLocatorChan <- signedLocator:
367                 }
368         }
369         lgr.Fatal("Error: ran out of locators to read!")
370 }
371
372 func loadConfig(lgr *log.Logger) (cluster *arvados.Cluster) {
373         loader := config.NewLoader(os.Stdin, nil)
374         loader.SkipLegacy = true
375
376         cfg, err := loader.Load()
377         if err != nil {
378                 lgr.Fatal(err)
379         }
380         cluster, err = cfg.GetCluster("")
381         if err != nil {
382                 lgr.Fatal(err)
383         }
384         return
385 }
386
387 func doReads(ctx context.Context, kc *keepclient.KeepClient, nextLocator *atomic.Value, indexLocatorChan <-chan string, bytesInChan chan<- uint64, errorsChan chan<- struct{}, lgr *log.Logger) {
388         for ctx.Err() == nil {
389                 var locator string
390                 if indexLocatorChan != nil {
391                         select {
392                         case <-ctx.Done():
393                                 return
394                         case locator = <-indexLocatorChan:
395                         }
396                 } else {
397                         locator = nextLocator.Load().(string)
398                 }
399                 rdr, size, url, err := kc.Get(locator)
400                 if err != nil {
401                         lgr.Print(err)
402                         errorsChan <- struct{}{}
403                         continue
404                 }
405                 n, err := io.Copy(ioutil.Discard, rdr)
406                 rdr.Close()
407                 if n != size || err != nil {
408                         lgr.Printf("Got %d bytes (expected %d) from %s: %v", n, size, url, err)
409                         errorsChan <- struct{}{}
410                         continue
411                         // Note we don't count the bytes received in
412                         // partial/corrupt responses: we are measuring
413                         // throughput, not resource consumption.
414                 }
415                 bytesInChan <- uint64(n)
416         }
417 }
418
419 func overrideServices(kc *keepclient.KeepClient, lgr *log.Logger) {
420         roots := make(map[string]string)
421         if *ServiceURL != "" {
422                 roots["zzzzz-bi6l4-000000000000000"] = *ServiceURL
423         } else if *ServiceUUID != "" {
424                 for uuid, url := range kc.GatewayRoots() {
425                         if uuid == *ServiceUUID {
426                                 roots[uuid] = url
427                                 break
428                         }
429                 }
430                 if len(roots) == 0 {
431                         lgr.Fatalf("Service %q was not in list advertised by API %+q", *ServiceUUID, kc.GatewayRoots())
432                 }
433         } else {
434                 return
435         }
436         kc.SetServiceRoots(roots, roots, roots)
437 }