// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package keepweb

import (
	"io"
	"math"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// metrics holds the Prometheus collectors keep-web uses to report
// transfer performance.
type metrics struct {
	mDownloadSpeed        *prometheus.HistogramVec
	mDownloadBackendSpeed *prometheus.HistogramVec
	mUploadSpeed          *prometheus.HistogramVec
	mUploadSyncDelay      *prometheus.HistogramVec
}

// newMetrics creates the keep-web transfer metrics, registers them on
// reg, and returns them.
func newMetrics(reg *prometheus.Registry) *metrics {
	m := &metrics{
		mDownloadSpeed: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "arvados",
			Subsystem: "keepweb",
			Name:      "download_speed",
			Help:      "Download speed (bytes per second) bucketed by transfer size range",
			Buckets:   []float64{10_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000, math.Inf(+1)},
		}, []string{"size_range"}),
		mDownloadBackendSpeed: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "arvados",
			Subsystem: "keepweb",
			Name:      "download_apparent_backend_speed",
			Help:      "Apparent download speed from the backend (bytes per second) when serving file downloads, bucketed by transfer size range (see https://dev.arvados.org/projects/arvados/wiki/WebDAV_performance_metrics for explanation)",
			Buckets:   []float64{10_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000, math.Inf(+1)},
		}, []string{"size_range"}),
		mUploadSpeed: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "arvados",
			Subsystem: "keepweb",
			Name:      "upload_speed",
			Help:      "Upload speed (bytes per second) bucketed by transfer size range",
			Buckets:   []float64{10_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000, math.Inf(+1)},
		}, []string{"size_range"}),
		mUploadSyncDelay: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "arvados",
			Subsystem: "keepweb",
			Name:      "upload_sync_delay_seconds",
			Help:      "Upload sync delay (time from last byte received to HTTP response)",
		}, []string{"size_range"}),
	}
	reg.MustRegister(m.mDownloadSpeed)
	reg.MustRegister(m.mDownloadBackendSpeed)
	reg.MustRegister(m.mUploadSpeed)
	reg.MustRegister(m.mUploadSyncDelay)
	return m
}

// track runs handler(w, r) and records upload/download metrics as
// applicable.
func (m *metrics) track(handler http.Handler, w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet:
		dt := newDownloadTracker(w)
		handler.ServeHTTP(dt, r)
		size := dt.bytesOut
		if size == 0 {
			return
		}
		bucket := sizeRange(size)
		// Download speed is measured from the start of the
		// request until the metrics are recorded.
		m.mDownloadSpeed.WithLabelValues(bucket).Observe(float64(dt.bytesOut) / time.Since(dt.t0).Seconds())
		// Apparent backend speed excludes time spent inside
		// ResponseWriter.Write: it counts only the intervals
		// between writes, plus the time after the last write.
		m.mDownloadBackendSpeed.WithLabelValues(bucket).Observe(float64(size) / (dt.backendWait + time.Since(dt.lastByte)).Seconds())
	case http.MethodPut:
		ut := newUploadTracker(r)
		handler.ServeHTTP(w, r)
		d := ut.lastByte.Sub(ut.t0)
		if d <= 0 {
			// Read() was not called, or did not return any data
			return
		}
		size := ut.bytesIn
		bucket := sizeRange(size)
		m.mUploadSpeed.WithLabelValues(bucket).Observe(float64(ut.bytesIn) / d.Seconds())
		m.mUploadSyncDelay.WithLabelValues(bucket).Observe(time.Since(ut.lastByte).Seconds())
	default:
		handler.ServeHTTP(w, r)
	}
}
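
// exampleTrackUsage is an illustrative sketch, not part of the original
// file: it shows how a caller might wire track into a handler chain.
// Here, inner stands in for keep-web's real WebDAV/S3 handler and reg
// for the process's Prometheus registry; both names are hypothetical.
func exampleTrackUsage(reg *prometheus.Registry, inner http.Handler) http.Handler {
	m := newMetrics(reg)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Serve the request via inner, recording download metrics
		// for GET and upload metrics for PUT; other methods pass
		// through unmeasured.
		m.track(inner, w, r)
	})
}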

// sizeRange returns a size_range label based on the number of bytes
// transferred (not the same as file size in the case of a Range request
// or interrupted transfer). The label is the lower bound of the range:
// for example, "1M" means at least 1 MB and less than 10 MB were
// transferred.
func sizeRange(size int64) string {
	switch {
	case size < 1_000_000:
		return "0"
	case size < 10_000_000:
		return "1M"
	case size < 100_000_000:
		return "10M"
	default:
		return "100M"
	}
}
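
// For illustration (example values, not from the original source): with
// the thresholds above, sizeRange(200_000) returns "0",
// sizeRange(5_000_000) returns "1M", sizeRange(50_000_000) returns
// "10M", and sizeRange(500_000_000) returns "100M".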

// downloadTracker wraps an http.ResponseWriter, recording how many
// bytes are written to the client and how the time is spent.
type downloadTracker struct {
	http.ResponseWriter
	t0 time.Time

	firstByte   time.Time     // time of first call to Write
	lastByte    time.Time     // time of most recent call to Write
	bytesOut    int64         // bytes sent to client so far
	backendWait time.Duration // total of intervals between Write calls
}

// newDownloadTracker returns a downloadTracker wrapping w, with t0 set
// to the current time.
func newDownloadTracker(w http.ResponseWriter) *downloadTracker {
	return &downloadTracker{ResponseWriter: w, t0: time.Now()}
}

// Write adds the time elapsed since the previous Write call (or, for
// the first call, since t0) to backendWait, then passes p through to
// the wrapped ResponseWriter and updates the byte and timing counters.
func (dt *downloadTracker) Write(p []byte) (int, error) {
	if dt.lastByte.IsZero() {
		dt.backendWait += time.Since(dt.t0)
	} else {
		dt.backendWait += time.Since(dt.lastByte)
	}
	if dt.firstByte.IsZero() {
		dt.firstByte = time.Now()
	}
	n, err := dt.ResponseWriter.Write(p)
	dt.bytesOut += int64(n)
	dt.lastByte = time.Now()
	return n, err
}
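
// Worked example (illustrative figures, not from the original source):
// if a 100 MB GET takes 10s end to end and the handler spends a
// combined 4s blocked between Write calls (including the time after the
// last one), download_speed observes 100e6/10 = 10 MB/s while
// download_apparent_backend_speed observes 100e6/4 = 25 MB/s.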

// uploadTracker wraps a request body, recording how many bytes the
// handler reads and when the most recent read returned.
type uploadTracker struct {
	io.ReadCloser
	t0       time.Time
	lastByte time.Time
	bytesIn  int64
}

// newUploadTracker replaces r.Body with an uploadTracker so that the
// handler's reads are counted and timed.
func newUploadTracker(r *http.Request) *uploadTracker {
	now := time.Now()
	ut := &uploadTracker{ReadCloser: r.Body, t0: now}
	r.Body = ut
	return ut
}

// Read passes through to the wrapped body, then updates the byte count
// and the time of the most recent read.
func (ut *uploadTracker) Read(p []byte) (int, error) {
	n, err := ut.ReadCloser.Read(p)
	ut.lastByte = time.Now()
	ut.bytesIn += int64(n)
	return n, err
}