1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: Apache-2.0
23 "git.arvados.org/arvados.git/sdk/go/arvados"
24 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
25 "git.arvados.org/arvados.git/sdk/go/asyncbuf"
// DebugPrintf emits debug messages. The easiest way to enable
// keepclient debug messages in your application is to assign
// log.Printf to DebugPrintf.
// By default it is a no-op that discards its arguments.
var DebugPrintf = func(string, ...interface{}) {}
// NOTE(review): these lines appear to be the body of a package init
// function whose opening line is not visible in this view — confirm.
// Enable debug logging when ARVADOS_DEBUG is set to a true-ish value.
if arvadosclient.StringBool(os.Getenv("ARVADOS_DEBUG")) {
	DebugPrintf = log.Printf
// keepService describes one Keep service endpoint as reported by the
// API server (JSON tags match the keep_services resource fields).
type keepService struct {
	Uuid     string `json:"uuid"`
	Hostname string `json:"service_host"`
	Port     int    `json:"service_port"`
	SSL      bool   `json:"service_ssl_flag"`
	SvcType  string `json:"service_type"`
	ReadOnly bool   `json:"read_only"`
// Md5String returns the hex-encoded MD5 digest of the bytes in the
// given string.
func Md5String(s string) string {
	digest := md5.Sum([]byte(s))
	return fmt.Sprintf("%x", digest)
}
	// Items is the list of keep services returned by the API server.
	// NOTE(review): the enclosing struct's declaration line is not
	// visible in this view.
	Items []keepService `json:"items"`
// uploadStatus carries the result of one uploadToKeepServer attempt
// back to httpBlockWrite over a channel.
// NOTE(review): several fields are declared on lines not visible here;
// the composite literals elsewhere in this file use six fields in the
// order {err, url, statusCode, replicasStored, classesStored, response}.
type uploadStatus struct {
	// classesStored maps storage class name to the number of replicas
	// the server confirmed for that class (parsed from the
	// X-Keep-Storage-Classes-Confirmed response header).
	classesStored map[string]int
// uploadToKeepServer PUTs one block (hash, expectedLength bytes read from
// body) to a single keep service at host, and reports the outcome on
// uploadStatusChan. classesTodo is sent as the X-Keep-Storage-Classes
// header; reqid is propagated as X-Request-Id for log correlation.
// Intended to run as one goroutine per server (see httpBlockWrite).
// NOTE(review): interior lines (closing braces, some declarations such as
// req/err/rep, an else branch) are missing from this view; the code lines
// below are reproduced as-is with comments added.
func (kc *KeepClient) uploadToKeepServer(host string, hash string, classesTodo []string, body io.Reader,
	uploadStatusChan chan<- uploadStatus, expectedLength int, reqid string) {
	// Target URL is <service root>/<block hash>.
	var url = fmt.Sprintf("%s/%s", host, hash)
	if req, err = http.NewRequest("PUT", url, nil); err != nil {
		DebugPrintf("DEBUG: [%s] Error creating request PUT %v error: %v", reqid, url, err.Error())
		uploadStatusChan <- uploadStatus{err, url, 0, 0, nil, ""}
	// ContentLength must be set explicitly: the body is a plain io.Reader
	// whose length net/http cannot determine on its own.
	req.ContentLength = int64(expectedLength)
	if expectedLength > 0 {
		req.Body = ioutil.NopCloser(body)
	// "For client requests, a value of 0 means unknown if
	// Body is not nil." In this case we do want the body
	// to be empty, so don't set req.Body.
	req.Header.Add("X-Request-Id", reqid)
	req.Header.Add("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
	req.Header.Add("Content-Type", "application/octet-stream")
	req.Header.Add(XKeepDesiredReplicas, fmt.Sprint(kc.Want_replicas))
	if len(classesTodo) > 0 {
		req.Header.Add(XKeepStorageClasses, strings.Join(classesTodo, ", "))
	var resp *http.Response
	if resp, err = kc.httpClient().Do(req); err != nil {
		DebugPrintf("DEBUG: [%s] Upload failed %v error: %v", reqid, url, err.Error())
		uploadStatusChan <- uploadStatus{err, url, 0, 0, nil, err.Error()}
	// Replica count reported by the server, if present.
	if xr := resp.Header.Get(XKeepReplicasStored); xr != "" {
		fmt.Sscanf(xr, "%d", &rep)
	// Per-class confirmed replica counts; an invalid header is logged
	// and otherwise ignored rather than failing the upload.
	scc := resp.Header.Get(XKeepStorageClassesConfirmed)
	classesStored, err := parseStorageClassesConfirmedHeader(scc)
	DebugPrintf("DEBUG: [%s] Ignoring invalid %s header %q: %s", reqid, XKeepStorageClassesConfirmed, scc, err)
	// Drain then close the body so the transport can reuse the
	// connection (defers run LIFO: Copy runs before Close).
	defer resp.Body.Close()
	defer io.Copy(ioutil.Discard, resp.Body)
	// Keep at most 4 KiB of the response text for the status report.
	respbody, err2 := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
	response := strings.TrimSpace(string(respbody))
	if err2 != nil && err2 != io.EOF {
		DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, err2.Error(), response)
		uploadStatusChan <- uploadStatus{err2, url, resp.StatusCode, rep, classesStored, response}
	} else if resp.StatusCode == http.StatusOK {
		DebugPrintf("DEBUG: [%s] Upload %v success", reqid, url)
		uploadStatusChan <- uploadStatus{nil, url, resp.StatusCode, rep, classesStored, response}
	// Error path: fall back to the HTTP status line when the body is empty.
	if resp.StatusCode >= 300 && response == "" {
		response = resp.Status
	DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, resp.StatusCode, response)
	uploadStatusChan <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, classesStored, response}
// httpBlockWrite uploads the block described by req to enough writable keep
// services to satisfy the requested replica count (and, when servers report
// them, the requested storage classes), retrying transient failures with
// exponential backoff. On success the response carries the locator returned
// by a server plus the replica counts achieved.
// NOTE(review): many interior lines (closing braces, else branches, variable
// declarations such as nextServer/active/m/reader, success/exit paths) are
// missing from this view; the code lines below are reproduced as-is with
// comments added.
func (kc *KeepClient) httpBlockWrite(ctx context.Context, req arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
	var resp arvados.BlockWriteResponse
	var getReader func() io.Reader
	// Validate options: exactly one data source and a sane size.
	if req.Data == nil && req.Reader == nil {
		return resp, errors.New("invalid BlockWriteOptions: Data and Reader are both nil")
	if req.DataSize < 0 {
		return resp, fmt.Errorf("invalid BlockWriteOptions: negative DataSize %d", req.DataSize)
	if req.DataSize > BLOCKSIZE || len(req.Data) > BLOCKSIZE {
		return resp, ErrOversizeBlock
	if req.DataSize > len(req.Data) {
		return resp, errors.New("invalid BlockWriteOptions: DataSize > len(Data)")
	if req.DataSize == 0 {
		req.DataSize = len(req.Data)
	// In-memory data: each upload attempt gets a fresh reader over the
	// same backing bytes.
	getReader = func() io.Reader { return bytes.NewReader(req.Data[:req.DataSize]) }
	// Streaming data: buffer it once (verifying the hash while copying)
	// so multiple concurrent uploads can each read it from the start.
	buf := asyncbuf.NewBuffer(make([]byte, 0, req.DataSize))
	reader = HashCheckingReader{req.Reader, md5.New(), req.Hash}
	_, err := io.Copy(buf, reader)
	buf.CloseWithError(err)
	getReader = buf.NewReader
	// Compute the MD5 hash when the caller didn't supply one.
	_, err := io.Copy(m, getReader())
	req.Hash = fmt.Sprintf("%x", m.Sum(nil))
	// Fill in defaults from the client configuration.
	if req.StorageClasses == nil {
		if len(kc.StorageClasses) > 0 {
			req.StorageClasses = kc.StorageClasses
		req.StorageClasses = kc.DefaultStorageClasses
	if req.Replicas == 0 {
		req.Replicas = kc.Want_replicas
	if req.RequestID == "" {
		req.RequestID = kc.getRequestID()
	if req.Attempts == 0 {
		req.Attempts = 1 + kc.Retries
	// Calculate the ordering for uploading to servers
	sv := NewRootSorter(kc.WritableLocalRoots(), req.Hash).GetSortedRoots()
	// The next server to try contacting
	// The number of active writers
	// Used to communicate status from the upload goroutines
	uploadStatusChan := make(chan uploadStatus)
	// Wait for any abandoned uploads (e.g., we started
	// two uploads and the first replied with replicas=2)
	// to finish before closing the status channel.
	close(uploadStatusChan)
	// replicasTodo tracks how many replicas each storage class still needs.
	replicasTodo := map[string]int{}
	for _, c := range req.StorageClasses {
		replicasTodo[c] = req.Replicas
	replicasPerThread := kc.replicasPerService
	if replicasPerThread < 1 {
		// unlimited or unknown
		replicasPerThread = req.Replicas
	delay := delayCalculator{InitialMaxDelay: kc.RetryDelay}
	retriesRemaining := req.Attempts
	var retryServers []string
	// lastError remembers the most recent failure per server URL for the
	// final error report; trackingClasses is cleared if any server turns
	// out not to report per-class confirmations.
	lastError := make(map[string]string)
	trackingClasses := len(replicasTodo) > 0
	for retriesRemaining > 0 {
		retryServers = []string{}
		// Work out which classes still need replicas, and cap the
		// useful concurrency at the smallest outstanding count.
		var classesTodo []string
		var maxConcurrency int
		for sc, r := range replicasTodo {
			classesTodo = append(classesTodo, sc)
			if maxConcurrency == 0 || maxConcurrency > r {
				// Having more than r
				// would overreplicate
		if !trackingClasses {
			maxConcurrency = req.Replicas - resp.Replicas
		if maxConcurrency < 1 {
			// If there are no non-zero entries in
			// replicasTodo, we're done.
		for active*replicasPerThread < maxConcurrency {
			// Start some upload requests
			if nextServer < len(sv) {
				DebugPrintf("DEBUG: [%s] Begin upload %s to %s", req.RequestID, req.Hash, sv[nextServer])
				go kc.uploadToKeepServer(sv[nextServer], req.Hash, classesTodo, getReader(), uploadStatusChan, req.DataSize, req.RequestID)
		// Out of servers and attempts with uploads still owed: report
		// the accumulated per-server errors.
		if active == 0 && retriesRemaining == 0 {
			msg := "Could not write sufficient replicas: "
			for _, resp := range lastError {
			msg = msg[:len(msg)-2]
			return resp, InsufficientReplicasError{error: errors.New(msg)}
		DebugPrintf("DEBUG: [%s] Replicas remaining to write: %v active uploads: %v", req.RequestID, replicasTodo, active)
		// Wait for something to happen.
		status := <-uploadStatusChan
		if status.statusCode == http.StatusOK {
			delete(lastError, status.url)
			resp.Replicas += status.replicasStored
			if len(status.classesStored) == 0 {
				// Server doesn't report
				// storage classes. Give up
				// trying to track which ones
				// are satisfied; just rely on
				trackingClasses = false
			// Credit the confirmed replicas against each class's
			// remaining count; a satisfied class is removed.
			for className, replicas := range status.classesStored {
				if replicasTodo[className] > replicas {
					replicasTodo[className] -= replicas
				delete(replicasTodo, className)
			resp.Locator = status.response
		// Failed upload: remember the error for the final report.
		msg := fmt.Sprintf("[%d] %s", status.statusCode, status.response)
		lastError[status.url] = msg
		if status.statusCode == 0 || status.statusCode == 408 || status.statusCode == 429 ||
			(status.statusCode >= 500 && status.statusCode != http.StatusInsufficientStorage) {
			// Timeout, too many requests, or other server side failure
			// (do not auto-retry status 507 "full")
			retryServers = append(retryServers, status.url[0:strings.LastIndex(status.url, "/")])
	// Back off before the next retry round.
	time.Sleep(delay.Next())
// parseStorageClassesConfirmedHeader parses an
// X-Keep-Storage-Classes-Confirmed header value of the form
// "class1=N, class2=M" into a map of class name to confirmed replica
// count. Replica counts must be positive integers.
// NOTE(review): a few interior lines (apparently early continue/return
// guards for empty input and empty class names) are not visible in this
// view; code lines are reproduced as-is.
func parseStorageClassesConfirmedHeader(hdr string) (map[string]int, error) {
	classesStored := map[string]int{}
	for _, cr := range strings.Split(hdr, ",") {
		cr = strings.TrimSpace(cr)
		// Each entry must be exactly "name=count".
		fields := strings.SplitN(cr, "=", 2)
		if len(fields) != 2 {
			return nil, fmt.Errorf("expected exactly one '=' char in entry %q", cr)
		className := fields[0]
		return nil, fmt.Errorf("empty class name in entry %q", cr)
		replicas, err := strconv.Atoi(fields[1])
		if err != nil || replicas < 1 {
			return nil, fmt.Errorf("invalid replica count %q", fields[1])
		classesStored[className] = replicas
	return classesStored, nil
// delayCalculator calculates a series of delays for implementing
// exponential backoff with jitter. The first call to Next() returns
// a random duration between MinimumRetryDelay and the specified
// InitialMaxDelay (or DefaultRetryDelay if 0). The max delay is
// doubled on each subsequent call to Next(), up to 10x the initial
// NOTE(review): a `limit` field, assigned and read by Next(), is
// declared on a line not visible in this view.
type delayCalculator struct {
	InitialMaxDelay time.Duration
	n               int           // number of delays returned so far
	nextmax         time.Duration // upper bound for the next random delay
// Next returns the next backoff delay: a uniformly random duration up to
// the current maximum, never less than MinimumRetryDelay, with the
// maximum capped at dc.limit.
// NOTE(review): interior lines (an else branch, the doubling step, and
// the return statement) are not visible in this view; code lines are
// reproduced as-is.
func (dc *delayCalculator) Next() time.Duration {
	// Lazy initialization on first use (or if nextmax was never raised
	// above the floor): seed the max delay and its cap.
	if dc.nextmax <= MinimumRetryDelay {
		if dc.InitialMaxDelay > 0 {
			dc.nextmax = dc.InitialMaxDelay
		dc.nextmax = DefaultRetryDelay
		dc.limit = 10 * dc.nextmax
	// Jitter: uniform in [0, nextmax), clamped up to MinimumRetryDelay.
	d := time.Duration(rand.Float64() * float64(dc.nextmax))
	if d < MinimumRetryDelay {
		d = MinimumRetryDelay
	if dc.nextmax > dc.limit {
		dc.nextmax = dc.limit