"fmt"
"io"
"io/ioutil"
- "log"
+ "math/rand"
"net/http"
- "os"
"strconv"
"strings"
+ "time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/asyncbuf"
)
-// DebugPrintf emits debug messages. The easiest way to enable
-// keepclient debug messages in your application is to assign
-// log.Printf to DebugPrintf.
-var DebugPrintf = func(string, ...interface{}) {}
-
-func init() {
- if arvadosclient.StringBool(os.Getenv("ARVADOS_DEBUG")) {
- DebugPrintf = log.Printf
- }
-}
-
type keepService struct {
Uuid string `json:"uuid"`
Hostname string `json:"service_host"`
var err error
var url = fmt.Sprintf("%s/%s", host, hash)
if req, err = http.NewRequest("PUT", url, nil); err != nil {
- DebugPrintf("DEBUG: [%s] Error creating request PUT %v error: %v", reqid, url, err.Error())
+ kc.debugf("[%s] Error creating request: PUT %s error: %s", reqid, url, err)
uploadStatusChan <- uploadStatus{err, url, 0, 0, nil, ""}
return
}
}
req.Header.Add("X-Request-Id", reqid)
- req.Header.Add("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
+ req.Header.Add("Authorization", "Bearer "+kc.Arvados.ApiToken)
req.Header.Add("Content-Type", "application/octet-stream")
req.Header.Add(XKeepDesiredReplicas, fmt.Sprint(kc.Want_replicas))
if len(classesTodo) > 0 {
var resp *http.Response
if resp, err = kc.httpClient().Do(req); err != nil {
- DebugPrintf("DEBUG: [%s] Upload failed %v error: %v", reqid, url, err.Error())
+ kc.debugf("[%s] Upload failed: %s error: %s", reqid, url, err)
uploadStatusChan <- uploadStatus{err, url, 0, 0, nil, err.Error()}
return
}
scc := resp.Header.Get(XKeepStorageClassesConfirmed)
classesStored, err := parseStorageClassesConfirmedHeader(scc)
if err != nil {
- DebugPrintf("DEBUG: [%s] Ignoring invalid %s header %q: %s", reqid, XKeepStorageClassesConfirmed, scc, err)
+ kc.debugf("[%s] Ignoring invalid %s header %q: %s", reqid, XKeepStorageClassesConfirmed, scc, err)
}
defer resp.Body.Close()
respbody, err2 := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
response := strings.TrimSpace(string(respbody))
if err2 != nil && err2 != io.EOF {
- DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, err2.Error(), response)
+ kc.debugf("[%s] Upload %s error: %s response: %s", reqid, url, err2, response)
uploadStatusChan <- uploadStatus{err2, url, resp.StatusCode, rep, classesStored, response}
} else if resp.StatusCode == http.StatusOK {
- DebugPrintf("DEBUG: [%s] Upload %v success", reqid, url)
+ kc.debugf("[%s] Upload %s success", reqid, url)
uploadStatusChan <- uploadStatus{nil, url, resp.StatusCode, rep, classesStored, response}
} else {
if resp.StatusCode >= 300 && response == "" {
response = resp.Status
}
- DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, resp.StatusCode, response)
+ kc.debugf("[%s] Upload %s status: %d %s", reqid, url, resp.StatusCode, response)
uploadStatusChan <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, classesStored, response}
}
}
-func (kc *KeepClient) BlockWrite(ctx context.Context, req arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
+func (kc *KeepClient) httpBlockWrite(ctx context.Context, req arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
var resp arvados.BlockWriteResponse
var getReader func() io.Reader
if req.Data == nil && req.Reader == nil {
getReader = func() io.Reader { return bytes.NewReader(req.Data[:req.DataSize]) }
} else {
buf := asyncbuf.NewBuffer(make([]byte, 0, req.DataSize))
+ reader := req.Reader
+ if req.Hash != "" {
+ reader = HashCheckingReader{req.Reader, md5.New(), req.Hash}
+ }
go func() {
- _, err := io.Copy(buf, HashCheckingReader{req.Reader, md5.New(), req.Hash})
+ _, err := io.Copy(buf, reader)
buf.CloseWithError(err)
}()
getReader = buf.NewReader
req.Hash = fmt.Sprintf("%x", m.Sum(nil))
}
if req.StorageClasses == nil {
- req.StorageClasses = kc.StorageClasses
+ if len(kc.StorageClasses) > 0 {
+ req.StorageClasses = kc.StorageClasses
+ } else {
+ req.StorageClasses = kc.DefaultStorageClasses
+ }
}
if req.Replicas == 0 {
req.Replicas = kc.Want_replicas
replicasPerThread = req.Replicas
}
+ delay := delayCalculator{InitialMaxDelay: kc.RetryDelay}
retriesRemaining := req.Attempts
var retryServers []string
for active*replicasPerThread < maxConcurrency {
// Start some upload requests
if nextServer < len(sv) {
- DebugPrintf("DEBUG: [%s] Begin upload %s to %s", req.RequestID, req.Hash, sv[nextServer])
+ kc.debugf("[%s] Begin upload %s to %s", req.RequestID, req.Hash, sv[nextServer])
go kc.uploadToKeepServer(sv[nextServer], req.Hash, classesTodo, getReader(), uploadStatusChan, req.DataSize, req.RequestID)
nextServer++
active++
msg += resp + "; "
}
msg = msg[:len(msg)-2]
- return resp, InsufficientReplicasError(errors.New(msg))
+ return resp, InsufficientReplicasError{error: errors.New(msg)}
}
break
}
}
- DebugPrintf("DEBUG: [%s] Replicas remaining to write: %v active uploads: %v", req.RequestID, replicasTodo, active)
+ kc.debugf("[%s] Replicas remaining to write: %d active uploads: %d", req.RequestID, replicasTodo, active)
if active < 1 {
break
}
}
if status.statusCode == 0 || status.statusCode == 408 || status.statusCode == 429 ||
- (status.statusCode >= 500 && status.statusCode != 503) {
+ (status.statusCode >= 500 && status.statusCode != http.StatusInsufficientStorage) {
// Timeout, too many requests, or other server side failure
- // Do not retry when status code is 503, which means the keep server is full
+ // (do not auto-retry status 507 "full")
retryServers = append(retryServers, status.url[0:strings.LastIndex(status.url, "/")])
}
}
sv = retryServers
+ if len(sv) > 0 {
+ time.Sleep(delay.Next())
+ }
}
return resp, nil
}
return classesStored, nil
}
+
+// delayCalculator calculates a series of delays for implementing
+// exponential backoff with jitter. The first call to Next() returns
+// a random duration between MinimumRetryDelay and the specified
+// InitialMaxDelay (or DefaultRetryDelay if 0). The max delay is
+// doubled on each subsequent call to Next(), up to 10x the initial
+// max delay.
+//
+// The zero value is usable; initialization happens lazily inside
+// Next().
+type delayCalculator struct {
+	InitialMaxDelay time.Duration
+	n               int           // number of delays returned so far
+	nextmax         time.Duration // upper bound for the next delay (0 until first Next() call)
+	limit           time.Duration // hard cap on nextmax: 10x the initial max delay
+}
+
+// Next returns the next delay to sleep before retrying: a random
+// ("jittered") duration of at least MinimumRetryDelay, bounded above
+// by a maximum that doubles on each call, up to 10x the initial
+// maximum.
+func (dc *delayCalculator) Next() time.Duration {
+	if dc.n == 0 {
+		// First call: establish the initial max delay and the
+		// 10x cap. Keying this on the call counter (rather
+		// than on nextmax's value) ensures backoff still
+		// escalates when InitialMaxDelay < MinimumRetryDelay,
+		// which would otherwise re-trigger initialization on
+		// every call and pin the delay at the minimum forever.
+		initial := dc.InitialMaxDelay
+		if initial <= 0 {
+			initial = DefaultRetryDelay
+		}
+		dc.nextmax = initial
+		dc.limit = 10 * initial
+	}
+	dc.n++
+	// Jitter: uniform in [0, nextmax), clamped up to the minimum.
+	d := time.Duration(rand.Float64() * float64(dc.nextmax))
+	if d < MinimumRetryDelay {
+		d = MinimumRetryDelay
+	}
+	// Exponential backoff for the next call, capped at the limit.
+	dc.nextmax *= 2
+	if dc.nextmax > dc.limit {
+		dc.nextmax = dc.limit
+	}
+	return d
+}