/* Internal methods to support keepclient.go */
package keepclient
import (
- "git.curoverse.com/arvados.git/sdk/go/streamer"
+ "crypto/md5"
"errors"
"fmt"
+ "git.curoverse.com/arvados.git/sdk/go/streamer"
"io"
"io/ioutil"
"log"
+ "net"
"net/http"
- "os"
- "strconv"
"strings"
+ "time"
)
type keepDisk struct {
+ Uuid string `json:"uuid"`
Hostname string `json:"service_host"`
Port int `json:"service_port"`
SSL bool `json:"service_ssl_flag"`
SvcType string `json:"service_type"`
+ ReadOnly bool `json:"read_only"`
}
-func (this *KeepClient) DiscoverKeepServers() error {
- if prx := os.Getenv("ARVADOS_KEEP_PROXY"); prx != "" {
- this.SetServiceRoots([]string{prx})
- this.Using_proxy = true
- return nil
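+// Md5String returns the hex-encoded MD5 digest of s; for example,
+// Md5String("foo") is "acbd18db4cc2f85cedef654fccc4a4d8".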
+func Md5String(s string) string {
+ return fmt.Sprintf("%x", md5.Sum([]byte(s)))
+}
+
+// setClientSettingsProxy sets timeouts that apply when connecting to
+// keepproxy services (assumed to be reached over the Internet).
+func (this *KeepClient) setClientSettingsProxy() {
+ if this.Client.Timeout == 0 {
+ // Maximum time to wait for a complete response
+ this.Client.Timeout = 300 * time.Second
+
+ // TCP and TLS connection settings
+ this.Client.Transport = &http.Transport{
+ Dial: (&net.Dialer{
+ // The maximum time to wait to set up
+ // the initial TCP connection.
+ Timeout: 30 * time.Second,
+
+ // The TCP keep alive heartbeat
+ // interval.
+ KeepAlive: 120 * time.Second,
+ }).Dial,
+
+ TLSHandshakeTimeout: 10 * time.Second,
+ }
+ }
+}
+
+// setClientSettingsStore sets timeouts that apply when connecting to
+// keepstore services directly (assumed to be on the local network).
+func (this *KeepClient) setClientSettingsStore() {
+ if this.Client.Timeout == 0 {
+ // Maximum time to wait for a complete response
+ this.Client.Timeout = 20 * time.Second
+
+ // TCP and TLS connection timeouts
+ this.Client.Transport = &http.Transport{
+ Dial: (&net.Dialer{
+ // The maximum time to wait to set up
+ // the initial TCP connection.
+ Timeout: 2 * time.Second,
+
+ // The TCP keep alive heartbeat
+ // interval.
+ KeepAlive: 180 * time.Second,
+ }).Dial,
+
+ TLSHandshakeTimeout: 4 * time.Second,
+ }
}
+}
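+
+// Both helpers are no-ops when the caller has already set a timeout, so a
+// caller that wants different settings can configure the client before
+// discovery. A minimal sketch (hypothetical caller code, assuming a
+// KeepClient value kc):
+//
+// kc.Client.Timeout = 60 * time.Second
+// kc.DiscoverKeepServers()
+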
+func (this *KeepClient) DiscoverKeepServers() error {
type svcList struct {
Items []keepDisk `json:"items"`
}
}
listed := make(map[string]bool)
- service_roots := make([]string, 0, len(m.Items))
-
- for _, element := range m.Items {
- n := ""
-
- if element.SSL {
- n = "s"
+ localRoots := make(map[string]string)
+ gatewayRoots := make(map[string]string)
+ writableLocalRoots := make(map[string]string)
+
+ for _, service := range m.Items {
+ scheme := "http"
+ if service.SSL {
+ scheme = "https"
}
-
- // Construct server URL
- url := fmt.Sprintf("http%s://%s:%d", n, element.Hostname, element.Port)
+ url := fmt.Sprintf("%s://%s:%d", scheme, service.Hostname, service.Port)
// Skip duplicates
- if !listed[url] {
- listed[url] = true
- service_roots = append(service_roots, url)
+ if listed[url] {
+ continue
}
- if element.SvcType == "proxy" {
+ listed[url] = true
+
+ switch service.SvcType {
+ case "disk":
+ localRoots[service.Uuid] = url
+ case "proxy":
+ localRoots[service.Uuid] = url
this.Using_proxy = true
}
- }
-
- this.SetServiceRoots(service_roots)
-
- return nil
-}
-func (this KeepClient) shuffledServiceRoots(hash string) (pseq []string) {
- // Build an ordering with which to query the Keep servers based on the
- // contents of the hash. "hash" is a hex-encoded number at least 8
- // digits (32 bits) long
-
- // seed used to calculate the next keep server from 'pool' to be added
- // to 'pseq'
- seed := hash
-
- // Keep servers still to be added to the ordering
- service_roots := this.ServiceRoots()
- pool := make([]string, len(service_roots))
- copy(pool, service_roots)
-
- // output probe sequence
- pseq = make([]string, 0, len(service_roots))
-
- // iterate while there are servers left to be assigned
- for len(pool) > 0 {
-
- if len(seed) < 8 {
- // ran out of digits in the seed
- if len(pseq) < (len(hash) / 4) {
- // the number of servers added to the probe
- // sequence is less than the number of 4-digit
- // slices in 'hash' so refill the seed with the
- // last 4 digits.
- seed = hash[len(hash)-4:]
- }
- seed += hash
+ if !service.ReadOnly {
+ writableLocalRoots[service.Uuid] = url
+ }
- // Take the next 8 digits (32 bits) and interpret as an integer,
- // then modulus with the size of the remaining pool to get the next
- // selected server.
- probe, _ := strconv.ParseUint(seed[0:8], 16, 32)
- probe %= uint64(len(pool))
-
- // Append the selected server to the probe sequence and remove it
- // from the pool.
- pseq = append(pseq, pool[probe])
- pool = append(pool[:probe], pool[probe+1:]...)
+ // Gateway services are only used when specified by
+ // UUID, so there's nothing to gain by filtering them
+ // by service type. Including all accessible services
+ // (gateway and otherwise) merely accommodates more
+ // service configurations.
+ gatewayRoots[service.Uuid] = url
+ }
- // Remove the digits just used from the seed
- seed = seed[8:]
+ if this.Using_proxy {
+ this.setClientSettingsProxy()
+ } else {
+ this.setClientSettingsStore()
}
- return pseq
+
+ this.SetServiceRoots(localRoots, writableLocalRoots, gatewayRoots)
+ return nil
}
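
// As an illustration (sample data, not from a real cluster): a
// keep_services entry such as
//
// {"uuid": "zzzzz-bi6l4-xxxxxxxxxxxxxxx", "service_host": "keep0.example",
// "service_port": 25107, "service_ssl_flag": false,
// "service_type": "disk", "read_only": false}
//
// produces entries in localRoots, writableLocalRoots and gatewayRoots that
// all map the uuid to "http://keep0.example:25107".
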
// uploadStatus is sent by each upload goroutine to report the outcome of
// one PUT request. (Fields reconstructed from the composite literals and
// accesses below.)
type uploadStatus struct {
err error
url string
statusCode int
replicas_stored int
response string
}
func (this KeepClient) uploadToKeepServer(host string, hash string, body io.ReadCloser,
- upload_status chan<- uploadStatus, expectedLength int64) {
-
- log.Printf("Uploading %s to %s", hash, host)
+ upload_status chan<- uploadStatus, expectedLength int64, requestId string) {
var req *http.Request
var err error
var url = fmt.Sprintf("%s/%s", host, hash)
if req, err = http.NewRequest("PUT", url, nil); err != nil {
+ log.Printf("[%v] Error creating request PUT %v error: %v", requestId, url, err.Error())
upload_status <- uploadStatus{err, url, 0, 0, ""}
body.Close()
return
}
+ req.ContentLength = expectedLength
if expectedLength > 0 {
- req.ContentLength = expectedLength
+ // http.Client.Do will close the body ReadCloser when it is
+ // done with it.
+ req.Body = body
+ } else {
+ // "For client requests, a value of 0 means unknown if Body is
+ // not nil." In this case we do want the body to be empty, so
+ // don't set req.Body. However, we still need to close the
+ // body ReadCloser.
+ body.Close()
}
req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.Arvados.ApiToken))
req.Header.Add(X_Keep_Desired_Replicas, fmt.Sprint(this.Want_replicas))
- req.Body = body
-
var resp *http.Response
if resp, err = this.Client.Do(req); err != nil {
+ log.Printf("[%v] Upload failed %v error: %v", requestId, url, err.Error())
upload_status <- uploadStatus{err, url, 0, 0, ""}
- body.Close()
return
}
defer resp.Body.Close()
defer io.Copy(ioutil.Discard, resp.Body)
respbody, err2 := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
+ response := strings.TrimSpace(string(respbody))
if err2 != nil && err2 != io.EOF {
- upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, string(respbody)}
- return
- }
-
- locator := strings.TrimSpace(string(respbody))
-
- if resp.StatusCode == http.StatusOK {
- upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, locator}
+ log.Printf("[%v] Upload %v error: %v response: %v", requestId, url, err2.Error(), response)
+ upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, response}
+ } else if resp.StatusCode == http.StatusOK {
+ log.Printf("[%v] Upload %v success", requestId, url)
+ upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, response}
} else {
- upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, locator}
+ log.Printf("[%v] Upload %v error: %v response: %v", requestId, url, resp.Status, response)
+ upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
}
}
func (this KeepClient) putReplicas(hash string,
tr *streamer.AsyncStream,
expectedLength int64) (locator string, replicas int, err error) {
+ // Take the hash of the block hash and the current timestamp in order
+ // to identify this specific transaction in log statements.
+ requestId := fmt.Sprintf("%x", md5.Sum([]byte(hash+time.Now().String())))[0:8]
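+ // The truncated digest only serves to correlate log lines for a single
+ // transaction; it has no security function.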
+
// Calculate the ordering for uploading to servers
- sv := this.shuffledServiceRoots(hash)
+ sv := NewRootSorter(this.WritableLocalRoots(), hash).GetSortedRoots()
// The next server to try contacting
next_server := 0
// The number of active requests sent to writable servers
active := 0

// Used to communicate status from the upload goroutines
upload_status := make(chan uploadStatus)
defer close(upload_status)
// Desired number of replicas
-
remaining_replicas := this.Want_replicas
for remaining_replicas > 0 {
for active < remaining_replicas {
// Start some upload requests
if next_server < len(sv) {
- go this.uploadToKeepServer(sv[next_server], hash, tr.MakeStreamReader(), upload_status, expectedLength)
+ log.Printf("[%v] Begin upload %s to %s", requestId, hash, sv[next_server])
+ go this.uploadToKeepServer(sv[next_server], hash, tr.MakeStreamReader(), upload_status, expectedLength, requestId)
next_server += 1
active += 1
} else {
// Every server in the probe order has been tried; if requests
// are still in flight, wait for one to finish, otherwise give
// up (InsufficientReplicasError comes from keepclient.go).
if active == 0 {
return locator, (this.Want_replicas - remaining_replicas), InsufficientReplicasError
}
break
}
}
+ log.Printf("[%v] Replicas remaining to write: %v active uploads: %v",
+ requestId, remaining_replicas, active)
// Now wait for something to happen.
status := <-upload_status
+ active -= 1
+
if status.statusCode == 200 {
// good news!
remaining_replicas -= status.replicas_stored
locator = status.response
- } else {
- // writing to keep server failed for some reason
- log.Printf("Keep server put to %v failed with '%v'",
- status.url, status.err)
}
- active -= 1
- log.Printf("Upload to %v status code: %v remaining replicas: %v active: %v", status.url, status.statusCode, remaining_replicas, active)
}
return locator, this.Want_replicas, nil
}
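
// A minimal sketch of how this code path is reached (hypothetical caller;
// assumes the MakeKeepClient and PutB entry points in keepclient.go, with
// error handling elided):
//
// kc, _ := MakeKeepClient(&arv) // discovers services
// locator, replicas, _ := kc.PutB([]byte("foo"))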