1885: Added logging of invalid requests. Added logging when the server list is
[arvados.git] / sdk / go / src / arvados.org / keepclient / keepclient.go
index 2738cefa7c3144ec613aa8b6f8d6922e33debc44..8d26b32790dc4f5f164d526332163bce80162b38 100644
@@ -1,18 +1,20 @@
+/* Package keepclient provides low-level Get/Put primitives for accessing Arvados Keep blocks. */
 package keepclient
 
 import (
+       "arvados.org/streamer"
        "crypto/md5"
        "crypto/tls"
-       "encoding/json"
        "errors"
        "fmt"
        "io"
        "io/ioutil"
-       "log"
        "net/http"
        "os"
        "sort"
-       "strconv"
+       "sync"
+       "sync/atomic"
+       "unsafe"
 )
 
 // A Keep "block" is 64MB.
@@ -20,524 +22,107 @@ const BLOCKSIZE = 64 * 1024 * 1024
 
 var BlockNotFound = errors.New("Block not found")
 var InsufficientReplicasError = errors.New("Could not write sufficient replicas")
+var OversizeBlockError = errors.New("Block too big")
+var MissingArvadosApiHost = errors.New("Missing required environment variable ARVADOS_API_HOST")
+var MissingArvadosApiToken = errors.New("Missing required environment variable ARVADOS_API_TOKEN")
+
+const X_Keep_Desired_Replicas = "X-Keep-Desired-Replicas"
+const X_Keep_Replicas_Stored = "X-Keep-Replicas-Stored"
 
+// Information about Arvados and Keep servers.
 type KeepClient struct {
        ApiServer     string
        ApiToken      string
        ApiInsecure   bool
-       Service_roots []string
        Want_replicas int
        Client        *http.Client
+       Using_proxy   bool
+       External      bool
+       service_roots *[]string
+       lock          sync.Mutex
 }
 
-type KeepDisk struct {
-       Hostname string `json:"service_host"`
-       Port     int    `json:"service_port"`
-       SSL      bool   `json:"service_ssl_flag"`
-}
-
+// Create a new KeepClient, initialized with standard Arvados environment
+// variables ARVADOS_API_HOST, ARVADOS_API_TOKEN, and (optionally)
+// ARVADOS_API_HOST_INSECURE.  This will contact the API server to discover
+// Keep servers.
 func MakeKeepClient() (kc KeepClient, err error) {
-       tr := &http.Transport{
-               TLSClientConfig: &tls.Config{InsecureSkipVerify: kc.ApiInsecure},
-       }
+       insecure := (os.Getenv("ARVADOS_API_HOST_INSECURE") == "true")
+       external := (os.Getenv("ARVADOS_EXTERNAL_CLIENT") == "true")
 
        kc = KeepClient{
                ApiServer:     os.Getenv("ARVADOS_API_HOST"),
                ApiToken:      os.Getenv("ARVADOS_API_TOKEN"),
-               ApiInsecure:   (os.Getenv("ARVADOS_API_HOST_INSECURE") != ""),
+               ApiInsecure:   insecure,
                Want_replicas: 2,
-               Client:        &http.Client{Transport: tr}}
-
-       err = (&kc).DiscoverKeepServers()
-
-       return kc, err
-}
-
-func (this *KeepClient) DiscoverKeepServers() error {
-       // Construct request of keep disk list
-       var req *http.Request
-       var err error
-       if req, err = http.NewRequest("GET", fmt.Sprintf("https://%s/arvados/v1/keep_disks", this.ApiServer), nil); err != nil {
-               return err
-       }
-
-       // Add api token header
-       req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
-
-       // Make the request
-       var resp *http.Response
-       if resp, err = this.Client.Do(req); err != nil {
-               return err
-       }
-
-       type SvcList struct {
-               Items []KeepDisk `json:"items"`
-       }
-
-       // Decode json reply
-       dec := json.NewDecoder(resp.Body)
-       var m SvcList
-       if err := dec.Decode(&m); err != nil {
-               return err
-       }
-
-       listed := make(map[string]bool)
-       this.Service_roots = make([]string, 0, len(m.Items))
-
-       for _, element := range m.Items {
-               n := ""
-               if element.SSL {
-                       n = "s"
-               }
-
-               // Construct server URL
-               url := fmt.Sprintf("http%s://%s:%d", n, element.Hostname, element.Port)
-
-               // Skip duplicates
-               if !listed[url] {
-                       listed[url] = true
-                       this.Service_roots = append(this.Service_roots, url)
-               }
-       }
-
-       // Must be sorted for ShuffledServiceRoots() to produce consistent
-       // results.
-       sort.Strings(this.Service_roots)
-
-       return nil
-}
-
-func (this KeepClient) ShuffledServiceRoots(hash string) (pseq []string) {
-       // Build an ordering with which to query the Keep servers based on the
-       // contents of the hash.  "hash" is a hex-encoded number at least 8
-       // digits (32 bits) long
-
-       // seed used to calculate the next keep server from 'pool' to be added
-       // to 'pseq'
-       seed := hash
-
-       // Keep servers still to be added to the ordering
-       pool := make([]string, len(this.Service_roots))
-       copy(pool, this.Service_roots)
-
-       // output probe sequence
-       pseq = make([]string, 0, len(this.Service_roots))
-
-       // iterate while there are servers left to be assigned
-       for len(pool) > 0 {
-
-               if len(seed) < 8 {
-                       // ran out of digits in the seed
-                       if len(pseq) < (len(hash) / 4) {
-                               // the number of servers added to the probe
-                               // sequence is less than the number of 4-digit
-                               // slices in 'hash' so refill the seed with the
-                               // last 4 digits.
-                               seed = hash[len(hash)-4:]
-                       }
-                       seed += hash
-               }
-
-               // Take the next 8 digits (32 bytes) and interpret as an integer,
-               // then modulus with the size of the remaining pool to get the next
-               // selected server.
-               probe, _ := strconv.ParseUint(seed[0:8], 16, 32)
-               probe %= uint64(len(pool))
-
-               // Append the selected server to the probe sequence and remove it
-               // from the pool.
-               pseq = append(pseq, pool[probe])
-               pool = append(pool[:probe], pool[probe+1:]...)
-
-               // Remove the digits just used from the seed
-               seed = seed[8:]
-       }
-       return pseq
-}
-
-type ReaderSlice struct {
-       slice        []byte
-       reader_error error
-}
-
-// Read repeatedly from the reader into the specified buffer, and report each
-// read to channel 'c'.  Completes when Reader 'r' reports on the error channel
-// and closes channel 'c'.
-func ReadIntoBuffer(buffer []byte, r io.Reader, slices chan<- ReaderSlice) {
-       defer close(slices)
-
-       // Initially use entire buffer as scratch space
-       ptr := buffer[:]
-       for {
-               var n int
-               var err error
-               if len(ptr) > 0 {
-                       // Read into the scratch space
-                       n, err = r.Read(ptr)
-               } else {
-                       // Ran out of scratch space, try reading one more byte
-                       var b [1]byte
-                       n, err = r.Read(b[:])
-
-                       if n > 0 {
-                               // Reader has more data but we have nowhere to
-                               // put it, so we're stuffed
-                               slices <- ReaderSlice{nil, io.ErrShortBuffer}
-                       } else {
-                               // Return some other error (hopefully EOF)
-                               slices <- ReaderSlice{nil, err}
-                       }
-                       return
-               }
-
-               // End on error (includes EOF)
-               if err != nil {
-                       slices <- ReaderSlice{nil, err}
-                       return
-               }
-
-               if n > 0 {
-                       // Make a slice with the contents of the read
-                       slices <- ReaderSlice{ptr[:n], nil}
-
-                       // Adjust the scratch space slice
-                       ptr = ptr[n:]
-               }
-       }
-}
-
-// A read request to the Transfer() function
-type ReadRequest struct {
-       offset  int
-       maxsize int
-       result  chan<- ReadResult
-}
-
-// A read result from the Transfer() function
-type ReadResult struct {
-       slice []byte
-       err   error
-}
-
-// Reads from the buffer managed by the Transfer()
-type BufferReader struct {
-       offset    *int
-       requests  chan<- ReadRequest
-       responses chan ReadResult
-}
-
-func MakeBufferReader(requests chan<- ReadRequest) BufferReader {
-       return BufferReader{new(int), requests, make(chan ReadResult)}
-}
+               Client: &http.Client{Transport: &http.Transport{
+                       TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure}}},
+               Using_proxy: false,
+               External:    external}
 
-// Reads from the buffer managed by the Transfer()
-func (this BufferReader) Read(p []byte) (n int, err error) {
-       this.requests <- ReadRequest{*this.offset, len(p), this.responses}
-       rr, valid := <-this.responses
-       if valid {
-               *this.offset += len(rr.slice)
-               return copy(p, rr.slice), rr.err
-       } else {
-               return 0, io.ErrUnexpectedEOF
-       }
-}
-
-func (this BufferReader) WriteTo(dest io.Writer) (written int64, err error) {
-       // Record starting offset in order to correctly report the number of bytes sent
-       starting_offset := *this.offset
-       for {
-               this.requests <- ReadRequest{*this.offset, 32 * 1024, this.responses}
-               rr, valid := <-this.responses
-               if valid {
-                       log.Printf("WriteTo slice %v %d %v", *this.offset, len(rr.slice), rr.err)
-                       *this.offset += len(rr.slice)
-                       if rr.err != nil {
-                               if rr.err == io.EOF {
-                                       // EOF is not an error.
-                                       return int64(*this.offset - starting_offset), nil
-                               } else {
-                                       return int64(*this.offset - starting_offset), rr.err
-                               }
-                       } else {
-                               dest.Write(rr.slice)
-                       }
-               } else {
-                       return int64(*this.offset), io.ErrUnexpectedEOF
-               }
-       }
-}
-
-// Close the responses channel
-func (this BufferReader) Close() error {
-       close(this.responses)
-       return nil
-}
-
-// Handle a read request.  Returns true if a response was sent, and false if
-// the request should be queued.
-func HandleReadRequest(req ReadRequest, body []byte, complete bool) bool {
-       log.Printf("HandleReadRequest %d %d %d", req.offset, req.maxsize, len(body))
-       if req.offset < len(body) {
-               var end int
-               if req.offset+req.maxsize < len(body) {
-                       end = req.offset + req.maxsize
-               } else {
-                       end = len(body)
-               }
-               req.result <- ReadResult{body[req.offset:end], nil}
-               return true
-       } else if complete && req.offset >= len(body) {
-               req.result <- ReadResult{nil, io.EOF}
-               return true
-       } else {
-               return false
-       }
-}
-
-// If 'source_reader' is not nil, reads data from 'source_reader' and stores it
-// in the provided buffer.  Otherwise, use the contents of 'buffer' as is.
-// Accepts read requests on the buffer on the 'requests' channel.  Completes
-// when 'requests' channel is closed.
-func Transfer(source_buffer []byte, source_reader io.Reader, requests <-chan ReadRequest, reader_error chan error) {
-       // currently buffered data
-       var body []byte
-
-       // for receiving slices from ReadIntoBuffer
-       var slices chan ReaderSlice = nil
-
-       // indicates whether the buffered data is complete
-       var complete bool = false
-
-       if source_reader != nil {
-               // 'body' is the buffer slice representing the body content read so far
-               body = source_buffer[:0]
-
-               // used to communicate slices of the buffer as they are
-               // ReadIntoBuffer will close 'slices' when it is done with it
-               slices = make(chan ReaderSlice)
-
-               // Spin it off
-               go ReadIntoBuffer(source_buffer, source_reader, slices)
-       } else {
-               // use the whole buffer
-               body = source_buffer[:]
-
-               // buffer is complete
-               complete = true
-       }
-
-       pending_requests := make([]ReadRequest, 0)
-
-       for {
-               select {
-               case req, valid := <-requests:
-                       // Handle a buffer read request
-                       if valid {
-                               if !HandleReadRequest(req, body, complete) {
-                                       pending_requests = append(pending_requests, req)
-                               }
-                       } else {
-                               // closed 'requests' channel indicates we're done
-                               return
-                       }
-
-               case bk, valid := <-slices:
-                       // Got a new slice from the reader
-                       if valid {
-                               if bk.reader_error != nil {
-                                       reader_error <- bk.reader_error
-                                       if bk.reader_error == io.EOF {
-                                               // EOF indicates the reader is done
-                                               // sending, so our buffer is complete.
-                                               complete = true
-                                       } else {
-                                               // some other reader error
-                                               return
-                                       }
-                               }
-
-                               if bk.slice != nil {
-                                       // adjust body bounds now that another slice has been read
-                                       body = source_buffer[0 : len(body)+len(bk.slice)]
-                               }
-
-                               // handle pending reads
-                               n := 0
-                               for n < len(pending_requests) {
-                                       if HandleReadRequest(pending_requests[n], body, complete) {
-
-                                               // move the element from the
-                                               // back of the slice to
-                                               // position 'n', then shorten
-                                               // the slice by one element
-                                               pending_requests[n] = pending_requests[len(pending_requests)-1]
-                                               pending_requests = pending_requests[0 : len(pending_requests)-1]
-                                       } else {
-
-                                               // Request wasn't handled, so keep it in the request slice
-                                               n += 1
-                                       }
-                               }
-                       } else {
-                               if complete {
-                                       // no more reads
-                                       slices = nil
-                               } else {
-                                       // reader channel closed without signaling EOF
-                                       reader_error <- io.ErrUnexpectedEOF
-                                       return
-                               }
-                       }
-               }
-       }
-}
-
-type UploadStatus struct {
-       Err        error
-       Url        string
-       StatusCode int
-}
-
-func (this KeepClient) uploadToKeepServer(host string, hash string, body io.ReadCloser,
-       upload_status chan<- UploadStatus, expectedLength int64) {
-
-       log.Printf("Uploading to %s", host)
-
-       var req *http.Request
-       var err error
-       var url = fmt.Sprintf("%s/%s", host, hash)
-       if req, err = http.NewRequest("PUT", url, nil); err != nil {
-               upload_status <- UploadStatus{err, url, 0}
-               return
+       if os.Getenv("ARVADOS_API_HOST") == "" {
+               return kc, MissingArvadosApiHost
        }
-
-       if expectedLength > 0 {
-               req.ContentLength = expectedLength
+       if os.Getenv("ARVADOS_API_TOKEN") == "" {
+               return kc, MissingArvadosApiToken
        }
 
-       req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
-       req.Header.Add("Content-Type", "application/octet-stream")
-       req.Body = body
-
-       var resp *http.Response
-       if resp, err = this.Client.Do(req); err != nil {
-               upload_status <- UploadStatus{err, url, 0}
-               return
-       }
-
-       if resp.StatusCode == http.StatusOK {
-               upload_status <- UploadStatus{nil, url, resp.StatusCode}
-       } else {
-               upload_status <- UploadStatus{errors.New(resp.Status), url, resp.StatusCode}
-       }
-}
-
-func (this KeepClient) putReplicas(
-       hash string,
-       requests chan ReadRequest,
-       reader_status chan error,
-       expectedLength int64) (replicas int, err error) {
-
-       // Calculate the ordering for uploading to servers
-       sv := this.ShuffledServiceRoots(hash)
-
-       // The next server to try contacting
-       next_server := 0
-
-       // The number of active writers
-       active := 0
-
-       // Used to communicate status from the upload goroutines
-       upload_status := make(chan UploadStatus)
-       defer close(upload_status)
-
-       // Desired number of replicas
-       remaining_replicas := this.Want_replicas
-
-       for remaining_replicas > 0 {
-               for active < remaining_replicas {
-                       // Start some upload requests
-                       if next_server < len(sv) {
-                               go this.uploadToKeepServer(sv[next_server], hash, MakeBufferReader(requests), upload_status, expectedLength)
-                               next_server += 1
-                               active += 1
-                       } else {
-                               return (this.Want_replicas - remaining_replicas), InsufficientReplicasError
-                       }
-               }
-
-               // Now wait for something to happen.
-               select {
-               case status := <-reader_status:
-                       if status == io.EOF {
-                               // good news!
-                       } else {
-                               // bad news
-                               return (this.Want_replicas - remaining_replicas), status
-                       }
-               case status := <-upload_status:
-                       if status.StatusCode == 200 {
-                               // good news!
-                               remaining_replicas -= 1
-                       } else {
-                               // writing to keep server failed for some reason
-                               log.Printf("Keep server put to %v failed with '%v'",
-                                       status.Url, status.Err)
-                       }
-                       active -= 1
-                       log.Printf("Upload status %v %v %v", status.StatusCode, remaining_replicas, active)
-               }
-       }
+       err = (&kc).DiscoverKeepServers()
 
-       return (this.Want_replicas - remaining_replicas), nil
+       return kc, err
 }
 
-var OversizeBlockError = errors.New("Block too big")
-
+// Put a block given the block hash, a reader with the block data, and the
+// expected length of that data.  The desired number of replicas is given in
+// KeepClient.Want_replicas.  Returns the number of replicas that were written
+// and an error if one occurred.  Note this will return InsufficientReplicasError
+// whenever 0 <= replicas < this.Want_replicas.
 func (this KeepClient) PutHR(hash string, r io.Reader, expectedLength int64) (replicas int, err error) {
 
        // Buffer for reads from 'r'
-       var buffer []byte
+       var bufsize int
        if expectedLength > 0 {
                if expectedLength > BLOCKSIZE {
                        return 0, OversizeBlockError
                }
-               buffer = make([]byte, expectedLength)
+               bufsize = int(expectedLength)
        } else {
-               buffer = make([]byte, BLOCKSIZE)
+               bufsize = BLOCKSIZE
        }
 
-       // Read requests on Transfer() buffer
-       requests := make(chan ReadRequest)
-       defer close(requests)
-
-       // Reporting reader error states
-       reader_status := make(chan error)
-       defer close(reader_status)
-
-       // Start the transfer goroutine
-       go Transfer(buffer, r, requests, reader_status)
+       t := streamer.AsyncStreamFromReader(bufsize, HashCheckingReader{r, md5.New(), hash})
+       defer t.Close()
 
-       return this.putReplicas(hash, requests, reader_status, expectedLength)
+       return this.putReplicas(hash, t, expectedLength)
 }
 
-func (this KeepClient) PutHB(hash string, buffer []byte) (replicas int, err error) {
-       // Read requests on Transfer() buffer
-       requests := make(chan ReadRequest)
-       defer close(requests)
+// Put a block given the block hash and a byte buffer.  The desired number of
+// replicas is given in KeepClient.Want_replicas.  Returns the number of
+// replicas that were written and an error if one occurred.  Note this will
+// return InsufficientReplicasError whenever 0 <= replicas < this.Want_replicas.
+func (this KeepClient) PutHB(hash string, buf []byte) (replicas int, err error) {
+       t := streamer.AsyncStreamFromSlice(buf)
+       defer t.Close()
 
-       // Start the transfer goroutine
-       go Transfer(buffer, nil, requests, nil)
-
-       return this.putReplicas(hash, requests, nil, int64(len(buffer)))
+       return this.putReplicas(hash, t, int64(len(buf)))
 }
 
+// Put a block given a buffer.  The hash will be computed.  The desired number
+// of replicas is given in KeepClient.Want_replicas.  Returns the number of
+// replicas that were written and an error if one occurred.  Note this will
+// return InsufficientReplicasError whenever 0 <= replicas < this.Want_replicas.
 func (this KeepClient) PutB(buffer []byte) (hash string, replicas int, err error) {
        hash = fmt.Sprintf("%x", md5.Sum(buffer))
        replicas, err = this.PutHB(hash, buffer)
        return hash, replicas, err
 }
 
+// Put a block, given a Reader.  This will read the entire reader into a buffer
+// to compute the hash.  The desired number of replicas is given in
+// KeepClient.Want_replicas.  Returns the number of replicas that were written
+// and an error if one occurred.  Note this will return InsufficientReplicasError
+// whenever 0 <= replicas < this.Want_replicas.  Also note that if the block
+// hash and data size are available, PutHR() is more efficient.
 func (this KeepClient) PutR(r io.Reader) (hash string, replicas int, err error) {
        if buffer, err := ioutil.ReadAll(r); err != nil {
                return "", 0, err
@@ -546,18 +131,27 @@ func (this KeepClient) PutR(r io.Reader) (hash string, replicas int, err error)
        }
 }
 
+// Get a block given a hash.  Returns a reader, the expected data length, the
+// URL the block was fetched from, and an error if one occurred.  If the block
+// checksum does not match, the final Read() on the reader returned by this
+// method will return a BadChecksum error instead of EOF.
 func (this KeepClient) Get(hash string) (reader io.ReadCloser,
        contentLength int64, url string, err error) {
        return this.AuthorizedGet(hash, "", "")
 }
 
+// Get a block given a hash, with additional authorization provided by
+// signature and timestamp.  Returns a reader, the expected data length, the URL
+// the block was fetched from, and an error if one occurred.  If the block
+// checksum does not match, the final Read() on the reader returned by this
+// method will return a BadChecksum error instead of EOF.
 func (this KeepClient) AuthorizedGet(hash string,
        signature string,
        timestamp string) (reader io.ReadCloser,
        contentLength int64, url string, err error) {
 
        // Calculate the ordering for asking servers
-       sv := this.ShuffledServiceRoots(hash)
+       sv := this.shuffledServiceRoots(hash)
 
        for _, host := range sv {
                var req *http.Request
@@ -581,21 +175,25 @@ func (this KeepClient) AuthorizedGet(hash string,
                }
 
                if resp.StatusCode == http.StatusOK {
-                       return resp.Body, resp.ContentLength, url, nil
+                       return HashCheckingReader{resp.Body, md5.New(), hash}, resp.ContentLength, url, nil
                }
        }
 
        return nil, 0, "", BlockNotFound
 }
 
+// Determine whether a block with the given hash is available and readable,
+// without returning the block contents.
 func (this KeepClient) Ask(hash string) (contentLength int64, url string, err error) {
        return this.AuthorizedAsk(hash, "", "")
 }
 
+// Determine whether a block with the given hash is available and readable with
+// the given signature and timestamp, without returning the block contents.
 func (this KeepClient) AuthorizedAsk(hash string, signature string,
        timestamp string) (contentLength int64, url string, err error) {
        // Calculate the ordering for asking servers
-       sv := this.ShuffledServiceRoots(hash)
+       sv := this.shuffledServiceRoots(hash)
 
        for _, host := range sv {
                var req *http.Request
@@ -626,3 +224,22 @@ func (this KeepClient) AuthorizedAsk(hash string, signature string,
        return 0, "", BlockNotFound
 
 }
+
+// Atomically read the service_roots field.
+func (this *KeepClient) ServiceRoots() []string {
+       r := (*[]string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&this.service_roots))))
+       return *r
+}
+
+// Atomically update the service_roots field.  Enables you to update
+// service_roots without disrupting any GET or PUT operations that might
+// already be in progress.
+func (this *KeepClient) SetServiceRoots(svc []string) {
+       // Must be sorted for shuffledServiceRoots() to produce consistent
+       // results.
+       roots := make([]string, len(svc))
+       copy(roots, svc)
+       sort.Strings(roots)
+       atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&this.service_roots)),
+               unsafe.Pointer(&roots))
+}
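
For reference, a minimal usage sketch of the API surface added above, assuming the standard environment variables are set and that the package is imported as arvados.org/keepclient (matching the source tree layout shown in the file path); the block contents and error handling are illustrative only:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"arvados.org/keepclient"
)

func main() {
	// MakeKeepClient reads ARVADOS_API_HOST, ARVADOS_API_TOKEN and
	// ARVADOS_API_HOST_INSECURE, then discovers the Keep servers.
	kc, err := keepclient.MakeKeepClient()
	if err != nil {
		log.Fatal(err)
	}

	// Write a block; PutB hashes the buffer and returns the locator hash
	// and the number of replicas actually stored.
	hash, replicas, err := kc.PutB([]byte("hello world"))
	if err != nil {
		log.Fatalf("put failed after %d replicas: %v", replicas, err)
	}

	// Read it back.  The returned reader verifies the MD5 checksum as it
	// is consumed, so the error from reading (not just from Get) must be
	// checked.
	reader, size, url, err := kc.Get(hash)
	if err != nil {
		log.Fatal(err)
	}
	defer reader.Close()

	buf, err := ioutil.ReadAll(reader)
	if err != nil {
		// e.g. the checksum-mismatch error described in Get's doc comment
		log.Fatal(err)
	}
	fmt.Printf("read %d of %d bytes from %s\n", len(buf), size, url)
}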
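
And a short sketch of overriding the discovered server list via SetServiceRoots, assuming the same import path; the proxy URL is made up. Because the setter publishes a sorted copy with a single atomic pointer swap, Get/Put operations already in progress keep using the list they previously obtained from ServiceRoots(), and concurrent readers always see a complete list:

package main

import (
	"fmt"
	"log"

	"arvados.org/keepclient"
)

func main() {
	kc, err := keepclient.MakeKeepClient()
	if err != nil {
		log.Fatal(err)
	}

	// Replace the discovered server list, e.g. to route all traffic through
	// a single Keep proxy (hypothetical address).  SetServiceRoots copies
	// and sorts the slice before storing it atomically.
	kc.SetServiceRoots([]string{"https://keep.proxy.example:25107"})

	// ServiceRoots returns the most recently stored, sorted snapshot.
	fmt.Println(kc.ServiceRoots())
}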