Merge branch 'master' into 2798-go-keep-client
author Peter Amstutz <peter.amstutz@curoverse.com>
Wed, 14 May 2014 19:45:05 +0000 (15:45 -0400)
committer Peter Amstutz <peter.amstutz@curoverse.com>
Wed, 14 May 2014 19:45:05 +0000 (15:45 -0400)
sdk/go/build.sh [new file with mode: 0755]
sdk/go/src/arvados.org/keepclient/keepclient.go [new file with mode: 0644]
sdk/go/src/arvados.org/keepclient/keepclient_test.go [new file with mode: 0644]
sdk/python/arvados/keep.py
sdk/python/run_test_server.py

diff --git a/sdk/go/build.sh b/sdk/go/build.sh
new file mode 100755 (executable)
index 0000000..ed95228
--- /dev/null
@@ -0,0 +1,37 @@
+#! /bin/sh
+
+# This script builds the Go Keep client library ("keepclient") and
+# installs the compiled package under ./pkg.
+#
+# In idiomatic Go style, a user would install the package with
+# something like:
+#
+#     go get arvados.org/keepclient
+#     go install arvados.org/keepclient
+#
+# which would download both the keepclient source and any third-party
+# packages it depends on.
+#
+# Since the keepclient source is bundled within the overall Arvados
+# source tree, "go get" is not the primary tool for delivering it and
+# that process doesn't work.  Instead, this script sets up the
+# environment properly and fetches any necessary dependencies by hand.
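+#
+# With GOPATH pointing at this directory, "arvados.org/keepclient"
+# resolves to ./src/arvados.org/keepclient and the compiled package
+# lands under ./pkg/<platform>/arvados.org/.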
+
+if [ -z "$GOPATH" ]
+then
+    GOPATH=$(pwd)
+else
+    GOPATH=$(pwd):${GOPATH}
+fi
+
+export GOPATH
+
+set -o errexit   # fail if any command returns an error
+
+mkdir -p pkg
+mkdir -p bin
+go get gopkg.in/check.v1
+go install arvados.org/keepclient
+if ls -l pkg/*/arvados.org/keepclient.a ; then
+    echo "success!"
+fi
diff --git a/sdk/go/src/arvados.org/keepclient/keepclient.go b/sdk/go/src/arvados.org/keepclient/keepclient.go
new file mode 100644 (file)
index 0000000..aeb805b
--- /dev/null
@@ -0,0 +1,575 @@
+package keepclient
+
+import (
+       "crypto/md5"
+       "crypto/tls"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "net/http"
+       "os"
+       "sort"
+       "strconv"
+)
+
+type KeepClient struct {
+       ApiServer     string
+       ApiToken      string
+       ApiInsecure   bool
+       Service_roots []string
+       Want_replicas int
+       client        *http.Client
+}
+
+type KeepDisk struct {
+       Hostname string `json:"service_host"`
+       Port     int    `json:"service_port"`
+       SSL      bool   `json:"service_ssl_flag"`
+}
+
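+// MakeKeepClient builds a KeepClient from the ARVADOS_API_HOST,
+// ARVADOS_API_TOKEN and ARVADOS_API_HOST_INSECURE environment variables and
+// then discovers the available Keep servers.  A minimal usage sketch
+// (error handling omitted, assuming those variables are set):
+//
+//     kc, err := keepclient.MakeKeepClient()
+//     hash, replicas, err := kc.PutB([]byte("foo"))
+//     reader, size, url, err := kc.Get(hash)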
+func MakeKeepClient() (kc *KeepClient, err error) {
+       kc = &KeepClient{
+               ApiServer:     os.Getenv("ARVADOS_API_HOST"),
+               ApiToken:      os.Getenv("ARVADOS_API_TOKEN"),
+               ApiInsecure:   (os.Getenv("ARVADOS_API_HOST_INSECURE") != ""),
+               Want_replicas: 2}
+
+       tr := &http.Transport{
+               TLSClientConfig: &tls.Config{InsecureSkipVerify: kc.ApiInsecure},
+       }
+
+       kc.client = &http.Client{Transport: tr}
+
+       err = kc.DiscoverKeepDisks()
+
+       return kc, err
+}
+
+func (this *KeepClient) DiscoverKeepDisks() error {
+       // Construct request for the keep disk list
+       var req *http.Request
+       var err error
+       if req, err = http.NewRequest("GET", fmt.Sprintf("https://%s/arvados/v1/keep_disks", this.ApiServer), nil); err != nil {
+               return err
+       }
+
+       // Add api token header
+       req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
+
+       // Make the request
+       var resp *http.Response
+       if resp, err = this.client.Do(req); err != nil {
+               return err
+       }
+
+       type SvcList struct {
+               Items []KeepDisk `json:"items"`
+       }
+
+       // Decode json reply
+       dec := json.NewDecoder(resp.Body)
+       var m SvcList
+       if err := dec.Decode(&m); err != nil {
+               return err
+       }
+
+       listed := make(map[string]bool)
+       this.Service_roots = make([]string, 0, len(m.Items))
+
+       for _, element := range m.Items {
+               n := ""
+               if element.SSL {
+                       n = "s"
+               }
+
+               // Construct server URL
+               url := fmt.Sprintf("http%s://%s:%d", n, element.Hostname, element.Port)
+
+               // Skip duplicates
+               if !listed[url] {
+                       listed[url] = true
+                       this.Service_roots = append(this.Service_roots, url)
+               }
+       }
+
+       // Must be sorted for ShuffledServiceRoots() to produce consistent
+       // results.
+       sort.Strings(this.Service_roots)
+
+       return nil
+}
+
+func (this KeepClient) ShuffledServiceRoots(hash string) (pseq []string) {
+       // Build an ordering with which to query the Keep servers based on the
+       // contents of the hash.  "hash" is a hex-encoded number at least 8
+       // digits (32 bits) long
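+       //
+       // As a worked example (matching TestShuffleServiceRoots in
+       // keepclient_test.go): with 17 service roots and hash
+       // "acbd18db4cc2f85cedef654fccc4a4d8" (md5 of "foo"), the first probe
+       // is 0xacbd18db % 17 == 9, so the server at index 9 of the sorted
+       // list is chosen first.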
+
+       // seed used to calculate the next keep server from 'pool' to be added
+       // to 'pseq'
+       seed := hash
+
+       // Keep servers still to be added to the ordering
+       pool := make([]string, len(this.Service_roots))
+       copy(pool, this.Service_roots)
+
+       // output probe sequence
+       pseq = make([]string, 0, len(this.Service_roots))
+
+       // iterate while there are servers left to be assigned
+       for len(pool) > 0 {
+
+               if len(seed) < 8 {
+                       // ran out of digits in the seed
+                       if len(pseq) < (len(hash) / 4) {
+                               // the number of servers added to the probe
+                               // sequence is less than the number of 4-digit
+                               // slices in 'hash', so refill the seed with the
+                               // last 4 digits before appending the whole hash
+                               // below.
+                               seed = hash[len(hash)-4:]
+                       }
+                       seed += hash
+               }
+
+               // Take the next 8 digits (32 bits) and interpret them as an
+               // integer, then take that value modulo the size of the
+               // remaining pool to get the index of the next selected server.
+               probe, _ := strconv.ParseUint(seed[0:8], 16, 32)
+               probe %= uint64(len(pool))
+
+               // Append the selected server to the probe sequence and remove it
+               // from the pool.
+               pseq = append(pseq, pool[probe])
+               pool = append(pool[:probe], pool[probe+1:]...)
+
+               // Remove the digits just used from the seed
+               seed = seed[8:]
+       }
+       return pseq
+}
+
+type ReaderSlice struct {
+       slice        []byte
+       reader_error error
+}
+
+// Read repeatedly from the reader 'r' into the specified buffer, reporting
+// each read on the 'slices' channel.  Completes when the reader returns an
+// error (including io.EOF) or the buffer is exhausted; the terminating error
+// is reported as the final ReaderSlice before 'slices' is closed.
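+//
+// A rough usage sketch (the pattern used by Transfer() below):
+//
+//     slices := make(chan ReaderSlice)
+//     go ReadIntoBuffer(buffer, reader, slices)
+//     for s := range slices {
+//             // s.slice is the newly read region of 'buffer';
+//             // s.reader_error is non-nil on the final slice.
+//     }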
+func ReadIntoBuffer(buffer []byte, r io.Reader, slices chan<- ReaderSlice) {
+       defer close(slices)
+
+       // Initially use entire buffer as scratch space
+       ptr := buffer[:]
+       for {
+               var n int
+               var err error
+               if len(ptr) > 0 {
+                       // Read into the scratch space
+                       n, err = r.Read(ptr)
+               } else {
+                       // Ran out of scratch space, try reading one more byte
+                       var b [1]byte
+                       n, err = r.Read(b[:])
+
+                       if n > 0 {
+                               // Reader has more data but we have nowhere to
+                               // put it, so we're stuffed
+                               slices <- ReaderSlice{nil, io.ErrShortBuffer}
+                       } else {
+                               // Return some other error (hopefully EOF)
+                               slices <- ReaderSlice{nil, err}
+                       }
+                       return
+               }
+
+               // End on error (includes EOF)
+               if err != nil {
+                       slices <- ReaderSlice{nil, err}
+                       return
+               }
+
+               if n > 0 {
+                       // Make a slice with the contents of the read
+                       slices <- ReaderSlice{ptr[:n], nil}
+
+                       // Adjust the scratch space slice
+                       ptr = ptr[n:]
+               }
+       }
+}
+
+// A read request to the Transfer() function
+type ReadRequest struct {
+       offset  int
+       maxsize int
+       result  chan<- ReadResult
+}
+
+// A read result from the Transfer() function
+type ReadResult struct {
+       slice []byte
+       err   error
+}
+
+// Reads from the buffer managed by the Transfer()
+type BufferReader struct {
+       offset    *int
+       requests  chan<- ReadRequest
+       responses chan ReadResult
+}
+
+func MakeBufferReader(requests chan<- ReadRequest) BufferReader {
+       return BufferReader{new(int), requests, make(chan ReadResult)}
+}
+
+// Reads from the buffer managed by the Transfer()
+func (this BufferReader) Read(p []byte) (n int, err error) {
+       this.requests <- ReadRequest{*this.offset, len(p), this.responses}
+       rr, valid := <-this.responses
+       if valid {
+               *this.offset += len(rr.slice)
+               return copy(p, rr.slice), rr.err
+       } else {
+               return 0, io.ErrUnexpectedEOF
+       }
+}
+
+func (this BufferReader) WriteTo(dest io.Writer) (written int64, err error) {
+       // Record starting offset in order to correctly report the number of bytes sent
+       starting_offset := *this.offset
+       for {
+               this.requests <- ReadRequest{*this.offset, 32 * 1024, this.responses}
+               rr, valid := <-this.responses
+               if valid {
+                       log.Printf("WriteTo slice %v %d %v", *this.offset, len(rr.slice), rr.err)
+                       *this.offset += len(rr.slice)
+                       if rr.err != nil {
+                               if rr.err == io.EOF {
+                                       // EOF is not an error.
+                                       return int64(*this.offset - starting_offset), nil
+                               } else {
+                                       return int64(*this.offset - starting_offset), rr.err
+                               }
+                       } else {
+                               dest.Write(rr.slice)
+                       }
+               } else {
+                       return int64(*this.offset - starting_offset), io.ErrUnexpectedEOF
+               }
+       }
+}
+
+// Close the responses channel
+func (this BufferReader) Close() error {
+       close(this.responses)
+       return nil
+}
+
+// Handle a read request.  Returns true if a response was sent, and false if
+// the request should be queued.
+func HandleReadRequest(req ReadRequest, body []byte, complete bool) bool {
+       log.Printf("HandleReadRequest %d %d %d", req.offset, req.maxsize, len(body))
+       if req.offset < len(body) {
+               var end int
+               if req.offset+req.maxsize < len(body) {
+                       end = req.offset + req.maxsize
+               } else {
+                       end = len(body)
+               }
+               req.result <- ReadResult{body[req.offset:end], nil}
+               return true
+       } else if complete && req.offset >= len(body) {
+               req.result <- ReadResult{nil, io.EOF}
+               return true
+       } else {
+               return false
+       }
+}
+
+// If 'source_reader' is not nil, reads data from 'source_reader' and stores
+// it in 'source_buffer'.  Otherwise, serves the existing contents of
+// 'source_buffer' as-is.  Accepts read requests for the buffered data on the
+// 'requests' channel.  Completes when the 'requests' channel is closed.
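+//
+// Typical wiring, as used by PutHR() below (a sketch, not a prescription):
+//
+//     requests := make(chan ReadRequest)
+//     reader_status := make(chan error)
+//     go Transfer(buffer, r, requests, reader_status)
+//     br := MakeBufferReader(requests)  // one of possibly many readers
+//     n, err := br.Read(p)              // served from the shared buffer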
+func Transfer(source_buffer []byte, source_reader io.Reader, requests <-chan ReadRequest, reader_error chan error) {
+       // currently buffered data
+       var body []byte
+
+       // for receiving slices from ReadIntoBuffer
+       var slices chan ReaderSlice = nil
+
+       // indicates whether the buffered data is complete
+       var complete bool = false
+
+       if source_reader != nil {
+               // 'body' is the buffer slice representing the body content read so far
+               body = source_buffer[:0]
+
+               // used to communicate slices of the buffer as they are read;
+               // ReadIntoBuffer will close 'slices' when it is done with it
+               slices = make(chan ReaderSlice)
+
+               // Spin it off
+               go ReadIntoBuffer(source_buffer, source_reader, slices)
+       } else {
+               // use the whole buffer
+               body = source_buffer[:]
+
+               // buffer is complete
+               complete = true
+       }
+
+       pending_requests := make([]ReadRequest, 0)
+
+       for {
+               select {
+               case req, valid := <-requests:
+                       // Handle a buffer read request
+                       if valid {
+                               if !HandleReadRequest(req, body, complete) {
+                                       pending_requests = append(pending_requests, req)
+                               }
+                       } else {
+                               // closed 'requests' channel indicates we're done
+                               return
+                       }
+
+               case bk, valid := <-slices:
+                       // Got a new slice from the reader
+                       if valid {
+                               if bk.reader_error != nil {
+                                       reader_error <- bk.reader_error
+                                       if bk.reader_error == io.EOF {
+                                               // EOF indicates the reader is done
+                                               // sending, so our buffer is complete.
+                                               complete = true
+                                       } else {
+                                               // some other reader error
+                                               return
+                                       }
+                               }
+
+                               if bk.slice != nil {
+                                       // adjust body bounds now that another slice has been read
+                                       body = source_buffer[0 : len(body)+len(bk.slice)]
+                               }
+
+                               // handle pending reads
+                               n := 0
+                               for n < len(pending_requests) {
+                                       if HandleReadRequest(pending_requests[n], body, complete) {
+
+                                               // move the element from the
+                                               // back of the slice to
+                                               // position 'n', then shorten
+                                               // the slice by one element
+                                               pending_requests[n] = pending_requests[len(pending_requests)-1]
+                                               pending_requests = pending_requests[0 : len(pending_requests)-1]
+                                       } else {
+
+                                               // Request wasn't handled, so keep it in the request slice
+                                               n += 1
+                                       }
+                               }
+                       } else {
+                               if complete {
+                                       // no more reads
+                                       slices = nil
+                               } else {
+                                       // reader channel closed without signaling EOF
+                                       reader_error <- io.ErrUnexpectedEOF
+                                       return
+                               }
+                       }
+               }
+       }
+}
+
+type UploadStatus struct {
+       Err        error
+       Url        string
+       StatusCode int
+}
+
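+// Upload a single block to one Keep server, using 'body' as the request body
+// and reporting the outcome (including the HTTP status code) on
+// 'upload_status'.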
+func (this KeepClient) uploadToKeepServer(host string, hash string, body io.ReadCloser,
+       upload_status chan<- UploadStatus, expectedLength int64) {
+
+       log.Printf("Uploading to %s", host)
+
+       var req *http.Request
+       var err error
+       var url = fmt.Sprintf("%s/%s", host, hash)
+       if req, err = http.NewRequest("PUT", url, nil); err != nil {
+               upload_status <- UploadStatus{err, url, 0}
+               return
+       }
+
+       if expectedLength > 0 {
+               req.ContentLength = expectedLength
+       }
+
+       req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
+       req.Header.Add("Content-Type", "application/octet-stream")
+       req.Body = body
+
+       var resp *http.Response
+       if resp, err = this.client.Do(req); err != nil {
+               upload_status <- UploadStatus{err, url, 0}
+               return
+       }
+
+       if resp.StatusCode == http.StatusOK {
+               upload_status <- UploadStatus{nil, url, resp.StatusCode}
+       } else {
+               upload_status <- UploadStatus{errors.New(resp.Status), url, resp.StatusCode}
+       }
+}
+
+var InsufficientReplicasError = errors.New("Could not write sufficient replicas")
+
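+// Write the block to Keep servers in probe-sequence order until
+// Want_replicas copies have been stored, feeding each upload goroutine from
+// the shared Transfer() buffer via 'requests' and starting a replacement
+// upload whenever one fails.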
+func (this KeepClient) putReplicas(
+       hash string,
+       requests chan ReadRequest,
+       reader_status chan error,
+       expectedLength int64) (replicas int, err error) {
+
+       // Calculate the ordering for uploading to servers
+       sv := this.ShuffledServiceRoots(hash)
+
+       // The next server to try contacting
+       next_server := 0
+
+       // The number of active writers
+       active := 0
+
+       // Used to communicate status from the upload goroutines
+       upload_status := make(chan UploadStatus)
+       defer close(upload_status)
+
+       // Desired number of replicas
+       remaining_replicas := this.Want_replicas
+
+       for remaining_replicas > 0 {
+               for active < remaining_replicas {
+                       // Start some upload requests
+                       if next_server < len(sv) {
+                               go this.uploadToKeepServer(sv[next_server], hash, MakeBufferReader(requests), upload_status, expectedLength)
+                               next_server += 1
+                               active += 1
+                       } else {
+                               return (this.Want_replicas - remaining_replicas), InsufficientReplicasError
+                       }
+               }
+
+               // Now wait for something to happen.
+               select {
+               case status := <-reader_status:
+                       if status == io.EOF {
+                               // good news!
+                       } else {
+                               // bad news
+                               return (this.Want_replicas - remaining_replicas), status
+                       }
+               case status := <-upload_status:
+                       if status.StatusCode == 200 {
+                               // good news!
+                               remaining_replicas -= 1
+                       } else {
+                               // writing to keep server failed for some reason
+                               log.Printf("Keep server put to %v failed with '%v'",
+                                       status.Url, status.Err)
+                       }
+                       active -= 1
+                       log.Printf("Upload status %v %v %v", status.StatusCode, remaining_replicas, active)
+               }
+       }
+
+       return (this.Want_replicas - remaining_replicas), nil
+}
+
+var OversizeBlockError = errors.New("Block too big")
+
+func (this KeepClient) PutHR(hash string, r io.Reader, expectedLength int64) (replicas int, err error) {
+
+       // Buffer for reads from 'r'
+       var buffer []byte
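+       // 64 MiB is the maximum size of a Keep block, so a larger
+       // expectedLength is rejected outright and an unknown length gets a
+       // full-size buffer.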
+       if expectedLength > 0 {
+               if expectedLength > 64*1024*1024 {
+                       return 0, OversizeBlockError
+               }
+               buffer = make([]byte, expectedLength)
+       } else {
+               buffer = make([]byte, 64*1024*1024)
+       }
+
+       // Read requests on Transfer() buffer
+       requests := make(chan ReadRequest)
+       defer close(requests)
+
+       // Reporting reader error states
+       reader_status := make(chan error)
+       defer close(reader_status)
+
+       // Start the transfer goroutine
+       go Transfer(buffer, r, requests, reader_status)
+
+       return this.putReplicas(hash, requests, reader_status, expectedLength)
+}
+
+func (this KeepClient) PutHB(hash string, buffer []byte) (replicas int, err error) {
+       // Read requests on Transfer() buffer
+       requests := make(chan ReadRequest)
+       defer close(requests)
+
+       // Start the transfer goroutine
+       go Transfer(buffer, nil, requests, nil)
+
+       return this.putReplicas(hash, requests, nil, int64(len(buffer)))
+}
+
+func (this KeepClient) PutB(buffer []byte) (hash string, replicas int, err error) {
+       hash = fmt.Sprintf("%x", md5.Sum(buffer))
+       replicas, err = this.PutHB(hash, buffer)
+       return hash, replicas, err
+}
+
+func (this KeepClient) PutR(r io.Reader) (hash string, replicas int, err error) {
+       if buffer, err := ioutil.ReadAll(r); err != nil {
+               return "", 0, err
+       } else {
+               return this.PutB(buffer)
+       }
+}
+
+var BlockNotFound = errors.New("Block not found")
+
+func (this KeepClient) Get(hash string) (reader io.ReadCloser,
+       contentLength int64, url string, err error) {
+
+       // Calculate the order in which to query servers for this block
+       sv := this.ShuffledServiceRoots(hash)
+
+       for _, host := range sv {
+               var req *http.Request
+               var err error
+               var url = fmt.Sprintf("%s/%s", host, hash)
+               if req, err = http.NewRequest("GET", url, nil); err != nil {
+                       continue
+               }
+
+               req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
+
+               var resp *http.Response
+               if resp, err = this.client.Do(req); err != nil {
+                       continue
+               }
+
+               if resp.StatusCode == http.StatusOK {
+                       return resp.Body, resp.ContentLength, url, nil
+               }
+               // Block not on this server; close the response body and try
+               // the next server in the probe sequence.
+               resp.Body.Close()
+       }
+
+       return nil, 0, "", BlockNotFound
+}
diff --git a/sdk/go/src/arvados.org/keepclient/keepclient_test.go b/sdk/go/src/arvados.org/keepclient/keepclient_test.go
new file mode 100644 (file)
index 0000000..00a2063
--- /dev/null
@@ -0,0 +1,893 @@
+package keepclient
+
+import (
+       "crypto/md5"
+       "flag"
+       "fmt"
+       . "gopkg.in/check.v1"
+       "io"
+       "io/ioutil"
+       "log"
+       "net"
+       "net/http"
+       "os"
+       "os/exec"
+       "sort"
+       "testing"
+       "time"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) { TestingT(t) }
+
+// Gocheck boilerplate
+var _ = Suite(&ServerRequiredSuite{})
+var _ = Suite(&StandaloneSuite{})
+
+var no_server = flag.Bool("no-server", false, "Skip 'ServerRequiredSuite'")
+
+// Tests that require the Keep server running
+type ServerRequiredSuite struct{}
+
+// Standalone tests
+type StandaloneSuite struct{}
+
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+       if *no_server {
+               c.Skip("Skipping tests that require server")
+       } else {
+               os.Chdir(os.ExpandEnv("$GOPATH/../python"))
+               exec.Command("python", "run_test_server.py", "start").Run()
+               exec.Command("python", "run_test_server.py", "start_keep").Run()
+       }
+}
+
+func (s *ServerRequiredSuite) TearDownSuite(c *C) {
+       os.Chdir(os.ExpandEnv("$GOPATH/../python"))
+       exec.Command("python", "run_test_server.py", "stop_keep").Run()
+       exec.Command("python", "run_test_server.py", "stop").Run()
+}
+
+func (s *ServerRequiredSuite) TestMakeKeepClient(c *C) {
+       os.Setenv("ARVADOS_API_HOST", "localhost:3001")
+       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "")
+
+       kc, err := MakeKeepClient()
+       c.Assert(kc.ApiServer, Equals, "localhost:3001")
+       c.Assert(kc.ApiToken, Equals, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       c.Assert(kc.ApiInsecure, Equals, false)
+
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+
+       kc, err = MakeKeepClient()
+       c.Assert(kc.ApiServer, Equals, "localhost:3001")
+       c.Assert(kc.ApiToken, Equals, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       c.Assert(kc.ApiInsecure, Equals, true)
+
+       c.Assert(err, Equals, nil)
+       c.Assert(len(kc.Service_roots), Equals, 2)
+       c.Assert(kc.Service_roots[0], Equals, "http://localhost:25107")
+       c.Assert(kc.Service_roots[1], Equals, "http://localhost:25108")
+}
+
+func (s *StandaloneSuite) TestShuffleServiceRoots(c *C) {
+       kc := KeepClient{Service_roots: []string{"http://localhost:25107", "http://localhost:25108", "http://localhost:25109", "http://localhost:25110", "http://localhost:25111", "http://localhost:25112", "http://localhost:25113", "http://localhost:25114", "http://localhost:25115", "http://localhost:25116", "http://localhost:25117", "http://localhost:25118", "http://localhost:25119", "http://localhost:25120", "http://localhost:25121", "http://localhost:25122", "http://localhost:25123"}}
+
+       // "foo" acbd18db4cc2f85cedef654fccc4a4d8
+       foo_shuffle := []string{"http://localhost:25116", "http://localhost:25120", "http://localhost:25119", "http://localhost:25122", "http://localhost:25108", "http://localhost:25114", "http://localhost:25112", "http://localhost:25107", "http://localhost:25118", "http://localhost:25111", "http://localhost:25113", "http://localhost:25121", "http://localhost:25110", "http://localhost:25117", "http://localhost:25109", "http://localhost:25115", "http://localhost:25123"}
+       c.Check(kc.ShuffledServiceRoots("acbd18db4cc2f85cedef654fccc4a4d8"), DeepEquals, foo_shuffle)
+
+       // "bar" 37b51d194a7513e45b56f6524f2d51f2
+       bar_shuffle := []string{"http://localhost:25108", "http://localhost:25112", "http://localhost:25119", "http://localhost:25107", "http://localhost:25110", "http://localhost:25116", "http://localhost:25122", "http://localhost:25120", "http://localhost:25121", "http://localhost:25117", "http://localhost:25111", "http://localhost:25123", "http://localhost:25118", "http://localhost:25113", "http://localhost:25114", "http://localhost:25115", "http://localhost:25109"}
+       c.Check(kc.ShuffledServiceRoots("37b51d194a7513e45b56f6524f2d51f2"), DeepEquals, bar_shuffle)
+}
+
+func ReadIntoBufferHelper(c *C, bufsize int) {
+       buffer := make([]byte, bufsize)
+
+       reader, writer := io.Pipe()
+       slices := make(chan ReaderSlice)
+
+       go ReadIntoBuffer(buffer, reader, slices)
+
+       {
+               out := make([]byte, 128)
+               for i := 0; i < 128; i += 1 {
+                       out[i] = byte(i)
+               }
+               writer.Write(out)
+               s1 := <-slices
+               c.Check(len(s1.slice), Equals, 128)
+               c.Check(s1.reader_error, Equals, nil)
+               for i := 0; i < 128; i += 1 {
+                       c.Check(s1.slice[i], Equals, byte(i))
+               }
+               for i := 0; i < len(buffer); i += 1 {
+                       if i < 128 {
+                               c.Check(buffer[i], Equals, byte(i))
+                       } else {
+                               c.Check(buffer[i], Equals, byte(0))
+                       }
+               }
+       }
+       {
+               out := make([]byte, 96)
+               for i := 0; i < 96; i += 1 {
+                       out[i] = byte(i / 2)
+               }
+               writer.Write(out)
+               s1 := <-slices
+               c.Check(len(s1.slice), Equals, 96)
+               c.Check(s1.reader_error, Equals, nil)
+               for i := 0; i < 96; i += 1 {
+                       c.Check(s1.slice[i], Equals, byte(i/2))
+               }
+               for i := 0; i < len(buffer); i += 1 {
+                       if i < 128 {
+                               c.Check(buffer[i], Equals, byte(i))
+                       } else if i < (128 + 96) {
+                               c.Check(buffer[i], Equals, byte((i-128)/2))
+                       } else {
+                               c.Check(buffer[i], Equals, byte(0))
+                       }
+               }
+       }
+       {
+               writer.Close()
+               s1 := <-slices
+               c.Check(len(s1.slice), Equals, 0)
+               c.Check(s1.reader_error, Equals, io.EOF)
+       }
+}
+
+func (s *StandaloneSuite) TestReadIntoBuffer(c *C) {
+       ReadIntoBufferHelper(c, 512)
+       ReadIntoBufferHelper(c, 225)
+       ReadIntoBufferHelper(c, 224)
+}
+
+func (s *StandaloneSuite) TestReadIntoShortBuffer(c *C) {
+       buffer := make([]byte, 223)
+       reader, writer := io.Pipe()
+       slices := make(chan ReaderSlice)
+
+       go ReadIntoBuffer(buffer, reader, slices)
+
+       {
+               out := make([]byte, 128)
+               for i := 0; i < 128; i += 1 {
+                       out[i] = byte(i)
+               }
+               writer.Write(out)
+               s1 := <-slices
+               c.Check(len(s1.slice), Equals, 128)
+               c.Check(s1.reader_error, Equals, nil)
+               for i := 0; i < 128; i += 1 {
+                       c.Check(s1.slice[i], Equals, byte(i))
+               }
+               for i := 0; i < len(buffer); i += 1 {
+                       if i < 128 {
+                               c.Check(buffer[i], Equals, byte(i))
+                       } else {
+                               c.Check(buffer[i], Equals, byte(0))
+                       }
+               }
+       }
+       {
+               out := make([]byte, 96)
+               for i := 0; i < 96; i += 1 {
+                       out[i] = byte(i / 2)
+               }
+
+               // Write will deadlock because it can't write all the data, so
+               // spin it off to a goroutine
+               go writer.Write(out)
+               s1 := <-slices
+
+               c.Check(len(s1.slice), Equals, 95)
+               c.Check(s1.reader_error, Equals, nil)
+               for i := 0; i < 95; i += 1 {
+                       c.Check(s1.slice[i], Equals, byte(i/2))
+               }
+               for i := 0; i < len(buffer); i += 1 {
+                       if i < 128 {
+                               c.Check(buffer[i], Equals, byte(i))
+                       } else if i < (128 + 95) {
+                               c.Check(buffer[i], Equals, byte((i-128)/2))
+                       } else {
+                               c.Check(buffer[i], Equals, byte(0))
+                       }
+               }
+       }
+       {
+               writer.Close()
+               s1 := <-slices
+               c.Check(len(s1.slice), Equals, 0)
+               c.Check(s1.reader_error, Equals, io.ErrShortBuffer)
+       }
+
+}
+
+func (s *StandaloneSuite) TestTransfer(c *C) {
+       reader, writer := io.Pipe()
+
+       // Buffer for reads from 'r'
+       buffer := make([]byte, 512)
+
+       // Read requests on Transfer() buffer
+       requests := make(chan ReadRequest)
+       defer close(requests)
+
+       // Reporting reader error states
+       reader_status := make(chan error)
+
+       go Transfer(buffer, reader, requests, reader_status)
+
+       br1 := MakeBufferReader(requests)
+       out := make([]byte, 128)
+
+       {
+               // Write some data, and read into a buffer shorter than
+               // available data
+               for i := 0; i < 128; i += 1 {
+                       out[i] = byte(i)
+               }
+
+               writer.Write(out[:100])
+
+               in := make([]byte, 64)
+               n, err := br1.Read(in)
+
+               c.Check(n, Equals, 64)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 64; i += 1 {
+                       c.Check(in[i], Equals, out[i])
+               }
+       }
+
+       {
+               // Write some more data, and read into buffer longer than
+               // available data
+               in := make([]byte, 64)
+               n, err := br1.Read(in)
+               c.Check(n, Equals, 36)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 36; i += 1 {
+                       c.Check(in[i], Equals, out[64+i])
+               }
+
+       }
+
+       {
+               // Test read before write
+               type Rd struct {
+                       n   int
+                       err error
+               }
+               rd := make(chan Rd)
+               in := make([]byte, 64)
+
+               go func() {
+                       n, err := br1.Read(in)
+                       rd <- Rd{n, err}
+               }()
+
+               time.Sleep(100 * time.Millisecond)
+               writer.Write(out[100:])
+
+               got := <-rd
+
+               c.Check(got.n, Equals, 28)
+               c.Check(got.err, Equals, nil)
+
+               for i := 0; i < 28; i += 1 {
+                       c.Check(in[i], Equals, out[100+i])
+               }
+       }
+
+       br2 := MakeBufferReader(requests)
+       {
+               // Test 'catch up' reader
+               in := make([]byte, 256)
+               n, err := br2.Read(in)
+
+               c.Check(n, Equals, 128)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 128; i += 1 {
+                       c.Check(in[i], Equals, out[i])
+               }
+       }
+
+       {
+               // Test closing the reader
+               writer.Close()
+               status := <-reader_status
+               c.Check(status, Equals, io.EOF)
+
+               in := make([]byte, 256)
+               n1, err1 := br1.Read(in)
+               n2, err2 := br2.Read(in)
+               c.Check(n1, Equals, 0)
+               c.Check(err1, Equals, io.EOF)
+               c.Check(n2, Equals, 0)
+               c.Check(err2, Equals, io.EOF)
+       }
+
+       {
+               // Test 'catch up' reader after closing
+               br3 := MakeBufferReader(requests)
+               in := make([]byte, 256)
+               n, err := br3.Read(in)
+
+               c.Check(n, Equals, 128)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 128; i += 1 {
+                       c.Check(in[i], Equals, out[i])
+               }
+
+               n, err = br3.Read(in)
+
+               c.Check(n, Equals, 0)
+               c.Check(err, Equals, io.EOF)
+       }
+}
+
+func (s *StandaloneSuite) TestTransferShortBuffer(c *C) {
+       reader, writer := io.Pipe()
+
+       // Buffer for reads from 'r'
+       buffer := make([]byte, 100)
+
+       // Read requests on Transfer() buffer
+       requests := make(chan ReadRequest)
+       defer close(requests)
+
+       // Reporting reader error states
+       reader_status := make(chan error)
+
+       go Transfer(buffer, reader, requests, reader_status)
+
+       out := make([]byte, 101)
+       go writer.Write(out)
+
+       status := <-reader_status
+       c.Check(status, Equals, io.ErrShortBuffer)
+}
+
+func (s *StandaloneSuite) TestTransferFromBuffer(c *C) {
+       // Buffer for reads from 'r'
+       buffer := make([]byte, 100)
+       for i := 0; i < 100; i += 1 {
+               buffer[i] = byte(i)
+       }
+
+       // Read requests on Transfer() buffer
+       requests := make(chan ReadRequest)
+       defer close(requests)
+
+       go Transfer(buffer, nil, requests, nil)
+
+       br1 := MakeBufferReader(requests)
+
+       in := make([]byte, 64)
+       {
+               n, err := br1.Read(in)
+
+               c.Check(n, Equals, 64)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 64; i += 1 {
+                       c.Check(in[i], Equals, buffer[i])
+               }
+       }
+       {
+               n, err := br1.Read(in)
+
+               c.Check(n, Equals, 36)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 36; i += 1 {
+                       c.Check(in[i], Equals, buffer[64+i])
+               }
+       }
+       {
+               n, err := br1.Read(in)
+
+               c.Check(n, Equals, 0)
+               c.Check(err, Equals, io.EOF)
+       }
+}
+
+func (s *StandaloneSuite) TestTransferIoCopy(c *C) {
+       // Buffer for reads from 'r'
+       buffer := make([]byte, 100)
+       for i := 0; i < 100; i += 1 {
+               buffer[i] = byte(i)
+       }
+
+       // Read requests on Transfer() buffer
+       requests := make(chan ReadRequest)
+       defer close(requests)
+
+       go Transfer(buffer, nil, requests, nil)
+
+       br1 := MakeBufferReader(requests)
+
+       reader, writer := io.Pipe()
+
+       go func() {
+               p := make([]byte, 100)
+               n, err := reader.Read(p)
+               c.Check(n, Equals, 100)
+               c.Check(err, Equals, nil)
+               c.Check(p, DeepEquals, buffer)
+       }()
+
+       io.Copy(writer, br1)
+}
+
+type StubPutHandler struct {
+       c              *C
+       expectPath     string
+       expectApiToken string
+       expectBody     string
+       handled        chan string
+}
+
+func (this StubPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       this.c.Check(req.URL.Path, Equals, "/"+this.expectPath)
+       this.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", this.expectApiToken))
+       body, err := ioutil.ReadAll(req.Body)
+       this.c.Check(err, Equals, nil)
+       this.c.Check(body, DeepEquals, []byte(this.expectBody))
+       resp.WriteHeader(200)
+       this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+func RunBogusKeepServer(st http.Handler, port int) (listener net.Listener, url string) {
+       server := http.Server{Handler: st}
+
+       var err error
+       listener, err = net.ListenTCP("tcp", &net.TCPAddr{Port: port})
+       if err != nil {
+               panic(fmt.Sprintf("Could not listen on tcp port %v", port))
+       }
+
+       url = fmt.Sprintf("http://localhost:%d", listener.Addr().(*net.TCPAddr).Port)
+
+       go server.Serve(listener)
+       return listener, url
+}
+
+func UploadToStubHelper(c *C, st http.Handler, f func(*KeepClient, string,
+       io.ReadCloser, io.WriteCloser, chan UploadStatus)) {
+
+       listener, url := RunBogusKeepServer(st, 2990)
+       defer listener.Close()
+
+       kc, _ := MakeKeepClient()
+       kc.ApiToken = "abc123"
+
+       reader, writer := io.Pipe()
+       upload_status := make(chan UploadStatus)
+
+       f(kc, url, reader, writer, upload_status)
+}
+
+func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
+       st := StubPutHandler{
+               c,
+               "acbd18db4cc2f85cedef654fccc4a4d8",
+               "abc123",
+               "foo",
+               make(chan string)}
+
+       UploadToStubHelper(c, st,
+               func(kc *KeepClient, url string, reader io.ReadCloser,
+                       writer io.WriteCloser, upload_status chan UploadStatus) {
+
+                       go kc.uploadToKeepServer(url, st.expectPath, reader, upload_status, int64(len("foo")))
+
+                       writer.Write([]byte("foo"))
+                       writer.Close()
+
+                       <-st.handled
+                       status := <-upload_status
+                       c.Check(status, DeepEquals, UploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200})
+               })
+}
+
+func (s *StandaloneSuite) TestUploadToStubKeepServerBufferReader(c *C) {
+       st := StubPutHandler{
+               c,
+               "acbd18db4cc2f85cedef654fccc4a4d8",
+               "abc123",
+               "foo",
+               make(chan string)}
+
+       UploadToStubHelper(c, st,
+               func(kc *KeepClient, url string, reader io.ReadCloser,
+                       writer io.WriteCloser, upload_status chan UploadStatus) {
+
+                       // Buffer for reads from 'r'
+                       buffer := make([]byte, 512)
+
+                       // Read requests on Transfer() buffer
+                       requests := make(chan ReadRequest)
+                       defer close(requests)
+
+                       // Reporting reader error states
+                       reader_status := make(chan error)
+
+                       go Transfer(buffer, reader, requests, reader_status)
+
+                       br1 := MakeBufferReader(requests)
+
+                       go kc.uploadToKeepServer(url, st.expectPath, br1, upload_status, 3)
+
+                       writer.Write([]byte("foo"))
+                       writer.Close()
+
+                       <-reader_status
+                       <-st.handled
+
+                       status := <-upload_status
+                       c.Check(status, DeepEquals, UploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200})
+               })
+}
+
+type FailHandler struct {
+       handled chan string
+}
+
+func (this FailHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       resp.WriteHeader(400)
+       this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+func (s *StandaloneSuite) TestFailedUploadToStubKeepServer(c *C) {
+       st := FailHandler{
+               make(chan string)}
+
+       hash := "acbd18db4cc2f85cedef654fccc4a4d8"
+
+       UploadToStubHelper(c, st,
+               func(kc *KeepClient, url string, reader io.ReadCloser,
+                       writer io.WriteCloser, upload_status chan UploadStatus) {
+
+                       go kc.uploadToKeepServer(url, hash, reader, upload_status, 3)
+
+                       writer.Write([]byte("foo"))
+                       writer.Close()
+
+                       <-st.handled
+
+                       status := <-upload_status
+                       c.Check(status.Url, Equals, fmt.Sprintf("%s/%s", url, hash))
+                       c.Check(status.StatusCode, Equals, 400)
+               })
+
+}
+
+type KeepServer struct {
+       listener net.Listener
+       url      string
+}
+
+func RunSomeFakeKeepServers(st http.Handler, n int, port int) (ks []KeepServer) {
+       ks = make([]KeepServer, n)
+
+       for i := 0; i < n; i += 1 {
+               boguslistener, bogusurl := RunBogusKeepServer(st, port+i)
+               ks[i] = KeepServer{boguslistener, bogusurl}
+       }
+
+       return ks
+}
+
+func (s *StandaloneSuite) TestPutB(c *C) {
+       log.Printf("TestPutB")
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               make(chan string, 2)}
+
+       kc, _ := MakeKeepClient()
+
+       kc.Want_replicas = 2
+       kc.ApiToken = "abc123"
+       kc.Service_roots = make([]string, 5)
+
+       ks := RunSomeFakeKeepServers(st, 5, 2990)
+
+       for i := 0; i < len(ks); i += 1 {
+               kc.Service_roots[i] = ks[i].url
+               defer ks[i].listener.Close()
+       }
+
+       sort.Strings(kc.Service_roots)
+
+       kc.PutB([]byte("foo"))
+
+       shuff := kc.ShuffledServiceRoots(fmt.Sprintf("%x", md5.Sum([]byte("foo"))))
+
+       c.Check(<-st.handled, Equals, shuff[0])
+       c.Check(<-st.handled, Equals, shuff[1])
+}
+
+func (s *StandaloneSuite) TestPutHR(c *C) {
+       log.Printf("TestPutHR")
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               make(chan string, 2)}
+
+       kc, _ := MakeKeepClient()
+
+       kc.Want_replicas = 2
+       kc.ApiToken = "abc123"
+       kc.Service_roots = make([]string, 5)
+
+       ks := RunSomeFakeKeepServers(st, 5, 2990)
+
+       for i := 0; i < len(ks); i += 1 {
+               kc.Service_roots[i] = ks[i].url
+               defer ks[i].listener.Close()
+       }
+
+       sort.Strings(kc.Service_roots)
+
+       reader, writer := io.Pipe()
+
+       go func() {
+               writer.Write([]byte("foo"))
+               writer.Close()
+       }()
+
+       kc.PutHR(hash, reader, 3)
+
+       shuff := kc.ShuffledServiceRoots(hash)
+       log.Print(shuff)
+
+       s1 := <-st.handled
+       s2 := <-st.handled
+
+       c.Check((s1 == shuff[0] && s2 == shuff[1]) ||
+               (s1 == shuff[1] && s2 == shuff[0]),
+               Equals,
+               true)
+}
+
+func (s *StandaloneSuite) TestPutWithFail(c *C) {
+       log.Printf("TestPutWithFail")
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               make(chan string, 2)}
+
+       fh := FailHandler{
+               make(chan string, 1)}
+
+       kc, _ := MakeKeepClient()
+
+       kc.Want_replicas = 2
+       kc.ApiToken = "abc123"
+       kc.Service_roots = make([]string, 5)
+
+       ks1 := RunSomeFakeKeepServers(st, 4, 2990)
+       ks2 := RunSomeFakeKeepServers(fh, 1, 2995)
+
+       for i, k := range ks1 {
+               kc.Service_roots[i] = k.url
+               defer k.listener.Close()
+       }
+       for i, k := range ks2 {
+               kc.Service_roots[len(ks1)+i] = k.url
+               defer k.listener.Close()
+       }
+
+       sort.Strings(kc.Service_roots)
+
+       shuff := kc.ShuffledServiceRoots(fmt.Sprintf("%x", md5.Sum([]byte("foo"))))
+
+       phash, replicas, err := kc.PutB([]byte("foo"))
+
+       <-fh.handled
+
+       c.Check(err, Equals, nil)
+       c.Check(phash, Equals, hash)
+       c.Check(replicas, Equals, 2)
+       c.Check(<-st.handled, Equals, shuff[1])
+       c.Check(<-st.handled, Equals, shuff[2])
+}
+
+func (s *StandaloneSuite) TestPutWithTooManyFail(c *C) {
+       log.Printf("TestPutWithTooManyFail")
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               make(chan string, 1)}
+
+       fh := FailHandler{
+               make(chan string, 4)}
+
+       kc, _ := MakeKeepClient()
+
+       kc.Want_replicas = 2
+       kc.ApiToken = "abc123"
+       kc.Service_roots = make([]string, 5)
+
+       ks1 := RunSomeFakeKeepServers(st, 1, 2990)
+       ks2 := RunSomeFakeKeepServers(fh, 4, 2991)
+
+       for i, k := range ks1 {
+               kc.Service_roots[i] = k.url
+               defer k.listener.Close()
+       }
+       for i, k := range ks2 {
+               kc.Service_roots[len(ks1)+i] = k.url
+               defer k.listener.Close()
+       }
+
+       sort.Strings(kc.Service_roots)
+
+       shuff := kc.ShuffledServiceRoots(fmt.Sprintf("%x", md5.Sum([]byte("foo"))))
+
+       _, replicas, err := kc.PutB([]byte("foo"))
+
+       c.Check(err, Equals, InsufficientReplicasError)
+       c.Check(replicas, Equals, 1)
+       c.Check(<-st.handled, Equals, shuff[1])
+}
+
+type StubGetHandler struct {
+       c              *C
+       expectPath     string
+       expectApiToken string
+       returnBody     []byte
+}
+
+func (this StubGetHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       this.c.Check(req.URL.Path, Equals, "/"+this.expectPath)
+       this.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", this.expectApiToken))
+       resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(this.returnBody)))
+       resp.Write(this.returnBody)
+}
+
+func (s *StandaloneSuite) TestGet(c *C) {
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubGetHandler{
+               c,
+               hash,
+               "abc123",
+               []byte("foo")}
+
+       listener, url := RunBogusKeepServer(st, 2990)
+       defer listener.Close()
+
+       kc, _ := MakeKeepClient()
+       kc.ApiToken = "abc123"
+       kc.Service_roots = []string{url}
+
+       r, n, url2, err := kc.Get(hash)
+       c.Check(err, Equals, nil)
+       c.Check(n, Equals, int64(3))
+       c.Check(url2, Equals, fmt.Sprintf("%s/%s", url, hash))
+
+       content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(content, DeepEquals, []byte("foo"))
+}
+
+func (s *StandaloneSuite) TestGetFail(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := FailHandler{make(chan string, 1)}
+
+       listener, url := RunBogusKeepServer(st, 2990)
+       defer listener.Close()
+
+       kc, _ := MakeKeepClient()
+       kc.ApiToken = "abc123"
+       kc.Service_roots = []string{url}
+
+       r, n, url2, err := kc.Get(hash)
+       c.Check(err, Equals, BlockNotFound)
+       c.Check(n, Equals, int64(0))
+       c.Check(url2, Equals, "")
+       c.Check(r, Equals, nil)
+}
+
+func (s *StandaloneSuite) TestGetWithFailures(c *C) {
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       fh := FailHandler{
+               make(chan string, 1)}
+
+       st := StubGetHandler{
+               c,
+               hash,
+               "abc123",
+               []byte("foo")}
+
+       kc, _ := MakeKeepClient()
+       kc.ApiToken = "abc123"
+       kc.Service_roots = make([]string, 5)
+
+       ks1 := RunSomeFakeKeepServers(st, 1, 2990)
+       ks2 := RunSomeFakeKeepServers(fh, 4, 2991)
+
+       for i, k := range ks1 {
+               kc.Service_roots[i] = k.url
+               defer k.listener.Close()
+       }
+       for i, k := range ks2 {
+               kc.Service_roots[len(ks1)+i] = k.url
+               defer k.listener.Close()
+       }
+
+       sort.Strings(kc.Service_roots)
+
+       r, n, url2, err := kc.Get(hash)
+       <-fh.handled
+       c.Check(err, Equals, nil)
+       c.Check(n, Equals, int64(3))
+       c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks1[0].url, hash))
+
+       content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(content, DeepEquals, []byte("foo"))
+}
+
+func (s *ServerRequiredSuite) TestPutAndGet(c *C) {
+       os.Setenv("ARVADOS_API_HOST", "localhost:3001")
+       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+
+       kc, err := MakeKeepClient()
+       c.Assert(err, Equals, nil)
+
+       hash, replicas, err := kc.PutB([]byte("foo"))
+       c.Check(hash, Equals, fmt.Sprintf("%x", md5.Sum([]byte("foo"))))
+       c.Check(replicas, Equals, 2)
+       c.Check(err, Equals, nil)
+
+       r, n, url2, err := kc.Get(hash)
+       c.Check(err, Equals, nil)
+       c.Check(n, Equals, int64(3))
+       c.Check(url2, Equals, fmt.Sprintf("http://localhost:25108/%s", hash))
+
+       content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(content, DeepEquals, []byte("foo"))
+}
index 88487ae96e672726cfa5dbb3142dcfdeafcbec94..e414d267a1347a51c9ae48354082e14bf48da29d 100644 (file)
@@ -159,18 +159,48 @@ class KeepClient(object):
             finally:
                 self.lock.release()
 
+        # Build an ordering with which to query the Keep servers based on the
+        # contents of the hash.
+        # "hash" is a hex-encoded number at least 8 digits
+        # (32 bits) long
+
+        # seed used to calculate the next keep server from 'pool'
+        # to be added to 'pseq'
         seed = hash
+
+        # Keep servers still to be added to the ordering
         pool = self.service_roots[:]
+
+        # output probe sequence
         pseq = []
+
+        # iterate while there are servers left to be assigned
         while len(pool) > 0:
             if len(seed) < 8:
-                if len(pseq) < len(hash) / 4: # first time around
+                # ran out of digits in the seed
+                if len(pseq) < len(hash) / 4:
+                    # the number of servers added to the probe sequence is less
+                    # than the number of 4-digit slices in 'hash' so refill the
+                    # seed with the last 4 digits and then append the contents
+                    # of 'hash'.
                     seed = hash[-4:] + hash
                 else:
+                    # refill the seed with the contents of 'hash'
                     seed += hash
+
+            # Take the next 8 digits (32 bits) and interpret them as an
+            # integer, then take that value modulo the size of the remaining
+            # pool to get the next selected server.
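+            # For example, with 17 servers and hash
+            # "acbd18db4cc2f85cedef654fccc4a4d8" (the md5 of "foo"), the
+            # first probe is 0xacbd18db % 17 == 9.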
             probe = int(seed[0:8], 16) % len(pool)
+
+            # Append the selected server to the probe sequence and remove it
+            # from the pool.
             pseq += [pool[probe]]
             pool = pool[:probe] + pool[probe+1:]
+
+            # Remove the digits just used from the seed
             seed = seed[8:]
         logging.debug(str(pseq))
         return pseq
@@ -208,7 +238,7 @@ class KeepClient(object):
             self._cache_lock.release()
 
     def reserve_cache(self, locator):
-        '''Reserve a cache slot for the specified locator, 
+        '''Reserve a cache slot for the specified locator,
         or return the existing slot.'''
         self._cache_lock.acquire()
         try:
@@ -281,8 +311,8 @@ class KeepClient(object):
             with timer.Timer() as t:
                 resp, content = h.request(url.encode('utf-8'), 'GET',
                                           headers=headers)
-            logging.info("Received %s bytes in %s msec (%s MiB/sec)" % (len(content), 
-                                                                        t.msecs, 
+            logging.info("Received %s bytes in %s msec (%s MiB/sec)" % (len(content),
+                                                                        t.msecs,
                                                                         (len(content)/(1024*1024))/t.secs))
             if re.match(r'^2\d\d$', resp['status']):
                 m = hashlib.new('md5')
index 3c9d55b184b41fae8e3c40f4dc88810f5adc84af..9901e14123f92aea4672b52814481faa5327b409 100644 (file)
@@ -149,6 +149,10 @@ def run_keep():
     _start_keep(0)
     _start_keep(1)
 
+
+    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3001"
+    os.environ["ARVADOS_API_HOST_INSECURE"] = "true"
+
     authorize_with("admin")
     api = arvados.api('v1', cache=False)
     a = api.keep_disks().list().execute()
@@ -210,3 +214,5 @@ if __name__ == "__main__":
         run_keep()
     elif args.action == 'stop_keep':
         stop_keep()
+    else:
+        print('Unrecognized action "{}", actions are "start", "stop", "start_keep", "stop_keep"'.format(args.action))