+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
package main
import (
+ "context"
"encoding/json"
"fmt"
"io/ioutil"
- "log"
+ "net/http"
+ "net/http/httptest"
"os"
"time"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"github.com/AdRoll/goamz/s3"
"github.com/AdRoll/goamz/s3/s3test"
+ "github.com/ghodss/yaml"
check "gopkg.in/check.v1"
)
c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
}
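+
+// blockingHandler is a stub HTTP handler: it reports each incoming
+// request on the requested channel (if non-nil), then blocks until
+// unblock is closed before answering with 404.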
+type blockingHandler struct {
+ requested chan *http.Request
+ unblock chan struct{}
+}
+
+func (h *blockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if h.requested != nil {
+ h.requested <- r
+ }
+ if h.unblock != nil {
+ <-h.unblock
+ }
+ http.Error(w, "nothing here", http.StatusNotFound)
+}
+
+func (s *StubbedS3Suite) TestGetContextCancel(c *check.C) {
+ loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+ buf := make([]byte, 3)
+
+ s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
+ _, err := v.Get(ctx, loc, buf)
+ return err
+ })
+}
+
+func (s *StubbedS3Suite) TestCompareContextCancel(c *check.C) {
+ loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+ buf := []byte("bar")
+
+ s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
+ return v.Compare(ctx, loc, buf)
+ })
+}
+
+func (s *StubbedS3Suite) TestPutContextCancel(c *check.C) {
+ loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+ buf := []byte("foo")
+
+ s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
+ return v.Put(ctx, loc, buf)
+ })
+}
+
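+// testContextCancel points a test volume at a blocking stub server,
+// runs testFunc with a cancelable context, cancels the context while
+// the request is held open, and checks that testFunc returns
+// context.Canceled.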
+func (s *StubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Context, *TestableS3Volume) error) {
+ handler := &blockingHandler{}
+ srv := httptest.NewServer(handler)
+ defer srv.Close()
+
+ v := s.newTestableVolume(c, 5*time.Minute, false, 2)
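+ // Copy the test volume's config but point its endpoint at the
+ // blocking stub server, so the operation under test hangs until
+ // we cancel its context.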
+ vol := *v.S3Volume
+ vol.Endpoint = srv.URL
+ v = &TestableS3Volume{S3Volume: &vol, c: c}
+ v.Start()
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ handler.requested = make(chan *http.Request)
+ handler.unblock = make(chan struct{})
+ defer close(handler.unblock)
+
+ doneFunc := make(chan struct{})
+ go func() {
+ err := testFunc(ctx, v)
+ c.Check(err, check.Equals, context.Canceled)
+ close(doneFunc)
+ }()
+
+ timeout := time.After(10 * time.Second)
+
+ // Wait for the stub server to receive a request, meaning
+ // the operation under test is blocked on an S3 call.
+ select {
+ case <-timeout:
+ c.Fatal("timed out waiting for test func to call our handler")
+ case <-doneFunc:
+ c.Fatal("test func finished without even calling our handler!")
+ case <-handler.requested:
+ }
+
+ cancel()
+
+ select {
+ case <-timeout:
+ c.Fatal("timed out")
+ case <-doneFunc:
+ }
+}
+
func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
defer func(tl, bs arvados.Duration) {
theConfig.TrashLifetime = tl
}
// Call Trash, then check canTrash and canGetAfterTrash
- loc, blk = setupScenario()
+ loc, _ = setupScenario()
err = v.Trash(loc)
c.Check(err == nil, check.Equals, scenario.canTrash)
_, err = v.Get(context.Background(), loc, buf)
}
// Call Untrash, then check canUntrash
- loc, blk = setupScenario()
+ loc, _ = setupScenario()
err = v.Untrash(loc)
c.Check(err == nil, check.Equals, scenario.canUntrash)
if scenario.dataT != none || scenario.trashT != none {
// Call EmptyTrash, then check haveTrashAfterEmpty and
// freshAfterEmpty
- loc, blk = setupScenario()
+ loc, _ = setupScenario()
v.EmptyTrash()
_, err = v.bucket.Head("trash/"+loc, nil)
c.Check(err == nil, check.Equals, scenario.haveTrashAfterEmpty)
srv, err := s3test.NewServer(&s3test.Config{Clock: clock})
c.Assert(err, check.IsNil)
- tmp, err := ioutil.TempFile("", "keepstore")
- c.Assert(err, check.IsNil)
- defer os.Remove(tmp.Name())
- _, err = tmp.Write([]byte("xxx\n"))
- c.Assert(err, check.IsNil)
- c.Assert(tmp.Close(), check.IsNil)
-
v := &TestableS3Volume{
S3Volume: &S3Volume{
Bucket: TestBucketName,
- AccessKeyFile: tmp.Name(),
- SecretKeyFile: tmp.Name(),
Endpoint: srv.URL(),
Region: "test-region-1",
LocationConstraint: true,
ReadOnly: readonly,
IndexPageSize: 1000,
},
+ c: c,
server: srv,
serverClock: clock,
}
- c.Assert(v.Start(), check.IsNil)
+ v.Start()
err = v.bucket.PutBucket(s3.ACL("private"))
c.Assert(err, check.IsNil)
return v
}
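+
+// TestConfig checks that StorageClasses listed in a volume's YAML
+// config are parsed and reported by GetStorageClasses.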
+func (s *StubbedS3Suite) TestConfig(c *check.C) {
+ var cfg Config
+ err := yaml.Unmarshal([]byte(`
+Volumes:
+ - Type: S3
+ StorageClasses: ["class_a", "class_b"]
+`), &cfg)
+
+ c.Check(err, check.IsNil)
+ c.Check(cfg.Volumes[0].GetStorageClasses(), check.DeepEquals, []string{"class_a", "class_b"})
+}
+
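+// Start writes a throwaway credentials file, points AccessKeyFile and
+// SecretKeyFile at it, then starts the wrapped S3Volume.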
+func (v *TestableS3Volume) Start() error {
+ tmp, err := ioutil.TempFile("", "keepstore")
+ v.c.Assert(err, check.IsNil)
+ defer os.Remove(tmp.Name())
+ _, err = tmp.Write([]byte("xxx\n"))
+ v.c.Assert(err, check.IsNil)
+ v.c.Assert(tmp.Close(), check.IsNil)
+
+ v.S3Volume.AccessKeyFile = tmp.Name()
+ v.S3Volume.SecretKeyFile = tmp.Name()
+
+ v.c.Assert(v.S3Volume.Start(), check.IsNil)
+ return nil
+}
+
-// PutRaw skips the ContentMD5 test
+// PutRaw skips the ContentMD5 test and also writes a "recent/"
+// timestamp marker alongside the block.
func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
err := v.bucket.Put(loc, block, "application/octet-stream", s3ACL, s3.Options{})
if err != nil {
- log.Printf("PutRaw: %+v", err)
+ log.Printf("PutRaw: %s: %+v", loc, err)
+ }
+ err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+ if err != nil {
+ log.Printf("PutRaw: recent/%s: %+v", loc, err)
}
}