21621: Add unit test for json copy to clipboard
[arvados.git] / services / keepstore / pull_worker_test.go
index 84e951c454334fc5f7e10921ed2a2b9dab0650f3..d109b56df3cee8e2ac3259ebb784fe4cfdacc20b 100644 (file)
-package main
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
 
 import (
        "bytes"
+       "context"
+       "crypto/md5"
+       "encoding/json"
        "errors"
-       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
-       "git.curoverse.com/arvados.git/sdk/go/keepclient"
-       . "gopkg.in/check.v1"
+       "fmt"
        "io"
        "net/http"
-       "strings"
-       "testing"
+       "net/http/httptest"
+       "sort"
        "time"
-)
-
-var testPullLists map[string]string
-var processedPullLists map[string]string
-
-type PullWorkerTestSuite struct{}
-
-// Gocheck boilerplate
-func Test(t *testing.T) {
-       TestingT(t)
-}
-
-// Gocheck boilerplate
-var _ = Suite(&PullWorkerTestSuite{})
-
-func (s *PullWorkerTestSuite) SetUpSuite(c *C) {
-       // Keepstore itself is not started in tests, so we need to start
-       // the pull worker goroutine explicitly here.
-       arv, err := arvadosclient.MakeArvadosClient()
-       c.Assert(err, Equals, nil)
-       keepClient, err := keepclient.MakeKeepClient(&arv)
-       c.Assert(err, Equals, nil)
-       go RunPullWorker(pullq.NextItem, keepClient)
-
-       // When a new pull request arrives, the old one will be overwritten.
-       // This behavior is simulated with delay tests below.
-       testPullLists = make(map[string]string)
-       processedPullLists = make(map[string]string)
-}
-
-func (s *PullWorkerTestSuite) TearDownSuite(c *C) {
-       // give the channel enough time to read and process all pull list entries
-       time.Sleep(1000 * time.Millisecond)
-
-       expectWorkerChannelEmpty(c, pullq.NextItem)
-
-       c.Assert(len(processedPullLists), Not(Equals), len(testPullLists))
-}
-
-var first_pull_list = []byte(`[
-               {
-                       "locator":"locator1",
-                       "servers":[
-                               "server_1",
-                               "server_2"
-                       ]
-               },
-    {
-                       "locator":"locator2",
-                       "servers":[
-                               "server_3"
-                       ]
-               }
-       ]`)
-
-var second_pull_list = []byte(`[
-               {
-                       "locator":"locator3",
-                       "servers":[
-                               "server_1",
-        "server_2"
-                       ]
-               }
-       ]`)
-
-type PullWorkerTestData struct {
-       name          string
-       req           RequestTester
-       response_code int
-       response_body string
-       read_content  string
-       read_error    bool
-       put_error     bool
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_pull_list_with_two_locators(c *C) {
-       defer teardown()
-
-       data_manager_token = "DATA MANAGER TOKEN"
-       testData := PullWorkerTestData{
-               "TestPullWorker_pull_list_with_two_locators",
-               RequestTester{"/pull", data_manager_token, "PUT", first_pull_list},
-               http.StatusOK,
-               "Received 2 pull requests\n",
-               "hello",
-               false,
-               false,
-       }
-
-       performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_pull_list_with_one_locator(c *C) {
-       defer teardown()
-
-       data_manager_token = "DATA MANAGER TOKEN"
-       testData := PullWorkerTestData{
-               "TestPullWorker_pull_list_with_one_locator",
-               RequestTester{"/pull", data_manager_token, "PUT", second_pull_list},
-               http.StatusOK,
-               "Received 1 pull requests\n",
-               "hola",
-               false,
-               false,
-       }
-
-       performTest(testData, c)
-}
-
-// When a new pull request arrives, the old one will be overwritten.
-// Simulate this behavior by adding a delay in GetContent for the delay test(s).
-// Run the delay test twice so it is never the last test executed, which
-// would make the overwrite behavior impossible to verify.
-func (s *PullWorkerTestSuite) TestPullWorker_pull_list_with_one_locator_with_delay_1(c *C) {
-       defer teardown()
-
-       data_manager_token = "DATA MANAGER TOKEN"
-       testData := PullWorkerTestData{
-               "TestPullWorker_pull_list_with_one_locator_with_delay_1",
-               RequestTester{"/pull", data_manager_token, "PUT", second_pull_list},
-               http.StatusOK,
-               "Received 1 pull requests\n",
-               "hola",
-               false,
-               false,
-       }
-
-       performTest(testData, c)
-}
 
-func (s *PullWorkerTestSuite) TestPullWorker_pull_list_with_one_locator_with_delay_2(c *C) {
-       defer teardown()
-
-       data_manager_token = "DATA MANAGER TOKEN"
-       testData := PullWorkerTestData{
-               "TestPullWorker_pull_list_with_one_locator_with_delay_2",
-               RequestTester{"/pull", data_manager_token, "PUT", second_pull_list},
-               http.StatusOK,
-               "Received 1 pull requests\n",
-               "hola",
-               false,
-               false,
-       }
-
-       performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_get_one_locator(c *C) {
-       defer teardown()
-
-       data_manager_token = "DATA MANAGER TOKEN"
-       testData := PullWorkerTestData{
-               "TestPullWorker_error_on_get_one_locator",
-               RequestTester{"/pull", data_manager_token, "PUT", second_pull_list},
-               http.StatusOK,
-               "Received 1 pull requests\n",
-               "unused",
-               true,
-               false,
-       }
-
-       performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_get_two_locators(c *C) {
-       defer teardown()
-
-       data_manager_token = "DATA MANAGER TOKEN"
-       testData := PullWorkerTestData{
-               "TestPullWorker_error_on_get_two_locators",
-               RequestTester{"/pull", data_manager_token, "PUT", first_pull_list},
-               http.StatusOK,
-               "Received 2 pull requests\n",
-               "unused",
-               true,
-               false,
-       }
-
-       performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_put_one_locator(c *C) {
-       defer teardown()
-
-       data_manager_token = "DATA MANAGER TOKEN"
-       testData := PullWorkerTestData{
-               "TestPullWorker_error_on_put_one_locator",
-               RequestTester{"/pull", data_manager_token, "PUT", second_pull_list},
-               http.StatusOK,
-               "Received 1 pull requests\n",
-               "unused",
-               false,
-               true,
-       }
-
-       performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_put_two_locators(c *C) {
-       defer teardown()
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "github.com/sirupsen/logrus"
+       . "gopkg.in/check.v1"
+)
 
-       data_manager_token = "DATA MANAGER TOKEN"
-       testData := PullWorkerTestData{
-               "TestPullWorker_error_on_put_two_locators",
-               RequestTester{"/pull", data_manager_token, "PUT", first_pull_list},
-               http.StatusOK,
-               "Received 2 pull requests\n",
-               "unused",
-               false,
-               true,
+func (s *routerSuite) TestPullList_Execute(c *C) {
+       remotecluster := testCluster(c)
+       remotecluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-rrrrrrrrrrrrrrr": {Replication: 1, Driver: "stub"},
        }
-
-       performTest(testData, c)
-}
-
-func performTest(testData PullWorkerTestData, c *C) {
-       testPullLists[testData.name] = testData.response_body
-
-       // We need to make sure the tests have a slight delay so that we can verify the pull list channel overwrites.
-       time.Sleep(25 * time.Millisecond)
-
-       // Override GetContent to mock keepclient functionality
-       GetContent = func(locator string, signedLocator string) (reader io.ReadCloser, contentLength int64, url string, err error) {
-               if strings.HasPrefix(testData.name, "TestPullWorker_pull_list_with_one_locator_with_delay") {
-                       time.Sleep(100 * time.Millisecond)
-               }
-
-               processedPullLists[testData.name] = testData.response_body
-               if testData.read_error {
-                       return nil, 0, "", errors.New("Error getting data")
-               } else {
-                       cb := &ClosingBuffer{bytes.NewBufferString("Hi!")}
-                       var rc io.ReadCloser
-                       rc = cb
-                       return rc, 3, "", nil
+       remoterouter, cancel := testRouter(c, remotecluster, nil)
+       defer cancel()
+       remoteserver := httptest.NewServer(remoterouter)
+       defer remoteserver.Close()
+
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
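+       // executePullList marshals pullList to JSON, PUTs it to the /pull
+       // endpoint as the system root user, waits for the puller to drain
+       // its queue, and returns the captured log output.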
+       executePullList := func(pullList []PullListItem) string {
+               var logbuf bytes.Buffer
+               logger := logrus.New()
+               logger.Out = &logbuf
+               router.keepstore.logger = logger
+
+               listjson, err := json.Marshal(pullList)
+               c.Assert(err, IsNil)
+               resp := call(router, "PUT", "http://example/pull", s.cluster.SystemRootToken, listjson, nil)
+               c.Check(resp.Code, Equals, http.StatusOK)
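+               // Wait until the pull queue is empty and no pulls are in
+               // progress.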
+               for {
+                       router.puller.cond.L.Lock()
+                       todolen := len(router.puller.todo)
+                       router.puller.cond.L.Unlock()
+                       if todolen == 0 && router.puller.inprogress.Load() == 0 {
+                               break
+                       }
+                       time.Sleep(time.Millisecond)
                }
+               return logbuf.String()
        }
 
-       // Override PutContent to mock PutBlock functionality
-       PutContent = func(content []byte, locator string) (err error) {
-               if testData.put_error {
-                       return errors.New("Error putting data")
-               } else {
-                       return nil
-               }
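+       // newRemoteBlock stores datastring on the remote cluster and returns
+       // the resulting locator.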
+       newRemoteBlock := func(datastring string) string {
+               data := []byte(datastring)
+               hash := fmt.Sprintf("%x", md5.Sum(data))
+               locator := fmt.Sprintf("%s+%d", hash, len(data))
+               _, err := remoterouter.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+                       Hash: hash,
+                       Data: data,
+               })
+               c.Assert(err, IsNil)
+               return locator
        }
 
-       response := IssueRequest(&testData.req)
-       c.Assert(testData.response_code, Equals, response.Code)
-       c.Assert(testData.response_body, Equals, response.Body.String())
-}
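+       // Collect the local volumes (as stubVolumes) in mount-UUID order.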
+       mounts := append([]*mount(nil), router.keepstore.mountsR...)
+       sort.Slice(mounts, func(i, j int) bool { return mounts[i].UUID < mounts[j].UUID })
+       var vols []*stubVolume
+       for _, mount := range mounts {
+               vols = append(vols, mount.volume.(*stubVolume))
+       }
 
-type ClosingBuffer struct {
-       *bytes.Buffer
-}
+       ctx := authContext(arvadostest.ActiveTokenV2)
 
-func (cb *ClosingBuffer) Close() (err error) {
-       return
-}
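+       // Pull a block from the remote server without specifying a
+       // destination mount; afterwards it should be readable locally.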
+       locator := newRemoteBlock("pull available block to unspecified volume")
+       executePullList([]PullListItem{{
+               Locator: locator,
+               Servers: []string{remoteserver.URL}}})
+       _, err := router.keepstore.BlockRead(ctx, arvados.BlockReadOptions{
+               Locator: router.keepstore.signLocator(arvadostest.ActiveTokenV2, locator),
+               WriteTo: io.Discard})
+       c.Check(err, IsNil)
 
-func expectWorkerChannelEmpty(c *C, workerChannel <-chan interface{}) {
-       select {
-       case item := <-workerChannel:
-               c.Fatalf("Received value (%v) from channel that was expected to be empty", item)
-       default:
-       }
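+       // Pull two blocks, each directed to a specific mount; each should
+       // land on the requested volume.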
+       locator0 := newRemoteBlock("pull available block to specified volume 0")
+       locator1 := newRemoteBlock("pull available block to specified volume 1")
+       executePullList([]PullListItem{
+               {
+                       Locator:   locator0,
+                       Servers:   []string{remoteserver.URL},
+                       MountUUID: vols[0].params.UUID},
+               {
+                       Locator:   locator1,
+                       Servers:   []string{remoteserver.URL},
+                       MountUUID: vols[1].params.UUID}})
+       c.Check(vols[0].data[locator0[:32]].data, NotNil)
+       c.Check(vols[1].data[locator1[:32]].data, NotNil)
+
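+       // Pulling a block the remote server does not have should log
+       // "Block not found".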
+       locator = fooHash + "+3"
+       logs := executePullList([]PullListItem{{
+               Locator: locator,
+               Servers: []string{remoteserver.URL}}})
+       c.Check(logs, Matches, ".*error pulling data from remote servers.*Block not found.*locator=acbd.*\n")
+
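+       // Pulling from an unreachable server should log a connection error.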
+       locator = fooHash + "+3"
+       logs = executePullList([]PullListItem{{
+               Locator: locator,
+               Servers: []string{"http://0.0.0.0:9/"}}})
+       c.Check(logs, Matches, ".*error pulling data from remote servers.*connection refused.*locator=acbd.*\n")
+
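+       // A failure writing the pulled data to the local volumes should be
+       // logged.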
+       locator = newRemoteBlock("log error writing to local volume")
+       vols[0].blockWrite = func(context.Context, string, []byte) error { return errors.New("test error") }
+       vols[1].blockWrite = vols[0].blockWrite
+       logs = executePullList([]PullListItem{{
+               Locator: locator,
+               Servers: []string{remoteserver.URL}}})
+       c.Check(logs, Matches, ".*error writing data to zzzzz-nyw5e-.*error=\"test error\".*locator=.*\n")
+       vols[0].blockWrite = nil
+       vols[1].blockWrite = nil
+
+       locator = newRemoteBlock("log error when destination mount does not exist")
+       logs = executePullList([]PullListItem{{
+               Locator:   locator,
+               Servers:   []string{remoteserver.URL},
+               MountUUID: "bogus-mount-uuid"}})
+       c.Check(logs, Matches, ".*ignoring pull list entry for nonexistent mount bogus-mount-uuid.*locator=.*\n")
+
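+       // An empty pull list should also be accepted.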
+       logs = executePullList([]PullListItem{})
+       c.Logf("%s", logs)
 }