"testing"
"time"
+ "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
+// SetUpTest resets all package-level config, key, and client variables
+// before each test so that no state leaks from one test case to the next.
func (s *ServerRequiredSuite) SetUpTest(c *C) {
arvadostest.ResetEnv()
+
+ // reset all variables between tests
+ srcConfig = arvadosclient.APIConfig{}
+ dstConfig = arvadosclient.APIConfig{}
+ blobSigningKey = ""
srcKeepServicesJSON = ""
dstKeepServicesJSON = ""
+ replications = 0
+ prefix = ""
+ arvSrc = arvadosclient.ArvadosClient{}
+ arvDst = arvadosclient.ArvadosClient{}
+ kcSrc = &keepclient.KeepClient{}
+ kcDst = &keepclient.KeepClient{}
}
+// TearDownSuite stops the test API server once the whole suite has finished.
func (s *ServerRequiredSuite) TearDownSuite(c *C) {
arvadostest.StopAPI()
}
+// testKeepServicesJSON is a canned arvados#keepServiceList response listing
+// two fake disk services on unreachable hosts; tests assign it to
+// srcKeepServicesJSON or dstKeepServicesJSON to simulate keepservers.
+var testKeepServicesJSON = "{ \"kind\":\"arvados#keepServiceList\", \"etag\":\"\", \"self_link\":\"\", \"offset\":null, \"limit\":null, \"items\":[ { \"href\":\"/keep_services/zzzzz-bi6l4-123456789012340\", \"kind\":\"arvados#keepService\", \"etag\":\"641234567890enhj7hzx432e5\", \"uuid\":\"zzzzz-bi6l4-123456789012340\", \"owner_uuid\":\"zzzzz-tpzed-123456789012345\", \"service_host\":\"keep0.zzzzz.arvadosapi.com\", \"service_port\":25107, \"service_ssl_flag\":false, \"service_type\":\"disk\", \"read_only\":false }, { \"href\":\"/keep_services/zzzzz-bi6l4-123456789012341\", \"kind\":\"arvados#keepService\", \"etag\":\"641234567890enhj7hzx432e5\", \"uuid\":\"zzzzz-bi6l4-123456789012341\", \"owner_uuid\":\"zzzzz-tpzed-123456789012345\", \"service_host\":\"keep0.zzzzz.arvadosapi.com\", \"service_port\":25108, \"service_ssl_flag\":false, \"service_type\":\"disk\", \"read_only\":false } ], \"items_available\":2 }"
+
// Testing keep-rsync needs two sets of keep services: src and dst.
// The test setup hence tweaks keep-rsync initialization to achieve this.
// First invoke initializeKeepRsync and then invoke StartKeepWithParams
// to create the keep servers to be used as destination.
-func setupRsync(c *C, enforcePermissions bool, overwriteReplications bool) {
+// enforcePermissions is forwarded to the dst keep servers; setupDstServers
+// controls whether dst keep servers are started and kcDst is (re)loaded
+// with Want_replicas = 1.
+func setupRsync(c *C, enforcePermissions bool, setupDstServers bool) {
// srcConfig
srcConfig.APIHost = os.Getenv("ARVADOS_API_HOST")
srcConfig.APIToken = os.Getenv("ARVADOS_API_TOKEN")
// initialize keep-rsync
err := initializeKeepRsync()
- c.Assert(err, Equals, nil)
-
- // Create two more keep servers to be used as destination
- arvadostest.StartKeepWithParams(true, enforcePermissions)
+ c.Check(err, IsNil)
- // set replications to 1 since those many keep servers were created for dst.
- if overwriteReplications {
+ // Create an additional keep server to be used as destination and reload kcDst
+ // Set replications to 1 since those many keep servers were created for dst.
+ if setupDstServers {
+ arvadostest.StartKeepWithParams(true, enforcePermissions)
replications = 1
- }
- // load kcDst
- kcDst, err = keepclient.MakeKeepClient(&arvDst)
- c.Assert(err, Equals, nil)
- kcDst.Want_replicas = 1
+ kcDst, err = keepclient.MakeKeepClient(&arvDst)
+ c.Check(err, IsNil)
+ kcDst.Want_replicas = 1
+ }
}
// Test readConfigFromFile method
func (s *ServerRequiredSuite) TestReadConfigFromFile(c *C) {
// Setup a test config file
file, err := ioutil.TempFile(os.TempDir(), "config")
- c.Assert(err, Equals, nil)
+ c.Check(err, IsNil)
defer os.Remove(file.Name())
fileContent := "ARVADOS_API_HOST=testhost\n"
// Invoke readConfigFromFile method with this test filename
config, err := readConfigFromFile(file.Name())
- c.Assert(err, Equals, nil)
+ c.Check(err, IsNil)
c.Assert(config.APIHost, Equals, "testhost")
c.Assert(config.APIToken, Equals, "testtoken")
c.Assert(config.APIHostInsecure, Equals, true)
c.Check(err, Equals, nil)
reader, blocklen, _, err := kcSrc.Get(locatorInSrc)
- c.Assert(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(blocklen, Equals, int64(10))
all, err := ioutil.ReadAll(reader)
c.Check(all, DeepEquals, srcData)
c.Check(err, Equals, nil)
reader, blocklen, _, err = kcDst.Get(locatorInDst)
- c.Assert(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(blocklen, Equals, int64(10))
all, err = ioutil.ReadAll(reader)
c.Check(all, DeepEquals, dstData)
// Test keep-rsync initialization, with srcKeepServicesJSON
func (s *ServerRequiredSuite) TestRsyncInitializeWithKeepServicesJSON(c *C) {
- srcKeepServicesJSON = "{ \"kind\":\"arvados#keepServiceList\", \"etag\":\"\", \"self_link\":\"\", \"offset\":null, \"limit\":null, \"items\":[ { \"href\":\"/keep_services/zzzzz-bi6l4-123456789012340\", \"kind\":\"arvados#keepService\", \"etag\":\"641234567890enhj7hzx432e5\", \"uuid\":\"zzzzz-bi6l4-123456789012340\", \"owner_uuid\":\"zzzzz-tpzed-123456789012345\", \"service_host\":\"keep0.zzzzz.arvadosapi.com\", \"service_port\":25107, \"service_ssl_flag\":false, \"service_type\":\"disk\", \"read_only\":false }, { \"href\":\"/keep_services/zzzzz-bi6l4-123456789012341\", \"kind\":\"arvados#keepService\", \"etag\":\"641234567890enhj7hzx432e5\", \"uuid\":\"zzzzz-bi6l4-123456789012341\", \"owner_uuid\":\"zzzzz-tpzed-123456789012345\", \"service_host\":\"keep0.zzzzz.arvadosapi.com\", \"service_port\":25108, \"service_ssl_flag\":false, \"service_type\":\"disk\", \"read_only\":false } ], \"items_available\":2 }"
+ srcKeepServicesJSON = testKeepServicesJSON
setupRsync(c, false, true)
signedLocator := keepclient.SignLocator(locatorInSrc, arvSrc.ApiToken, tomorrow, []byte(blobSigningKey))
reader, blocklen, _, err := kcSrc.Get(signedLocator)
- c.Assert(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(blocklen, Equals, int64(10))
all, err := ioutil.ReadAll(reader)
c.Check(all, DeepEquals, srcData)
signedLocator = keepclient.SignLocator(locatorInDst, arvDst.ApiToken, tomorrow, []byte(blobSigningKey))
reader, blocklen, _, err = kcDst.Get(signedLocator)
- c.Assert(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(blocklen, Equals, int64(10))
all, err = ioutil.ReadAll(reader)
c.Check(all, DeepEquals, dstData)
prefix = indexPrefix
+ // setupTestData
+ setupTestData(c, enforcePermissions, prefix)
+
+ err := performKeepRsync()
+ c.Check(err, IsNil)
+
+ // Now GetIndex from dst and verify that all 5 from src and the 2 extra blocks are found
+ dstIndex, err := getUniqueLocators(kcDst, "")
+ c.Check(err, IsNil)
+
+ if prefix == "" {
+ for _, locator := range srcLocators {
+ _, ok := dstIndex[locator]
+ c.Assert(ok, Equals, true)
+ }
+ } else {
+ for _, locator := range srcLocatorsMatchingPrefix {
+ _, ok := dstIndex[locator]
+ c.Assert(ok, Equals, true)
+ }
+ }
+
+ for _, locator := range extraDstLocators {
+ _, ok := dstIndex[locator]
+ c.Assert(ok, Equals, true)
+ }
+
+ if prefix == "" {
+ // all blocks from src and the two extra blocks
+ c.Assert(len(dstIndex), Equals, len(srcLocators)+len(extraDstLocators))
+ } else {
+ // one matching prefix, 2 that were initially copied into dst along with src, and the extra blocks
+ c.Assert(len(dstIndex), Equals, len(srcLocatorsMatchingPrefix)+len(extraDstLocators)+2)
+ }
+}
+
+// Setup test data in src and dst.
+// The locator slices are package-level (rather than locals) so test bodies
+// can verify rsync results after setupTestData runs; they are reset on each call.
+var srcLocators []string // all blocks put into src
+var srcLocatorsMatchingPrefix []string // subset of src blocks matching indexPrefix
+var dstLocators []string // src blocks that were also copied into dst
+var extraDstLocators []string // blocks present only in dst
+
+func setupTestData(c *C, enforcePermissions bool, indexPrefix string) {
+ srcLocators = []string{}
+ srcLocatorsMatchingPrefix = []string{}
+ dstLocators = []string{}
+ extraDstLocators = []string{}
+
+ // NOTE(review): the appends to srcLocators/srcLocatorsMatchingPrefix/dstLocators
+ // and the enforcePermissions signing bodies are elided context in this diff
+ // hunk and not visible here — confirm against the full file.
tomorrow := time.Now().AddDate(0, 0, 1)
// Put a few blocks in src using kcSrc
- var srcLocators []string
- var srcLocatorsMatchingPrefix []string
for i := 0; i < 5; i++ {
data := []byte(fmt.Sprintf("test-data-%d", i))
hash := fmt.Sprintf("%x", md5.Sum(data))
hash2, rep, err := kcSrc.PutB(data)
c.Check(hash2, Matches, fmt.Sprintf(`^%s\+11(\+.+)?$`, hash))
c.Check(rep, Equals, 2)
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
getLocator := hash
if enforcePermissions {
}
reader, blocklen, _, err := kcSrc.Get(getLocator)
- c.Assert(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(blocklen, Equals, int64(11))
all, err := ioutil.ReadAll(reader)
c.Check(all, DeepEquals, data)
}
// Put first two of those src blocks in dst using kcDst
- var dstLocators []string
for i := 0; i < 2; i++ {
data := []byte(fmt.Sprintf("test-data-%d", i))
hash := fmt.Sprintf("%x", md5.Sum(data))
hash2, rep, err := kcDst.PutB(data)
c.Check(hash2, Matches, fmt.Sprintf(`^%s\+11(\+.+)?$`, hash))
c.Check(rep, Equals, 1)
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
getLocator := hash
if enforcePermissions {
}
reader, blocklen, _, err := kcDst.Get(getLocator)
- c.Assert(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(blocklen, Equals, int64(11))
all, err := ioutil.ReadAll(reader)
c.Check(all, DeepEquals, data)
}
// Put two more blocks in dst; they are not in src at all
- var extraDstLocators []string
for i := 0; i < 2; i++ {
data := []byte(fmt.Sprintf("other-data-%d", i))
hash := fmt.Sprintf("%x", md5.Sum(data))
hash2, rep, err := kcDst.PutB(data)
c.Check(hash2, Matches, fmt.Sprintf(`^%s\+12(\+.+)?$`, hash))
c.Check(rep, Equals, 1)
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
getLocator := hash
if enforcePermissions {
}
reader, blocklen, _, err := kcDst.Get(getLocator)
- c.Assert(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(blocklen, Equals, int64(12))
all, err := ioutil.ReadAll(reader)
c.Check(all, DeepEquals, data)
extraDstLocators = append(extraDstLocators, fmt.Sprintf("%s+%d", hash, blocklen))
}
+}
+
+// Setup rsync using srcKeepServicesJSON with fake keepservers.
+// Expect error during performKeepRsync due to unreachable src keepservers.
+func (s *ServerRequiredSuite) TestErrorDuringRsync_FakeSrcKeepservers(c *C) {
+ srcKeepServicesJSON = testKeepServicesJSON
+
+ setupRsync(c, false, false)
err := performKeepRsync()
- c.Check(err, Equals, nil)
+ // the fake service hosts do not resolve, so the error chain ends in "no such host"
+ c.Check(strings.HasSuffix(err.Error(), "no such host"), Equals, true)
+}
- // Now GetIndex from dst and verify that all 5 from src and the 2 extra blocks are found
- dstIndex, err := getUniqueLocators(kcDst, "")
- c.Check(err, Equals, nil)
+// Setup rsync using dstKeepServicesJSON with fake keepservers.
+// Expect error during performKeepRsync due to unreachable dst keepservers.
+func (s *ServerRequiredSuite) TestErrorDuringRsync_FakeDstKeepservers(c *C) {
+ dstKeepServicesJSON = testKeepServicesJSON
- if prefix == "" {
- for _, locator := range srcLocators {
- _, ok := dstIndex[locator]
- c.Assert(ok, Equals, true)
- }
- } else {
- for _, locator := range srcLocatorsMatchingPrefix {
- _, ok := dstIndex[locator]
- c.Assert(ok, Equals, true)
- }
- }
+ // do not start real dst keep servers; dstKeepServicesJSON above supplies the fakes
+ setupRsync(c, false, false)
- for _, locator := range extraDstLocators {
- _, ok := dstIndex[locator]
- c.Assert(ok, Equals, true)
- }
+ err := performKeepRsync()
+ c.Check(strings.HasSuffix(err.Error(), "no such host"), Equals, true)
+}
- if prefix == "" {
- // all blocks from src and the two extra blocks
- c.Assert(len(dstIndex), Equals, len(srcLocators)+len(extraDstLocators))
- } else {
- // one matching prefix, 2 that were initially copied into dst along with src, and the extra blocks
- c.Assert(len(dstIndex), Equals, len(srcLocatorsMatchingPrefix)+len(extraDstLocators)+2)
- }
+// Test rsync with signature error during Get from src.
+func (s *ServerRequiredSuite) TestErrorDuringRsync_ErrorGettingBlockFromSrc(c *C) {
+ setupRsync(c, true, true)
+
+ // put some blocks in src and dst
+ setupTestData(c, true, "")
+
+ // Change blob signing key to a fake key, so that Get from src fails
+ blobSigningKey = "123456789012345678901234yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc"
+
+ err := performKeepRsync()
+ // the signature mismatch surfaces as "Block not found" from the src Get
+ c.Check(err.Error(), Equals, "Block not found")
+}
+
+// Test rsync with error during Put to dst.
+func (s *ServerRequiredSuite) TestErrorDuringRsync_ErrorPuttingBlockInDst(c *C) {
+ setupRsync(c, false, true)
+
+ // put some blocks in src and dst
+ // NOTE(review): setupTestData is called with enforcePermissions=true while
+ // setupRsync above used false — confirm this mismatch is intentional.
+ setupTestData(c, true, "")
+
+ // Increase Want_replicas on dst to result in insufficient replicas error during Put
+ kcDst.Want_replicas = 2
+
+ err := performKeepRsync()
+ c.Check(err.Error(), Equals, "Could not write sufficient replicas")
}