"testing"
"time"
+ "git.arvados.org/arvados.git/lib/controller/dblock"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
// TearDownTest cancels the suite context, then (in the added lines below)
// confirms that the background sweepers' database advisory locks can be
// re-acquired before the next test starts.
func (s *HandlerSuite) TearDownTest(c *check.C) {
s.cancel()
+
+ // Wait for dblocks to be released. Otherwise, a subsequent
+ // test might time out waiting to acquire them.
+ timeout := time.After(10 * time.Second)
// NOTE(review): the single timeout channel is shared by both loop
// iterations, so 10s bounds the TOTAL wait, not the per-locker wait.
+ for _, locker := range []*dblock.DBLocker{dblock.TrashSweep, dblock.ContainerLogSweep} {
+ ok := make(chan struct{})
// Acquiring and immediately releasing the lock proves the sweeper
// goroutine shut down by s.cancel() has let go of it.
+ go func() {
+ if locker.Lock(context.Background(), s.handler.dbConnector.GetDB) {
+ locker.Unlock()
+ }
+ close(ok)
+ }()
// NOTE(review): on timeout the goroutine above is leaked (it keeps
// blocking in Lock); pre-Go-1.22 it would also capture the shared
// loop variable `locker` — confirm the module's Go version.
+ select {
+ case <-timeout:
+ c.Log("timed out waiting for dblocks")
+ c.Fail()
+ case <-ok:
+ }
+ }
}
func (s *HandlerSuite) TestConfigExport(c *check.C) {
func (s *HandlerSuite) TestDiscoveryDocCache(c *check.C) {
// countRailsReqs counts discovery-doc requests that were proxied
// through to RailsAPI, as recorded by railsSpy. The added Wait()
// lets in-flight request dumps land first, so the count is stable.
countRailsReqs := func() int {
+ s.railsSpy.Wait()
n := 0
for _, req := range s.railsSpy.RequestDumps {
if bytes.Contains(req, []byte("/discovery/v1/apis/arvados/v1/rest")) {
// NOTE(review): diff context elided here — lines between the
// Contains check and this Unlock are not visible in this hunk.
defer ent.mtx.Unlock()
}
}
// refreshNow (renamed from expireCache) marks every cache entry as
// merely stale: refreshAfter is reset so the next request serves the
// cached doc but kicks off a background re-fetch.
- expireCache := func() {
+ refreshNow := func() {
waitPendingUpdates()
for _, ent := range s.handler.cache {
ent.refreshAfter = time.Now()
}
}
// expireNow marks every cache entry as fully expired (expireAfter in
// the past), making the cache behave as if it were never populated.
+ expireNow := func() {
+ waitPendingUpdates()
+ for _, ent := range s.handler.cache {
+ ent.expireAfter = time.Now()
+ }
+ }
// Easy path: first req fetches, subsequent reqs use cache.
c.Check(countRailsReqs(), check.Equals, 0)
// Race after expiry: concurrent reqs return the cached data
// but initiate a new fetch in the background.
- expireCache()
+ refreshNow()
holdReqs = make(chan struct{})
wg = getDDConcurrently(5, http.StatusOK, check.Commentf("race after expiry"))
reqsBefore = countRailsReqs()
getDDConcurrently(5, http.StatusOK, check.Commentf("error with warm cache")).Wait()
c.Check(countRailsReqs(), check.Equals, reqsBefore)
- // Error with expired cache => caller gets OK with stale data
+ // Error with stale cache => caller gets OK with stale data
// while the re-fetch is attempted in the background
- expireCache()
+ refreshNow()
wantError, wantBadContent = true, false
reqsBefore = countRailsReqs()
holdReqs = make(chan struct{})
- getDDConcurrently(5, http.StatusOK, check.Commentf("error with expired cache")).Wait()
+ getDDConcurrently(5, http.StatusOK, check.Commentf("error with stale cache")).Wait()
close(holdReqs)
// Only one attempt to re-fetch (holdReqs ensured the first
// update took long enough for the last incoming request to
// arrive)
c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
- expireCache()
+ refreshNow()
wantError, wantBadContent = false, false
reqsBefore = countRailsReqs()
holdReqs = make(chan struct{})
close(holdReqs)
waitPendingUpdates()
c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+
+ // Make sure expireAfter is getting set
+ waitPendingUpdates()
// A freshly refreshed entry's expiry should land strictly between
// the soft TTL and the hard expiry horizon.
// NOTE(review): time.Until(...) would read better than
// ...expireAfter.Sub(time.Now()) — flagged only, hunk left as-is.
+ exp := s.handler.cache["/discovery/v1/apis/arvados/v1/rest"].expireAfter.Sub(time.Now())
+ c.Check(exp > cacheTTL, check.Equals, true)
+ c.Check(exp < cacheExpire, check.Equals, true)
+
+ // After the cache *expires* it behaves as if uninitialized:
+ // each incoming request does a new upstream request until one
+ // succeeds.
+ //
+ // First check failure after expiry:
+ expireNow()
+ wantError, wantBadContent = true, false
+ reqsBefore = countRailsReqs()
+ holdReqs = make(chan struct{})
// All 5 concurrent requests hit Rails (reqsBefore+5) because an
// expired cache gives them nothing to fall back on.
+ wg = getDDConcurrently(5, http.StatusBadGateway, check.Commentf("error after expiry"))
+ close(holdReqs)
+ wg.Wait()
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+5)
+
+ // Success after expiry:
+ wantError, wantBadContent = false, false
+ reqsBefore = countRailsReqs()
+ holdReqs = make(chan struct{})
+ wg = getDDConcurrently(5, http.StatusOK, check.Commentf("success after expiry"))
+ close(holdReqs)
+ wg.Wait()
// Once one fetch succeeds, the refilled cache serves the rest: only
// a single Rails request is expected.
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
}
func (s *HandlerSuite) TestVocabularyExport(c *check.C) {
// NOTE(review): this hunk spans elided context; the lines below
// belong to a later test whose signature is not visible here.
req.Header.Set("Authorization", "Bearer "+token)
resp := httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
// c.Assert aborts the whole test on failure; the replacement
// if !c.Check { return } records the failure but lets any deferred
// cleanup and sibling subtests proceed.
- c.Assert(resp.Code, check.Equals, http.StatusOK,
- check.Commentf("Wasn't able to get data from the controller at %q: %q", url, resp.Body.String()))
+ if !c.Check(resp.Code, check.Equals, http.StatusOK,
+ check.Commentf("Wasn't able to get data from the controller at %q: %q", url, resp.Body.String())) {
+ return
+ }
err = json.Unmarshal(resp.Body.Bytes(), &proxied)
c.Check(err, check.Equals, nil)
}
resp2, err := client.Get(s.cluster.Services.RailsAPI.ExternalURL.String() + url + "/?api_token=" + token)
c.Check(err, check.Equals, nil)
// Same Assert -> Check conversion; note the defer of
// resp2.Body.Close() now precedes the status check, so the body is
// closed even when the early return below is taken.
- c.Assert(resp2.StatusCode, check.Equals, http.StatusOK,
- check.Commentf("Wasn't able to get data from the RailsAPI at %q", url))
defer resp2.Body.Close()
+ if !c.Check(resp2.StatusCode, check.Equals, http.StatusOK,
+ check.Commentf("Wasn't able to get data from the RailsAPI at %q", url)) {
+ return
+ }
db, err := ioutil.ReadAll(resp2.Body)
c.Check(err, check.Equals, nil)
err = json.Unmarshal(db, &direct)
// Map of API path -> fields expected to differ between controller
// and RailsAPI responses (nil means responses must match exactly) —
// presumably; the comparison loop is outside this hunk, so confirm.
testCases := map[string]map[string]bool{
"api_clients/" + arvadostest.TrustedWorkbenchAPIClientUUID: nil,
"api_client_authorizations/" + auth.UUID: {"href": true, "modified_by_client_uuid": true, "modified_by_user_uuid": true},
- "authorized_keys/" + arvadostest.AdminAuthorizedKeysUUID: nil,
+ "authorized_keys/" + arvadostest.AdminAuthorizedKeysUUID: {"href": true},
"collections/" + arvadostest.CollectionWithUniqueWordsUUID: {"href": true},
"containers/" + arvadostest.RunningContainerUUID: nil,
"container_requests/" + arvadostest.QueuedContainerRequestUUID: nil,
"groups/" + arvadostest.AProjectUUID: nil,
"keep_services/" + ksUUID: nil,
"links/" + arvadostest.ActiveUserCanReadAllUsersLinkUUID: nil,
// Removed rows drop the retired jobs/nodes/repositories APIs and
// switch the log fixture to its container-based equivalent.
- "logs/" + arvadostest.CrunchstatForRunningJobLogUUID: nil,
- "nodes/" + arvadostest.IdleNodeUUID: nil,
- "repositories/" + arvadostest.ArvadosRepoUUID: nil,
+ "logs/" + arvadostest.CrunchstatForRunningContainerLogUUID: nil,
"users/" + arvadostest.ActiveUserUUID: {"href": true},
"virtual_machines/" + arvadostest.TestVMUUID: nil,
"workflows/" + arvadostest.WorkflowWithDefinitionYAMLUUID: nil,
func (s *HandlerSuite) TestContainerLogSweep(c *check.C) {
s.cluster.SystemRootToken = arvadostest.SystemRootToken
- s.cluster.Containers.Logging.SweepInterval = arvados.Duration(time.Second / 10)
+ s.cluster.Collections.TrashSweepInterval = arvados.Duration(2 * time.Second)
s.handler.CheckHealth()
ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
logentry, err := s.handler.federation.LogCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
"object_uuid": arvadostest.CompletedContainerUUID,
"event_type": "stderr",
"properties": map[string]interface{}{
- "text": "test trash sweep\n",
+ "text": "test container log sweep\n",
},
}})
c.Assert(err, check.IsNil)