"testing"
"time"
+ "git.arvados.org/arvados.git/lib/controller/dblock"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
func (s *HandlerSuite) TearDownTest(c *check.C) {
s.cancel()
+
+ // Wait for dblocks to be released. Otherwise, a subsequent
+ // test might time out waiting to acquire them.
+ timeout := time.After(10 * time.Second)
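+ // Note the 10-second budget is shared: both lockers below
+ // must become available within the same window.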
+ for _, locker := range []*dblock.DBLocker{dblock.TrashSweep, dblock.ContainerLogSweep} {
+ ok := make(chan struct{})
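+ // Try to take and immediately release the lock in a
+ // goroutine; succeeding means the background sweeper has
+ // released it.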
+ go func() {
+ if locker.Lock(context.Background(), s.handler.dbConnector.GetDB) {
+ locker.Unlock()
+ }
+ close(ok)
+ }()
+ select {
+ case <-timeout:
+ c.Log("timed out waiting for dblocks")
+ c.Fail()
+ case <-ok:
+ }
+ }
}
func (s *HandlerSuite) TestConfigExport(c *check.C) {
func (s *HandlerSuite) TestDiscoveryDocCache(c *check.C) {
countRailsReqs := func() int {
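+ // Wait for requests still in flight through the spy to be
+ // recorded before counting.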
+ s.railsSpy.Wait()
n := 0
for _, req := range s.railsSpy.RequestDumps {
if bytes.Contains(req, []byte("/discovery/v1/apis/arvados/v1/rest")) {
// depending on flags.
var wantError, wantBadContent bool
s.railsSpy.Director = func(req *http.Request) {
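+ // Block each proxied request until the test closes
+ // holdReqs, so the test controls when a cache refresh can
+ // complete.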
+ <-holdReqs
if wantError {
req.Method = "MAKE-COFFEE"
} else if wantBadContent {
getDDConcurrently(5, http.StatusOK, check.Commentf("error with warm cache")).Wait()
c.Check(countRailsReqs(), check.Equals, reqsBefore)
+ checkBackgroundRefresh := func(reqsExpected int) {
+ // There is no guarantee that a background refresh has
+ // progressed far enough that we can detect it
+ // directly (the first line of refresh() might not
+ // have run). So, to avoid false positives, we just
+ // need to poll until it happens.
+ for deadline := time.Now().Add(time.Second); countRailsReqs() == reqsBefore && time.Now().Before(deadline); {
+ c.Logf("countRailsReqs = %d", countRailsReqs())
+ time.Sleep(time.Second / 100)
+ }
+ // Then, to make sure no additional refreshes happen, wait
+ // a bit longer before checking the final request count.
+ time.Sleep(time.Second / 2)
+ c.Check(countRailsReqs(), check.Equals, reqsExpected)
+ }
+
// Error with stale cache => caller gets OK with stale data
// while the re-fetch is attempted in the background
refreshNow()
holdReqs = make(chan struct{})
getDDConcurrently(5, http.StatusOK, check.Commentf("error with stale cache")).Wait()
close(holdReqs)
- // Only one attempt to re-fetch (holdReqs ensured the first
- // update took long enough for the last incoming request to
- // arrive)
- c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+ // After piling up 5 requests (holdReqs ensured the first
+ // update took long enough for the last incoming request to
+ // arrive), there should be only one attempt to re-fetch.
+ checkBackgroundRefresh(reqsBefore + 1)
refreshNow()
wantError, wantBadContent = false, false
holdReqs = make(chan struct{})
getDDConcurrently(5, http.StatusOK, check.Commentf("refresh cache after error condition clears")).Wait()
close(holdReqs)
- waitPendingUpdates()
- c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+ checkBackgroundRefresh(reqsBefore + 1)
// Make sure expireAfter is getting set
waitPendingUpdates()
req.Header.Set("Authorization", "Bearer "+token)
resp := httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
- c.Assert(resp.Code, check.Equals, http.StatusOK,
- check.Commentf("Wasn't able to get data from the controller at %q: %q", url, resp.Body.String()))
+ if !c.Check(resp.Code, check.Equals, http.StatusOK,
+ check.Commentf("Wasn't able to get data from the controller at %q: %q", url, resp.Body.String())) {
+ return
+ }
err = json.Unmarshal(resp.Body.Bytes(), &proxied)
c.Check(err, check.Equals, nil)
}
resp2, err := client.Get(s.cluster.Services.RailsAPI.ExternalURL.String() + url + "/?api_token=" + token)
- c.Check(err, check.Equals, nil)
+ if !c.Check(err, check.IsNil) {
+ return
+ }
- c.Assert(resp2.StatusCode, check.Equals, http.StatusOK,
- check.Commentf("Wasn't able to get data from the RailsAPI at %q", url))
defer resp2.Body.Close()
+ if !c.Check(resp2.StatusCode, check.Equals, http.StatusOK,
+ check.Commentf("Wasn't able to get data from the RailsAPI at %q", url)) {
+ return
+ }
db, err := ioutil.ReadAll(resp2.Body)
c.Check(err, check.Equals, nil)
err = json.Unmarshal(db, &direct)
"api_client_authorization": {
"owner_uuid": "`+arvadostest.AdminUserUUID+`",
"created_by_ip_address": "::1",
- "last_used_by_ip_address": "::1",
- "default_owner_uuid": "`+arvadostest.AdminUserUUID+`"
+ "last_used_by_ip_address": "::1"
}
}`))
req.Header.Set("Authorization", "Bearer "+arvadostest.SystemRootToken)
c.Assert(auth.UUID, check.Not(check.Equals), "")
testCases := map[string]map[string]bool{
- "api_clients/" + arvadostest.TrustedWorkbenchAPIClientUUID: nil,
"api_client_authorizations/" + auth.UUID: {"href": true, "modified_by_client_uuid": true, "modified_by_user_uuid": true},
"authorized_keys/" + arvadostest.AdminAuthorizedKeysUUID: {"href": true},
"collections/" + arvadostest.CollectionWithUniqueWordsUUID: {"href": true},
"groups/" + arvadostest.AProjectUUID: nil,
"keep_services/" + ksUUID: nil,
"links/" + arvadostest.ActiveUserCanReadAllUsersLinkUUID: nil,
- "logs/" + arvadostest.CrunchstatForRunningJobLogUUID: nil,
- "nodes/" + arvadostest.IdleNodeUUID: nil,
- "repositories/" + arvadostest.ArvadosRepoUUID: nil,
+ "logs/" + arvadostest.CrunchstatForRunningContainerLogUUID: nil,
"users/" + arvadostest.ActiveUserUUID: {"href": true},
"virtual_machines/" + arvadostest.TestVMUUID: nil,
"workflows/" + arvadostest.WorkflowWithDefinitionYAMLUUID: nil,
func (s *HandlerSuite) TestContainerLogSweep(c *check.C) {
s.cluster.SystemRootToken = arvadostest.SystemRootToken
- s.cluster.Containers.Logging.SweepInterval = arvados.Duration(time.Second / 10)
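+ // Sweep often enough that the test doesn't have to wait
+ // long for the background sweep to run.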
+ s.cluster.Containers.Logging.SweepInterval = arvados.Duration(2 * time.Second)
s.handler.CheckHealth()
ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
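+ // Create a stderr log entry for a completed container, the
+ // kind of entry the background container log sweep acts on.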
logentry, err := s.handler.federation.LogCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
"object_uuid": arvadostest.CompletedContainerUUID,
"event_type": "stderr",
"properties": map[string]interface{}{
- "text": "test trash sweep\n",
+ "text": "test container log sweep\n",
},
}})
c.Assert(err, check.IsNil)
c.Check(rows, check.Equals, 1, check.Commentf("expect 1 row for user uuid %s", userUUID))
}
}
-
-func (s *HandlerSuite) TestLogLimiting(c *check.C) {
- s.handler.Cluster.API.MaxConcurrentRequests = 2
- s.handler.Cluster.API.LogCreateRequestFraction = 0.5
-
- logreq := httptest.NewRequest("POST", "/arvados/v1/logs", strings.NewReader(`{
- "log": {
- "event_type": "test"
- }
- }`))
- logreq.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
-
- // Log create succeeds
- for i := 0; i < 2; i++ {
- resp := httptest.NewRecorder()
- s.handler.ServeHTTP(resp, logreq)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- var lg arvados.Log
- err := json.Unmarshal(resp.Body.Bytes(), &lg)
- c.Check(err, check.IsNil)
- c.Check(lg.UUID, check.Matches, "zzzzz-57u5n-.*")
- }
-
- // Pretend there's a log create in flight
- s.handler.limitLogCreate <- struct{}{}
-
- // Log create should be rejected now
- resp := httptest.NewRecorder()
- s.handler.ServeHTTP(resp, logreq)
- c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
-
- // Other requests still succeed
- req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
- req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp = httptest.NewRecorder()
- s.handler.ServeHTTP(resp, req)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- var u arvados.User
- err := json.Unmarshal(resp.Body.Bytes(), &u)
- c.Check(err, check.IsNil)
- c.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)
-
- // log create still fails
- resp = httptest.NewRecorder()
- s.handler.ServeHTTP(resp, logreq)
- c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
-
- // Pretend in-flight log is done
- <-s.handler.limitLogCreate
-
- // log create succeeds again
- resp = httptest.NewRecorder()
- s.handler.ServeHTTP(resp, logreq)
- c.Check(resp.Code, check.Equals, http.StatusOK)
-
-}