"net/http"
"os"
"os/exec"
- "path/filepath"
"strconv"
"strings"
+ "sync"
+ "time"
"git.arvados.org/arvados.git/lib/boot"
- "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&IntegrationSuite{})
type IntegrationSuite struct {
- testClusters map[string]*boot.TestCluster
+ super *boot.Supervisor
oidcprovider *arvadostest.OIDCProvider
}
func (s *IntegrationSuite) SetUpSuite(c *check.C) {
- cwd, _ := os.Getwd()
-
s.oidcprovider = arvadostest.NewOIDCProvider(c)
s.oidcprovider.AuthEmail = "user@example.com"
s.oidcprovider.AuthEmailVerified = true
s.oidcprovider.ValidClientID = "clientid"
s.oidcprovider.ValidClientSecret = "clientsecret"
- s.testClusters = map[string]*boot.TestCluster{
- "z1111": nil,
- "z2222": nil,
- "z3333": nil,
- }
hostport := map[string]string{}
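// hostport maps each cluster ID to a distinct loopback address:port on
// which that cluster's controller will listen.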
- for id := range s.testClusters {
+ for _, id := range []string{"z1111", "z2222", "z3333"} {
hostport[id] = func() string {
// TODO: Instead of expecting random ports on
// 127.0.0.11, 22, 33 to be race-safe, try
// different ports on the same addr until one works.
ln, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
ln.Close()
_, port, _ := net.SplitHostPort(ln.Addr().String())
return "127.0.0." + id[3:] + ":" + port
}()
}
- for id := range s.testClusters {
- yaml := `Clusters:
+ yaml := "Clusters:\n"
+ for id := range hostport {
+ yaml += `
` + id + `:
Services:
Controller:
LoginCluster: z1111
`
}
-
- loader := config.NewLoader(bytes.NewBufferString(yaml), ctxlog.TestLogger(c))
- loader.Path = "-"
- loader.SkipLegacy = true
- loader.SkipAPICalls = true
- cfg, err := loader.Load()
- c.Assert(err, check.IsNil)
- tc := boot.NewTestCluster(
- filepath.Join(cwd, "..", ".."),
- id, cfg, "127.0.0."+id[3:], c.Log)
- tc.Super.NoWorkbench1 = true
- tc.Start()
- s.testClusters[id] = tc
}
- for _, tc := range s.testClusters {
- ok := tc.WaitReady()
- c.Assert(ok, check.Equals, true)
+ s.super = &boot.Supervisor{
+ ClusterType: "test",
+ ConfigYAML: yaml,
+ Stderr: ctxlog.LogWriter(c.Log),
+ NoWorkbench1: true,
+ NoWorkbench2: true,
+ OwnTemporaryDatabase: true,
}
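+ // The supervisor boots every cluster defined in ConfigYAML;
+ // OwnTemporaryDatabase gives each cluster its own throwaway postgres
+ // database.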
+
+ // Give up if startup takes longer than 3m
+ timeout := time.AfterFunc(3*time.Minute, s.super.Stop)
+ defer timeout.Stop()
+ s.super.Start(context.Background())
+ ok := s.super.WaitReady()
+ c.Assert(ok, check.Equals, true)
}
func (s *IntegrationSuite) TearDownSuite(c *check.C) {
- for _, c := range s.testClusters {
- c.Super.Stop()
+ if s.super != nil {
+ s.super.Stop()
+ s.super.Wait()
}
}
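+// A collection created without explicit storage classes should inherit the
+// cluster's configured defaults, which the Keep client reports as
+// DefaultStorageClasses.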
+func (s *IntegrationSuite) TestDefaultStorageClassesOnCollections(c *check.C) {
+ conn := s.super.Conn("z1111")
+ rootctx, _, _ := s.super.RootClients("z1111")
+ userctx, _, kc, _ := s.super.UserClients("z1111", rootctx, c, conn, s.oidcprovider.AuthEmail, true)
+ c.Assert(len(kc.DefaultStorageClasses) > 0, check.Equals, true)
+ coll, err := conn.CollectionCreate(userctx, arvados.CreateOptions{})
+ c.Assert(err, check.IsNil)
+ c.Assert(coll.StorageClassesDesired, check.DeepEquals, kc.DefaultStorageClasses)
+}
+
func (s *IntegrationSuite) TestGetCollectionByPDH(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- conn3 := s.testClusters["z3333"].Conn()
- userctx1, ac1, kc1, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ conn3 := s.super.Conn("z3333")
+ userctx1, ac1, kc1, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Create the collection to find its PDH (but don't save it
// anywhere yet)
c.Check(coll.PortableDataHash, check.Equals, pdh)
}
+// Tests bug #18004: concurrent requests presenting the same not-yet-cached
+// remote token should not race in the remote user/token cache.
+func (s *IntegrationSuite) TestRemoteUserAndTokenCacheRace(c *check.C) {
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ rootctx2, _, _ := s.super.RootClients("z2222")
+ conn2 := s.super.Conn("z2222")
+ userctx1, _, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "user2@example.com", true)
+
+ var wg1, wg2 sync.WaitGroup
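+ // wg1 is a start barrier: every goroutine blocks on it until all of
+ // them have been launched, so the requests land concurrently. wg2
+ // tracks completion.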
+ creqs := 100
+
+ // Make concurrent requests to z2222 with a local token to make sure more
+ // than one worker is listening.
+ wg1.Add(1)
+ for i := 0; i < creqs; i++ {
+ wg2.Add(1)
+ go func() {
+ defer wg2.Done()
+ wg1.Wait()
+ _, err := conn2.UserGetCurrent(rootctx2, arvados.GetOptions{})
+ c.Check(err, check.IsNil, check.Commentf("warm up phase failed"))
+ }()
+ }
+ wg1.Done()
+ wg2.Wait()
+
+ // Real test pass -- use a remote token different from the one used in
+ // the warm-up phase.
+ wg1.Add(1)
+ for i := 0; i < creqs; i++ {
+ wg2.Add(1)
+ go func() {
+ defer wg2.Done()
+ wg1.Wait()
+ // Request the current user from z2222 using the remote token.
+ _, err := conn2.UserGetCurrent(userctx1, arvados.GetOptions{})
+ c.Check(err, check.IsNil, check.Commentf("testing phase failed"))
+ }()
+ }
+ wg1.Done()
+ wg2.Wait()
+}
+
func (s *IntegrationSuite) TestS3WithFederatedToken(c *check.C) {
if _, err := exec.LookPath("s3cmd"); err != nil {
c.Skip("s3cmd not in PATH")
testText := "IntegrationSuite.TestS3WithFederatedToken"
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- userctx1, ac1, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
- conn3 := s.testClusters["z3333"].Conn()
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ userctx1, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn3 := s.super.Conn("z3333")
createColl := func(clusterID string) arvados.Collection {
- _, ac, kc := s.testClusters[clusterID].ClientsWithToken(ac1.AuthToken)
+ _, ac, kc := s.super.ClientsWithToken(clusterID, ac1.AuthToken)
var coll arvados.Collection
fs, err := coll.FileSystem(ac, kc)
c.Assert(err, check.IsNil)
c.Assert(err, check.IsNil)
mtxt, err := fs.MarshalManifest(".")
c.Assert(err, check.IsNil)
- coll, err = s.testClusters[clusterID].Conn().CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+ coll, err = s.super.Conn(clusterID).CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
"manifest_text": mtxt,
}})
c.Assert(err, check.IsNil)
}
func (s *IntegrationSuite) TestGetCollectionAsAnonymous(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- conn3 := s.testClusters["z3333"].Conn()
- rootctx1, rootac1, rootkc1 := s.testClusters["z1111"].RootClients()
- anonctx3, anonac3, _ := s.testClusters["z3333"].AnonymousClients()
+ conn1 := s.super.Conn("z1111")
+ conn3 := s.super.Conn("z3333")
+ rootctx1, rootac1, rootkc1 := s.super.RootClients("z1111")
+ anonctx3, anonac3, _ := s.super.AnonymousClients("z3333")
// Make sure anonymous token was set
c.Assert(anonac3.AuthToken, check.Not(check.Equals), "")
c.Check(err, check.IsNil)
// Make a v2 token of the z3 anonymous user, and use it on z1
- _, anonac1, _ := s.testClusters["z1111"].ClientsWithToken(outAuth.TokenV2())
+ _, anonac1, _ := s.super.ClientsWithToken("z1111", outAuth.TokenV2())
outUser2, err := anonac1.CurrentUser()
c.Check(err, check.IsNil)
// z3 anonymous user will be mapped to the z1 anonymous user
c.Check(coll.PortableDataHash, check.Equals, pdh)
}
+// z3333 should forward the locally-issued anonymous user token to its login
+// cluster z1111. That is no problem because the login cluster controller will
+// map any anonymous user token to its local anonymous user.
+//
+// This needs to work because wb1 has a tendency to slap the local anonymous
+// user token on every request as a reader_token, which the controller folds
+// into the request's token list.
+//
+// Use a z1111 user token and the anonymous token from z3333 passed in as a
+// reader_token to do a request on z3333, asking for the z1111 anonymous user
+// object. The request will be forwarded to the z1111 cluster. The presence of
+// the z3333 anonymous user token should not prevent the request from being
+// forwarded.
+func (s *IntegrationSuite) TestForwardAnonymousTokenToLoginCluster(c *check.C) {
+ conn1 := s.super.Conn("z1111")
+
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, anonac3, _ := s.super.AnonymousClients("z3333")
+
+ // Make a user connection to z3333 (using a z1111 user, because that's the login cluster)
+ _, userac1, _, _ := s.super.UserClients("z3333", rootctx1, c, conn1, "user@example.com", true)
+
+ // Get the anonymous user token for z3333
+ var anon3Auth arvados.APIClientAuthorization
+ err := anonac3.RequestAndDecode(&anon3Auth, "GET", "/arvados/v1/api_client_authorizations/current", nil, nil)
+ c.Check(err, check.IsNil)
+
+ var userList arvados.UserList
+ where := make(map[string]string)
+ where["uuid"] = "z1111-tpzed-anonymouspublic"
+ err = userac1.RequestAndDecode(&userList, "GET", "/arvados/v1/users", nil,
+ map[string]interface{}{
+ "reader_tokens": []string{anon3Auth.TokenV2()},
+ "where": where,
+ },
+ )
+ // The local z3333 anonymous token must be allowed to be forwarded to the login cluster
+ c.Check(err, check.IsNil)
+
+ userac1.AuthToken = "v2/z1111-gj3su-asdfasdfasdfasd/this-token-does-not-validate-so-anonymous-token-will-be-used-instead"
+ err = userac1.RequestAndDecode(&userList, "GET", "/arvados/v1/users", nil,
+ map[string]interface{}{
+ "reader_tokens": []string{anon3Auth.TokenV2()},
+ "where": where,
+ },
+ )
+ c.Check(err, check.IsNil)
+}
+
// Get a token from the login cluster (z1111), use it to submit a
// container request on z2222.
func (s *IntegrationSuite) TestCreateContainerRequestWithFedToken(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- _, ac1, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Use ac2 to get the discovery doc with a blank token, so the
// SDK doesn't magically pass the z1111 token to z2222 before
// we're ready to start our test.
- _, ac2, _ := s.testClusters["z2222"].ClientsWithToken("")
+ _, ac2, _ := s.super.ClientsWithToken("z2222", "")
var dd map[string]interface{}
err := ac2.RequestAndDecode(&dd, "GET", "discovery/v1/apis/arvados/v1/rest", nil, nil)
c.Assert(err, check.IsNil)
}
func (s *IntegrationSuite) TestCreateContainerRequestWithBadToken(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- _, ac1, _, au := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, ac1, _, au := s.super.UserClients("z1111", rootctx1, c, conn1, "user@example.com", true)
tests := []struct {
name string
}
}
+func (s *IntegrationSuite) TestRequestIDHeader(c *check.C) {
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ userctx1, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "user@example.com", true)
+
+ coll, err := conn1.CollectionCreate(userctx1, arvados.CreateOptions{})
+ c.Check(err, check.IsNil)
+ specimen, err := conn1.SpecimenCreate(userctx1, arvados.CreateOptions{})
+ c.Check(err, check.IsNil)
+
+ tests := []struct {
+ path string
+ reqIdProvided bool
+ notFoundRequest bool
+ }{
+ {"/arvados/v1/collections", false, false},
+ {"/arvados/v1/collections", true, false},
+ {"/arvados/v1/nonexistant", false, true},
+ {"/arvados/v1/nonexistant", true, true},
+ {"/arvados/v1/collections/" + coll.UUID, false, false},
+ {"/arvados/v1/collections/" + coll.UUID, true, false},
+ {"/arvados/v1/specimens/" + specimen.UUID, false, false},
+ {"/arvados/v1/specimens/" + specimen.UUID, true, false},
+ // new code path (lib/controller/router etc) - single-cluster request
+ {"/arvados/v1/collections/z1111-4zz18-0123456789abcde", false, true},
+ {"/arvados/v1/collections/z1111-4zz18-0123456789abcde", true, true},
+ // new code path (lib/controller/router etc) - federated request
+ {"/arvados/v1/collections/z2222-4zz18-0123456789abcde", false, true},
+ {"/arvados/v1/collections/z2222-4zz18-0123456789abcde", true, true},
+ // old code path (proxyRailsAPI) - single-cluster request
+ {"/arvados/v1/specimens/z1111-j58dm-0123456789abcde", false, true},
+ {"/arvados/v1/specimens/z1111-j58dm-0123456789abcde", true, true},
+ // old code path (setupProxyRemoteCluster) - federated request
+ {"/arvados/v1/workflows/z2222-7fd4e-0123456789abcde", false, true},
+ {"/arvados/v1/workflows/z2222-7fd4e-0123456789abcde", true, true},
+ }
+
+ for _, tt := range tests {
+ c.Log(c.TestName() + " " + tt.path)
+ req, err := http.NewRequest("GET", "https://"+ac1.APIHost+tt.path, nil)
+ c.Assert(err, check.IsNil)
+ customReqId := "abcdeG"
+ if !tt.reqIdProvided {
+ c.Assert(req.Header.Get("X-Request-Id"), check.Equals, "")
+ } else {
+ req.Header.Set("X-Request-Id", customReqId)
+ }
+ resp, err := ac1.Do(req)
+ c.Assert(err, check.IsNil)
+ if tt.notFoundRequest {
+ c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+ } else {
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ }
+ respHdr := resp.Header.Get("X-Request-Id")
+ if tt.reqIdProvided {
+ c.Check(respHdr, check.Equals, customReqId)
+ } else {
+ c.Check(respHdr, check.Matches, `req-[0-9a-zA-Z]{20}`)
+ }
+ if tt.notFoundRequest {
+ var jresp httpserver.ErrorResponse
+ err := json.NewDecoder(resp.Body).Decode(&jresp)
+ c.Check(err, check.IsNil)
+ c.Assert(jresp.Errors, check.HasLen, 1)
+ c.Check(jresp.Errors[0], check.Matches, `.*\(`+respHdr+`\).*`)
+ }
+ }
+}
+
// We test direct access to the database.
-// normally an integration test would not have a database access, but in this case we need
+// Normally an integration test would not have database access, but in this case we need
// to test tokens that are secret, so there is no API response that will give them back.
func (s *IntegrationSuite) dbConn(c *check.C, clusterID string) (*sql.DB, *sql.Conn) {
ctx := context.Background()
- db, err := sql.Open("postgres", s.testClusters[clusterID].Super.Cluster().PostgreSQL.Connection.String())
+ db, err := sql.Open("postgres", s.super.Cluster(clusterID).PostgreSQL.Connection.String())
c.Assert(err, check.IsNil)
conn, err := db.Conn(ctx)
db, dbconn := s.dbConn(c, "z1111")
defer db.Close()
defer dbconn.Close()
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- userctx1, ac1, _, au := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ userctx1, ac1, _, au := s.super.UserClients("z1111", rootctx1, c, conn1, "user@example.com", true)
tests := []struct {
name string
// one cluster with another cluster as the destination
// and check the tokens are being handled properly
func (s *IntegrationSuite) TestIntermediateCluster(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- uctx1, ac1, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ uctx1, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "user@example.com", true)
tests := []struct {
name string
}
}
+// Test for #17785
+func (s *IntegrationSuite) TestFederatedApiClientAuthHandling(c *check.C) {
+ rootctx1, rootclnt1, _ := s.super.RootClients("z1111")
+ conn1 := s.super.Conn("z1111")
+
+ // Make sure LoginCluster is properly configured
+ for _, cls := range []string{"z1111", "z3333"} {
+ c.Check(
+ s.super.Cluster(cls).Login.LoginCluster,
+ check.Equals, "z1111",
+ check.Commentf("incorrect LoginCluster config on cluster %q", cls))
+ }
+ // Get user's UUID & attempt to create a token for it on the remote cluster
+ _, _, _, user := s.super.UserClients("z1111", rootctx1, c, conn1,
+ "user@example.com", true)
+ _, rootclnt3, _ := s.super.ClientsWithToken("z3333", rootclnt1.AuthToken)
+ var resp arvados.APIClientAuthorization
+ err := rootclnt3.RequestAndDecode(
+ &resp, "POST", "arvados/v1/api_client_authorizations", nil,
+ map[string]interface{}{
+ "api_client_authorization": map[string]string{
+ "owner_uuid": user.UUID,
+ },
+ },
+ )
+ c.Assert(err, check.IsNil)
+ c.Assert(resp.APIClientID, check.Not(check.Equals), 0)
+ newTok := resp.TokenV2()
+ c.Assert(newTok, check.Not(check.Equals), "")
+
+ // Confirm the token is from z1111
+ c.Assert(strings.HasPrefix(newTok, "v2/z1111-gj3su-"), check.Equals, true)
+
+ // Confirm the token works and is from the correct user
+ _, rootclnt3bis, _ := s.super.ClientsWithToken("z3333", newTok)
+ var curUser arvados.User
+ err = rootclnt3bis.RequestAndDecode(
+ &curUser, "GET", "arvados/v1/users/current", nil, nil,
+ )
+ c.Assert(err, check.IsNil)
+ c.Assert(curUser.UUID, check.Equals, user.UUID)
+
+ // Request the ApiClientAuthorization list using the new token
+ _, userClient, _ := s.super.ClientsWithToken("z3333", newTok)
+ var acaLst arvados.APIClientAuthorizationList
+ err = userClient.RequestAndDecode(
+ &acaLst, "GET", "arvados/v1/api_client_authorizations", nil, nil,
+ )
+ c.Assert(err, check.IsNil)
+}
+
+// Test for bug #18076
+func (s *IntegrationSuite) TestStaleCachedUserRecord(c *check.C) {
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, rootclnt3, _ := s.super.RootClients("z3333")
+ conn1 := s.super.Conn("z1111")
+ conn3 := s.super.Conn("z3333")
+
+ // Make sure LoginCluster is properly configured
+ for _, cls := range []string{"z1111", "z3333"} {
+ c.Check(
+ s.super.Cluster(cls).Login.LoginCluster,
+ check.Equals, "z1111",
+ check.Commentf("incorrect LoginCluster config on cluster %q", cls))
+ }
+
+ for testCaseNr, testCase := range []struct {
+ name string
+ withRepository bool
+ }{
+ {"User without local repository", false},
+ {"User with local repository", true},
+ } {
+ c.Log(c.TestName() + " " + testCase.name)
+ // Create some users, request them on the federated cluster so they're cached.
+ var users []arvados.User
+ for userNr := 0; userNr < 2; userNr++ {
+ _, _, _, user := s.super.UserClients("z1111",
+ rootctx1,
+ c,
+ conn1,
+ fmt.Sprintf("user%d%d@example.com", testCaseNr, userNr),
+ true)
+ c.Assert(user.Username, check.Not(check.Equals), "")
+ users = append(users, user)
+
+ lst, err := conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})
+ c.Assert(err, check.Equals, nil)
+ userFound := false
+ for _, fedUser := range lst.Items {
+ if fedUser.UUID == user.UUID {
+ c.Assert(fedUser.Username, check.Equals, user.Username)
+ userFound = true
+ break
+ }
+ }
+ c.Assert(userFound, check.Equals, true)
+
+ if testCase.withRepository {
+ var repo interface{}
+ err = rootclnt3.RequestAndDecode(
+ &repo, "POST", "arvados/v1/repositories", nil,
+ map[string]interface{}{
+ "repository": map[string]string{
+ "name": fmt.Sprintf("%s/test", user.Username),
+ "owner_uuid": user.UUID,
+ },
+ },
+ )
+ c.Assert(err, check.IsNil)
+ }
+ }
+
+ // Swap the usernames
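+ // Usernames are unique, so the swap takes three steps: clear
+ // users[0]'s username, give its old name to users[1], then give
+ // users[1]'s old name to users[0].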
+ _, err := conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
+ UUID: users[0].UUID,
+ Attrs: map[string]interface{}{
+ "username": "",
+ },
+ })
+ c.Assert(err, check.Equals, nil)
+ _, err = conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
+ UUID: users[1].UUID,
+ Attrs: map[string]interface{}{
+ "username": users[0].Username,
+ },
+ })
+ c.Assert(err, check.Equals, nil)
+ _, err = conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
+ UUID: users[0].UUID,
+ Attrs: map[string]interface{}{
+ "username": users[1].Username,
+ },
+ })
+ c.Assert(err, check.Equals, nil)
+
+ // Re-request the list on the federated cluster & check for updates
+ lst, err := conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})
+ c.Assert(err, check.Equals, nil)
+ var user0Found, user1Found bool
+ for _, user := range lst.Items {
+ if user.UUID == users[0].UUID {
+ user0Found = true
+ c.Assert(user.Username, check.Equals, users[1].Username)
+ } else if user.UUID == users[1].UUID {
+ user1Found = true
+ c.Assert(user.Username, check.Equals, users[0].Username)
+ }
+ }
+ c.Assert(user0Found, check.Equals, true)
+ c.Assert(user1Found, check.Equals, true)
+ }
+}
+
// Test for bug #16263
func (s *IntegrationSuite) TestListUsers(c *check.C) {
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- conn1 := s.testClusters["z1111"].Conn()
- conn3 := s.testClusters["z3333"].Conn()
- userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ conn1 := s.super.Conn("z1111")
+ conn3 := s.super.Conn("z3333")
+ userctx1, _, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Make sure LoginCluster is properly configured
- for cls := range s.testClusters {
+ for _, cls := range []string{"z1111", "z2222", "z3333"} {
c.Check(
- s.testClusters[cls].Config.Clusters[cls].Login.LoginCluster,
+ s.super.Cluster(cls).Login.LoginCluster,
check.Equals, "z1111",
check.Commentf("incorrect LoginCluster config on cluster %q", cls))
}
for _, user := range lst.Items {
if user.Username == "" {
nullUsername = true
+ break
}
}
c.Assert(nullUsername, check.Equals, true)
}
c.Check(found, check.Equals, true)
- // Deactivated user can see is_active==false via "get current
- // user" API
+ // Deactivated user no longer has a working token
user1, err = conn3.UserGetCurrent(userctx1, arvados.GetOptions{})
- c.Assert(err, check.IsNil)
- c.Check(user1.IsActive, check.Equals, false)
+ c.Assert(err, check.ErrorMatches, `.*401 Unauthorized.*`)
}
func (s *IntegrationSuite) TestSetupUserWithVM(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- conn3 := s.testClusters["z3333"].Conn()
- rootctx1, rootac1, _ := s.testClusters["z1111"].RootClients()
+ conn1 := s.super.Conn("z1111")
+ conn3 := s.super.Conn("z3333")
+ rootctx1, rootac1, _ := s.super.RootClients("z1111")
// Create user on LoginCluster z1111
- _, _, _, user := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ _, _, _, user := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Make a new root token (because rootClients() uses SystemRootToken)
var outAuth arvados.APIClientAuthorization
c.Check(err, check.IsNil)
// Make a v2 root token to communicate with z3333
- rootctx3, rootac3, _ := s.testClusters["z3333"].ClientsWithToken(outAuth.TokenV2())
+ rootctx3, rootac3, _ := s.super.ClientsWithToken("z3333", outAuth.TokenV2())
// Create VM on z3333
var outVM arvados.VirtualMachine
}
func (s *IntegrationSuite) TestOIDCAccessTokenAuth(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
accesstoken := s.oidcprovider.ValidAccessToken()
{
c.Logf("save collection to %s", clusterID)
- conn := s.testClusters[clusterID].Conn()
- ctx, ac, kc := s.testClusters[clusterID].ClientsWithToken(accesstoken)
+ conn := s.super.Conn(clusterID)
+ ctx, ac, kc := s.super.ClientsWithToken(clusterID, accesstoken)
fs, err := coll.FileSystem(ac, kc)
c.Assert(err, check.IsNil)
for _, readClusterID := range []string{"z1111", "z2222", "z3333"} {
c.Logf("retrieve %s from %s", coll.UUID, readClusterID)
- conn := s.testClusters[readClusterID].Conn()
- ctx, ac, kc := s.testClusters[readClusterID].ClientsWithToken(accesstoken)
+ conn := s.super.Conn(readClusterID)
+ ctx, ac, kc := s.super.ClientsWithToken(readClusterID, accesstoken)
user, err := conn.UserGetCurrent(ctx, arvados.GetOptions{})
c.Assert(err, check.IsNil)
}
}
}
+
+// z3333 should not forward a locally-issued container runtime token,
+// associated with a z1111 user, to its login cluster z1111. z1111
+// would only call back to z3333 and then reject the response because
+// the user ID does not match the token prefix. See
+// dev.arvados.org/issues/18346
+func (s *IntegrationSuite) TestForwardRuntimeTokenToLoginCluster(c *check.C) {
+ db3, db3conn := s.dbConn(c, "z3333")
+ defer db3.Close()
+ defer db3conn.Close()
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ rootctx3, _, _ := s.super.RootClients("z3333")
+ conn1 := s.super.Conn("z1111")
+ conn3 := s.super.Conn("z3333")
+ userctx1, _, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "user@example.com", true)
+
+ user1, err := conn1.UserGetCurrent(userctx1, arvados.GetOptions{})
+ c.Assert(err, check.IsNil)
+ c.Logf("user1 %+v", user1)
+
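+ // Create a stand-in for a docker image: a collection holding a single
+ // empty file named like a sha256 image tarball.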
+ imageColl, err := conn3.CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+ "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar\n",
+ }})
+ c.Assert(err, check.IsNil)
+ c.Logf("imageColl %+v", imageColl)
+
+ cr, err := conn3.ContainerRequestCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+ "state": "Committed",
+ "command": []string{"echo"},
+ "container_image": imageColl.PortableDataHash,
+ "cwd": "/",
+ "output_path": "/",
+ "priority": 1,
+ "runtime_constraints": arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1000000000,
+ },
+ }})
+ c.Assert(err, check.IsNil)
+ c.Logf("container request %+v", cr)
+ ctr, err := conn3.ContainerLock(rootctx3, arvados.GetOptions{UUID: cr.ContainerUUID})
+ c.Assert(err, check.IsNil)
+ c.Logf("container %+v", ctr)
+
+ // We could use conn3.ContainerAuth() here, but that API
+ // hasn't been added to sdk/go/arvados/api.go yet.
+ row := db3conn.QueryRowContext(context.Background(), `SELECT api_token from api_client_authorizations where uuid=$1`, ctr.AuthUUID)
+ c.Check(row, check.NotNil)
+ var val sql.NullString
+ c.Assert(row.Scan(&val), check.IsNil)
+ c.Assert(val.Valid, check.Equals, true)
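+ // Assemble the v2-format token: v2/<auth uuid>/<secret>.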
+ runtimeToken := "v2/" + ctr.AuthUUID + "/" + val.String
+ ctrctx, _, _ := s.super.ClientsWithToken("z3333", runtimeToken)
+ c.Logf("container runtime token %+v", runtimeToken)
+
+ _, err = conn3.UserGet(ctrctx, arvados.GetOptions{UUID: user1.UUID})
+ c.Assert(err, check.NotNil)
+ c.Check(err, check.ErrorMatches, `request failed: .* 401 Unauthorized: cannot use a locally issued token to forward a request to our login cluster \(z1111\)`)
+ c.Check(err, check.Not(check.ErrorMatches), `(?ms).*127\.0\.0\.11.*`)
+}