+// TestOIDCAuthorizer exercises the OIDC access-token authorizer end to
+// end against the suite's fake OIDC provider and a real test database:
+// it checks token exchange and storage, positive/negative result
+// caching, concurrent use of a single token, in-memory cache expiry,
+// and the AcceptAccessToken / AcceptAccessTokenScope config matrix.
+func (s *OIDCLoginSuite) TestOIDCAuthorizer(c *check.C) {
+ // Configure the cluster for OIDC login (not Google) with the fake
+ // provider's issuer URL and client credentials that match what the
+ // fake provider will accept.
+ s.cluster.Login.Google.Enable = false
+ s.cluster.Login.OpenIDConnect.Enable = true
+ // Round-trip the issuer URL through a quoted JSON string so it is
+ // parsed into the config's Issuer field the same way config file
+ // loading would. NOTE(review): the Unmarshal error is discarded —
+ // presumably safe since the input is a freshly quoted string, but
+ // worth confirming.
+ json.Unmarshal([]byte(fmt.Sprintf("%q", s.fakeProvider.Issuer.URL)), &s.cluster.Login.OpenIDConnect.Issuer)
+ s.cluster.Login.OpenIDConnect.ClientID = "oidc#client#id"
+ s.cluster.Login.OpenIDConnect.ClientSecret = "oidc#client#secret"
+ s.cluster.Login.OpenIDConnect.AcceptAccessToken = true
+ s.cluster.Login.OpenIDConnect.AcceptAccessTokenScope = ""
+ s.fakeProvider.ValidClientID = "oidc#client#id"
+ s.fakeProvider.ValidClientSecret = "oidc#client#secret"
+ db := arvadostest.DB(c, s.cluster)
+
+ // Shrink the package-level cache TTLs so expiry paths can be
+ // exercised with millisecond sleeps later in the test. (These are
+ // package globals; tokenCacheNegativeTTL is temporarily raised
+ // again below.)
+ tokenCacheTTL = time.Millisecond
+ tokenCacheRaceWindow = time.Millisecond
+ tokenCacheNegativeTTL = time.Millisecond
+
+ oidcAuthorizer := OIDCAccessTokenAuthorizer(s.cluster, func(context.Context) (*sqlx.DB, error) { return db, nil })
+ accessToken := s.fakeProvider.ValidAccessToken()
+
+ // The authorizer stores the OIDC access token in the database under
+ // an HMAC-SHA256 digest keyed by SystemRootToken; compute the same
+ // digest here so we can look the row up directly.
+ mac := hmac.New(sha256.New, []byte(s.cluster.SystemRootToken))
+ io.WriteString(mac, accessToken)
+ apiToken := fmt.Sprintf("%x", mac.Sum(nil))
+
+ // checkTokenInDB asserts that a row for apiToken exists and that
+ // its expires_at is within ±1s of now, returning the expiry so
+ // callers can compare successive refreshes.
+ checkTokenInDB := func() time.Time {
+ var exp time.Time
+ err := db.QueryRow(`select expires_at at time zone 'UTC' from api_client_authorizations where api_token=$1`, apiToken).Scan(&exp)
+ c.Check(err, check.IsNil)
+ c.Check(exp.Sub(time.Now()) > -time.Second, check.Equals, true)
+ c.Check(exp.Sub(time.Now()) < time.Second, check.Equals, true)
+ return exp
+ }
+ // cleanup clears both the in-memory cache and the DB row so each
+ // phase of the test starts from a known-empty state.
+ cleanup := func() {
+ oidcAuthorizer.cache.Purge()
+ _, err := db.Exec(`delete from api_client_authorizations where api_token=$1`, apiToken)
+ c.Check(err, check.IsNil)
+ }
+ cleanup()
+ defer cleanup()
+
+ ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{accessToken}})
+
+ // Check behavior on 5xx/network errors (don't cache) vs 4xx
+ // (do cache)
+ {
+ call := oidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return nil, nil
+ })
+
+ // If fakeProvider UserInfo endpoint returns 502, we
+ // should fail, return an error, and *not* cache the
+ // negative result.
+ tokenCacheNegativeTTL = time.Minute
+ s.fakeProvider.UserInfoErrorStatus = 502
+ _, err := call(ctx, nil)
+ c.Check(err, check.NotNil)
+
+ // The negative result was not cached, so retrying
+ // immediately (with UserInfo working now) should
+ // succeed.
+ s.fakeProvider.UserInfoErrorStatus = 0
+ _, err = call(ctx, nil)
+ c.Check(err, check.IsNil)
+ checkTokenInDB()
+
+ cleanup()
+
+ // UserInfo 401 => cache the negative result, but
+ // don't return an error (just pass the token through
+ // as a v1 token)
+ s.fakeProvider.UserInfoErrorStatus = 401
+ _, err = call(ctx, nil)
+ c.Check(err, check.IsNil)
+ // A negative cache entry is apparently stored as a bare
+ // time.Time (its expiry) rather than a richer struct —
+ // FitsTypeOf distinguishes it from a positive entry.
+ ent, ok := oidcAuthorizer.cache.Get(accessToken)
+ c.Check(ok, check.Equals, true)
+ c.Check(ent, check.FitsTypeOf, time.Time{})
+
+ // UserInfo succeeds now, but we still have a cached
+ // negative result.
+ s.fakeProvider.UserInfoErrorStatus = 0
+ _, err = call(ctx, nil)
+ c.Check(err, check.IsNil)
+ ent, ok = oidcAuthorizer.cache.Get(accessToken)
+ c.Check(ok, check.Equals, true)
+ c.Check(ent, check.FitsTypeOf, time.Time{})
+
+ // Restore the short negative TTL set at the top of the test.
+ tokenCacheNegativeTTL = time.Millisecond
+ cleanup()
+ }
+
+ // Force genuine concurrency: the fake provider parks each UserInfo
+ // request on HoldUserInfo, and the helper goroutine closes
+ // ReleaseUserInfo only after all `concurrent` requests have
+ // arrived, so every caller is in flight simultaneously.
+ var exp1 time.Time
+ concurrent := 4
+ s.fakeProvider.HoldUserInfo = make(chan *http.Request)
+ s.fakeProvider.ReleaseUserInfo = make(chan struct{})
+ go func() {
+ for i := 0; ; i++ {
+ if i == concurrent {
+ close(s.fakeProvider.ReleaseUserInfo)
+ }
+ <-s.fakeProvider.HoldUserInfo
+ }
+ }()
+ var wg sync.WaitGroup
+ for i := 0; i < concurrent; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _, err := oidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {
+ c.Logf("concurrent req %d/%d", i, concurrent)
+
+ // Inside the wrapped call, the credentials should
+ // still carry the original access token, and the
+ // exchanged token row should exist in the DB.
+ creds, ok := auth.FromContext(ctx)
+ c.Assert(ok, check.Equals, true)
+ c.Assert(creds.Tokens, check.HasLen, 1)
+ c.Check(creds.Tokens[0], check.Equals, accessToken)
+ exp := checkTokenInDB()
+ // Remember one expiry value to compare against the
+ // refreshed expiry after the cache expires below.
+ // NOTE(review): exp1 is written from goroutine i==0
+ // and read after wg.Wait(); the WaitGroup provides
+ // the happens-before edge.
+ if i == 0 {
+ exp1 = exp
+ }
+ return nil, nil
+ })(ctx, nil)
+ c.Check(err, check.IsNil)
+ }()
+ }
+ wg.Wait()
+ if c.Failed() {
+ c.Fatal("giving up")
+ }
+
+ // If the token is used again after the in-memory cache
+ // expires, oidcAuthorizer must re-check the token and update
+ // the expires_at value in the database.
+ time.Sleep(3 * time.Millisecond)
+ oidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {
+ exp := checkTokenInDB()
+ c.Check(exp.Sub(exp1) > 0, check.Equals, true, check.Commentf("expect %v > 0", exp.Sub(exp1)))
+ c.Check(exp.Sub(exp1) < time.Second, check.Equals, true, check.Commentf("expect %v < 1s", exp.Sub(exp1)))
+ return nil, nil
+ })(ctx, nil)
+
+ // Switch to a token whose payload carries an explicit scope list,
+ // to exercise the AcceptAccessTokenScope matching below.
+ s.fakeProvider.AccessTokenPayload = map[string]interface{}{"scope": "openid profile foobar"}
+ accessToken = s.fakeProvider.ValidAccessToken()
+ ctx = auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{accessToken}})
+
+ // Recompute the stored-token digest for the new access token.
+ mac = hmac.New(sha256.New, []byte(s.cluster.SystemRootToken))
+ io.WriteString(mac, accessToken)
+ apiToken = fmt.Sprintf("%x", mac.Sum(nil))
+
+ // Config matrix: acceptable => a DB row is created for the token;
+ // shouldRun => the wrapped handler is invoked at all. A required
+ // scope that the token lacks ("foo") aborts the call, whereas
+ // disabling AcceptAccessToken passes the token through untouched
+ // (handler runs, but no row is created).
+ for _, trial := range []struct {
+ configEnable bool
+ configScope string
+ acceptable bool
+ shouldRun bool
+ }{
+ {true, "foobar", true, true},
+ {true, "foo", false, false},
+ {true, "", true, true},
+ {false, "", false, true},
+ {false, "foobar", false, true},
+ } {
+ c.Logf("trial = %+v", trial)
+ cleanup()
+ s.cluster.Login.OpenIDConnect.AcceptAccessToken = trial.configEnable
+ s.cluster.Login.OpenIDConnect.AcceptAccessTokenScope = trial.configScope
+ // Rebuild the authorizer so it picks up the changed config.
+ oidcAuthorizer = OIDCAccessTokenAuthorizer(s.cluster, func(context.Context) (*sqlx.DB, error) { return db, nil })
+ checked := false
+ oidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {
+ var n int
+ err := db.QueryRowContext(ctx, `select count(*) from api_client_authorizations where api_token=$1`, apiToken).Scan(&n)
+ c.Check(err, check.IsNil)
+ if trial.acceptable {
+ c.Check(n, check.Equals, 1)
+ } else {
+ c.Check(n, check.Equals, 0)
+ }
+ checked = true
+ return nil, nil
+ })(ctx, nil)
+ c.Check(checked, check.Equals, trial.shouldRun)
+ }
+}
+