<meta name="author" content="">
<% if current_user %>
<% content_for :js do %>
- window.defaultSession = <%=raw({baseURL: Rails.configuration.Services.Controller.ExternalURL.to_s, token: Thread.current[:arvados_api_token], user: current_user}.to_json)%>
+ window.defaultSession = <%=raw({baseURL: Rails.configuration.Services.Controller.ExternalURL.to_s.gsub(/\/?$/,'/'), token: Thread.current[:arvados_api_token], user: current_user}.to_json)%>
<% end %>
<% end %>
<% if current_user and $arvados_api_client.discovery[:websocketUrl] %>
Login:
# These settings are provided by your OAuth2 provider (eg
# Google) used to perform upstream authentication.
- ProviderAppSecret: ""
ProviderAppID: ""
+ ProviderAppSecret: ""
+
+ # (Experimental) Authenticate with Google, bypassing the
+ # SSO-provider gateway service. Use the Google Cloud console to
+ # generate the Client ID and secret (APIs and Services >
+ # Credentials > Create credentials > OAuth client ID > Web
+ # application) and add your controller's /login URL (e.g.,
+ # "https://zzzzz.example.com/login") as an authorized redirect
+ # URL.
+ #
+ # Requires EnableBetaController14287. ProviderAppID must be
+ # blank.
+ GoogleClientID: ""
+ GoogleClientSecret: ""
# The cluster ID to delegate the user database. When set,
# logins on this cluster will be redirected to the login cluster
type oldKeepWebConfig struct {
Client *arvados.Client
- Listen string
+ Listen *string
- AnonymousTokens []string
- AttachmentOnlyHost string
- TrustAllContent bool
+ AnonymousTokens *[]string
+ AttachmentOnlyHost *string
+ TrustAllContent *bool
Cache struct {
- TTL arvados.Duration
- UUIDTTL arvados.Duration
- MaxCollectionEntries int
- MaxCollectionBytes int64
- MaxPermissionEntries int
- MaxUUIDEntries int
+ TTL *arvados.Duration
+ UUIDTTL *arvados.Duration
+ MaxCollectionEntries *int
+ MaxCollectionBytes *int64
+ MaxPermissionEntries *int
+ MaxUUIDEntries *int
}
// Hack to support old command line flag, which is a bool
// meaning "get actual token from environment".
- deprecatedAllowAnonymous bool
+ deprecatedAllowAnonymous *bool
// Authorization token to be included in all health check requests.
- ManagementToken string
+ ManagementToken *string
}
func (ldr *Loader) loadOldKeepWebConfig(cfg *arvados.Config) error {
loadOldClientConfig(cluster, oc.Client)
- cluster.Services.WebDAV.InternalURLs[arvados.URL{Host: oc.Listen}] = arvados.ServiceInstance{}
- cluster.Services.WebDAVDownload.InternalURLs[arvados.URL{Host: oc.Listen}] = arvados.ServiceInstance{}
- cluster.Services.WebDAVDownload.ExternalURL = arvados.URL{Host: oc.AttachmentOnlyHost}
- cluster.TLS.Insecure = oc.Client.Insecure
- cluster.ManagementToken = oc.ManagementToken
- cluster.Collections.TrustAllContent = oc.TrustAllContent
- cluster.Collections.WebDAVCache.TTL = oc.Cache.TTL
- cluster.Collections.WebDAVCache.UUIDTTL = oc.Cache.UUIDTTL
- cluster.Collections.WebDAVCache.MaxCollectionEntries = oc.Cache.MaxCollectionEntries
- cluster.Collections.WebDAVCache.MaxCollectionBytes = oc.Cache.MaxCollectionBytes
- cluster.Collections.WebDAVCache.MaxPermissionEntries = oc.Cache.MaxPermissionEntries
- cluster.Collections.WebDAVCache.MaxUUIDEntries = oc.Cache.MaxUUIDEntries
- if len(oc.AnonymousTokens) > 0 {
- cluster.Users.AnonymousUserToken = oc.AnonymousTokens[0]
- if len(oc.AnonymousTokens) > 1 {
- ldr.Logger.Warn("More than 1 anonymous tokens configured, using only the first and discarding the rest.")
+ if oc.Listen != nil {
+ cluster.Services.WebDAV.InternalURLs[arvados.URL{Host: *oc.Listen}] = arvados.ServiceInstance{}
+ cluster.Services.WebDAVDownload.InternalURLs[arvados.URL{Host: *oc.Listen}] = arvados.ServiceInstance{}
+ }
+ if oc.AttachmentOnlyHost != nil {
+ cluster.Services.WebDAVDownload.ExternalURL = arvados.URL{Host: *oc.AttachmentOnlyHost}
+ }
+ if oc.ManagementToken != nil {
+ cluster.ManagementToken = *oc.ManagementToken
+ }
+ if oc.TrustAllContent != nil {
+ cluster.Collections.TrustAllContent = *oc.TrustAllContent
+ }
+ if oc.Cache.TTL != nil {
+ cluster.Collections.WebDAVCache.TTL = *oc.Cache.TTL
+ }
+ if oc.Cache.UUIDTTL != nil {
+ cluster.Collections.WebDAVCache.UUIDTTL = *oc.Cache.UUIDTTL
+ }
+ if oc.Cache.MaxCollectionEntries != nil {
+ cluster.Collections.WebDAVCache.MaxCollectionEntries = *oc.Cache.MaxCollectionEntries
+ }
+ if oc.Cache.MaxCollectionBytes != nil {
+ cluster.Collections.WebDAVCache.MaxCollectionBytes = *oc.Cache.MaxCollectionBytes
+ }
+ if oc.Cache.MaxPermissionEntries != nil {
+ cluster.Collections.WebDAVCache.MaxPermissionEntries = *oc.Cache.MaxPermissionEntries
+ }
+ if oc.Cache.MaxUUIDEntries != nil {
+ cluster.Collections.WebDAVCache.MaxUUIDEntries = *oc.Cache.MaxUUIDEntries
+ }
+ if oc.AnonymousTokens != nil {
+ if len(*oc.AnonymousTokens) > 0 {
+ cluster.Users.AnonymousUserToken = (*oc.AnonymousTokens)[0]
+ if len(*oc.AnonymousTokens) > 1 {
+ ldr.Logger.Warn("More than 1 anonymous tokens configured, using only the first and discarding the rest.")
+ }
}
}
type oldGitHttpdConfig struct {
Client *arvados.Client
- Listen string
- GitCommand string
- GitoliteHome string
- RepoRoot string
- ManagementToken string
+ Listen *string
+ GitCommand *string
+ GitoliteHome *string
+ RepoRoot *string
+ ManagementToken *string
}
func (ldr *Loader) loadOldGitHttpdConfig(cfg *arvados.Config) error {
loadOldClientConfig(cluster, oc.Client)
- cluster.Services.GitHTTP.InternalURLs[arvados.URL{Host: oc.Listen}] = arvados.ServiceInstance{}
- cluster.TLS.Insecure = oc.Client.Insecure
- cluster.ManagementToken = oc.ManagementToken
- cluster.Git.GitCommand = oc.GitCommand
- cluster.Git.GitoliteHome = oc.GitoliteHome
- cluster.Git.Repositories = oc.RepoRoot
+ if oc.Listen != nil {
+ cluster.Services.GitHTTP.InternalURLs[arvados.URL{Host: *oc.Listen}] = arvados.ServiceInstance{}
+ }
+ if oc.ManagementToken != nil {
+ cluster.ManagementToken = *oc.ManagementToken
+ }
+ if oc.GitCommand != nil {
+ cluster.Git.GitCommand = *oc.GitCommand
+ }
+ if oc.GitoliteHome != nil {
+ cluster.Git.GitoliteHome = *oc.GitoliteHome
+ }
+ if oc.RepoRoot != nil {
+ cluster.Git.Repositories = *oc.RepoRoot
+ }
cfg.Clusters[cluster.ClusterID] = *cluster
return nil
check "gopkg.in/check.v1"
)
+// Configured at: sdk/python/tests/run_test_server.py
+const TestServerManagementToken = "e687950a23c3a9bceec28c6223a06c79"
+
func testLoadLegacyConfig(content []byte, mungeFlag string, c *check.C) (*arvados.Cluster, error) {
tmpfile, err := ioutil.TempFile("", "example")
if err != nil {
c.Check(cluster.ManagementToken, check.Equals, "xyzzy")
}
+// Tests fix for https://dev.arvados.org/issues/15642
+func (s *LoadSuite) TestLegacyKeepWebConfigDoesntDisableMissingItems(c *check.C) {
+ content := []byte(`
+{
+ "Client": {
+ "Scheme": "",
+ "APIHost": "example.com",
+ "AuthToken": "abcdefg",
+ }
+}
+`)
+ cluster, err := testLoadLegacyConfig(content, "-legacy-keepweb-config", c)
+ c.Check(err, check.IsNil)
+ // The resulting ManagementToken should be the one set up on the test server.
+ c.Check(cluster.ManagementToken, check.Equals, TestServerManagementToken)
+}
+
func (s *LoadSuite) TestLegacyKeepproxyConfig(c *check.C) {
f := "-legacy-keepproxy-config"
content := []byte(fmtKeepproxyConfig("", true))
c.Check(cluster.Services.Keepproxy.InternalURLs[arvados.URL{Host: ":9000"}], check.Equals, arvados.ServiceInstance{})
}
+// Tests fix for https://dev.arvados.org/issues/15642
+func (s *LoadSuite) TestLegacyArvGitHttpdConfigDoesntDisableMissingItems(c *check.C) {
+ content := []byte(`
+{
+ "Client": {
+ "Scheme": "",
+ "APIHost": "example.com",
+ "AuthToken": "abcdefg",
+ }
+}
+`)
+ cluster, err := testLoadLegacyConfig(content, "-legacy-git-httpd-config", c)
+ c.Check(err, check.IsNil)
+ // The resulting ManagementToken should be the one set up on the test server.
+ c.Check(cluster.ManagementToken, check.Equals, TestServerManagementToken)
+}
+
func (s *LoadSuite) TestLegacyKeepBalanceConfig(c *check.C) {
f := "-legacy-keepbalance-config"
content := []byte(fmtKeepBalanceConfig(""))
"InstanceTypes.*": true,
"InstanceTypes.*.*": true,
"Login": true,
- "Login.ProviderAppSecret": false,
+ "Login.GoogleClientID": false,
+ "Login.GoogleClientSecret": false,
"Login.ProviderAppID": false,
+ "Login.ProviderAppSecret": false,
"Login.LoginCluster": true,
"Login.RemoteTokenRefresh": true,
"Mail": false,
Login:
# These settings are provided by your OAuth2 provider (eg
# Google) used to perform upstream authentication.
- ProviderAppSecret: ""
ProviderAppID: ""
+ ProviderAppSecret: ""
+
+ # (Experimental) Authenticate with Google, bypassing the
+ # SSO-provider gateway service. Use the Google Cloud console to
+ # generate the Client ID and secret (APIs and Services >
+ # Credentials > Create credentials > OAuth client ID > Web
+ # application) and add your controller's /login URL (e.g.,
+ # "https://zzzzz.example.com/login") as an authorized redirect
+ # URL.
+ #
+ # Requires EnableBetaController14287. ProviderAppID must be
+ # blank.
+ GoogleClientID: ""
+ GoogleClientSecret: ""
# The cluster ID to delegate the user database. When set,
# logins on this cluster will be redirected to the login cluster
"strings"
"git.curoverse.com/arvados.git/lib/config"
- "git.curoverse.com/arvados.git/lib/controller/railsproxy"
+ "git.curoverse.com/arvados.git/lib/controller/localdb"
"git.curoverse.com/arvados.git/lib/controller/rpc"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/auth"
}
func New(cluster *arvados.Cluster) *Conn {
- local := railsproxy.NewConn(cluster)
+ local := localdb.NewConn(cluster)
remotes := map[string]backend{}
for id, remote := range cluster.RemoteClusters {
if !remote.Proxy {
return json.RawMessage(buf.Bytes()), err
}
+func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
+ if id := conn.cluster.Login.LoginCluster; id != "" && id != conn.cluster.ClusterID {
+ // defer entire login procedure to designated cluster
+ remote, ok := conn.remotes[id]
+ if !ok {
+ return arvados.LoginResponse{}, fmt.Errorf("configuration problem: designated login cluster %q is not defined", id)
+ }
+ baseURL := remote.BaseURL()
+ target, err := baseURL.Parse(arvados.EndpointLogin.Path)
+ if err != nil {
+ return arvados.LoginResponse{}, fmt.Errorf("internal error getting redirect target: %s", err)
+ }
+ target.RawQuery = url.Values{
+ "return_to": []string{options.ReturnTo},
+ "remote": []string{options.Remote},
+ }.Encode()
+ return arvados.LoginResponse{
+ RedirectLocation: target.String(),
+ }, nil
+ } else {
+ return conn.local.Login(ctx, options)
+ }
+}
+
func (conn *Conn) CollectionGet(ctx context.Context, options arvados.GetOptions) (arvados.Collection, error) {
if len(options.UUID) == 27 {
// UUID is really a UUID
return conn.chooseBackend(options.UUID).APIClientAuthorizationCurrent(ctx, options)
}
-type backend interface{ arvados.API }
+type backend interface {
+ arvados.API
+ BaseURL() url.URL
+}
type notFoundError struct{}
s.fed = New(s.cluster)
}
-func (s *FederationSuite) addDirectRemote(c *check.C, id string, backend arvados.API) {
+func (s *FederationSuite) addDirectRemote(c *check.C, id string, backend backend) {
s.cluster.RemoteClusters[id] = arvados.RemoteCluster{
Host: "in-process.local",
}
s.fed.remotes[id] = backend
}
-func (s *FederationSuite) addHTTPRemote(c *check.C, id string, backend arvados.API) {
+func (s *FederationSuite) addHTTPRemote(c *check.C, id string, backend backend) {
srv := httpserver.Server{Addr: ":"}
srv.Handler = router.New(backend)
c.Check(srv.Start(), check.IsNil)
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package federation
+
+import (
+ "context"
+ "net/url"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+ check "gopkg.in/check.v1"
+)
+
+func (s *FederationSuite) TestDeferToLoginCluster(c *check.C) {
+ s.addHTTPRemote(c, "zhome", &arvadostest.APIStub{})
+ s.cluster.Login.LoginCluster = "zhome"
+
+ returnTo := "https://app.example.com/foo?bar"
+ for _, remote := range []string{"", "ccccc"} {
+ resp, err := s.fed.Login(context.Background(), arvados.LoginOptions{Remote: remote, ReturnTo: returnTo})
+ c.Check(err, check.IsNil)
+ c.Logf("remote %q -- RedirectLocation %q", remote, resp.RedirectLocation)
+ target, err := url.Parse(resp.RedirectLocation)
+ c.Check(err, check.IsNil)
+ c.Check(target.Host, check.Equals, s.cluster.RemoteClusters["zhome"].Host)
+ c.Check(target.Scheme, check.Equals, "http")
+ c.Check(target.Query().Get("remote"), check.Equals, remote)
+ c.Check(target.Query().Get("return_to"), check.Equals, returnTo)
+ }
+}
if h.Cluster.EnableBetaController14287 {
mux.Handle("/arvados/v1/collections", rtr)
mux.Handle("/arvados/v1/collections/", rtr)
+ mux.Handle("/login", rtr)
}
hs := http.NotFoundHandler()
}
func (s *HandlerSuite) TestProxyRedirect(c *check.C) {
+ s.cluster.Login.ProviderAppID = "test"
+ s.cluster.Login.ProviderAppSecret = "test"
req := httptest.NewRequest("GET", "https://0.0.0.0:1/login?return_to=foo", nil)
resp := httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
- c.Check(resp.Code, check.Equals, http.StatusFound)
- c.Check(resp.Header().Get("Location"), check.Matches, `https://0.0.0.0:1/auth/joshid\?return_to=%2Cfoo&?`)
+ if !c.Check(resp.Code, check.Equals, http.StatusFound) {
+ c.Log(resp.Body.String())
+ }
+ // Old "proxy entire request" code path returns an absolute
+ // URL. New lib/controller/federation code path returns a
+ // relative URL.
+ c.Check(resp.Header().Get("Location"), check.Matches, `(https://0.0.0.0:1)?/auth/joshid\?return_to=%2Cfoo&?`)
}
func (s *HandlerSuite) TestValidateV1APIToken(c *check.C) {
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+ "errors"
+
+ "git.curoverse.com/arvados.git/lib/controller/railsproxy"
+ "git.curoverse.com/arvados.git/lib/controller/rpc"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+type railsProxy = rpc.Conn
+
+type Conn struct {
+ cluster *arvados.Cluster
+ *railsProxy // handles API methods that aren't defined on Conn itself
+
+ googleLoginController
+}
+
+func NewConn(cluster *arvados.Cluster) *Conn {
+ return &Conn{
+ cluster: cluster,
+ railsProxy: railsproxy.NewConn(cluster),
+ }
+}
+
+func (conn *Conn) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
+ wantGoogle := conn.cluster.Login.GoogleClientID != ""
+ wantSSO := conn.cluster.Login.ProviderAppID != ""
+ if wantGoogle == wantSSO {
+ return arvados.LoginResponse{}, errors.New("configuration problem: exactly one of Login.GoogleClientID and Login.ProviderAppID must be configured")
+ } else if wantGoogle {
+ return conn.googleLoginController.Login(ctx, conn.cluster, conn.railsProxy, opts)
+ } else {
+ // Proxy to RailsAPI, which hands off to sso-provider.
+ return conn.railsProxy.Login(ctx, opts)
+ }
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "bytes"
+ "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+
+ "git.curoverse.com/arvados.git/lib/controller/rpc"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/auth"
+ "github.com/coreos/go-oidc"
+ "golang.org/x/oauth2"
+)
+
+type googleLoginController struct {
+ issuer string // override OIDC issuer URL (normally https://accounts.google.com) for testing
+ provider *oidc.Provider
+ mu sync.Mutex
+}
+
+func (ctrl *googleLoginController) getProvider() (*oidc.Provider, error) {
+ ctrl.mu.Lock()
+ defer ctrl.mu.Unlock()
+ if ctrl.provider == nil {
+ issuer := ctrl.issuer
+ if issuer == "" {
+ issuer = "https://accounts.google.com"
+ }
+ provider, err := oidc.NewProvider(context.Background(), issuer)
+ if err != nil {
+ return nil, err
+ }
+ ctrl.provider = provider
+ }
+ return ctrl.provider, nil
+}
+
+func (ctrl *googleLoginController) Login(ctx context.Context, cluster *arvados.Cluster, railsproxy *railsProxy, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
+ provider, err := ctrl.getProvider()
+ if err != nil {
+ return ctrl.loginError(fmt.Errorf("error setting up OpenID Connect provider: %s", err))
+ }
+ redirURL, err := (*url.URL)(&cluster.Services.Controller.ExternalURL).Parse("/login")
+ if err != nil {
+ return ctrl.loginError(fmt.Errorf("error making redirect URL: %s", err))
+ }
+ conf := &oauth2.Config{
+ ClientID: cluster.Login.GoogleClientID,
+ ClientSecret: cluster.Login.GoogleClientSecret,
+ Endpoint: provider.Endpoint(),
+ Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
+ RedirectURL: redirURL.String(),
+ }
+ verifier := provider.Verifier(&oidc.Config{
+ ClientID: conf.ClientID,
+ })
+ if opts.State == "" {
+ // Initiate Google sign-in.
+ if opts.ReturnTo == "" {
+ return ctrl.loginError(errors.New("missing return_to parameter"))
+ }
+ me := url.URL(cluster.Services.Controller.ExternalURL)
+ callback, err := me.Parse("/" + arvados.EndpointLogin.Path)
+ if err != nil {
+ return ctrl.loginError(err)
+ }
+ conf.RedirectURL = callback.String()
+ state := ctrl.newOAuth2State([]byte(cluster.SystemRootToken), opts.Remote, opts.ReturnTo)
+ return arvados.LoginResponse{
+ RedirectLocation: conf.AuthCodeURL(state.String(),
+ // prompt=select_account tells Google
+ // to show the "choose which Google
+ // account" page, even if the client
+ // is currently logged in to exactly
+ // one Google account.
+ oauth2.SetAuthURLParam("prompt", "select_account")),
+ }, nil
+ } else {
+ // Callback after Google sign-in.
+ state := ctrl.parseOAuth2State(opts.State)
+ if !state.verify([]byte(cluster.SystemRootToken)) {
+ return ctrl.loginError(errors.New("invalid OAuth2 state"))
+ }
+ oauth2Token, err := conf.Exchange(ctx, opts.Code)
+ if err != nil {
+ return ctrl.loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
+ }
+ rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+ if !ok {
+ return ctrl.loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
+ }
+ idToken, err := verifier.Verify(ctx, rawIDToken)
+ if err != nil {
+ return ctrl.loginError(fmt.Errorf("error verifying ID token: %s", err))
+ }
+ var claims struct {
+ Name string `json:"name"`
+ Email string `json:"email"`
+ Verified bool `json:"email_verified"`
+ }
+ if err := idToken.Claims(&claims); err != nil {
+ return ctrl.loginError(fmt.Errorf("error extracting claims from ID token: %s", err))
+ }
+ if !claims.Verified {
+ return ctrl.loginError(errors.New("cannot authenticate using an unverified email address"))
+ }
+
+ firstname, lastname := strings.TrimSpace(claims.Name), ""
+ if names := strings.Fields(firstname); len(names) > 1 {
+ firstname = strings.Join(names[0:len(names)-1], " ")
+ lastname = names[len(names)-1]
+ }
+
+ ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{cluster.SystemRootToken}})
+ return railsproxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+ ReturnTo: state.Remote + "," + state.ReturnTo,
+ AuthInfo: map[string]interface{}{
+ "email": claims.Email,
+ "first_name": firstname,
+ "last_name": lastname,
+ },
+ })
+ }
+}
+
+func (ctrl *googleLoginController) loginError(sendError error) (resp arvados.LoginResponse, err error) {
+ tmpl, err := template.New("error").Parse(`<h2>Login error:</h2><p>{{.}}</p>`)
+ if err != nil {
+ return
+ }
+ err = tmpl.Execute(&resp.HTML, sendError.Error())
+ return
+}
+
+func (ctrl *googleLoginController) newOAuth2State(key []byte, remote, returnTo string) oauth2State {
+ s := oauth2State{
+ Time: time.Now().Unix(),
+ Remote: remote,
+ ReturnTo: returnTo,
+ }
+ s.HMAC = s.computeHMAC(key)
+ return s
+}
+
+type oauth2State struct {
+ HMAC []byte // hash of other fields; see computeHMAC()
+ Time int64 // creation time (unix timestamp)
+ Remote string // remote cluster if requesting a salted token, otherwise blank
+ ReturnTo string // redirect target
+}
+
+func (ctrl *googleLoginController) parseOAuth2State(encoded string) (s oauth2State) {
+	// Errors are not checked. If decoding/parsing fails, the
+	// resulting (zero-valued) state will be rejected by verify().
+ decoded, _ := base64.RawURLEncoding.DecodeString(encoded)
+ f := strings.Split(string(decoded), "\n")
+ if len(f) != 4 {
+ return
+ }
+ fmt.Sscanf(f[0], "%x", &s.HMAC)
+ fmt.Sscanf(f[1], "%x", &s.Time)
+ fmt.Sscanf(f[2], "%s", &s.Remote)
+ fmt.Sscanf(f[3], "%s", &s.ReturnTo)
+ return
+}
+
+func (s oauth2State) verify(key []byte) bool {
+ if delta := time.Now().Unix() - s.Time; delta < 0 || delta > 300 {
+ return false
+ }
+ return hmac.Equal(s.computeHMAC(key), s.HMAC)
+}
+
+func (s oauth2State) String() string {
+ var buf bytes.Buffer
+ enc := base64.NewEncoder(base64.RawURLEncoding, &buf)
+ fmt.Fprintf(enc, "%x\n%x\n%s\n%s", s.HMAC, s.Time, s.Remote, s.ReturnTo)
+ enc.Close()
+ return buf.String()
+}
+
+func (s oauth2State) computeHMAC(key []byte) []byte {
+ mac := hmac.New(sha256.New, key)
+ fmt.Fprintf(mac, "%x %s %s", s.Time, s.Remote, s.ReturnTo)
+ return mac.Sum(nil)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/rsa"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+ "testing"
+ "time"
+
+ "git.curoverse.com/arvados.git/lib/config"
+ "git.curoverse.com/arvados.git/lib/controller/rpc"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+ "git.curoverse.com/arvados.git/sdk/go/auth"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ check "gopkg.in/check.v1"
+ jose "gopkg.in/square/go-jose.v2"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+var _ = check.Suite(&LoginSuite{})
+
+type LoginSuite struct {
+ cluster *arvados.Cluster
+ ctx context.Context
+ localdb *Conn
+ railsSpy *arvadostest.Proxy
+ fakeIssuer *httptest.Server
+ issuerKey *rsa.PrivateKey
+
+	// authorization code the fake issuer's token endpoint will accept
+ validCode string
+ // desired response from token endpoint
+ authEmail string
+ authEmailVerified bool
+ authName string
+}
+
+func (s *LoginSuite) SetUpTest(c *check.C) {
+ var err error
+ s.issuerKey, err = rsa.GenerateKey(rand.Reader, 2048)
+ c.Assert(err, check.IsNil)
+
+ s.authEmail = "active-user@arvados.local"
+ s.authEmailVerified = true
+ s.authName = "Fake User Name"
+ s.fakeIssuer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ req.ParseForm()
+ c.Logf("fakeIssuer: got req: %s %s %s", req.Method, req.URL, req.Form)
+ w.Header().Set("Content-Type", "application/json")
+ switch req.URL.Path {
+ case "/.well-known/openid-configuration":
+ json.NewEncoder(w).Encode(map[string]interface{}{
+ "issuer": s.fakeIssuer.URL,
+ "authorization_endpoint": s.fakeIssuer.URL + "/auth",
+ "token_endpoint": s.fakeIssuer.URL + "/token",
+ "jwks_uri": s.fakeIssuer.URL + "/jwks",
+ "userinfo_endpoint": s.fakeIssuer.URL + "/userinfo",
+ })
+ case "/token":
+ if req.Form.Get("code") != s.validCode || s.validCode == "" {
+ w.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+ idToken, _ := json.Marshal(map[string]interface{}{
+ "iss": s.fakeIssuer.URL,
+ "aud": []string{"test%client$id"},
+ "sub": "fake-user-id",
+ "exp": time.Now().UTC().Add(time.Minute).UnixNano(),
+ "iat": time.Now().UTC().UnixNano(),
+ "nonce": "fake-nonce",
+ "email": s.authEmail,
+ "email_verified": s.authEmailVerified,
+ "name": s.authName,
+ })
+ json.NewEncoder(w).Encode(struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn int32 `json:"expires_in"`
+ IDToken string `json:"id_token"`
+ }{
+ AccessToken: s.fakeToken(c, []byte("fake access token")),
+ TokenType: "Bearer",
+ RefreshToken: "test-refresh-token",
+ ExpiresIn: 30,
+ IDToken: s.fakeToken(c, idToken),
+ })
+ case "/jwks":
+ json.NewEncoder(w).Encode(jose.JSONWebKeySet{
+ Keys: []jose.JSONWebKey{
+ {Key: s.issuerKey.Public(), Algorithm: string(jose.RS256), KeyID: ""},
+ },
+ })
+ case "/auth":
+ w.WriteHeader(http.StatusInternalServerError)
+ case "/userinfo":
+ w.WriteHeader(http.StatusInternalServerError)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+ s.cluster, err = cfg.GetCluster("")
+ s.cluster.Login.GoogleClientID = "test%client$id"
+ s.cluster.Login.GoogleClientSecret = "test#client/secret"
+ c.Assert(err, check.IsNil)
+
+ s.localdb = NewConn(s.cluster)
+ s.localdb.googleLoginController.issuer = s.fakeIssuer.URL
+
+ s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+ s.localdb.railsProxy = rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+}
+
+func (s *LoginSuite) TearDownTest(c *check.C) {
+ s.railsSpy.Close()
+}
+
+func (s *LoginSuite) TestGoogleLoginStart_Bogus(c *check.C) {
+ resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{})
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, "")
+ c.Check(resp.HTML.String(), check.Matches, `.*missing return_to parameter.*`)
+}
+
+func (s *LoginSuite) TestGoogleLoginStart(c *check.C) {
+ for _, remote := range []string{"", "zzzzz"} {
+ resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{Remote: remote, ReturnTo: "https://app.example.com/foo?bar"})
+ c.Check(err, check.IsNil)
+ target, err := url.Parse(resp.RedirectLocation)
+ c.Check(err, check.IsNil)
+ issuerURL, _ := url.Parse(s.fakeIssuer.URL)
+ c.Check(target.Host, check.Equals, issuerURL.Host)
+ q := target.Query()
+ c.Check(q.Get("client_id"), check.Equals, "test%client$id")
+ state := s.localdb.googleLoginController.parseOAuth2State(q.Get("state"))
+ c.Check(state.verify([]byte(s.cluster.SystemRootToken)), check.Equals, true)
+ c.Check(state.Time, check.Not(check.Equals), 0)
+ c.Check(state.Remote, check.Equals, remote)
+ c.Check(state.ReturnTo, check.Equals, "https://app.example.com/foo?bar")
+ }
+}
+
+func (s *LoginSuite) TestGoogleLoginSuccess(c *check.C) {
+ // Initiate login, but instead of following the redirect to
+ // the provider, just grab state from the redirect URL.
+ resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{ReturnTo: "https://app.example.com/foo?bar"})
+ c.Check(err, check.IsNil)
+ target, err := url.Parse(resp.RedirectLocation)
+ c.Check(err, check.IsNil)
+ state := target.Query().Get("state")
+ c.Check(state, check.Not(check.Equals), "")
+
+ // Prime the fake issuer with a valid code.
+ s.validCode = fmt.Sprintf("abcdefgh-%d", time.Now().Unix())
+
+ // Callback with invalid code.
+ resp, err = s.localdb.Login(context.Background(), arvados.LoginOptions{
+ Code: "first-try-a-bogus-code",
+ State: state,
+ })
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, "")
+ c.Check(resp.HTML.String(), check.Matches, `(?ms).*error in OAuth2 exchange.*cannot fetch token.*`)
+
+ // Callback with invalid state.
+ resp, err = s.localdb.Login(context.Background(), arvados.LoginOptions{
+ Code: s.validCode,
+ State: "bogus-state",
+ })
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, "")
+ c.Check(resp.HTML.String(), check.Matches, `(?ms).*invalid OAuth2 state.*`)
+
+ // Callback with valid code and state.
+ resp, err = s.localdb.Login(context.Background(), arvados.LoginOptions{
+ Code: s.validCode,
+ State: state,
+ })
+ c.Check(err, check.IsNil)
+ c.Check(resp.HTML.String(), check.Equals, "")
+ c.Check(resp.RedirectLocation, check.Not(check.Equals), "")
+ target, err = url.Parse(resp.RedirectLocation)
+ c.Check(err, check.IsNil)
+ c.Check(target.Host, check.Equals, "app.example.com")
+ c.Check(target.Path, check.Equals, "/foo")
+ token := target.Query().Get("api_token")
+ c.Check(token, check.Matches, `v2/zzzzz-gj3su-.{15}/.{32,50}`)
+
+ foundCallback := false
+ for _, dump := range s.railsSpy.RequestDumps {
+ c.Logf("spied request: %q", dump)
+ split := bytes.Split(dump, []byte("\r\n\r\n"))
+ c.Assert(split, check.HasLen, 2)
+ hdr, body := string(split[0]), string(split[1])
+ if strings.Contains(hdr, "POST /auth/controller/callback") {
+ vs, err := url.ParseQuery(body)
+ var authinfo map[string]interface{}
+ c.Check(json.Unmarshal([]byte(vs.Get("auth_info")), &authinfo), check.IsNil)
+ c.Check(err, check.IsNil)
+ c.Check(authinfo["first_name"], check.Equals, "Fake User")
+ c.Check(authinfo["last_name"], check.Equals, "Name")
+ c.Check(authinfo["email"], check.Equals, "active-user@arvados.local")
+ foundCallback = true
+ }
+ }
+ c.Check(foundCallback, check.Equals, true)
+
+ // Try using the returned Arvados token.
+ c.Logf("trying an API call with new token %q", token)
+ ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{token}})
+ cl, err := s.localdb.CollectionList(ctx, arvados.ListOptions{Limit: -1})
+ c.Check(cl.ItemsAvailable, check.Not(check.Equals), 0)
+ c.Check(cl.Items, check.Not(check.HasLen), 0)
+ c.Check(err, check.IsNil)
+
+ // Might as well check that bogus tokens aren't accepted.
+ badtoken := token + "plussomeboguschars"
+ c.Logf("trying an API call with mangled token %q", badtoken)
+ ctx = auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{badtoken}})
+ cl, err = s.localdb.CollectionList(ctx, arvados.ListOptions{Limit: -1})
+ c.Check(cl.Items, check.HasLen, 0)
+ c.Check(err, check.NotNil)
+ c.Check(err, check.ErrorMatches, `.*401 Unauthorized: Not logged in.*`)
+}
+
+func (s *LoginSuite) fakeToken(c *check.C, payload []byte) string {
+ signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: s.issuerKey}, nil)
+ if err != nil {
+ c.Error(err)
+ }
+ object, err := signer.Sign(payload)
+ if err != nil {
+ c.Error(err)
+ }
+ t, err := object.CompactSerialize()
+ if err != nil {
+ c.Error(err)
+ }
+ c.Logf("fakeToken(%q) == %q", payload, t)
+ return t
+}
package railsproxy
import (
- "context"
- "errors"
"fmt"
+ "net/http"
"net/url"
"strings"
"git.curoverse.com/arvados.git/lib/controller/rpc"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "git.curoverse.com/arvados.git/sdk/go/auth"
)
// For now, FindRailsAPI always uses the rails API running on this
if err != nil {
panic(err)
}
- return rpc.NewConn(cluster.ClusterID, url, insecure, provideIncomingToken)
-}
-
-func provideIncomingToken(ctx context.Context) ([]string, error) {
- incoming, ok := auth.FromContext(ctx)
- if !ok {
- return nil, errors.New("no token provided")
- }
- return incoming.Tokens, nil
+ conn := rpc.NewConn(cluster.ClusterID, url, insecure, rpc.PassthroughTokenProvider)
+ // If Rails is running with force_ssl=true, this
+ // "X-Forwarded-Proto: https" header prevents it from
+ // redirecting our internal request to an invalid https URL.
+ conn.SendHeader = http.Header{"X-Forwarded-Proto": []string{"https"}}
+ return conn
}
return selected
}
-func (rtr *router) sendResponse(w http.ResponseWriter, resp interface{}, opts responseOptions) {
+func (rtr *router) sendResponse(w http.ResponseWriter, req *http.Request, resp interface{}, opts responseOptions) {
var tmp map[string]interface{}
+ if resp, ok := resp.(http.Handler); ok {
+ // resp knows how to write its own http response
+ // header and body.
+ resp.ServeHTTP(w, req)
+ return
+ }
+
err := rtr.transcode(resp, &tmp)
if err != nil {
rtr.sendError(w, err)
}
}
w.Header().Set("Content-Type", "application/json")
- json.NewEncoder(w).Encode(tmp)
+ enc := json.NewEncoder(w)
+ enc.SetEscapeHTML(false)
+ enc.Encode(tmp)
}
func (rtr *router) sendError(w http.ResponseWriter, err error) {
return rtr.fed.ConfigGet(ctx)
},
},
+ {
+ arvados.EndpointLogin,
+ func() interface{} { return &arvados.LoginOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.fed.Login(ctx, *opts.(*arvados.LoginOptions))
+ },
+ },
{
arvados.EndpointCollectionCreate,
func() interface{} { return &arvados.CreateOptions{} },
rtr.sendError(w, err)
return
}
- rtr.sendResponse(w, resp, respOpts)
+ rtr.sendResponse(w, req, resp, respOpts)
})
}
}
type Conn struct {
+ SendHeader http.Header
clusterID string
httpClient http.Client
baseURL url.URL
}
}
return &Conn{
- clusterID: clusterID,
- httpClient: http.Client{Transport: transport},
+ clusterID: clusterID,
+ httpClient: http.Client{
+ CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse },
+ Transport: transport,
+ },
baseURL: *url,
tokenProvider: tp,
}
func (conn *Conn) requestAndDecode(ctx context.Context, dst interface{}, ep arvados.APIEndpoint, body io.Reader, opts interface{}) error {
aClient := arvados.Client{
- Client: &conn.httpClient,
- Scheme: conn.baseURL.Scheme,
- APIHost: conn.baseURL.Host,
+ Client: &conn.httpClient,
+ Scheme: conn.baseURL.Scheme,
+ APIHost: conn.baseURL.Host,
+ SendHeader: conn.SendHeader,
}
tokens, err := conn.tokenProvider(ctx)
if err != nil {
return aClient.RequestAndDecodeContext(ctx, dst, ep.Method, path, body, params)
}
+func (conn *Conn) BaseURL() url.URL {
+ return conn.baseURL
+}
+
func (conn *Conn) ConfigGet(ctx context.Context) (json.RawMessage, error) {
ep := arvados.EndpointConfigGet
var resp json.RawMessage
return resp, err
}
+func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
+ ep := arvados.EndpointLogin
+ var resp arvados.LoginResponse
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ resp.RedirectLocation = conn.relativeToBaseURL(resp.RedirectLocation)
+ return resp, err
+}
+
+// If the given location is a valid URL and its origin (scheme, plus
+// host compared case-insensitively) is the same as conn.baseURL,
+// return it as a relative URL. Otherwise, return it unmodified.
+func (conn *Conn) relativeToBaseURL(location string) string {
+	u, err := url.Parse(location)
+	// strings.EqualFold avoids allocating two lowercased copies
+	// (staticcheck SA6005).
+	if err != nil || u.Scheme != conn.baseURL.Scheme || !strings.EqualFold(u.Host, conn.baseURL.Host) {
+		return location
+	}
+	// Same origin: strip the origin components, leaving a relative
+	// reference (path, query, fragment).
+	u.Opaque = ""
+	u.Scheme = ""
+	u.User = nil
+	u.Host = ""
+	return u.String()
+}
+
func (conn *Conn) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {
ep := arvados.EndpointCollectionCreate
var resp arvados.Collection
err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
return resp, err
}
+
+type UserSessionCreateOptions struct {
+ AuthInfo map[string]interface{} `json:"auth_info"`
+ ReturnTo string `json:"return_to"`
+}
+
+func (conn *Conn) UserSessionCreate(ctx context.Context, options UserSessionCreateOptions) (arvados.LoginResponse, error) {
+ ep := arvados.APIEndpoint{Method: "POST", Path: "auth/controller/callback"}
+ var resp arvados.LoginResponse
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
var (
EndpointConfigGet = APIEndpoint{"GET", "arvados/v1/config", ""}
+ EndpointLogin = APIEndpoint{"GET", "login", ""}
EndpointCollectionCreate = APIEndpoint{"POST", "arvados/v1/collections", "collection"}
EndpointCollectionUpdate = APIEndpoint{"PATCH", "arvados/v1/collections/:uuid", "collection"}
EndpointCollectionGet = APIEndpoint{"GET", "arvados/v1/collections/:uuid", ""}
UUID string `json:"uuid"`
}
+type LoginOptions struct {
+ ReturnTo string `json:"return_to"` // On success, redirect to this target with api_token=xxx query param
+ Remote string `json:"remote,omitempty"` // Salt token for remote Cluster ID
+ Code string `json:"code,omitempty"` // OAuth2 callback code
+ State string `json:"state,omitempty"` // OAuth2 callback state
+}
+
type API interface {
ConfigGet(ctx context.Context) (json.RawMessage, error)
+ Login(ctx context.Context, options LoginOptions) (LoginResponse, error)
CollectionCreate(ctx context.Context, options CreateOptions) (Collection, error)
CollectionUpdate(ctx context.Context, options UpdateOptions) (Collection, error)
CollectionGet(ctx context.Context, options GetOptions) (Collection, error)
// arvadosclient.ArvadosClient.)
KeepServiceURIs []string `json:",omitempty"`
+ // HTTP headers to add/override in outgoing requests.
+ SendHeader http.Header
+
dd *DiscoveryDocument
ctx context.Context
return c.httpClient().Do(req)
}
+// isRedirectStatus reports whether code is one of the HTTP 3xx
+// statuses that carry a Location header (301, 302, 303, 307, 308).
+func isRedirectStatus(code int) bool {
+ switch code {
+ case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther, http.StatusTemporaryRedirect, http.StatusPermanentRedirect:
+ return true
+ default:
+ return false
+ }
+}
+
// DoAndDecode performs req and unmarshals the response (which must be
// JSON) into dst. Use this instead of RequestAndDecode if you need
// more control of the http.Request object.
+//
+// If the response status indicates an HTTP redirect, the Location
+// header value is unmarshalled to dst as a RedirectLocation
+// key/field.
func (c *Client) DoAndDecode(dst interface{}, req *http.Request) error {
resp, err := c.Do(req)
if err != nil {
if err != nil {
return err
}
- if resp.StatusCode != 200 {
- return newTransactionError(req, resp, buf)
- }
- if dst == nil {
+ switch {
+ case resp.StatusCode == http.StatusOK && dst == nil:
return nil
+ case resp.StatusCode == http.StatusOK:
+ return json.Unmarshal(buf, dst)
+
+ // If the caller uses a client with a custom CheckRedirect
+ // func, Do() might return the 3xx response instead of
+ // following it.
+ case isRedirectStatus(resp.StatusCode) && dst == nil:
+ return nil
+ case isRedirectStatus(resp.StatusCode):
+ // Copy the redirect target URL to dst.RedirectLocation.
+ buf, err := json.Marshal(map[string]string{"RedirectLocation": resp.Header.Get("Location")})
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(buf, dst)
+
+ default:
+ return newTransactionError(req, resp, buf)
}
- return json.Unmarshal(buf, dst)
}
// Convert an arbitrary struct to url.Values. For example,
}
req = req.WithContext(ctx)
req.Header.Set("Content-type", "application/x-www-form-urlencoded")
+ for k, v := range c.SendHeader {
+ req.Header[k] = v
+ }
return c.DoAndDecode(dst, req)
}
Repositories string
}
Login struct {
- ProviderAppSecret string
+ GoogleClientID string
+ GoogleClientSecret string
ProviderAppID string
+ ProviderAppSecret string
LoginCluster string
RemoteTokenRefresh Duration
}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "bytes"
+ "net/http"
+)
+
+// LoginResponse is the result of a login request: either a redirect
+// target or an HTML page to render.
+type LoginResponse struct {
+ RedirectLocation string // if non-empty, redirect the client here
+ HTML bytes.Buffer // otherwise, send this as the response body
+}
+
+// ServeHTTP writes the login response: a "302 Found" redirect when
+// RedirectLocation is set, otherwise the HTML body (with the default
+// 200 status).
+// NOTE(review): the value receiver copies the bytes.Buffer on each
+// call; harmless here, but a pointer receiver would avoid the copy.
+func (resp LoginResponse) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if resp.RedirectLocation != "" {
+ w.Header().Set("Location", resp.RedirectLocation)
+ w.WriteHeader(http.StatusFound)
+ } else {
+ w.Write(resp.HTML.Bytes())
+ }
+}
"context"
"encoding/json"
"errors"
+ "net/url"
"reflect"
"runtime"
"sync"
mtx sync.Mutex
}
+// BaseURL implements federation.backend
+func (as *APIStub) BaseURL() url.URL {
+ return url.URL{Scheme: "https", Host: "apistub.example.com"}
+}
func (as *APIStub) ConfigGet(ctx context.Context) (json.RawMessage, error) {
as.appendCall(as.ConfigGet, ctx, nil)
return nil, as.Error
}
+func (as *APIStub) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
+ as.appendCall(as.Login, ctx, options)
+ return arvados.LoginResponse{}, as.Error
+}
func (as *APIStub) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {
as.appendCall(as.CollectionCreate, ctx, options)
return arvados.Collection{}, as.Error
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+import (
+ "crypto/tls"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/http/httputil"
+ "net/url"
+ "time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "gopkg.in/check.v1"
+)
+
+// Proxy is a reverse proxy for a single backend service that records
+// a dump of every request it forwards, for inspection by tests.
+type Proxy struct {
+	*httptest.Server
+
+	// URL where the proxy is listening. Same as Server.URL, but
+	// with parsing already done for you.
+	URL *url.URL
+
+	// A dump of each request that has been proxied.
+	RequestDumps [][]byte
+}
+
+// NewProxy returns a new Proxy that saves a dump of each request
+// before forwarding to the indicated service. The service must have
+// exactly one InternalURL. TLS certificate verification is disabled
+// so self-signed test servers work.
+func NewProxy(c *check.C, svc arvados.Service) *Proxy {
+	var target url.URL
+	c.Assert(svc.InternalURLs, check.HasLen, 1)
+	for u := range svc.InternalURLs {
+		target = url.URL(u)
+		break
+	}
+	rp := httputil.NewSingleHostReverseProxy(&target)
+	rp.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
+		// Log the failing request (headers only) to help debug
+		// test failures.
+		dump, _ := httputil.DumpRequest(r, false)
+		c.Logf("arvadostest.Proxy ErrorHandler(%s): %s\n%s", r.URL, err, dump)
+		http.Error(w, err.Error(), http.StatusBadGateway)
+	}
+	rp.Transport = &http.Transport{
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).DialContext,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+		TLSClientConfig:       &tls.Config{InsecureSkipVerify: true},
+	}
+	srv := httptest.NewServer(rp)
+	u, err := url.Parse(srv.URL)
+	c.Assert(err, check.IsNil)
+	proxy := &Proxy{
+		Server: srv,
+		URL:    u,
+	}
+	// Director runs on every proxied request: record a full dump
+	// (including body), then rewrite the target URL.
+	rp.Director = func(r *http.Request) {
+		dump, _ := httputil.DumpRequest(r, true)
+		proxy.RequestDumps = append(proxy.RequestDumps, dump)
+		r.URL.Scheme = target.Scheme
+		r.URL.Host = target.Host
+	}
+	return proxy
+}
BaseApiClient(ConfigProvider config) {
this.config = config;
- client = OkHttpClientFactory.builder()
- .build()
- .create(config.isApiHostInsecure());
+ this.client = OkHttpClientFactory.INSTANCE.create(config.isApiHostInsecure());
}
Request.Builder getRequestBuilder() {
package org.arvados.client.api.client.factory;
+import com.google.common.base.Suppliers;
import okhttp3.OkHttpClient;
import org.arvados.client.exception.ArvadosClientException;
import org.slf4j.Logger;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
+import java.util.function.Supplier;
-public class OkHttpClientFactory {
-
+/**
+ * {@link OkHttpClient} instance factory that builds and configures client instances sharing
+ * the common resource pool: this is the recommended approach to optimize resource usage.
+ */
+public final class OkHttpClientFactory {
+ public static final OkHttpClientFactory INSTANCE = new OkHttpClientFactory();
private final Logger log = org.slf4j.LoggerFactory.getLogger(OkHttpClientFactory.class);
+ private final OkHttpClient clientSecure = new OkHttpClient();
+ private final Supplier<OkHttpClient> clientUnsecure =
+ Suppliers.memoize(this::getDefaultClientAcceptingAllCertificates);
+
+ private OkHttpClientFactory() { /* singleton */}
- OkHttpClientFactory() {
+ public OkHttpClient create(boolean apiHostInsecure) {
+ return apiHostInsecure ? getDefaultUnsecureClient() : getDefaultClient();
}
- public static OkHttpClientFactoryBuilder builder() {
- return new OkHttpClientFactoryBuilder();
+ /**
+ * @return default secure {@link OkHttpClient} with shared resource pool.
+ */
+ public OkHttpClient getDefaultClient() {
+ return clientSecure;
}
- public OkHttpClient create(boolean apiHostInsecure) {
- OkHttpClient.Builder builder = new OkHttpClient.Builder();
- if (apiHostInsecure) {
- trustAllCertificates(builder);
- }
- return builder.build();
+ /**
+ * @return default {@link OkHttpClient} with shared resource pool
+ * that will accept all SSL certificates by default.
+ */
+ public OkHttpClient getDefaultUnsecureClient() {
+ return clientUnsecure.get();
+ }
+
+ /**
+ * @return default {@link OkHttpClient.Builder} with shared resource pool.
+ */
+ public OkHttpClient.Builder getDefaultClientBuilder() {
+ return clientSecure.newBuilder();
+ }
+
+ /**
+ * @return default {@link OkHttpClient.Builder} with shared resource pool
+ * that is preconfigured to accept all SSL certificates.
+ */
+ public OkHttpClient.Builder getDefaultUnsecureClientBuilder() {
+ return clientUnsecure.get().newBuilder();
}
- private void trustAllCertificates(OkHttpClient.Builder builder) {
+ private OkHttpClient getDefaultClientAcceptingAllCertificates() {
log.warn("Creating unsafe OkHttpClient. All SSL certificates will be accepted.");
try {
// Create a trust manager that does not validate certificate chains
- final TrustManager[] trustAllCerts = new TrustManager[] { createX509TrustManager() };
+ final TrustManager[] trustAllCerts = {createX509TrustManager()};
// Install the all-trusting trust manager
SSLContext sslContext = SSLContext.getInstance("SSL");
// Create an ssl socket factory with our all-trusting manager
final SSLSocketFactory sslSocketFactory = sslContext.getSocketFactory();
+ // Create the OkHttpClient.Builder with shared resource pool
+ final OkHttpClient.Builder builder = clientSecure.newBuilder();
builder.sslSocketFactory(sslSocketFactory, (X509TrustManager) trustAllCerts[0]);
builder.hostnameVerifier((hostname, session) -> true);
+ return builder.build();
} catch (NoSuchAlgorithmException | KeyManagementException e) {
throw new ArvadosClientException("Error establishing SSL context", e);
}
private static X509TrustManager createX509TrustManager() {
return new X509TrustManager() {
-
+
@Override
- public void checkClientTrusted(X509Certificate[] chain, String authType) {}
+ public void checkClientTrusted(X509Certificate[] chain, String authType) {
+ }
@Override
- public void checkServerTrusted(X509Certificate[] chain, String authType) {}
+ public void checkServerTrusted(X509Certificate[] chain, String authType) {
+ }
@Override
public X509Certificate[] getAcceptedIssuers() {
- return new X509Certificate[] {};
+ return new X509Certificate[]{};
}
};
}
-
- public static class OkHttpClientFactoryBuilder {
- OkHttpClientFactoryBuilder() {
- }
-
- public OkHttpClientFactory build() {
- return new OkHttpClientFactory();
- }
-
- public String toString() {
- return "OkHttpClientFactory.OkHttpClientFactoryBuilder()";
- }
- }
}
return collectionsApiClient.list(listArgument);
}
+ /**
+ * Gets project details by uuid.
+ *
+ * @param projectUuid uuid of project
+ * @return Group object containing information about project
+ */
+ public Group getProjectByUuid(String projectUuid) {
+ Group project = groupsApiClient.get(projectUuid);
+ log.debug("Retrieved " + project.getName() + " with UUID: " + project.getUuid());
+ return project;
+ }
+
/**
* Creates new project that will be a subproject of "home" for current user.
*
public void secureOkHttpClientIsCreated() throws Exception {
// given
- OkHttpClientFactory factory = OkHttpClientFactory.builder().build();
+ OkHttpClientFactory factory = OkHttpClientFactory.INSTANCE;
// * configure HTTPS server
SSLSocketFactory sf = getSSLSocketFactoryWithSelfSignedCertificate();
server.useHttps(sf, false);
@Test
public void insecureOkHttpClientIsCreated() throws Exception {
// given
- OkHttpClientFactory factory = OkHttpClientFactory.builder().build();
+ OkHttpClientFactory factory = OkHttpClientFactory.INSTANCE;
// * configure HTTPS server
SSLSocketFactory sf = getSSLSocketFactoryWithSelfSignedCertificate();
server.useHttps(sf, false);
raise "Local login disabled when LoginCluster is set"
end
- omniauth = request.env['omniauth.auth']
+ if params[:provider] == 'controller'
+ if request.headers['Authorization'] != 'Bearer ' + Rails.configuration.SystemRootToken
+ return send_error('Invalid authorization header', status: 401)
+ end
+ # arvados-controller verified the user and is passing auth_info
+ # in request params.
+ authinfo = SafeJSON.load(params[:auth_info])
+ else
+ # omniauth middleware verified the user and is passing auth_info
+ # in request.env.
+ authinfo = request.env['omniauth.auth']['info'].with_indifferent_access
+ end
begin
- user = User.register omniauth['info']
+ user = User.register(authinfo)
rescue => e
Rails.logger.warn e
return redirect_to login_failure_url
user.save or raise Exception.new(user.errors.messages)
- omniauth.delete('extra')
-
# Give the authenticated user a cookie for direct API access
session[:user_id] = user.id
session[:api_client_uuid] = nil
t.add :url_prefix
t.add :is_trusted
end
+
+  # An API client is trusted if the stored is_trusted flag is set, or
+  # if its url_prefix matches the configured Workbench1/Workbench2
+  # ExternalURL after normalization (see norm below).
+  def is_trusted
+ norm(self.url_prefix) == norm(Rails.configuration.Services.Workbench1.ExternalURL) ||
+ norm(self.url_prefix) == norm(Rails.configuration.Services.Workbench2.ExternalURL) ||
+ super
+ end
+
+ protected
+
+ def norm url
+ # normalize URL for comparison
+ url = URI(url)
+ if url.scheme == "https"
+ url.port == "443"
+ end
+ if url.scheme == "http"
+ url.port == "80"
+ end
+ url.path = "/"
+ url
+ end
end
# alternate_emails
# identity_url
- info = info.with_indifferent_access
-
primary_user = nil
# local database
arvcfg = ConfigLoader.new
arvcfg.declare_config "ClusterID", NonemptyString, :uuid_prefix
arvcfg.declare_config "ManagementToken", String, :ManagementToken
+arvcfg.declare_config "SystemRootToken", String
arvcfg.declare_config "Git.Repositories", String, :git_repositories_dir
arvcfg.declare_config "API.DisabledAPIs", Hash, :disable_api_methods, ->(cfg, k, v) { arrayToHash cfg, "API.DisabledAPIs", v }
arvcfg.declare_config "API.MaxRequestSize", Integer, :max_request_size
arvcfg.declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from
arvcfg.declare_config "Users.NewUserNotificationRecipients", Hash, :new_user_notification_recipients, ->(cfg, k, v) { arrayToHash cfg, "Users.NewUserNotificationRecipients", v }
arvcfg.declare_config "Users.NewInactiveUserNotificationRecipients", Hash, :new_inactive_user_notification_recipients, method(:arrayToHash)
-arvcfg.declare_config "Login.ProviderAppSecret", NonemptyString, :sso_app_secret
-arvcfg.declare_config "Login.ProviderAppID", NonemptyString, :sso_app_id
+arvcfg.declare_config "Login.ProviderAppSecret", String, :sso_app_secret
+arvcfg.declare_config "Login.ProviderAppID", String, :sso_app_id
arvcfg.declare_config "Login.LoginCluster", String
arvcfg.declare_config "Login.RemoteTokenRefresh", ActiveSupport::Duration
arvcfg.declare_config "TLS.Insecure", Boolean, :sso_insecure
assert_nil assigns(:api_client)
end
+ test "controller cannot create session without SystemRootToken" do
+ get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: ',https://app.example'}
+ assert_response 401
+ end
+
+ test "controller cannot create session with wrong SystemRootToken" do
+ @request.headers['Authorization'] = 'Bearer blah'
+ get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: ',https://app.example'}
+ assert_response 401
+ end
+
+ test "controller can create session using SystemRootToken" do
+ @request.headers['Authorization'] = 'Bearer '+Rails.configuration.SystemRootToken
+ get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: ',https://app.example'}
+ assert_response :redirect
+ api_client_auth = assigns(:api_client_auth)
+ assert_not_nil api_client_auth
+ assert_includes(@response.redirect_url, 'api_token='+api_client_auth.token)
+ end
end
require 'test_helper'
class ApiClientTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
+ include CurrentApiClient
+
+ test "configured workbench is trusted" do
+ Rails.configuration.Services.Workbench1.ExternalURL = URI("http://wb1.example.com")
+ Rails.configuration.Services.Workbench2.ExternalURL = URI("https://wb2.example.com:443")
+
+ act_as_system_user do
+ [["http://wb0.example.com", false],
+ ["http://wb1.example.com", true],
+ ["http://wb2.example.com", false],
+ ["https://wb2.example.com", true],
+ ["https://wb2.example.com/", true],
+ ].each do |pfx, result|
+ a = ApiClient.create(url_prefix: pfx, is_trusted: false)
+ assert_equal result, a.is_trusted
+ end
+
+ a = ApiClient.create(url_prefix: "http://example.com", is_trusted: true)
+ a.save!
+ a.reload
+ assert a.is_trusted
+ end
+ end
end
Description=Arvados Docker Image Cleaner
Documentation=https://doc.arvados.org/
After=network.target
-#AssertPathExists=/etc/arvados/docker-cleaner/docker-cleaner.json
# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
StartLimitInterval=0
changeNone: "none",
}
+// balancedBlockState summarizes one block's replica slots, either
+// for a single storage class or for all slots combined.
+type balancedBlockState struct {
+ needed int // replicas that exist and should be kept
+ unneeded int // replicas that exist but are not wanted
+ pulling int // replicas being pulled to new slots
+ unachievable bool // achievable replication < desired replication
+}
+
type balanceResult struct {
blk *BlockState
blkid arvados.SizedDigest
- have int
- want int
+ lost bool
+ blockState balancedBlockState
classState map[string]balancedBlockState
}
+// slot pairs a mounted volume with the block replica (if any) it
+// holds, plus the balancer's decision for that volume.
+type slot struct {
+ mnt *KeepMount // never nil
+ repl *Replica // replica already stored here (or nil)
+ want bool // we should pull/leave a replica here
+}
+
// balanceBlock compares current state to desired state for a single
// block, and makes the appropriate ChangeSet calls.
func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) balanceResult {
bal.Logger.Debugf("balanceBlock: %v %+v", blkid, blk)
- type slot struct {
- mnt *KeepMount // never nil
- repl *Replica // replica already stored here (or nil)
- want bool // we should pull/leave a replica here
- }
-
// Build a list of all slots (one per mounted volume).
slots := make([]slot, 0, bal.mounts)
for _, srv := range bal.KeepServices {
// won't want to trash any replicas.
underreplicated := false
- classState := make(map[string]balancedBlockState, len(bal.classes))
unsafeToDelete := make(map[int64]bool, len(slots))
for _, class := range bal.classes {
desired := blk.Desired[class]
-
- countedDev := map[string]bool{}
- have := 0
- for _, slot := range slots {
- if slot.repl != nil && bal.mountsByClass[class][slot.mnt] && !countedDev[slot.mnt.DeviceID] {
- have += slot.mnt.Replication
- if slot.mnt.DeviceID != "" {
- countedDev[slot.mnt.DeviceID] = true
- }
- }
- }
- classState[class] = balancedBlockState{
- desired: desired,
- surplus: have - desired,
- }
-
if desired == 0 {
continue
}
underreplicated = safe < desired
}
- // set the unachievable flag if there aren't enough
- // slots offering the relevant storage class. (This is
- // as easy as checking slots[desired] because we
- // already sorted the qualifying slots to the front.)
- if desired >= len(slots) || !bal.mountsByClass[class][slots[desired].mnt] {
- cs := classState[class]
- cs.unachievable = true
- classState[class] = cs
- }
-
// Avoid deleting wanted replicas from devices that
// are mounted on multiple servers -- even if they
// haven't already been added to unsafeToDelete
// replica that doesn't have a timestamp collision with
// others.
- countedDev := map[string]bool{}
- var have, want int
- for _, slot := range slots {
- if countedDev[slot.mnt.DeviceID] {
- continue
- }
- if slot.want {
- want += slot.mnt.Replication
- }
- if slot.repl != nil {
- have += slot.mnt.Replication
- }
- if slot.mnt.DeviceID != "" {
- countedDev[slot.mnt.DeviceID] = true
+ for i, slot := range slots {
+ // Don't trash (1) any replicas of an underreplicated
+ // block, even if they're in the wrong positions, or
+ // (2) any replicas whose Mtimes are identical to
+ // needed replicas (in case we're really seeing the
+ // same copy via different mounts).
+ if slot.repl != nil && (underreplicated || unsafeToDelete[slot.repl.Mtime]) {
+ slots[i].want = true
}
}
+ classState := make(map[string]balancedBlockState, len(bal.classes))
+ for _, class := range bal.classes {
+ classState[class] = computeBlockState(slots, bal.mountsByClass[class], len(blk.Replicas), blk.Desired[class])
+ }
+ blockState := computeBlockState(slots, nil, len(blk.Replicas), 0)
+
+ var lost bool
var changes []string
for _, slot := range slots {
// TODO: request a Touch if Mtime is duplicated.
var change int
switch {
- case !underreplicated && !slot.want && slot.repl != nil && slot.repl.Mtime < bal.MinMtime && !unsafeToDelete[slot.repl.Mtime]:
+ case !slot.want && slot.repl != nil && slot.repl.Mtime < bal.MinMtime:
slot.mnt.KeepService.AddTrash(Trash{
SizedDigest: blkid,
Mtime: slot.repl.Mtime,
From: slot.mnt,
})
change = changeTrash
- case len(blk.Replicas) > 0 && slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
+ case slot.repl == nil && slot.want && len(blk.Replicas) == 0:
+ lost = true
+ change = changeNone
+ case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
slot.mnt.KeepService.AddPull(Pull{
SizedDigest: blkid,
From: blk.Replicas[0].KeepMount.KeepService,
}
}
if bal.Dumper != nil {
- bal.Dumper.Printf("%s refs=%d have=%d want=%v %v %v", blkid, blk.RefCount, have, want, blk.Desired, changes)
+ bal.Dumper.Printf("%s refs=%d needed=%d unneeded=%d pulling=%v %v %v", blkid, blk.RefCount, blockState.needed, blockState.unneeded, blockState.pulling, blk.Desired, changes)
}
return balanceResult{
blk: blk,
blkid: blkid,
- have: have,
- want: want,
+ lost: lost,
+ blockState: blockState,
classState: classState,
}
}
+// computeBlockState tallies the given slots into a balancedBlockState.
+// If onlyCount is non-nil, only slots on those mounts are counted
+// (i.e., one storage class). Slots sharing a DeviceID are counted
+// once, since they are presumably the same underlying replica seen
+// via different mounts. have is the number of existing replicas of
+// the block; needRepl is the desired replication level used to set
+// the unachievable flag.
+func computeBlockState(slots []slot, onlyCount map[*KeepMount]bool, have, needRepl int) (bbs balancedBlockState) {
+ repl := 0
+ countedDev := map[string]bool{}
+ for _, slot := range slots {
+ if onlyCount != nil && !onlyCount[slot.mnt] {
+ continue
+ }
+ if countedDev[slot.mnt.DeviceID] {
+ continue
+ }
+ switch {
+ case slot.repl != nil && slot.want:
+ bbs.needed++
+ repl += slot.mnt.Replication
+ case slot.repl != nil && !slot.want:
+ bbs.unneeded++
+ repl += slot.mnt.Replication
+ case slot.repl == nil && slot.want && have > 0:
+ // Only counts as "pulling" if a source replica exists
+ // somewhere to pull from.
+ bbs.pulling++
+ repl += slot.mnt.Replication
+ }
+ if slot.mnt.DeviceID != "" {
+ countedDev[slot.mnt.DeviceID] = true
+ }
+ }
+ if repl < needRepl {
+ bbs.unachievable = true
+ }
+ return
+}
+
type blocksNBytes struct {
replicas int
blocks int
return fmt.Sprintf("%d replicas (%d blocks, %d bytes)", bb.replicas, bb.blocks, bb.bytes)
}
+type replicationStats struct {
+ needed blocksNBytes
+ unneeded blocksNBytes
+ pulling blocksNBytes
+ unachievable blocksNBytes
+}
+
type balancerStats struct {
lost blocksNBytes
overrep blocksNBytes
return float64(s.collectionBlockRefs) / float64(s.collectionBlocks)
}
-type replicationStats struct {
- desired blocksNBytes
- surplus blocksNBytes
- short blocksNBytes
- unachievable blocksNBytes
-}
-
-type balancedBlockState struct {
- desired int
- surplus int
- unachievable bool
-}
-
func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
var s balancerStats
s.replHistogram = make([]int, 2)
s.classStats = make(map[string]replicationStats, len(bal.classes))
for result := range results {
- surplus := result.have - result.want
bytes := result.blkid.Size()
if rc := int64(result.blk.RefCount); rc > 0 {
for class, state := range result.classState {
cs := s.classStats[class]
if state.unachievable {
+ cs.unachievable.replicas++
cs.unachievable.blocks++
cs.unachievable.bytes += bytes
}
- if state.desired > 0 {
- cs.desired.replicas += state.desired
- cs.desired.blocks++
- cs.desired.bytes += bytes * int64(state.desired)
+ if state.needed > 0 {
+ cs.needed.replicas += state.needed
+ cs.needed.blocks++
+ cs.needed.bytes += bytes * int64(state.needed)
}
- if state.surplus > 0 {
- cs.surplus.replicas += state.surplus
- cs.surplus.blocks++
- cs.surplus.bytes += bytes * int64(state.surplus)
- } else if state.surplus < 0 {
- cs.short.replicas += -state.surplus
- cs.short.blocks++
- cs.short.bytes += bytes * int64(-state.surplus)
+ if state.unneeded > 0 {
+ cs.unneeded.replicas += state.unneeded
+ cs.unneeded.blocks++
+ cs.unneeded.bytes += bytes * int64(state.unneeded)
+ }
+ if state.pulling > 0 {
+ cs.pulling.replicas += state.pulling
+ cs.pulling.blocks++
+ cs.pulling.bytes += bytes * int64(state.pulling)
}
s.classStats[class] = cs
}
+ bs := result.blockState
switch {
- case result.have == 0 && result.want > 0:
- s.lost.replicas -= surplus
+ case result.lost:
+ s.lost.replicas++
s.lost.blocks++
- s.lost.bytes += bytes * int64(-surplus)
+ s.lost.bytes += bytes
fmt.Fprintf(bal.lostBlocks, "%s", strings.SplitN(string(result.blkid), "+", 2)[0])
for pdh := range result.blk.Refs {
fmt.Fprintf(bal.lostBlocks, " %s", pdh)
}
fmt.Fprint(bal.lostBlocks, "\n")
- case surplus < 0:
- s.underrep.replicas -= surplus
+ case bs.pulling > 0:
+ s.underrep.replicas += bs.pulling
+ s.underrep.blocks++
+ s.underrep.bytes += bytes * int64(bs.pulling)
+ case bs.unachievable:
+ s.underrep.replicas++
s.underrep.blocks++
- s.underrep.bytes += bytes * int64(-surplus)
- case surplus > 0 && result.want == 0:
+ s.underrep.bytes += bytes
+ case bs.unneeded > 0 && bs.needed == 0:
+ // Count as "garbage" if all replicas are old
+ // enough to trash, otherwise count as
+ // "unref".
counter := &s.garbage
for _, r := range result.blk.Replicas {
if r.Mtime >= bal.MinMtime {
break
}
}
- counter.replicas += surplus
+ counter.replicas += bs.unneeded
counter.blocks++
- counter.bytes += bytes * int64(surplus)
- case surplus > 0:
- s.overrep.replicas += surplus
+ counter.bytes += bytes * int64(bs.unneeded)
+ case bs.unneeded > 0:
+ s.overrep.replicas += bs.unneeded
s.overrep.blocks++
- s.overrep.bytes += bytes * int64(result.have-result.want)
+ s.overrep.bytes += bytes * int64(bs.unneeded)
default:
- s.justright.replicas += result.want
+ s.justright.replicas += bs.needed
s.justright.blocks++
- s.justright.bytes += bytes * int64(result.want)
+ s.justright.bytes += bytes * int64(bs.needed)
}
- if result.want > 0 {
- s.desired.replicas += result.want
+ if bs.needed > 0 {
+ s.desired.replicas += bs.needed
s.desired.blocks++
- s.desired.bytes += bytes * int64(result.want)
+ s.desired.bytes += bytes * int64(bs.needed)
}
- if result.have > 0 {
- s.current.replicas += result.have
+ if bs.needed+bs.unneeded > 0 {
+ s.current.replicas += bs.needed + bs.unneeded
s.current.blocks++
- s.current.bytes += bytes * int64(result.have)
+ s.current.bytes += bytes * int64(bs.needed+bs.unneeded)
}
- for len(s.replHistogram) <= result.have {
+ for len(s.replHistogram) <= bs.needed+bs.unneeded {
s.replHistogram = append(s.replHistogram, 0)
}
- s.replHistogram[result.have]++
+ s.replHistogram[bs.needed+bs.unneeded]++
}
for _, srv := range bal.KeepServices {
s.pulls += len(srv.ChangeSet.Pulls)
for _, class := range bal.classes {
cs := bal.stats.classStats[class]
bal.logf("===")
- bal.logf("storage class %q: %s desired", class, cs.desired)
- bal.logf("storage class %q: %s short", class, cs.short)
- bal.logf("storage class %q: %s surplus", class, cs.surplus)
+ bal.logf("storage class %q: %s needed", class, cs.needed)
+ bal.logf("storage class %q: %s unneeded", class, cs.unneeded)
+ bal.logf("storage class %q: %s pulling", class, cs.pulling)
bal.logf("storage class %q: %s unachievable", class, cs.unachievable)
}
bal.logf("===")
}
func (bal *Balancer) printHistogram(hashColumns int) {
- bal.logf("Replication level distribution (counting N replicas on a single server as N):")
+ bal.logf("Replication level distribution:")
maxCount := 0
for _, count := range bal.stats.replHistogram {
if maxCount < count {
shouldPullMounts []string
shouldTrashMounts []string
- expectResult balanceResult
+ expectBlockState *balancedBlockState
+ expectClassState map[string]balancedBlockState
}
func (bal *balancerSuite) SetUpSuite(c *check.C) {
desired: map[string]int{"default": 2},
current: slots{0, 1},
shouldPull: nil,
- shouldTrash: nil})
+ shouldTrash: nil,
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ }})
}
func (bal *balancerSuite) TestDecreaseRepl(c *check.C) {
bal.try(c, tester{
desired: map[string]int{"default": 2},
current: slots{0, 2, 1},
- shouldTrash: slots{2}})
+ shouldTrash: slots{2},
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ unneeded: 1,
+ }})
}
func (bal *balancerSuite) TestDecreaseReplToZero(c *check.C) {
bal.try(c, tester{
desired: map[string]int{"default": 0},
current: slots{0, 1, 3},
- shouldTrash: slots{0, 1, 3}})
+ shouldTrash: slots{0, 1, 3},
+ expectBlockState: &balancedBlockState{
+ unneeded: 3,
+ }})
}
func (bal *balancerSuite) TestIncreaseRepl(c *check.C) {
bal.try(c, tester{
desired: map[string]int{"default": 4},
current: slots{0, 1},
- shouldPull: slots{2, 3}})
+ shouldPull: slots{2, 3},
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ pulling: 2,
+ }})
}
func (bal *balancerSuite) TestSkipReadonly(c *check.C) {
bal.try(c, tester{
desired: map[string]int{"default": 4},
current: slots{0, 1},
- shouldPull: slots{2, 4}})
+ shouldPull: slots{2, 4},
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ pulling: 2,
+ }})
}
func (bal *balancerSuite) TestMultipleViewsReadOnly(c *check.C) {
desired: map[string]int{"default": 2},
current: slots{0, 1, 2},
timestamps: []int64{oldTime, newTime, newTime + 1},
- expectResult: balanceResult{
- have: 3,
- want: 2,
- classState: map[string]balancedBlockState{"default": {
- desired: 2,
- surplus: 1,
- unachievable: false}}}})
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ unneeded: 1,
+ }})
// The best replicas are too new to delete, but the excess
// replica is old enough.
bal.try(c, tester{
known: 0,
desired: map[string]int{"default": 2},
current: slots{1},
- shouldPull: slots{0}})
+ shouldPull: slots{0},
+ expectBlockState: &balancedBlockState{
+ needed: 1,
+ pulling: 1,
+ }})
bal.try(c, tester{
known: 0,
desired: map[string]int{"default": 2},
current: slots{0, 1},
- shouldPull: nil})
+ shouldPull: nil,
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ }})
bal.try(c, tester{
known: 0,
desired: map[string]int{"default": 2},
current: slots{0, 1, 2},
- shouldTrash: slots{2}})
+ shouldTrash: slots{2},
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ unneeded: 1,
+ }})
bal.try(c, tester{
known: 0,
desired: map[string]int{"default": 3},
current: slots{0, 2, 3, 4},
shouldPull: slots{1},
shouldTrash: slots{4},
- expectResult: balanceResult{
- have: 4,
- want: 3,
- classState: map[string]balancedBlockState{"default": {
- desired: 3,
- surplus: 1,
- unachievable: false}}}})
+ expectBlockState: &balancedBlockState{
+ needed: 3,
+ unneeded: 1,
+ pulling: 1,
+ }})
bal.try(c, tester{
known: 0,
desired: map[string]int{"default": 3},
current: slots{0, 1, 2, 3, 4},
- shouldTrash: slots{2, 3, 4}})
+ shouldTrash: slots{2, 3, 4},
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ unneeded: 3,
+ }})
bal.try(c, tester{
known: 0,
desired: map[string]int{"default": 4},
current: slots{0, 1, 2, 3, 4},
shouldTrash: slots{3, 4},
- expectResult: balanceResult{
- have: 6,
- want: 4,
- classState: map[string]balancedBlockState{"default": {
- desired: 4,
- surplus: 2,
- unachievable: false}}}})
+ expectBlockState: &balancedBlockState{
+ needed: 3,
+ unneeded: 2,
+ }})
// block 1 rendezvous is 0,9,7 -- so slot 0 has repl=2
bal.try(c, tester{
known: 1,
desired: map[string]int{"default": 2},
current: slots{0},
- expectResult: balanceResult{
- have: 2,
- want: 2,
- classState: map[string]balancedBlockState{"default": {
- desired: 2,
- surplus: 0,
- unachievable: false}}}})
+ expectBlockState: &balancedBlockState{
+ needed: 1,
+ }})
bal.try(c, tester{
known: 1,
desired: map[string]int{"default": 3},
current: slots{0},
- shouldPull: slots{1}})
+ shouldPull: slots{1},
+ expectBlockState: &balancedBlockState{
+ needed: 1,
+ pulling: 1,
+ }})
bal.try(c, tester{
known: 1,
desired: map[string]int{"default": 4},
current: slots{0},
- shouldPull: slots{1, 2}})
+ shouldPull: slots{1, 2},
+ expectBlockState: &balancedBlockState{
+ needed: 1,
+ pulling: 2,
+ }})
bal.try(c, tester{
known: 1,
desired: map[string]int{"default": 4},
current: slots{2},
- shouldPull: slots{0, 1}})
+ shouldPull: slots{0, 1},
+ expectBlockState: &balancedBlockState{
+ needed: 1,
+ pulling: 2,
+ }})
bal.try(c, tester{
known: 1,
desired: map[string]int{"default": 4},
current: slots{7},
shouldPull: slots{0, 1, 2},
- expectResult: balanceResult{
- have: 1,
- want: 4,
- classState: map[string]balancedBlockState{"default": {
- desired: 4,
- surplus: -3,
- unachievable: false}}}})
+ expectBlockState: &balancedBlockState{
+ needed: 1,
+ pulling: 3,
+ }})
bal.try(c, tester{
known: 1,
desired: map[string]int{"default": 2},
current: slots{1, 2, 3, 4},
shouldPull: slots{0},
- shouldTrash: slots{3, 4}})
+ shouldTrash: slots{3, 4},
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ unneeded: 2,
+ pulling: 1,
+ }})
bal.try(c, tester{
known: 1,
desired: map[string]int{"default": 2},
current: slots{0, 1, 2},
shouldTrash: slots{1, 2},
- expectResult: balanceResult{
- have: 4,
- want: 2,
- classState: map[string]balancedBlockState{"default": {
- desired: 2,
- surplus: 2,
- unachievable: false}}}})
+ expectBlockState: &balancedBlockState{
+ needed: 1,
+ unneeded: 2,
+ }})
}
func (bal *balancerSuite) TestDeviceRWMountedByMultipleServers(c *check.C) {
desired: map[string]int{"default": 2},
current: slots{1, 9},
shouldPull: slots{0},
- expectResult: balanceResult{
- have: 1,
- classState: map[string]balancedBlockState{"default": {
- desired: 2,
- surplus: -1,
- unachievable: false}}}})
+ expectBlockState: &balancedBlockState{
+ needed: 1,
+ pulling: 1,
+ }})
// block 0 is overreplicated, but the second and third
// replicas are the same replica according to DeviceID
// (despite different Mtimes). Don't trash the third replica.
known: 0,
desired: map[string]int{"default": 2},
current: slots{0, 1, 9},
- expectResult: balanceResult{
- have: 2,
- classState: map[string]balancedBlockState{"default": {
- desired: 2,
- surplus: 0,
- unachievable: false}}}})
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ }})
// block 0 is overreplicated; the third and fifth replicas are
// extra, but the fourth is another view of the second and
// shouldn't be trashed.
desired: map[string]int{"default": 2},
current: slots{0, 1, 5, 9, 12},
shouldTrash: slots{5, 12},
- expectResult: balanceResult{
- have: 4,
- classState: map[string]balancedBlockState{"default": {
- desired: 2,
- surplus: 2,
- unachievable: false}}}})
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ unneeded: 2,
+ }})
}
func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
sort.Strings(didTrashMounts)
c.Check(didTrashMounts, check.DeepEquals, t.shouldTrashMounts)
}
- if t.expectResult.have > 0 {
- c.Check(result.have, check.Equals, t.expectResult.have)
- }
- if t.expectResult.want > 0 {
- c.Check(result.want, check.Equals, t.expectResult.want)
+ if t.expectBlockState != nil {
+ c.Check(result.blockState, check.Equals, *t.expectBlockState)
}
- if t.expectResult.classState != nil {
- c.Check(result.classState, check.DeepEquals, t.expectResult.classState)
+ if t.expectClassState != nil {
+ c.Check(result.classState, check.DeepEquals, t.expectClassState)
}
}
{
"comment": "",
- "ignore": "test",
+ "ignore": "test appengine",
"package": [
{
"checksumSHA1": "jfYWZyRWLMfG0J5K7G2K8a9AKfs=",
"revision": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9",
"revisionTime": "2016-08-04T10:47:26Z"
},
+ {
+ "checksumSHA1": "bNT5FFLDUXSamYK3jGHSwsTJqqo=",
+ "path": "github.com/coreos/go-oidc",
+ "revision": "2be1c5b8a260760503f66dc0996e102b683b3ac3",
+ "revisionTime": "2019-08-15T17:57:29Z"
+ },
{
"checksumSHA1": "+Zz+leZHHC9C0rx8DoRuffSRPso=",
"path": "github.com/coreos/go-systemd/daemon",
"revision": "e881fd58d78e04cf6d0de1217f8707c8cc2249bc",
"revisionTime": "2017-12-16T07:03:16Z"
},
+ {
+ "checksumSHA1": "KxkAlLxQkuSGHH46Dxu6wpAybO4=",
+ "path": "github.com/pquerna/cachecontrol",
+ "revision": "1555304b9b35fdd2b425bccf1a5613677705e7d0",
+ "revisionTime": "2018-05-17T16:36:45Z"
+ },
+ {
+ "checksumSHA1": "wwaht1P9i8vQu6DqNvMEy24IMgY=",
+ "path": "github.com/pquerna/cachecontrol/cacheobject",
+ "revision": "1555304b9b35fdd2b425bccf1a5613677705e7d0",
+ "revisionTime": "2018-05-17T16:36:45Z"
+ },
{
"checksumSHA1": "Ajt29IHVbX99PUvzn8Gc/lMCXBY=",
"path": "github.com/prometheus/client_golang/prometheus",
"revisionTime": "2017-11-10T11:01:46Z"
},
{
- "checksumSHA1": "ySaT8G3I3y4MmnoXOYAAX0rC+p8=",
+ "checksumSHA1": "umeXHK5iK/3th4PtrTkZllezgWo=",
"path": "github.com/sirupsen/logrus",
"revision": "d682213848ed68c0a260ca37d6dd5ace8423f5ba",
"revisionTime": "2017-12-05T20:32:29Z"
"revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
"revisionTime": "2017-11-25T19:00:56Z"
},
+ {
+ "checksumSHA1": "1MGpGDQqnUoRpv7VEcQrXOBydXE=",
+ "path": "golang.org/x/crypto/pbkdf2",
+ "revision": "ae8bce0030810cf999bb2b9868ae5c7c58e6343b",
+ "revisionTime": "2018-04-30T17:54:52Z"
+ },
{
"checksumSHA1": "PJY7uCr3UnX4/Mf/RoWnbieSZ8o=",
"path": "golang.org/x/crypto/pkcs12",
"revision": "434ec0c7fe3742c984919a691b2018a6e9694425",
"revisionTime": "2017-09-25T09:26:47Z"
},
+ {
+ "checksumSHA1": "+33kONpAOtjMyyw0uD4AygLvIXg=",
+ "path": "golang.org/x/oauth2",
+ "revision": "ec22f46f877b4505e0117eeaab541714644fdd28",
+ "revisionTime": "2018-05-28T20:23:04Z"
+ },
+ {
+ "checksumSHA1": "fddd1btmbXxnlMKHUZewlVlSaEQ=",
+ "path": "golang.org/x/oauth2/internal",
+ "revision": "ec22f46f877b4505e0117eeaab541714644fdd28",
+ "revisionTime": "2018-05-28T20:23:04Z"
+ },
{
"checksumSHA1": "znPq37/LZ4pJh7B4Lbu0ZuoMhNk=",
"origin": "github.com/docker/docker/vendor/golang.org/x/sys/unix",
"revision": "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec",
"revisionTime": "2016-12-08T18:13:25Z"
},
+ {
+ "checksumSHA1": "oRfTuL23MIBG2nCwjweTJz4Eiqg=",
+ "path": "gopkg.in/square/go-jose.v2",
+ "revision": "730df5f748271903322feb182be83b43ebbbe27d",
+ "revisionTime": "2019-04-10T21:58:30Z"
+ },
+ {
+ "checksumSHA1": "Ho5sr2GbiR8S35IRni7vC54d5Js=",
+ "path": "gopkg.in/square/go-jose.v2/cipher",
+ "revision": "730df5f748271903322feb182be83b43ebbbe27d",
+ "revisionTime": "2019-04-10T21:58:30Z"
+ },
+ {
+ "checksumSHA1": "JFun0lWY9eqd80Js2iWsehu1gc4=",
+ "path": "gopkg.in/square/go-jose.v2/json",
+ "revision": "730df5f748271903322feb182be83b43ebbbe27d",
+ "revisionTime": "2019-04-10T21:58:30Z"
+ },
{
"checksumSHA1": "GdsHg+yOsZtdMvD9HJFovPsqKec=",
"path": "gopkg.in/src-d/go-billy.v4",