assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar\?" # locator on command
assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/foo" # mount input1
assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/bar" # mount input2
- assert_includes @response.body, "href=\"\/collections/f9ddda46bb293b6847da984e3aa735db+290" # mount workflow
assert_includes @response.body, "href=\"#Log\""
assert_includes @response.body, "href=\"#Provenance\""
end
- architecture/index.html.textile.liquid
- Storage in Keep:
- architecture/storage.html.textile.liquid
+ - architecture/keep-components-overview.html.textile.liquid
- architecture/keep-clients.html.textile.liquid
- architecture/keep-data-lifecycle.html.textile.liquid
- architecture/manifest-format.html.textile.liquid
A regular Workbench "download" link is also accepted, but credentials passed via cookie, header, etc. are ignored. Only public data can be served this way:
pre. http://collections.example.com/collections/uuid_or_pdh/foo/bar.txt
+
+h2(#same-site). Same-site requirements for requests with tokens
+
+Although keep-web doesn't care about the domain part of the URL, clients do, especially when rendering inline content.
+
+When a client passes a token in the URL, keep-web sends a redirect response placing the token in a @Set-Cookie@ header with the @SameSite=Lax@ attribute. The browser will ignore the cookie if it's not coming from a _same-site_ request, and thus its subsequent request will fail with a @401 Unauthorized@ error.
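+
+For example, a schematic sketch of the exchange (the exact cookie name, status code, and URL style are illustrative and may differ between keep-web versions):
+
+pre. > GET /c=xxxxx-4zz18-tci4vn4fa95w0zx/foo.txt?api_token=v2/zzzzz-gj3su-aaaaaaaaaaaaaaa/secret HTTP/1.1
+< HTTP/1.1 303 See Other
+< Location: /c=xxxxx-4zz18-tci4vn4fa95w0zx/foo.txt
+< Set-Cookie: arvados_api_token=...; Path=/; HttpOnly; SameSite=Lax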
+
+This mainly affects Workbench's ability to show inline content, so it should be taken into account when configuring both services' URL schemes.
+
+You can read more about the definition of a _same-site_ request at the "RFC 6265bis-03 page":https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-5.2
\ No newline at end of file
|cwd|string|Initial working directory, given as an absolute path (in the container) or a path relative to the WORKDIR given in the image's Dockerfile.|Required.|
|command|array of strings|Command to execute in the container.|Required. e.g., @["echo","hello"]@|
|output_path|string|Path to a directory or file inside the container that should be preserved as container's output when it finishes. This path must be one of the mount targets. For best performance, point output_path to a writable collection mount. See "Pre-populate output using Mount points":#pre-populate-output for details regarding optional output pre-population using mount points and "Symlinks in output":#symlinks-in-output for additional details.|Required.|
-|output_name|string|Desired name for the output collection. If null, a name will be assigned automatically.||
+|output_name|string|Desired name for the output collection. If null or empty, a name will be assigned automatically.||
|output_ttl|integer|Desired lifetime for the output collection, in seconds. If zero, the output collection will not be deleted automatically.||
|priority|integer|Range 0-1000. Indicates scheduling order preference.|Clients are expected to submit container requests with zero priority in order to preview the container that will be used to satisfy it. Priority can be null if and only if state!="Committed". See "below for more details":#priority .|
|expires_at|datetime|After this time, priority is considered to be zero.|Not yet implemented.|
--- /dev/null
+---
+layout: default
+navsection: architecture
+title: Keep components overview
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Keep has a number of components. This page describes each component and the role it plays.
+
+h3. Keep clients for data access
+
+A client is needed to store data in and retrieve data from Keep. Several types of Keep clients exist:
+* a command line client like "@arv-get@":/user/tutorials/tutorial-keep-get.html#download-using-arv or "@arv-put@":/user/tutorials/tutorial-keep.html#upload-using-command
+* a FUSE mount provided by "@arv-mount@":/user/tutorials/tutorial-keep-mount-gnu-linux.html
+* a WebDAV mount provided by @keep-web@
+* an S3-compatible endpoint provided by @keep-web@
+* programmatic access via the "Arvados SDKs":/sdk/index.html
+
+In essence, these clients all do the same thing: they translate file and directory references into requests for Keep blocks and collection manifests. How Keep clients work, and how they use rendezvous hashing, is described in greater detail in "the next section":/architecture/keep-clients.html.
+
+For example, when a request comes in to read a file from Keep, the client will
+* request the collection object (including its manifest) from the API server
+* look up the file in the collection manifest, and retrieve the hashes of the block(s) that contain its content
+* fetch those blocks from the keepstore(s)
+* return the contents of the file to the requestor
+
+All of those steps are subject to access control, which applies at the level of the collection: in the example above, the API server and the keepstore daemons verify that the client has permission to read the collection, and will reject the request if it does not.
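+
+As an illustration, here is a minimal Go sketch of those steps using the Arvados Go SDK (the collection UUID is a placeholder and error handling is abbreviated; the collection filesystem takes care of looking up the file in the manifest and fetching its blocks):
+
+<notextile>
+<pre><code>package main
+
+import (
+	"fmt"
+	"io/ioutil"
+
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/arvadosclient"
+	"git.arvados.org/arvados.git/sdk/go/keepclient"
+)
+
+func main() {
+	// Client configured from ARVADOS_API_HOST / ARVADOS_API_TOKEN.
+	ac := arvados.NewClientFromEnv()
+	arv, err := arvadosclient.New(ac)
+	if err != nil {
+		panic(err)
+	}
+	kc := keepclient.New(arv)
+
+	// Request the collection object (including its manifest) from
+	// the API server.
+	var coll arvados.Collection
+	err = ac.RequestAndDecode(&coll, "GET", "arvados/v1/collections/zzzzz-4zz18-xxxxxxxxxxxxxxx", nil, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// The collection filesystem resolves the file to its blocks and
+	// fetches them from the keepstore(s).
+	fs, err := coll.FileSystem(ac, kc)
+	if err != nil {
+		panic(err)
+	}
+	f, err := fs.Open("/foo.txt")
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+	buf, err := ioutil.ReadAll(f)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s", buf)
+}
+</code></pre>
+</notextile>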
+
+h3. API server
+
+The API server stores collection objects and all associated metadata. That includes data about where the blocks for a collection are to be stored, e.g. when "storage classes":/admin/storage-classes.html are configured, as well as the desired and confirmed replication count for each block. It also stores the ACLs that control access to the collections. Finally, the API server provides Keep clients with time-based block signatures for access.
+
+h3. Keepstore
+
+The @keepstore@ daemon is Keep's workhorse, the storage server that stores and retrieves data from an underlying storage system. Keepstore exposes an HTTP REST API. Keepstore only handles requests for blocks. Because blocks are content-addressed, they can be written and deleted, but there is no _update_ operation: blocks are immutable.
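+
+Schematically, a simplified sketch of the block API (the @+A...@ suffix on the read locator is a time-limited permission signature issued by the API server; details are illustrative):
+
+pre. PUT /acbd18db4cc2f85cedef654fccc4a4d8 HTTP/1.1     (body: the 3-byte block "foo")
+GET /acbd18db4cc2f85cedef654fccc4a4d8+3+A[signature]@[expiry] HTTP/1.1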
+
+So what happens if the content of a file changes? When a client changes a file, it first writes any new blocks to the keepstore(s). Then, it updates the manifest for the collection the file belongs to with the references to the new blocks.
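+
+For example, a simplified sketch of a one-file collection's manifest (a block locator is the MD5 hash of the block's content followed by its size). If @foo.txt@ contains the three bytes "foo", the manifest reads:
+
+pre. . acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt
+
+After the file is rewritten to contain "bar", the client writes the new block and updates the manifest to reference it:
+
+pre. . 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo.txt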
+
+A keepstore can store its blocks in object storage (S3 or an S3-compatible system, or Azure Blob Storage). It can also store blocks on a POSIX file system. A keepstore can be configured with multiple storage volumes. Each keepstore volume is configured with a replication number; e.g. a POSIX file system backed by a single disk would have a replication factor of 1, while an Azure 'LRS' storage volume could be configured with a replication factor of 3 (that is how many copies LRS stores under the hood, according to the Azure documentation).
+
+By default, Arvados uses a replication factor of 2. See the @DefaultReplication@ configuration parameter in "the configuration reference":https://doc.arvados.org/admin/config.html. Additionally, each collection can be configured with its own replication factor. It's worth noting that it is the responsibility of the Keep clients to make sure that all blocks are stored subject to their desired replica count, which is derived from the collections the blocks belong to. @keepstore@ itself does not provide replication; all it does is store blocks on the volumes it knows about. The @keepproxy@ and @keep-balance@ processes (see below) make sure that blocks are replicated properly.
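+
+A minimal sketch of the relevant entries in the cluster configuration (cluster and volume IDs are placeholders; see the configuration reference above for the authoritative schema):
+
+pre. Clusters:
+  ClusterID:
+    Collections:
+      DefaultReplication: 2
+    Volumes:
+      ClusterID-nyw5e-000000000000000:
+        Driver: S3
+        Replication: 2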
+
+The maximum block size for @keepstore@ is 64 MiB, and Keep clients typically combine small files into larger blocks. In a typical Arvados installation, the majority of blocks stored in Keep will be 64 MiB, though some fraction will be smaller.
+
+h3. Keepproxy
+
+The @keepproxy@ server is a gateway into your Keep storage. Unlike the Keepstore servers, which are only accessible on the local LAN, Keepproxy is suitable for clients located elsewhere on the internet. A client writing through Keepproxy only writes one copy of each block; the Keepproxy server will write additional copies of the data to the Keepstore servers, to fulfill the requested replication factor. Keepproxy also checks API token validity before processing requests.
+
+h3. Keep-web
+
+The @keep-web@ server provides read/write access to files stored in Keep using the HTTP, WebDAV and S3 protocols. This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens.
+
+h3. Keep-balance
+
+Keep is a garbage-collected system. When a block is no longer referenced by any collection manifest in the system, it becomes eligible for garbage collection. Similarly, when the desired replication factor for a block (derived from the default replication factor and the replication factor of any collection(s) the block belongs to) does not match reality, the number of copies stored in the available Keepstore servers needs to be adjusted.
+
+The @keep-balance@ program takes care of these things. It runs as a service, and wakes up periodically to do a scan of the system and send instructions to the Keepstore servers. That process is described in more detail at "Balancing Keep servers":https://doc.arvados.org/admin/keep-balance.html.
There are two approaches to mitigate this.
# The service can tell the browser that all files should go to download instead of in-browser preview, except in situations where an attacker is unlikely to be able to gain access to anything they didn't already have access to.
-# Each each collection served by @keep-web@ is served on its own virtual host. This allows for file with executable content to be displayed in-browser securely. The virtual host embeds the collection uuid or portable data hash in the hostname. For example, a collection with uuid @xxxxx-4zz18-tci4vn4fa95w0zx@ could be served as @xxxxx-4zz18-tci4vn4fa95w0zx.collections.ClusterID.example.com@ . The portable data hash @dd755dbc8d49a67f4fe7dc843e4f10a6+54@ could be served at @dd755dbc8d49a67f4fe7dc843e4f10a6-54.collections.ClusterID.example.com@ . This requires "wildcard DNS record":https://en.wikipedia.org/wiki/Wildcard_DNS_record and "wildcard TLS certificate.":https://en.wikipedia.org/wiki/Wildcard_certificate
+# Each collection served by @keep-web@ is served on its own virtual host. This allows files with executable content to be displayed in-browser securely. The virtual host embeds the collection uuid or portable data hash in the hostname. For example, a collection with uuid @xxxxx-4zz18-tci4vn4fa95w0zx@ could be served as @xxxxx-4zz18-tci4vn4fa95w0zx.collections.ClusterID.example.com@ . The portable data hash @dd755dbc8d49a67f4fe7dc843e4f10a6+54@ could be served at @dd755dbc8d49a67f4fe7dc843e4f10a6-54.collections.ClusterID.example.com@ . This requires a "wildcard DNS record":https://en.wikipedia.org/wiki/Wildcard_DNS_record and a "wildcard TLS certificate.":https://en.wikipedia.org/wiki/Wildcard_certificate
h3. Collections download URL
Note the trailing slash.
+{% include 'notebox_begin' %}
+Whether you choose to serve collections from their own subdomain or from a single domain, it's important to keep in mind that they should be served from the same _site_ as Workbench for the inline previews to work.
+
+Please check "keep-web's URL pattern guide":/api/keep-web-urls.html#same-site to learn more.
+{% include 'notebox_end' %}
+
h2. Set InternalURLs
<notextile>
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+ "context"
+ "net/url"
+
+ "git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/lib/service"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadosclient"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/keepclient"
+ "gopkg.in/check.v1"
+)
+
+// TestCluster stores the data of a working test cluster.
+type TestCluster struct {
+ Super Supervisor
+ Config arvados.Config
+ ControllerURL *url.URL
+ ClusterID string
+}
+
+type logger struct {
+ loggerfunc func(...interface{})
+}
+
+func (l logger) Log(args ...interface{}) {
+	l.loggerfunc(args...)
+}
+
+// NewTestCluster loads the provided configuration, and sets up a test cluster
+// ready to be started.
+func NewTestCluster(srcPath, clusterID string, cfg *arvados.Config, listenHost string, logWriter func(...interface{})) *TestCluster {
+ return &TestCluster{
+ Super: Supervisor{
+ SourcePath: srcPath,
+ ClusterType: "test",
+ ListenHost: listenHost,
+ ControllerAddr: ":0",
+ OwnTemporaryDatabase: true,
+ Stderr: &service.LogPrefixer{
+ Writer: ctxlog.LogWriter(logWriter),
+ Prefix: []byte("[" + clusterID + "] ")},
+ },
+ Config: *cfg,
+ ClusterID: clusterID,
+ }
+}
+
+// Start starts the test cluster.
+func (tc *TestCluster) Start() {
+ tc.Super.Start(context.Background(), &tc.Config, "-")
+}
+
+// WaitReady waits for all components to report healthy, and finishes setting
+// up the TestCluster struct.
+func (tc *TestCluster) WaitReady() bool {
+ au, ok := tc.Super.WaitReady()
+ if !ok {
+ return ok
+ }
+ u := url.URL(*au)
+ tc.ControllerURL = &u
+ return ok
+}
+
+// ClientsWithToken returns Context, arvados.Client and keepclient structs
+// initialized to connect to the cluster with the supplied Arvados token.
+func (tc *TestCluster) ClientsWithToken(token string) (context.Context, *arvados.Client, *keepclient.KeepClient) {
+ cl := tc.Config.Clusters[tc.ClusterID]
+ ctx := auth.NewContext(context.Background(), auth.NewCredentials(token))
+ ac, err := arvados.NewClientFromConfig(&cl)
+ if err != nil {
+ panic(err)
+ }
+ ac.AuthToken = token
+ arv, err := arvadosclient.New(ac)
+ if err != nil {
+ panic(err)
+ }
+ kc := keepclient.New(arv)
+ return ctx, ac, kc
+}
+
+// UserClients logs in as a user called "example", gets the user's API
+// token, initializes clients with that token, sets up the user, and
+// optionally activates the user. It returns client structs for
+// communicating with the cluster on behalf of the 'example' user.
+func (tc *TestCluster) UserClients(rootctx context.Context, c *check.C, conn *rpc.Conn, authEmail string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient, arvados.User) {
+ login, err := conn.UserSessionCreate(rootctx, rpc.UserSessionCreateOptions{
+ ReturnTo: ",https://example.com",
+ AuthInfo: rpc.UserSessionAuthInfo{
+ Email: authEmail,
+ FirstName: "Example",
+ LastName: "User",
+ Username: "example",
+ },
+ })
+ c.Assert(err, check.IsNil)
+ redirURL, err := url.Parse(login.RedirectLocation)
+ c.Assert(err, check.IsNil)
+ userToken := redirURL.Query().Get("api_token")
+ c.Logf("user token: %q", userToken)
+ ctx, ac, kc := tc.ClientsWithToken(userToken)
+ user, err := conn.UserGetCurrent(ctx, arvados.GetOptions{})
+ c.Assert(err, check.IsNil)
+ _, err = conn.UserSetup(rootctx, arvados.UserSetupOptions{UUID: user.UUID})
+ c.Assert(err, check.IsNil)
+ if activate {
+ _, err = conn.UserActivate(rootctx, arvados.UserActivateOptions{UUID: user.UUID})
+ c.Assert(err, check.IsNil)
+ user, err = conn.UserGetCurrent(ctx, arvados.GetOptions{})
+ c.Assert(err, check.IsNil)
+ c.Logf("user UUID: %q", user.UUID)
+ if !user.IsActive {
+ c.Fatalf("failed to activate user -- %#v", user)
+ }
+ }
+ return ctx, ac, kc, user
+}
+
+// RootClients returns Context, arvados.Client and keepclient structs initialized
+// to communicate with the cluster as the system root user.
+func (tc *TestCluster) RootClients() (context.Context, *arvados.Client, *keepclient.KeepClient) {
+ return tc.ClientsWithToken(tc.Config.Clusters[tc.ClusterID].SystemRootToken)
+}
+
+// AnonymousClients returns Context, arvados.Client and keepclient structs initialized
+// to communicate with the cluster as the anonymous user.
+func (tc *TestCluster) AnonymousClients() (context.Context, *arvados.Client, *keepclient.KeepClient) {
+ return tc.ClientsWithToken(tc.Config.Clusters[tc.ClusterID].Users.AnonymousUserToken)
+}
+
+// Conn returns an rpc connection struct initialized to communicate with
+// the specified cluster.
+func (tc *TestCluster) Conn() *rpc.Conn {
+ return rpc.NewConn(tc.ClusterID, tc.ControllerURL, true, rpc.PassthroughTokenProvider)
+}
environ []string // for child processes
}
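+
+// Cluster returns the cluster configuration in use by this Supervisor.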
+func (super *Supervisor) Cluster() *arvados.Cluster { return super.cluster }
+
func (super *Supervisor) Start(ctx context.Context, cfg *arvados.Config, cfgPath string) {
super.ctx, super.cancel = context.WithCancel(ctx)
super.done = make(chan struct{})
"github.com/prometheus/client_golang/prometheus"
)
+// Command starts a controller service. See cmd/arvados-server/cmd.go
var Command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
func newHandler(_ context.Context, cluster *arvados.Cluster, _ string, _ *prometheus.Registry) service.Handler {
return conn.chooseBackend(options.UUID).ContainerUnlock(ctx, options)
}
+func (conn *Conn) ContainerRequestList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerRequestList, error) {
+ return conn.generated_ContainerRequestList(ctx, options)
+}
+
+func (conn *Conn) ContainerRequestCreate(ctx context.Context, options arvados.CreateOptions) (arvados.ContainerRequest, error) {
+ be := conn.chooseBackend(options.ClusterID)
+ if be == conn.local {
+ return be.ContainerRequestCreate(ctx, options)
+ }
+ if _, ok := options.Attrs["runtime_token"]; !ok {
+ // If runtime_token is not set, create a new token
+ aca, err := conn.local.APIClientAuthorizationCurrent(ctx, arvados.GetOptions{})
+ if err != nil {
+ // This should probably be StatusUnauthorized
+ // (need to update test in
+ // lib/controller/federation_test.go):
+ // When RoR is out of the picture this should be:
+ // return arvados.ContainerRequest{}, httpErrorf(http.StatusUnauthorized, "%w", err)
+ return arvados.ContainerRequest{}, httpErrorf(http.StatusForbidden, "%s", "invalid API token")
+ }
+ user, err := conn.local.UserGetCurrent(ctx, arvados.GetOptions{})
+ if err != nil {
+ return arvados.ContainerRequest{}, err
+ }
+ if len(aca.Scopes) == 0 || aca.Scopes[0] != "all" {
+ return arvados.ContainerRequest{}, httpErrorf(http.StatusForbidden, "token scope is not [all]")
+ }
+ if strings.HasPrefix(aca.UUID, conn.cluster.ClusterID) {
+ // Local user, submitting to a remote cluster.
+ // Create a new time-limited token.
+ local, ok := conn.local.(*localdb.Conn)
+ if !ok {
+ return arvados.ContainerRequest{}, httpErrorf(http.StatusInternalServerError, "bug: local backend is a %T, not a *localdb.Conn", conn.local)
+ }
+ aca, err = local.CreateAPIClientAuthorization(ctx, conn.cluster.SystemRootToken, rpc.UserSessionAuthInfo{UserUUID: user.UUID,
+ ExpiresAt: time.Now().UTC().Add(conn.cluster.Collections.BlobSigningTTL.Duration())})
+ if err != nil {
+ return arvados.ContainerRequest{}, err
+ }
+ options.Attrs["runtime_token"] = aca.TokenV2()
+ } else {
+ // Remote user. Container request will use the
+ // current token, minus the trailing portion
+ // (optional container uuid).
+ options.Attrs["runtime_token"] = aca.TokenV2()
+ }
+ }
+ return be.ContainerRequestCreate(ctx, options)
+}
+
+func (conn *Conn) ContainerRequestUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.ContainerRequest, error) {
+ return conn.chooseBackend(options.UUID).ContainerRequestUpdate(ctx, options)
+}
+
+func (conn *Conn) ContainerRequestGet(ctx context.Context, options arvados.GetOptions) (arvados.ContainerRequest, error) {
+ return conn.chooseBackend(options.UUID).ContainerRequestGet(ctx, options)
+}
+
+func (conn *Conn) ContainerRequestDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.ContainerRequest, error) {
+ return conn.chooseBackend(options.UUID).ContainerRequestDelete(ctx, options)
+}
+
func (conn *Conn) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
return conn.generated_SpecimenList(ctx, options)
}
defer out.Close()
out.Write(regexp.MustCompile(`(?ms)^.*package .*?import.*?\n\)\n`).Find(buf))
io.WriteString(out, "//\n// -- this file is auto-generated -- do not edit -- edit list.go and run \"go generate\" instead --\n//\n\n")
- for _, t := range []string{"Container", "Specimen", "User"} {
+ for _, t := range []string{"Container", "ContainerRequest", "Specimen", "User"} {
_, err := out.Write(bytes.ReplaceAll(orig, []byte("Collection"), []byte(t)))
if err != nil {
panic(err)
return merged, err
}
+func (conn *Conn) generated_ContainerRequestList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerRequestList, error) {
+ var mtx sync.Mutex
+ var merged arvados.ContainerRequestList
+ var needSort atomic.Value
+ needSort.Store(false)
+ err := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {
+ options.ForwardedFor = conn.cluster.ClusterID + "-" + options.ForwardedFor
+ cl, err := backend.ContainerRequestList(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+ mtx.Lock()
+ defer mtx.Unlock()
+ if len(merged.Items) == 0 {
+ merged = cl
+ } else if len(cl.Items) > 0 {
+ merged.Items = append(merged.Items, cl.Items...)
+ needSort.Store(true)
+ }
+ uuids := make([]string, 0, len(cl.Items))
+ for _, item := range cl.Items {
+ uuids = append(uuids, item.UUID)
+ }
+ return uuids, nil
+ })
+ if needSort.Load().(bool) {
+ // Apply the default/implied order, "modified_at desc"
+ sort.Slice(merged.Items, func(i, j int) bool {
+ mi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt
+ return mj.Before(mi)
+ })
+ }
+ if merged.Items == nil {
+ // Return empty results as [], not null
+ // (https://github.com/golang/go/issues/27589 might be
+ // a better solution in the future)
+ merged.Items = []arvados.ContainerRequest{}
+ }
+ return merged, err
+}
+
func (conn *Conn) generated_SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
var mtx sync.Mutex
var merged arvados.SpecimenList
return s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if req.URL.Path == "/arvados/v1/api_client_authorizations/current" {
if req.Header.Get("Authorization") == "Bearer "+arvadostest.ActiveToken {
- json.NewEncoder(w).Encode(arvados.APIClientAuthorization{UUID: arvadostest.ActiveTokenUUID, APIToken: arvadostest.ActiveToken})
+ json.NewEncoder(w).Encode(arvados.APIClientAuthorization{UUID: arvadostest.ActiveTokenUUID, APIToken: arvadostest.ActiveToken, Scopes: []string{"all"}})
+ } else {
+ w.WriteHeader(http.StatusUnauthorized)
+ }
+ } else if req.URL.Path == "/arvados/v1/users/current" {
+ if req.Header.Get("Authorization") == "Bearer "+arvadostest.ActiveToken {
+ json.NewEncoder(w).Encode(arvados.User{UUID: arvadostest.ActiveUserUUID})
} else {
w.WriteHeader(http.StatusUnauthorized)
}
c.Check(strings.HasPrefix(cr.UUID, "zzzzz-"), check.Equals, true)
}
+// getCRfromMockRequest returns a ContainerRequest built from the body of the
+// request sent to the remote mock, decoding it according to the request's
+// Content-Type.
+func (s *FederationSuite) getCRfromMockRequest(c *check.C) arvados.ContainerRequest {
+	// The request body can be form-encoded, like:
+	// cluster_id=zmock&container_request=%7B%22command%22%3A%5B%22abc%22%5D%2C%22container_image%22%3A%22123%22%2C%22...7D
+	// or JSON-encoded, like:
+	// "{\"container_request\":{\"command\":[\"abc\"],\"container_image\":\"12...Uncommitted\"}}"
+
+ var cr arvados.ContainerRequest
+ data, err := ioutil.ReadAll(s.remoteMockRequests[0].Body)
+ c.Check(err, check.IsNil)
+
+ if s.remoteMockRequests[0].Header.Get("Content-Type") == "application/json" {
+ // legacy code path sends a JSON request body
+ var answerCR struct {
+ ContainerRequest arvados.ContainerRequest `json:"container_request"`
+ }
+ c.Check(json.Unmarshal(data, &answerCR), check.IsNil)
+ cr = answerCR.ContainerRequest
+ } else if s.remoteMockRequests[0].Header.Get("Content-Type") == "application/x-www-form-urlencoded" {
+ // new code path sends a form-encoded request body with a JSON-encoded parameter value
+ decodedValue, err := url.ParseQuery(string(data))
+ c.Check(err, check.IsNil)
+ decodedValueCR := decodedValue.Get("container_request")
+ c.Check(json.Unmarshal([]byte(decodedValueCR), &cr), check.IsNil)
+ } else {
+		// The mock request must have a Content-Type we can parse.
+ c.Fail()
+ }
+
+ return cr
+}
+
func (s *FederationSuite) TestCreateRemoteContainerRequestCheckRuntimeToken(c *check.C) {
// Send request to zmock and check that outgoing request has
// runtime_token set with a new random v2 token.
defer s.localServiceReturns404(c).Close()
- // pass cluster_id via query parameter, this allows arvados-controller
- // to avoid parsing the body
req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zmock",
strings.NewReader(`{
- "container_request": {
- "name": "hello world",
- "state": "Uncommitted",
- "output_path": "/",
- "container_image": "123",
- "command": ["abc"]
- }
-}
-`))
+ "container_request": {
+ "name": "hello world",
+ "state": "Uncommitted",
+ "output_path": "/",
+ "container_image": "123",
+ "command": ["abc"]
+ }
+ }
+ `))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
req.Header.Set("Content-type", "application/json")
+	// Replace zhome with zzzzz values (RailsAPI, ClusterID, SystemRootToken).
+	// SystemRootToken is needed because we check
+	// https://[RailsAPI]/arvados/v1/api_client_authorizations/current,
+	// https://[RailsAPI]/arvados/v1/users/current and
+	// https://[RailsAPI]/auth/controller/callback
arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
s.testHandler.Cluster.ClusterID = "zzzzz"
+ s.testHandler.Cluster.SystemRootToken = arvadostest.SystemRootToken
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
- var cr struct {
- arvados.ContainerRequest `json:"container_request"`
- }
- c.Check(json.NewDecoder(s.remoteMockRequests[0].Body).Decode(&cr), check.IsNil)
- c.Check(strings.HasPrefix(cr.ContainerRequest.RuntimeToken, "v2/zzzzz-gj3su-"), check.Equals, true)
- c.Check(cr.ContainerRequest.RuntimeToken, check.Not(check.Equals), arvadostest.ActiveTokenV2)
+
+ cr := s.getCRfromMockRequest(c)
+
+ // Runtime token must match zzzzz cluster
+ c.Check(cr.RuntimeToken, check.Matches, "v2/zzzzz-gj3su-.*")
+	// RuntimeToken must be different from the token the original request was made with.
+ c.Check(cr.RuntimeToken, check.Not(check.Equals), arvadostest.ActiveTokenV2)
}
func (s *FederationSuite) TestCreateRemoteContainerRequestCheckSetRuntimeToken(c *check.C) {
// to avoid parsing the body
req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zmock",
strings.NewReader(`{
- "container_request": {
- "name": "hello world",
- "state": "Uncommitted",
- "output_path": "/",
- "container_image": "123",
- "command": ["abc"],
- "runtime_token": "xyz"
- }
-}
-`))
+ "container_request": {
+ "name": "hello world",
+ "state": "Uncommitted",
+ "output_path": "/",
+ "container_image": "123",
+ "command": ["abc"],
+ "runtime_token": "xyz"
+ }
+ }
+ `))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-type", "application/json")
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
- var cr struct {
- arvados.ContainerRequest `json:"container_request"`
- }
- c.Check(json.NewDecoder(s.remoteMockRequests[0].Body).Decode(&cr), check.IsNil)
- c.Check(cr.ContainerRequest.RuntimeToken, check.Equals, "xyz")
-}
-func (s *FederationSuite) TestCreateRemoteContainerRequestRuntimeTokenFromAuth(c *check.C) {
- // Send request to zmock and check that outgoing request has
- // runtime_token set using the Auth token because the user is remote.
+ cr := s.getCRfromMockRequest(c)
- defer s.localServiceReturns404(c).Close()
- // pass cluster_id via query parameter, this allows arvados-controller
- // to avoid parsing the body
- req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zmock",
- strings.NewReader(`{
- "container_request": {
- "name": "hello world",
- "state": "Uncommitted",
- "output_path": "/",
- "container_image": "123",
- "command": ["abc"]
- }
-}
-`))
- req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2+"/zzzzz-dz642-parentcontainer")
- req.Header.Set("Content-type", "application/json")
- resp := s.testRequest(req).Result()
- c.Check(resp.StatusCode, check.Equals, http.StatusOK)
- var cr struct {
- arvados.ContainerRequest `json:"container_request"`
- }
- c.Check(json.NewDecoder(s.remoteMockRequests[0].Body).Decode(&cr), check.IsNil)
- c.Check(cr.ContainerRequest.RuntimeToken, check.Equals, arvadostest.ActiveTokenV2)
+	// Make sure the runtime_token we sent is still there.
+ c.Check(cr.RuntimeToken, check.Equals, "xyz")
}
func (s *FederationSuite) TestCreateRemoteContainerRequestError(c *check.C) {
mux.Handle("/arvados/v1/collections/", rtr)
mux.Handle("/arvados/v1/users", rtr)
mux.Handle("/arvados/v1/users/", rtr)
+ mux.Handle("/arvados/v1/container_requests", rtr)
+ mux.Handle("/arvados/v1/container_requests/", rtr)
mux.Handle("/login", rtr)
mux.Handle("/logout", rtr)
}
}
resp2, err := client.Get(s.cluster.Services.RailsAPI.ExternalURL.String() + url + "/?api_token=" + token)
c.Check(err, check.Equals, nil)
+ c.Assert(resp2.StatusCode, check.Equals, http.StatusOK,
+ check.Commentf("Wasn't able to get data from the RailsAPI at %q", url))
defer resp2.Body.Close()
db, err := ioutil.ReadAll(resp2.Body)
c.Check(err, check.Equals, nil)
import (
"bytes"
"context"
+ "database/sql"
"encoding/json"
"fmt"
"io"
"math"
"net"
"net/http"
- "net/url"
"os"
"os/exec"
"path/filepath"
"git.arvados.org/arvados.git/lib/boot"
"git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/lib/controller/rpc"
- "git.arvados.org/arvados.git/lib/service"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&IntegrationSuite{})
-type testCluster struct {
- super boot.Supervisor
- config arvados.Config
- controllerURL *url.URL
-}
-
type IntegrationSuite struct {
- testClusters map[string]*testCluster
+ testClusters map[string]*boot.TestCluster
oidcprovider *arvadostest.OIDCProvider
}
s.oidcprovider.ValidClientID = "clientid"
s.oidcprovider.ValidClientSecret = "clientsecret"
- s.testClusters = map[string]*testCluster{
+ s.testClusters = map[string]*boot.TestCluster{
"z1111": nil,
"z2222": nil,
"z3333": nil,
ExternalURL: https://` + hostport[id] + `
TLS:
Insecure: true
- Login:
- LoginCluster: z1111
SystemLogs:
Format: text
RemoteClusters:
loader.SkipAPICalls = true
cfg, err := loader.Load()
c.Assert(err, check.IsNil)
- s.testClusters[id] = &testCluster{
- super: boot.Supervisor{
- SourcePath: filepath.Join(cwd, "..", ".."),
- ClusterType: "test",
- ListenHost: "127.0.0." + id[3:],
- ControllerAddr: ":0",
- OwnTemporaryDatabase: true,
- Stderr: &service.LogPrefixer{Writer: ctxlog.LogWriter(c.Log), Prefix: []byte("[" + id + "] ")},
- },
- config: *cfg,
- }
- s.testClusters[id].super.Start(context.Background(), &s.testClusters[id].config, "-")
+ tc := boot.NewTestCluster(
+ filepath.Join(cwd, "..", ".."),
+ id, cfg, "127.0.0."+id[3:], c.Log)
+ s.testClusters[id] = tc
+ s.testClusters[id].Start()
}
for _, tc := range s.testClusters {
- au, ok := tc.super.WaitReady()
+ ok := tc.WaitReady()
c.Assert(ok, check.Equals, true)
- u := url.URL(*au)
- tc.controllerURL = &u
}
}
func (s *IntegrationSuite) TearDownSuite(c *check.C) {
for _, c := range s.testClusters {
- c.super.Stop()
- }
-}
-
-// Get rpc connection struct initialized to communicate with the
-// specified cluster.
-func (s *IntegrationSuite) conn(clusterID string) *rpc.Conn {
- return rpc.NewConn(clusterID, s.testClusters[clusterID].controllerURL, true, rpc.PassthroughTokenProvider)
-}
-
-// Return Context, Arvados.Client and keepclient structs initialized
-// to connect to the specified cluster (by clusterID) using with the supplied
-// Arvados token.
-func (s *IntegrationSuite) clientsWithToken(clusterID string, token string) (context.Context, *arvados.Client, *keepclient.KeepClient) {
- cl := s.testClusters[clusterID].config.Clusters[clusterID]
- ctx := auth.NewContext(context.Background(), auth.NewCredentials(token))
- ac, err := arvados.NewClientFromConfig(&cl)
- if err != nil {
- panic(err)
- }
- ac.AuthToken = token
- arv, err := arvadosclient.New(ac)
- if err != nil {
- panic(err)
- }
- kc := keepclient.New(arv)
- return ctx, ac, kc
-}
-
-// Log in as a user called "example", get the user's API token,
-// initialize clients with the API token, set up the user and
-// optionally activate the user. Return client structs for
-// communicating with the cluster on behalf of the 'example' user.
-func (s *IntegrationSuite) userClients(rootctx context.Context, c *check.C, conn *rpc.Conn, clusterID string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient, arvados.User) {
- login, err := conn.UserSessionCreate(rootctx, rpc.UserSessionCreateOptions{
- ReturnTo: ",https://example.com",
- AuthInfo: rpc.UserSessionAuthInfo{
- Email: "user@example.com",
- FirstName: "Example",
- LastName: "User",
- Username: "example",
- },
- })
- c.Assert(err, check.IsNil)
- redirURL, err := url.Parse(login.RedirectLocation)
- c.Assert(err, check.IsNil)
- userToken := redirURL.Query().Get("api_token")
- c.Logf("user token: %q", userToken)
- ctx, ac, kc := s.clientsWithToken(clusterID, userToken)
- user, err := conn.UserGetCurrent(ctx, arvados.GetOptions{})
- c.Assert(err, check.IsNil)
- _, err = conn.UserSetup(rootctx, arvados.UserSetupOptions{UUID: user.UUID})
- c.Assert(err, check.IsNil)
- if activate {
- _, err = conn.UserActivate(rootctx, arvados.UserActivateOptions{UUID: user.UUID})
- c.Assert(err, check.IsNil)
- user, err = conn.UserGetCurrent(ctx, arvados.GetOptions{})
- c.Assert(err, check.IsNil)
- c.Logf("user UUID: %q", user.UUID)
- if !user.IsActive {
- c.Fatalf("failed to activate user -- %#v", user)
- }
+ c.Super.Stop()
}
- return ctx, ac, kc, user
-}
-
-// Return Context, arvados.Client and keepclient structs initialized
-// to communicate with the cluster as the system root user.
-func (s *IntegrationSuite) rootClients(clusterID string) (context.Context, *arvados.Client, *keepclient.KeepClient) {
- return s.clientsWithToken(clusterID, s.testClusters[clusterID].config.Clusters[clusterID].SystemRootToken)
-}
-
-// Return Context, arvados.Client and keepclient structs initialized
-// to communicate with the cluster as the anonymous user.
-func (s *IntegrationSuite) anonymousClients(clusterID string) (context.Context, *arvados.Client, *keepclient.KeepClient) {
- return s.clientsWithToken(clusterID, s.testClusters[clusterID].config.Clusters[clusterID].Users.AnonymousUserToken)
}
func (s *IntegrationSuite) TestGetCollectionByPDH(c *check.C) {
- conn1 := s.conn("z1111")
- rootctx1, _, _ := s.rootClients("z1111")
- conn3 := s.conn("z3333")
- userctx1, ac1, kc1, _ := s.userClients(rootctx1, c, conn1, "z1111", true)
+ conn1 := s.testClusters["z1111"].Conn()
+ rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+ conn3 := s.testClusters["z3333"].Conn()
+ userctx1, ac1, kc1, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Create the collection to find its PDH (but don't save it
// anywhere yet)
testText := "IntegrationSuite.TestS3WithFederatedToken"
- conn1 := s.conn("z1111")
- rootctx1, _, _ := s.rootClients("z1111")
- userctx1, ac1, _, _ := s.userClients(rootctx1, c, conn1, "z1111", true)
- conn3 := s.conn("z3333")
+ conn1 := s.testClusters["z1111"].Conn()
+ rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+ userctx1, ac1, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn3 := s.testClusters["z3333"].Conn()
createColl := func(clusterID string) arvados.Collection {
- _, ac, kc := s.clientsWithToken(clusterID, ac1.AuthToken)
+ _, ac, kc := s.testClusters[clusterID].ClientsWithToken(ac1.AuthToken)
var coll arvados.Collection
fs, err := coll.FileSystem(ac, kc)
c.Assert(err, check.IsNil)
c.Assert(err, check.IsNil)
mtxt, err := fs.MarshalManifest(".")
c.Assert(err, check.IsNil)
- coll, err = s.conn(clusterID).CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+ coll, err = s.testClusters[clusterID].Conn().CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
"manifest_text": mtxt,
}})
c.Assert(err, check.IsNil)
}
func (s *IntegrationSuite) TestGetCollectionAsAnonymous(c *check.C) {
- conn1 := s.conn("z1111")
- conn3 := s.conn("z3333")
- rootctx1, rootac1, rootkc1 := s.rootClients("z1111")
- anonctx3, anonac3, _ := s.anonymousClients("z3333")
+ conn1 := s.testClusters["z1111"].Conn()
+ conn3 := s.testClusters["z3333"].Conn()
+ rootctx1, rootac1, rootkc1 := s.testClusters["z1111"].RootClients()
+ anonctx3, anonac3, _ := s.testClusters["z3333"].AnonymousClients()
// Make sure anonymous token was set
c.Assert(anonac3.AuthToken, check.Not(check.Equals), "")
c.Check(err, check.IsNil)
// Make a v2 token of the z3 anonymous user, and use it on z1
- _, anonac1, _ := s.clientsWithToken("z1111", outAuth.TokenV2())
+ _, anonac1, _ := s.testClusters["z1111"].ClientsWithToken(outAuth.TokenV2())
outUser2, err := anonac1.CurrentUser()
c.Check(err, check.IsNil)
// z3 anonymous user will be mapped to the z1 anonymous user
// Get a token from the login cluster (z1111), use it to submit a
// container request on z2222.
func (s *IntegrationSuite) TestCreateContainerRequestWithFedToken(c *check.C) {
- conn1 := s.conn("z1111")
- rootctx1, _, _ := s.rootClients("z1111")
- _, ac1, _, _ := s.userClients(rootctx1, c, conn1, "z1111", true)
+ conn1 := s.testClusters["z1111"].Conn()
+ rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+ _, ac1, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Use ac2 to get the discovery doc with a blank token, so the
// SDK doesn't magically pass the z1111 token to z2222 before
// we're ready to start our test.
- _, ac2, _ := s.clientsWithToken("z2222", "")
+ _, ac2, _ := s.testClusters["z2222"].ClientsWithToken("")
var dd map[string]interface{}
err := ac2.RequestAndDecode(&dd, "GET", "discovery/v1/apis/arvados/v1/rest", nil, nil)
c.Assert(err, check.IsNil)
c.Assert(err, check.IsNil)
req.Header.Set("Content-Type", "application/json")
err = ac2.DoAndDecode(&cr, req)
+ c.Assert(err, check.IsNil)
c.Logf("err == %#v", err)
c.Log("...get user with good token")
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "OAuth2 "+ac2.AuthToken)
resp, err = arvados.InsecureHTTPClient.Do(req)
- if c.Check(err, check.IsNil) {
- err = json.NewDecoder(resp.Body).Decode(&cr)
+ c.Assert(err, check.IsNil)
+ err = json.NewDecoder(resp.Body).Decode(&cr)
+ c.Check(err, check.IsNil)
+ c.Check(cr.UUID, check.Matches, "z2222-.*")
+}
+
+func (s *IntegrationSuite) TestCreateContainerRequestWithBadToken(c *check.C) {
+ conn1 := s.testClusters["z1111"].Conn()
+ rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+ _, ac1, _, au := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+
+ tests := []struct {
+ name string
+ token string
+ expectedCode int
+ }{
+ {"Good token", ac1.AuthToken, http.StatusOK},
+ {"Bogus token", "abcdef", http.StatusUnauthorized},
+ {"v1-looking token", "badtoken00badtoken00badtoken00badtoken00b", http.StatusUnauthorized},
+ {"v2-looking token", "v2/" + au.UUID + "/badtoken00badtoken00badtoken00badtoken00b", http.StatusUnauthorized},
+ }
+
+ body, _ := json.Marshal(map[string]interface{}{
+ "container_request": map[string]interface{}{
+ "command": []string{"echo"},
+ "container_image": "d41d8cd98f00b204e9800998ecf8427e+0",
+ "cwd": "/",
+ "output_path": "/",
+ },
+ })
+
+ for _, tt := range tests {
+ c.Log(c.TestName() + " " + tt.name)
+ ac1.AuthToken = tt.token
+ req, err := http.NewRequest("POST", "https://"+ac1.APIHost+"/arvados/v1/container_requests", bytes.NewReader(body))
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Content-Type", "application/json")
+ resp, err := ac1.Do(req)
+ c.Assert(err, check.IsNil)
+ c.Assert(resp.StatusCode, check.Equals, tt.expectedCode)
+ }
+}
+
+// dbConn returns a direct database connection to the given cluster. Normally
+// an integration test would not access the database directly, but here we
+// need to check tokens that are secret, so no API response will return them.
+func (s *IntegrationSuite) dbConn(c *check.C, clusterID string) (*sql.DB, *sql.Conn) {
+ ctx := context.Background()
+ db, err := sql.Open("postgres", s.testClusters[clusterID].Super.Cluster().PostgreSQL.Connection.String())
+ c.Assert(err, check.IsNil)
+
+ conn, err := db.Conn(ctx)
+ c.Assert(err, check.IsNil)
+
+ rows, err := conn.ExecContext(ctx, `SELECT 1`)
+ c.Assert(err, check.IsNil)
+ n, err := rows.RowsAffected()
+ c.Assert(err, check.IsNil)
+ c.Assert(n, check.Equals, int64(1))
+ return db, conn
+}
+
+// TestRuntimeTokenInCR tests several different tokens in the runtime_token
+// attribute and checks the expected results, accessing the database directly
+// when needed.
+func (s *IntegrationSuite) TestRuntimeTokenInCR(c *check.C) {
+ db, dbconn := s.dbConn(c, "z1111")
+ defer db.Close()
+ defer dbconn.Close()
+ conn1 := s.testClusters["z1111"].Conn()
+ rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+ userctx1, ac1, _, au := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+
+ tests := []struct {
+ name string
+ token string
+ expectAToGetAValidCR bool
+ expectedToken *string
+ }{
+ {"Good token z1111 user", ac1.AuthToken, true, &ac1.AuthToken},
+ {"Bogus token", "abcdef", false, nil},
+ {"v1-looking token", "badtoken00badtoken00badtoken00badtoken00b", false, nil},
+ {"v2-looking token", "v2/" + au.UUID + "/badtoken00badtoken00badtoken00badtoken00b", false, nil},
+ }
+
+ for _, tt := range tests {
+ c.Log(c.TestName() + " " + tt.name)
+
+ rq := map[string]interface{}{
+ "command": []string{"echo"},
+ "container_image": "d41d8cd98f00b204e9800998ecf8427e+0",
+ "cwd": "/",
+ "output_path": "/",
+ "runtime_token": tt.token,
+ }
+ cr, err := conn1.ContainerRequestCreate(userctx1, arvados.CreateOptions{Attrs: rq})
+ if tt.expectAToGetAValidCR {
+ c.Check(err, check.IsNil)
+ c.Check(cr, check.NotNil)
+ c.Check(cr.UUID, check.Not(check.Equals), "")
+ }
+
+ if tt.expectedToken == nil {
+ continue
+ }
+
+ c.Logf("cr.UUID: %s", cr.UUID)
+ row := dbconn.QueryRowContext(rootctx1, `SELECT runtime_token from container_requests where uuid=$1`, cr.UUID)
+ c.Check(row, check.NotNil)
+ var token sql.NullString
+		c.Check(row.Scan(&token), check.IsNil)
+ if c.Check(token.Valid, check.Equals, true) {
+ c.Check(token.String, check.Equals, *tt.expectedToken)
+ }
+ }
+}
+
+// TestIntermediateCluster sends a container request to one cluster with
+// another cluster as the destination, and checks that the tokens are
+// handled properly.
+func (s *IntegrationSuite) TestIntermediateCluster(c *check.C) {
+ conn1 := s.testClusters["z1111"].Conn()
+ rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+ uctx1, ac1, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+
+ tests := []struct {
+ name string
+ token string
+ expectedRuntimeToken string
+ expectedUUIDprefix string
+ }{
+ {"Good token z1111 user sending a CR to z2222", ac1.AuthToken, "", "z2222-xvhdp-"},
+ }
+
+ for _, tt := range tests {
+ c.Log(c.TestName() + " " + tt.name)
+ rq := map[string]interface{}{
+ "command": []string{"echo"},
+ "container_image": "d41d8cd98f00b204e9800998ecf8427e+0",
+ "cwd": "/",
+ "output_path": "/",
+ "runtime_token": tt.token,
+ }
+ cr, err := conn1.ContainerRequestCreate(uctx1, arvados.CreateOptions{ClusterID: "z2222", Attrs: rq})
+
c.Check(err, check.IsNil)
- c.Check(cr.UUID, check.Matches, "z2222-.*")
+ c.Check(strings.HasPrefix(cr.UUID, tt.expectedUUIDprefix), check.Equals, true)
+ c.Check(cr.RuntimeToken, check.Equals, tt.expectedRuntimeToken)
}
}
// Test for bug #16263
func (s *IntegrationSuite) TestListUsers(c *check.C) {
- rootctx1, _, _ := s.rootClients("z1111")
- conn1 := s.conn("z1111")
- conn3 := s.conn("z3333")
- userctx1, _, _, _ := s.userClients(rootctx1, c, conn1, "z1111", true)
+ rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+ conn1 := s.testClusters["z1111"].Conn()
+ conn3 := s.testClusters["z3333"].Conn()
+ userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Make sure LoginCluster is properly configured
for cls := range s.testClusters {
c.Check(
- s.testClusters[cls].config.Clusters[cls].Login.LoginCluster,
+ s.testClusters[cls].Config.Clusters[cls].Login.LoginCluster,
check.Equals, "z1111",
check.Commentf("incorrect LoginCluster config on cluster %q", cls))
}
}
func (s *IntegrationSuite) TestSetupUserWithVM(c *check.C) {
- conn1 := s.conn("z1111")
- conn3 := s.conn("z3333")
- rootctx1, rootac1, _ := s.rootClients("z1111")
+ conn1 := s.testClusters["z1111"].Conn()
+ conn3 := s.testClusters["z3333"].Conn()
+ rootctx1, rootac1, _ := s.testClusters["z1111"].RootClients()
// Create user on LoginCluster z1111
- _, _, _, user := s.userClients(rootctx1, c, conn1, "z1111", false)
+ _, _, _, user := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Make a new root token (because rootClients() uses SystemRootToken)
var outAuth arvados.APIClientAuthorization
c.Check(err, check.IsNil)
// Make a v2 root token to communicate with z3333
- rootctx3, rootac3, _ := s.clientsWithToken("z3333", outAuth.TokenV2())
+ rootctx3, rootac3, _ := s.testClusters["z3333"].ClientsWithToken(outAuth.TokenV2())
// Create VM on z3333
var outVM arvados.VirtualMachine
}
func (s *IntegrationSuite) TestOIDCAccessTokenAuth(c *check.C) {
- conn1 := s.conn("z1111")
- rootctx1, _, _ := s.rootClients("z1111")
- s.userClients(rootctx1, c, conn1, "z1111", true)
+ conn1 := s.testClusters["z1111"].Conn()
+ rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+ s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
accesstoken := s.oidcprovider.ValidAccessToken()
- for _, clusterid := range []string{"z1111", "z2222"} {
- c.Logf("trying clusterid %s", clusterid)
+ for _, clusterID := range []string{"z1111", "z2222"} {
+ c.Logf("trying clusterid %s", clusterID)
- conn := s.conn(clusterid)
- ctx, ac, kc := s.clientsWithToken(clusterid, accesstoken)
+ conn := s.testClusters[clusterID].Conn()
+ ctx, ac, kc := s.testClusters[clusterID].ClientsWithToken(accesstoken)
var coll arvados.Collection
railsProxy := railsproxy.NewConn(cluster)
var conn Conn
conn = Conn{
- cluster: cluster,
- railsProxy: railsProxy,
- loginController: chooseLoginController(cluster, railsProxy),
+ cluster: cluster,
+ railsProxy: railsProxy,
}
+ conn.loginController = chooseLoginController(cluster, &conn)
return &conn
}
+// Logout handles the logout process, delegating to the appropriate loginController
func (conn *Conn) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
return conn.loginController.Logout(ctx, opts)
}
+// Login handles the login process, delegating to the appropriate loginController
func (conn *Conn) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
return conn.loginController.Login(ctx, opts)
}
+// UserAuthenticate handles user authentication, delegating to the appropriate loginController
func (conn *Conn) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
return conn.loginController.UserAuthenticate(ctx, opts)
}
UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error)
}
-func chooseLoginController(cluster *arvados.Cluster, railsProxy *railsProxy) loginController {
+func chooseLoginController(cluster *arvados.Cluster, parent *Conn) loginController {
wantGoogle := cluster.Login.Google.Enable
wantOpenIDConnect := cluster.Login.OpenIDConnect.Enable
wantSSO := cluster.Login.SSO.Enable
case wantGoogle:
return &oidcLoginController{
Cluster: cluster,
- RailsProxy: railsProxy,
+ Parent: parent,
Issuer: "https://accounts.google.com",
ClientID: cluster.Login.Google.ClientID,
ClientSecret: cluster.Login.Google.ClientSecret,
case wantOpenIDConnect:
return &oidcLoginController{
Cluster: cluster,
- RailsProxy: railsProxy,
+ Parent: parent,
Issuer: cluster.Login.OpenIDConnect.Issuer,
ClientID: cluster.Login.OpenIDConnect.ClientID,
ClientSecret: cluster.Login.OpenIDConnect.ClientSecret,
UsernameClaim: cluster.Login.OpenIDConnect.UsernameClaim,
}
case wantSSO:
- return &ssoLoginController{railsProxy}
+ return &ssoLoginController{Parent: parent}
case wantPAM:
- return &pamLoginController{Cluster: cluster, RailsProxy: railsProxy}
+ return &pamLoginController{Cluster: cluster, Parent: parent}
case wantLDAP:
- return &ldapLoginController{Cluster: cluster, RailsProxy: railsProxy}
+ return &ldapLoginController{Cluster: cluster, Parent: parent}
case wantTest:
- return &testLoginController{Cluster: cluster, RailsProxy: railsProxy}
+ return &testLoginController{Cluster: cluster, Parent: parent}
case wantLoginCluster:
return &federatedLoginController{Cluster: cluster}
default:
return n
}
-// Login and Logout are passed through to the wrapped railsProxy;
+// Login and Logout are passed through to the parent's railsProxy;
// UserAuthenticate is rejected.
-type ssoLoginController struct{ *railsProxy }
+type ssoLoginController struct{ Parent *Conn }
+func (ctrl *ssoLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
+ return ctrl.Parent.railsProxy.Login(ctx, opts)
+}
+func (ctrl *ssoLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+ return ctrl.Parent.railsProxy.Logout(ctx, opts)
+}
func (ctrl *ssoLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New("username/password authentication is not available"), http.StatusBadRequest)
}
return arvados.LogoutResponse{RedirectLocation: target}, nil
}
-func createAPIClientAuthorization(ctx context.Context, conn *rpc.Conn, rootToken string, authinfo rpc.UserSessionAuthInfo) (resp arvados.APIClientAuthorization, err error) {
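+// CreateAPIClientAuthorization returns a new API client authorization
+// (token) for the user described in authinfo, acting as the system root
+// user by means of rootToken.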
+func (conn *Conn) CreateAPIClientAuthorization(ctx context.Context, rootToken string, authinfo rpc.UserSessionAuthInfo) (resp arvados.APIClientAuthorization, err error) {
+ if rootToken == "" {
+ return arvados.APIClientAuthorization{}, errors.New("configuration error: empty SystemRootToken")
+ }
ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{rootToken}})
- newsession, err := conn.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+ newsession, err := conn.railsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
// Send a fake ReturnTo value instead of the caller's
// opts.ReturnTo. We won't follow the resulting
// redirect target anyway.
)
type ldapLoginController struct {
- Cluster *arvados.Cluster
- RailsProxy *railsProxy
+ Cluster *arvados.Cluster
+ Parent *Conn
}
func (ctrl *ldapLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
return arvados.APIClientAuthorization{}, errors.New("authentication succeeded but ldap returned no email address")
}
- return createAPIClientAuthorization(ctx, ctrl.RailsProxy, ctrl.Cluster.SystemRootToken, rpc.UserSessionAuthInfo{
+ return ctrl.Parent.CreateAPIClientAuthorization(ctx, ctrl.Cluster.SystemRootToken, rpc.UserSessionAuthInfo{
Email: email,
FirstName: attrs["givenname"],
LastName: attrs["sn"],
s.cluster.Login.LDAP.SearchBase = "dc=example,dc=com"
c.Assert(err, check.IsNil)
s.ctrl = &ldapLoginController{
- Cluster: s.cluster,
- RailsProxy: railsproxy.NewConn(s.cluster),
+ Cluster: s.cluster,
+ Parent: &Conn{railsProxy: railsproxy.NewConn(s.cluster)},
}
s.db = arvadostest.DB(c, s.cluster)
}
"time"
"git.arvados.org/arvados.git/lib/controller/api"
- "git.arvados.org/arvados.git/lib/controller/railsproxy"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
type oidcLoginController struct {
Cluster *arvados.Cluster
- RailsProxy *railsProxy
+ Parent *Conn
Issuer string // OIDC issuer URL, e.g., "https://accounts.google.com"
ClientID string
ClientSecret string
return loginError(err)
}
ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
- return ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+ return ctrl.Parent.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
ReturnTo: state.Remote + "," + state.ReturnTo,
AuthInfo: *authinfo,
})
// We want ctrl to be nil if the chosen controller is not a
// *oidcLoginController, so we can ignore the 2nd return value
// of this type cast.
- ctrl, _ := chooseLoginController(cluster, railsproxy.NewConn(cluster)).(*oidcLoginController)
+ ctrl, _ := NewConn(cluster).loginController.(*oidcLoginController)
cache, err := lru.New2Q(tokenCacheSize)
if err != nil {
panic(err)
}
ctxlog.FromContext(ctx).WithField("HMAC", hmac).Debug("(*oidcTokenAuthorizer)registerToken: updated api_client_authorizations row")
} else {
- aca, err = createAPIClientAuthorization(ctx, ta.ctrl.RailsProxy, ta.ctrl.Cluster.SystemRootToken, *authinfo)
+ aca, err = ta.ctrl.Parent.CreateAPIClientAuthorization(ctx, ta.ctrl.Cluster.SystemRootToken, *authinfo)
if err != nil {
return err
}
)
type pamLoginController struct {
- Cluster *arvados.Cluster
- RailsProxy *railsProxy
+ Cluster *arvados.Cluster
+ Parent *Conn
}
func (ctrl *pamLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
"user": user,
"email": email,
}).Debug("pam authentication succeeded")
- return createAPIClientAuthorization(ctx, ctrl.RailsProxy, ctrl.Cluster.SystemRootToken, rpc.UserSessionAuthInfo{
+ return ctrl.Parent.CreateAPIClientAuthorization(ctx, ctrl.Cluster.SystemRootToken, rpc.UserSessionAuthInfo{
Username: user,
Email: email,
})
s.cluster.Login.PAM.DefaultEmailDomain = "example.com"
s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
s.ctrl = &pamLoginController{
- Cluster: s.cluster,
- RailsProxy: rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider),
+ Cluster: s.cluster,
+ Parent: &Conn{railsProxy: rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)},
}
}
)
type testLoginController struct {
- Cluster *arvados.Cluster
- RailsProxy *railsProxy
+ Cluster *arvados.Cluster
+ Parent *Conn
}
func (ctrl *testLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
"username": username,
"email": user.Email,
}).Debug("test authentication succeeded")
- return createAPIClientAuthorization(ctx, ctrl.RailsProxy, ctrl.Cluster.SystemRootToken, rpc.UserSessionAuthInfo{
+ return ctrl.Parent.CreateAPIClientAuthorization(ctx, ctrl.Cluster.SystemRootToken, rpc.UserSessionAuthInfo{
Username: username,
Email: user.Email,
})
}
s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
s.ctrl = &testLoginController{
- Cluster: s.cluster,
- RailsProxy: rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider),
+ Cluster: s.cluster,
+ Parent: &Conn{railsProxy: rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)},
}
s.db = arvadostest.DB(c, s.cluster)
}
} else if defaultItemKind != "" {
item["kind"] = defaultItemKind
}
- items[i] = applySelectParam(opts.Select, item)
+ item = applySelectParam(opts.Select, item)
+ rtr.mungeItemFields(item)
+ items[i] = item
}
if opts.Count == "none" {
delete(tmp, "items_available")
}
} else {
tmp = applySelectParam(opts.Select, tmp)
+ rtr.mungeItemFields(tmp)
}
- // Format non-nil timestamps as rfc3339NanoFixed (by default
- // they will have been encoded to time.RFC3339Nano, which
- // omits trailing zeroes).
- for k, v := range tmp {
- if !strings.HasSuffix(k, "_at") {
- continue
- }
- switch tv := v.(type) {
- case *time.Time:
- if tv == nil {
- break
- }
- tmp[k] = tv.Format(rfc3339NanoFixed)
- case time.Time:
- tmp[k] = tv.Format(rfc3339NanoFixed)
- case string:
- t, err := time.Parse(time.RFC3339Nano, tv)
- if err != nil {
- break
- }
- tmp[k] = t.Format(rfc3339NanoFixed)
- }
- }
w.Header().Set("Content-Type", "application/json")
enc := json.NewEncoder(w)
enc.SetEscapeHTML(false)
return "#" + strings.ToLower(s[1:])
})
}
+
+func (rtr *router) mungeItemFields(tmp map[string]interface{}) {
+ for k, v := range tmp {
+ if strings.HasSuffix(k, "_at") {
+ // Format non-nil timestamps as
+ // rfc3339NanoFixed (otherwise they would use
+ // the default time encoding, which omits
+ // trailing zeroes).
+ switch tv := v.(type) {
+ case *time.Time:
+ if tv == nil || tv.IsZero() {
+ tmp[k] = nil
+ } else {
+ tmp[k] = tv.Format(rfc3339NanoFixed)
+ }
+ case time.Time:
+ if tv.IsZero() {
+ tmp[k] = nil
+ } else {
+ tmp[k] = tv.Format(rfc3339NanoFixed)
+ }
+ case string:
+ if tv == "" {
+ tmp[k] = nil
+ } else if t, err := time.Parse(time.RFC3339Nano, tv); err != nil {
+					// not parseable as a timestamp; pass the value through unchanged
+ } else if t.IsZero() {
+ tmp[k] = nil
+ } else {
+ tmp[k] = t.Format(rfc3339NanoFixed)
+ }
+ }
+ }
+		// The Arvados API spec says that when these fields
+		// are empty they should appear in responses as null,
+		// rather than as a zero value.
+ switch k {
+ case "output_uuid", "output_name", "log_uuid", "description", "requesting_container_uuid", "container_uuid":
+ if v == "" {
+ tmp[k] = nil
+ }
+ case "container_count_max":
+ if v == float64(0) {
+ tmp[k] = nil
+ }
+ }
+ }
+}
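
For reference, the fixed layout differs from the stdlib layout only when the fractional seconds have trailing zeroes; a minimal sketch using the constant defined in this changeset:

package main

import (
	"fmt"
	"time"
)

const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"

func main() {
	t := time.Date(2021, 3, 4, 5, 6, 7, 80000000, time.UTC)
	fmt.Println(t.Format(time.RFC3339Nano)) // 2021-03-04T05:06:07.08Z (trailing zeroes dropped)
	fmt.Println(t.Format(rfc3339NanoFixed)) // 2021-03-04T05:06:07.080000000Z
}
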
return rtr.backend.ContainerDelete(ctx, *opts.(*arvados.DeleteOptions))
},
},
+ {
+ arvados.EndpointContainerRequestCreate,
+ func() interface{} { return &arvados.CreateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestCreate(ctx, *opts.(*arvados.CreateOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestUpdate,
+ func() interface{} { return &arvados.UpdateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestUpdate(ctx, *opts.(*arvados.UpdateOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestGet,
+ func() interface{} { return &arvados.GetOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestGet(ctx, *opts.(*arvados.GetOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestList,
+ func() interface{} { return &arvados.ListOptions{Limit: -1} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestList(ctx, *opts.(*arvados.ListOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestDelete,
+ func() interface{} { return &arvados.DeleteOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestDelete(ctx, *opts.(*arvados.DeleteOptions))
+ },
+ },
{
arvados.EndpointContainerLock,
func() interface{} {
"git.arvados.org/arvados.git/sdk/go/auth"
)
+const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
type TokenProvider func(context.Context) ([]string, error)
func PassthroughTokenProvider(ctx context.Context) ([]string, error) {
delete(params, "limit")
}
}
+
+ if authinfo, ok := params["auth_info"]; ok {
+ if tmp, ok2 := authinfo.(map[string]interface{}); ok2 {
+ for k, v := range tmp {
+ if strings.HasSuffix(k, "_at") {
+				// Change zero time values to nil
+ if v, ok3 := v.(string); ok3 && (strings.HasPrefix(v, "0001-01-01T00:00:00") || v == "") {
+ tmp[k] = nil
+ }
+ }
+ }
+ }
+ }
+
if len(tokens) > 1 {
params["reader_tokens"] = tokens[1:]
}
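
The "0001-01-01T00:00:00" prefix tested above is how Go's zero time.Time marshals to JSON, so an unset timestamp would otherwise reach the Rails API as a year-1 value rather than null; a quick demonstration:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	b, _ := json.Marshal(time.Time{})
	fmt.Println(string(b)) // "0001-01-01T00:00:00Z"
}
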
return resp, err
}
+func (conn *Conn) ContainerRequestCreate(ctx context.Context, options arvados.CreateOptions) (arvados.ContainerRequest, error) {
+ ep := arvados.EndpointContainerRequestCreate
+ var resp arvados.ContainerRequest
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerRequestUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.ContainerRequest, error) {
+ ep := arvados.EndpointContainerRequestUpdate
+ var resp arvados.ContainerRequest
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerRequestGet(ctx context.Context, options arvados.GetOptions) (arvados.ContainerRequest, error) {
+ ep := arvados.EndpointContainerRequestGet
+ var resp arvados.ContainerRequest
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerRequestList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerRequestList, error) {
+ ep := arvados.EndpointContainerRequestList
+ var resp arvados.ContainerRequestList
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerRequestDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.ContainerRequest, error) {
+ ep := arvados.EndpointContainerRequestDelete
+ var resp arvados.ContainerRequest
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
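
These wrappers all follow the same requestAndDecode pattern; a hedged usage sketch, assuming the rpc.NewConn and auth.NewContext signatures used in the tests above (host, token, and UUID are placeholders):

package main

import (
	"context"
	"fmt"
	"net/url"

	"git.arvados.org/arvados.git/lib/controller/rpc"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/auth"
)

func main() {
	// Placeholder controller URL, cluster ID, and token.
	u, _ := url.Parse("https://zzzzz.example.com")
	conn := rpc.NewConn("zzzzz", u, true, rpc.PassthroughTokenProvider)
	// PassthroughTokenProvider forwards whatever tokens are in the context.
	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{"placeholder-token"}})
	cr, err := conn.ContainerRequestGet(ctx, arvados.GetOptions{UUID: "zzzzz-xvhdp-xxxxxxxxxxxxxxx"})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(cr.UUID, cr.ContainerCount)
}
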
func (conn *Conn) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
ep := arvados.EndpointSpecimenCreate
var resp arvados.Specimen
}
type UserSessionAuthInfo struct {
- Email string `json:"email"`
- AlternateEmails []string `json:"alternate_emails"`
- FirstName string `json:"first_name"`
- LastName string `json:"last_name"`
- Username string `json:"username"`
+ UserUUID string `json:"user_uuid"`
+ Email string `json:"email"`
+ AlternateEmails []string `json:"alternate_emails"`
+ FirstName string `json:"first_name"`
+ LastName string `json:"last_name"`
+ Username string `json:"username"`
+ ExpiresAt time.Time `json:"expires_at"`
}
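
The new UserUUID and ExpiresAt fields let a login controller pass along an already-known user and an upstream expiry cap; a minimal sketch of populating the struct (all values are placeholders):

package main

import (
	"fmt"
	"time"

	"git.arvados.org/arvados.git/lib/controller/rpc"
)

func main() {
	authinfo := rpc.UserSessionAuthInfo{
		Email:     "user@example.com",
		Username:  "user",
		ExpiresAt: time.Now().Add(time.Hour), // cap inherited from the upstream login cluster, if any
	}
	fmt.Printf("%+v\n", authinfo)
}
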
type UserSessionCreateOptions struct {
return fmt.Errorf("output path does not correspond to a writable mount point")
}
- if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
+ if needCertMount && runner.Container.RuntimeConstraints.API {
for _, certfile := range arvadosclient.CertFiles {
_, err := os.Stat(certfile)
if err == nil {
},
}
- if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
+ if runner.Container.RuntimeConstraints.API {
tok, err := runner.ContainerToken()
if err != nil {
return err
// CaptureOutput saves data from the container's output directory if
// needed, and updates the container output accordingly.
func (runner *ContainerRunner) CaptureOutput() error {
- if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
+ if runner.Container.RuntimeConstraints.API {
// Output may have been set directly by the container, so
// refresh the container record to check.
err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
cr.Container.Mounts = make(map[string]arvados.Mount)
cr.Container.Mounts["/tmp"] = arvados.Mount{Kind: "tmp"}
cr.Container.OutputPath = "/tmp"
-
- apiflag := true
- cr.Container.RuntimeConstraints.API = &apiflag
+ cr.Container.RuntimeConstraints.API = true
err := cr.SetupMounts()
c.Check(err, IsNil)
cr.CleanupDirs()
checkEmpty()
- apiflag = false
+ cr.Container.RuntimeConstraints.API = false
}
{
EndpointContainerDelete = APIEndpoint{"DELETE", "arvados/v1/containers/{uuid}", ""}
EndpointContainerLock = APIEndpoint{"POST", "arvados/v1/containers/{uuid}/lock", ""}
EndpointContainerUnlock = APIEndpoint{"POST", "arvados/v1/containers/{uuid}/unlock", ""}
+ EndpointContainerRequestCreate = APIEndpoint{"POST", "arvados/v1/container_requests", "container_request"}
+ EndpointContainerRequestUpdate = APIEndpoint{"PATCH", "arvados/v1/container_requests/{uuid}", "container_request"}
+ EndpointContainerRequestGet = APIEndpoint{"GET", "arvados/v1/container_requests/{uuid}", ""}
+ EndpointContainerRequestList = APIEndpoint{"GET", "arvados/v1/container_requests", ""}
+ EndpointContainerRequestDelete = APIEndpoint{"DELETE", "arvados/v1/container_requests/{uuid}", ""}
EndpointUserActivate = APIEndpoint{"POST", "arvados/v1/users/{uuid}/activate", ""}
EndpointUserCreate = APIEndpoint{"POST", "arvados/v1/users", "user"}
EndpointUserCurrent = APIEndpoint{"GET", "arvados/v1/users/current", ""}
ContainerDelete(ctx context.Context, options DeleteOptions) (Container, error)
ContainerLock(ctx context.Context, options GetOptions) (Container, error)
ContainerUnlock(ctx context.Context, options GetOptions) (Container, error)
+ ContainerRequestCreate(ctx context.Context, options CreateOptions) (ContainerRequest, error)
+ ContainerRequestUpdate(ctx context.Context, options UpdateOptions) (ContainerRequest, error)
+ ContainerRequestGet(ctx context.Context, options GetOptions) (ContainerRequest, error)
+ ContainerRequestList(ctx context.Context, options ListOptions) (ContainerRequestList, error)
+ ContainerRequestDelete(ctx context.Context, options DeleteOptions) (ContainerRequest, error)
SpecimenCreate(ctx context.Context, options CreateOptions) (Specimen, error)
SpecimenUpdate(ctx context.Context, options UpdateOptions) (Specimen, error)
SpecimenGet(ctx context.Context, options GetOptions) (Specimen, error)
LogUUID string `json:"log_uuid"`
OutputUUID string `json:"output_uuid"`
RuntimeToken string `json:"runtime_token"`
+ ExpiresAt time.Time `json:"expires_at"`
+ Filters []Filter `json:"filters"`
+ ContainerCount int `json:"container_count"`
}
// Mount is special behavior to attach to a filesystem path or device.
// RuntimeConstraints specify a container's compute resources (RAM,
// CPU) and network connectivity.
type RuntimeConstraints struct {
- API *bool
+ API bool `json:"api"`
RAM int64 `json:"ram"`
VCPUs int `json:"vcpus"`
KeepCacheRAM int64 `json:"keep_cache_ram"`
as.appendCall(ctx, as.ContainerUnlock, options)
return arvados.Container{}, as.Error
}
+func (as *APIStub) ContainerRequestCreate(ctx context.Context, options arvados.CreateOptions) (arvados.ContainerRequest, error) {
+ as.appendCall(ctx, as.ContainerRequestCreate, options)
+ return arvados.ContainerRequest{}, as.Error
+}
+func (as *APIStub) ContainerRequestUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.ContainerRequest, error) {
+ as.appendCall(ctx, as.ContainerRequestUpdate, options)
+ return arvados.ContainerRequest{}, as.Error
+}
+func (as *APIStub) ContainerRequestGet(ctx context.Context, options arvados.GetOptions) (arvados.ContainerRequest, error) {
+ as.appendCall(ctx, as.ContainerRequestGet, options)
+ return arvados.ContainerRequest{}, as.Error
+}
+func (as *APIStub) ContainerRequestList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerRequestList, error) {
+ as.appendCall(ctx, as.ContainerRequestList, options)
+ return arvados.ContainerRequestList{}, as.Error
+}
+func (as *APIStub) ContainerRequestDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.ContainerRequest, error) {
+ as.appendCall(ctx, as.ContainerRequestDelete, options)
+ return arvados.ContainerRequest{}, as.Error
+}
func (as *APIStub) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
as.appendCall(ctx, as.SpecimenCreate, options)
return arvados.Specimen{}, as.Error
raise "Local login disabled when LoginCluster is set"
end
+ max_expires_at = nil
if params[:provider] == 'controller'
if request.headers['Authorization'] != 'Bearer ' + Rails.configuration.SystemRootToken
return send_error('Invalid authorization header', status: 401)
# arvados-controller verified the user and is passing auth_info
# in request params.
authinfo = SafeJSON.load(params[:auth_info])
+ max_expires_at = authinfo["expires_at"]
else
# omniauth middleware verified the user and is passing auth_info
# in request.env.
authinfo = request.env['omniauth.auth']['info'].with_indifferent_access
end
- begin
- user = User.register(authinfo)
- rescue => e
- Rails.logger.warn "User.register error #{e}"
- Rails.logger.warn "authinfo was #{authinfo.inspect}"
- return redirect_to login_failure_url
+    if authinfo['user_uuid'].present?
+ user = User.find_by_uuid(authinfo['user_uuid'])
+ if !user
+ Rails.logger.warn "Nonexistent user_uuid in authinfo #{authinfo.inspect}"
+ return redirect_to login_failure_url
+ end
+ else
+ begin
+ user = User.register(authinfo)
+ rescue => e
+ Rails.logger.warn "User.register error #{e}"
+ Rails.logger.warn "authinfo was #{authinfo.inspect}"
+ return redirect_to login_failure_url
+ end
end
# For the benefit of functional and integration tests:
return send_error 'Invalid remote cluster id', status: 400
end
remote = nil if remote == ''
- return send_api_token_to(return_to_url, user, remote)
+ return send_api_token_to(return_to_url, user, remote, max_expires_at)
end
redirect_to @redirect_to
end
end
end
- def send_api_token_to(callback_url, user, remote=nil)
+ def send_api_token_to(callback_url, user, remote=nil, token_expiration=nil)
# Give the API client a token for making API calls on behalf of
# the authenticated user
@api_client = ApiClient.
find_or_create_by(url_prefix: api_client_url_prefix)
end
-
- token_expiration = nil
if Rails.configuration.Login.TokenLifetime > 0
- token_expiration = Time.now + Rails.configuration.Login.TokenLifetime
+      if token_expiration.nil?
+ token_expiration = Time.now + Rails.configuration.Login.TokenLifetime
+ else
+ token_expiration = [token_expiration, Time.now + Rails.configuration.Login.TokenLifetime].min
+ end
end
+
@api_client_auth = ApiClientAuthorization.
new(user: user,
api_client: @api_client,
nil
end
+ # Fill in implied zero/false values in database records that were
+ # created before #17014 made them explicit, and reset the Rails
+ # "changed" state so the record doesn't appear to have been modified
+ # after loading.
+ #
+ # Invoked by Container and ContainerRequest models as an after_find
+ # hook.
+ def fill_container_defaults_after_find
+ fill_container_defaults
+ set_attribute_was('runtime_constraints', runtime_constraints)
+ set_attribute_was('scheduling_parameters', scheduling_parameters)
+ clear_changes_information
+ end
+
+ # Fill in implied zero/false values. Invoked by ContainerRequest as
+ # a before_validation hook in order to (a) ensure every key has a
+ # value in the updated database record and (b) ensure the attribute
+ # whitelist doesn't reject a change from an explicit zero/false
+ # value in the database to an implicit zero/false value in an update
+ # request.
+ def fill_container_defaults
+ self.runtime_constraints = {
+ 'api' => false,
+ 'keep_cache_ram' => 0,
+ 'ram' => 0,
+ 'vcpus' => 0,
+ }.merge(attributes['runtime_constraints'] || {})
+ self.scheduling_parameters = {
+ 'max_run_time' => 0,
+ 'partitions' => [],
+ 'preemptible' => false,
+ }.merge(attributes['scheduling_parameters'] || {})
+ end
+
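
Note the merge direction above: values already stored in the record win over the implied defaults. The same normalization, sketched in Go purely for illustration (this is not part of the Rails code):

package main

import "fmt"

// fillDefaults copies the defaults first, then overwrites them with
// stored values, so explicit attributes always take precedence.
func fillDefaults(stored, defaults map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(defaults))
	for k, v := range defaults {
		out[k] = v
	}
	for k, v := range stored {
		out[k] = v
	}
	return out
}

func main() {
	defaults := map[string]interface{}{"api": false, "keep_cache_ram": 0, "ram": 0, "vcpus": 0}
	stored := map[string]interface{}{"ram": int64(12000000000), "vcpus": 4}
	fmt.Println(fillDefaults(stored, defaults))
}
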
# ArvadosModel.find_by_uuid needs extra magic to allow it to return
# an object in any class.
def self.find_by_uuid uuid
serialize :command, Array
serialize :scheduling_parameters, Hash
+ after_find :fill_container_defaults_after_find
before_validation :fill_field_defaults, :if => :new_record?
before_validation :set_timestamps
before_validation :check_lock
# containers are suitable).
def self.resolve_runtime_constraints(runtime_constraints)
rc = {}
- defaults = {
- 'keep_cache_ram' =>
- Rails.configuration.Containers.DefaultKeepCacheRAM,
- }
- defaults.merge(runtime_constraints).each do |k, v|
+ runtime_constraints.each do |k, v|
if v.is_a? Array
rc[k] = v[0]
else
rc[k] = v
end
end
+ if rc['keep_cache_ram'] == 0
+ rc['keep_cache_ram'] = Rails.configuration.Containers.DefaultKeepCacheRAM
+ end
rc
end
serialize :command, Array
serialize :scheduling_parameters, Hash
+ after_find :fill_container_defaults_after_find
before_validation :fill_field_defaults, :if => :new_record?
- before_validation :validate_runtime_constraints
+ before_validation :fill_container_defaults
before_validation :set_default_preemptible_scheduling_parameter
before_validation :set_container
validates :command, :container_image, :output_path, :cwd, :presence => true
validates :output_ttl, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
validate :validate_datatypes
+ validate :validate_runtime_constraints
validate :validate_scheduling_parameters
validate :validate_state_change
validate :check_update_whitelist
coll_name = "Container #{out_type} for request #{uuid}"
trash_at = nil
if out_type == 'output'
- if self.output_name
+      if self.output_name && self.output_name != ""
coll_name = self.output_name
end
if self.output_ttl > 0
end
def set_default_preemptible_scheduling_parameter
- c = get_requesting_container()
- if self.state == Committed
- # If preemptible instances (eg: AWS Spot Instances) are allowed,
- # ask them on child containers by default.
- if Rails.configuration.Containers.UsePreemptibleInstances and !c.nil? and
- self.scheduling_parameters['preemptible'].nil?
- self.scheduling_parameters['preemptible'] = true
- end
+ if Rails.configuration.Containers.UsePreemptibleInstances && state == Committed && get_requesting_container()
+ self.scheduling_parameters['preemptible'] = true
end
end
def validate_runtime_constraints
case self.state
when Committed
- [['vcpus', true],
- ['ram', true],
- ['keep_cache_ram', false]].each do |k, required|
- if !required && !runtime_constraints.include?(k)
- next
- end
+ ['vcpus', 'ram'].each do |k|
v = runtime_constraints[k]
- unless (v.is_a?(Integer) && v > 0)
+ if !v.is_a?(Integer) || v <= 0
errors.add(:runtime_constraints,
"[#{k}]=#{v.inspect} must be a positive integer")
end
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
running:
uuid: zzzzz-xvhdp-cr4runningcntnr
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
requester_for_running:
uuid: zzzzz-xvhdp-req4runningcntr
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
running_older:
uuid: zzzzz-xvhdp-cr4runningcntn2
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
completed:
uuid: zzzzz-xvhdp-cr4completedctr
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
completed-older:
uuid: zzzzz-xvhdp-cr4completedcr2
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
completed_diagnostics:
name: CWL diagnostics hasher
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
cr_for_requester:
uuid: zzzzz-xvhdp-cr4requestercnt
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
cr_for_requester2:
uuid: zzzzz-xvhdp-cr4requestercn2
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
running_anonymous_accessible:
uuid: zzzzz-xvhdp-runninganonaccs
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
cr_for_failed:
uuid: zzzzz-xvhdp-cr4failedcontnr
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
canceled_with_queued_container:
uuid: zzzzz-xvhdp-canceledqueuedc
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
canceled_with_locked_container:
uuid: zzzzz-xvhdp-canceledlocekdc
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
canceled_with_running_container:
uuid: zzzzz-xvhdp-canceledrunning
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
running_to_be_deleted:
uuid: zzzzz-xvhdp-cr5runningcntnr
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
completed_with_input_mounts:
uuid: zzzzz-xvhdp-crwithinputmnts
container_uuid: zzzzz-dz642-compltcontainer
log_uuid: zzzzz-4zz18-logcollection01
output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
- mounts:
- /var/lib/cwl/cwl.input.json:
- content:
- input1:
- basename: foo
- class: File
- location: "keep:fa7aeb5140e2848d39b416daeef4ffc5+45/foo"
- input2:
- basename: bar
- class: File
- location: "keep:fa7aeb5140e2848d39b416daeef4ffc5+45/bar"
- /var/lib/cwl/workflow.json: "keep:f9ddda46bb293b6847da984e3aa735db+290"
+ mounts: {
+ "/var/lib/cwl/cwl.input.json": {
+ "kind": "json",
+ "content": {
+ "input1": {
+ "basename": "foo",
+ "class": "File",
+ "location": "keep:fa7aeb5140e2848d39b416daeef4ffc5+45/foo",
+ },
+ "input2": {
+ "basename": "bar",
+ "class": "File",
+ "location": "keep:fa7aeb5140e2848d39b416daeef4ffc5+45/bar",
+ }
+ }
+ }
+ }
uncommitted:
uuid: zzzzz-xvhdp-cr4uncommittedc
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
runtime_token:
uuid: zzzzz-xvhdp-11eklkhy0n4dm86
runtime_constraints:
vcpus: 1
ram: 123
+ mounts: {}
# Test Helper trims the rest of the file
name: cr-<%= i.to_s %>
output_path: test
command: ["echo", "hello"]
+ mounts: {}
<% end %>
# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
cr = JSON.parse(@response.body)
assert_not_nil cr, 'Expected container request'
- assert_equal sp, cr['scheduling_parameters']
+ assert_equal sp['partitions'], cr['scheduling_parameters']['partitions']
+ assert_equal false, cr['scheduling_parameters']['preemptible']
end
test "secret_mounts not in #create responses" do
assert_equal 'bar', req.secret_mounts['/foo']['content']
end
+ test "cancel with runtime_constraints and scheduling_params with default values" do
+ authorize_with :active
+ req = container_requests(:queued)
+
+ patch :update, params: {
+ id: req.uuid,
+ container_request: {
+ state: 'Final',
+ priority: 0,
+ runtime_constraints: {
+ 'vcpus' => 1,
+ 'ram' => 123,
+ 'keep_cache_ram' => 0,
+ },
+ scheduling_parameters: {
+ "preemptible"=>false
+ }
+ },
+ }
+ assert_response :success
+ end
+
test "update without deleting secret_mounts" do
authorize_with :active
req = container_requests(:uncommitted)
1.second)
end
+ [[0, 1.hour, 1.hour],
+   [1.hour, 2.hours, 1.hour],
+   [2.hours, 1.hour, 1.hour],
+   [2.hours, nil, 2.hours],
+ ].each do |config_lifetime, request_lifetime, expect_lifetime|
+ test "login with TokenLifetime=#{config_lifetime} and request has expires_at=#{ request_lifetime.nil? ? "nil" : request_lifetime }" do
+ Rails.configuration.Login.TokenLifetime = config_lifetime
+ expected_expiration_time = Time.now() + expect_lifetime
+ authorize_with :inactive
+ @request.headers['Authorization'] = 'Bearer '+Rails.configuration.SystemRootToken
+ if request_lifetime.nil?
+ get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: ',https://app.example'}
+ else
+ get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com", expires_at: Time.now() + request_lifetime}, return_to: ',https://app.example'}
+ end
+ assert_response :redirect
+ api_client_auth = assigns(:api_client_auth)
+ assert_not_nil api_client_auth
+ assert_not_nil assigns(:api_client)
+ assert_in_delta(api_client_auth.expires_at,
+ expected_expiration_time,
+ 1.second)
+ end
+ end
+
test "login with remote param returns a salted token" do
authorize_with :inactive
api_client_page = 'http://client.example.com/home'
cr.reload
- assert_equal({"vcpus" => 2, "ram" => 30}, cr.runtime_constraints)
+ assert ({"vcpus" => 2, "ram" => 30}.to_a - cr.runtime_constraints.to_a).empty?
assert_not_nil cr.container_uuid
c = Container.find_by_uuid cr.container_uuid
assert_equal({}, c.environment)
assert_equal({"/out" => {"kind"=>"tmp", "capacity"=>1000000}}, c.mounts)
assert_equal "/out", c.output_path
- assert_equal({"keep_cache_ram"=>268435456, "vcpus" => 2, "ram" => 30}, c.runtime_constraints)
+ assert ({"keep_cache_ram"=>268435456, "vcpus" => 2, "ram" => 30}.to_a - c.runtime_constraints.to_a).empty?
assert_operator 0, :<, c.priority
assert_raises(ActiveRecord::RecordInvalid) do
end
else
cr.save!
- assert_equal sp, cr.scheduling_parameters
+ assert (sp.to_a - cr.scheduling_parameters.to_a).empty?
end
end
end
- [
- 'zzzzz-dz642-runningcontainr',
- nil,
- ].each do |requesting_c|
- test "having preemptible instances active on the API server, a committed #{requesting_c.nil? ? 'non-':''}child CR should not ask for preemptible instance if parameter already set to false" do
- common_attrs = {cwd: "test",
- priority: 1,
- command: ["echo", "hello"],
- output_path: "test",
- scheduling_parameters: {"preemptible" => false},
- mounts: {"test" => {"kind" => "json"}}}
-
- Rails.configuration.Containers.UsePreemptibleInstances = true
- set_user_from_auth :active
-
- if requesting_c
- cr = with_container_auth(Container.find_by_uuid requesting_c) do
- create_minimal_req!(common_attrs)
- end
- assert_not_nil cr.requesting_container_uuid
- else
- cr = create_minimal_req!(common_attrs)
- end
-
- cr.state = ContainerRequest::Committed
- cr.save!
-
- assert_equal false, cr.scheduling_parameters['preemptible']
- end
- end
-
[
[true, 'zzzzz-dz642-runningcontainr', true],
- [true, nil, nil],
- [false, 'zzzzz-dz642-runningcontainr', nil],
- [false, nil, nil],
+ [true, nil, false],
+ [false, 'zzzzz-dz642-runningcontainr', false],
+ [false, nil, false],
].each do |preemptible_conf, requesting_c, schedule_preemptible|
test "having Rails.configuration.Containers.UsePreemptibleInstances=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
common_attrs = {cwd: "test",
end
else
cr = create_minimal_req!(common_attrs.merge({state: state}))
- assert_equal sp, cr.scheduling_parameters
+ assert (sp.to_a - cr.scheduling_parameters.to_a).empty?
if state == ContainerRequest::Committed
c = Container.find_by_uuid(cr.container_uuid)
- assert_equal sp, c.scheduling_parameters
+ assert (sp.to_a - c.scheduling_parameters.to_a).empty?
end
end
end
command: ["echo", "hello"],
output_path: "test",
runtime_constraints: {
+ "api" => false,
+ "keep_cache_ram" => 0,
"ram" => 12000000000,
"vcpus" => 4,
},
set_user_from_auth :active
env = {"C" => "3", "B" => "2", "A" => "1"}
m = {"F" => {"kind" => "3"}, "E" => {"kind" => "2"}, "D" => {"kind" => "1"}}
- rc = {"vcpus" => 1, "ram" => 1, "keep_cache_ram" => 1}
+ rc = {"vcpus" => 1, "ram" => 1, "keep_cache_ram" => 1, "api" => true}
c, _ = minimal_new(environment: env, mounts: m, runtime_constraints: rc)
- assert_equal c.environment.to_json, Container.deep_sort_hash(env).to_json
- assert_equal c.mounts.to_json, Container.deep_sort_hash(m).to_json
- assert_equal c.runtime_constraints.to_json, Container.deep_sort_hash(rc).to_json
+ c.reload
+ assert_equal Container.deep_sort_hash(env).to_json, c.environment.to_json
+ assert_equal Container.deep_sort_hash(m).to_json, c.mounts.to_json
+ assert_equal Container.deep_sort_hash(rc).to_json, c.runtime_constraints.to_json
end
test 'deep_sort_hash on array of hashes' do
assert_equal Container::Queued, c1.state
reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))
# See #14584
+ assert_not_nil reused
assert_equal c1.uuid, reused.uuid
end
assert_equal Container::Queued, c1.state
reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))
# See #14584
+ assert_not_nil reused
assert_equal c1.uuid, reused.uuid
end
assert_equal Container::Queued, c1.state
reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))
# See #14584
+ assert_not_nil reused
assert_equal c1.uuid, reused.uuid
end
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+ "bytes"
+ "net"
+ "os"
+ "path/filepath"
+
+ "git.arvados.org/arvados.git/lib/boot"
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&FederationSuite{})
+
+var origAPIHost, origAPIToken string
+
+type FederationSuite struct {
+ testClusters map[string]*boot.TestCluster
+ oidcprovider *arvadostest.OIDCProvider
+}
+
+func (s *FederationSuite) SetUpSuite(c *check.C) {
+ origAPIHost = os.Getenv("ARVADOS_API_HOST")
+ origAPIToken = os.Getenv("ARVADOS_API_TOKEN")
+
+ cwd, _ := os.Getwd()
+
+ s.oidcprovider = arvadostest.NewOIDCProvider(c)
+ s.oidcprovider.AuthEmail = "user@example.com"
+ s.oidcprovider.AuthEmailVerified = true
+ s.oidcprovider.AuthName = "Example User"
+ s.oidcprovider.ValidClientID = "clientid"
+ s.oidcprovider.ValidClientSecret = "clientsecret"
+
+ s.testClusters = map[string]*boot.TestCluster{
+ "z1111": nil,
+ "z2222": nil,
+ }
+ hostport := map[string]string{}
+ for id := range s.testClusters {
+ hostport[id] = func() string {
+			// TODO: Instead of expecting random ports on
+			// 127.0.0.11 and 127.0.0.22 to be race-safe, try
+			// different 127.x.y.z addresses until finding one
+			// that isn't in use.
+ ln, err := net.Listen("tcp", ":0")
+ c.Assert(err, check.IsNil)
+ ln.Close()
+ _, port, err := net.SplitHostPort(ln.Addr().String())
+ c.Assert(err, check.IsNil)
+ return "127.0.0." + id[3:] + ":" + port
+ }()
+ }
+ for id := range s.testClusters {
+ yaml := `Clusters:
+ ` + id + `:
+ Services:
+ Controller:
+ ExternalURL: https://` + hostport[id] + `
+ TLS:
+ Insecure: true
+ SystemLogs:
+ Format: text
+ RemoteClusters:
+ z1111:
+ Host: ` + hostport["z1111"] + `
+ Scheme: https
+ Insecure: true
+ Proxy: true
+ ActivateUsers: true
+`
+ if id != "z2222" {
+ yaml += ` z2222:
+ Host: ` + hostport["z2222"] + `
+ Scheme: https
+ Insecure: true
+ Proxy: true
+ ActivateUsers: true
+`
+ }
+ if id == "z1111" {
+ yaml += `
+ Login:
+ LoginCluster: z1111
+ OpenIDConnect:
+ Enable: true
+ Issuer: ` + s.oidcprovider.Issuer.URL + `
+ ClientID: ` + s.oidcprovider.ValidClientID + `
+ ClientSecret: ` + s.oidcprovider.ValidClientSecret + `
+ EmailClaim: email
+ EmailVerifiedClaim: email_verified
+`
+ } else {
+ yaml += `
+ Login:
+ LoginCluster: z1111
+`
+ }
+
+ loader := config.NewLoader(bytes.NewBufferString(yaml), ctxlog.TestLogger(c))
+ loader.Path = "-"
+ loader.SkipLegacy = true
+ loader.SkipAPICalls = true
+ cfg, err := loader.Load()
+ c.Assert(err, check.IsNil)
+ tc := boot.NewTestCluster(
+ filepath.Join(cwd, "..", ".."),
+ id, cfg, "127.0.0."+id[3:], c.Log)
+ s.testClusters[id] = tc
+ s.testClusters[id].Start()
+ }
+ for _, tc := range s.testClusters {
+ ok := tc.WaitReady()
+ c.Assert(ok, check.Equals, true)
+ }
+
+ // Activate user, make it admin.
+ conn1 := s.testClusters["z1111"].Conn()
+ rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+ userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ user1, err := conn1.UserGetCurrent(userctx1, arvados.GetOptions{})
+ c.Assert(err, check.IsNil)
+ c.Assert(user1.IsAdmin, check.Equals, false)
+ user1, err = conn1.UserUpdate(rootctx1, arvados.UpdateOptions{
+ UUID: user1.UUID,
+ Attrs: map[string]interface{}{
+ "is_admin": true,
+ },
+ })
+ c.Assert(err, check.IsNil)
+ c.Assert(user1.IsAdmin, check.Equals, true)
+}
+
+func (s *FederationSuite) TearDownSuite(c *check.C) {
+	for _, tc := range s.testClusters {
+		tc.Super.Stop()
+ }
+ _ = os.Setenv("ARVADOS_API_HOST", origAPIHost)
+ _ = os.Setenv("ARVADOS_API_TOKEN", origAPIToken)
+}
+
+func (s *FederationSuite) TestGroupSyncingOnFederatedCluster(c *check.C) {
+ // Get admin user's V2 token
+ conn1 := s.testClusters["z1111"].Conn()
+ rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+ userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ user1Auth, err := conn1.APIClientAuthorizationCurrent(userctx1, arvados.GetOptions{})
+ c.Check(err, check.IsNil)
+ userV2Token := user1Auth.TokenV2()
+
+ // Get federated admin clients on z2222 to set up environment
+ conn2 := s.testClusters["z2222"].Conn()
+ userctx2, userac2, _ := s.testClusters["z2222"].ClientsWithToken(userV2Token)
+ user2, err := conn2.UserGetCurrent(userctx2, arvados.GetOptions{})
+ c.Check(err, check.IsNil)
+ c.Check(user2.IsAdmin, check.Equals, true)
+
+ // Set up environment for sync-groups using admin user credentials on z2222
+ err = os.Setenv("ARVADOS_API_HOST", userac2.APIHost)
+ c.Assert(err, check.IsNil)
+ err = os.Setenv("ARVADOS_API_TOKEN", userac2.AuthToken)
+ c.Assert(err, check.IsNil)
+
+ // Check that no parent group is created
+ gl := arvados.GroupList{}
+ params := arvados.ResourceListParams{
+ Filters: []arvados.Filter{{
+ Attr: "owner_uuid",
+ Operator: "=",
+ Operand: s.testClusters["z2222"].ClusterID + "-tpzed-000000000000000",
+ }, {
+ Attr: "name",
+ Operator: "=",
+ Operand: "Externally synchronized groups",
+ }},
+ }
+ err = userac2.RequestAndDecode(&gl, "GET", "/arvados/v1/groups", nil, params)
+ c.Assert(err, check.IsNil)
+ c.Assert(gl.ItemsAvailable, check.Equals, 0)
+
+ // Set up config, confirm that the parent group was created
+ os.Args = []string{"cmd", "somefile.csv"}
+ config, err := GetConfig()
+ c.Assert(err, check.IsNil)
+	err = userac2.RequestAndDecode(&gl, "GET", "/arvados/v1/groups", nil, params)
+	c.Assert(err, check.IsNil)
+ c.Assert(gl.ItemsAvailable, check.Equals, 1)
+
+ // Run the tool with custom config
+ data := [][]string{
+ {"TestGroup1", user2.Email},
+ }
+ tmpfile, err := MakeTempCSVFile(data)
+ c.Assert(err, check.IsNil)
+ defer os.Remove(tmpfile.Name()) // clean up
+ config.Path = tmpfile.Name()
+ err = doMain(&config)
+ c.Assert(err, check.IsNil)
+ // Check the group was created correctly, and has the user as a member
+ groupUUID, err := RemoteGroupExists(&config, "TestGroup1")
+ c.Assert(err, check.IsNil)
+ c.Assert(groupUUID, check.Not(check.Equals), "")
+ c.Assert(GroupMembershipExists(config.Client, user2.UUID, groupUUID, "can_write"), check.Equals, true)
+}