Merge branch '16048-reload-config'
author Tom Clegg <tom@tomclegg.ca>
Tue, 21 Apr 2020 18:34:18 +0000 (14:34 -0400)
committer Tom Clegg <tom@tomclegg.ca>
Tue, 21 Apr 2020 18:34:18 +0000 (14:34 -0400)
closes #16048

Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@tomclegg.ca>

42 files changed:
AUTHORS
apps/workbench/Gemfile.lock
apps/workbench/npm_packages
build/run-library.sh
lib/boot/cert.go
lib/boot/example.sh [new file with mode: 0755]
lib/config/config.default.yml
lib/config/generated_config.go
lib/controller/federation/conn.go
lib/controller/federation/list.go
lib/controller/federation/list_test.go
lib/controller/federation/user_test.go
lib/controller/integration_test.go
lib/controller/router/request.go
lib/controller/rpc/conn.go
lib/crunchrun/crunchrun.go
lib/dispatchcloud/container/queue.go
lib/install/deps.go
sdk/cwl/tests/federation/README
sdk/cwl/tests/federation/arvbox-make-federation.cwl
sdk/cwl/tests/federation/arvboxcwl/fed-config.cwl [moved from sdk/cwl/tests/federation/arvbox/fed-config.cwl with 96% similarity]
sdk/cwl/tests/federation/arvboxcwl/mkdir.cwl [moved from sdk/cwl/tests/federation/arvbox/mkdir.cwl with 91% similarity]
sdk/cwl/tests/federation/arvboxcwl/setup-user.cwl [moved from sdk/cwl/tests/federation/arvbox/setup-user.cwl with 87% similarity]
sdk/cwl/tests/federation/arvboxcwl/setup_user.py [moved from sdk/cwl/tests/federation/arvbox/setup_user.py with 100% similarity]
sdk/cwl/tests/federation/arvboxcwl/start.cwl [moved from sdk/cwl/tests/federation/arvbox/start.cwl with 95% similarity]
sdk/cwl/tests/federation/arvboxcwl/stop.cwl [moved from sdk/cwl/tests/federation/arvbox/stop.cwl with 100% similarity]
sdk/go/arvados/api.go
sdk/go/arvados/resource_list.go
sdk/go/arvados/resource_list_test.go
sdk/python/arvados/commands/federation_migrate.py
sdk/python/tests/fed-migrate/README
sdk/python/tests/fed-migrate/arvbox-make-federation.cwl
sdk/python/tests/fed-migrate/check.py
sdk/python/tests/run_test_server.py
services/api/Gemfile.lock
services/api/app/controllers/application_controller.rb
services/api/app/controllers/arvados/v1/schema_controller.rb
services/api/app/controllers/arvados/v1/users_controller.rb
services/api/config/arvados_config.rb
services/api/test/functional/arvados/v1/users_controller_test.rb
services/api/test/integration/users_test.rb
services/keepstore/command.go

diff --git a/AUTHORS b/AUTHORS
index 436a504c36ab9f93d89e0742c9f9414032b6b8b6..93bdb37dc8f89fa0e8452804beee4943faa9c22e 100644 (file)
--- a/AUTHORS
+++ b/AUTHORS
@@ -19,3 +19,5 @@ Thomas Mooney <tmooney@genome.wustl.edu>
 Chen Chen <aflyhorse@gmail.com>
 Veritas Genetics, Inc. <*@veritasgenetics.com>
 Curii Corporation, Inc. <*@curii.com>
+Dante Tsang <dante@dantetsang.com>
+Codex Genetics Ltd <info@codexgenetics.com>
\ No newline at end of file
index 2af9c8b16f0b282ef3abc1a4d7c3880e690f7420..e722fa24196d1bc9b38ced9ba1cc27102471ac88 100644 (file)
@@ -214,7 +214,7 @@ GEM
       multi_json (~> 1.0)
       websocket-driver (>= 0.2.0)
     public_suffix (4.0.3)
-    rack (2.0.7)
+    rack (2.2.2)
     rack-mini-profiler (1.0.2)
       rack (>= 1.2.0)
     rack-test (0.6.3)
@@ -375,4 +375,4 @@ DEPENDENCIES
   uglifier (~> 2.0)
 
 BUNDLED WITH
-   1.11
+   1.16.6
index 7c757db35bdc6cf0ff72094a6ad285701ba02a0e..05802b4baf77aa52461f4e7e197e1bfee8912384 100644 (file)
@@ -8,7 +8,7 @@
 npm 'browserify', require: false
 npm 'jquery'
 npm 'awesomplete'
-npm 'jssha'
+npm 'jssha', '2.4.2'
 
-npm 'mithril', '1.1.6'
+npm 'mithril', '1.1.7'
 npm 'es6-object-assign'
index ac5dc718be1c6e36e86e743bf0805ab11c891da7..fd37f632b0350fac411e5d3e2fb819c8c045d7af 100755 (executable)
@@ -912,7 +912,9 @@ timer_reset() {
 }
 
 timer() {
-    echo -n "$(($SECONDS - $t0))s"
+    if [[ -n "$t0" ]]; then
+        echo -n "$(($SECONDS - $t0))s"
+    fi
 }
 
 report_outcomes() {
index 4b12c72edd9063a72afd7c287d9d86f2cc752b3f..f0797c2ac51fb7ec9f861413a371f133b3237bd9 100644 (file)
@@ -6,7 +6,9 @@ package boot
 
 import (
        "context"
+       "fmt"
        "io/ioutil"
+       "net"
        "path/filepath"
 )
 
@@ -23,6 +25,13 @@ func (createCertificates) String() string {
 }
 
 func (createCertificates) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+       var san string
+       if net.ParseIP(super.ListenHost) != nil {
+               san = fmt.Sprintf("IP:%s", super.ListenHost)
+       } else {
+               san = fmt.Sprintf("DNS:%s", super.ListenHost)
+       }
+
        // Generate root key
        err := super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "genrsa", "-out", "rootCA.key", "4096")
        if err != nil {
@@ -43,10 +52,7 @@ func (createCertificates) Run(ctx context.Context, fail func(error), super *Supe
        if err != nil {
                return err
        }
-       err = ioutil.WriteFile(filepath.Join(super.tempdir, "server.cfg"), append(defaultconf, []byte(`
-[SAN]
-subjectAltName=DNS:localhost,DNS:localhost.localdomain
-`)...), 0644)
+       err = ioutil.WriteFile(filepath.Join(super.tempdir, "server.cfg"), append(defaultconf, []byte(fmt.Sprintf("\n[SAN]\nsubjectAltName=DNS:localhost,DNS:localhost.localdomain,%s\n", san))...), 0644)
        if err != nil {
                return err
        }
@@ -56,7 +62,7 @@ subjectAltName=DNS:localhost,DNS:localhost.localdomain
                return err
        }
        // Sign certificate
-       err = super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "x509", "-req", "-in", "server.csr", "-CA", "rootCA.crt", "-CAkey", "rootCA.key", "-CAcreateserial", "-out", "server.crt", "-days", "3650", "-sha256")
+       err = super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "x509", "-req", "-in", "server.csr", "-CA", "rootCA.crt", "-CAkey", "rootCA.key", "-CAcreateserial", "-out", "server.crt", "-extfile", "server.cfg", "-extensions", "SAN", "-days", "3650", "-sha256")
        if err != nil {
                return err
        }
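
Editor's note: the cert.go hunk above picks the certificate's subjectAltName based on whether the supervisor's ListenHost parses as an IP address, then feeds the SAN section to openssl via -extfile/-extensions. A minimal Go sketch of that decision (not part of this commit; the sanFor helper name is illustrative):

package main

import (
	"fmt"
	"net"
)

// sanFor mirrors the SAN selection added above: an IP listen address gets an
// "IP:" subject alternative name, anything else a "DNS:" one.
func sanFor(listenHost string) string {
	if net.ParseIP(listenHost) != nil {
		return fmt.Sprintf("IP:%s", listenHost)
	}
	return fmt.Sprintf("DNS:%s", listenHost)
}

func main() {
	fmt.Println(sanFor("127.0.0.1"))   // IP:127.0.0.1
	fmt.Println(sanFor("example.com")) // DNS:example.com
}
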
diff --git a/lib/boot/example.sh b/lib/boot/example.sh
new file mode 100755 (executable)
index 0000000..c6a66e3
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Example of using `arvados-server boot` in a script. Bring up a test
+# cluster, wait for it to come up, fetch something from its discovery
+# doc, and shut down.
+
+set -e -o pipefail
+
+cleanup() {
+    set -x
+    kill ${boot_PID} ${consume_stdout_PID}
+    wait ${boot_PID} ${consume_stdout_PID} || true
+    echo >&2 "done"
+}
+
+coproc boot (arvados-server boot -type test -config doc/examples/config/zzzzz.yml -own-temporary-database -timeout 20m)
+trap cleanup ERR EXIT
+
+read controllerURL <&"${boot[0]}"
+
+# Copy coproc's stdout to stderr, to ensure `arvados-server boot`
+# doesn't get blocked trying to write stdout.
+exec 7<&"${boot[0]}"; coproc consume_stdout (cat <&7 >&2)
+
+keepwebURL=$(curl --silent --fail --insecure "${controllerURL}/discovery/v1/apis/arvados/v1/rest" | jq -r .keepWebServiceUrl)
+echo >&2 "controller is at $controllerURL"
+echo >&2 "keep-web is at $keepwebURL"
index fd59c9c4253618f03f18a64f86df0312462d27eb..d4870919eaad2bb0fdb7c1695bf7d28028b38236 100644 (file)
@@ -184,12 +184,21 @@ Clusters:
       MaxItemsPerResponse: 1000
 
       # Maximum number of concurrent requests to accept in a single
-      # service process, or 0 for no limit. Currently supported only
-      # by keepstore.
+      # service process, or 0 for no limit.
       MaxConcurrentRequests: 0
 
-      # Maximum number of 64MiB memory buffers per keepstore server
-      # process, or 0 for no limit.
+      # Maximum number of 64MiB memory buffers per Keepstore server process, or
+      # 0 for no limit. When this limit is reached, up to
+      # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
+      # buffers (like GET and PUT) will wait for buffer space to be released.
+      # Any HTTP requests beyond MaxConcurrentRequests will receive an
+      # immediate 503 response.
+      #
+      # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB
+      # * 1.1) fits comfortably in memory. On a host dedicated to running
+      # Keepstore, divide total memory by 88MiB to suggest a suitable value.
+      # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440
+      # kB, compute 7125440 / (88 * 1024) = 79 and configure MaxKeepBlobBuffers: 79
       MaxKeepBlobBuffers: 128
 
       # API methods to disable. Disabled methods are not listed in the
index 2fce813a79d80a1a32790566735f874456da7cb9..42707396dddc0558d8a901d3ab6e39d79e36e03d 100644 (file)
@@ -190,12 +190,21 @@ Clusters:
       MaxItemsPerResponse: 1000
 
       # Maximum number of concurrent requests to accept in a single
-      # service process, or 0 for no limit. Currently supported only
-      # by keepstore.
+      # service process, or 0 for no limit.
       MaxConcurrentRequests: 0
 
-      # Maximum number of 64MiB memory buffers per keepstore server
-      # process, or 0 for no limit.
+      # Maximum number of 64MiB memory buffers per Keepstore server process, or
+      # 0 for no limit. When this limit is reached, up to
+      # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
+      # buffers (like GET and PUT) will wait for buffer space to be released.
+      # Any HTTP requests beyond MaxConcurrentRequests will receive an
+      # immediate 503 response.
+      #
+      # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB
+      # * 1.1) fits comfortably in memory. On a host dedicated to running
+      # Keepstore, divide total memory by 88MiB to suggest a suitable value.
+      # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440
+      # kB, compute 7125440 / (88 * 1024) = 79 and configure MaxKeepBlobBuffers: 79
       MaxKeepBlobBuffers: 128
 
       # API methods to disable. Disabled methods are not listed in the
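
Editor's note: the new MaxKeepBlobBuffers comment suggests sizing the buffer count as total memory divided by 88 MiB (64 MiB per buffer plus roughly 10% overhead). A hedged Go sketch of that arithmetic, reading MemTotal from /proc/meminfo; the function names are illustrative, not part of Arvados:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// suggestedBuffers applies the rule from the comment above:
// MaxKeepBlobBuffers ~= MemTotal / 88 MiB.
func suggestedBuffers(memTotalKB int64) int64 {
	return memTotalKB / (88 * 1024)
}

func main() {
	f, err := os.Open("/proc/meminfo")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) >= 2 && fields[0] == "MemTotal:" {
			kb, _ := strconv.ParseInt(fields[1], 10, 64)
			// e.g. MemTotal: 7125440 kB => 7125440 / (88*1024) = 79
			fmt.Println("suggested MaxKeepBlobBuffers:", suggestedBuffers(kb))
		}
	}
}
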
index 3e37a5c618429833ea2874ac14cacc7d98b3efa9..418b6811beeb82d814c16603e50b694502372522 100644 (file)
@@ -344,85 +344,97 @@ func (conn *Conn) SpecimenDelete(ctx context.Context, options arvados.DeleteOpti
 }
 
 var userAttrsCachedFromLoginCluster = map[string]bool{
-       "created_at":              true,
-       "email":                   true,
-       "first_name":              true,
-       "is_active":               true,
-       "is_admin":                true,
-       "last_name":               true,
-       "modified_at":             true,
-       "modified_by_client_uuid": true,
-       "modified_by_user_uuid":   true,
-       "prefs":                   true,
-       "username":                true,
-
-       "etag":         false,
-       "full_name":    false,
-       "identity_url": false,
-       "is_invited":   false,
-       "owner_uuid":   false,
-       "uuid":         false,
-       "writable_by":  false,
-}
-
-func (conn *Conn) UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {
+       "created_at":  true,
+       "email":       true,
+       "first_name":  true,
+       "is_active":   true,
+       "is_admin":    true,
+       "last_name":   true,
+       "modified_at": true,
+       "prefs":       true,
+       "username":    true,
+
+       "etag":                    false,
+       "full_name":               false,
+       "identity_url":            false,
+       "is_invited":              false,
+       "modified_by_client_uuid": false,
+       "modified_by_user_uuid":   false,
+       "owner_uuid":              false,
+       "uuid":                    false,
+       "writable_by":             false,
+}
+
+func (conn *Conn) batchUpdateUsers(ctx context.Context,
+       options arvados.ListOptions,
+       items []arvados.User) (err error) {
+
+       id := conn.cluster.Login.LoginCluster
        logger := ctxlog.FromContext(ctx)
-       if id := conn.cluster.Login.LoginCluster; id != "" && id != conn.cluster.ClusterID {
-               resp, err := conn.chooseBackend(id).UserList(ctx, options)
-               if err != nil {
-                       return resp, err
+       batchOpts := arvados.UserBatchUpdateOptions{Updates: map[string]map[string]interface{}{}}
+       for _, user := range items {
+               if !strings.HasPrefix(user.UUID, id) {
+                       continue
+               }
+               logger.Debugf("cache user info for uuid %q", user.UUID)
+
+               // If the remote cluster has null timestamps
+               // (e.g., test server with incomplete
+               // fixtures) use dummy timestamps (instead of
+               // the zero time, which causes a Rails API
+               // error "year too big to marshal: 1 UTC").
+               if user.ModifiedAt.IsZero() {
+                       user.ModifiedAt = time.Now()
+               }
+               if user.CreatedAt.IsZero() {
+                       user.CreatedAt = time.Now()
                }
-               batchOpts := arvados.UserBatchUpdateOptions{Updates: map[string]map[string]interface{}{}}
-               for _, user := range resp.Items {
-                       if !strings.HasPrefix(user.UUID, id) {
-                               continue
-                       }
-                       logger.Debugf("cache user info for uuid %q", user.UUID)
-
-                       // If the remote cluster has null timestamps
-                       // (e.g., test server with incomplete
-                       // fixtures) use dummy timestamps (instead of
-                       // the zero time, which causes a Rails API
-                       // error "year too big to marshal: 1 UTC").
-                       if user.ModifiedAt.IsZero() {
-                               user.ModifiedAt = time.Now()
-                       }
-                       if user.CreatedAt.IsZero() {
-                               user.CreatedAt = time.Now()
-                       }
 
-                       var allFields map[string]interface{}
-                       buf, err := json.Marshal(user)
-                       if err != nil {
-                               return arvados.UserList{}, fmt.Errorf("error encoding user record from remote response: %s", err)
-                       }
-                       err = json.Unmarshal(buf, &allFields)
-                       if err != nil {
-                               return arvados.UserList{}, fmt.Errorf("error transcoding user record from remote response: %s", err)
-                       }
-                       updates := allFields
-                       if len(options.Select) > 0 {
-                               updates = map[string]interface{}{}
-                               for _, k := range options.Select {
-                                       if v, ok := allFields[k]; ok && userAttrsCachedFromLoginCluster[k] {
-                                               updates[k] = v
-                                       }
+               var allFields map[string]interface{}
+               buf, err := json.Marshal(user)
+               if err != nil {
+                       return fmt.Errorf("error encoding user record from remote response: %s", err)
+               }
+               err = json.Unmarshal(buf, &allFields)
+               if err != nil {
+                       return fmt.Errorf("error transcoding user record from remote response: %s", err)
+               }
+               updates := allFields
+               if len(options.Select) > 0 {
+                       updates = map[string]interface{}{}
+                       for _, k := range options.Select {
+                               if v, ok := allFields[k]; ok && userAttrsCachedFromLoginCluster[k] {
+                                       updates[k] = v
                                }
-                       } else {
-                               for k := range updates {
-                                       if !userAttrsCachedFromLoginCluster[k] {
-                                               delete(updates, k)
-                                       }
+                       }
+               } else {
+                       for k := range updates {
+                               if !userAttrsCachedFromLoginCluster[k] {
+                                       delete(updates, k)
                                }
                        }
-                       batchOpts.Updates[user.UUID] = updates
                }
-               if len(batchOpts.Updates) > 0 {
-                       ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})
-                       _, err = conn.local.UserBatchUpdate(ctxRoot, batchOpts)
-                       if err != nil {
-                               return arvados.UserList{}, fmt.Errorf("error updating local user records: %s", err)
-                       }
+               batchOpts.Updates[user.UUID] = updates
+       }
+       if len(batchOpts.Updates) > 0 {
+               ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})
+               _, err = conn.local.UserBatchUpdate(ctxRoot, batchOpts)
+               if err != nil {
+                       return fmt.Errorf("error updating local user records: %s", err)
+               }
+       }
+       return nil
+}
+
+func (conn *Conn) UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {
+       if id := conn.cluster.Login.LoginCluster; id != "" && id != conn.cluster.ClusterID && !options.BypassFederation {
+               resp, err := conn.chooseBackend(id).UserList(ctx, options)
+               if err != nil {
+                       return resp, err
+               }
+               err = conn.batchUpdateUsers(ctx, options, resp.Items)
+               if err != nil {
+                       return arvados.UserList{}, err
                }
                return resp, nil
        } else {
@@ -435,15 +447,18 @@ func (conn *Conn) UserCreate(ctx context.Context, options arvados.CreateOptions)
 }
 
 func (conn *Conn) UserUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.User, error) {
+       if options.BypassFederation {
+               return conn.local.UserUpdate(ctx, options)
+       }
        return conn.chooseBackend(options.UUID).UserUpdate(ctx, options)
 }
 
 func (conn *Conn) UserUpdateUUID(ctx context.Context, options arvados.UpdateUUIDOptions) (arvados.User, error) {
-       return conn.chooseBackend(options.UUID).UserUpdateUUID(ctx, options)
+       return conn.local.UserUpdateUUID(ctx, options)
 }
 
 func (conn *Conn) UserMerge(ctx context.Context, options arvados.UserMergeOptions) (arvados.User, error) {
-       return conn.chooseBackend(options.OldUserUUID).UserMerge(ctx, options)
+       return conn.local.UserMerge(ctx, options)
 }
 
 func (conn *Conn) UserActivate(ctx context.Context, options arvados.UserActivateOptions) (arvados.User, error) {
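
Editor's note: after this change, UserList forwards to the LoginCluster and caches selected user attributes locally (batchUpdateUsers) unless the caller sets BypassFederation, in which case the request is served from the local database only. A small Go sketch of that routing condition (illustrative only, not part of the Arvados source):

package main

import "fmt"

// routesToLoginCluster mirrors the condition used by UserList above: forward
// to the LoginCluster only when one is configured, it is not the local
// cluster, and the caller did not request bypass_federation.
func routesToLoginCluster(loginCluster, localClusterID string, bypassFederation bool) bool {
	return loginCluster != "" && loginCluster != localClusterID && !bypassFederation
}

func main() {
	fmt.Println(routesToLoginCluster("z1111", "z2222", false)) // true: forward to z1111, cache users locally
	fmt.Println(routesToLoginCluster("z1111", "z2222", true))  // false: serve from local database only
	fmt.Println(routesToLoginCluster("", "z2222", false))      // false: no LoginCluster configured
}
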
index 6ee813317417cd012d829387e10e04f2ea1dad17..0a596eb9cb6ac6aac690dc19a2e43ca3dc723340 100644 (file)
@@ -106,6 +106,13 @@ func (conn *Conn) generated_CollectionList(ctx context.Context, options arvados.
 // corresponding options argument suitable for sending to that
 // backend.
 func (conn *Conn) splitListRequest(ctx context.Context, opts arvados.ListOptions, fn func(context.Context, string, arvados.API, arvados.ListOptions) ([]string, error)) error {
+
+       if opts.BypassFederation {
+               // Client requested no federation.  Pass through.
+               _, err := fn(ctx, conn.cluster.ClusterID, conn.local, opts)
+               return err
+       }
+
        cannotSplit := false
        var matchAllFilters map[string]bool
        for _, f := range opts.Filters {
index ce84378a3cbb79d7a4ae894f966bc627bef2b08d..e6d2816f610417e9c980ba539930532da768fc7c 100644 (file)
@@ -58,7 +58,7 @@ func (cl *collectionLister) CollectionList(ctx context.Context, options arvados.
                if cl.MaxPageSize > 0 && len(resp.Items) >= cl.MaxPageSize {
                        break
                }
-               if options.Limit >= 0 && len(resp.Items) >= options.Limit {
+               if options.Limit >= 0 && int64(len(resp.Items)) >= options.Limit {
                        break
                }
                if cl.matchFilters(c, options.Filters) {
@@ -115,8 +115,8 @@ func (s *CollectionListSuite) SetUpTest(c *check.C) {
 
 type listTrial struct {
        count        string
-       limit        int
-       offset       int
+       limit        int64
+       offset       int64
        order        []string
        filters      []arvados.Filter
        selectfields []string
@@ -314,7 +314,7 @@ func (s *CollectionListSuite) TestCollectionListMultiSiteWithCount(c *check.C) {
 }
 
 func (s *CollectionListSuite) TestCollectionListMultiSiteWithLimit(c *check.C) {
-       for _, limit := range []int{0, 1, 2} {
+       for _, limit := range []int64{0, 1, 2} {
                s.test(c, listTrial{
                        count: "none",
                        limit: limit,
index c087273afef5e7607bb4f78388e521b94a1e6dfa..09aa5086decd3eeb62c20874b64ad1308674668c 100644 (file)
@@ -5,8 +5,10 @@
 package federation
 
 import (
+       "context"
        "encoding/json"
        "errors"
+       "math"
        "net/url"
        "os"
        "strings"
@@ -14,6 +16,8 @@ import (
        "git.arvados.org/arvados.git/lib/controller/rpc"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
        check "gopkg.in/check.v1"
 )
 
@@ -32,6 +36,7 @@ func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
        for _, updateFail := range []bool{false, true} {
                for _, opts := range []arvados.ListOptions{
                        {Offset: 0, Limit: -1, Select: nil},
+                       {Offset: 0, Limit: math.MaxInt64, Select: nil},
                        {Offset: 1, Limit: 1, Select: nil},
                        {Offset: 0, Limit: 2, Select: []string{"uuid"}},
                        {Offset: 0, Limit: 2, Select: []string{"uuid", "email"}},
@@ -45,6 +50,9 @@ func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
                                s.fed.local = rpc.NewConn(s.cluster.ClusterID, spy.URL, true, rpc.PassthroughTokenProvider)
                        }
                        userlist, err := s.fed.UserList(s.ctx, opts)
+                       if err != nil {
+                               c.Logf("... UserList failed %q", err)
+                       }
                        if updateFail && err == nil {
                                // All local updates fail, so the only
                                // cases expected to succeed are the
@@ -109,6 +117,36 @@ func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
        }
 }
 
+func (s *UserSuite) TestLoginClusterUserListBypassFederation(c *check.C) {
+       s.cluster.ClusterID = "local"
+       s.cluster.Login.LoginCluster = "zzzzz"
+       s.fed = New(s.cluster)
+       s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")},
+               true, rpc.PassthroughTokenProvider))
+
+       spy := arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+       s.fed.local = rpc.NewConn(s.cluster.ClusterID, spy.URL, true, rpc.PassthroughTokenProvider)
+
+       _, err := s.fed.UserList(s.ctx, arvados.ListOptions{Offset: 0, Limit: math.MaxInt64, Select: nil, BypassFederation: true})
+       // this will fail because it is not using a root token
+       c.Check(err.(*arvados.TransactionError).StatusCode, check.Equals, 403)
+
+       // Now use SystemRootToken
+       ctx := context.Background()
+       ctx = ctxlog.Context(ctx, ctxlog.TestLogger(c))
+       ctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{arvadostest.SystemRootToken}})
+
+       // Assert that it did not try to batch update users.
+       _, err = s.fed.UserList(ctx, arvados.ListOptions{Offset: 0, Limit: math.MaxInt64, Select: nil, BypassFederation: true})
+       for _, d := range spy.RequestDumps {
+               d := string(d)
+               if strings.Contains(d, "PATCH /arvados/v1/users/batch") {
+                       c.Fail()
+               }
+       }
+       c.Check(err, check.IsNil)
+}
+
 // userAttrsCachedFromLoginCluster must have an entry for every field
 // in the User struct.
 func (s *UserSuite) TestUserAttrsUpdateWhitelist(c *check.C) {
index 6472e274201a782b5447955116a30ade448e1190..aad2c4775ad20c0884cca173326e64fa97f98bc7 100644 (file)
@@ -8,6 +8,7 @@ import (
        "bytes"
        "context"
        "io"
+       "math"
        "net"
        "net/url"
        "os"
@@ -73,7 +74,7 @@ func (s *IntegrationSuite) SetUpSuite(c *check.C) {
     TLS:
       Insecure: true
     Login:
-      LoginCluster: z1111
+      LoginCluster: z1111
     SystemLogs:
       Format: text
     RemoteClusters:
@@ -149,7 +150,7 @@ func (s *IntegrationSuite) clientsWithToken(clusterID string, token string) (con
        return ctx, ac, kc
 }
 
-func (s *IntegrationSuite) userClients(c *check.C, conn *rpc.Conn, rootctx context.Context, clusterID string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient) {
+func (s *IntegrationSuite) userClients(rootctx context.Context, c *check.C, conn *rpc.Conn, clusterID string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient) {
        login, err := conn.UserSessionCreate(rootctx, rpc.UserSessionCreateOptions{
                ReturnTo: ",https://example.com",
                AuthInfo: rpc.UserSessionAuthInfo{
@@ -190,7 +191,7 @@ func (s *IntegrationSuite) TestGetCollectionByPDH(c *check.C) {
        conn1 := s.conn("z1111")
        rootctx1, _, _ := s.rootClients("z1111")
        conn3 := s.conn("z3333")
-       userctx1, ac1, kc1 := s.userClients(c, conn1, rootctx1, "z1111", true)
+       userctx1, ac1, kc1 := s.userClients(rootctx1, c, conn1, "z1111", true)
 
        // Create the collection to find its PDH (but don't save it
        // anywhere yet)
@@ -223,3 +224,52 @@ func (s *IntegrationSuite) TestGetCollectionByPDH(c *check.C) {
        c.Check(err, check.IsNil)
        c.Check(coll.PortableDataHash, check.Equals, pdh)
 }
+
+// Test for bug #16263
+func (s *IntegrationSuite) TestListUsers(c *check.C) {
+       rootctx1, _, _ := s.rootClients("z1111")
+       conn1 := s.conn("z1111")
+       conn3 := s.conn("z3333")
+
+       // Make sure LoginCluster is properly configured
+       for cls := range s.testClusters {
+               c.Check(
+                       s.testClusters[cls].config.Clusters[cls].Login.LoginCluster,
+                       check.Equals, "z1111",
+                       check.Commentf("incorrect LoginCluster config on cluster %q", cls))
+       }
+       // Make sure z1111 has users with NULL usernames
+       lst, err := conn1.UserList(rootctx1, arvados.ListOptions{Limit: -1})
+       nullUsername := false
+       c.Assert(err, check.IsNil)
+       c.Assert(len(lst.Items), check.Not(check.Equals), 0)
+       for _, user := range lst.Items {
+               if user.Username == "" {
+                       nullUsername = true
+               }
+       }
+       c.Assert(nullUsername, check.Equals, true)
+       // Ask for the user list on z3333 using z1111's system root token
+       _, err = conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})
+       c.Assert(err, check.IsNil, check.Commentf("getting user list: %q", err))
+}
+
+// Test for bug #16263
+func (s *IntegrationSuite) TestListUsersWithMaxLimit(c *check.C) {
+       rootctx1, _, _ := s.rootClients("z1111")
+       conn3 := s.conn("z3333")
+       maxLimit := int64(math.MaxInt64)
+
+       // Make sure LoginCluster is properly configured
+       for cls := range s.testClusters {
+               c.Check(
+                       s.testClusters[cls].config.Clusters[cls].Login.LoginCluster,
+                       check.Equals, "z1111",
+                       check.Commentf("incorrect LoginCluster config on cluster %q", cls))
+       }
+
+       // Ask for the user list on z3333 using z1111's system root token and
+       // limit: max int64 value.
+       _, err := conn3.UserList(rootctx1, arvados.ListOptions{Limit: maxLimit})
+       c.Assert(err, check.IsNil, check.Commentf("getting user list: %q", err))
+}
index 39b4c5100608ebf6a14b250ec26185886aed3ad3..977a15f3abbbff431cd800d0c87cb1b9ad310714 100644 (file)
@@ -169,6 +169,7 @@ var boolParams = map[string]bool{
        "include_old_versions":    true,
        "redirect_to_new_user":    true,
        "send_notification_email": true,
+       "bypass_federation":       true,
 }
 
 func stringToBool(s string) bool {
index c3c66d00a346d9cea1fc07c4add68766481986a1..729d8bdde09e7ee05d2766ef0a4d1ee72f01a8d1 100644 (file)
@@ -5,6 +5,7 @@
 package rpc
 
 import (
+       "bytes"
        "context"
        "crypto/tls"
        "encoding/json"
@@ -14,6 +15,7 @@ import (
        "net"
        "net/http"
        "net/url"
+       "strconv"
        "strings"
        "time"
 
@@ -100,19 +102,23 @@ func (conn *Conn) requestAndDecode(ctx context.Context, dst interface{}, ep arva
                return fmt.Errorf("%T: requestAndDecode: Marshal opts: %s", conn, err)
        }
        var params map[string]interface{}
-       err = json.Unmarshal(j, &params)
+       dec := json.NewDecoder(bytes.NewBuffer(j))
+       dec.UseNumber()
+       err = dec.Decode(&params)
        if err != nil {
-               return fmt.Errorf("%T: requestAndDecode: Unmarshal opts: %s", conn, err)
+               return fmt.Errorf("%T: requestAndDecode: Decode opts: %s", conn, err)
        }
        if attrs, ok := params["attrs"]; ok && ep.AttrsKey != "" {
                params[ep.AttrsKey] = attrs
                delete(params, "attrs")
        }
-       if limit, ok := params["limit"].(float64); ok && limit < 0 {
-               // Negative limit means "not specified" here, but some
-               // servers/versions do not accept that, so we need to
-               // remove it entirely.
-               delete(params, "limit")
+       if limitStr, ok := params["limit"]; ok {
+               if limit, err := strconv.ParseInt(string(limitStr.(json.Number)), 10, 64); err == nil && limit < 0 {
+                       // Negative limit means "not specified" here, but some
+                       // servers/versions do not accept that, so we need to
+                       // remove it entirely.
+                       delete(params, "limit")
+               }
        }
        if len(tokens) > 1 {
                params["reader_tokens"] = tokens[1:]
index b0a4007f74f87fe12be18046583ba23cc29fa9ea..c8f171ca9b83f38d1e2870af16913a4175db6490 100644 (file)
@@ -1074,9 +1074,10 @@ func (runner *ContainerRunner) CreateContainer() error {
        runner.ContainerConfig.Volumes = runner.Volumes
 
        maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
-       if maxRAM < 4*1024*1024 {
-               // Docker daemon won't let you set a limit less than 4 MiB
-               maxRAM = 4 * 1024 * 1024
+       minDockerRAM := int64(16)
+       if maxRAM < minDockerRAM*1024*1024 {
+               // Docker daemon won't let you set a limit less than ~10 MiB
+               maxRAM = minDockerRAM * 1024 * 1024
        }
        runner.HostConfig = dockercontainer.HostConfig{
                Binds: runner.Binds,
index d128c265f84c13594ec309f5ac36d93b65d82b21..45b346383fab8641b27d16063a46bc4468fc96ce 100644 (file)
@@ -382,7 +382,7 @@ func (cq *Queue) poll() (map[string]*arvados.Container, error) {
                        *next[upd.UUID] = upd
                }
        }
-       selectParam := []string{"uuid", "state", "priority", "runtime_constraints", "container_image", "mounts"}
+       selectParam := []string{"uuid", "state", "priority", "runtime_constraints", "container_image", "mounts", "scheduling_parameters"}
        limitParam := 1000
 
        mine, err := cq.fetchAll(arvados.ResourceListParams{
index cbcf7438b12b0f1ed4f7c0ffdfeba66e9546baee..4e1dc73746a17e20a4e893b5aa877b2d681d0f78 100644 (file)
@@ -403,7 +403,7 @@ func identifyOS() (osversion, error) {
        }
        osv.Major, err = strconv.Atoi(vstr)
        if err != nil {
-               return osv, fmt.Errorf("incomprehensible VERSION_ID in /etc/os/release: %q", kv["VERSION_ID"])
+               return osv, fmt.Errorf("incomprehensible VERSION_ID in /etc/os-release: %q", kv["VERSION_ID"])
        }
        return osv, nil
 }
index e5eb04c6006ed315b1152cd8ff004172f42f84ee..f97ca972687c6ba25391a7c9e0906ad518888f67 100644 (file)
@@ -26,7 +26,7 @@ Create main-test.json:
 
 Or create an arvbox test cluster:
 
-$ cwltool --enable-ext arvbox-make-federation.cwl --arvbox_base ~/.arvbox/ --in_acr /path/to/arvados-cwl-runner > main-test.json
+$ cwltool arvbox-make-federation.cwl --arvbox_base ~/.arvbox/ --in_acr /path/to/arvados-cwl-runner > main-test.json
 
 
 Run tests:
index 5872dbef5a8ef429f44da0696881bb8272e74e3f..593f2399f5ef3b734fa4f67f3466cc6a4bf98656 100644 (file)
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-cwlVersion: v1.0
+cwlVersion: v1.1
 class: Workflow
 $namespaces:
   arv: "http://arvados.org/cwl#"
@@ -10,7 +10,7 @@ $namespaces:
 requirements:
   ScatterFeatureRequirement: {}
   StepInputExpressionRequirement: {}
-  cwltool:LoadListingRequirement:
+  LoadListingRequirement:
     loadListing: no_listing
   InlineJavascriptRequirement: {}
 inputs:
@@ -64,7 +64,7 @@ steps:
       containers: containers
       arvbox_base: arvbox_base
     out: [arvbox_data]
-    run: arvbox/mkdir.cwl
+    run: arvboxcwl/mkdir.cwl
   start:
     in:
       container_name: containers
@@ -74,7 +74,7 @@ steps:
     out: [cluster_id, container_host, arvbox_data_out, superuser_token]
     scatter: [container_name, arvbox_data]
     scatterMethod: dotproduct
-    run: arvbox/start.cwl
+    run: arvboxcwl/start.cwl
   fed-config:
     in:
       container_name: containers
@@ -87,10 +87,10 @@ steps:
     out: []
     scatter: [container_name, this_cluster_id, arvbox_data]
     scatterMethod: dotproduct
-    run: arvbox/fed-config.cwl
+    run: arvboxcwl/fed-config.cwl
   setup-user:
     in:
       container_host: {source: start/container_host, valueFrom: "$(self[0])"}
       superuser_token: {source: start/superuser_token, valueFrom: "$(self[0])"}
     out: [test_user_uuid, test_user_token]
-    run: arvbox/setup-user.cwl
+    run: arvboxcwl/setup-user.cwl
similarity index 96%
rename from sdk/cwl/tests/federation/arvbox/fed-config.cwl
rename to sdk/cwl/tests/federation/arvboxcwl/fed-config.cwl
index 37936df6351b6d86e1dfe6619ab19ddc395b3a73..e1cacdcaf70327095f8e2db241824a5427d0fadf 100644 (file)
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-cwlVersion: v1.0
+cwlVersion: v1.1
 class: CommandLineTool
 $namespaces:
   arv: "http://arvados.org/cwl#"
@@ -56,11 +56,11 @@ requirements:
           }
           return JSON.stringify({"development": {"remote_hosts": remoteClusters}});
           }
-  cwltool:LoadListingRequirement:
+  LoadListingRequirement:
     loadListing: no_listing
   ShellCommandRequirement: {}
   InlineJavascriptRequirement: {}
-  cwltool:InplaceUpdateRequirement:
+  InplaceUpdateRequirement:
     inplaceUpdate: true
 arguments:
   - shellQuote: false
similarity index 91%
rename from sdk/cwl/tests/federation/arvbox/mkdir.cwl
rename to sdk/cwl/tests/federation/arvboxcwl/mkdir.cwl
index 727d491a387a02c580749395690e6ba4dea5082f..854a727c688881960b682867137b5ceb7faed755 100644 (file)
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-cwlVersion: v1.0
+cwlVersion: v1.1
 class: CommandLineTool
 $namespaces:
   arv: "http://arvados.org/cwl#"
@@ -37,10 +37,10 @@ requirements:
       - entry: $(inputs.arvbox_base)
         entryname: base
         writable: true
-  cwltool:LoadListingRequirement:
+  LoadListingRequirement:
     loadListing: no_listing
   InlineJavascriptRequirement: {}
-  cwltool:InplaceUpdateRequirement:
+  InplaceUpdateRequirement:
     inplaceUpdate: true
 arguments:
   - mkdir
similarity index 87%
rename from sdk/cwl/tests/federation/arvbox/setup-user.cwl
rename to sdk/cwl/tests/federation/arvboxcwl/setup-user.cwl
index a3ad6e575e6ecb2d8526855536c45c2334d5c26f..d8f2d9e12d6c5d8cf31b00a3d947b301237c1b02 100644 (file)
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-cwlVersion: v1.0
+cwlVersion: v1.1
 class: CommandLineTool
 $namespaces:
   arv: "http://arvados.org/cwl#"
@@ -13,13 +13,15 @@ requirements:
       ARVADOS_API_HOST: $(inputs.container_host)
       ARVADOS_API_TOKEN: $(inputs.superuser_token)
       ARVADOS_API_HOST_INSECURE: "true"
-  cwltool:LoadListingRequirement:
+  LoadListingRequirement:
     loadListing: no_listing
   InlineJavascriptRequirement: {}
-  cwltool:InplaceUpdateRequirement:
+  InplaceUpdateRequirement:
     inplaceUpdate: true
   DockerRequirement:
     dockerPull: arvados/jobs
+  NetworkAccess:
+    networkAccess: true
 inputs:
   container_host: string
   superuser_token: string
similarity index 95%
rename from sdk/cwl/tests/federation/arvbox/start.cwl
rename to sdk/cwl/tests/federation/arvboxcwl/start.cwl
index 57b348973d2ef7a8e7856f1bfbc69744f9485b92..a7f46d6b22a58a79a3fac916c28c53c1da4984ee 100644 (file)
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-cwlVersion: v1.0
+cwlVersion: v1.1
 class: CommandLineTool
 $namespaces:
   arv: "http://arvados.org/cwl#"
@@ -64,7 +64,7 @@ requirements:
       - entry: $(inputs.arvbox_data)
         entryname: $(inputs.container_name)
         writable: true
-  cwltool:InplaceUpdateRequirement:
+  InplaceUpdateRequirement:
     inplaceUpdate: true
   InlineJavascriptRequirement: {}
 arguments:
@@ -74,7 +74,7 @@ arguments:
       mkdir -p $ARVBOX_DATA
       if ! test -d $ARVBOX_DATA/arvados ; then
         cd $ARVBOX_DATA
-        git clone https://github.com/arvados/arvados.git
+        git clone https://git.arvados.org/arvados.git
       fi
       cd $ARVBOX_DATA/arvados
       gitver=`git rev-parse HEAD`
index ae74d079ad3de964bf69932d790ccdb6b3065683..c32f88864f88750c00fe896286e147ccd9d061ce 100644 (file)
@@ -61,11 +61,11 @@ var (
 )
 
 type GetOptions struct {
-       UUID         string   `json:"uuid"`
+       UUID         string   `json:"uuid,omitempty"`
        Select       []string `json:"select"`
        IncludeTrash bool     `json:"include_trash"`
-       ForwardedFor string   `json:"forwarded_for"`
-       Remote       string   `json:"remote"`
+       ForwardedFor string   `json:"forwarded_for,omitempty"`
+       Remote       string   `json:"remote,omitempty"`
 }
 
 type UntrashOptions struct {
@@ -78,13 +78,14 @@ type ListOptions struct {
        Select             []string               `json:"select"`
        Filters            []Filter               `json:"filters"`
        Where              map[string]interface{} `json:"where"`
-       Limit              int                    `json:"limit"`
-       Offset             int                    `json:"offset"`
+       Limit              int64                  `json:"limit"`
+       Offset             int64                  `json:"offset"`
        Order              []string               `json:"order"`
        Distinct           bool                   `json:"distinct"`
        Count              string                 `json:"count"`
        IncludeTrash       bool                   `json:"include_trash"`
        IncludeOldVersions bool                   `json:"include_old_versions"`
+       BypassFederation   bool                   `json:"bypass_federation"`
 }
 
 type CreateOptions struct {
@@ -95,8 +96,9 @@ type CreateOptions struct {
 }
 
 type UpdateOptions struct {
-       UUID  string                 `json:"uuid"`
-       Attrs map[string]interface{} `json:"attrs"`
+       UUID             string                 `json:"uuid"`
+       Attrs            map[string]interface{} `json:"attrs"`
+       BypassFederation bool                   `json:"bypass_federation"`
 }
 
 type UpdateUUIDOptions struct {
index d1a25c438a9eeb72e147cbd5658ee1bb340ee344..a5cc7d3b904f271696defd341f48a99dd47b92f8 100644 (file)
@@ -55,7 +55,7 @@ func (f *Filter) UnmarshalJSON(data []byte) error {
        }
        operand := elements[2]
        switch operand.(type) {
-       case string, float64, []interface{}, nil:
+       case string, float64, []interface{}, nil, bool:
        default:
                return fmt.Errorf("invalid filter operand %q", elements[2])
        }
index 4e09c5375db980c60171f9209714c81f55aef152..b36e82c918298fa624cb290b12889cb8da2734c0 100644 (file)
@@ -34,3 +34,40 @@ func TestMarshalFiltersWithNil(t *testing.T) {
                t.Errorf("Encoded as %q, expected %q", buf, expect)
        }
 }
+
+func TestUnmarshalFiltersWithNil(t *testing.T) {
+       buf := []byte(`["modified_at","=",null]`)
+       f := &Filter{}
+       err := f.UnmarshalJSON(buf)
+       if err != nil {
+               t.Fatal(err)
+       }
+       expect := Filter{Attr: "modified_at", Operator: "=", Operand: nil}
+       if f.Attr != expect.Attr || f.Operator != expect.Operator || f.Operand != expect.Operand {
+               t.Errorf("Decoded as %q, expected %q", f, expect)
+       }
+}
+
+func TestMarshalFiltersWithBoolean(t *testing.T) {
+       buf, err := json.Marshal([]Filter{
+               {Attr: "is_active", Operator: "=", Operand: true}})
+       if err != nil {
+               t.Fatal(err)
+       }
+       if expect := []byte(`[["is_active","=",true]]`); 0 != bytes.Compare(buf, expect) {
+               t.Errorf("Encoded as %q, expected %q", buf, expect)
+       }
+}
+
+func TestUnmarshalFiltersWithBoolean(t *testing.T) {
+       buf := []byte(`["is_active","=",true]`)
+       f := &Filter{}
+       err := f.UnmarshalJSON(buf)
+       if err != nil {
+               t.Fatal(err)
+       }
+       expect := Filter{Attr: "is_active", Operator: "=", Operand: true}
+       if f.Attr != expect.Attr || f.Operator != expect.Operator || f.Operand != expect.Operand {
+               t.Errorf("Decoded as %q, expected %q", f, expect)
+       }
+}
index e74d6215c79cb76af6fdab7170f3cd989a77143e..445775ccedcd1f4ef246297c22a17e470e0f0e94 100755 (executable)
@@ -66,8 +66,8 @@ def connect_clusters(args):
             errors.append("Inconsistent login cluster configuration, expected '%s' on %s but was '%s'" % (loginCluster, config["ClusterID"], config["Login"]["LoginCluster"]))
             continue
 
-        if arv._rootDesc["revision"] < "20190926":
-            errors.append("Arvados API server revision on cluster '%s' is too old, must be updated to at least Arvados 1.5 before running migration." % config["ClusterID"])
+        if arv._rootDesc["revision"] < "20200331":
+            errors.append("Arvados API server revision on cluster '%s' is too old, must be updated to at least Arvados 2.0.2 before running migration." % config["ClusterID"])
             continue
 
         try:
@@ -98,7 +98,7 @@ def fetch_users(clusters, loginCluster):
     users = []
     for c, arv in clusters.items():
         print("Getting user list from %s" % c)
-        ul = arvados.util.list_all(arv.users().list)
+        ul = arvados.util.list_all(arv.users().list, bypass_federation=True)
         for l in ul:
             if l["uuid"].startswith(c):
                 users.append(l)
@@ -171,10 +171,15 @@ def update_username(args, email, user_uuid, username, migratecluster, migratearv
     print("(%s) Updating username of %s to '%s' on %s" % (email, user_uuid, username, migratecluster))
     if not args.dry_run:
         try:
-            conflicts = migratearv.users().list(filters=[["username", "=", username]]).execute()
+            conflicts = migratearv.users().list(filters=[["username", "=", username]], bypass_federation=True).execute()
             if conflicts["items"]:
-                migratearv.users().update(uuid=conflicts["items"][0]["uuid"], body={"user": {"username": username+"migrate"}}).execute()
-            migratearv.users().update(uuid=user_uuid, body={"user": {"username": username}}).execute()
+                # There's already a user with the username, move the old user out of the way
+                migratearv.users().update(uuid=conflicts["items"][0]["uuid"],
+                                          bypass_federation=True,
+                                          body={"user": {"username": username+"migrate"}}).execute()
+            migratearv.users().update(uuid=user_uuid,
+                                      bypass_federation=True,
+                                      body={"user": {"username": username}}).execute()
         except arvados.errors.ApiError as e:
             print("(%s) Error updating username of %s to '%s' on %s: %s" % (email, user_uuid, username, migratecluster, e))
 
@@ -204,10 +209,14 @@ def choose_new_user(args, by_email, email, userhome, username, old_user_uuid, cl
             user = None
             try:
                 olduser = oldhomearv.users().get(uuid=old_user_uuid).execute()
-                conflicts = homearv.users().list(filters=[["username", "=", username]]).execute()
+                conflicts = homearv.users().list(filters=[["username", "=", username]],
+                                                 bypass_federation=True).execute()
                 if conflicts["items"]:
-                    homearv.users().update(uuid=conflicts["items"][0]["uuid"], body={"user": {"username": username+"migrate"}}).execute()
-                user = homearv.users().create(body={"user": {"email": email, "username": username, "is_active": olduser["is_active"]}}).execute()
+                    homearv.users().update(uuid=conflicts["items"][0]["uuid"],
+                                           bypass_federation=True,
+                                           body={"user": {"username": username+"migrate"}}).execute()
+                user = homearv.users().create(body={"user": {"email": email, "username": username,
+                                                             "is_active": olduser["is_active"]}}).execute()
             except arvados.errors.ApiError as e:
                 print("(%s) Could not create user: %s" % (email, str(e)))
                 return None
@@ -241,10 +250,16 @@ def activate_remote_user(args, email, homearv, migratearv, old_user_uuid, new_us
         return None
 
     try:
-        olduser = migratearv.users().get(uuid=old_user_uuid).execute()
+        findolduser = migratearv.users().list(filters=[["uuid", "=", old_user_uuid]], bypass_federation=True).execute()
+        if len(findolduser["items"]) == 0:
+            return False
+        if len(findolduser["items"]) == 1:
+            olduser = findolduser["items"][0]
+        else:
+            print("(%s) Unexpected result" % (email))
+            return None
     except arvados.errors.ApiError as e:
-        if e.resp.status != 404:
-            print("(%s) Could not retrieve user %s from %s, user may have already been migrated: %s" % (email, old_user_uuid, migratecluster, e))
+        print("(%s) Could not retrieve user %s from %s, user may have already been migrated: %s" % (email, old_user_uuid, migratecluster, e))
         return None
 
     salted = 'v2/' + newtok["uuid"] + '/' + hmac.new(newtok["api_token"].encode(),
@@ -253,7 +268,8 @@ def activate_remote_user(args, email, homearv, migratearv, old_user_uuid, new_us
     try:
         ru = urllib.parse.urlparse(migratearv._rootDesc["rootUrl"])
         if not args.dry_run:
-            newuser = arvados.api(host=ru.netloc, token=salted, insecure=os.environ.get("ARVADOS_API_HOST_INSECURE")).users().current().execute()
+            newuser = arvados.api(host=ru.netloc, token=salted,
+                                  insecure=os.environ.get("ARVADOS_API_HOST_INSECURE")).users().current().execute()
         else:
             newuser = {"is_active": True, "username": username}
     except arvados.errors.ApiError as e:
@@ -264,7 +280,8 @@ def activate_remote_user(args, email, homearv, migratearv, old_user_uuid, new_us
         print("(%s) Activating user %s on %s" % (email, new_user_uuid, migratecluster))
         try:
             if not args.dry_run:
-                migratearv.users().update(uuid=new_user_uuid, body={"is_active": True}).execute()
+                migratearv.users().update(uuid=new_user_uuid, bypass_federation=True,
+                                          body={"is_active": True}).execute()
         except arvados.errors.ApiError as e:
             print("(%s) Could not activate user %s on %s: %s" % (email, new_user_uuid, migratecluster, e))
             return None
@@ -351,8 +368,10 @@ def main():
             if new_user_uuid is None:
                 continue
 
-            # cluster where the migration is happening
+            remote_users = {}
+            got_error = False
             for migratecluster in clusters:
+                # cluster where the migration is happening
                 migratearv = clusters[migratecluster]
 
                 # the user's new home cluster
@@ -361,14 +380,22 @@ def main():
 
                 newuser = activate_remote_user(args, email, homearv, migratearv, old_user_uuid, new_user_uuid)
                 if newuser is None:
-                    continue
+                    got_error = True
+                remote_users[migratecluster] = newuser
+
+            if not got_error:
+                for migratecluster in clusters:
+                    migratearv = clusters[migratecluster]
+                    newuser = remote_users[migratecluster]
+                    if newuser is False:
+                        continue
 
-                print("(%s) Migrating %s to %s on %s" % (email, old_user_uuid, new_user_uuid, migratecluster))
+                    print("(%s) Migrating %s to %s on %s" % (email, old_user_uuid, new_user_uuid, migratecluster))
 
-                migrate_user(args, migratearv, email, new_user_uuid, old_user_uuid)
+                    migrate_user(args, migratearv, email, new_user_uuid, old_user_uuid)
 
-                if newuser['username'] != username:
-                    update_username(args, email, new_user_uuid, username, migratecluster, migratearv)
+                    if newuser['username'] != username:
+                        update_username(args, email, new_user_uuid, username, migratecluster, migratearv)
 
 if __name__ == "__main__":
     main()
index 83d659d4dcc2c2625ba291cf967e073263ffe61a..1591b7e17e1f519c2d92dc19514cc36d9ac1ed56 100644 (file)
@@ -6,7 +6,7 @@ arv-federation-migrate should be in the path or the full path supplied
 in the 'fed_migrate' input parameter.
 
 # Create arvbox containers fedbox(1,2,3) for the federation
-$ cwltool --enable-ext arvbox-make-federation.cwl --arvbox_base ~/.arvbox > fed.json
+$ cwltool arvbox-make-federation.cwl --arvbox_base ~/.arvbox > fed.json
 
 # Configure containers and run tests
 $ cwltool fed-migrate.cwl fed.json
index 0aa6f177aad0bdc272914fa4b52aea3f98aae7b1..aa859cba4fdb416513361d7ba4e291ab74a256ae 100644 (file)
@@ -1,4 +1,4 @@
-cwlVersion: v1.0
+cwlVersion: v1.1
 class: Workflow
 $namespaces:
   arv: "http://arvados.org/cwl#"
@@ -32,7 +32,7 @@ requirements:
   SubworkflowFeatureRequirement: {}
   ScatterFeatureRequirement: {}
   StepInputExpressionRequirement: {}
-  cwltool:LoadListingRequirement:
+  LoadListingRequirement:
     loadListing: no_listing
 steps:
   start:
index 85d2d31f2309fe3182fc1d606982d1bea34e41c1..a2c0096165c74b7bc1fda0daf212177cb4d08ac2 100644 (file)
@@ -5,48 +5,55 @@ import sys
 j = json.load(open(sys.argv[1]))
 
 apiA = arvados.api(host=j["arvados_api_hosts"][0], token=j["superuser_tokens"][0], insecure=True)
-apiB = arvados.api(host=j["arvados_api_hosts"][1], token=j["superuser_tokens"][1], insecure=True)
-apiC = arvados.api(host=j["arvados_api_hosts"][2], token=j["superuser_tokens"][2], insecure=True)
+tok = apiA.api_client_authorizations().current().execute()
+v2_token = "v2/%s/%s" % (tok["uuid"], tok["api_token"])
+
+apiB = arvados.api(host=j["arvados_api_hosts"][1], token=v2_token, insecure=True)
+apiC = arvados.api(host=j["arvados_api_hosts"][2], token=v2_token, insecure=True)
 
 ###
 ### Check users on API server "A" (the LoginCluster) ###
 ###
-
-users = apiA.users().list().execute()
-
-assert len(users["items"]) == 11
-
 by_username = {}
-
-for i in range(1, 10):
+def check_A(users):
+    assert len(users["items"]) == 11
+
+    for i in range(1, 10):
+        found = False
+        for u in users["items"]:
+            if u["username"] == ("case%d" % i) and u["email"] == ("case%d@test" % i):
+                found = True
+                by_username[u["username"]] = u["uuid"]
+        assert found
+
+    # Should be active
+    for i in (1, 2, 3, 4, 5, 6, 7, 8):
+        found = False
+        for u in users["items"]:
+            if u["username"] == ("case%d" % i) and u["email"] == ("case%d@test" % i) and u["is_active"] is True:
+                found = True
+        assert found, "Not found case%i" % i
+
+    # case9 should not be active
     found = False
     for u in users["items"]:
-        if u["username"] == ("case%d" % i) and u["email"] == ("case%d@test" % i):
+        if (u["username"] == "case9" and u["email"] == "case9@test" and
+            u["uuid"] == by_username[u["username"]] and u["is_active"] is False):
             found = True
-            by_username[u["username"]] = u["uuid"]
     assert found
 
-# Should be active
-for i in (1, 2, 3, 4, 5, 6, 7, 8):
-    found = False
-    for u in users["items"]:
-        if u["username"] == ("case%d" % i) and u["email"] == ("case%d@test" % i) and u["is_active"] is True:
-            found = True
-    assert found, "Not found case%i" % i
-
-# case9 should not be active
-found = False
-for u in users["items"]:
-    if (u["username"] == "case9" and u["email"] == "case9@test" and
-        u["uuid"] == by_username[u["username"]] and u["is_active"] is False):
-        found = True
-assert found
+users = apiA.users().list().execute()
+check_A(users)
 
+users = apiA.users().list(bypass_federation=True).execute()
+check_A(users)
 
 ###
 ### Check users on API server "B" (federation member) ###
 ###
-users = apiB.users().list().execute()
+
+# check for expected migrations on B
+users = apiB.users().list(bypass_federation=True).execute()
 assert len(users["items"]) == 11
 
 for i in range(2, 9):
@@ -64,11 +71,16 @@ for u in users["items"]:
         found = True
 assert found
 
+# check that federated user listing works
+users = apiB.users().list().execute()
+check_A(users)
 
 ###
 ### Check users on API server "C" (federation member) ###
 ###
-users = apiC.users().list().execute()
+
+# check for expected migrations on C
+users = apiC.users().list(bypass_federation=True).execute()
 assert len(users["items"]) == 8
 
 for i in (2, 4, 6, 7, 8):
@@ -89,4 +101,8 @@ for i in (3, 5, 9):
             found = True
     assert not found
 
+# check that federated user listing works
+users = apiC.users().list().execute()
+check_A(users)
+
 print("Passed checks")
index 22d4f62ea0fd1bf5a8d6718e2d410b79d5377d72..734bb04270bcfc7c94891542add7806b390350bc 100644 (file)
@@ -676,7 +676,6 @@ def setup_config():
            "dbname": "arvados_test",
            "user": "arvados",
            "password": "insecure_arvados_test",
-            "template": "template0", # used by RailsAPI when [re]creating the database
         }
 
     localhost = "127.0.0.1"
index 24d5ad5b64982c1e5d10e8f218336978d1e2b857..9f3a5fb2b3d787e1a320d8dc3734126ac1ee33f4 100644 (file)
@@ -180,7 +180,7 @@ GEM
     pg (1.1.4)
     power_assert (1.1.4)
     public_suffix (4.0.3)
-    rack (2.0.7)
+    rack (2.2.2)
     rack-test (0.6.3)
       rack (>= 1.0)
     rails (5.0.7.2)
@@ -317,4 +317,4 @@ DEPENDENCIES
   uglifier (~> 2.0)
 
 BUNDLED WITH
-   1.11
+   1.16.6
index 369043e780863a4f84c4c9667fda9bdff2597afb..83a233cd54681b18b9fb6bb12c72642a2e95cae4 100644 (file)
@@ -45,6 +45,7 @@ class ApplicationController < ActionController::Base
   before_action :load_required_parameters
   before_action(:find_object_by_uuid,
                 except: [:index, :create] + ERROR_ACTIONS)
+  before_action(:set_nullable_attrs_to_null, only: [:update, :create])
   before_action :load_limit_offset_order_params, only: [:index, :contents]
   before_action :load_where_param, only: [:index, :contents]
   before_action :load_filters_param, only: [:index, :contents]
@@ -52,6 +53,7 @@ class ApplicationController < ActionController::Base
   before_action :reload_object_before_update, :only => :update
   before_action(:render_404_if_no_object,
                 except: [:index, :create] + ERROR_ACTIONS)
+  before_action :only_admin_can_bypass_federation
 
   attr_writer :resource_attrs
 
@@ -138,6 +140,12 @@ class ApplicationController < ActionController::Base
     render_not_found "Object not found" if !@object
   end
 
+  def only_admin_can_bypass_federation
+    unless !params[:bypass_federation] || current_user.andand.is_admin
+      send_error("The bypass_federation parameter is only permitted when current user is admin", status: 403)
+    end
+  end
+
   def render_error(e)
     logger.error e.inspect
     if e.respond_to? :backtrace and e.backtrace
@@ -478,6 +486,29 @@ class ApplicationController < ActionController::Base
     @object = @objects.first
   end
 
+  def nullable_attributes
+    []
+  end
+
+  # Go code may send empty values (i.e. an empty string instead of NULL) that
+  # should be translated to NULL in the database.
+  def set_nullable_attrs_to_null
+    nullify_attrs(resource_attrs.to_hash).each do |k, v|
+      resource_attrs[k] = v
+    end
+  end
+
+  def nullify_attrs(a = {})
+    new_attrs = a.to_hash.symbolize_keys
+    (new_attrs.keys & nullable_attributes).each do |attr|
+      val = new_attrs[attr]
+      if (val.class == Integer && val == 0) || (val.class == String && val == "")
+        new_attrs[attr] = nil
+      end
+    end
+    return new_attrs
+  end
+
   def reload_object_before_update
     # This is necessary to prevent an ActiveRecord::ReadOnlyRecord
     # error when updating an object which was retrieved using a join.
@@ -632,6 +663,11 @@ class ApplicationController < ActionController::Base
         location: "query",
         required: false,
       },
+      bypass_federation: {
+        type: 'boolean',
+        required: false,
+        description: 'bypass federation behavior, list items from local instance database only'
+      }
     }
   end
 
index 5c223410151788926445ac440e20a723ce6cf9aa..b9aba2726f555883d304fac490f050b6177275b4 100644 (file)
@@ -33,10 +33,10 @@ class Arvados::V1::SchemaController < ApplicationController
         id: "arvados:v1",
         name: "arvados",
         version: "v1",
-        # format is YYYYMMDD, must be fixed with (needs to be linearly
+        # format is YYYYMMDD, must be fixed width (needs to be lexically
         # sortable), updated manually, may be used by clients to
         # determine availability of API server features.
-        revision: "20200212",
+        revision: "20200331",
         source_version: AppVersion.hash,
         sourceVersion: AppVersion.hash, # source_version should be deprecated in the future
         packageVersion: AppVersion.package_version,
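
The discovery document's revision is bumped so clients can detect that bypass_federation is available; because the value is a fixed-width YYYYMMDD string, a plain lexical comparison behaves like a date comparison. A sketch of such a client-side check (using _rootDesc, the parsed discovery document kept by the googleapiclient-based Python SDK; host and token are placeholders):

    import arvados

    api = arvados.api('v1', host='clusterA.example', token='xxxxxxxx', insecure=True)

    # Fixed-width YYYYMMDD strings sort lexically in date order: '20200212' < '20200331'.
    if api._rootDesc.get('revision', '00000000') >= '20200331':
        users = api.users().list(bypass_federation=True).execute()  # feature available
    else:
        users = api.users().list().execute()
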
index 224f2c0bd464ceb182c4bc0a696a823def99393e..d9ab5556ffc9ac7826abda00bc18e3d4b700269c 100644 (file)
@@ -22,7 +22,7 @@ class Arvados::V1::UsersController < ApplicationController
       rescue ActiveRecord::RecordNotUnique
         retry
       end
-      u.update_attributes!(attrs)
+      u.update_attributes!(nullify_attrs(attrs))
       @objects << u
     end
     @offset = 0
@@ -249,6 +249,14 @@ class Arvados::V1::UsersController < ApplicationController
     }
   end
 
+  def self._update_requires_parameters
+    super.merge({
+      bypass_federation: {
+        type: 'boolean', required: false,
+      },
+    })
+  end
+
   def self._update_uuid_requires_parameters
     {
       new_uuid: {
@@ -271,4 +279,8 @@ class Arvados::V1::UsersController < ApplicationController
     end
     super
   end
+
+  def nullable_attributes
+    super + [:email, :first_name, :last_name, :username]
+  end
 end
index 8d2544dde1e81945e65a580886333098a3ccf6e8..502e3e787d1e1324217983f73cd841bce0e1ea1a 100644 (file)
@@ -253,6 +253,11 @@ end
 if ::Rails.env.to_s == "test" && db_config["test"].nil?
   $arvados_config["PostgreSQL"]["Connection"]["dbname"] = "arvados_test"
 end
+if ::Rails.env.to_s == "test"
+  # Use template0 when creating a new database. Avoids
+  # character-encoding/collation problems.
+  $arvados_config["PostgreSQL"]["Connection"]["template"] = "template0"
+end
 
 if $arvados_config["PostgreSQL"]["Connection"]["password"].empty?
   raise "Database password is empty, PostgreSQL section is: #{$arvados_config["PostgreSQL"]}"
index 753e707b629da2fa59536b9336fcfc404fbb7831..817a1c9ef944eb38e2dc708d53a199a9e70e5e0f 100644 (file)
@@ -88,6 +88,38 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     assert_nil created['identity_url'], 'expected no identity_url'
   end
 
+  test "create new user with empty username" do
+    authorize_with :admin
+    post :create, params: {
+      user: {
+        first_name: "test_first_name",
+        last_name: "test_last_name",
+        username: ""
+      }
+    }
+    assert_response :success
+    created = JSON.parse(@response.body)
+    assert_equal 'test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected uuid for the newly created user'
+    assert_nil created['email'], 'expected no email'
+    assert_nil created['username'], 'expected no username'
+  end
+
+  test "update user with empty username" do
+    authorize_with :admin
+    user = users('spectator')
+    assert_not_nil user['username']
+    put :update, params: {
+      id: users('spectator')['uuid'],
+      user: {
+        username: ""
+      }
+    }
+    assert_response :success
+    updated = JSON.parse(@response.body)
+    assert_nil updated['username'], 'expected no username'
+  end
+
   test "create user with user, vm and repo as input" do
     authorize_with :admin
     repo_name = 'usertestrepo'
@@ -1025,6 +1057,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
               newuuid => {
                 'first_name' => 'noot',
                 'email' => 'root@remot.example.com',
+                'username' => '',
               },
             }})
     assert_response(:success)
index 4be89a24552eab1d6847fcd525dcf2a6bc7ea561..b24ddc5a52c02c495b14f9871763411fa4ccbea8 100644 (file)
@@ -448,5 +448,22 @@ class UsersTest < ActionDispatch::IntegrationTest
     assert_match(/Cannot activate without being invited/, json_response['errors'][0])
   end
 
+  test "bypass_federation only accepted for admins" do
+    get "/arvados/v1/users",
+      params: {
+        bypass_federation: true
+      },
+      headers: auth(:admin)
+
+    assert_response :success
+
+    get "/arvados/v1/users",
+      params: {
+        bypass_federation: true
+      },
+      headers: auth(:active)
+
+    assert_response 403
+  end
 
 end
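
The same restriction applies through the SDK: passing bypass_federation with a non-admin token yields HTTP 403. A minimal sketch (hypothetical host and tokens):

    import arvados
    import googleapiclient.errors

    admin = arvados.api('v1', host='clusterB.example', token='admin-token', insecure=True)
    admin.users().list(bypass_federation=True).execute()    # succeeds for an admin

    active = arvados.api('v1', host='clusterB.example', token='active-user-token', insecure=True)
    try:
        active.users().list(bypass_federation=True).execute()
    except googleapiclient.errors.HttpError as err:
        assert err.resp.status == 403  # "only permitted when current user is admin"
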
index ea08cf9775440ae67245b5c5f8143a63bcae6a04..0927b187048315f0c305d5043c448d5ff38a8002 100644 (file)
@@ -157,10 +157,6 @@ func (h *handler) setup(ctx context.Context, cluster *arvados.Cluster, token str
        }
        bufs = newBufferPool(h.Logger, h.Cluster.API.MaxKeepBlobBuffers, BlockSize)
 
-       if h.Cluster.API.MaxConcurrentRequests < 1 {
-               h.Cluster.API.MaxConcurrentRequests = h.Cluster.API.MaxKeepBlobBuffers * 2
-               h.Logger.Warnf("API.MaxConcurrentRequests <1 or not specified; defaulting to MaxKeepBlobBuffers * 2 == %d", h.Cluster.API.MaxConcurrentRequests)
-       }
        if h.Cluster.API.MaxConcurrentRequests > 0 && h.Cluster.API.MaxConcurrentRequests < h.Cluster.API.MaxKeepBlobBuffers {
                h.Logger.Warnf("Possible configuration mistake: not useful to set API.MaxKeepBlobBuffers (%d) higher than API.MaxConcurrentRequests (%d)", h.Cluster.API.MaxKeepBlobBuffers, h.Cluster.API.MaxConcurrentRequests)
        }