Chen Chen <aflyhorse@gmail.com>
Veritas Genetics, Inc. <*@veritasgenetics.com>
Curii Corporation, Inc. <*@curii.com>
+Dante Tsang <dante@dantetsang.com>
+Codex Genetics Ltd <info@codexgenetics.com>
\ No newline at end of file
multi_json (~> 1.0)
websocket-driver (>= 0.2.0)
public_suffix (4.0.3)
- rack (2.0.7)
+ rack (2.2.2)
rack-mini-profiler (1.0.2)
rack (>= 1.2.0)
rack-test (0.6.3)
uglifier (~> 2.0)
BUNDLED WITH
- 1.11
+ 1.16.6
npm 'browserify', require: false
npm 'jquery'
npm 'awesomplete'
-npm 'jssha'
+npm 'jssha', '2.4.2'
-npm 'mithril', '1.1.6'
+npm 'mithril', '1.1.7'
npm 'es6-object-assign'
}
timer() {
- echo -n "$(($SECONDS - $t0))s"
+ if [[ -n "$t0" ]]; then
+ echo -n "$(($SECONDS - $t0))s"
+ fi
}
report_outcomes() {
import (
"context"
+ "fmt"
"io/ioutil"
+ "net"
"path/filepath"
)
}
func (createCertificates) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+ var san string
+ if net.ParseIP(super.ListenHost) != nil {
+ san = fmt.Sprintf("IP:%s", super.ListenHost)
+ } else {
+ san = fmt.Sprintf("DNS:%s", super.ListenHost)
+ }
+
// Generate root key
err := super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "genrsa", "-out", "rootCA.key", "4096")
if err != nil {
if err != nil {
return err
}
- err = ioutil.WriteFile(filepath.Join(super.tempdir, "server.cfg"), append(defaultconf, []byte(`
-[SAN]
-subjectAltName=DNS:localhost,DNS:localhost.localdomain
-`)...), 0644)
+ err = ioutil.WriteFile(filepath.Join(super.tempdir, "server.cfg"), append(defaultconf, []byte(fmt.Sprintf("\n[SAN]\nsubjectAltName=DNS:localhost,DNS:localhost.localdomain,%s\n", san))...), 0644)
if err != nil {
return err
}
return err
}
// Sign certificate
- err = super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "x509", "-req", "-in", "server.csr", "-CA", "rootCA.crt", "-CAkey", "rootCA.key", "-CAcreateserial", "-out", "server.crt", "-days", "3650", "-sha256")
+ err = super.RunProgram(ctx, super.tempdir, nil, nil, "openssl", "x509", "-req", "-in", "server.csr", "-CA", "rootCA.crt", "-CAkey", "rootCA.key", "-CAcreateserial", "-out", "server.crt", "-extfile", "server.cfg", "-extensions", "SAN", "-days", "3650", "-sha256")
if err != nil {
return err
}
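
The -extfile server.cfg / -extensions SAN arguments added above are what make openssl copy the [SAN] section into the issued certificate; without them the subjectAltName configuration is silently ignored. A minimal Go check, assuming server.crt sits in the current directory as in the hunk above, that the SAN entries actually landed:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	pemData, err := ioutil.ReadFile("server.crt") // assumed path, per the hunk above
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemData)
	if block == nil {
		log.Fatal("no PEM block found in server.crt")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// With the -extfile/-extensions fix, these should include localhost,
	// localhost.localdomain, and the ListenHost DNS or IP entry.
	fmt.Println("DNS SANs:", cert.DNSNames)
	fmt.Println("IP SANs:", cert.IPAddresses)
}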
--- /dev/null
+#!/bin/bash
+
+# Example of using `arvados-server boot` in a script. Bring up a test
+# cluster, wait for it to come up, fetch something from its discovery
+# doc, and shut down.
+
+set -e -o pipefail
+
+cleanup() {
+ set -x
+ kill ${boot_PID} ${consume_stdout_PID}
+ wait ${boot_PID} ${consume_stdout_PID} || true
+ echo >&2 "done"
+}
+
+coproc boot (arvados-server boot -type test -config doc/examples/config/zzzzz.yml -own-temporary-database -timeout 20m)
+trap cleanup ERR EXIT
+
+read controllerURL <&"${boot[0]}"
+
+# Copy coproc's stdout to stderr, to ensure `arvados-server boot`
+# doesn't get blocked trying to write stdout.
+exec 7<&"${boot[0]}"; coproc consume_stdout (cat <&7 >&2)
+
+keepwebURL=$(curl --silent --fail --insecure "${controllerURL}/discovery/v1/apis/arvados/v1/rest" | jq -r .keepWebServiceUrl)
+echo >&2 "controller is at $controllerURL"
+echo >&2 "keep-web is at $keepwebURL"
MaxItemsPerResponse: 1000
# Maximum number of concurrent requests to accept in a single
- # service process, or 0 for no limit. Currently supported only
- # by keepstore.
+ # service process, or 0 for no limit.
MaxConcurrentRequests: 0
- # Maximum number of 64MiB memory buffers per keepstore server
- # process, or 0 for no limit.
+ # Maximum number of 64MiB memory buffers per Keepstore server process, or
+ # 0 for no limit. When this limit is reached, up to
+ # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
+ # buffers (like GET and PUT) will wait for buffer space to be released.
+ # Any HTTP requests beyond MaxConcurrentRequests will receive an
+ # immediate 503 response.
+ #
+ # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB
+ # * 1.1) fits comfortably in memory. On a host dedicated to running
+ # Keepstore, divide total memory by 88MiB to suggest a suitable value.
+ # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440
+ # kB, compute 7125440 / (88 * 1024) = 79 and configure MaxKeepBlobBuffers: 79
MaxKeepBlobBuffers: 128
# API methods to disable. Disabled methods are not listed in the
MaxItemsPerResponse: 1000
# Maximum number of concurrent requests to accept in a single
- # service process, or 0 for no limit. Currently supported only
- # by keepstore.
+ # service process, or 0 for no limit.
MaxConcurrentRequests: 0
- # Maximum number of 64MiB memory buffers per keepstore server
- # process, or 0 for no limit.
+ # Maximum number of 64MiB memory buffers per Keepstore server process, or
+ # 0 for no limit. When this limit is reached, up to
+ # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
+ # buffers (like GET and PUT) will wait for buffer space to be released.
+ # Any HTTP requests beyond MaxConcurrentRequests will receive an
+ # immediate 503 response.
+ #
+ # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB
+ # * 1.1) fits comfortably in memory. On a host dedicated to running
+ # Keepstore, divide total memory by 88MiB to suggest a suitable value.
+ # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440
+ # kB, compute 7125440 / (88 * 1024) = 79 and configure MaxKeepBlobBuffers: 79
MaxKeepBlobBuffers: 128
# API methods to disable. Disabled methods are not listed in the
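
The sizing rule in the comment (divide MemTotal by 88 MiB) is easy to automate. A small Go sketch of that arithmetic, reading /proc/meminfo as the comment suggests:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strconv"
	"strings"
)

func main() {
	data, err := ioutil.ReadFile("/proc/meminfo")
	if err != nil {
		log.Fatal(err)
	}
	for _, line := range strings.Split(string(data), "\n") {
		if !strings.HasPrefix(line, "MemTotal:") {
			continue
		}
		fields := strings.Fields(line) // e.g. ["MemTotal:", "7125440", "kB"]
		kb, err := strconv.ParseInt(fields[1], 10, 64)
		if err != nil {
			log.Fatal(err)
		}
		// 88 MiB per buffer is the comment's rule of thumb.
		fmt.Printf("suggested MaxKeepBlobBuffers: %d\n", kb/(88*1024))
		return
	}
	log.Fatal("MemTotal not found in /proc/meminfo")
}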
}
var userAttrsCachedFromLoginCluster = map[string]bool{
- "created_at": true,
- "email": true,
- "first_name": true,
- "is_active": true,
- "is_admin": true,
- "last_name": true,
- "modified_at": true,
- "modified_by_client_uuid": true,
- "modified_by_user_uuid": true,
- "prefs": true,
- "username": true,
-
- "etag": false,
- "full_name": false,
- "identity_url": false,
- "is_invited": false,
- "owner_uuid": false,
- "uuid": false,
- "writable_by": false,
-}
-
-func (conn *Conn) UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {
+ "created_at": true,
+ "email": true,
+ "first_name": true,
+ "is_active": true,
+ "is_admin": true,
+ "last_name": true,
+ "modified_at": true,
+ "prefs": true,
+ "username": true,
+
+ "etag": false,
+ "full_name": false,
+ "identity_url": false,
+ "is_invited": false,
+ "modified_by_client_uuid": false,
+ "modified_by_user_uuid": false,
+ "owner_uuid": false,
+ "uuid": false,
+ "writable_by": false,
+}
+
+func (conn *Conn) batchUpdateUsers(ctx context.Context,
+ options arvados.ListOptions,
+ items []arvados.User) (err error) {
+
+ id := conn.cluster.Login.LoginCluster
logger := ctxlog.FromContext(ctx)
- if id := conn.cluster.Login.LoginCluster; id != "" && id != conn.cluster.ClusterID {
- resp, err := conn.chooseBackend(id).UserList(ctx, options)
- if err != nil {
- return resp, err
+ batchOpts := arvados.UserBatchUpdateOptions{Updates: map[string]map[string]interface{}{}}
+ for _, user := range items {
+ if !strings.HasPrefix(user.UUID, id) {
+ continue
+ }
+ logger.Debugf("cache user info for uuid %q", user.UUID)
+
+ // If the remote cluster has null timestamps
+ // (e.g., test server with incomplete
+ // fixtures) use dummy timestamps (instead of
+ // the zero time, which causes a Rails API
+ // error "year too big to marshal: 1 UTC").
+ if user.ModifiedAt.IsZero() {
+ user.ModifiedAt = time.Now()
+ }
+ if user.CreatedAt.IsZero() {
+ user.CreatedAt = time.Now()
}
- batchOpts := arvados.UserBatchUpdateOptions{Updates: map[string]map[string]interface{}{}}
- for _, user := range resp.Items {
- if !strings.HasPrefix(user.UUID, id) {
- continue
- }
- logger.Debugf("cache user info for uuid %q", user.UUID)
-
- // If the remote cluster has null timestamps
- // (e.g., test server with incomplete
- // fixtures) use dummy timestamps (instead of
- // the zero time, which causes a Rails API
- // error "year too big to marshal: 1 UTC").
- if user.ModifiedAt.IsZero() {
- user.ModifiedAt = time.Now()
- }
- if user.CreatedAt.IsZero() {
- user.CreatedAt = time.Now()
- }
- var allFields map[string]interface{}
- buf, err := json.Marshal(user)
- if err != nil {
- return arvados.UserList{}, fmt.Errorf("error encoding user record from remote response: %s", err)
- }
- err = json.Unmarshal(buf, &allFields)
- if err != nil {
- return arvados.UserList{}, fmt.Errorf("error transcoding user record from remote response: %s", err)
- }
- updates := allFields
- if len(options.Select) > 0 {
- updates = map[string]interface{}{}
- for _, k := range options.Select {
- if v, ok := allFields[k]; ok && userAttrsCachedFromLoginCluster[k] {
- updates[k] = v
- }
+ var allFields map[string]interface{}
+ buf, err := json.Marshal(user)
+ if err != nil {
+ return fmt.Errorf("error encoding user record from remote response: %s", err)
+ }
+ err = json.Unmarshal(buf, &allFields)
+ if err != nil {
+ return fmt.Errorf("error transcoding user record from remote response: %s", err)
+ }
+ updates := allFields
+ if len(options.Select) > 0 {
+ updates = map[string]interface{}{}
+ for _, k := range options.Select {
+ if v, ok := allFields[k]; ok && userAttrsCachedFromLoginCluster[k] {
+ updates[k] = v
}
- } else {
- for k := range updates {
- if !userAttrsCachedFromLoginCluster[k] {
- delete(updates, k)
- }
+ }
+ } else {
+ for k := range updates {
+ if !userAttrsCachedFromLoginCluster[k] {
+ delete(updates, k)
}
}
- batchOpts.Updates[user.UUID] = updates
}
- if len(batchOpts.Updates) > 0 {
- ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})
- _, err = conn.local.UserBatchUpdate(ctxRoot, batchOpts)
- if err != nil {
- return arvados.UserList{}, fmt.Errorf("error updating local user records: %s", err)
- }
+ batchOpts.Updates[user.UUID] = updates
+ }
+ if len(batchOpts.Updates) > 0 {
+ ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})
+ _, err = conn.local.UserBatchUpdate(ctxRoot, batchOpts)
+ if err != nil {
+ return fmt.Errorf("error updating local user records: %s", err)
+ }
+ }
+ return nil
+}
+
+func (conn *Conn) UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {
+ if id := conn.cluster.Login.LoginCluster; id != "" && id != conn.cluster.ClusterID && !options.BypassFederation {
+ resp, err := conn.chooseBackend(id).UserList(ctx, options)
+ if err != nil {
+ return resp, err
+ }
+ err = conn.batchUpdateUsers(ctx, options, resp.Items)
+ if err != nil {
+ return arvados.UserList{}, err
}
return resp, nil
} else {
}
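
The refactor above pulls the caching loop out of UserList into batchUpdateUsers, which applies the userAttrsCachedFromLoginCluster whitelist by round-tripping each user record through JSON and deleting keys that must not be cached locally. A stripped-down Go sketch of that round-trip technique; the three-field User type and the whitelist contents are illustrative stand-ins, not the real arvados.User:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type User struct {
	UUID  string `json:"uuid"`
	Email string `json:"email"`
	Etag  string `json:"etag"`
}

// Hypothetical whitelist: only "email" may be copied from the remote record.
var cached = map[string]bool{"email": true, "uuid": false, "etag": false}

func main() {
	u := User{UUID: "zzzzz-tpzed-000000000000000", Email: "user@example.com", Etag: "abc"}
	buf, err := json.Marshal(u)
	if err != nil {
		log.Fatal(err)
	}
	var all map[string]interface{}
	if err := json.Unmarshal(buf, &all); err != nil {
		log.Fatal(err)
	}
	for k := range all {
		if !cached[k] {
			delete(all, k) // drop attributes that must not be cached
		}
	}
	fmt.Println(all) // map[email:user@example.com]
}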
func (conn *Conn) UserUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.User, error) {
+ if options.BypassFederation {
+ return conn.local.UserUpdate(ctx, options)
+ }
return conn.chooseBackend(options.UUID).UserUpdate(ctx, options)
}
func (conn *Conn) UserUpdateUUID(ctx context.Context, options arvados.UpdateUUIDOptions) (arvados.User, error) {
- return conn.chooseBackend(options.UUID).UserUpdateUUID(ctx, options)
+ return conn.local.UserUpdateUUID(ctx, options)
}
func (conn *Conn) UserMerge(ctx context.Context, options arvados.UserMergeOptions) (arvados.User, error) {
- return conn.chooseBackend(options.OldUserUUID).UserMerge(ctx, options)
+ return conn.local.UserMerge(ctx, options)
}
func (conn *Conn) UserActivate(ctx context.Context, options arvados.UserActivateOptions) (arvados.User, error) {
// corresponding options argument suitable for sending to that
// backend.
func (conn *Conn) splitListRequest(ctx context.Context, opts arvados.ListOptions, fn func(context.Context, string, arvados.API, arvados.ListOptions) ([]string, error)) error {
+
+ if opts.BypassFederation {
+ // Client requested no federation. Pass through.
+ _, err := fn(ctx, conn.cluster.ClusterID, conn.local, opts)
+ return err
+ }
+
cannotSplit := false
var matchAllFilters map[string]bool
for _, f := range opts.Filters {
if cl.MaxPageSize > 0 && len(resp.Items) >= cl.MaxPageSize {
break
}
- if options.Limit >= 0 && len(resp.Items) >= options.Limit {
+ if options.Limit >= 0 && int64(len(resp.Items)) >= options.Limit {
break
}
if cl.matchFilters(c, options.Filters) {
type listTrial struct {
count string
- limit int
- offset int
+ limit int64
+ offset int64
order []string
filters []arvados.Filter
selectfields []string
}
func (s *CollectionListSuite) TestCollectionListMultiSiteWithLimit(c *check.C) {
- for _, limit := range []int{0, 1, 2} {
+ for _, limit := range []int64{0, 1, 2} {
s.test(c, listTrial{
count: "none",
limit: limit,
package federation
import (
+ "context"
"encoding/json"
"errors"
+ "math"
"net/url"
"os"
"strings"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
)
for _, updateFail := range []bool{false, true} {
for _, opts := range []arvados.ListOptions{
{Offset: 0, Limit: -1, Select: nil},
+ {Offset: 0, Limit: math.MaxInt64, Select: nil},
{Offset: 1, Limit: 1, Select: nil},
{Offset: 0, Limit: 2, Select: []string{"uuid"}},
{Offset: 0, Limit: 2, Select: []string{"uuid", "email"}},
s.fed.local = rpc.NewConn(s.cluster.ClusterID, spy.URL, true, rpc.PassthroughTokenProvider)
}
userlist, err := s.fed.UserList(s.ctx, opts)
+ if err != nil {
+ c.Logf("... UserList failed %q", err)
+ }
if updateFail && err == nil {
// All local updates fail, so the only
// cases expected to succeed are the
}
}
+func (s *UserSuite) TestLoginClusterUserListBypassFederation(c *check.C) {
+ s.cluster.ClusterID = "local"
+ s.cluster.Login.LoginCluster = "zzzzz"
+ s.fed = New(s.cluster)
+ s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")},
+ true, rpc.PassthroughTokenProvider))
+
+ spy := arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+ s.fed.local = rpc.NewConn(s.cluster.ClusterID, spy.URL, true, rpc.PassthroughTokenProvider)
+
+ _, err := s.fed.UserList(s.ctx, arvados.ListOptions{Offset: 0, Limit: math.MaxInt64, Select: nil, BypassFederation: true})
+	// This fails with a 403 because it is not using a root token.
+ c.Check(err.(*arvados.TransactionError).StatusCode, check.Equals, 403)
+
+ // Now use SystemRootToken
+ ctx := context.Background()
+ ctx = ctxlog.Context(ctx, ctxlog.TestLogger(c))
+ ctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{arvadostest.SystemRootToken}})
+
+ // Assert that it did not try to batch update users.
+ _, err = s.fed.UserList(ctx, arvados.ListOptions{Offset: 0, Limit: math.MaxInt64, Select: nil, BypassFederation: true})
+ for _, d := range spy.RequestDumps {
+ d := string(d)
+ if strings.Contains(d, "PATCH /arvados/v1/users/batch") {
+ c.Fail()
+ }
+ }
+ c.Check(err, check.IsNil)
+}
+
// userAttrsCachedFromLoginCluster must have an entry for every field
// in the User struct.
func (s *UserSuite) TestUserAttrsUpdateWhitelist(c *check.C) {
"bytes"
"context"
"io"
+ "math"
"net"
"net/url"
"os"
TLS:
Insecure: true
Login:
- # LoginCluster: z1111
+ LoginCluster: z1111
SystemLogs:
Format: text
RemoteClusters:
return ctx, ac, kc
}
-func (s *IntegrationSuite) userClients(c *check.C, conn *rpc.Conn, rootctx context.Context, clusterID string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient) {
+func (s *IntegrationSuite) userClients(rootctx context.Context, c *check.C, conn *rpc.Conn, clusterID string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient) {
login, err := conn.UserSessionCreate(rootctx, rpc.UserSessionCreateOptions{
ReturnTo: ",https://example.com",
AuthInfo: rpc.UserSessionAuthInfo{
conn1 := s.conn("z1111")
rootctx1, _, _ := s.rootClients("z1111")
conn3 := s.conn("z3333")
- userctx1, ac1, kc1 := s.userClients(c, conn1, rootctx1, "z1111", true)
+ userctx1, ac1, kc1 := s.userClients(rootctx1, c, conn1, "z1111", true)
// Create the collection to find its PDH (but don't save it
// anywhere yet)
c.Check(err, check.IsNil)
c.Check(coll.PortableDataHash, check.Equals, pdh)
}
+
+// Test for bug #16263
+func (s *IntegrationSuite) TestListUsers(c *check.C) {
+ rootctx1, _, _ := s.rootClients("z1111")
+ conn1 := s.conn("z1111")
+ conn3 := s.conn("z3333")
+
+ // Make sure LoginCluster is properly configured
+ for cls := range s.testClusters {
+ c.Check(
+ s.testClusters[cls].config.Clusters[cls].Login.LoginCluster,
+ check.Equals, "z1111",
+ check.Commentf("incorrect LoginCluster config on cluster %q", cls))
+ }
+ // Make sure z1111 has users with NULL usernames
+ lst, err := conn1.UserList(rootctx1, arvados.ListOptions{Limit: -1})
+ nullUsername := false
+ c.Assert(err, check.IsNil)
+ c.Assert(len(lst.Items), check.Not(check.Equals), 0)
+ for _, user := range lst.Items {
+ if user.Username == "" {
+ nullUsername = true
+ }
+ }
+ c.Assert(nullUsername, check.Equals, true)
+ // Ask for the user list on z3333 using z1111's system root token
+ _, err = conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})
+ c.Assert(err, check.IsNil, check.Commentf("getting user list: %q", err))
+}
+
+// Test for bug #16263
+func (s *IntegrationSuite) TestListUsersWithMaxLimit(c *check.C) {
+ rootctx1, _, _ := s.rootClients("z1111")
+ conn3 := s.conn("z3333")
+ maxLimit := int64(math.MaxInt64)
+
+ // Make sure LoginCluster is properly configured
+ for cls := range s.testClusters {
+ c.Check(
+ s.testClusters[cls].config.Clusters[cls].Login.LoginCluster,
+ check.Equals, "z1111",
+ check.Commentf("incorrect LoginCluster config on cluster %q", cls))
+ }
+
+ // Ask for the user list on z3333 using z1111's system root token and
+ // limit: max int64 value.
+ _, err := conn3.UserList(rootctx1, arvados.ListOptions{Limit: maxLimit})
+ c.Assert(err, check.IsNil, check.Commentf("getting user list: %q", err))
+}
"include_old_versions": true,
"redirect_to_new_user": true,
"send_notification_email": true,
+ "bypass_federation": true,
}
func stringToBool(s string) bool {
package rpc
import (
+ "bytes"
"context"
"crypto/tls"
"encoding/json"
"net"
"net/http"
"net/url"
+ "strconv"
"strings"
"time"
return fmt.Errorf("%T: requestAndDecode: Marshal opts: %s", conn, err)
}
var params map[string]interface{}
- err = json.Unmarshal(j, ¶ms)
+ dec := json.NewDecoder(bytes.NewBuffer(j))
+ dec.UseNumber()
+ err = dec.Decode(¶ms)
if err != nil {
- return fmt.Errorf("%T: requestAndDecode: Unmarshal opts: %s", conn, err)
+ return fmt.Errorf("%T: requestAndDecode: Decode opts: %s", conn, err)
}
if attrs, ok := params["attrs"]; ok && ep.AttrsKey != "" {
params[ep.AttrsKey] = attrs
delete(params, "attrs")
}
- if limit, ok := params["limit"].(float64); ok && limit < 0 {
- // Negative limit means "not specified" here, but some
- // servers/versions do not accept that, so we need to
- // remove it entirely.
- delete(params, "limit")
+ if limitStr, ok := params["limit"]; ok {
+ if limit, err := strconv.ParseInt(string(limitStr.(json.Number)), 10, 64); err == nil && limit < 0 {
+ // Negative limit means "not specified" here, but some
+ // servers/versions do not accept that, so we need to
+ // remove it entirely.
+ delete(params, "limit")
+ }
}
if len(tokens) > 1 {
params["reader_tokens"] = tokens[1:]
runner.ContainerConfig.Volumes = runner.Volumes
maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
- if maxRAM < 4*1024*1024 {
- // Docker daemon won't let you set a limit less than 4 MiB
- maxRAM = 4 * 1024 * 1024
+ minDockerRAM := int64(16)
+ if maxRAM < minDockerRAM*1024*1024 {
+ // Docker daemon won't let you set a memory limit less than ~10 MiB, so enforce a 16 MiB floor
+ maxRAM = minDockerRAM * 1024 * 1024
}
runner.HostConfig = dockercontainer.HostConfig{
Binds: runner.Binds,
*next[upd.UUID] = upd
}
}
- selectParam := []string{"uuid", "state", "priority", "runtime_constraints", "container_image", "mounts"}
+ selectParam := []string{"uuid", "state", "priority", "runtime_constraints", "container_image", "mounts", "scheduling_parameters"}
limitParam := 1000
mine, err := cq.fetchAll(arvados.ResourceListParams{
}
osv.Major, err = strconv.Atoi(vstr)
if err != nil {
- return osv, fmt.Errorf("incomprehensible VERSION_ID in /etc/os/release: %q", kv["VERSION_ID"])
+ return osv, fmt.Errorf("incomprehensible VERSION_ID in /etc/os-release: %q", kv["VERSION_ID"])
}
return osv, nil
}
Or create an arvbox test cluster:
-$ cwltool --enable-ext arvbox-make-federation.cwl --arvbox_base ~/.arvbox/ --in_acr /path/to/arvados-cwl-runner > main-test.json
+$ cwltool arvbox-make-federation.cwl --arvbox_base ~/.arvbox/ --in_acr /path/to/arvados-cwl-runner > main-test.json
Run tests:
#
# SPDX-License-Identifier: Apache-2.0
-cwlVersion: v1.0
+cwlVersion: v1.1
class: Workflow
$namespaces:
arv: "http://arvados.org/cwl#"
requirements:
ScatterFeatureRequirement: {}
StepInputExpressionRequirement: {}
- cwltool:LoadListingRequirement:
+ LoadListingRequirement:
loadListing: no_listing
InlineJavascriptRequirement: {}
inputs:
containers: containers
arvbox_base: arvbox_base
out: [arvbox_data]
- run: arvbox/mkdir.cwl
+ run: arvboxcwl/mkdir.cwl
start:
in:
container_name: containers
out: [cluster_id, container_host, arvbox_data_out, superuser_token]
scatter: [container_name, arvbox_data]
scatterMethod: dotproduct
- run: arvbox/start.cwl
+ run: arvboxcwl/start.cwl
fed-config:
in:
container_name: containers
out: []
scatter: [container_name, this_cluster_id, arvbox_data]
scatterMethod: dotproduct
- run: arvbox/fed-config.cwl
+ run: arvboxcwl/fed-config.cwl
setup-user:
in:
container_host: {source: start/container_host, valueFrom: "$(self[0])"}
superuser_token: {source: start/superuser_token, valueFrom: "$(self[0])"}
out: [test_user_uuid, test_user_token]
- run: arvbox/setup-user.cwl
+ run: arvboxcwl/setup-user.cwl
#
# SPDX-License-Identifier: Apache-2.0
-cwlVersion: v1.0
+cwlVersion: v1.1
class: CommandLineTool
$namespaces:
arv: "http://arvados.org/cwl#"
}
return JSON.stringify({"development": {"remote_hosts": remoteClusters}});
}
- cwltool:LoadListingRequirement:
+ LoadListingRequirement:
loadListing: no_listing
ShellCommandRequirement: {}
InlineJavascriptRequirement: {}
- cwltool:InplaceUpdateRequirement:
+ InplaceUpdateRequirement:
inplaceUpdate: true
arguments:
- shellQuote: false
#
# SPDX-License-Identifier: Apache-2.0
-cwlVersion: v1.0
+cwlVersion: v1.1
class: CommandLineTool
$namespaces:
arv: "http://arvados.org/cwl#"
- entry: $(inputs.arvbox_base)
entryname: base
writable: true
- cwltool:LoadListingRequirement:
+ LoadListingRequirement:
loadListing: no_listing
InlineJavascriptRequirement: {}
- cwltool:InplaceUpdateRequirement:
+ InplaceUpdateRequirement:
inplaceUpdate: true
arguments:
- mkdir
#
# SPDX-License-Identifier: Apache-2.0
-cwlVersion: v1.0
+cwlVersion: v1.1
class: CommandLineTool
$namespaces:
arv: "http://arvados.org/cwl#"
ARVADOS_API_HOST: $(inputs.container_host)
ARVADOS_API_TOKEN: $(inputs.superuser_token)
ARVADOS_API_HOST_INSECURE: "true"
- cwltool:LoadListingRequirement:
+ LoadListingRequirement:
loadListing: no_listing
InlineJavascriptRequirement: {}
- cwltool:InplaceUpdateRequirement:
+ InplaceUpdateRequirement:
inplaceUpdate: true
DockerRequirement:
dockerPull: arvados/jobs
+ NetworkAccess:
+ networkAccess: true
inputs:
container_host: string
superuser_token: string
#
# SPDX-License-Identifier: Apache-2.0
-cwlVersion: v1.0
+cwlVersion: v1.1
class: CommandLineTool
$namespaces:
arv: "http://arvados.org/cwl#"
- entry: $(inputs.arvbox_data)
entryname: $(inputs.container_name)
writable: true
- cwltool:InplaceUpdateRequirement:
+ InplaceUpdateRequirement:
inplaceUpdate: true
InlineJavascriptRequirement: {}
arguments:
mkdir -p $ARVBOX_DATA
if ! test -d $ARVBOX_DATA/arvados ; then
cd $ARVBOX_DATA
- git clone https://github.com/arvados/arvados.git
+ git clone https://git.arvados.org/arvados.git
fi
cd $ARVBOX_DATA/arvados
gitver=`git rev-parse HEAD`
)
type GetOptions struct {
- UUID string `json:"uuid"`
+ UUID string `json:"uuid,omitempty"`
Select []string `json:"select"`
IncludeTrash bool `json:"include_trash"`
- ForwardedFor string `json:"forwarded_for"`
- Remote string `json:"remote"`
+ ForwardedFor string `json:"forwarded_for,omitempty"`
+ Remote string `json:"remote,omitempty"`
}
type UntrashOptions struct {
Select []string `json:"select"`
Filters []Filter `json:"filters"`
Where map[string]interface{} `json:"where"`
- Limit int `json:"limit"`
- Offset int `json:"offset"`
+ Limit int64 `json:"limit"`
+ Offset int64 `json:"offset"`
Order []string `json:"order"`
Distinct bool `json:"distinct"`
Count string `json:"count"`
IncludeTrash bool `json:"include_trash"`
IncludeOldVersions bool `json:"include_old_versions"`
+ BypassFederation bool `json:"bypass_federation"`
}
type CreateOptions struct {
}
type UpdateOptions struct {
- UUID string `json:"uuid"`
- Attrs map[string]interface{} `json:"attrs"`
+ UUID string `json:"uuid"`
+ Attrs map[string]interface{} `json:"attrs"`
+ BypassFederation bool `json:"bypass_federation"`
}
type UpdateUUIDOptions struct {
}
operand := elements[2]
switch operand.(type) {
- case string, float64, []interface{}, nil:
+ case string, float64, []interface{}, nil, bool:
default:
return fmt.Errorf("invalid filter operand %q", elements[2])
}
t.Errorf("Encoded as %q, expected %q", buf, expect)
}
}
+
+func TestUnmarshalFiltersWithNil(t *testing.T) {
+ buf := []byte(`["modified_at","=",null]`)
+ f := &Filter{}
+ err := f.UnmarshalJSON(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expect := Filter{Attr: "modified_at", Operator: "=", Operand: nil}
+ if f.Attr != expect.Attr || f.Operator != expect.Operator || f.Operand != expect.Operand {
+ t.Errorf("Decoded as %q, expected %q", f, expect)
+ }
+}
+
+func TestMarshalFiltersWithBoolean(t *testing.T) {
+ buf, err := json.Marshal([]Filter{
+ {Attr: "is_active", Operator: "=", Operand: true}})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if expect := []byte(`[["is_active","=",true]]`); 0 != bytes.Compare(buf, expect) {
+ t.Errorf("Encoded as %q, expected %q", buf, expect)
+ }
+}
+
+func TestUnmarshalFiltersWithBoolean(t *testing.T) {
+ buf := []byte(`["is_active","=",true]`)
+ f := &Filter{}
+ err := f.UnmarshalJSON(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expect := Filter{Attr: "is_active", Operator: "=", Operand: true}
+ if f.Attr != expect.Attr || f.Operator != expect.Operator || f.Operand != expect.Operand {
+ t.Errorf("Decoded as %q, expected %q", f, expect)
+ }
+}
errors.append("Inconsistent login cluster configuration, expected '%s' on %s but was '%s'" % (loginCluster, config["ClusterID"], config["Login"]["LoginCluster"]))
continue
- if arv._rootDesc["revision"] < "20190926":
- errors.append("Arvados API server revision on cluster '%s' is too old, must be updated to at least Arvados 1.5 before running migration." % config["ClusterID"])
+ if arv._rootDesc["revision"] < "20200331":
+ errors.append("Arvados API server revision on cluster '%s' is too old, must be updated to at least Arvados 2.0.2 before running migration." % config["ClusterID"])
continue
try:
users = []
for c, arv in clusters.items():
print("Getting user list from %s" % c)
- ul = arvados.util.list_all(arv.users().list)
+ ul = arvados.util.list_all(arv.users().list, bypass_federation=True)
for l in ul:
if l["uuid"].startswith(c):
users.append(l)
print("(%s) Updating username of %s to '%s' on %s" % (email, user_uuid, username, migratecluster))
if not args.dry_run:
try:
- conflicts = migratearv.users().list(filters=[["username", "=", username]]).execute()
+ conflicts = migratearv.users().list(filters=[["username", "=", username]], bypass_federation=True).execute()
if conflicts["items"]:
- migratearv.users().update(uuid=conflicts["items"][0]["uuid"], body={"user": {"username": username+"migrate"}}).execute()
- migratearv.users().update(uuid=user_uuid, body={"user": {"username": username}}).execute()
+ # There's already a user with the username, move the old user out of the way
+ migratearv.users().update(uuid=conflicts["items"][0]["uuid"],
+ bypass_federation=True,
+ body={"user": {"username": username+"migrate"}}).execute()
+ migratearv.users().update(uuid=user_uuid,
+ bypass_federation=True,
+ body={"user": {"username": username}}).execute()
except arvados.errors.ApiError as e:
print("(%s) Error updating username of %s to '%s' on %s: %s" % (email, user_uuid, username, migratecluster, e))
user = None
try:
olduser = oldhomearv.users().get(uuid=old_user_uuid).execute()
- conflicts = homearv.users().list(filters=[["username", "=", username]]).execute()
+ conflicts = homearv.users().list(filters=[["username", "=", username]],
+ bypass_federation=True).execute()
if conflicts["items"]:
- homearv.users().update(uuid=conflicts["items"][0]["uuid"], body={"user": {"username": username+"migrate"}}).execute()
- user = homearv.users().create(body={"user": {"email": email, "username": username, "is_active": olduser["is_active"]}}).execute()
+ homearv.users().update(uuid=conflicts["items"][0]["uuid"],
+ bypass_federation=True,
+ body={"user": {"username": username+"migrate"}}).execute()
+ user = homearv.users().create(body={"user": {"email": email, "username": username,
+ "is_active": olduser["is_active"]}}).execute()
except arvados.errors.ApiError as e:
print("(%s) Could not create user: %s" % (email, str(e)))
return None
return None
try:
- olduser = migratearv.users().get(uuid=old_user_uuid).execute()
+ findolduser = migratearv.users().list(filters=[["uuid", "=", old_user_uuid]], bypass_federation=True).execute()
+ if len(findolduser["items"]) == 0:
+ return False
+ if len(findolduser["items"]) == 1:
+ olduser = findolduser["items"][0]
+ else:
+ print("(%s) Unexpected result" % (email))
+ return None
except arvados.errors.ApiError as e:
- if e.resp.status != 404:
- print("(%s) Could not retrieve user %s from %s, user may have already been migrated: %s" % (email, old_user_uuid, migratecluster, e))
+ print("(%s) Could not retrieve user %s from %s, user may have already been migrated: %s" % (email, old_user_uuid, migratecluster, e))
return None
salted = 'v2/' + newtok["uuid"] + '/' + hmac.new(newtok["api_token"].encode(),
try:
ru = urllib.parse.urlparse(migratearv._rootDesc["rootUrl"])
if not args.dry_run:
- newuser = arvados.api(host=ru.netloc, token=salted, insecure=os.environ.get("ARVADOS_API_HOST_INSECURE")).users().current().execute()
+ newuser = arvados.api(host=ru.netloc, token=salted,
+ insecure=os.environ.get("ARVADOS_API_HOST_INSECURE")).users().current().execute()
else:
newuser = {"is_active": True, "username": username}
except arvados.errors.ApiError as e:
print("(%s) Activating user %s on %s" % (email, new_user_uuid, migratecluster))
try:
if not args.dry_run:
- migratearv.users().update(uuid=new_user_uuid, body={"is_active": True}).execute()
+ migratearv.users().update(uuid=new_user_uuid, bypass_federation=True,
+ body={"is_active": True}).execute()
except arvados.errors.ApiError as e:
print("(%s) Could not activate user %s on %s: %s" % (email, new_user_uuid, migratecluster, e))
return None
if new_user_uuid is None:
continue
- # cluster where the migration is happening
+ remote_users = {}
+ got_error = False
for migratecluster in clusters:
+ # cluster where the migration is happening
migratearv = clusters[migratecluster]
# the user's new home cluster
newuser = activate_remote_user(args, email, homearv, migratearv, old_user_uuid, new_user_uuid)
if newuser is None:
- continue
+ got_error = True
+ remote_users[migratecluster] = newuser
+
+ if not got_error:
+ for migratecluster in clusters:
+ migratearv = clusters[migratecluster]
+ newuser = remote_users[migratecluster]
+ if newuser is False:
+ continue
- print("(%s) Migrating %s to %s on %s" % (email, old_user_uuid, new_user_uuid, migratecluster))
+ print("(%s) Migrating %s to %s on %s" % (email, old_user_uuid, new_user_uuid, migratecluster))
- migrate_user(args, migratearv, email, new_user_uuid, old_user_uuid)
+ migrate_user(args, migratearv, email, new_user_uuid, old_user_uuid)
- if newuser['username'] != username:
- update_username(args, email, new_user_uuid, username, migratecluster, migratearv)
+ if newuser['username'] != username:
+ update_username(args, email, new_user_uuid, username, migratecluster, migratearv)
if __name__ == "__main__":
main()
in the 'fed_migrate' input parameter.
# Create arvbox containers fedbox(1,2,3) for the federation
-$ cwltool --enable-ext arvbox-make-federation.cwl --arvbox_base ~/.arvbox > fed.json
+$ cwltool arvbox-make-federation.cwl --arvbox_base ~/.arvbox > fed.json
# Configure containers and run tests
$ cwltool fed-migrate.cwl fed.json
-cwlVersion: v1.0
+cwlVersion: v1.1
class: Workflow
$namespaces:
arv: "http://arvados.org/cwl#"
SubworkflowFeatureRequirement: {}
ScatterFeatureRequirement: {}
StepInputExpressionRequirement: {}
- cwltool:LoadListingRequirement:
+ LoadListingRequirement:
loadListing: no_listing
steps:
start:
j = json.load(open(sys.argv[1]))
apiA = arvados.api(host=j["arvados_api_hosts"][0], token=j["superuser_tokens"][0], insecure=True)
-apiB = arvados.api(host=j["arvados_api_hosts"][1], token=j["superuser_tokens"][1], insecure=True)
-apiC = arvados.api(host=j["arvados_api_hosts"][2], token=j["superuser_tokens"][2], insecure=True)
+tok = apiA.api_client_authorizations().current().execute()
+v2_token = "v2/%s/%s" % (tok["uuid"], tok["api_token"])
+
+apiB = arvados.api(host=j["arvados_api_hosts"][1], token=v2_token, insecure=True)
+apiC = arvados.api(host=j["arvados_api_hosts"][2], token=v2_token, insecure=True)
###
### Check users on API server "A" (the LoginCluster) ###
###
-
-users = apiA.users().list().execute()
-
-assert len(users["items"]) == 11
-
by_username = {}
-
-for i in range(1, 10):
+def check_A(users):
+ assert len(users["items"]) == 11
+
+ for i in range(1, 10):
+ found = False
+ for u in users["items"]:
+ if u["username"] == ("case%d" % i) and u["email"] == ("case%d@test" % i):
+ found = True
+ by_username[u["username"]] = u["uuid"]
+ assert found
+
+ # Should be active
+ for i in (1, 2, 3, 4, 5, 6, 7, 8):
+ found = False
+ for u in users["items"]:
+ if u["username"] == ("case%d" % i) and u["email"] == ("case%d@test" % i) and u["is_active"] is True:
+ found = True
+ assert found, "Not found case%i" % i
+
+ # case9 should not be active
found = False
for u in users["items"]:
- if u["username"] == ("case%d" % i) and u["email"] == ("case%d@test" % i):
+ if (u["username"] == "case9" and u["email"] == "case9@test" and
+ u["uuid"] == by_username[u["username"]] and u["is_active"] is False):
found = True
- by_username[u["username"]] = u["uuid"]
assert found
-# Should be active
-for i in (1, 2, 3, 4, 5, 6, 7, 8):
- found = False
- for u in users["items"]:
- if u["username"] == ("case%d" % i) and u["email"] == ("case%d@test" % i) and u["is_active"] is True:
- found = True
- assert found, "Not found case%i" % i
-
-# case9 should not be active
-found = False
-for u in users["items"]:
- if (u["username"] == "case9" and u["email"] == "case9@test" and
- u["uuid"] == by_username[u["username"]] and u["is_active"] is False):
- found = True
-assert found
+users = apiA.users().list().execute()
+check_A(users)
+users = apiA.users().list(bypass_federation=True).execute()
+check_A(users)
###
### Check users on API server "B" (federation member) ###
###
-users = apiB.users().list().execute()
+
+# check for expected migrations on B
+users = apiB.users().list(bypass_federation=True).execute()
assert len(users["items"]) == 11
for i in range(2, 9):
found = True
assert found
+# check that federated user listing works
+users = apiB.users().list().execute()
+check_A(users)
###
### Check users on API server "C" (federation member) ###
###
-users = apiC.users().list().execute()
+
+# check for expected migrations on C
+users = apiC.users().list(bypass_federation=True).execute()
assert len(users["items"]) == 8
for i in (2, 4, 6, 7, 8):
found = True
assert not found
+# check that federated user listing works
+users = apiC.users().list().execute()
+check_A(users)
+
print("Passed checks")
"dbname": "arvados_test",
"user": "arvados",
"password": "insecure_arvados_test",
- "template": "template0", # used by RailsAPI when [re]creating the database
}
localhost = "127.0.0.1"
pg (1.1.4)
power_assert (1.1.4)
public_suffix (4.0.3)
- rack (2.0.7)
+ rack (2.2.2)
rack-test (0.6.3)
rack (>= 1.0)
rails (5.0.7.2)
uglifier (~> 2.0)
BUNDLED WITH
- 1.11
+ 1.16.6
before_action :load_required_parameters
before_action(:find_object_by_uuid,
except: [:index, :create] + ERROR_ACTIONS)
+ before_action(:set_nullable_attrs_to_null, only: [:update, :create])
before_action :load_limit_offset_order_params, only: [:index, :contents]
before_action :load_where_param, only: [:index, :contents]
before_action :load_filters_param, only: [:index, :contents]
before_action :reload_object_before_update, :only => :update
before_action(:render_404_if_no_object,
except: [:index, :create] + ERROR_ACTIONS)
+ before_action :only_admin_can_bypass_federation
attr_writer :resource_attrs
render_not_found "Object not found" if !@object
end
+ def only_admin_can_bypass_federation
+ unless !params[:bypass_federation] || current_user.andand.is_admin
+      send_error("The bypass_federation parameter is only permitted when the current user is an admin", status: 403)
+ end
+ end
+
def render_error(e)
logger.error e.inspect
if e.respond_to? :backtrace and e.backtrace
@object = @objects.first
end
+ def nullable_attributes
+ []
+ end
+
+  # Go code may send empty values (i.e., an empty string instead of NULL)
+  # that should be translated to NULL in the database.
+ def set_nullable_attrs_to_null
+ nullify_attrs(resource_attrs.to_hash).each do |k, v|
+ resource_attrs[k] = v
+ end
+ end
+
+ def nullify_attrs(a = {})
+ new_attrs = a.to_hash.symbolize_keys
+ (new_attrs.keys & nullable_attributes).each do |attr|
+ val = new_attrs[attr]
+ if (val.class == Integer && val == 0) || (val.class == String && val == "")
+ new_attrs[attr] = nil
+ end
+ end
+ return new_attrs
+ end
+
def reload_object_before_update
# This is necessary to prevent an ActiveRecord::ReadOnlyRecord
# error when updating an object which was retrieved using a join.
location: "query",
required: false,
},
+ bypass_federation: {
+ type: 'boolean',
+ required: false,
+ description: 'bypass federation behavior, list items from local instance database only'
+ }
}
end
id: "arvados:v1",
name: "arvados",
version: "v1",
- # format is YYYYMMDD, must be fixed with (needs to be linearly
+ # format is YYYYMMDD, must be fixed width (needs to be lexically
# sortable), updated manually, may be used by clients to
# determine availability of API server features.
- revision: "20200212",
+ revision: "20200331",
source_version: AppVersion.hash,
sourceVersion: AppVersion.hash, # source_version should be deprecated in the future
packageVersion: AppVersion.package_version,
rescue ActiveRecord::RecordNotUnique
retry
end
- u.update_attributes!(attrs)
+ u.update_attributes!(nullify_attrs(attrs))
@objects << u
end
@offset = 0
}
end
+ def self._update_requires_parameters
+ super.merge({
+ bypass_federation: {
+ type: 'boolean', required: false,
+ },
+ })
+ end
+
def self._update_uuid_requires_parameters
{
new_uuid: {
end
super
end
+
+ def nullable_attributes
+ super + [:email, :first_name, :last_name, :username]
+ end
end
if ::Rails.env.to_s == "test" && db_config["test"].nil?
$arvados_config["PostgreSQL"]["Connection"]["dbname"] = "arvados_test"
end
+if ::Rails.env.to_s == "test"
+ # Use template0 when creating a new database. Avoids
+ # character-encoding/collation problems.
+ $arvados_config["PostgreSQL"]["Connection"]["template"] = "template0"
+end
if $arvados_config["PostgreSQL"]["Connection"]["password"].empty?
raise "Database password is empty, PostgreSQL section is: #{$arvados_config["PostgreSQL"]}"
assert_nil created['identity_url'], 'expected no identity_url'
end
+ test "create new user with empty username" do
+ authorize_with :admin
+ post :create, params: {
+ user: {
+ first_name: "test_first_name",
+ last_name: "test_last_name",
+ username: ""
+ }
+ }
+ assert_response :success
+ created = JSON.parse(@response.body)
+ assert_equal 'test_first_name', created['first_name']
+ assert_not_nil created['uuid'], 'expected uuid for the newly created user'
+ assert_nil created['email'], 'expected no email'
+ assert_nil created['username'], 'expected no username'
+ end
+
+ test "update user with empty username" do
+ authorize_with :admin
+ user = users('spectator')
+ assert_not_nil user['username']
+ put :update, params: {
+ id: users('spectator')['uuid'],
+ user: {
+ username: ""
+ }
+ }
+ assert_response :success
+ updated = JSON.parse(@response.body)
+ assert_nil updated['username'], 'expected no username'
+ end
+
test "create user with user, vm and repo as input" do
authorize_with :admin
repo_name = 'usertestrepo'
newuuid => {
'first_name' => 'noot',
'email' => 'root@remot.example.com',
+ 'username' => '',
},
}})
assert_response(:success)
assert_match(/Cannot activate without being invited/, json_response['errors'][0])
end
+ test "bypass_federation only accepted for admins" do
+ get "/arvados/v1/users",
+ params: {
+ bypass_federation: true
+ },
+ headers: auth(:admin)
+
+ assert_response :success
+
+ get "/arvados/v1/users",
+ params: {
+ bypass_federation: true
+ },
+ headers: auth(:active)
+
+ assert_response 403
+ end
end
}
bufs = newBufferPool(h.Logger, h.Cluster.API.MaxKeepBlobBuffers, BlockSize)
- if h.Cluster.API.MaxConcurrentRequests < 1 {
- h.Cluster.API.MaxConcurrentRequests = h.Cluster.API.MaxKeepBlobBuffers * 2
- h.Logger.Warnf("API.MaxConcurrentRequests <1 or not specified; defaulting to MaxKeepBlobBuffers * 2 == %d", h.Cluster.API.MaxConcurrentRequests)
- }
if h.Cluster.API.MaxConcurrentRequests > 0 && h.Cluster.API.MaxConcurrentRequests < h.Cluster.API.MaxKeepBlobBuffers {
h.Logger.Warnf("Possible configuration mistake: not useful to set API.MaxKeepBlobBuffers (%d) higher than API.MaxConcurrentRequests (%d)", h.Cluster.API.MaxKeepBlobBuffers, h.Cluster.API.MaxConcurrentRequests)
}