MaxItemsPerResponse: 1000
# Maximum number of concurrent requests to accept in a single
- # service process, or 0 for no limit. Currently supported only
- # by keepstore.
+ # service process, or 0 for no limit.
MaxConcurrentRequests: 0
- # Maximum number of 64MiB memory buffers per keepstore server
- # process, or 0 for no limit.
+ # Maximum number of 64MiB memory buffers per Keepstore server process, or
+ # 0 for no limit. When this limit is reached, up to
+ # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
+ # buffers (like GET and PUT) will wait for buffer space to be released.
+ # Any HTTP requests beyond MaxConcurrentRequests will receive an
+ # immediate 503 response.
+ #
+ # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB
+ # * 1.1) fits comfortably in memory. On a host dedicated to running
+ # Keepstore, divide total memory by 88MiB to suggest a suitable value.
+ # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440
+ # kB, compute 7125440 / (88 * 1024)=79 and configure MaxKeepBlobBuffers: 79
MaxKeepBlobBuffers: 128
# API methods to disable. Disabled methods are not listed in the
# implementation. Note that it also disables some new federation
# features and will be removed in a future release.
ForceLegacyAPI14: false
+
+ # (Experimental) Restart services automatically when config file
+ # changes are detected. Only supported by `arvados-server boot` in
+ # dev/test mode.
+ AutoReloadConfig: false
MaxItemsPerResponse: 1000
# Maximum number of concurrent requests to accept in a single
- # service process, or 0 for no limit. Currently supported only
- # by keepstore.
+ # service process, or 0 for no limit.
MaxConcurrentRequests: 0
- # Maximum number of 64MiB memory buffers per keepstore server
- # process, or 0 for no limit.
+ # Maximum number of 64MiB memory buffers per Keepstore server process, or
+ # 0 for no limit. When this limit is reached, up to
+ # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
+ # buffers (like GET and PUT) will wait for buffer space to be released.
+ # Any HTTP requests beyond MaxConcurrentRequests will receive an
+ # immediate 503 response.
+ #
+ # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB
+ # * 1.1) fits comfortably in memory. On a host dedicated to running
+ # Keepstore, divide total memory by 88MiB to suggest a suitable value.
+ # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440
+ # kB, compute 7125440 / (88 * 1024)=79 and configure MaxKeepBlobBuffers: 79
MaxKeepBlobBuffers: 128
# API methods to disable. Disabled methods are not listed in the
# implementation. Note that it also disables some new federation
# features and will be removed in a future release.
ForceLegacyAPI14: false
+
+ # (Experimental) Restart services automatically when config file
+ # changes are detected. Only supported by ` + "`" + `arvados-server boot` + "`" + ` in
+ # dev/test mode.
+ AutoReloadConfig: false
`)
"bytes"
"context"
"io"
+ "math"
"net"
"net/url"
"os"
TLS:
Insecure: true
Login:
- # LoginCluster: z1111
+ LoginCluster: z1111
SystemLogs:
Format: text
RemoteClusters:
},
config: *cfg,
}
- s.testClusters[id].super.Start(context.Background(), &s.testClusters[id].config)
+ s.testClusters[id].super.Start(context.Background(), &s.testClusters[id].config, "-")
}
for _, tc := range s.testClusters {
au, ok := tc.super.WaitReady()
return ctx, ac, kc
}
-func (s *IntegrationSuite) userClients(c *check.C, conn *rpc.Conn, rootctx context.Context, clusterID string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient) {
+func (s *IntegrationSuite) userClients(rootctx context.Context, c *check.C, conn *rpc.Conn, clusterID string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient) {
login, err := conn.UserSessionCreate(rootctx, rpc.UserSessionCreateOptions{
ReturnTo: ",https://example.com",
AuthInfo: rpc.UserSessionAuthInfo{
conn1 := s.conn("z1111")
rootctx1, _, _ := s.rootClients("z1111")
conn3 := s.conn("z3333")
- userctx1, ac1, kc1 := s.userClients(c, conn1, rootctx1, "z1111", true)
+ userctx1, ac1, kc1 := s.userClients(rootctx1, c, conn1, "z1111", true)
// Create the collection to find its PDH (but don't save it
// anywhere yet)
c.Check(err, check.IsNil)
c.Check(coll.PortableDataHash, check.Equals, pdh)
}
+
+// Test for bug #16263: with LoginCluster federation configured, a
+// satellite cluster (z3333) must be able to proxy a UserList request
+// to the login cluster (z1111), including when some user records on
+// the login cluster have empty/NULL usernames.
+func (s *IntegrationSuite) TestListUsers(c *check.C) {
+	rootctx1, _, _ := s.rootClients("z1111")
+	conn1 := s.conn("z1111")
+	conn3 := s.conn("z3333")
+
+	// Make sure LoginCluster is properly configured
+	for cls := range s.testClusters {
+		c.Check(
+			s.testClusters[cls].config.Clusters[cls].Login.LoginCluster,
+			check.Equals, "z1111",
+			check.Commentf("incorrect LoginCluster config on cluster %q", cls))
+	}
+	// Make sure z1111 has users with NULL usernames
+	// (Limit: -1 requests an unbounded listing)
+	lst, err := conn1.UserList(rootctx1, arvados.ListOptions{Limit: -1})
+	nullUsername := false
+	c.Assert(err, check.IsNil)
+	c.Assert(len(lst.Items), check.Not(check.Equals), 0)
+	for _, user := range lst.Items {
+		if user.Username == "" {
+			nullUsername = true
+		}
+	}
+	// Precondition for the bug scenario: at least one user with an
+	// empty username must exist, otherwise the test proves nothing.
+	c.Assert(nullUsername, check.Equals, true)
+	// Ask for the user list on z3333 using z1111's system root token;
+	// before the fix this federated listing failed (bug #16263).
+	_, err = conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})
+	c.Assert(err, check.IsNil, check.Commentf("getting user list: %q", err))
+}
+
+// Test for bug #16263: a federated UserList request with an extreme
+// Limit value (max int64) must not overflow or error when the
+// satellite cluster (z3333) forwards it to the login cluster (z1111).
+func (s *IntegrationSuite) TestListUsersWithMaxLimit(c *check.C) {
+	rootctx1, _, _ := s.rootClients("z1111")
+	conn3 := s.conn("z3333")
+	maxLimit := int64(math.MaxInt64)
+
+	// Make sure LoginCluster is properly configured
+	for cls := range s.testClusters {
+		c.Check(
+			s.testClusters[cls].config.Clusters[cls].Login.LoginCluster,
+			check.Equals, "z1111",
+			check.Commentf("incorrect LoginCluster config on cluster %q", cls))
+	}
+
+	// Ask for the user list on z3333 using z1111's system root token and
+	// limit: max int64 value.
+	_, err := conn3.UserList(rootctx1, arvados.ListOptions{Limit: maxLimit})
+	c.Assert(err, check.IsNil, check.Commentf("getting user list: %q", err))
+}