flags.StringVar(&super.ClusterType, "type", "production", "cluster `type`: development, test, or production")
flags.StringVar(&super.ListenHost, "listen-host", "localhost", "host name or interface address for service listeners")
flags.StringVar(&super.ControllerAddr, "controller-address", ":0", "desired controller address, `host:port` or `:port`")
+ flags.BoolVar(&super.NoWorkbench1, "no-workbench1", false, "do not run workbench1")
flags.BoolVar(&super.OwnTemporaryDatabase, "own-temporary-database", false, "bring up a postgres server and create a temporary database")
timeout := flags.Duration("timeout", 0, "maximum time to wait for cluster to be ready")
shutdown := flags.Bool("shutdown", false, "shut down when the cluster becomes ready")
{"WORKBENCH1", super.cluster.Services.Workbench1},
{"WS", super.cluster.Services.Websocket},
} {
- host, port, err := internalPort(cmpt.svc)
- if err != nil {
+ var host, port string
+ if len(cmpt.svc.InternalURLs) == 0 {
+ // We won't run this service, but we need an
+ // upstream port to write in our templated
+ // nginx config. Choose a port that will
+ // return 502 Bad Gateway.
+ port = "9"
+ } else if host, port, err = internalPort(cmpt.svc); err != nil {
return fmt.Errorf("%s internal port: %w (%v)", cmpt.varname, err, cmpt.svc)
- }
- if ok, err := addrIsLocal(net.JoinHostPort(host, port)); !ok || err != nil {
- return fmt.Errorf("urlIsLocal() failed for host %q port %q: %v", host, port, err)
+ } else if ok, err := addrIsLocal(net.JoinHostPort(host, port)); !ok || err != nil {
+ return fmt.Errorf("%s addrIsLocal() failed for host %q port %q: %v", cmpt.varname, host, port, err)
}
vars[cmpt.varname+"PORT"] = port
if err != nil {
return fmt.Errorf("%s external port: %w (%v)", cmpt.varname, err, cmpt.svc)
}
- if ok, err := addrIsLocal(net.JoinHostPort(super.ListenHost, port)); !ok || err != nil {
- return fmt.Errorf("urlIsLocal() failed for host %q port %q: %v", super.ListenHost, port, err)
+ listenAddr := net.JoinHostPort(super.ListenHost, port)
+ if ok, err := addrIsLocal(listenAddr); !ok || err != nil {
+ return fmt.Errorf("%s addrIsLocal(%q) failed: %w", cmpt.varname, listenAddr, err)
}
vars[cmpt.varname+"SSLPORT"] = port
}
ClusterType string // e.g., production
ListenHost string // e.g., localhost
ControllerAddr string // e.g., 127.0.0.1:8000
+ NoWorkbench1 bool
OwnTemporaryDatabase bool
Stderr io.Writer
runServiceCommand{name: "ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{seedDatabase{}}},
installPassenger{src: "services/api"},
runPassenger{src: "services/api", varlibdir: "railsapi", svc: super.cluster.Services.RailsAPI, depends: []supervisedTask{createCertificates{}, seedDatabase{}, installPassenger{src: "services/api"}}},
- installPassenger{src: "apps/workbench", depends: []supervisedTask{seedDatabase{}}}, // dependency ensures workbench doesn't delay api install/startup
- runPassenger{src: "apps/workbench", varlibdir: "workbench1", svc: super.cluster.Services.Workbench1, depends: []supervisedTask{installPassenger{src: "apps/workbench"}}},
seedDatabase{},
}
+ if !super.NoWorkbench1 {
+ tasks = append(tasks,
+ installPassenger{src: "apps/workbench", depends: []supervisedTask{seedDatabase{}}}, // dependency ensures workbench doesn't delay api install/startup
+ runPassenger{src: "apps/workbench", varlibdir: "workbench1", svc: super.cluster.Services.Workbench1, depends: []supervisedTask{installPassenger{src: "apps/workbench"}}},
+ )
+ }
if super.ClusterType != "test" {
tasks = append(tasks,
runServiceCommand{name: "dispatch-cloud", svc: super.cluster.Services.DispatchCloud},
svc.ExternalURL = arvados.URL{Scheme: "wss", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort(super.ListenHost)), Path: "/websocket"}
}
}
+ if super.NoWorkbench1 && svc == &cluster.Services.Workbench1 {
+ // When workbench1 is disabled, it gets an
+ // ExternalURL (so we have a valid listening
+ // port to write in our Nginx config) but no
+ // InternalURLs (so health checker doesn't
+ // complain).
+ continue
+ }
if len(svc.InternalURLs) == 0 {
svc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{
{Scheme: "http", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort(super.ListenHost)), Path: "/"}: {},
tc := boot.NewTestCluster(
filepath.Join(cwd, "..", ".."),
id, cfg, "127.0.0."+id[3:], c.Log)
+ tc.Super.NoWorkbench1 = true
+ tc.Start()
s.testClusters[id] = tc
- s.testClusters[id].Start()
}
for _, tc := range s.testClusters {
ok := tc.WaitReady()
if p.startswith("keep:") and (arvados.util.keep_locator_pattern.match(p[5:]) or
arvados.util.collection_uuid_pattern.match(p[5:])):
locator = p[5:]
- return (self.collection_cache.get(locator), urllib.parse.unquote(sp[1]) if len(sp) == 2 else None)
+ rest = os.path.normpath(urllib.parse.unquote(sp[1])) if len(sp) == 2 else None
+ return (self.collection_cache.get(locator), rest)
else:
return (None, path)
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Regression fixture: captures an output Directory with a glob that
+# has both a leading './' and a trailing slash ('./foo/'). The extra
+# baz.txt created outside foo/ must not end up in the captured
+# directory. (Appears to correspond to the issue 10380 test entry.)
+cwlVersion: v1.2
+class: CommandLineTool
+inputs: []
+outputs:
+ stuff:
+ type: Directory
+ outputBinding:
+ glob: './foo/'
+requirements:
+ ShellCommandRequirement: {}
+arguments: [{shellQuote: false, valueFrom: "mkdir -p foo && touch baz.txt && touch foo/bar.txt"}]
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Regression fixture: captures output Files with a glob that has a
+# leading './' ('./foo/*.txt'). Only foo/bar.txt should match; the
+# baz.txt created in the working-directory root must not be captured.
+# (Appears to correspond to the issue 17521 test entry.)
+cwlVersion: v1.2
+class: CommandLineTool
+inputs: []
+outputs:
+ stuff:
+ type: File
+ outputBinding:
+ glob: './foo/*.txt'
+requirements:
+ ShellCommandRequirement: {}
+arguments: [{shellQuote: false, valueFrom: "mkdir -p foo && touch baz.txt && touch foo/bar.txt"}]
output: {}
tool: wf/trick_defaults2.cwl
doc: "Test issue 17462 - secondary file objects on file defaults are not resolved"
+
+- job: null
+ output: {}
+ tool: 17521-dot-slash-glob.cwl
+ doc: "Test issue 17521 - bug with leading './' capturing files in subdirectories"
+
+- job: null
+ output: {}
+ tool: 10380-trailing-slash-dir.cwl
+ doc: "Test issue 10380 - bug with trailing slash when capturing an output directory"
return nil, err
}
for _, child := range all {
+ ln.treenode.Lock()
_, err = ln.treenode.Child(child.FileInfo().Name(), func(inode) (inode, error) {
return child, nil
})
+ ln.treenode.Unlock()
if err != nil {
return nil, err
}
import (
"crypto/hmac"
"crypto/sha256"
+ "encoding/base64"
"encoding/xml"
"errors"
"fmt"
s3MaxClockSkew = 5 * time.Minute
)
+// commonPrefix wraps one CommonPrefixes entry so the XML encoder
+// emits a <CommonPrefixes><Prefix>...</Prefix></CommonPrefixes>
+// element per prefix (instead of one empty tag when the slice is
+// nil).
+type commonPrefix struct {
+ Prefix string
+}
+
+// listV1Resp is the XML response body for a ListObjects (v1)
+// request.
+type listV1Resp struct {
+ XMLName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
+ s3.ListResp
+ // s3.ListResp marshals an empty tag when
+ // CommonPrefixes is nil, which confuses some clients.
+ // Fix by using this nested struct instead.
+ CommonPrefixes []commonPrefix
+ // Similarly, we need omitempty here, because an empty
+ // tag confuses some clients (e.g.,
+ // github.com/aws/aws-sdk-net never terminates its
+ // paging loop).
+ NextMarker string `xml:"NextMarker,omitempty"`
+ // ListObjectsV2 has a KeyCount response field.
+ KeyCount int
+}
+
+// listV2Resp is the XML response body for a ListObjectsV2
+// (list-type=2) request. Both API versions share the same XML
+// element name; the field sets differ.
+type listV2Resp struct {
+ XMLName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
+ IsTruncated bool
+ Contents []s3.Key
+ Name string
+ Prefix string
+ Delimiter string
+ MaxKeys int
+ CommonPrefixes []commonPrefix
+ EncodingType string `xml:",omitempty"`
+ KeyCount int
+ ContinuationToken string `xml:",omitempty"`
+ NextContinuationToken string `xml:",omitempty"`
+ StartAfter string `xml:",omitempty"`
+}
+
func hmacstring(msg string, key []byte) []byte {
h := hmac.New(sha256.New, key)
io.WriteString(h, msg)
func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request, fs arvados.CustomFileSystem) {
var params struct {
- delimiter string
- marker string
- maxKeys int
- prefix string
+ v2 bool
+ delimiter string
+ maxKeys int
+ prefix string
+ marker string // decoded continuationToken (v2) or provided by client (v1)
+ startAfter string // v2
+ continuationToken string // v2
+ encodingTypeURL bool // v2
}
params.delimiter = r.FormValue("delimiter")
- params.marker = r.FormValue("marker")
if mk, _ := strconv.ParseInt(r.FormValue("max-keys"), 10, 64); mk > 0 && mk < s3MaxKeys {
params.maxKeys = int(mk)
} else {
params.maxKeys = s3MaxKeys
}
params.prefix = r.FormValue("prefix")
+ switch r.FormValue("list-type") {
+ case "":
+ case "2":
+ params.v2 = true
+ default:
+ http.Error(w, "invalid list-type parameter", http.StatusBadRequest)
+ return
+ }
+ if params.v2 {
+ params.continuationToken = r.FormValue("continuation-token")
+ marker, err := base64.StdEncoding.DecodeString(params.continuationToken)
+ if err != nil {
+ http.Error(w, "invalid continuation token", http.StatusBadRequest)
+ return
+ }
+ params.marker = string(marker)
+ params.startAfter = r.FormValue("start-after")
+ switch r.FormValue("encoding-type") {
+ case "":
+ case "url":
+ params.encodingTypeURL = true
+ default:
+ http.Error(w, "invalid encoding-type parameter", http.StatusBadRequest)
+ return
+ }
+ } else {
+ params.marker = r.FormValue("marker")
+ }
bucketdir := "by_id/" + bucket
// walkpath is the directory (relative to bucketdir) we need
walkpath = ""
}
- type commonPrefix struct {
- Prefix string
- }
- type listResp struct {
- XMLName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
- s3.ListResp
- // s3.ListResp marshals an empty tag when
- // CommonPrefixes is nil, which confuses some clients.
- // Fix by using this nested struct instead.
- CommonPrefixes []commonPrefix
- // Similarly, we need omitempty here, because an empty
- // tag confuses some clients (e.g.,
- // github.com/aws/aws-sdk-net never terminates its
- // paging loop).
- NextMarker string `xml:"NextMarker,omitempty"`
- // ListObjectsV2 has a KeyCount response field.
- KeyCount int
- }
- resp := listResp{
- ListResp: s3.ListResp{
- Name: bucket,
- Prefix: params.prefix,
- Delimiter: params.delimiter,
- Marker: params.marker,
- MaxKeys: params.maxKeys,
- },
- }
+ resp := listV2Resp{
+ Name: bucket,
+ Prefix: params.prefix,
+ Delimiter: params.delimiter,
+ MaxKeys: params.maxKeys,
+ ContinuationToken: r.FormValue("continuation-token"),
+ StartAfter: params.startAfter,
+ }
+ nextMarker := ""
+
commonPrefixes := map[string]bool{}
err := walkFS(fs, strings.TrimSuffix(bucketdir+"/"+walkpath, "/"), true, func(path string, fi os.FileInfo) error {
if path == bucketdir {
return errDone
}
}
- if path < params.marker || path < params.prefix {
+ if path < params.marker || path < params.prefix || path <= params.startAfter {
return nil
}
if fi.IsDir() && !h.Config.cluster.Collections.S3FolderObjects {
// finding a regular file inside it.
return nil
}
+ if len(resp.Contents)+len(commonPrefixes) >= params.maxKeys {
+ resp.IsTruncated = true
+ if params.delimiter != "" || params.v2 {
+ nextMarker = path
+ }
+ return errDone
+ }
if params.delimiter != "" {
idx := strings.Index(path[len(params.prefix):], params.delimiter)
if idx >= 0 {
return filepath.SkipDir
}
}
- if len(resp.Contents)+len(commonPrefixes) >= params.maxKeys {
- resp.IsTruncated = true
- if params.delimiter != "" {
- resp.NextMarker = path
- }
- return errDone
- }
resp.Contents = append(resp.Contents, s3.Key{
Key: path,
LastModified: fi.ModTime().UTC().Format("2006-01-02T15:04:05.999") + "Z",
sort.Slice(resp.CommonPrefixes, func(i, j int) bool { return resp.CommonPrefixes[i].Prefix < resp.CommonPrefixes[j].Prefix })
}
resp.KeyCount = len(resp.Contents)
+ var respV1orV2 interface{}
+
+ if params.encodingTypeURL {
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
+ // "If you specify the encoding-type request
+ // parameter, Amazon S3 includes this element in the
+ // response, and returns encoded key name values in
+ // the following response elements:
+ //
+ // Delimiter, Prefix, Key, and StartAfter.
+ //
+ // Type: String
+ //
+ // Valid Values: url"
+ //
+ // This is somewhat vague but in practice it appears
+ // to mean x-www-form-urlencoded as in RFC1866 8.2.1
+ // para 1 (encode space as "+") rather than straight
+ // percent-encoding as in RFC1738 2.2. Presumably,
+ // the intent is to allow the client to decode XML and
+ // then paste the strings directly into another URI
+ // query or POST form like "https://host/path?foo=" +
+ // foo + "&bar=" + bar.
+ resp.EncodingType = "url"
+ resp.Delimiter = url.QueryEscape(resp.Delimiter)
+ resp.Prefix = url.QueryEscape(resp.Prefix)
+ resp.StartAfter = url.QueryEscape(resp.StartAfter)
+ for i, ent := range resp.Contents {
+ ent.Key = url.QueryEscape(ent.Key)
+ resp.Contents[i] = ent
+ }
+ for i, ent := range resp.CommonPrefixes {
+ ent.Prefix = url.QueryEscape(ent.Prefix)
+ resp.CommonPrefixes[i] = ent
+ }
+ }
+
+ if params.v2 {
+ resp.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextMarker))
+ respV1orV2 = resp
+ } else {
+ respV1orV2 = listV1Resp{
+ CommonPrefixes: resp.CommonPrefixes,
+ NextMarker: nextMarker,
+ KeyCount: resp.KeyCount,
+ ListResp: s3.ListResp{
+ IsTruncated: resp.IsTruncated,
+ Name: bucket,
+ Prefix: params.prefix,
+ Delimiter: params.delimiter,
+ Marker: params.marker,
+ MaxKeys: params.maxKeys,
+ Contents: resp.Contents,
+ },
+ }
+ }
+
w.Header().Set("Content-Type", "application/xml")
io.WriteString(w, xml.Header)
- if err := xml.NewEncoder(w).Encode(resp); err != nil {
+ if err := xml.NewEncoder(w).Encode(respV1orV2); err != nil {
ctxlog.FromContext(r.Context()).WithError(err).Error("error writing xml response")
}
}
import (
"bytes"
+ "context"
"crypto/rand"
"crypto/sha256"
"fmt"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/AdRoll/goamz/aws"
"github.com/AdRoll/goamz/s3"
+ aws_aws "github.com/aws/aws-sdk-go/aws"
+ aws_credentials "github.com/aws/aws-sdk-go/aws/credentials"
+ aws_session "github.com/aws/aws-sdk-go/aws/session"
+ aws_s3 "github.com/aws/aws-sdk-go/service/s3"
check "gopkg.in/check.v1"
)
}
}
+func (s *IntegrationSuite) TestS3ListObjectsV2(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+ dirs := 2
+ filesPerDir := 40
+ stage.writeBigDirs(c, dirs, filesPerDir)
+
+ sess := aws_session.Must(aws_session.NewSession(&aws_aws.Config{
+ Region: aws_aws.String("auto"),
+ Endpoint: aws_aws.String("http://" + s.testServer.Addr),
+ Credentials: aws_credentials.NewStaticCredentials(url.QueryEscape(arvadostest.ActiveTokenV2), url.QueryEscape(arvadostest.ActiveTokenV2), ""),
+ S3ForcePathStyle: aws_aws.Bool(true),
+ }))
+
+ stringOrNil := func(s string) *string {
+ if s == "" {
+ return nil
+ } else {
+ return &s
+ }
+ }
+
+ client := aws_s3.New(sess)
+ ctx := context.Background()
+
+ for _, trial := range []struct {
+ prefix string
+ delimiter string
+ startAfter string
+ maxKeys int
+ expectKeys int
+ expectCommonPrefixes map[string]bool
+ }{
+ {
+ // Expect {filesPerDir plus the dir itself}
+ // for each dir, plus emptydir, emptyfile, and
+ // sailboat.txt.
+ expectKeys: (filesPerDir+1)*dirs + 3,
+ },
+ {
+ maxKeys: 15,
+ expectKeys: (filesPerDir+1)*dirs + 3,
+ },
+ {
+ startAfter: "dir0/z",
+ maxKeys: 15,
+ // Expect {filesPerDir plus the dir itself}
+ // for each dir except dir0, plus emptydir,
+ // emptyfile, and sailboat.txt.
+ expectKeys: (filesPerDir+1)*(dirs-1) + 3,
+ },
+ {
+ maxKeys: 1,
+ delimiter: "/",
+ expectKeys: 2, // emptyfile, sailboat.txt
+ expectCommonPrefixes: map[string]bool{"dir0/": true, "dir1/": true, "emptydir/": true},
+ },
+ {
+ startAfter: "dir0/z",
+ maxKeys: 15,
+ delimiter: "/",
+ expectKeys: 2, // emptyfile, sailboat.txt
+ expectCommonPrefixes: map[string]bool{"dir1/": true, "emptydir/": true},
+ },
+ {
+ startAfter: "dir0/file10.txt",
+ maxKeys: 15,
+ delimiter: "/",
+ expectKeys: 2,
+ expectCommonPrefixes: map[string]bool{"dir0/": true, "dir1/": true, "emptydir/": true},
+ },
+ {
+ startAfter: "dir0/file10.txt",
+ maxKeys: 15,
+ prefix: "d",
+ delimiter: "/",
+ expectKeys: 0,
+ expectCommonPrefixes: map[string]bool{"dir0/": true, "dir1/": true},
+ },
+ } {
+ c.Logf("[trial %+v]", trial)
+ params := aws_s3.ListObjectsV2Input{
+ Bucket: aws_aws.String(stage.collbucket.Name),
+ Prefix: stringOrNil(trial.prefix),
+ Delimiter: stringOrNil(trial.delimiter),
+ StartAfter: stringOrNil(trial.startAfter),
+ MaxKeys: aws_aws.Int64(int64(trial.maxKeys)),
+ }
+ keySeen := map[string]bool{}
+ prefixSeen := map[string]bool{}
+ for {
+ result, err := client.ListObjectsV2WithContext(ctx, ¶ms)
+ if !c.Check(err, check.IsNil) {
+ break
+ }
+ c.Check(result.Name, check.DeepEquals, aws_aws.String(stage.collbucket.Name))
+ c.Check(result.Prefix, check.DeepEquals, aws_aws.String(trial.prefix))
+ c.Check(result.Delimiter, check.DeepEquals, aws_aws.String(trial.delimiter))
+ // The following two fields are expected to be
+ // nil (i.e., no tag in XML response) rather
+ // than "" when the corresponding request
+ // field was empty or nil.
+ c.Check(result.StartAfter, check.DeepEquals, stringOrNil(trial.startAfter))
+ c.Check(result.ContinuationToken, check.DeepEquals, params.ContinuationToken)
+
+ if trial.maxKeys > 0 {
+ c.Check(result.MaxKeys, check.DeepEquals, aws_aws.Int64(int64(trial.maxKeys)))
+ c.Check(len(result.Contents)+len(result.CommonPrefixes) <= trial.maxKeys, check.Equals, true)
+ } else {
+ c.Check(result.MaxKeys, check.DeepEquals, aws_aws.Int64(int64(s3MaxKeys)))
+ }
+
+ for _, ent := range result.Contents {
+ c.Assert(ent.Key, check.NotNil)
+ c.Check(*ent.Key > trial.startAfter, check.Equals, true)
+ c.Check(keySeen[*ent.Key], check.Equals, false, check.Commentf("dup key %q", *ent.Key))
+ keySeen[*ent.Key] = true
+ }
+ for _, ent := range result.CommonPrefixes {
+ c.Assert(ent.Prefix, check.NotNil)
+ c.Check(strings.HasSuffix(*ent.Prefix, trial.delimiter), check.Equals, true, check.Commentf("bad CommonPrefix %q", *ent.Prefix))
+ if strings.HasPrefix(trial.startAfter, *ent.Prefix) {
+ // If we asked for
+ // startAfter=dir0/file10.txt,
+ // we expect dir0/ to be
+ // returned as a common prefix
+ } else {
+ c.Check(*ent.Prefix > trial.startAfter, check.Equals, true)
+ }
+ c.Check(prefixSeen[*ent.Prefix], check.Equals, false, check.Commentf("dup common prefix %q", *ent.Prefix))
+ prefixSeen[*ent.Prefix] = true
+ }
+ if *result.IsTruncated && c.Check(result.NextContinuationToken, check.Not(check.Equals), "") {
+ params.ContinuationToken = aws_aws.String(*result.NextContinuationToken)
+ } else {
+ break
+ }
+ }
+ c.Check(keySeen, check.HasLen, trial.expectKeys)
+ c.Check(prefixSeen, check.HasLen, len(trial.expectCommonPrefixes))
+ if len(trial.expectCommonPrefixes) > 0 {
+ c.Check(prefixSeen, check.DeepEquals, trial.expectCommonPrefixes)
+ }
+ }
+}
+
+// TestS3ListObjectsV2EncodingTypeURL verifies that when the client
+// sends encoding-type=url, the ListObjectsV2 response returns
+// Prefix, Delimiter, StartAfter, Key, and CommonPrefixes values in
+// URL-encoded form (e.g., "/" comes back as "%2F").
+func (s *IntegrationSuite) TestS3ListObjectsV2EncodingTypeURL(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+ dirs := 2
+ filesPerDir := 40
+ stage.writeBigDirs(c, dirs, filesPerDir)
+
+ sess := aws_session.Must(aws_session.NewSession(&aws_aws.Config{
+ Region: aws_aws.String("auto"),
+ Endpoint: aws_aws.String("http://" + s.testServer.Addr),
+ Credentials: aws_credentials.NewStaticCredentials(url.QueryEscape(arvadostest.ActiveTokenV2), url.QueryEscape(arvadostest.ActiveTokenV2), ""),
+ S3ForcePathStyle: aws_aws.Bool(true),
+ }))
+
+ client := aws_s3.New(sess)
+ ctx := context.Background()
+
+ // With a prefix/delimiter/start-after, every echoed request
+ // field and every Key should come back URL-encoded.
+ result, err := client.ListObjectsV2WithContext(ctx, &aws_s3.ListObjectsV2Input{
+ Bucket: aws_aws.String(stage.collbucket.Name),
+ Prefix: aws_aws.String("dir0/"),
+ Delimiter: aws_aws.String("/"),
+ StartAfter: aws_aws.String("dir0/"),
+ EncodingType: aws_aws.String("url"),
+ })
+ c.Assert(err, check.IsNil)
+ c.Check(*result.Prefix, check.Equals, "dir0%2F")
+ c.Check(*result.Delimiter, check.Equals, "%2F")
+ c.Check(*result.StartAfter, check.Equals, "dir0%2F")
+ for _, ent := range result.Contents {
+ c.Check(*ent.Key, check.Matches, "dir0%2F.*")
+ }
+ // With only a delimiter, the CommonPrefixes entries should be
+ // URL-encoded too (dirs plus emptydir).
+ result, err = client.ListObjectsV2WithContext(ctx, &aws_s3.ListObjectsV2Input{
+ Bucket: aws_aws.String(stage.collbucket.Name),
+ Delimiter: aws_aws.String("/"),
+ EncodingType: aws_aws.String("url"),
+ })
+ c.Assert(err, check.IsNil)
+ c.Check(*result.Delimiter, check.Equals, "%2F")
+ c.Check(result.CommonPrefixes, check.HasLen, dirs+1)
+ for _, ent := range result.CommonPrefixes {
+ c.Check(*ent.Prefix, check.Matches, ".*%2F")
+ }
+}
+
// TestS3cmd checks compatibility with the s3cmd command line tool, if
// it's installed. As of Debian buster, s3cmd is only in backports, so
// `arvados-server install` don't install it, and this test skips if
s.files = ["bin/arvados-login-sync", "agpl-3.0.txt"]
s.executables << "arvados-login-sync"
s.required_ruby_version = '>= 2.1.0'
- s.add_runtime_dependency 'arvados', '~> 1.3.0', '>= 1.3.0'
+ s.add_runtime_dependency 'arvados', '>= 1.3.3.20190320201707'
s.add_runtime_dependency 'launchy', '< 2.5'
# arvados-google-api-client 0.8.7.2 is incompatible with faraday 0.16.2
s.add_dependency('faraday', '< 0.16')
tc := boot.NewTestCluster(
filepath.Join(cwd, "..", ".."),
id, cfg, "127.0.0."+id[3:], c.Log)
+ tc.Super.NoWorkbench1 = true
+ tc.Start()
s.testClusters[id] = tc
- s.testClusters[id].Start()
}
for _, tc := range s.testClusters {
ok := tc.WaitReady()