Merge branch '17389-keepproxy-storage-classes-confirmed'
author Lucas Di Pentima <lucas.dipentima@curii.com>
Wed, 23 Jun 2021 18:12:39 +0000 (15:12 -0300)
committer Lucas Di Pentima <lucas.dipentima@curii.com>
Wed, 23 Jun 2021 18:12:39 +0000 (15:12 -0300)
Closes #17389

Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima@curii.com>

25 files changed:
cmd/arvados-client/cmd.go
go.mod
go.sum
lib/cloud/ec2/ec2.go
lib/cloud/ec2/ec2_test.go
lib/cmd/cmd.go
lib/costanalyzer/cmd.go
lib/costanalyzer/costanalyzer.go
lib/deduplicationreport/command.go
lib/diagnostics/cmd.go [new file with mode: 0644]
sdk/cwl/arvados_cwl/arvtool.py
sdk/cwl/arvados_cwl/fsaccess.py
sdk/cwl/tests/17801-runtime-outdir.cwl [new file with mode: 0644]
sdk/cwl/tests/arvados-tests.yml
sdk/cwl/tests/test_submit.py
sdk/go/arvados/fs_base.go
sdk/go/arvados/fs_collection.go
sdk/go/arvados/fs_site.go
sdk/go/arvados/fs_site_test.go
sdk/python/arvados/commands/put.py
sdk/python/tests/test_arv_put.py
services/keep-web/handler_test.go
services/keep-web/s3.go
services/keep-web/s3_test.go
tools/salt-install/provision.sh

diff --git a/cmd/arvados-client/cmd.go b/cmd/arvados-client/cmd.go
index aefcce79a45a4eb65fbbed209801d684da6b4923..cb15462119d4d1d3368382e1cbbcceafba40464f 100644 (file)
--- a/cmd/arvados-client/cmd.go
+++ b/cmd/arvados-client/cmd.go
@@ -11,6 +11,7 @@ import (
        "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/lib/costanalyzer"
        "git.arvados.org/arvados.git/lib/deduplicationreport"
+       "git.arvados.org/arvados.git/lib/diagnostics"
        "git.arvados.org/arvados.git/lib/mount"
 )
 
@@ -59,6 +60,7 @@ var (
                "costanalyzer":         costanalyzer.Command,
                "shell":                shellCommand{},
                "connect-ssh":          connectSSHCommand{},
+               "diagnostics":          diagnostics.Command{},
        })
 )
 
diff --git a/go.mod b/go.mod
index 0ff679a576e232d318bdc14873069115036967bc..b70f6f3b476789f69f1845bffb6bfd4fcac80885 100644 (file)
--- a/go.mod
+++ b/go.mod
@@ -55,12 +55,14 @@ require (
        github.com/prometheus/common v0.7.0
        github.com/satori/go.uuid v1.2.1-0.20180103174451-36e9d2ebbde5 // indirect
        github.com/sergi/go-diff v1.0.0 // indirect
-       github.com/sirupsen/logrus v1.4.2
+       github.com/sirupsen/logrus v1.8.1
        github.com/src-d/gcfg v1.3.0 // indirect
        github.com/xanzy/ssh-agent v0.1.0 // indirect
        golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
        golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
        golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
+       golang.org/x/sys v0.0.0-20210603125802-9665404d3644
+       golang.org/x/tools v0.1.0 // indirect
        golang.org/x/sys v0.0.0-20210510120138-977fb7262007
        golang.org/x/tools v0.1.2 // indirect
        google.golang.org/api v0.13.0
diff --git a/go.sum b/go.sum
index c28ab4624038a143a4d9adf8a9e2593d123183c0..1fd37ed11ce411e625518eb3189c225e80b74616 100644 (file)
--- a/go.sum
+++ b/go.sum
@@ -234,6 +234,8 @@ github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
 github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
@@ -309,10 +311,13 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/lib/cloud/ec2/ec2.go b/lib/cloud/ec2/ec2.go
index 071c95006c9b305b1f47737bbb6eab588961785c..269a7d8def59a1e38603633691d657aef29d8e81 100644 (file)
--- a/lib/cloud/ec2/ec2.go
+++ b/lib/cloud/ec2/ec2.go
@@ -20,6 +20,7 @@ import (
        "git.arvados.org/arvados.git/lib/cloud"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
        "github.com/aws/aws-sdk-go/aws/ec2metadata"
@@ -349,6 +350,31 @@ func (err rateLimitError) EarliestRetry() time.Time {
        return err.earliestRetry
 }
 
+var isCodeCapacity = map[string]bool{
+       "InsufficientInstanceCapacity": true,
+       "VcpuLimitExceeded":            true,
+       "MaxSpotInstanceCountExceeded": true,
+}
+
+// isErrorCapacity returns whether the error is an EC2 capacity/quota error, based on its code.
+// Returns false if error is nil.
+func isErrorCapacity(err error) bool {
+       if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+               if _, ok := isCodeCapacity[aerr.Code()]; ok {
+                       return true
+               }
+       }
+       return false
+}
+
+type ec2QuotaError struct {
+       error
+}
+
+func (er *ec2QuotaError) IsQuotaError() bool {
+       return true
+}
+
 func wrapError(err error, throttleValue *atomic.Value) error {
        if request.IsErrorThrottle(err) {
                // Back off exponentially until an upstream call
@@ -362,6 +388,8 @@ func wrapError(err error, throttleValue *atomic.Value) error {
                }
                throttleValue.Store(d)
                return rateLimitError{error: err, earliestRetry: time.Now().Add(d)}
+       } else if isErrorCapacity(err) {
+               return &ec2QuotaError{err}
        } else if err != nil {
                throttleValue.Store(time.Duration(0))
                return err
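
The hunks above make wrapError return a rateLimitError for AWS throttling responses and an ec2QuotaError for capacity-related error codes, so callers can treat the two cases differently. Below is a minimal, standalone sketch of that caller-side distinction; the two local interfaces are assumptions that mirror the method sets exercised by wrapError and by the test in ec2_test.go (EarliestRetry, IsQuotaError), not the actual cloud package definitions.

package main

import (
	"errors"
	"fmt"
	"time"
)

// Assumed stand-ins for the cloud.RateLimitError and cloud.QuotaError
// interfaces asserted in the test below.
type rateLimiter interface {
	EarliestRetry() time.Time
}

type quotaErr interface {
	IsQuotaError() bool
}

// describe classifies an error roughly the way a dispatcher might after
// a failed instance-create call.
func describe(err error) string {
	if rl, ok := err.(rateLimiter); ok {
		return fmt.Sprintf("rate limited; retry after %v", rl.EarliestRetry())
	}
	if qe, ok := err.(quotaErr); ok && qe.IsQuotaError() {
		return "capacity/quota exceeded; pause creating new instances"
	}
	return "other error: " + err.Error()
}

func main() {
	fmt.Println(describe(errors.New("boom")))
}
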
diff --git a/lib/cloud/ec2/ec2_test.go b/lib/cloud/ec2/ec2_test.go
index e7319a0cb66d7fe2e0132c9092f88ab5d5714a28..3cd238ded5a0035adaec66ad4d5c32b9c3fd816a 100644 (file)
--- a/lib/cloud/ec2/ec2_test.go
+++ b/lib/cloud/ec2/ec2_test.go
@@ -25,6 +25,7 @@ package ec2
 import (
        "encoding/json"
        "flag"
+       "sync/atomic"
        "testing"
 
        "git.arvados.org/arvados.git/lib/cloud"
@@ -32,6 +33,7 @@ import (
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/config"
        "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/service/ec2"
        "github.com/sirupsen/logrus"
        check "gopkg.in/check.v1"
@@ -246,4 +248,14 @@ func (*EC2InstanceSetSuite) TestDestroyInstances(c *check.C) {
        }
 }
 
-var TestRateLimitErrorInterface cloud.RateLimitError = rateLimitError{}
+func (*EC2InstanceSetSuite) TestWrapError(c *check.C) {
+       retryError := awserr.New("Throttling", "", nil)
+       wrapped := wrapError(retryError, &atomic.Value{})
+       _, ok := wrapped.(cloud.RateLimitError)
+       c.Check(ok, check.Equals, true)
+
+       quotaError := awserr.New("InsufficientInstanceCapacity", "", nil)
+       wrapped = wrapError(quotaError, nil)
+       _, ok = wrapped.(cloud.QuotaError)
+       c.Check(ok, check.Equals, true)
+}
diff --git a/lib/cmd/cmd.go b/lib/cmd/cmd.go
index b7d918739b86de347b0960e785bbd27dea477fba..63d7576b4cc1964b11b29a8ac199730660325f2e 100644 (file)
--- a/lib/cmd/cmd.go
+++ b/lib/cmd/cmd.go
@@ -16,6 +16,8 @@ import (
        "runtime"
        "sort"
        "strings"
+
+       "github.com/sirupsen/logrus"
 )
 
 type Handler interface {
@@ -153,3 +155,9 @@ func SubcommandToFront(args []string, flagset FlagSet) []string {
        copy(newargs[flagargs+1:], args[flagargs+1:])
        return newargs
 }
+
+type NoPrefixFormatter struct{}
+
+func (NoPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+       return []byte(entry.Message + "\n"), nil
+}
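
NoPrefixFormatter moves into lib/cmd so that costanalyzer and deduplicationreport (below) can share it instead of each defining their own copy. A small runnable sketch of its effect, using only the logrus API and a local copy of the same formatter:

package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

// noPrefixFormatter mirrors the formatter added above: it drops the usual
// timestamp/level prefix and prints only the message text.
type noPrefixFormatter struct{}

func (noPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	return []byte(entry.Message + "\n"), nil
}

func main() {
	logger := logrus.New()
	logger.SetOutput(os.Stderr)
	logger.SetFormatter(noPrefixFormatter{})
	// Prints "Nothing to do!" instead of `INFO[0000] Nothing to do!`.
	logger.Info("Nothing to do!")
}
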
diff --git a/lib/costanalyzer/cmd.go b/lib/costanalyzer/cmd.go
index 525ec619b5e4aeeb0b4e271704f13385a41bcefd..6065ad2c0b2cb934607826d8500215bbabf0d868 100644 (file)
--- a/lib/costanalyzer/cmd.go
+++ b/lib/costanalyzer/cmd.go
@@ -8,8 +8,8 @@ import (
        "io"
        "time"
 
+       "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "github.com/sirupsen/logrus"
 )
 
 var Command = command{}
@@ -22,24 +22,17 @@ type command struct {
        end        time.Time
 }
 
-type NoPrefixFormatter struct{}
-
-func (f *NoPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {
-       return []byte(entry.Message), nil
-}
-
 // RunCommand implements the subcommand "costanalyzer <collection> <collection> ..."
 func (c command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
        var err error
        logger := ctxlog.New(stderr, "text", "info")
+       logger.SetFormatter(cmd.NoPrefixFormatter{})
        defer func() {
                if err != nil {
-                       logger.Error("\n" + err.Error() + "\n")
+                       logger.Error("\n" + err.Error())
                }
        }()
 
-       logger.SetFormatter(new(NoPrefixFormatter))
-
        exitcode, err := c.costAnalyzer(prog, args, logger, stdout, stderr)
 
        return exitcode
diff --git a/lib/costanalyzer/costanalyzer.go b/lib/costanalyzer/costanalyzer.go
index edaaa5bd178243f2c9044d32398c5ddccc67b5dc..dfe3d584cec7a7f07d16ec2e3c267e760201d8dc 100644 (file)
--- a/lib/costanalyzer/costanalyzer.go
+++ b/lib/costanalyzer/costanalyzer.go
@@ -9,9 +9,6 @@ import (
        "errors"
        "flag"
        "fmt"
-       "git.arvados.org/arvados.git/sdk/go/arvados"
-       "git.arvados.org/arvados.git/sdk/go/arvadosclient"
-       "git.arvados.org/arvados.git/sdk/go/keepclient"
        "io"
        "io/ioutil"
        "net/http"
@@ -20,6 +17,9 @@ import (
        "strings"
        "time"
 
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadosclient"
+       "git.arvados.org/arvados.git/sdk/go/keepclient"
        "github.com/sirupsen/logrus"
 )
 
@@ -169,7 +169,7 @@ Options:
        }
        logger.SetLevel(lvl)
        if !c.cache {
-               logger.Debug("Caching disabled\n")
+               logger.Debug("Caching disabled")
        }
        return
 }
@@ -249,12 +249,12 @@ func loadCachedObject(logger *logrus.Logger, file string, uuid string, object in
        case *arvados.ContainerRequest:
                if v.State == arvados.ContainerRequestStateFinal {
                        reload = false
-                       logger.Debugf("Loaded object %s from local cache (%s)\n", uuid, file)
+                       logger.Debugf("Loaded object %s from local cache (%s)", uuid, file)
                }
        case *arvados.Container:
                if v.State == arvados.ContainerStateComplete || v.State == arvados.ContainerStateCancelled {
                        reload = false
-                       logger.Debugf("Loaded object %s from local cache (%s)\n", uuid, file)
+                       logger.Debugf("Loaded object %s from local cache (%s)", uuid, file)
                }
        }
        return
@@ -384,7 +384,7 @@ func handleProject(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
                return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
        }
        if value, ok := childCrs["items"]; ok {
-               logger.Infof("Collecting top level container requests in project %s\n", uuid)
+               logger.Infof("Collecting top level container requests in project %s", uuid)
                items := value.([]interface{})
                for _, item := range items {
                        itemMap := item.(map[string]interface{})
@@ -397,7 +397,7 @@ func handleProject(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
                        }
                }
        } else {
-               logger.Infof("No top level container requests found in project %s\n", uuid)
+               logger.Infof("No top level container requests found in project %s", uuid)
        }
        return
 }
@@ -410,7 +410,7 @@ func generateCrCsv(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
        var tmpCsv string
        var tmpTotalCost float64
        var totalCost float64
-       fmt.Printf("Processing %s\n", uuid)
+       logger.Debugf("Processing %s", uuid)
 
        var crUUID = uuid
        if strings.Contains(uuid, "-4zz18-") {
@@ -438,7 +438,7 @@ func generateCrCsv(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
        }
        if len(cr.ContainerUUID) == 0 {
                // Nothing to do! E.g. a CR in 'Uncommitted' state.
-               logger.Infof("No container associated with container request %s, skipping\n", crUUID)
+               logger.Infof("No container associated with container request %s, skipping", crUUID)
                return nil, nil
        }
        var container arvados.Container
@@ -473,14 +473,20 @@ func generateCrCsv(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
                return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
        }
        logger.Infof("Collecting child containers for container request %s (%s)", crUUID, container.FinishedAt)
-       for _, cr2 := range childCrs.Items {
-               logger.Info(".")
+       progressTicker := time.NewTicker(5 * time.Second)
+       defer progressTicker.Stop()
+       for i, cr2 := range childCrs.Items {
+               select {
+               case <-progressTicker.C:
+                       logger.Infof("... %d of %d", i+1, len(childCrs.Items))
+               default:
+               }
                node, err := getNode(arv, ac, kc, cr2)
                if err != nil {
                        logger.Errorf("Skipping container request %s: error getting node %s: %s", cr2.UUID, cr2.UUID, err)
                        continue
                }
-               logger.Debug("\nChild container: " + cr2.ContainerUUID + "\n")
+               logger.Debug("Child container: " + cr2.ContainerUUID)
                var c2 arvados.Container
                err = loadObject(logger, ac, cr.UUID, cr2.ContainerUUID, cache, &c2)
                if err != nil {
@@ -491,7 +497,7 @@ func generateCrCsv(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
                csv += tmpCsv
                totalCost += tmpTotalCost
        }
-       logger.Info(" done\n")
+       logger.Debug("Done collecting child containers")
 
        csv += "TOTAL,,,,,,,,," + strconv.FormatFloat(totalCost, 'f', 8, 64) + "\n"
 
@@ -502,7 +508,7 @@ func generateCrCsv(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
                if err != nil {
                        return nil, fmt.Errorf("error writing file with path %s: %s", fName, err.Error())
                }
-               logger.Infof("\nUUID report in %s\n\n", fName)
+               logger.Infof("\nUUID report in %s", fName)
        }
 
        return
@@ -562,7 +568,7 @@ func (c *command) costAnalyzer(prog string, args []string, logger *logrus.Logger
 
                                err := ac.RequestAndDecode(&list, "GET", "arvados/v1/container_requests", nil, params)
                                if err != nil {
-                                       logger.Errorf("Error getting container request list from Arvados API: %s\n", err)
+                                       logger.Errorf("Error getting container request list from Arvados API: %s", err)
                                        break
                                }
                                if len(list.Items) == 0 {
@@ -581,7 +587,7 @@ func (c *command) costAnalyzer(prog string, args []string, logger *logrus.Logger
        cost := make(map[string]float64)
 
        for uuid := range uuidChannel {
-               fmt.Printf("Considering %s\n", uuid)
+               logger.Debugf("Considering %s", uuid)
                if strings.Contains(uuid, "-j7d0g-") {
                        // This is a project (group)
                        cost, err = handleProject(logger, uuid, arv, ac, kc, c.resultsDir, c.cache)
@@ -611,14 +617,14 @@ func (c *command) costAnalyzer(prog string, args []string, logger *logrus.Logger
                        // keep going.
                        logger.Errorf("cost analysis is not supported for the 'Home' project: %s", uuid)
                } else {
-                       logger.Errorf("this argument does not look like a uuid: %s\n", uuid)
+                       logger.Errorf("this argument does not look like a uuid: %s", uuid)
                        exitcode = 3
                        return
                }
        }
 
        if len(cost) == 0 {
-               logger.Info("Nothing to do!\n")
+               logger.Info("Nothing to do!")
                return
        }
 
@@ -646,7 +652,7 @@ func (c *command) costAnalyzer(prog string, args []string, logger *logrus.Logger
                        exitcode = 1
                        return
                }
-               logger.Infof("Aggregate cost accounting for all supplied uuids in %s\n", aFile)
+               logger.Infof("Aggregate cost accounting for all supplied uuids in %s", aFile)
        }
 
        // Output the total dollar amount on stdout
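
The child-container loop above replaces the old once-per-item "." log output with a progress line emitted at most every five seconds, via a non-blocking read from a ticker channel. A standalone sketch of that pattern (the 50-item slice and per-item sleep are placeholders for real work):

package main

import (
	"fmt"
	"time"
)

func main() {
	items := make([]int, 50)
	progressTicker := time.NewTicker(5 * time.Second)
	defer progressTicker.Stop()
	for i := range items {
		select {
		case <-progressTicker.C:
			// A tick is pending: report progress, at most once per 5s.
			fmt.Printf("... %d of %d\n", i+1, len(items))
		default:
			// No tick pending: carry on without blocking.
		}
		time.Sleep(200 * time.Millisecond) // stand-in for per-item work
	}
	fmt.Println("done")
}
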
diff --git a/lib/deduplicationreport/command.go b/lib/deduplicationreport/command.go
index 93cdb61d3b86a8bae2dce3dee1a3be2e5fbacbc0..39bbbc9498c2b10055dff1d24227275ec6ae1fba 100644 (file)
--- a/lib/deduplicationreport/command.go
+++ b/lib/deduplicationreport/command.go
@@ -7,20 +7,14 @@ package deduplicationreport
 import (
        "io"
 
+       "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "github.com/sirupsen/logrus"
 )
 
 var Command command
 
 type command struct{}
 
-type NoPrefixFormatter struct{}
-
-func (f *NoPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {
-       return []byte(entry.Message + "\n"), nil
-}
-
 // RunCommand implements the subcommand "deduplication-report <collection> <collection> ..."
 func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
        var err error
@@ -31,7 +25,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
                }
        }()
 
-       logger.SetFormatter(new(NoPrefixFormatter))
+       logger.SetFormatter(cmd.NoPrefixFormatter{})
 
        exitcode := report(prog, args, logger, stdout, stderr)
 
diff --git a/lib/diagnostics/cmd.go b/lib/diagnostics/cmd.go
new file mode 100644 (file)
index 0000000..b0241b3
--- /dev/null
+++ b/lib/diagnostics/cmd.go
@@ -0,0 +1,612 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package diagnostics
+
+import (
+       "bytes"
+       "context"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net"
+       "net/http"
+       "net/url"
+       "strings"
+       "time"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/sirupsen/logrus"
+)
+
+type Command struct{}
+
+func (cmd Command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       var diag diagnoser
+       f := flag.NewFlagSet(prog, flag.ContinueOnError)
+       f.StringVar(&diag.projectName, "project-name", "scratch area for diagnostics", "name of project to find/create in home project and use for temporary/test objects")
+       f.StringVar(&diag.logLevel, "log-level", "info", "logging level (debug, info, warning, error)")
+       f.BoolVar(&diag.checkInternal, "internal-client", false, "check that this host is considered an \"internal\" client")
+       f.BoolVar(&diag.checkExternal, "external-client", false, "check that this host is considered an \"external\" client")
+       f.IntVar(&diag.priority, "priority", 500, "priority for test container (1..1000, or 0 to skip)")
+       f.DurationVar(&diag.timeout, "timeout", 10*time.Second, "timeout for http requests")
+       err := f.Parse(args)
+       if err == flag.ErrHelp {
+               return 0
+       } else if err != nil {
+               fmt.Fprintln(stderr, err)
+               return 2
+       }
+       diag.logger = ctxlog.New(stdout, "text", diag.logLevel)
+       diag.logger.SetFormatter(&logrus.TextFormatter{DisableTimestamp: true, DisableLevelTruncation: true, PadLevelText: true})
+       diag.runtests()
+       if len(diag.errors) == 0 {
+               diag.logger.Info("--- no errors ---")
+               return 0
+       } else {
+               if diag.logger.Level > logrus.ErrorLevel {
+                       fmt.Fprint(stdout, "\n--- cut here --- error summary ---\n\n")
+                       for _, e := range diag.errors {
+                               diag.logger.Error(e)
+                       }
+               }
+               return 1
+       }
+}
+
+type diagnoser struct {
+       stdout        io.Writer
+       stderr        io.Writer
+       logLevel      string
+       priority      int
+       projectName   string
+       checkInternal bool
+       checkExternal bool
+       timeout       time.Duration
+       logger        *logrus.Logger
+       errors        []string
+       done          map[int]bool
+}
+
+func (diag *diagnoser) debugf(f string, args ...interface{}) {
+       diag.logger.Debugf("  ... "+f, args...)
+}
+
+func (diag *diagnoser) infof(f string, args ...interface{}) {
+       diag.logger.Infof("  ... "+f, args...)
+}
+
+func (diag *diagnoser) warnf(f string, args ...interface{}) {
+       diag.logger.Warnf("  ... "+f, args...)
+}
+
+func (diag *diagnoser) errorf(f string, args ...interface{}) {
+       diag.logger.Errorf(f, args...)
+       diag.errors = append(diag.errors, fmt.Sprintf(f, args...))
+}
+
+// Run the given func, logging appropriate messages before and after,
+// adding timing info, etc.
+//
+// The id argument should be unique among tests, and shouldn't change
+// when other tests are added/removed.
+func (diag *diagnoser) dotest(id int, title string, fn func() error) {
+       if diag.done == nil {
+               diag.done = map[int]bool{}
+       } else if diag.done[id] {
+               diag.errorf("(bug) reused test id %d", id)
+       }
+       diag.done[id] = true
+
+       diag.logger.Infof("%4d: %s", id, title)
+       t0 := time.Now()
+       err := fn()
+       elapsed := fmt.Sprintf("%d ms", time.Now().Sub(t0)/time.Millisecond)
+       if err != nil {
+               diag.errorf("%4d: %s (%s): %s", id, title, elapsed, err)
+       } else {
+               diag.logger.Debugf("%4d: %s (%s): ok", id, title, elapsed)
+       }
+}
+
+func (diag *diagnoser) runtests() {
+       client := arvados.NewClientFromEnv()
+
+       if client.APIHost == "" || client.AuthToken == "" {
+               diag.errorf("ARVADOS_API_HOST and ARVADOS_API_TOKEN environment variables are not set -- aborting without running any tests")
+               return
+       }
+
+       var dd arvados.DiscoveryDocument
+       ddpath := "discovery/v1/apis/arvados/v1/rest"
+       diag.dotest(10, fmt.Sprintf("getting discovery document from https://%s/%s", client.APIHost, ddpath), func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               err := client.RequestAndDecodeContext(ctx, &dd, "GET", ddpath, nil, nil)
+               if err != nil {
+                       return err
+               }
+               diag.debugf("BlobSignatureTTL = %d", dd.BlobSignatureTTL)
+               return nil
+       })
+
+       var cluster arvados.Cluster
+       cfgpath := "arvados/v1/config"
+       diag.dotest(20, fmt.Sprintf("getting exported config from https://%s/%s", client.APIHost, cfgpath), func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               err := client.RequestAndDecodeContext(ctx, &cluster, "GET", cfgpath, nil, nil)
+               if err != nil {
+                       return err
+               }
+               diag.debugf("Collections.BlobSigning = %v", cluster.Collections.BlobSigning)
+               return nil
+       })
+
+       var user arvados.User
+       diag.dotest(30, "getting current user record", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               err := client.RequestAndDecodeContext(ctx, &user, "GET", "arvados/v1/users/current", nil, nil)
+               if err != nil {
+                       return err
+               }
+               diag.debugf("user uuid = %s", user.UUID)
+               return nil
+       })
+
+       // uncomment to create some spurious errors
+       // cluster.Services.WebDAVDownload.ExternalURL.Host = "0.0.0.0:9"
+
+       // TODO: detect routing errors here, like finding wb2 at the
+       // wb1 address.
+       for i, svc := range []*arvados.Service{
+               &cluster.Services.Keepproxy,
+               &cluster.Services.WebDAV,
+               &cluster.Services.WebDAVDownload,
+               &cluster.Services.Websocket,
+               &cluster.Services.Workbench1,
+               &cluster.Services.Workbench2,
+       } {
+               diag.dotest(40+i, fmt.Sprintf("connecting to service endpoint %s", svc.ExternalURL), func() error {
+                       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+                       defer cancel()
+                       u := svc.ExternalURL
+                       if strings.HasPrefix(u.Scheme, "ws") {
+                               // We can do a real websocket test elsewhere,
+                               // but for now we'll just check the https
+                               // connection.
+                               u.Scheme = "http" + u.Scheme[2:]
+                       }
+                       if svc == &cluster.Services.WebDAV && strings.HasPrefix(u.Host, "*") {
+                               u.Host = "d41d8cd98f00b204e9800998ecf8427e-0" + u.Host[1:]
+                       }
+                       req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+                       if err != nil {
+                               return err
+                       }
+                       resp, err := http.DefaultClient.Do(req)
+                       if err != nil {
+                               return err
+                       }
+                       resp.Body.Close()
+                       return nil
+               })
+       }
+
+       for i, url := range []string{
+               cluster.Services.Controller.ExternalURL.String(),
+               cluster.Services.Keepproxy.ExternalURL.String() + "d41d8cd98f00b204e9800998ecf8427e+0",
+               cluster.Services.WebDAVDownload.ExternalURL.String(),
+       } {
+               diag.dotest(50+i, fmt.Sprintf("checking CORS headers at %s", url), func() error {
+                       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+                       defer cancel()
+                       req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+                       if err != nil {
+                               return err
+                       }
+                       req.Header.Set("Origin", "https://example.com")
+                       resp, err := http.DefaultClient.Do(req)
+                       if err != nil {
+                               return err
+                       }
+                       if hdr := resp.Header.Get("Access-Control-Allow-Origin"); hdr != "*" {
+                               return fmt.Errorf("expected \"Access-Control-Allow-Origin: *\", got %q", hdr)
+                       }
+                       return nil
+               })
+       }
+
+       var keeplist arvados.KeepServiceList
+       diag.dotest(60, "checking internal/external client detection", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               err := client.RequestAndDecodeContext(ctx, &keeplist, "GET", "arvados/v1/keep_services/accessible", nil, arvados.ListOptions{Limit: 999999})
+               if err != nil {
+                       return fmt.Errorf("error getting keep services list: %s", err)
+               } else if len(keeplist.Items) == 0 {
+                       return fmt.Errorf("controller did not return any keep services")
+               }
+               found := map[string]int{}
+               for _, ks := range keeplist.Items {
+                       found[ks.ServiceType]++
+               }
+               isInternal := found["proxy"] == 0 && len(keeplist.Items) > 0
+               isExternal := found["proxy"] > 0 && found["proxy"] == len(keeplist.Items)
+               if isExternal {
+                       diag.debugf("controller returned only proxy services, this host is treated as \"external\"")
+               } else if isInternal {
+                       diag.debugf("controller returned only non-proxy services, this host is treated as \"internal\"")
+               }
+               if (diag.checkInternal && !isInternal) || (diag.checkExternal && !isExternal) {
+                       return fmt.Errorf("expecting internal=%v external=%v, but found internal=%v external=%v", diag.checkInternal, diag.checkExternal, isInternal, isExternal)
+               }
+               return nil
+       })
+
+       for i, ks := range keeplist.Items {
+               u := url.URL{
+                       Scheme: "http",
+                       Host:   net.JoinHostPort(ks.ServiceHost, fmt.Sprintf("%d", ks.ServicePort)),
+                       Path:   "/",
+               }
+               if ks.ServiceSSLFlag {
+                       u.Scheme = "https"
+               }
+               diag.dotest(61+i, fmt.Sprintf("reading+writing via keep service at %s", u.String()), func() error {
+                       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+                       defer cancel()
+                       req, err := http.NewRequestWithContext(ctx, "PUT", u.String()+"d41d8cd98f00b204e9800998ecf8427e", nil)
+                       if err != nil {
+                               return err
+                       }
+                       req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+                       resp, err := http.DefaultClient.Do(req)
+                       if err != nil {
+                               return err
+                       }
+                       defer resp.Body.Close()
+                       body, err := ioutil.ReadAll(resp.Body)
+                       if err != nil {
+                               return fmt.Errorf("reading response body: %s", err)
+                       }
+                       loc := strings.TrimSpace(string(body))
+                       if !strings.HasPrefix(loc, "d41d8") {
+                               return fmt.Errorf("unexpected response from write: %q", body)
+                       }
+
+                       req, err = http.NewRequestWithContext(ctx, "GET", u.String()+loc, nil)
+                       if err != nil {
+                               return err
+                       }
+                       req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+                       resp, err = http.DefaultClient.Do(req)
+                       if err != nil {
+                               return err
+                       }
+                       defer resp.Body.Close()
+                       body, err = ioutil.ReadAll(resp.Body)
+                       if err != nil {
+                               return fmt.Errorf("reading response body: %s", err)
+                       }
+                       if len(body) != 0 {
+                               return fmt.Errorf("unexpected response from read: %q", body)
+                       }
+
+                       return nil
+               })
+       }
+
+       var project arvados.Group
+       diag.dotest(80, fmt.Sprintf("finding/creating %q project", diag.projectName), func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               var grplist arvados.GroupList
+               err := client.RequestAndDecodeContext(ctx, &grplist, "GET", "arvados/v1/groups", nil, arvados.ListOptions{
+                       Filters: []arvados.Filter{
+                               {"name", "=", diag.projectName},
+                               {"group_class", "=", "project"},
+                               {"owner_uuid", "=", user.UUID}},
+                       Limit: 999999})
+               if err != nil {
+                       return fmt.Errorf("list groups: %s", err)
+               }
+               if len(grplist.Items) > 0 {
+                       project = grplist.Items[0]
+                       diag.debugf("using existing project, uuid = %s", project.UUID)
+                       return nil
+               }
+               diag.debugf("list groups: ok, no results")
+               err = client.RequestAndDecodeContext(ctx, &project, "POST", "arvados/v1/groups", nil, map[string]interface{}{"group": map[string]interface{}{
+                       "name":        diag.projectName,
+                       "group_class": "project",
+               }})
+               if err != nil {
+                       return fmt.Errorf("create project: %s", err)
+               }
+               diag.debugf("created project, uuid = %s", project.UUID)
+               return nil
+       })
+
+       var collection arvados.Collection
+       diag.dotest(90, "creating temporary collection", func() error {
+               if project.UUID == "" {
+                       return fmt.Errorf("skipping, no project to work in")
+               }
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               err := client.RequestAndDecodeContext(ctx, &collection, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+                       "ensure_unique_name": true,
+                       "collection": map[string]interface{}{
+                               "owner_uuid": project.UUID,
+                               "name":       "test collection",
+                               "trash_at":   time.Now().Add(time.Hour)}})
+               if err != nil {
+                       return err
+               }
+               diag.debugf("ok, uuid = %s", collection.UUID)
+               return nil
+       })
+
+       if collection.UUID != "" {
+               defer func() {
+                       diag.dotest(9990, "deleting temporary collection", func() error {
+                               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+                               defer cancel()
+                               return client.RequestAndDecodeContext(ctx, nil, "DELETE", "arvados/v1/collections/"+collection.UUID, nil, nil)
+                       })
+               }()
+       }
+
+       diag.dotest(100, "uploading file via webdav", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               if collection.UUID == "" {
+                       return fmt.Errorf("skipping, no test collection")
+               }
+               req, err := http.NewRequestWithContext(ctx, "PUT", cluster.Services.WebDAVDownload.ExternalURL.String()+"c="+collection.UUID+"/testfile", bytes.NewBufferString("testfiledata"))
+               if err != nil {
+                       return fmt.Errorf("BUG? http.NewRequest: %s", err)
+               }
+               req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+               resp, err := http.DefaultClient.Do(req)
+               if err != nil {
+                       return fmt.Errorf("error performing http request: %s", err)
+               }
+               resp.Body.Close()
+               if resp.StatusCode != http.StatusCreated {
+                       return fmt.Errorf("status %s", resp.Status)
+               }
+               diag.debugf("ok, status %s", resp.Status)
+               err = client.RequestAndDecodeContext(ctx, &collection, "GET", "arvados/v1/collections/"+collection.UUID, nil, nil)
+               if err != nil {
+                       return fmt.Errorf("get updated collection: %s", err)
+               }
+               diag.debugf("ok, pdh %s", collection.PortableDataHash)
+               return nil
+       })
+
+       davurl := cluster.Services.WebDAV.ExternalURL
+       diag.dotest(110, fmt.Sprintf("checking WebDAV ExternalURL wildcard (%s)", davurl), func() error {
+               if davurl.Host == "" {
+                       return fmt.Errorf("host missing - content previews will not work")
+               }
+               if !strings.HasPrefix(davurl.Host, "*--") && !strings.HasPrefix(davurl.Host, "*.") && !cluster.Collections.TrustAllContent {
+                       diag.warnf("WebDAV ExternalURL has no leading wildcard and TrustAllContent==false - content previews will not work")
+               }
+               return nil
+       })
+
+       for i, trial := range []struct {
+               needcoll bool
+               status   int
+               fileurl  string
+       }{
+               {false, http.StatusNotFound, strings.Replace(davurl.String(), "*", "d41d8cd98f00b204e9800998ecf8427e-0", 1) + "foo"},
+               {false, http.StatusNotFound, strings.Replace(davurl.String(), "*", "d41d8cd98f00b204e9800998ecf8427e-0", 1) + "testfile"},
+               {false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=d41d8cd98f00b204e9800998ecf8427e+0/_/foo"},
+               {false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=d41d8cd98f00b204e9800998ecf8427e+0/_/testfile"},
+               {true, http.StatusOK, strings.Replace(davurl.String(), "*", strings.Replace(collection.PortableDataHash, "+", "-", -1), 1) + "testfile"},
+               {true, http.StatusOK, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=" + collection.UUID + "/_/testfile"},
+       } {
+               diag.dotest(120+i, fmt.Sprintf("downloading from webdav (%s)", trial.fileurl), func() error {
+                       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+                       defer cancel()
+                       if trial.needcoll && collection.UUID == "" {
+                               return fmt.Errorf("skipping, no test collection")
+                       }
+                       req, err := http.NewRequestWithContext(ctx, "GET", trial.fileurl, nil)
+                       if err != nil {
+                               return err
+                       }
+                       req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+                       resp, err := http.DefaultClient.Do(req)
+                       if err != nil {
+                               return err
+                       }
+                       defer resp.Body.Close()
+                       body, err := ioutil.ReadAll(resp.Body)
+                       if err != nil {
+                               return fmt.Errorf("reading response: %s", err)
+                       }
+                       if resp.StatusCode != trial.status {
+                               return fmt.Errorf("unexpected response status: %s", resp.Status)
+                       }
+                       if trial.status == http.StatusOK && string(body) != "testfiledata" {
+                               return fmt.Errorf("unexpected response content: %q", body)
+                       }
+                       return nil
+               })
+       }
+
+       var vm arvados.VirtualMachine
+       diag.dotest(130, "getting list of virtual machines", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               var vmlist arvados.VirtualMachineList
+               err := client.RequestAndDecodeContext(ctx, &vmlist, "GET", "arvados/v1/virtual_machines", nil, arvados.ListOptions{Limit: 999999})
+               if err != nil {
+                       return err
+               }
+               if len(vmlist.Items) < 1 {
+                       return fmt.Errorf("no VMs found")
+               }
+               vm = vmlist.Items[0]
+               return nil
+       })
+
+       diag.dotest(140, "getting workbench1 webshell page", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               if vm.UUID == "" {
+                       return fmt.Errorf("skipping, no vm available")
+               }
+               webshelltermurl := cluster.Services.Workbench1.ExternalURL.String() + "virtual_machines/" + vm.UUID + "/webshell/testusername"
+               diag.debugf("url %s", webshelltermurl)
+               req, err := http.NewRequestWithContext(ctx, "GET", webshelltermurl, nil)
+               if err != nil {
+                       return err
+               }
+               req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+               resp, err := http.DefaultClient.Do(req)
+               if err != nil {
+                       return err
+               }
+               defer resp.Body.Close()
+               body, err := ioutil.ReadAll(resp.Body)
+               if err != nil {
+                       return fmt.Errorf("reading response: %s", err)
+               }
+               if resp.StatusCode != http.StatusOK {
+                       return fmt.Errorf("unexpected response status: %s %q", resp.Status, body)
+               }
+               return nil
+       })
+
+       diag.dotest(150, "connecting to webshell service", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               if vm.UUID == "" {
+                       return fmt.Errorf("skipping, no vm available")
+               }
+               u := cluster.Services.WebShell.ExternalURL
+               webshellurl := u.String() + vm.Hostname + "?"
+               if strings.HasPrefix(u.Host, "*") {
+                       u.Host = vm.Hostname + u.Host[1:]
+                       webshellurl = u.String() + "?"
+               }
+               diag.debugf("url %s", webshellurl)
+               req, err := http.NewRequestWithContext(ctx, "POST", webshellurl, bytes.NewBufferString(url.Values{
+                       "width":   {"80"},
+                       "height":  {"25"},
+                       "session": {"xyzzy"},
+                       "rooturl": {webshellurl},
+               }.Encode()))
+               if err != nil {
+                       return err
+               }
+               req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
+               resp, err := http.DefaultClient.Do(req)
+               if err != nil {
+                       return err
+               }
+               defer resp.Body.Close()
+               diag.debugf("response status %s", resp.Status)
+               body, err := ioutil.ReadAll(resp.Body)
+               if err != nil {
+                       return fmt.Errorf("reading response: %s", err)
+               }
+               diag.debugf("response body %q", body)
+               // We don't speak the protocol, so we get a 400 error
+               // from the webshell server even if everything is
+               // OK. Anything else (404, 502, ???) indicates a
+               // problem.
+               if resp.StatusCode != http.StatusBadRequest {
+                       return fmt.Errorf("unexpected response status: %s, %q", resp.Status, body)
+               }
+               return nil
+       })
+
+       diag.dotest(160, "running a container", func() error {
+               if diag.priority < 1 {
+                       diag.infof("skipping (use priority > 0 if you want to run a container)")
+                       return nil
+               }
+               if project.UUID == "" {
+                       return fmt.Errorf("skipping, no project to work in")
+               }
+
+               var cr arvados.ContainerRequest
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+
+               timestamp := time.Now().Format(time.RFC3339)
+               err := client.RequestAndDecodeContext(ctx, &cr, "POST", "arvados/v1/container_requests", nil, map[string]interface{}{"container_request": map[string]interface{}{
+                       "owner_uuid":      project.UUID,
+                       "name":            fmt.Sprintf("diagnostics container request %s", timestamp),
+                       "container_image": "arvados/jobs",
+                       "command":         []string{"echo", timestamp},
+                       "use_existing":    false,
+                       "output_path":     "/mnt/output",
+                       "output_name":     fmt.Sprintf("diagnostics output %s", timestamp),
+                       "priority":        diag.priority,
+                       "state":           arvados.ContainerRequestStateCommitted,
+                       "mounts": map[string]map[string]interface{}{
+                               "/mnt/output": {
+                                       "kind":     "collection",
+                                       "writable": true,
+                               },
+                       },
+                       "runtime_constraints": arvados.RuntimeConstraints{
+                               VCPUs:        1,
+                               RAM:          1 << 26,
+                               KeepCacheRAM: 1 << 26,
+                       },
+               }})
+               if err != nil {
+                       return err
+               }
+               diag.debugf("container request uuid = %s", cr.UUID)
+               diag.debugf("container uuid = %s", cr.ContainerUUID)
+
+               timeout := 10 * time.Minute
+               diag.infof("container request submitted, waiting up to %v for container to run", arvados.Duration(timeout))
+               ctx, cancel = context.WithDeadline(context.Background(), time.Now().Add(timeout))
+               defer cancel()
+
+               var c arvados.Container
+               for ; cr.State != arvados.ContainerRequestStateFinal; time.Sleep(2 * time.Second) {
+                       ctx, cancel := context.WithDeadline(ctx, time.Now().Add(diag.timeout))
+                       defer cancel()
+
+                       crStateWas := cr.State
+                       err := client.RequestAndDecodeContext(ctx, &cr, "GET", "arvados/v1/container_requests/"+cr.UUID, nil, nil)
+                       if err != nil {
+                               return err
+                       }
+                       if cr.State != crStateWas {
+                               diag.debugf("container request state = %s", cr.State)
+                       }
+
+                       cStateWas := c.State
+                       err = client.RequestAndDecodeContext(ctx, &c, "GET", "arvados/v1/containers/"+cr.ContainerUUID, nil, nil)
+                       if err != nil {
+                               return err
+                       }
+                       if c.State != cStateWas {
+                               diag.debugf("container state = %s", c.State)
+                       }
+               }
+
+               if c.State != arvados.ContainerStateComplete {
+                       return fmt.Errorf("container request %s is final but container %s did not complete: container state = %q", cr.UUID, cr.ContainerUUID, c.State)
+               } else if c.ExitCode != 0 {
+                       return fmt.Errorf("container exited %d", c.ExitCode)
+               }
+               return nil
+       })
+}
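
The new package is wired up as the "diagnostics" subcommand of arvados-client (see cmd/arvados-client/cmd.go above). A hypothetical sketch of invoking it programmatically, using the RunCommand signature and the flag names defined in this file; it assumes the arvados source tree is on the Go module path and that ARVADOS_API_HOST/ARVADOS_API_TOKEN are set in the environment:

package main

import (
	"os"

	"git.arvados.org/arvados.git/lib/diagnostics"
)

func main() {
	// Roughly `arvados-client diagnostics -log-level=debug -priority=0`;
	// -priority=0 skips the "running a container" test.
	exitcode := diagnostics.Command{}.RunCommand(
		"diagnostics",
		[]string{"-log-level=debug", "-priority=0"},
		os.Stdin, os.Stdout, os.Stderr)
	os.Exit(exitcode)
}
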
diff --git a/sdk/cwl/arvados_cwl/arvtool.py b/sdk/cwl/arvados_cwl/arvtool.py
index a9361a85f9fe66a5260a64b6719fa74c28cdeee2..89176923519ee4c737d9376032463dee08a80102 100644 (file)
--- a/sdk/cwl/arvados_cwl/arvtool.py
+++ b/sdk/cwl/arvados_cwl/arvtool.py
@@ -15,7 +15,16 @@ def validate_cluster_target(arvrunner, runtimeContext):
         runtimeContext.submit_runner_cluster not in arvrunner.api._rootDesc["remoteHosts"] and
         runtimeContext.submit_runner_cluster != arvrunner.api._rootDesc["uuidPrefix"]):
         raise WorkflowException("Unknown or invalid cluster id '%s' known remote clusters are %s" % (runtimeContext.submit_runner_cluster,
-                                                                                                  ", ".join(list(arvrunner.api._rootDesc["remoteHosts"].keys()))))
+                                                                                                     ", ".join(list(arvrunner.api._rootDesc["remoteHosts"].keys()))))
+    if runtimeContext.project_uuid:
+        cluster_target = runtimeContext.submit_runner_cluster or arvrunner.api._rootDesc["uuidPrefix"]
+        if not runtimeContext.project_uuid.startswith(cluster_target):
+            raise WorkflowException("Project uuid '%s' must be for target cluster '%s'" % (runtimeContext.project_uuid, cluster_target))
+        try:
+            arvrunner.api.groups().get(uuid=runtimeContext.project_uuid).execute()
+        except Exception as e:
+            raise WorkflowException("Invalid project uuid '%s': %s" % (runtimeContext.project_uuid, e))
+
 def set_cluster_target(tool, arvrunner, builder, runtimeContext):
     cluster_target_req = None
     for field in ("hints", "requirements"):
diff --git a/sdk/cwl/arvados_cwl/fsaccess.py b/sdk/cwl/arvados_cwl/fsaccess.py
index 1e339d5bb7d4ab20f90438ed5670f3d67d51e548..4da8f855692aed44f212739d5e515af3fef2ceb0 100644 (file)
--- a/sdk/cwl/arvados_cwl/fsaccess.py
+++ b/sdk/cwl/arvados_cwl/fsaccess.py
@@ -129,7 +129,7 @@ class CollectionFsAccess(cwltool.stdfsaccess.StdFsAccess):
 
     def glob(self, pattern):
         collection, rest = self.get_collection(pattern)
-        if collection is not None and not rest:
+        if collection is not None and rest in (None, "", "."):
             return [pattern]
         patternsegments = rest.split("/")
         return sorted(self._match(collection, patternsegments, "keep:" + collection.manifest_locator()))
diff --git a/sdk/cwl/tests/17801-runtime-outdir.cwl b/sdk/cwl/tests/17801-runtime-outdir.cwl
new file mode 100644 (file)
index 0000000..c01ef05
--- /dev/null
+++ b/sdk/cwl/tests/17801-runtime-outdir.cwl
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.2
+class: CommandLineTool
+inputs: []
+outputs:
+  stuff:
+    type: Directory
+    outputBinding:
+      glob: $(runtime.outdir)
+requirements:
+  ShellCommandRequirement: {}
+arguments: [{shellQuote: false, valueFrom: "mkdir -p foo && touch baz.txt && touch foo/bar.txt"}]
diff --git a/sdk/cwl/tests/arvados-tests.yml b/sdk/cwl/tests/arvados-tests.yml
index 41feb7796baf4301af1bb6316952c3c3020f1b45..b22c9aaa27684084ef088f95f119badf1d04e598 100644 (file)
--- a/sdk/cwl/tests/arvados-tests.yml
+++ b/sdk/cwl/tests/arvados-tests.yml
   }
   tool: 10380-trailing-slash-dir.cwl
   doc: "Test issue 10380 - bug with trailing slash when capturing an output directory"
+
+- job: null
+  output: {
+    "stuff": {
+        "basename": "78f3957c41d044352303a3fa326dff1e+102",
+        "class": "Directory",
+        "listing": [
+            {
+                "basename": "baz.txt",
+                "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+                "class": "File",
+                "location": "78f3957c41d044352303a3fa326dff1e+102/baz.txt",
+                "size": 0
+            },
+            {
+                "basename": "foo",
+                "class": "Directory",
+                "listing": [
+                    {
+                        "basename": "bar.txt",
+                        "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+                        "class": "File",
+                        "location": "78f3957c41d044352303a3fa326dff1e+102/foo/bar.txt",
+                        "size": 0
+                    }
+                ],
+                "location": "78f3957c41d044352303a3fa326dff1e+102/foo"
+            }
+        ],
+        "location": "78f3957c41d044352303a3fa326dff1e+102"
+    }
+  }
+  tool: 17801-runtime-outdir.cwl
+  doc: "Test issue 17801 - bug using $(runtime.outdir) to capture the output directory"
diff --git a/sdk/cwl/tests/test_submit.py b/sdk/cwl/tests/test_submit.py
index 05515e0980cb15e228844001cfe385814883f7de..c448f218bc5d7d0029c9b1ce359e44887053a8a6 100644 (file)
--- a/sdk/cwl/tests/test_submit.py
+++ b/sdk/cwl/tests/test_submit.py
@@ -1263,6 +1263,21 @@ class TestSubmit(unittest.TestCase):
             stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
         self.assertEqual(exited, 1)
 
+    @stubs
+    def test_submit_validate_project_uuid(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--project-uuid=zzzzb-j7d0g-zzzzzzzzzzzzzzz",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+        self.assertEqual(exited, 1)
+
+        stubs.api.groups().get().execute.side_effect = Exception("Bad project")
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--project-uuid=zzzzz-j7d0g-zzzzzzzzzzzzzzx",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+        self.assertEqual(exited, 1)
+
     @mock.patch("arvados.collection.CollectionReader")
     @stubs
     def test_submit_uuid_inputs(self, stubs, collectionReader):
diff --git a/sdk/go/arvados/fs_base.go b/sdk/go/arvados/fs_base.go
index 2478641df5478639c92ff2b4d0f57d847165eee7..65c207162b0f8590f463097faa3b7de25043f5fc 100644 (file)
--- a/sdk/go/arvados/fs_base.go
+++ b/sdk/go/arvados/fs_base.go
@@ -29,12 +29,29 @@ var (
        ErrIsDirectory       = errors.New("cannot rename file to overwrite existing directory")
        ErrNotADirectory     = errors.New("not a directory")
        ErrPermission        = os.ErrPermission
+       DebugLocksPanicMode  = false
 )
 
 type syncer interface {
        Sync() error
 }
 
+func debugPanicIfNotLocked(l sync.Locker) {
+       if !DebugLocksPanicMode {
+               return
+       }
+       race := false
+       go func() {
+               l.Lock()
+               race = true
+               l.Unlock()
+       }()
+       time.Sleep(10)
+       if race {
+               panic("bug: caller-must-have-lock func called, but nobody has lock")
+       }
+}
+
 // A File is an *os.File-like interface for reading and writing files
 // in a FileSystem.
 type File interface {
@@ -271,6 +288,7 @@ func (n *treenode) IsDir() bool {
 }
 
 func (n *treenode) Child(name string, replace func(inode) (inode, error)) (child inode, err error) {
+       debugPanicIfNotLocked(n)
        child = n.inodes[name]
        if name == "" || name == "." || name == ".." {
                err = ErrInvalidArgument
@@ -333,6 +351,7 @@ func (n *treenode) Sync() error {
 func (n *treenode) MemorySize() (size int64) {
        n.RLock()
        defer n.RUnlock()
+       debugPanicIfNotLocked(n)
        for _, inode := range n.inodes {
                size += inode.MemorySize()
        }
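
debugPanicIfNotLocked is a probabilistic assertion: a helper goroutine briefly tries to take the lock, and if it succeeds the caller evidently did not hold it, so the process panics (only when DebugLocksPanicMode is on, as the fs_site_test.go init below enables). A simplified standalone sketch of the same idea; the node type, millisecond timeout, and channel plumbing are illustrative assumptions, not the arvados implementation:

package main

import (
	"fmt"
	"sync"
	"time"
)

var debugLocksPanicMode = true

// panicIfNotLocked lets another goroutine race for the lock and panics
// if that race is won, i.e. if the caller did not hold the lock.
func panicIfNotLocked(l sync.Locker) {
	if !debugLocksPanicMode {
		return
	}
	won := make(chan struct{}, 1)
	go func() {
		l.Lock()
		won <- struct{}{}
		l.Unlock()
	}()
	select {
	case <-won:
		panic("bug: caller-must-have-lock func called, but nobody has lock")
	case <-time.After(time.Millisecond):
		// Nobody could grab the lock quickly; the caller presumably holds it.
	}
}

type node struct{ sync.Mutex }

// child stands in for a caller-must-have-lock method like treenode.Child.
func (n *node) child() { panicIfNotLocked(n) }

func main() {
	n := &node{}
	n.Lock()
	n.child() // ok: lock is held
	n.Unlock()
	time.Sleep(10 * time.Millisecond) // let the background lock attempt finish
	fmt.Println("locked call ok")
	n.child() // panics: caller forgot to take the lock
}
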
diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
index 0233826a7281e9aa95f5dbb9f74e93ddb1bfd473..22e2b31d57e08d6c5dc813017c62b950f61aac01 100644 (file)
--- a/sdk/go/arvados/fs_collection.go
+++ b/sdk/go/arvados/fs_collection.go
@@ -1167,9 +1167,12 @@ func (dn *dirnode) createFileAndParents(path string) (fn *filenode, err error) {
                        node = node.Parent()
                        continue
                }
+               modtime := node.Parent().FileInfo().ModTime()
+               node.Lock()
+               locked := node
                node, err = node.Child(name, func(child inode) (inode, error) {
                        if child == nil {
-                               child, err := node.FS().newNode(name, 0755|os.ModeDir, node.Parent().FileInfo().ModTime())
+                               child, err := node.FS().newNode(name, 0755|os.ModeDir, modtime)
                                if err != nil {
                                        return nil, err
                                }
@@ -1181,6 +1184,7 @@ func (dn *dirnode) createFileAndParents(path string) (fn *filenode, err error) {
                                return child, nil
                        }
                })
+               locked.Unlock()
                if err != nil {
                        return
                }
@@ -1191,10 +1195,13 @@ func (dn *dirnode) createFileAndParents(path string) (fn *filenode, err error) {
                err = fmt.Errorf("invalid file part %q in path %q", basename, path)
                return
        }
+       modtime := node.FileInfo().ModTime()
+       node.Lock()
+       defer node.Unlock()
        _, err = node.Child(basename, func(child inode) (inode, error) {
                switch child := child.(type) {
                case nil:
-                       child, err = node.FS().newNode(basename, 0755, node.FileInfo().ModTime())
+                       child, err = node.FS().newNode(basename, 0755, modtime)
                        if err != nil {
                                return nil, err
                        }
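
These changes adapt createFileAndParents to the new contract on Child: the caller must hold the node's lock. Anything that needs a different lock, here the parent's FileInfo().ModTime(), which may lock the parent, is evaluated into modtime before node.Lock(), so the callback passed to Child never has to acquire a second lock while the first is held. A hedged sketch of that ordering, with the hypothetical names dir, ModTime and addEntry standing in for the SDK types:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type dir struct {
        sync.Mutex
        modTime time.Time
        entries map[string]time.Time
    }

    // ModTime locks d for the read, the way FileInfo() on a parent node might.
    func (d *dir) ModTime() time.Time {
        d.Lock()
        defer d.Unlock()
        return d.modTime
    }

    // addEntry's contract, like Child's after this change, is that the
    // caller already holds d's lock.
    func (d *dir) addEntry(name string, mtime time.Time) {
        d.entries[name] = mtime
    }

    func main() {
        parent := &dir{modTime: time.Now(), entries: map[string]time.Time{}}
        child := &dir{entries: map[string]time.Time{}}

        // Capture everything that needs the parent's lock *before* taking
        // the child's lock, mirroring the modtime := ... lines above.
        modtime := parent.ModTime()

        child.Lock()
        child.addEntry("newdir", modtime) // no second lock is taken while child is locked
        child.Unlock()

        fmt.Println(child.entries)
    }
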
index 900893aa36420e7c9d2008fff31b36d4bd03e0bf..5225df59ee58ec4c2017054f59ca68ee7936e41e 100644 (file)
@@ -54,6 +54,8 @@ func (c *Client) CustomFileSystem(kc keepClient) CustomFileSystem {
 }
 
 func (fs *customFileSystem) MountByID(mount string) {
+       fs.root.treenode.Lock()
+       defer fs.root.treenode.Unlock()
        fs.root.treenode.Child(mount, func(inode) (inode, error) {
                return &vdirnode{
                        treenode: treenode{
@@ -72,12 +74,16 @@ func (fs *customFileSystem) MountByID(mount string) {
 }
 
 func (fs *customFileSystem) MountProject(mount, uuid string) {
+       fs.root.treenode.Lock()
+       defer fs.root.treenode.Unlock()
        fs.root.treenode.Child(mount, func(inode) (inode, error) {
                return fs.newProjectNode(fs.root, mount, uuid), nil
        })
 }
 
 func (fs *customFileSystem) MountUsers(mount string) {
+       fs.root.treenode.Lock()
+       defer fs.root.treenode.Unlock()
        fs.root.treenode.Child(mount, func(inode) (inode, error) {
                return &lookupnode{
                        stale:   fs.Stale,
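
The same contract drives the MountByID, MountProject and MountUsers changes: each helper now takes the root treenode's lock itself before calling Child, so users of the CustomFileSystem API never deal with the locking. A small self-contained sketch of that design choice, using a hypothetical mountTable type rather than the real treenode:

    package main

    import (
        "fmt"
        "sync"
    )

    // mountTable is a hypothetical stand-in for the root treenode; add,
    // like treenode.Child after this change, requires the caller to hold
    // the lock.
    type mountTable struct {
        sync.Mutex
        mounts map[string]string
    }

    func (t *mountTable) add(name, target string) {
        t.mounts[name] = target
    }

    // The exported helpers mirror the pattern in the diff: take the lock,
    // defer the unlock, then call the caller-must-have-lock method, so
    // call sites stay lock-free.
    func (t *mountTable) MountByID(name string) {
        t.Lock()
        defer t.Unlock()
        t.add(name, "collections and projects by UUID")
    }

    func (t *mountTable) MountUsers(name string) {
        t.Lock()
        defer t.Unlock()
        t.add(name, "home projects by user name")
    }

    func main() {
        t := &mountTable{mounts: map[string]string{}}
        t.MountByID("by_id")
        t.MountUsers("users")
        fmt.Println(t.mounts)
    }
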
index b1c627f89c9e43c198dbd7a2a59f059c3cb01372..dc432114a60ec58e4925be28ddbbc227ce4188e9 100644 (file)
@@ -32,6 +32,15 @@ const (
 
 var _ = check.Suite(&SiteFSSuite{})
 
+func init() {
+       // Enable DebugLocksPanicMode sometimes. Don't enable it all
+       // the time, though -- the extra time.Sleep() calls it adds
+       // could mask other timing-sensitive bugs.
+       if time.Now().Second()&1 == 0 {
+               DebugLocksPanicMode = true
+       }
+}
+
 type SiteFSSuite struct {
        client *Client
        fs     CustomFileSystem
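
The init above turns the lock assertions on for roughly half of all test runs (even-numbered seconds only), so most runs still exercise the code without the extra time.Sleep calls. When chasing a specific locking bug it can help to force the mode on deterministically; one way to do that is sketched below, where DEBUG_LOCKS_PANIC is a hypothetical environment variable, not an existing knob.

    package main

    import (
        "fmt"
        "os"
        "time"
    )

    var debugLocksPanicMode bool // stands in for arvados.DebugLocksPanicMode

    func init() {
        // Coin-flip as in the diff: enabled on even-numbered seconds only,
        // so the added sleeps do not distort timing in every run...
        debugLocksPanicMode = time.Now().Second()&1 == 0
        // ...plus a hypothetical override for reproducing a failure on demand.
        if os.Getenv("DEBUG_LOCKS_PANIC") != "" {
            debugLocksPanicMode = true
        }
    }

    func main() {
        fmt.Println("DebugLocksPanicMode:", debugLocksPanicMode)
    }
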
index 0920369646c6b7556babdef38fced98fee2e18b8..4f0ac1fffaf9c4ee6875c0092ba1090d6e3eb9a6 100644 (file)
@@ -173,7 +173,8 @@ Follow file and directory symlinks (default).
 """)
 _group.add_argument('--no-follow-links', action='store_false', dest='follow_links',
                     help="""
-Do not follow file and directory symlinks.
+Ignore file and directory symlinks. Even paths given explicitly on the
+command line will be skipped if they are symlinks.
 """)
 
 
@@ -259,9 +260,8 @@ def parse_arguments(arguments):
 
     args.paths = ["-" if x == "/dev/stdin" else x for x in args.paths]
 
-    if len(args.paths) != 1 or os.path.isdir(args.paths[0]):
-        if args.filename:
-            arg_parser.error("""
+    if args.filename and (len(args.paths) != 1 or os.path.isdir(args.paths[0])):
+        arg_parser.error("""
     --filename argument cannot be used when storing a directory or
     multiple files.
     """)
@@ -525,6 +525,9 @@ class ArvPutUploadJob(object):
                 self._write_stdin(self.filename or 'stdin')
             elif not os.path.exists(path):
                  raise PathDoesNotExistError(u"file or directory '{}' does not exist.".format(path))
+            elif (not self.follow_links) and os.path.islink(path):
+                self.logger.warning("Skipping symlink '{}'".format(path))
+                continue
             elif os.path.isdir(path):
                 # Use absolute paths on cache index so CWD doesn't interfere
                 # with the caching logic.
@@ -659,6 +662,9 @@ class ArvPutUploadJob(object):
             self._remote_collection.save(num_retries=self.num_retries,
                                          trash_at=self._collection_trash_at())
         else:
+            if len(self._local_collection) == 0:
+                self.logger.warning("No files were uploaded, skipping collection creation.")
+                return
             self._local_collection.save_new(
                 name=self.name, owner_uuid=self.owner_uuid,
                 ensure_unique_name=self.ensure_unique_name,
@@ -1310,7 +1316,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
             output = writer.manifest_text()
     elif args.raw:
         output = ','.join(writer.data_locators())
-    else:
+    elif writer.manifest_locator() is not None:
         try:
             expiration_notice = ""
             if writer.collection_trash_at() is not None:
@@ -1336,6 +1342,8 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
                 "arv-put: Error creating Collection on project: {}.".format(
                     error))
             status = 1
+    else:
+        status = 1
 
     # Print the locator (uuid) of the new collection.
     if output is None:
index caa03a3e03c02692204f3e707fe31df7a97961ad..fac970c95a6fdb13ecbef709ff850798c67840a5 100644 (file)
@@ -295,6 +295,8 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
         shutil.rmtree(self.tempdir_with_symlink)
 
     def test_symlinks_are_followed_by_default(self):
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkedfile')))
         cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink])
         cwriter.start(save_collection=False)
         self.assertIn('linkeddir', cwriter.manifest_text())
@@ -302,12 +304,29 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
         cwriter.destroy_cache()
 
     def test_symlinks_are_not_followed_when_requested(self):
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkedfile')))
         cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink],
                                           follow_links=False)
         cwriter.start(save_collection=False)
         self.assertNotIn('linkeddir', cwriter.manifest_text())
         self.assertNotIn('linkedfile', cwriter.manifest_text())
         cwriter.destroy_cache()
+        # Check for bug #17800: passed symlinks should also be ignored.
+        linked_dir = os.path.join(self.tempdir_with_symlink, 'linkeddir')
+        cwriter = arv_put.ArvPutUploadJob([linked_dir], follow_links=False)
+        cwriter.start(save_collection=False)
+        self.assertNotIn('linkeddir', cwriter.manifest_text())
+        cwriter.destroy_cache()
+
+    def test_no_empty_collection_saved(self):
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
+        linked_dir = os.path.join(self.tempdir_with_symlink, 'linkeddir')
+        cwriter = arv_put.ArvPutUploadJob([linked_dir], follow_links=False)
+        cwriter.start(save_collection=True)
+        self.assertIsNone(cwriter.manifest_locator())
+        self.assertEqual('', cwriter.manifest_text())
+        cwriter.destroy_cache()
 
     def test_passing_nonexistant_path_raise_exception(self):
         uuid_str = str(uuid.uuid4())
index 446d591bfd715224651c1d9667e0c451e81f664e..8715ab24f35c0312fcca8152dc464ab60aa582af 100644 (file)
@@ -32,6 +32,10 @@ import (
 
 var _ = check.Suite(&UnitSuite{})
 
+func init() {
+       arvados.DebugLocksPanicMode = true
+}
+
 type UnitSuite struct {
        Config *arvados.Config
 }
index f03ff01b8127d6ee1d1056ee72bee5bd6507d26b..6ea9bf9f7a8383cc10ed82e108b76bb3ca97585b 100644 (file)
@@ -136,15 +136,39 @@ func s3stringToSign(alg, scope, signedHeaders string, r *http.Request) (string,
                }
        }
 
-       normalizedURL := *r.URL
-       normalizedURL.RawPath = ""
-       normalizedURL.Path = reMultipleSlashChars.ReplaceAllString(normalizedURL.Path, "/")
-       ctxlog.FromContext(r.Context()).Infof("escapedPath %s", normalizedURL.EscapedPath())
-       canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, normalizedURL.EscapedPath(), s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
+       normalizedPath := normalizePath(r.URL.Path)
+       ctxlog.FromContext(r.Context()).Debugf("normalizedPath %q", normalizedPath)
+       canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, normalizedPath, s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
        ctxlog.FromContext(r.Context()).Debugf("s3stringToSign: canonicalRequest %s", canonicalRequest)
        return fmt.Sprintf("%s\n%s\n%s\n%s", alg, r.Header.Get("X-Amz-Date"), scope, hashdigest(sha256.New(), canonicalRequest)), nil
 }
 
+func normalizePath(s string) string {
+       // (url.URL).EscapedPath() would be incorrect here. AWS
+       // documentation specifies the URL path should be normalized
+       // according to RFC 3986, i.e., unescaping ALPHA / DIGIT / "-"
+       // / "." / "_" / "~". The implication is that everything other
+       // than those chars (and "/") _must_ be percent-encoded --
+       // even chars like ";" and "," that are not normally
+       // percent-encoded in paths.
+       out := ""
+       for _, c := range []byte(reMultipleSlashChars.ReplaceAllString(s, "/")) {
+               if (c >= 'a' && c <= 'z') ||
+                       (c >= 'A' && c <= 'Z') ||
+                       (c >= '0' && c <= '9') ||
+                       c == '-' ||
+                       c == '.' ||
+                       c == '_' ||
+                       c == '~' ||
+                       c == '/' {
+                       out += string(c)
+               } else {
+                       out += fmt.Sprintf("%%%02X", c)
+               }
+       }
+       return out
+}
+
 func s3signature(secretKey, scope, signedHeaders, stringToSign string) (string, error) {
        // scope is {datestamp}/{region}/{service}/aws4_request
        drs := strings.Split(scope, "/")
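
normalizePath implements the SigV4 canonicalization described in its comment: collapse repeated slashes, keep RFC 3986 unreserved characters and "/" verbatim, and percent-encode everything else with uppercase hex digits, which is exactly what the new s3_test.go cases below check. The following standalone sanity check reuses the function body from the diff (lightly reformatted); reMultipleSlashChars here is a stand-in for the regexp defined elsewhere in s3.go.

    package main

    import (
        "fmt"
        "regexp"
    )

    // Stand-in for the regexp defined elsewhere in s3.go: collapse any run
    // of slashes to a single "/".
    var reMultipleSlashChars = regexp.MustCompile(`//+`)

    func normalizePath(s string) string {
        out := ""
        for _, c := range []byte(reMultipleSlashChars.ReplaceAllString(s, "/")) {
            if (c >= 'a' && c <= 'z') ||
                (c >= 'A' && c <= 'Z') ||
                (c >= '0' && c <= '9') ||
                c == '-' || c == '.' || c == '_' || c == '~' || c == '/' {
                out += string(c)
            } else {
                out += fmt.Sprintf("%%%02X", c)
            }
        }
        return out
    }

    func main() {
        for _, in := range []string{"/foo_bar", "/(foo)/[];,", "//foo///.bar"} {
            fmt.Printf("%q -> %q\n", in, normalizePath(in))
        }
        // Output, matching the expectations in s3_test.go:
        //   "/foo_bar" -> "/foo_bar"
        //   "/(foo)/[];," -> "/%28foo%29/%5B%5D%3B%2C"
        //   "//foo///.bar" -> "/foo/.bar"
    }
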
index 4f70168b5629ecfbdb06193d75344cb4e27673eb..f411fde871cdba0cc9bbb2584be9a9bb878c6c1e 100644 (file)
@@ -558,12 +558,15 @@ func (s *IntegrationSuite) TestS3NormalizeURIForSignature(c *check.C) {
                rawPath        string
                normalizedPath string
        }{
-               {"/foo", "/foo"},             // boring case
-               {"/foo%5fbar", "/foo_bar"},   // _ must not be escaped
-               {"/foo%2fbar", "/foo/bar"},   // / must not be escaped
-               {"/(foo)", "/%28foo%29"},     // () must be escaped
-               {"/foo%5bbar", "/foo%5Bbar"}, // %XX must be uppercase
+               {"/foo", "/foo"},                           // boring case
+               {"/foo%5fbar", "/foo_bar"},                 // _ must not be escaped
+               {"/foo%2fbar", "/foo/bar"},                 // / must not be escaped
+               {"/(foo)/[];,", "/%28foo%29/%5B%5D%3B%2C"}, // ()[];, must be escaped
+               {"/foo%5bbar", "/foo%5Bbar"},               // %XX must be uppercase
+               {"//foo///.bar", "/foo/.bar"},              // "//" and "///" must be squashed to "/"
        } {
+               c.Logf("trial %q", trial)
+
                date := time.Now().UTC().Format("20060102T150405Z")
                scope := "20200202/zzzzz/S3/aws4_request"
                canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", "GET", trial.normalizedPath, "", "host:host.example.com\n", "host", "")
@@ -1098,6 +1101,15 @@ func (s *IntegrationSuite) TestS3cmd(c *check.C) {
        buf, err := cmd.CombinedOutput()
        c.Check(err, check.IsNil)
        c.Check(string(buf), check.Matches, `.* 3 +s3://`+arvadostest.FooCollection+`/foo\n`)
+
+       // This tests whether s3cmd's path normalization agrees with
+       // keep-web's signature verification wrt chars like "|"
+       // (neither reserved nor unreserved) and "," (not normally
+       // percent-encoded in a path).
+       cmd = exec.Command("s3cmd", "--no-ssl", "--host="+s.testServer.Addr, "--host-bucket="+s.testServer.Addr, "--access_key="+arvadostest.ActiveTokenUUID, "--secret_key="+arvadostest.ActiveToken, "get", "s3://"+arvadostest.FooCollection+"/foo,;$[|]bar")
+       buf, err = cmd.CombinedOutput()
+       c.Check(err, check.NotNil)
+       c.Check(string(buf), check.Matches, `(?ms).*NoSuchKey.*\n`)
 }
 
 func (s *IntegrationSuite) TestS3BucketInHost(c *check.C) {
index c1af511ad464f5a447a4552e8181dcfff23ffefc..5cd376844dd5ca7ae5f6a198e1bf41a494d374c7 100755 (executable)
@@ -135,11 +135,11 @@ VERSION="latest"
 
 # The arvados-formula version.  For a stable release, this should be a
 # branch name (e.g. X.Y-dev) or tag for the release.
-ARVADOS_TAG="master"
+ARVADOS_TAG="main"
 
 # Other formula versions we depend on
 POSTGRES_TAG="v0.41.6"
-NGINX_TAG="temp-fix-missing-statements-in-pillar"
+NGINX_TAG="v2.7.4"
 DOCKER_TAG="v1.0.0"
 LOCALE_TAG="v0.3.4"
 LETSENCRYPT_TAG="v2.1.0"
@@ -219,8 +219,7 @@ cd ${F_DIR} || exit 1
 git clone --branch "${ARVADOS_TAG}"     https://git.arvados.org/arvados-formula.git
 git clone --branch "${DOCKER_TAG}"      https://github.com/saltstack-formulas/docker-formula.git
 git clone --branch "${LOCALE_TAG}"      https://github.com/saltstack-formulas/locale-formula.git
-# git clone --branch "${NGINX_TAG}"       https://github.com/saltstack-formulas/nginx-formula.git
-git clone --branch "${NGINX_TAG}"       https://github.com/netmanagers/nginx-formula.git
+git clone --branch "${NGINX_TAG}"       https://github.com/saltstack-formulas/nginx-formula.git
 git clone --branch "${POSTGRES_TAG}"    https://github.com/saltstack-formulas/postgres-formula.git
 git clone --branch "${LETSENCRYPT_TAG}" https://github.com/saltstack-formulas/letsencrypt-formula.git