17776: Merge branch 'master' into 17776-more-throttling
author	Ward Vandewege <ward@curii.com>
Fri, 18 Jun 2021 18:47:44 +0000 (14:47 -0400)
committer	Ward Vandewege <ward@curii.com>
Fri, 18 Jun 2021 18:47:44 +0000 (14:47 -0400)
Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

28 files changed:
apps/workbench/Gemfile.lock
cmd/arvados-client/cmd.go
go.mod
go.sum
lib/cmd/cmd.go
lib/config/config.default.yml
lib/config/generated_config.go
lib/config/load.go
lib/config/load_test.go
lib/costanalyzer/cmd.go
lib/costanalyzer/costanalyzer.go
lib/deduplicationreport/command.go
lib/diagnostics/cmd.go [new file with mode: 0644]
sdk/go/arvados/fs_base.go
sdk/go/arvados/fs_collection.go
sdk/go/arvados/fs_site.go
sdk/go/arvados/fs_site_test.go
sdk/python/arvados/commands/put.py
sdk/python/tests/test_arv_put.py
services/api/Gemfile.lock
services/api/app/models/container_request.rb
services/api/test/fixtures/container_requests.yml
services/api/test/fixtures/containers.yml
services/api/test/unit/container_request_test.rb
services/keep-web/handler_test.go
services/keep-web/s3.go
services/keep-web/s3_test.go
tools/salt-install/provision.sh

diff --git a/apps/workbench/Gemfile.lock b/apps/workbench/Gemfile.lock
index fe497fe84498be13d811c6dcb7374775aed6c7fd..709e5eb3f6d5e1928bcb8105d1e112608138004a 100644 (file)
@@ -16,43 +16,43 @@ GEM
   remote: https://rubygems.org/
   specs:
     RedCloth (4.3.2)
-    actioncable (5.2.4.5)
-      actionpack (= 5.2.4.5)
+    actioncable (5.2.6)
+      actionpack (= 5.2.6)
       nio4r (~> 2.0)
       websocket-driver (>= 0.6.1)
-    actionmailer (5.2.4.5)
-      actionpack (= 5.2.4.5)
-      actionview (= 5.2.4.5)
-      activejob (= 5.2.4.5)
+    actionmailer (5.2.6)
+      actionpack (= 5.2.6)
+      actionview (= 5.2.6)
+      activejob (= 5.2.6)
       mail (~> 2.5, >= 2.5.4)
       rails-dom-testing (~> 2.0)
-    actionpack (5.2.4.5)
-      actionview (= 5.2.4.5)
-      activesupport (= 5.2.4.5)
+    actionpack (5.2.6)
+      actionview (= 5.2.6)
+      activesupport (= 5.2.6)
       rack (~> 2.0, >= 2.0.8)
       rack-test (>= 0.6.3)
       rails-dom-testing (~> 2.0)
       rails-html-sanitizer (~> 1.0, >= 1.0.2)
-    actionview (5.2.4.5)
-      activesupport (= 5.2.4.5)
+    actionview (5.2.6)
+      activesupport (= 5.2.6)
       builder (~> 3.1)
       erubi (~> 1.4)
       rails-dom-testing (~> 2.0)
       rails-html-sanitizer (~> 1.0, >= 1.0.3)
-    activejob (5.2.4.5)
-      activesupport (= 5.2.4.5)
+    activejob (5.2.6)
+      activesupport (= 5.2.6)
       globalid (>= 0.3.6)
-    activemodel (5.2.4.5)
-      activesupport (= 5.2.4.5)
-    activerecord (5.2.4.5)
-      activemodel (= 5.2.4.5)
-      activesupport (= 5.2.4.5)
+    activemodel (5.2.6)
+      activesupport (= 5.2.6)
+    activerecord (5.2.6)
+      activemodel (= 5.2.6)
+      activesupport (= 5.2.6)
       arel (>= 9.0)
-    activestorage (5.2.4.5)
-      actionpack (= 5.2.4.5)
-      activerecord (= 5.2.4.5)
-      marcel (~> 0.3.1)
-    activesupport (5.2.4.5)
+    activestorage (5.2.6)
+      actionpack (= 5.2.6)
+      activerecord (= 5.2.6)
+      marcel (~> 1.0.0)
+    activesupport (5.2.6)
       concurrent-ruby (~> 1.0, >= 1.0.2)
       i18n (>= 0.7, < 2)
       minitest (~> 5.1)
@@ -121,7 +121,7 @@ GEM
       execjs
     coffee-script-source (1.12.2)
     commonjs (0.2.7)
-    concurrent-ruby (1.1.8)
+    concurrent-ruby (1.1.9)
     crass (1.0.6)
     deep_merge (1.2.1)
     docile (1.3.1)
@@ -167,23 +167,20 @@ GEM
       railties (>= 4)
       request_store (~> 1.0)
     logstash-event (1.2.02)
-    loofah (2.9.0)
+    loofah (2.10.0)
       crass (~> 1.0.2)
       nokogiri (>= 1.5.9)
     mail (2.7.1)
       mini_mime (>= 0.1.1)
-    marcel (0.3.3)
-      mimemagic (~> 0.3.2)
+    marcel (1.0.1)
     memoist (0.16.2)
     metaclass (0.0.4)
     method_source (1.0.0)
     mime-types (3.2.2)
       mime-types-data (~> 3.2015)
     mime-types-data (3.2019.0331)
-    mimemagic (0.3.8)
-      nokogiri (~> 1)
-    mini_mime (1.0.2)
-    mini_portile2 (2.5.1)
+    mini_mime (1.1.0)
+    mini_portile2 (2.5.3)
     minitest (5.10.3)
     mocha (1.8.0)
       metaclass (~> 0.0.1)
@@ -200,7 +197,7 @@ GEM
     net-ssh-gateway (2.0.0)
       net-ssh (>= 4.0.0)
     nio4r (2.5.7)
-    nokogiri (1.11.5)
+    nokogiri (1.11.7)
       mini_portile2 (~> 2.5.0)
       racc (~> 1.4)
     npm-rails (0.2.1)
@@ -226,18 +223,18 @@ GEM
       rack (>= 1.2.0)
     rack-test (1.1.0)
       rack (>= 1.0, < 3)
-    rails (5.2.4.5)
-      actioncable (= 5.2.4.5)
-      actionmailer (= 5.2.4.5)
-      actionpack (= 5.2.4.5)
-      actionview (= 5.2.4.5)
-      activejob (= 5.2.4.5)
-      activemodel (= 5.2.4.5)
-      activerecord (= 5.2.4.5)
-      activestorage (= 5.2.4.5)
-      activesupport (= 5.2.4.5)
+    rails (5.2.6)
+      actioncable (= 5.2.6)
+      actionmailer (= 5.2.6)
+      actionpack (= 5.2.6)
+      actionview (= 5.2.6)
+      activejob (= 5.2.6)
+      activemodel (= 5.2.6)
+      activerecord (= 5.2.6)
+      activestorage (= 5.2.6)
+      activesupport (= 5.2.6)
       bundler (>= 1.3.0)
-      railties (= 5.2.4.5)
+      railties (= 5.2.6)
       sprockets-rails (>= 2.0.0)
     rails-controller-testing (1.0.4)
       actionpack (>= 5.0.1.x)
@@ -249,9 +246,9 @@ GEM
     rails-html-sanitizer (1.3.0)
       loofah (~> 2.3)
     rails-perftest (0.0.7)
-    railties (5.2.4.5)
-      actionpack (= 5.2.4.5)
-      activesupport (= 5.2.4.5)
+    railties (5.2.6)
+      actionpack (= 5.2.6)
+      activesupport (= 5.2.6)
       method_source
       rake (>= 0.8.7)
       thor (>= 0.19.0, < 2.0)
@@ -321,7 +318,7 @@ GEM
     uglifier (2.7.2)
       execjs (>= 0.3.0)
       json (>= 1.8.0)
-    websocket-driver (0.7.3)
+    websocket-driver (0.7.4)
       websocket-extensions (>= 0.1.0)
     websocket-extensions (0.1.5)
     xpath (2.1.0)
diff --git a/cmd/arvados-client/cmd.go b/cmd/arvados-client/cmd.go
index aefcce79a45a4eb65fbbed209801d684da6b4923..cb15462119d4d1d3368382e1cbbcceafba40464f 100644 (file)
@@ -11,6 +11,7 @@ import (
        "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/lib/costanalyzer"
        "git.arvados.org/arvados.git/lib/deduplicationreport"
+       "git.arvados.org/arvados.git/lib/diagnostics"
        "git.arvados.org/arvados.git/lib/mount"
 )
 
@@ -59,6 +60,7 @@ var (
                "costanalyzer":         costanalyzer.Command,
                "shell":                shellCommand{},
                "connect-ssh":          connectSSHCommand{},
+               "diagnostics":          diagnostics.Command{},
        })
 )
 
diff --git a/go.mod b/go.mod
index 0ff679a576e232d318bdc14873069115036967bc..b70f6f3b476789f69f1845bffb6bfd4fcac80885 100644 (file)
--- a/go.mod
+++ b/go.mod
@@ -55,12 +55,14 @@ require (
        github.com/prometheus/common v0.7.0
        github.com/satori/go.uuid v1.2.1-0.20180103174451-36e9d2ebbde5 // indirect
        github.com/sergi/go-diff v1.0.0 // indirect
-       github.com/sirupsen/logrus v1.4.2
+       github.com/sirupsen/logrus v1.8.1
        github.com/src-d/gcfg v1.3.0 // indirect
        github.com/xanzy/ssh-agent v0.1.0 // indirect
        golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
        golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
        golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
+       golang.org/x/sys v0.0.0-20210603125802-9665404d3644
+       golang.org/x/tools v0.1.0 // indirect
        golang.org/x/sys v0.0.0-20210510120138-977fb7262007
        golang.org/x/tools v0.1.2 // indirect
        google.golang.org/api v0.13.0
diff --git a/go.sum b/go.sum
index c28ab4624038a143a4d9adf8a9e2593d123183c0..1fd37ed11ce411e625518eb3189c225e80b74616 100644 (file)
--- a/go.sum
+++ b/go.sum
@@ -234,6 +234,8 @@ github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
 github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
@@ -309,10 +311,13 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/lib/cmd/cmd.go b/lib/cmd/cmd.go
index b7d918739b86de347b0960e785bbd27dea477fba..63d7576b4cc1964b11b29a8ac199730660325f2e 100644 (file)
@@ -16,6 +16,8 @@ import (
        "runtime"
        "sort"
        "strings"
+
+       "github.com/sirupsen/logrus"
 )
 
 type Handler interface {
@@ -153,3 +155,9 @@ func SubcommandToFront(args []string, flagset FlagSet) []string {
        copy(newargs[flagargs+1:], args[flagargs+1:])
        return newargs
 }
+
+type NoPrefixFormatter struct{}
+
+func (NoPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+       return []byte(entry.Message + "\n"), nil
+}
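With the change above, NoPrefixFormatter (previously duplicated in lib/costanalyzer and lib/deduplicationreport, see their hunks below) is shared from lib/cmd, so any subcommand can emit bare log lines. A minimal usage sketch, assuming only the ctxlog.New constructor used elsewhere in this diff and the Format method shown above:

    package main

    import (
        "os"

        "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
    )

    func main() {
        logger := ctxlog.New(os.Stderr, "text", "info")
        logger.SetFormatter(cmd.NoPrefixFormatter{})
        logger.Info("all done") // prints "all done" with no timestamp or level prefix
    }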
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index e2ef9899e578d8f50d3ca720b334d046c53f5cf9..f0794a7e5320b115f1cac6b2d0f34c87cb3d7394 100644 (file)
@@ -24,49 +24,42 @@ Clusters:
 
       # In each of the service sections below, the keys under
       # InternalURLs are the endpoints where the service should be
-      # listening, and reachable from other hosts in the cluster.
-      SAMPLE:
-        InternalURLs:
-          "http://host1.example:12345": {}
-          "http://host2.example:12345":
-            # Rendezvous is normally empty/omitted. When changing the
-            # URL of a Keepstore service, Rendezvous should be set to
-            # the old URL (with trailing slash omitted) to preserve
-            # rendezvous ordering.
-            Rendezvous: ""
-          SAMPLE:
-            Rendezvous: ""
-        ExternalURL: "-"
+      # listening, and reachable from other hosts in the
+      # cluster. Example:
+      #
+      # InternalURLs:
+      #   "http://host1.example:12345": {}
+      #   "http://host2.example:12345": {}
 
       RailsAPI:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: "-"
       Controller:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       Websocket:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       Keepbalance:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: "-"
       GitHTTP:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       GitSSH:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       DispatchCloud:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: "-"
       SSO:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       Keepproxy:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       WebDAV:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         # Base URL for Workbench inline preview.  If blank, use
         # WebDAVDownload instead, and disable inline preview.
         # If both are empty, downloading collections from workbench
@@ -105,7 +98,7 @@ Clusters:
         ExternalURL: ""
 
       WebDAVDownload:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         # Base URL for download links. If blank, serve links to WebDAV
         # with disposition=attachment query param.  Unlike preview links,
         # browsers do not render attachments, so there is no risk of XSS.
@@ -119,13 +112,19 @@ Clusters:
         ExternalURL: ""
 
       Keepstore:
-        InternalURLs: {}
+        InternalURLs:
+          SAMPLE:
+            # Rendezvous is normally empty/omitted. When changing the
+            # URL of a Keepstore service, Rendezvous should be set to
+            # the old URL (with trailing slash omitted) to preserve
+            # rendezvous ordering.
+            Rendezvous: ""
         ExternalURL: "-"
       Composer:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       WebShell:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         # ShellInABox service endpoint URL for a given VM.  If empty, do not
         # offer web shell logins.
         #
@@ -136,13 +135,13 @@ Clusters:
         # https://*.webshell.uuid_prefix.arvadosapi.com
         ExternalURL: ""
       Workbench1:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       Workbench2:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       Health:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: "-"
 
     PostgreSQL:
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index fbee937b39251ff41b72d89325fffee8ff44e7bb..d5e0f200b84559845450c65c36da1092b84ef5d8 100644 (file)
@@ -30,49 +30,42 @@ Clusters:
 
       # In each of the service sections below, the keys under
       # InternalURLs are the endpoints where the service should be
-      # listening, and reachable from other hosts in the cluster.
-      SAMPLE:
-        InternalURLs:
-          "http://host1.example:12345": {}
-          "http://host2.example:12345":
-            # Rendezvous is normally empty/omitted. When changing the
-            # URL of a Keepstore service, Rendezvous should be set to
-            # the old URL (with trailing slash omitted) to preserve
-            # rendezvous ordering.
-            Rendezvous: ""
-          SAMPLE:
-            Rendezvous: ""
-        ExternalURL: "-"
+      # listening, and reachable from other hosts in the
+      # cluster. Example:
+      #
+      # InternalURLs:
+      #   "http://host1.example:12345": {}
+      #   "http://host2.example:12345": {}
 
       RailsAPI:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: "-"
       Controller:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       Websocket:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       Keepbalance:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: "-"
       GitHTTP:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       GitSSH:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       DispatchCloud:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: "-"
       SSO:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       Keepproxy:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       WebDAV:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         # Base URL for Workbench inline preview.  If blank, use
         # WebDAVDownload instead, and disable inline preview.
         # If both are empty, downloading collections from workbench
@@ -111,7 +104,7 @@ Clusters:
         ExternalURL: ""
 
       WebDAVDownload:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         # Base URL for download links. If blank, serve links to WebDAV
         # with disposition=attachment query param.  Unlike preview links,
         # browsers do not render attachments, so there is no risk of XSS.
@@ -125,13 +118,19 @@ Clusters:
         ExternalURL: ""
 
       Keepstore:
-        InternalURLs: {}
+        InternalURLs:
+          SAMPLE:
+            # Rendezvous is normally empty/omitted. When changing the
+            # URL of a Keepstore service, Rendezvous should be set to
+            # the old URL (with trailing slash omitted) to preserve
+            # rendezvous ordering.
+            Rendezvous: ""
         ExternalURL: "-"
       Composer:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       WebShell:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         # ShellInABox service endpoint URL for a given VM.  If empty, do not
         # offer web shell logins.
         #
@@ -142,13 +141,13 @@ Clusters:
         # https://*.webshell.uuid_prefix.arvadosapi.com
         ExternalURL: ""
       Workbench1:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       Workbench2:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
       Health:
-        InternalURLs: {}
+        InternalURLs: {SAMPLE: {}}
         ExternalURL: "-"
 
     PostgreSQL:
diff --git a/lib/config/load.go b/lib/config/load.go
index cc26cdaecc073bf747d308d7acb0a53388f3f4a6..169b252a0e8fbbe44b0e6722e7fe1fe745ca01b6 100644 (file)
@@ -182,6 +182,11 @@ func (ldr *Loader) Load() (*arvados.Config, error) {
                ldr.configdata = buf
        }
 
+       // FIXME: We should reject YAML if the same key is used twice
+       // in a map/object, like {foo: bar, foo: baz}. Maybe we'll get
+       // this fixed for free when we upgrade ghodss/yaml to a version
+       // that uses go-yaml v3.
+
        // Load the config into a dummy map to get the cluster ID
        // keys, discarding the values; then set up defaults for each
        // cluster ID; then load the real config on top of the
@@ -291,6 +296,8 @@ func (ldr *Loader) Load() (*arvados.Config, error) {
                        checkKeyConflict(fmt.Sprintf("Clusters.%s.PostgreSQL.Connection", id), cc.PostgreSQL.Connection),
                        ldr.checkEmptyKeepstores(cc),
                        ldr.checkUnlistedKeepstores(cc),
+                       // TODO: check non-empty Rendezvous on
+                       // services other than Keepstore
                } {
                        if err != nil {
                                return nil, err
@@ -354,20 +361,33 @@ func (ldr *Loader) logExtraKeys(expected, supplied map[string]interface{}, prefi
        if ldr.Logger == nil {
                return
        }
-       allowed := map[string]interface{}{}
-       for k, v := range expected {
-               allowed[strings.ToLower(k)] = v
-       }
        for k, vsupp := range supplied {
                if k == "SAMPLE" {
                        // entry will be dropped in removeSampleKeys anyway
                        continue
                }
-               vexp, ok := allowed[strings.ToLower(k)]
+               vexp, ok := expected[k]
                if expected["SAMPLE"] != nil {
+                       // use the SAMPLE entry's keys as the
+                       // "expected" map when checking vsupp
+                       // recursively.
                        vexp = expected["SAMPLE"]
                } else if !ok {
-                       ldr.Logger.Warnf("deprecated or unknown config entry: %s%s", prefix, k)
+                       // check for a case-insensitive match
+                       hint := ""
+                       for ek := range expected {
+                               if strings.EqualFold(k, ek) {
+                                       hint = " (perhaps you meant " + ek + "?)"
+                                       // If we don't delete this, it
+                                       // will end up getting merged,
+                                       // unpredictably
+                                       // merging/overriding the
+                                       // default.
+                                       delete(supplied, k)
+                                       break
+                               }
+                       }
+                       ldr.Logger.Warnf("deprecated or unknown config entry: %s%s%s", prefix, k, hint)
                        continue
                }
                if vsupp, ok := vsupp.(map[string]interface{}); !ok {
diff --git a/lib/config/load_test.go b/lib/config/load_test.go
index 6c11ee7803116ab2df30f2050f23f7ce6580a9fc..396faca48461b30d7d5708a55f05bafcf73159ac 100644 (file)
@@ -196,21 +196,37 @@ Clusters:
     SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
     Collections:
      BlobSigningKey: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
-    postgresql: {}
-    BadKey: {}
-    Containers: {}
+    PostgreSQL: {}
+    BadKey1: {}
+    Containers:
+      RunTimeEngine: abc
     RemoteClusters:
       z2222:
         Host: z2222.arvadosapi.com
         Proxy: true
-        BadKey: badValue
+        BadKey2: badValue
+    Services:
+      KeepStore:
+        InternalURLs:
+          "http://host.example:12345": {}
+      Keepstore:
+        InternalURLs:
+          "http://host.example:12345":
+            RendezVous: x
+    ServiceS:
+      Keepstore:
+        InternalURLs:
+          "http://host.example:12345": {}
+    Volumes:
+      zzzzz-nyw5e-aaaaaaaaaaaaaaa: {}
 `, &logbuf).Load()
        c.Assert(err, check.IsNil)
+       c.Log(logbuf.String())
        logs := strings.Split(strings.TrimSuffix(logbuf.String(), "\n"), "\n")
        for _, log := range logs {
-               c.Check(log, check.Matches, `.*deprecated or unknown config entry:.*BadKey.*`)
+               c.Check(log, check.Matches, `.*deprecated or unknown config entry:.*(RunTimeEngine.*RuntimeEngine|BadKey1|BadKey2|KeepStore|ServiceS|RendezVous).*`)
        }
-       c.Check(logs, check.HasLen, 2)
+       c.Check(logs, check.HasLen, 6)
 }
 
 func (s *LoadSuite) checkSAMPLEKeys(c *check.C, path string, x interface{}) {
@@ -322,8 +338,8 @@ func (s *LoadSuite) TestPostgreSQLKeyConflict(c *check.C) {
        _, err := testLoader(c, `
 Clusters:
  zzzzz:
-  postgresql:
-   connection:
+  PostgreSQL:
+   Connection:
      DBName: dbname
      Host: host
 `, nil).Load()
diff --git a/lib/costanalyzer/cmd.go b/lib/costanalyzer/cmd.go
index 525ec619b5e4aeeb0b4e271704f13385a41bcefd..6065ad2c0b2cb934607826d8500215bbabf0d868 100644 (file)
@@ -8,8 +8,8 @@ import (
        "io"
        "time"
 
+       "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "github.com/sirupsen/logrus"
 )
 
 var Command = command{}
@@ -22,24 +22,17 @@ type command struct {
        end        time.Time
 }
 
-type NoPrefixFormatter struct{}
-
-func (f *NoPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {
-       return []byte(entry.Message), nil
-}
-
 // RunCommand implements the subcommand "costanalyzer <collection> <collection> ..."
 func (c command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
        var err error
        logger := ctxlog.New(stderr, "text", "info")
+       logger.SetFormatter(cmd.NoPrefixFormatter{})
        defer func() {
                if err != nil {
-                       logger.Error("\n" + err.Error() + "\n")
+                       logger.Error("\n" + err.Error())
                }
        }()
 
-       logger.SetFormatter(new(NoPrefixFormatter))
-
        exitcode, err := c.costAnalyzer(prog, args, logger, stdout, stderr)
 
        return exitcode
diff --git a/lib/costanalyzer/costanalyzer.go b/lib/costanalyzer/costanalyzer.go
index edaaa5bd178243f2c9044d32398c5ddccc67b5dc..dfe3d584cec7a7f07d16ec2e3c267e760201d8dc 100644 (file)
@@ -9,9 +9,6 @@ import (
        "errors"
        "flag"
        "fmt"
-       "git.arvados.org/arvados.git/sdk/go/arvados"
-       "git.arvados.org/arvados.git/sdk/go/arvadosclient"
-       "git.arvados.org/arvados.git/sdk/go/keepclient"
        "io"
        "io/ioutil"
        "net/http"
@@ -20,6 +17,9 @@ import (
        "strings"
        "time"
 
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadosclient"
+       "git.arvados.org/arvados.git/sdk/go/keepclient"
        "github.com/sirupsen/logrus"
 )
 
@@ -169,7 +169,7 @@ Options:
        }
        logger.SetLevel(lvl)
        if !c.cache {
-               logger.Debug("Caching disabled\n")
+               logger.Debug("Caching disabled")
        }
        return
 }
@@ -249,12 +249,12 @@ func loadCachedObject(logger *logrus.Logger, file string, uuid string, object in
        case *arvados.ContainerRequest:
                if v.State == arvados.ContainerRequestStateFinal {
                        reload = false
-                       logger.Debugf("Loaded object %s from local cache (%s)\n", uuid, file)
+                       logger.Debugf("Loaded object %s from local cache (%s)", uuid, file)
                }
        case *arvados.Container:
                if v.State == arvados.ContainerStateComplete || v.State == arvados.ContainerStateCancelled {
                        reload = false
-                       logger.Debugf("Loaded object %s from local cache (%s)\n", uuid, file)
+                       logger.Debugf("Loaded object %s from local cache (%s)", uuid, file)
                }
        }
        return
@@ -384,7 +384,7 @@ func handleProject(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
                return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
        }
        if value, ok := childCrs["items"]; ok {
-               logger.Infof("Collecting top level container requests in project %s\n", uuid)
+               logger.Infof("Collecting top level container requests in project %s", uuid)
                items := value.([]interface{})
                for _, item := range items {
                        itemMap := item.(map[string]interface{})
@@ -397,7 +397,7 @@ func handleProject(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
                        }
                }
        } else {
-               logger.Infof("No top level container requests found in project %s\n", uuid)
+               logger.Infof("No top level container requests found in project %s", uuid)
        }
        return
 }
@@ -410,7 +410,7 @@ func generateCrCsv(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
        var tmpCsv string
        var tmpTotalCost float64
        var totalCost float64
-       fmt.Printf("Processing %s\n", uuid)
+       logger.Debugf("Processing %s", uuid)
 
        var crUUID = uuid
        if strings.Contains(uuid, "-4zz18-") {
@@ -438,7 +438,7 @@ func generateCrCsv(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
        }
        if len(cr.ContainerUUID) == 0 {
                // Nothing to do! E.g. a CR in 'Uncommitted' state.
-               logger.Infof("No container associated with container request %s, skipping\n", crUUID)
+               logger.Infof("No container associated with container request %s, skipping", crUUID)
                return nil, nil
        }
        var container arvados.Container
@@ -473,14 +473,20 @@ func generateCrCsv(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
                return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
        }
        logger.Infof("Collecting child containers for container request %s (%s)", crUUID, container.FinishedAt)
-       for _, cr2 := range childCrs.Items {
-               logger.Info(".")
+       progressTicker := time.NewTicker(5 * time.Second)
+       defer progressTicker.Stop()
+       for i, cr2 := range childCrs.Items {
+               select {
+               case <-progressTicker.C:
+                       logger.Infof("... %d of %d", i+1, len(childCrs.Items))
+               default:
+               }
                node, err := getNode(arv, ac, kc, cr2)
                if err != nil {
                        logger.Errorf("Skipping container request %s: error getting node %s: %s", cr2.UUID, cr2.UUID, err)
                        continue
                }
-               logger.Debug("\nChild container: " + cr2.ContainerUUID + "\n")
+               logger.Debug("Child container: " + cr2.ContainerUUID)
                var c2 arvados.Container
                err = loadObject(logger, ac, cr.UUID, cr2.ContainerUUID, cache, &c2)
                if err != nil {
@@ -491,7 +497,7 @@ func generateCrCsv(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
                csv += tmpCsv
                totalCost += tmpTotalCost
        }
-       logger.Info(" done\n")
+       logger.Debug("Done collecting child containers")
 
        csv += "TOTAL,,,,,,,,," + strconv.FormatFloat(totalCost, 'f', 8, 64) + "\n"
 
@@ -502,7 +508,7 @@ func generateCrCsv(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
                if err != nil {
                        return nil, fmt.Errorf("error writing file with path %s: %s", fName, err.Error())
                }
-               logger.Infof("\nUUID report in %s\n\n", fName)
+               logger.Infof("\nUUID report in %s", fName)
        }
 
        return
@@ -562,7 +568,7 @@ func (c *command) costAnalyzer(prog string, args []string, logger *logrus.Logger
 
                                err := ac.RequestAndDecode(&list, "GET", "arvados/v1/container_requests", nil, params)
                                if err != nil {
-                                       logger.Errorf("Error getting container request list from Arvados API: %s\n", err)
+                                       logger.Errorf("Error getting container request list from Arvados API: %s", err)
                                        break
                                }
                                if len(list.Items) == 0 {
@@ -581,7 +587,7 @@ func (c *command) costAnalyzer(prog string, args []string, logger *logrus.Logger
        cost := make(map[string]float64)
 
        for uuid := range uuidChannel {
-               fmt.Printf("Considering %s\n", uuid)
+               logger.Debugf("Considering %s", uuid)
                if strings.Contains(uuid, "-j7d0g-") {
                        // This is a project (group)
                        cost, err = handleProject(logger, uuid, arv, ac, kc, c.resultsDir, c.cache)
@@ -611,14 +617,14 @@ func (c *command) costAnalyzer(prog string, args []string, logger *logrus.Logger
                        // keep going.
                        logger.Errorf("cost analysis is not supported for the 'Home' project: %s", uuid)
                } else {
-                       logger.Errorf("this argument does not look like a uuid: %s\n", uuid)
+                       logger.Errorf("this argument does not look like a uuid: %s", uuid)
                        exitcode = 3
                        return
                }
        }
 
        if len(cost) == 0 {
-               logger.Info("Nothing to do!\n")
+               logger.Info("Nothing to do!")
                return
        }
 
@@ -646,7 +652,7 @@ func (c *command) costAnalyzer(prog string, args []string, logger *logrus.Logger
                        exitcode = 1
                        return
                }
-               logger.Infof("Aggregate cost accounting for all supplied uuids in %s\n", aFile)
+               logger.Infof("Aggregate cost accounting for all supplied uuids in %s", aFile)
        }
 
        // Output the total dollar amount on stdout
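In the generateCrCsv hunk above, the per-child-container "." log line is replaced with a time.Ticker that is polled without blocking, so progress is reported at most once every five seconds. A generic sketch of that pattern, independent of the costanalyzer types:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        items := make([]int, 1000)
        progress := time.NewTicker(5 * time.Second)
        defer progress.Stop()
        for i := range items {
            select {
            case <-progress.C:
                fmt.Printf("... %d of %d\n", i+1, len(items))
            default:
                // no tick pending: keep working without waiting
            }
            time.Sleep(10 * time.Millisecond) // stand-in for per-item work
        }
    }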
diff --git a/lib/deduplicationreport/command.go b/lib/deduplicationreport/command.go
index 93cdb61d3b86a8bae2dce3dee1a3be2e5fbacbc0..39bbbc9498c2b10055dff1d24227275ec6ae1fba 100644 (file)
@@ -7,20 +7,14 @@ package deduplicationreport
 import (
        "io"
 
+       "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "github.com/sirupsen/logrus"
 )
 
 var Command command
 
 type command struct{}
 
-type NoPrefixFormatter struct{}
-
-func (f *NoPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {
-       return []byte(entry.Message + "\n"), nil
-}
-
 // RunCommand implements the subcommand "deduplication-report <collection> <collection> ..."
 func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
        var err error
@@ -31,7 +25,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
                }
        }()
 
-       logger.SetFormatter(new(NoPrefixFormatter))
+       logger.SetFormatter(cmd.NoPrefixFormatter{})
 
        exitcode := report(prog, args, logger, stdout, stderr)
 
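The new lib/diagnostics package below is registered as the "diagnostics" subcommand of arvados-client (see the cmd/arvados-client/cmd.go hunk earlier in this diff). A sketch of driving it directly, using only the RunCommand signature and flags defined below; the standalone wrapper itself is hypothetical:

    package main

    import (
        "os"

        "git.arvados.org/arvados.git/lib/diagnostics"
    )

    func main() {
        // Roughly equivalent to: arvados-client diagnostics -log-level=debug -priority=0
        // (-priority=0 skips the run-a-container test, per the flag help below).
        code := diagnostics.Command{}.RunCommand("diagnostics",
            []string{"-log-level=debug", "-priority=0"},
            os.Stdin, os.Stdout, os.Stderr)
        os.Exit(code)
    }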
diff --git a/lib/diagnostics/cmd.go b/lib/diagnostics/cmd.go
new file mode 100644 (file)
index 0000000..b0241b3
--- /dev/null
@@ -0,0 +1,612 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package diagnostics
+
+import (
+       "bytes"
+       "context"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net"
+       "net/http"
+       "net/url"
+       "strings"
+       "time"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/sirupsen/logrus"
+)
+
+type Command struct{}
+
+func (cmd Command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       var diag diagnoser
+       f := flag.NewFlagSet(prog, flag.ContinueOnError)
+       f.StringVar(&diag.projectName, "project-name", "scratch area for diagnostics", "name of project to find/create in home project and use for temporary/test objects")
+       f.StringVar(&diag.logLevel, "log-level", "info", "logging level (debug, info, warning, error)")
+       f.BoolVar(&diag.checkInternal, "internal-client", false, "check that this host is considered an \"internal\" client")
+       f.BoolVar(&diag.checkExternal, "external-client", false, "check that this host is considered an \"external\" client")
+       f.IntVar(&diag.priority, "priority", 500, "priority for test container (1..1000, or 0 to skip)")
+       f.DurationVar(&diag.timeout, "timeout", 10*time.Second, "timeout for http requests")
+       err := f.Parse(args)
+       if err == flag.ErrHelp {
+               return 0
+       } else if err != nil {
+               fmt.Fprintln(stderr, err)
+               return 2
+       }
+       diag.logger = ctxlog.New(stdout, "text", diag.logLevel)
+       diag.logger.SetFormatter(&logrus.TextFormatter{DisableTimestamp: true, DisableLevelTruncation: true, PadLevelText: true})
+       diag.runtests()
+       if len(diag.errors) == 0 {
+               diag.logger.Info("--- no errors ---")
+               return 0
+       } else {
+               if diag.logger.Level > logrus.ErrorLevel {
+                       fmt.Fprint(stdout, "\n--- cut here --- error summary ---\n\n")
+                       for _, e := range diag.errors {
+                               diag.logger.Error(e)
+                       }
+               }
+               return 1
+       }
+}
+
+type diagnoser struct {
+       stdout        io.Writer
+       stderr        io.Writer
+       logLevel      string
+       priority      int
+       projectName   string
+       checkInternal bool
+       checkExternal bool
+       timeout       time.Duration
+       logger        *logrus.Logger
+       errors        []string
+       done          map[int]bool
+}
+
+func (diag *diagnoser) debugf(f string, args ...interface{}) {
+       diag.logger.Debugf("  ... "+f, args...)
+}
+
+func (diag *diagnoser) infof(f string, args ...interface{}) {
+       diag.logger.Infof("  ... "+f, args...)
+}
+
+func (diag *diagnoser) warnf(f string, args ...interface{}) {
+       diag.logger.Warnf("  ... "+f, args...)
+}
+
+func (diag *diagnoser) errorf(f string, args ...interface{}) {
+       diag.logger.Errorf(f, args...)
+       diag.errors = append(diag.errors, fmt.Sprintf(f, args...))
+}
+
+// Run the given func, logging appropriate messages before and after,
+// adding timing info, etc.
+//
+// The id argument should be unique among tests, and shouldn't change
+// when other tests are added/removed.
+func (diag *diagnoser) dotest(id int, title string, fn func() error) {
+       if diag.done == nil {
+               diag.done = map[int]bool{}
+       } else if diag.done[id] {
+               diag.errorf("(bug) reused test id %d", id)
+       }
+       diag.done[id] = true
+
+       diag.logger.Infof("%4d: %s", id, title)
+       t0 := time.Now()
+       err := fn()
+       elapsed := fmt.Sprintf("%d ms", time.Now().Sub(t0)/time.Millisecond)
+       if err != nil {
+               diag.errorf("%4d: %s (%s): %s", id, title, elapsed, err)
+       } else {
+               diag.logger.Debugf("%4d: %s (%s): ok", id, title, elapsed)
+       }
+}
+
+func (diag *diagnoser) runtests() {
+       client := arvados.NewClientFromEnv()
+
+       if client.APIHost == "" || client.AuthToken == "" {
+               diag.errorf("ARVADOS_API_HOST and ARVADOS_API_TOKEN environment variables are not set -- aborting without running any tests")
+               return
+       }
+
+       var dd arvados.DiscoveryDocument
+       ddpath := "discovery/v1/apis/arvados/v1/rest"
+       diag.dotest(10, fmt.Sprintf("getting discovery document from https://%s/%s", client.APIHost, ddpath), func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               err := client.RequestAndDecodeContext(ctx, &dd, "GET", ddpath, nil, nil)
+               if err != nil {
+                       return err
+               }
+               diag.debugf("BlobSignatureTTL = %d", dd.BlobSignatureTTL)
+               return nil
+       })
+
+       var cluster arvados.Cluster
+       cfgpath := "arvados/v1/config"
+       diag.dotest(20, fmt.Sprintf("getting exported config from https://%s/%s", client.APIHost, cfgpath), func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               err := client.RequestAndDecodeContext(ctx, &cluster, "GET", cfgpath, nil, nil)
+               if err != nil {
+                       return err
+               }
+               diag.debugf("Collections.BlobSigning = %v", cluster.Collections.BlobSigning)
+               return nil
+       })
+
+       var user arvados.User
+       diag.dotest(30, "getting current user record", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               err := client.RequestAndDecodeContext(ctx, &user, "GET", "arvados/v1/users/current", nil, nil)
+               if err != nil {
+                       return err
+               }
+               diag.debugf("user uuid = %s", user.UUID)
+               return nil
+       })
+
+       // uncomment to create some spurious errors
+       // cluster.Services.WebDAVDownload.ExternalURL.Host = "0.0.0.0:9"
+
+       // TODO: detect routing errors here, like finding wb2 at the
+       // wb1 address.
+       for i, svc := range []*arvados.Service{
+               &cluster.Services.Keepproxy,
+               &cluster.Services.WebDAV,
+               &cluster.Services.WebDAVDownload,
+               &cluster.Services.Websocket,
+               &cluster.Services.Workbench1,
+               &cluster.Services.Workbench2,
+       } {
+               diag.dotest(40+i, fmt.Sprintf("connecting to service endpoint %s", svc.ExternalURL), func() error {
+                       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+                       defer cancel()
+                       u := svc.ExternalURL
+                       if strings.HasPrefix(u.Scheme, "ws") {
+                               // We can do a real websocket test elsewhere,
+                               // but for now we'll just check the https
+                               // connection.
+                               u.Scheme = "http" + u.Scheme[2:]
+                       }
+                       if svc == &cluster.Services.WebDAV && strings.HasPrefix(u.Host, "*") {
+                               u.Host = "d41d8cd98f00b204e9800998ecf8427e-0" + u.Host[1:]
+                       }
+                       req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+                       if err != nil {
+                               return err
+                       }
+                       resp, err := http.DefaultClient.Do(req)
+                       if err != nil {
+                               return err
+                       }
+                       resp.Body.Close()
+                       return nil
+               })
+       }
+
+       for i, url := range []string{
+               cluster.Services.Controller.ExternalURL.String(),
+               cluster.Services.Keepproxy.ExternalURL.String() + "d41d8cd98f00b204e9800998ecf8427e+0",
+               cluster.Services.WebDAVDownload.ExternalURL.String(),
+       } {
+               diag.dotest(50+i, fmt.Sprintf("checking CORS headers at %s", url), func() error {
+                       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+                       defer cancel()
+                       req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+                       if err != nil {
+                               return err
+                       }
+                       req.Header.Set("Origin", "https://example.com")
+                       resp, err := http.DefaultClient.Do(req)
+                       if err != nil {
+                               return err
+                       }
+                       if hdr := resp.Header.Get("Access-Control-Allow-Origin"); hdr != "*" {
+                               return fmt.Errorf("expected \"Access-Control-Allow-Origin: *\", got %q", hdr)
+                       }
+                       return nil
+               })
+       }
+
+       var keeplist arvados.KeepServiceList
+       diag.dotest(60, "checking internal/external client detection", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               err := client.RequestAndDecodeContext(ctx, &keeplist, "GET", "arvados/v1/keep_services/accessible", nil, arvados.ListOptions{Limit: 999999})
+               if err != nil {
+                       return fmt.Errorf("error getting keep services list: %s", err)
+               } else if len(keeplist.Items) == 0 {
+                       return fmt.Errorf("controller did not return any keep services")
+               }
+               found := map[string]int{}
+               for _, ks := range keeplist.Items {
+                       found[ks.ServiceType]++
+               }
+               isInternal := found["proxy"] == 0 && len(keeplist.Items) > 0
+               isExternal := found["proxy"] > 0 && found["proxy"] == len(keeplist.Items)
+               if isExternal {
+                       diag.debugf("controller returned only proxy services, this host is treated as \"external\"")
+               } else if isInternal {
+                       diag.debugf("controller returned only non-proxy services, this host is treated as \"internal\"")
+               }
+               if (diag.checkInternal && !isInternal) || (diag.checkExternal && !isExternal) {
+                       return fmt.Errorf("expecting internal=%v external=%v, but found internal=%v external=%v", diag.checkInternal, diag.checkExternal, isInternal, isExternal)
+               }
+               return nil
+       })
+
+       for i, ks := range keeplist.Items {
+               u := url.URL{
+                       Scheme: "http",
+                       Host:   net.JoinHostPort(ks.ServiceHost, fmt.Sprintf("%d", ks.ServicePort)),
+                       Path:   "/",
+               }
+               if ks.ServiceSSLFlag {
+                       u.Scheme = "https"
+               }
+               diag.dotest(61+i, fmt.Sprintf("reading+writing via keep service at %s", u.String()), func() error {
+                       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+                       defer cancel()
+                       req, err := http.NewRequestWithContext(ctx, "PUT", u.String()+"d41d8cd98f00b204e9800998ecf8427e", nil)
+                       if err != nil {
+                               return err
+                       }
+                       req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+                       resp, err := http.DefaultClient.Do(req)
+                       if err != nil {
+                               return err
+                       }
+                       defer resp.Body.Close()
+                       body, err := ioutil.ReadAll(resp.Body)
+                       if err != nil {
+                               return fmt.Errorf("reading response body: %s", err)
+                       }
+                       loc := strings.TrimSpace(string(body))
+                       if !strings.HasPrefix(loc, "d41d8") {
+                               return fmt.Errorf("unexpected response from write: %q", body)
+                       }
+
+                       req, err = http.NewRequestWithContext(ctx, "GET", u.String()+loc, nil)
+                       if err != nil {
+                               return err
+                       }
+                       req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+                       resp, err = http.DefaultClient.Do(req)
+                       if err != nil {
+                               return err
+                       }
+                       defer resp.Body.Close()
+                       body, err = ioutil.ReadAll(resp.Body)
+                       if err != nil {
+                               return fmt.Errorf("reading response body: %s", err)
+                       }
+                       if len(body) != 0 {
+                               return fmt.Errorf("unexpected response from read: %q", body)
+                       }
+
+                       return nil
+               })
+       }
+
+       var project arvados.Group
+       diag.dotest(80, fmt.Sprintf("finding/creating %q project", diag.projectName), func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               var grplist arvados.GroupList
+               err := client.RequestAndDecodeContext(ctx, &grplist, "GET", "arvados/v1/groups", nil, arvados.ListOptions{
+                       Filters: []arvados.Filter{
+                               {"name", "=", diag.projectName},
+                               {"group_class", "=", "project"},
+                               {"owner_uuid", "=", user.UUID}},
+                       Limit: 999999})
+               if err != nil {
+                       return fmt.Errorf("list groups: %s", err)
+               }
+               if len(grplist.Items) > 0 {
+                       project = grplist.Items[0]
+                       diag.debugf("using existing project, uuid = %s", project.UUID)
+                       return nil
+               }
+               diag.debugf("list groups: ok, no results")
+               err = client.RequestAndDecodeContext(ctx, &project, "POST", "arvados/v1/groups", nil, map[string]interface{}{"group": map[string]interface{}{
+                       "name":        diag.projectName,
+                       "group_class": "project",
+               }})
+               if err != nil {
+                       return fmt.Errorf("create project: %s", err)
+               }
+               diag.debugf("created project, uuid = %s", project.UUID)
+               return nil
+       })
+
+       var collection arvados.Collection
+       diag.dotest(90, "creating temporary collection", func() error {
+               if project.UUID == "" {
+                       return fmt.Errorf("skipping, no project to work in")
+               }
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               err := client.RequestAndDecodeContext(ctx, &collection, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+                       "ensure_unique_name": true,
+                       "collection": map[string]interface{}{
+                               "owner_uuid": project.UUID,
+                               "name":       "test collection",
+                               "trash_at":   time.Now().Add(time.Hour)}})
+               if err != nil {
+                       return err
+               }
+               diag.debugf("ok, uuid = %s", collection.UUID)
+               return nil
+       })
+
+       if collection.UUID != "" {
+               defer func() {
+                       diag.dotest(9990, "deleting temporary collection", func() error {
+                               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+                               defer cancel()
+                               return client.RequestAndDecodeContext(ctx, nil, "DELETE", "arvados/v1/collections/"+collection.UUID, nil, nil)
+                       })
+               }()
+       }
+
+       diag.dotest(100, "uploading file via webdav", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               if collection.UUID == "" {
+                       return fmt.Errorf("skipping, no test collection")
+               }
+               req, err := http.NewRequestWithContext(ctx, "PUT", cluster.Services.WebDAVDownload.ExternalURL.String()+"c="+collection.UUID+"/testfile", bytes.NewBufferString("testfiledata"))
+               if err != nil {
+                       return fmt.Errorf("BUG? http.NewRequest: %s", err)
+               }
+               req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+               resp, err := http.DefaultClient.Do(req)
+               if err != nil {
+                       return fmt.Errorf("error performing http request: %s", err)
+               }
+               resp.Body.Close()
+               if resp.StatusCode != http.StatusCreated {
+                       return fmt.Errorf("status %s", resp.Status)
+               }
+               diag.debugf("ok, status %s", resp.Status)
+               err = client.RequestAndDecodeContext(ctx, &collection, "GET", "arvados/v1/collections/"+collection.UUID, nil, nil)
+               if err != nil {
+                       return fmt.Errorf("get updated collection: %s", err)
+               }
+               diag.debugf("ok, pdh %s", collection.PortableDataHash)
+               return nil
+       })
+
+       davurl := cluster.Services.WebDAV.ExternalURL
+       diag.dotest(110, fmt.Sprintf("checking WebDAV ExternalURL wildcard (%s)", davurl), func() error {
+               if davurl.Host == "" {
+                       return fmt.Errorf("host missing - content previews will not work")
+               }
+               if !strings.HasPrefix(davurl.Host, "*--") && !strings.HasPrefix(davurl.Host, "*.") && !cluster.Collections.TrustAllContent {
+                       diag.warnf("WebDAV ExternalURL has no leading wildcard and TrustAllContent==false - content previews will not work")
+               }
+               return nil
+       })
+
+       for i, trial := range []struct {
+               needcoll bool
+               status   int
+               fileurl  string
+       }{
+               {false, http.StatusNotFound, strings.Replace(davurl.String(), "*", "d41d8cd98f00b204e9800998ecf8427e-0", 1) + "foo"},
+               {false, http.StatusNotFound, strings.Replace(davurl.String(), "*", "d41d8cd98f00b204e9800998ecf8427e-0", 1) + "testfile"},
+               {false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=d41d8cd98f00b204e9800998ecf8427e+0/_/foo"},
+               {false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=d41d8cd98f00b204e9800998ecf8427e+0/_/testfile"},
+               {true, http.StatusOK, strings.Replace(davurl.String(), "*", strings.Replace(collection.PortableDataHash, "+", "-", -1), 1) + "testfile"},
+               {true, http.StatusOK, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=" + collection.UUID + "/_/testfile"},
+       } {
+               diag.dotest(120+i, fmt.Sprintf("downloading from webdav (%s)", trial.fileurl), func() error {
+                       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+                       defer cancel()
+                       if trial.needcoll && collection.UUID == "" {
+                               return fmt.Errorf("skipping, no test collection")
+                       }
+                       req, err := http.NewRequestWithContext(ctx, "GET", trial.fileurl, nil)
+                       if err != nil {
+                               return err
+                       }
+                       req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+                       resp, err := http.DefaultClient.Do(req)
+                       if err != nil {
+                               return err
+                       }
+                       defer resp.Body.Close()
+                       body, err := ioutil.ReadAll(resp.Body)
+                       if err != nil {
+                               return fmt.Errorf("reading response: %s", err)
+                       }
+                       if resp.StatusCode != trial.status {
+                               return fmt.Errorf("unexpected response status: %s", resp.Status)
+                       }
+                       if trial.status == http.StatusOK && string(body) != "testfiledata" {
+                               return fmt.Errorf("unexpected response content: %q", body)
+                       }
+                       return nil
+               })
+       }
+
+       var vm arvados.VirtualMachine
+       diag.dotest(130, "getting list of virtual machines", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               var vmlist arvados.VirtualMachineList
+               err := client.RequestAndDecodeContext(ctx, &vmlist, "GET", "arvados/v1/virtual_machines", nil, arvados.ListOptions{Limit: 999999})
+               if err != nil {
+                       return err
+               }
+               if len(vmlist.Items) < 1 {
+                       return fmt.Errorf("no VMs found")
+               }
+               vm = vmlist.Items[0]
+               return nil
+       })
+
+       diag.dotest(140, "getting workbench1 webshell page", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               if vm.UUID == "" {
+                       return fmt.Errorf("skipping, no vm available")
+               }
+               webshelltermurl := cluster.Services.Workbench1.ExternalURL.String() + "virtual_machines/" + vm.UUID + "/webshell/testusername"
+               diag.debugf("url %s", webshelltermurl)
+               req, err := http.NewRequestWithContext(ctx, "GET", webshelltermurl, nil)
+               if err != nil {
+                       return err
+               }
+               req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+               resp, err := http.DefaultClient.Do(req)
+               if err != nil {
+                       return err
+               }
+               defer resp.Body.Close()
+               body, err := ioutil.ReadAll(resp.Body)
+               if err != nil {
+                       return fmt.Errorf("reading response: %s", err)
+               }
+               if resp.StatusCode != http.StatusOK {
+                       return fmt.Errorf("unexpected response status: %s %q", resp.Status, body)
+               }
+               return nil
+       })
+
+       diag.dotest(150, "connecting to webshell service", func() error {
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+               if vm.UUID == "" {
+                       return fmt.Errorf("skipping, no vm available")
+               }
+               u := cluster.Services.WebShell.ExternalURL
+               webshellurl := u.String() + vm.Hostname + "?"
+               if strings.HasPrefix(u.Host, "*") {
+                       u.Host = vm.Hostname + u.Host[1:]
+                       webshellurl = u.String() + "?"
+               }
+               diag.debugf("url %s", webshellurl)
+               req, err := http.NewRequestWithContext(ctx, "POST", webshellurl, bytes.NewBufferString(url.Values{
+                       "width":   {"80"},
+                       "height":  {"25"},
+                       "session": {"xyzzy"},
+                       "rooturl": {webshellurl},
+               }.Encode()))
+               if err != nil {
+                       return err
+               }
+               req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
+               resp, err := http.DefaultClient.Do(req)
+               if err != nil {
+                       return err
+               }
+               defer resp.Body.Close()
+               diag.debugf("response status %s", resp.Status)
+               body, err := ioutil.ReadAll(resp.Body)
+               if err != nil {
+                       return fmt.Errorf("reading response: %s", err)
+               }
+               diag.debugf("response body %q", body)
+               // We don't speak the protocol, so we get a 400 error
+               // from the webshell server even if everything is
+               // OK. Anything else (404, 502, ???) indicates a
+               // problem.
+               if resp.StatusCode != http.StatusBadRequest {
+                       return fmt.Errorf("unexpected response status: %s, %q", resp.Status, body)
+               }
+               return nil
+       })
+
+       diag.dotest(160, "running a container", func() error {
+               if diag.priority < 1 {
+                       diag.infof("skipping (use priority > 0 if you want to run a container)")
+                       return nil
+               }
+               if project.UUID == "" {
+                       return fmt.Errorf("skipping, no project to work in")
+               }
+
+               var cr arvados.ContainerRequest
+               ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+               defer cancel()
+
+               timestamp := time.Now().Format(time.RFC3339)
+               err := client.RequestAndDecodeContext(ctx, &cr, "POST", "arvados/v1/container_requests", nil, map[string]interface{}{"container_request": map[string]interface{}{
+                       "owner_uuid":      project.UUID,
+                       "name":            fmt.Sprintf("diagnostics container request %s", timestamp),
+                       "container_image": "arvados/jobs",
+                       "command":         []string{"echo", timestamp},
+                       "use_existing":    false,
+                       "output_path":     "/mnt/output",
+                       "output_name":     fmt.Sprintf("diagnostics output %s", timestamp),
+                       "priority":        diag.priority,
+                       "state":           arvados.ContainerRequestStateCommitted,
+                       "mounts": map[string]map[string]interface{}{
+                               "/mnt/output": {
+                                       "kind":     "collection",
+                                       "writable": true,
+                               },
+                       },
+                       "runtime_constraints": arvados.RuntimeConstraints{
+                               VCPUs:        1,
+                               RAM:          1 << 26,
+                               KeepCacheRAM: 1 << 26,
+                       },
+               }})
+               if err != nil {
+                       return err
+               }
+               diag.debugf("container request uuid = %s", cr.UUID)
+               diag.debugf("container uuid = %s", cr.ContainerUUID)
+
+               timeout := 10 * time.Minute
+               diag.infof("container request submitted, waiting up to %v for container to run", arvados.Duration(timeout))
+               ctx, cancel = context.WithDeadline(context.Background(), time.Now().Add(timeout))
+               defer cancel()
+
+               var c arvados.Container
+               for ; cr.State != arvados.ContainerRequestStateFinal; time.Sleep(2 * time.Second) {
+                       ctx, cancel := context.WithDeadline(ctx, time.Now().Add(diag.timeout))
+                       defer cancel()
+
+                       crStateWas := cr.State
+                       err := client.RequestAndDecodeContext(ctx, &cr, "GET", "arvados/v1/container_requests/"+cr.UUID, nil, nil)
+                       if err != nil {
+                               return err
+                       }
+                       if cr.State != crStateWas {
+                               diag.debugf("container request state = %s", cr.State)
+                       }
+
+                       cStateWas := c.State
+                       err = client.RequestAndDecodeContext(ctx, &c, "GET", "arvados/v1/containers/"+cr.ContainerUUID, nil, nil)
+                       if err != nil {
+                               return err
+                       }
+                       if c.State != cStateWas {
+                               diag.debugf("container state = %s", c.State)
+                       }
+               }
+
+               if c.State != arvados.ContainerStateComplete {
+                       return fmt.Errorf("container request %s is final but container %s did not complete: container state = %q", cr.UUID, cr.ContainerUUID, c.State)
+               } else if c.ExitCode != 0 {
+                       return fmt.Errorf("container exited %d", c.ExitCode)
+               }
+               return nil
+       })
+}
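
The new diagnostics command wraps each check in diag.dotest(id, title, func() error), giving every check its own numeric id and context deadline. The helper's definition is outside the hunks shown here, so the sketch below is only a guess at its general shape; the diagnoser fields and output format are assumptions, not the committed implementation.

    // Minimal sketch of a dotest-style runner (assumed shape, not the
    // committed code): print a numbered title, run the check, record
    // any failure.
    package main

    import (
        "fmt"
        "time"
    )

    type diagnoser struct {
        timeout time.Duration // per-check deadline, used via context.WithDeadline in the real checks
        errors  []string
    }

    func (diag *diagnoser) dotest(id int, title string, fn func() error) {
        fmt.Printf("%4d: %s ... ", id, title)
        t0 := time.Now()
        if err := fn(); err != nil {
            fmt.Printf("FAIL (%v): %s\n", time.Since(t0), err)
            diag.errors = append(diag.errors, fmt.Sprintf("%4d: %s", id, err))
            return
        }
        fmt.Printf("OK (%v)\n", time.Since(t0))
    }

    func main() {
        diag := &diagnoser{timeout: 10 * time.Second}
        diag.dotest(10, "trivial check", func() error { return nil })
        diag.dotest(20, "failing check", func() error { return fmt.Errorf("boom") })
        if len(diag.errors) > 0 {
            fmt.Printf("%d check(s) failed\n", len(diag.errors))
        }
    }
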
index 2478641df5478639c92ff2b4d0f57d847165eee7..65c207162b0f8590f463097faa3b7de25043f5fc 100644 (file)
@@ -29,12 +29,29 @@ var (
        ErrIsDirectory       = errors.New("cannot rename file to overwrite existing directory")
        ErrNotADirectory     = errors.New("not a directory")
        ErrPermission        = os.ErrPermission
+       DebugLocksPanicMode  = false
 )
 
 type syncer interface {
        Sync() error
 }
 
+func debugPanicIfNotLocked(l sync.Locker) {
+       if !DebugLocksPanicMode {
+               return
+       }
+       race := false
+       go func() {
+               l.Lock()
+               race = true
+               l.Unlock()
+       }()
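+       // A brief pause gives the goroutine above a chance to observe
+       // whether the lock is free; it is kept very short because this
+       // check runs on every caller while DebugLocksPanicMode is set.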
+       time.Sleep(10)
+       if race {
+               panic("bug: caller-must-have-lock func called, but nobody has lock")
+       }
+}
+
 // A File is an *os.File-like interface for reading and writing files
 // in a FileSystem.
 type File interface {
@@ -271,6 +288,7 @@ func (n *treenode) IsDir() bool {
 }
 
 func (n *treenode) Child(name string, replace func(inode) (inode, error)) (child inode, err error) {
+       debugPanicIfNotLocked(n)
        child = n.inodes[name]
        if name == "" || name == "." || name == ".." {
                err = ErrInvalidArgument
@@ -333,6 +351,7 @@ func (n *treenode) Sync() error {
 func (n *treenode) MemorySize() (size int64) {
        n.RLock()
        defer n.RUnlock()
+       debugPanicIfNotLocked(n)
        for _, inode := range n.inodes {
                size += inode.MemorySize()
        }
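
The debugPanicIfNotLocked helper enforces a caller-must-have-lock convention when DebugLocksPanicMode is on: it probes the lock from a second goroutine, and if the probe can take the lock right away, the caller evidently was not holding it. Below is a standalone sketch of the same idea; it uses a channel rather than the shared boolean so the sketch itself stays clean under the race detector, and the 10ms probe window and toy node type are illustrative choices, not the SDK's.

    // Standalone sketch of a caller-must-have-lock check.
    package main

    import (
        "sync"
        "time"
    )

    var debugLocksPanicMode = true

    func debugPanicIfNotLocked(l sync.Locker) {
        if !debugLocksPanicMode {
            return
        }
        acquired := make(chan struct{})
        go func() {
            l.Lock() // blocks for as long as the caller holds the lock
            l.Unlock()
            close(acquired)
        }()
        select {
        case <-acquired:
            // The probe got the lock immediately, so the caller was
            // evidently not holding it.
            panic("bug: caller-must-have-lock func called, but nobody has lock")
        case <-time.After(10 * time.Millisecond):
            // Most likely locked by the caller; the probe finishes
            // harmlessly whenever the lock is released.
        }
    }

    type node struct {
        sync.Mutex
        children map[string]string
    }

    // child must be called with n's lock held.
    func (n *node) child(name string) string {
        debugPanicIfNotLocked(n)
        return n.children[name]
    }

    func main() {
        n := &node{children: map[string]string{"a": "x"}}
        n.Lock()
        _ = n.child("a") // fine: lock held
        n.Unlock()
        // n.child("a")  // would panic: nobody holds the lock
    }
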
index 0233826a7281e9aa95f5dbb9f74e93ddb1bfd473..22e2b31d57e08d6c5dc813017c62b950f61aac01 100644 (file)
@@ -1167,9 +1167,12 @@ func (dn *dirnode) createFileAndParents(path string) (fn *filenode, err error) {
                        node = node.Parent()
                        continue
                }
+               modtime := node.Parent().FileInfo().ModTime()
+               node.Lock()
+               locked := node
                node, err = node.Child(name, func(child inode) (inode, error) {
                        if child == nil {
-                               child, err := node.FS().newNode(name, 0755|os.ModeDir, node.Parent().FileInfo().ModTime())
+                               child, err := node.FS().newNode(name, 0755|os.ModeDir, modtime)
                                if err != nil {
                                        return nil, err
                                }
@@ -1181,6 +1184,7 @@ func (dn *dirnode) createFileAndParents(path string) (fn *filenode, err error) {
                                return child, nil
                        }
                })
+               locked.Unlock()
                if err != nil {
                        return
                }
@@ -1191,10 +1195,13 @@ func (dn *dirnode) createFileAndParents(path string) (fn *filenode, err error) {
                err = fmt.Errorf("invalid file part %q in path %q", basename, path)
                return
        }
+       modtime := node.FileInfo().ModTime()
+       node.Lock()
+       defer node.Unlock()
        _, err = node.Child(basename, func(child inode) (inode, error) {
                switch child := child.(type) {
                case nil:
-                       child, err = node.FS().newNode(basename, 0755, node.FileInfo().ModTime())
+                       child, err = node.FS().newNode(basename, 0755, modtime)
                        if err != nil {
                                return nil, err
                        }
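
Both hunks above follow one pattern: metadata that lives on other nodes (the parent's modification time) is read before this node's lock is taken, and the lock is then held across Child(), which now expects its caller to hold it. The sketch below, with stand-in types rather than the SDK's inode/dirnode, shows why the ordering matters; reading the parent's metadata while already holding the node's lock would deadlock whenever parent and node are the same object.

    // Sketch of the ordering used above: read other nodes' metadata
    // first, then lock this node and call the caller-must-have-lock
    // child lookup.
    package main

    import (
        "sync"
        "time"
    )

    type toynode struct {
        sync.Mutex
        modtime  time.Time
        children map[string]*toynode
    }

    func (n *toynode) ModTime() time.Time {
        n.Lock()
        defer n.Unlock()
        return n.modtime
    }

    // child must be called with n's lock held.
    func (n *toynode) child(name string, replace func(*toynode) *toynode) *toynode {
        c := replace(n.children[name])
        n.children[name] = c
        return c
    }

    func mkdirIn(parent, n *toynode, name string) *toynode {
        modtime := parent.ModTime() // read before locking n, not inside the callback
        n.Lock()
        defer n.Unlock()
        return n.child(name, func(existing *toynode) *toynode {
            if existing != nil {
                return existing
            }
            return &toynode{modtime: modtime, children: map[string]*toynode{}}
        })
    }

    func main() {
        root := &toynode{modtime: time.Now(), children: map[string]*toynode{}}
        // Calling parent.ModTime() after n.Lock() would deadlock here,
        // because parent and n are the same node.
        _ = mkdirIn(root, root, "dir")
    }
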
index 900893aa36420e7c9d2008fff31b36d4bd03e0bf..5225df59ee58ec4c2017054f59ca68ee7936e41e 100644 (file)
@@ -54,6 +54,8 @@ func (c *Client) CustomFileSystem(kc keepClient) CustomFileSystem {
 }
 
 func (fs *customFileSystem) MountByID(mount string) {
+       fs.root.treenode.Lock()
+       defer fs.root.treenode.Unlock()
        fs.root.treenode.Child(mount, func(inode) (inode, error) {
                return &vdirnode{
                        treenode: treenode{
@@ -72,12 +74,16 @@ func (fs *customFileSystem) MountByID(mount string) {
 }
 
 func (fs *customFileSystem) MountProject(mount, uuid string) {
+       fs.root.treenode.Lock()
+       defer fs.root.treenode.Unlock()
        fs.root.treenode.Child(mount, func(inode) (inode, error) {
                return fs.newProjectNode(fs.root, mount, uuid), nil
        })
 }
 
 func (fs *customFileSystem) MountUsers(mount string) {
+       fs.root.treenode.Lock()
+       defer fs.root.treenode.Unlock()
        fs.root.treenode.Child(mount, func(inode) (inode, error) {
                return &lookupnode{
                        stale:   fs.Stale,
index b1c627f89c9e43c198dbd7a2a59f059c3cb01372..dc432114a60ec58e4925be28ddbbc227ce4188e9 100644 (file)
@@ -32,6 +32,15 @@ const (
 
 var _ = check.Suite(&SiteFSSuite{})
 
+func init() {
+       // Enable DebugLocksPanicMode sometimes. Don't enable it all
+       // the time, though -- it adds many calls to time.Sleep(),
+       // which could hide different bugs.
+       if time.Now().Second()&1 == 0 {
+               DebugLocksPanicMode = true
+       }
+}
+
 type SiteFSSuite struct {
        client *Client
        fs     CustomFileSystem
index 9596a2dc2d26efcddebbd7921e32ff38ebdb7352..4f0ac1fffaf9c4ee6875c0092ba1090d6e3eb9a6 100644 (file)
@@ -173,7 +173,8 @@ Follow file and directory symlinks (default).
 """)
 _group.add_argument('--no-follow-links', action='store_false', dest='follow_links',
                     help="""
-Do not follow file and directory symlinks.
+Ignore file and directory symlinks. Even paths given explicitly on the
+command line will be skipped if they are symlinks.
 """)
 
 
@@ -259,9 +260,8 @@ def parse_arguments(arguments):
 
     args.paths = ["-" if x == "/dev/stdin" else x for x in args.paths]
 
-    if len(args.paths) != 1 or os.path.isdir(args.paths[0]):
-        if args.filename:
-            arg_parser.error("""
+    if args.filename and (len(args.paths) != 1 or os.path.isdir(args.paths[0])):
+        arg_parser.error("""
     --filename argument cannot be used when storing a directory or
     multiple files.
     """)
@@ -525,6 +525,9 @@ class ArvPutUploadJob(object):
                 self._write_stdin(self.filename or 'stdin')
             elif not os.path.exists(path):
                  raise PathDoesNotExistError(u"file or directory '{}' does not exist.".format(path))
+            elif (not self.follow_links) and os.path.islink(path):
+                self.logger.warning("Skipping symlink '{}'".format(path))
+                continue
             elif os.path.isdir(path):
                 # Use absolute paths on cache index so CWD doesn't interfere
                 # with the caching logic.
@@ -656,15 +659,14 @@ class ArvPutUploadJob(object):
                 else:
                     # The file already exist on remote collection, skip it.
                     pass
-            self._remote_collection.save(storage_classes=self.storage_classes,
-                                         num_retries=self.num_retries,
+            self._remote_collection.save(num_retries=self.num_retries,
                                          trash_at=self._collection_trash_at())
         else:
-            if self.storage_classes is None:
-                self.storage_classes = ['default']
+            if len(self._local_collection) == 0:
+                self.logger.warning("No files were uploaded, skipping collection creation.")
+                return
             self._local_collection.save_new(
                 name=self.name, owner_uuid=self.owner_uuid,
-                storage_classes=self.storage_classes,
                 ensure_unique_name=self.ensure_unique_name,
                 num_retries=self.num_retries,
                 trash_at=self._collection_trash_at())
@@ -868,6 +870,7 @@ class ArvPutUploadJob(object):
                 self._remote_collection = arvados.collection.Collection(
                     update_collection,
                     api_client=self._api_client,
+                    storage_classes_desired=self.storage_classes,
                     num_retries=self.num_retries)
             except arvados.errors.ApiError as error:
                 raise CollectionUpdateError("Cannot read collection {} ({})".format(update_collection, error))
@@ -910,6 +913,7 @@ class ArvPutUploadJob(object):
             self._local_collection = arvados.collection.Collection(
                 self._state['manifest'],
                 replication_desired=self.replication_desired,
+                storage_classes_desired=(self.storage_classes or ['default']),
                 put_threads=self.put_threads,
                 api_client=self._api_client,
                 num_retries=self.num_retries)
@@ -1197,11 +1201,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
     #  Split storage-classes argument
     storage_classes = None
     if args.storage_classes:
-        storage_classes = args.storage_classes.strip().split(',')
-        if len(storage_classes) > 1:
-            logger.error("Multiple storage classes are not supported currently.")
-            sys.exit(1)
-
+        storage_classes = args.storage_classes.strip().replace(' ', '').split(',')
 
     # Setup exclude regex from all the --exclude arguments provided
     name_patterns = []
@@ -1316,7 +1316,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
             output = writer.manifest_text()
     elif args.raw:
         output = ','.join(writer.data_locators())
-    else:
+    elif writer.manifest_locator() is not None:
         try:
             expiration_notice = ""
             if writer.collection_trash_at() is not None:
@@ -1342,6 +1342,8 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
                 "arv-put: Error creating Collection on project: {}.".format(
                     error))
             status = 1
+    else:
+        status = 1
 
     # Print the locator (uuid) of the new collection.
     if output is None:
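
With this change, --storage-classes is split on commas after stripping spaces, so " foo, bar  ,baz" yields three classes and the old single-class restriction is gone. A small sketch of the same parsing rule follows, written in Go like the rest of this patch's new code; the function name is illustrative only.

    // Sketch of the comma-splitting rule: strip spaces, then split.
    package main

    import (
        "fmt"
        "strings"
    )

    func parseStorageClasses(arg string) []string {
        arg = strings.ReplaceAll(strings.TrimSpace(arg), " ", "")
        if arg == "" {
            return nil
        }
        return strings.Split(arg, ",")
    }

    func main() {
        fmt.Println(parseStorageClasses(" foo, bar  ,baz")) // [foo bar baz]
    }
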
index e75d39d8793ffb6032a8591a37d9d5da2168ba72..fac970c95a6fdb13ecbef709ff850798c67840a5 100644 (file)
@@ -295,6 +295,8 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
         shutil.rmtree(self.tempdir_with_symlink)
 
     def test_symlinks_are_followed_by_default(self):
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkedfile')))
         cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink])
         cwriter.start(save_collection=False)
         self.assertIn('linkeddir', cwriter.manifest_text())
@@ -302,12 +304,29 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
         cwriter.destroy_cache()
 
     def test_symlinks_are_not_followed_when_requested(self):
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkedfile')))
         cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink],
                                           follow_links=False)
         cwriter.start(save_collection=False)
         self.assertNotIn('linkeddir', cwriter.manifest_text())
         self.assertNotIn('linkedfile', cwriter.manifest_text())
         cwriter.destroy_cache()
+        # Check for bug #17800: passed symlinks should also be ignored.
+        linked_dir = os.path.join(self.tempdir_with_symlink, 'linkeddir')
+        cwriter = arv_put.ArvPutUploadJob([linked_dir], follow_links=False)
+        cwriter.start(save_collection=False)
+        self.assertNotIn('linkeddir', cwriter.manifest_text())
+        cwriter.destroy_cache()
+
+    def test_no_empty_collection_saved(self):
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
+        linked_dir = os.path.join(self.tempdir_with_symlink, 'linkeddir')
+        cwriter = arv_put.ArvPutUploadJob([linked_dir], follow_links=False)
+        cwriter.start(save_collection=True)
+        self.assertIsNone(cwriter.manifest_locator())
+        self.assertEqual('', cwriter.manifest_text())
+        cwriter.destroy_cache()
 
     def test_passing_nonexistant_path_raise_exception(self):
         uuid_str = str(uuid.uuid4())
@@ -813,11 +832,6 @@ class ArvadosPutTest(run_test_server.TestCaseWithServers,
                           self.call_main_with_args,
                           ['--project-uuid', self.Z_UUID, '--stream'])
 
-    def test_error_when_multiple_storage_classes_specified(self):
-        self.assertRaises(SystemExit,
-                          self.call_main_with_args,
-                          ['--storage-classes', 'hot,cold'])
-
     def test_error_when_excluding_absolute_path(self):
         tmpdir = self.make_tmpdir()
         self.assertRaises(SystemExit,
@@ -1315,13 +1329,16 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
 
     def test_put_collection_with_storage_classes_specified(self):
         collection = self.run_and_find_collection("", ['--storage-classes', 'hot'])
-
         self.assertEqual(len(collection['storage_classes_desired']), 1)
         self.assertEqual(collection['storage_classes_desired'][0], 'hot')
 
+    def test_put_collection_with_multiple_storage_classes_specified(self):
+        collection = self.run_and_find_collection("", ['--storage-classes', ' foo, bar  ,baz'])
+        self.assertEqual(len(collection['storage_classes_desired']), 3)
+        self.assertEqual(collection['storage_classes_desired'], ['foo', 'bar', 'baz'])
+
     def test_put_collection_without_storage_classes_specified(self):
         collection = self.run_and_find_collection("")
-
         self.assertEqual(len(collection['storage_classes_desired']), 1)
         self.assertEqual(collection['storage_classes_desired'][0], 'default')
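
The follow_links change earlier in this patch (and the #17800 tests above in this file) means a path named explicitly on the command line is skipped when it is a symlink and --no-follow-links is in effect. Here is a sketch of that rule in Go; Lstat is used so the link itself, not its target, is examined, and the helper and demo names are illustrative only.

    // Sketch of the follow-links rule: with follow-links disabled, an
    // explicitly named path is skipped if it is a symlink.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func skipAsSymlink(path string, followLinks bool) (bool, error) {
        if followLinks {
            return false, nil
        }
        fi, err := os.Lstat(path)
        if err != nil {
            return false, err
        }
        return fi.Mode()&os.ModeSymlink != 0, nil
    }

    func main() {
        dir, err := os.MkdirTemp("", "symlinkdemo")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)
        target := filepath.Join(dir, "realfile")
        link := filepath.Join(dir, "linkedfile")
        os.WriteFile(target, []byte("data"), 0o644)
        os.Symlink(target, link)
        for _, p := range []string{target, link} {
            skip, _ := skipAsSymlink(p, false)
            fmt.Println(filepath.Base(p), "skip:", skip)
        }
    }
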
 
index 6a26ffd494199ba04dcc9026a11434b586ff5d0d..992ff39c099a1c462e9fa3de7bd08a48990439f0 100644 (file)
@@ -8,43 +8,43 @@ GIT
 GEM
   remote: https://rubygems.org/
   specs:
-    actioncable (5.2.4.5)
-      actionpack (= 5.2.4.5)
+    actioncable (5.2.6)
+      actionpack (= 5.2.6)
       nio4r (~> 2.0)
       websocket-driver (>= 0.6.1)
-    actionmailer (5.2.4.5)
-      actionpack (= 5.2.4.5)
-      actionview (= 5.2.4.5)
-      activejob (= 5.2.4.5)
+    actionmailer (5.2.6)
+      actionpack (= 5.2.6)
+      actionview (= 5.2.6)
+      activejob (= 5.2.6)
       mail (~> 2.5, >= 2.5.4)
       rails-dom-testing (~> 2.0)
-    actionpack (5.2.4.5)
-      actionview (= 5.2.4.5)
-      activesupport (= 5.2.4.5)
+    actionpack (5.2.6)
+      actionview (= 5.2.6)
+      activesupport (= 5.2.6)
       rack (~> 2.0, >= 2.0.8)
       rack-test (>= 0.6.3)
       rails-dom-testing (~> 2.0)
       rails-html-sanitizer (~> 1.0, >= 1.0.2)
-    actionview (5.2.4.5)
-      activesupport (= 5.2.4.5)
+    actionview (5.2.6)
+      activesupport (= 5.2.6)
       builder (~> 3.1)
       erubi (~> 1.4)
       rails-dom-testing (~> 2.0)
       rails-html-sanitizer (~> 1.0, >= 1.0.3)
-    activejob (5.2.4.5)
-      activesupport (= 5.2.4.5)
+    activejob (5.2.6)
+      activesupport (= 5.2.6)
       globalid (>= 0.3.6)
-    activemodel (5.2.4.5)
-      activesupport (= 5.2.4.5)
-    activerecord (5.2.4.5)
-      activemodel (= 5.2.4.5)
-      activesupport (= 5.2.4.5)
+    activemodel (5.2.6)
+      activesupport (= 5.2.6)
+    activerecord (5.2.6)
+      activemodel (= 5.2.6)
+      activesupport (= 5.2.6)
       arel (>= 9.0)
-    activestorage (5.2.4.5)
-      actionpack (= 5.2.4.5)
-      activerecord (= 5.2.4.5)
-      marcel (~> 0.3.1)
-    activesupport (5.2.4.5)
+    activestorage (5.2.6)
+      actionpack (= 5.2.6)
+      activerecord (= 5.2.6)
+      marcel (~> 1.0.0)
+    activesupport (5.2.6)
       concurrent-ruby (~> 1.0, >= 1.0.2)
       i18n (>= 0.7, < 2)
       minitest (~> 5.1)
@@ -90,7 +90,7 @@ GEM
       net-sftp (>= 2.0.0)
       net-ssh (>= 2.0.14)
       net-ssh-gateway (>= 1.1.0)
-    concurrent-ruby (1.1.8)
+    concurrent-ruby (1.1.9)
     crass (1.0.6)
     erubi (1.10.0)
     execjs (2.7.0)
@@ -135,20 +135,17 @@ GEM
       railties (>= 4)
       request_store (~> 1.0)
     logstash-event (1.2.02)
-    loofah (2.9.0)
+    loofah (2.10.0)
       crass (~> 1.0.2)
       nokogiri (>= 1.5.9)
     mail (2.7.1)
       mini_mime (>= 0.1.1)
-    marcel (0.3.3)
-      mimemagic (~> 0.3.2)
+    marcel (1.0.1)
     memoist (0.16.2)
     metaclass (0.0.4)
     method_source (1.0.0)
-    mimemagic (0.3.8)
-      nokogiri (~> 1)
-    mini_mime (1.0.2)
-    mini_portile2 (2.5.1)
+    mini_mime (1.1.0)
+    mini_portile2 (2.5.3)
     minitest (5.10.3)
     mocha (1.8.0)
       metaclass (~> 0.0.1)
@@ -164,7 +161,7 @@ GEM
     net-ssh-gateway (2.0.0)
       net-ssh (>= 4.0.0)
     nio4r (2.5.7)
-    nokogiri (1.11.5)
+    nokogiri (1.11.7)
       mini_portile2 (~> 2.5.0)
       racc (~> 1.4)
     oauth2 (1.4.1)
@@ -192,18 +189,18 @@ GEM
     rack (2.2.3)
     rack-test (1.1.0)
       rack (>= 1.0, < 3)
-    rails (5.2.4.5)
-      actioncable (= 5.2.4.5)
-      actionmailer (= 5.2.4.5)
-      actionpack (= 5.2.4.5)
-      actionview (= 5.2.4.5)
-      activejob (= 5.2.4.5)
-      activemodel (= 5.2.4.5)
-      activerecord (= 5.2.4.5)
-      activestorage (= 5.2.4.5)
-      activesupport (= 5.2.4.5)
+    rails (5.2.6)
+      actioncable (= 5.2.6)
+      actionmailer (= 5.2.6)
+      actionpack (= 5.2.6)
+      actionview (= 5.2.6)
+      activejob (= 5.2.6)
+      activemodel (= 5.2.6)
+      activerecord (= 5.2.6)
+      activestorage (= 5.2.6)
+      activesupport (= 5.2.6)
       bundler (>= 1.3.0)
-      railties (= 5.2.4.5)
+      railties (= 5.2.6)
       sprockets-rails (>= 2.0.0)
     rails-controller-testing (1.0.4)
       actionpack (>= 5.0.1.x)
@@ -217,9 +214,9 @@ GEM
     rails-observers (0.1.5)
       activemodel (>= 4.0)
     rails-perftest (0.0.7)
-    railties (5.2.4.5)
-      actionpack (= 5.2.4.5)
-      activesupport (= 5.2.4.5)
+    railties (5.2.6)
+      actionpack (= 5.2.6)
+      activesupport (= 5.2.6)
       method_source
       rake (>= 0.8.7)
       thor (>= 0.19.0, < 2.0)
@@ -281,7 +278,7 @@ GEM
     uglifier (2.7.2)
       execjs (>= 0.3.0)
       json (>= 1.8.0)
-    websocket-driver (0.7.3)
+    websocket-driver (0.7.4)
       websocket-extensions (>= 0.1.0)
     websocket-extensions (0.1.5)
 
index 837f2cdc7010eaacc1182f868e9fb01084367ddb..e712acc6e9c37f85e5d9e40e7f5ec1990e0b947e 100644 (file)
@@ -394,6 +394,20 @@ class ContainerRequest < ArvadosModel
     if self.new_record? || self.state_was == Uncommitted
       # Allow create-and-commit in a single operation.
       permitted.push(*AttrsPermittedBeforeCommit)
+    elsif mounts_changed? && mounts_was.keys == mounts.keys
+      # Ignore the updated mounts if the only changes are default/zero
+      # values added by the controller (see #17774).
+      only_defaults = true
+      mounts.each do |path, mount|
+        (mount.to_a - mounts_was[path].to_a).each do |k, v|
+          if !["", false, nil].include?(v)
+            only_defaults = false
+          end
+        end
+      end
+      if only_defaults
+        clear_attribute_change("mounts")
+      end
     end
 
     case self.state
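
The new elsif branch treats a mounts update as a no-op when the mount paths are unchanged and every added or altered value is an empty string, false, or nil, i.e. the defaults a round trip through the controller fills in. A Go sketch of the same decision follows; the function name and the DeepEqual-based comparison are illustrative, not the model code.

    // Go sketch of the "defaults only" rule, for illustration: a mounts
    // update counts as defaults-only when every key that is new or
    // different now holds "", false, or nil. The real check also
    // requires the set of mount paths to be unchanged.
    package main

    import (
        "fmt"
        "reflect"
    )

    func onlyDefaultsAdded(was, now map[string]map[string]interface{}) bool {
        for path, mount := range now {
            old := was[path]
            for k, v := range mount {
                if oldv, ok := old[k]; ok && reflect.DeepEqual(oldv, v) {
                    continue // value unchanged
                }
                switch v {
                case "", false, nil:
                    // default/zero value filled in by the controller: ignore
                default:
                    return false
                }
            }
        }
        return true
    }

    func main() {
        was := map[string]map[string]interface{}{
            "/out": {"kind": "tmp", "capacity": 1000000},
        }
        now := map[string]map[string]interface{}{
            "/out": {"kind": "tmp", "capacity": 1000000, "exclude_from_output": false},
        }
        fmt.Println(onlyDefaultsAdded(was, now)) // true: only a default was added
    }
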
index 1c626050291d6247716b3111ee5fcab7dcc0b814..5be132ac30933187b998dd94b9ff975c27ee9988 100644 (file)
@@ -37,10 +37,13 @@ running:
   output_path: test
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-runningcontainr
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
   runtime_constraints:
     vcpus: 1
     ram: 123
-  mounts: {}
 
 requester_for_running:
   uuid: zzzzz-xvhdp-req4runningcntr
@@ -58,10 +61,13 @@ requester_for_running:
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-logscontainer03
   requesting_container_uuid: zzzzz-dz642-runningcontainr
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
   runtime_constraints:
     vcpus: 1
     ram: 123
-  mounts: {}
 
 running_older:
   uuid: zzzzz-xvhdp-cr4runningcntn2
@@ -78,10 +84,13 @@ running_older:
   output_path: test
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-runningcontain2
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
   runtime_constraints:
     vcpus: 1
     ram: 123
-  mounts: {}
 
 completed:
   uuid: zzzzz-xvhdp-cr4completedctr
@@ -429,10 +438,13 @@ running_anonymous_accessible:
   output_path: test
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-runningcontain2
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
   runtime_constraints:
     vcpus: 1
     ram: 123
-  mounts: {}
 
 cr_for_failed:
   uuid: zzzzz-xvhdp-cr4failedcontnr
@@ -509,10 +521,13 @@ canceled_with_running_container:
   output_path: test
   command: ["echo", "hello"]
   container_uuid: zzzzz-dz642-runningcontainr
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
   runtime_constraints:
     vcpus: 1
     ram: 123
-  mounts: {}
 
 running_to_be_deleted:
   uuid: zzzzz-xvhdp-cr5runningcntnr
@@ -528,11 +543,14 @@ running_to_be_deleted:
   cwd: test
   output_path: test
   command: ["echo", "hello"]
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
   container_uuid: zzzzz-dz642-runnincntrtodel
   runtime_constraints:
     vcpus: 1
     ram: 123
-  mounts: {}
 
 completed_with_input_mounts:
   uuid: zzzzz-xvhdp-crwithinputmnts
index b7d082771a0b37f2c3760bae23c46591805e07ef..e7cd0abd1fefb4751311cd9f91ba6f3ce572ae99 100644 (file)
@@ -33,12 +33,16 @@ running:
   updated_at: <%= 1.minute.ago.to_s(:db) %>
   started_at: <%= 1.minute.ago.to_s(:db) %>
   container_image: test
-  cwd: test
-  output_path: test
+  cwd: /tmp
+  output_path: /tmp
   command: ["echo", "hello"]
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
   secret_mounts:
     /secret/6x9:
       kind: text
@@ -55,9 +59,13 @@ running_older:
   updated_at: <%= 2.minute.ago.to_s(:db) %>
   started_at: <%= 2.minute.ago.to_s(:db) %>
   container_image: test
-  cwd: test
-  output_path: test
+  cwd: /tmp
+  output_path: /tmp
   command: ["echo", "hello"]
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
@@ -383,6 +391,10 @@ running_container_with_logs:
   cwd: test
   output_path: test
   command: ["echo", "hello"]
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
@@ -401,6 +413,10 @@ running_to_be_deleted:
   cwd: test
   output_path: test
   command: ["echo", "hello"]
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
   runtime_constraints:
     ram: 12000000000
     vcpus: 4
index b2dde7995606d71680b062b5e1717a07114557bb..2d5c73518191056a8b72909bc8d235a9d2f2ed39 100644 (file)
@@ -1071,6 +1071,31 @@ class ContainerRequestTest < ActiveSupport::TestCase
    ['Committed', false, {container_count: 2}],
    ['Committed', false, {container_count: 0}],
    ['Committed', false, {container_count: nil}],
+   ['Committed', true, {priority: 0, mounts: {"/out" => {"kind" => "tmp", "capacity" => 1000000}}}],
+   ['Committed', true, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp"}}}],
+   # Addition of default values for mounts / runtime_constraints /
+   # scheduling_parameters, as happens in a round-trip through
+   # controller, does not have any real effect and should be
+   # accepted/ignored rather than causing an error when the CR state
+   # dictates those attributes are not allowed to change.
+   ['Committed', true, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp", "exclude_from_output": false}}}],
+   ['Committed', true, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp", "repository_name": ""}}}],
+   ['Committed', true, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp", "content": nil}}}],
+   ['Committed', false, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp", "content": {}}}}],
+   ['Committed', false, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp", "repository_name": "foo"}}}],
+   ['Committed', false, {priority: 0, mounts: {"/out" => {"kind" => "tmp", "capacity" => 1234567}}}],
+   ['Committed', false, {priority: 0, mounts: {}}],
+   ['Committed', true, {priority: 0, runtime_constraints: {"vcpus" => 1, "ram" => 2}}],
+   ['Committed', true, {priority: 0, runtime_constraints: {"vcpus" => 1, "ram" => 2, "keep_cache_ram" => 0}}],
+   ['Committed', true, {priority: 0, runtime_constraints: {"vcpus" => 1, "ram" => 2, "API" => false}}],
+   ['Committed', false, {priority: 0, runtime_constraints: {"vcpus" => 1, "ram" => 2, "keep_cache_ram" => 1}}],
+   ['Committed', false, {priority: 0, runtime_constraints: {"vcpus" => 1, "ram" => 2, "API" => true}}],
+   ['Committed', true, {priority: 0, scheduling_parameters: {"preemptible" => false}}],
+   ['Committed', true, {priority: 0, scheduling_parameters: {"partitions" => []}}],
+   ['Committed', true, {priority: 0, scheduling_parameters: {"max_run_time" => 0}}],
+   ['Committed', false, {priority: 0, scheduling_parameters: {"preemptible" => true}}],
+   ['Committed', false, {priority: 0, scheduling_parameters: {"partitions" => ["foo"]}}],
+   ['Committed', false, {priority: 0, scheduling_parameters: {"max_run_time" => 1}}],
    ['Final', false, {state: ContainerRequest::Committed, name: "foobar"}],
    ['Final', false, {name: "foobar", priority: 123}],
    ['Final', false, {name: "foobar", output_uuid: "zzzzz-4zz18-znfnqtbbv4spc3w"}],
index 446d591bfd715224651c1d9667e0c451e81f664e..8715ab24f35c0312fcca8152dc464ab60aa582af 100644 (file)
@@ -32,6 +32,10 @@ import (
 
 var _ = check.Suite(&UnitSuite{})
 
+func init() {
+       arvados.DebugLocksPanicMode = true
+}
+
 type UnitSuite struct {
        Config *arvados.Config
 }
index f03ff01b8127d6ee1d1056ee72bee5bd6507d26b..6ea9bf9f7a8383cc10ed82e108b76bb3ca97585b 100644 (file)
@@ -136,15 +136,39 @@ func s3stringToSign(alg, scope, signedHeaders string, r *http.Request) (string,
                }
        }
 
-       normalizedURL := *r.URL
-       normalizedURL.RawPath = ""
-       normalizedURL.Path = reMultipleSlashChars.ReplaceAllString(normalizedURL.Path, "/")
-       ctxlog.FromContext(r.Context()).Infof("escapedPath %s", normalizedURL.EscapedPath())
-       canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, normalizedURL.EscapedPath(), s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
+       normalizedPath := normalizePath(r.URL.Path)
+       ctxlog.FromContext(r.Context()).Debugf("normalizedPath %q", normalizedPath)
+       canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, normalizedPath, s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
        ctxlog.FromContext(r.Context()).Debugf("s3stringToSign: canonicalRequest %s", canonicalRequest)
        return fmt.Sprintf("%s\n%s\n%s\n%s", alg, r.Header.Get("X-Amz-Date"), scope, hashdigest(sha256.New(), canonicalRequest)), nil
 }
 
+func normalizePath(s string) string {
+       // (url.URL).EscapedPath() would be incorrect here. AWS
+       // documentation specifies the URL path should be normalized
+       // according to RFC 3986, i.e., unescaping ALPHA / DIGIT / "-"
+       // / "." / "_" / "~". The implication is that everything other
+       // than those chars (and "/") _must_ be percent-encoded --
+       // even chars like ";" and "," that are not normally
+       // percent-encoded in paths.
+       out := ""
+       for _, c := range []byte(reMultipleSlashChars.ReplaceAllString(s, "/")) {
+               if (c >= 'a' && c <= 'z') ||
+                       (c >= 'A' && c <= 'Z') ||
+                       (c >= '0' && c <= '9') ||
+                       c == '-' ||
+                       c == '.' ||
+                       c == '_' ||
+                       c == '~' ||
+                       c == '/' {
+                       out += string(c)
+               } else {
+                       out += fmt.Sprintf("%%%02X", c)
+               }
+       }
+       return out
+}
+
 func s3signature(secretKey, scope, signedHeaders, stringToSign string) (string, error) {
        // scope is {datestamp}/{region}/{service}/aws4_request
        drs := strings.Split(scope, "/")
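
normalizePath exists because (url.URL).EscapedPath() leaves several RFC 3986 reserved characters, such as ',', ';', '$' and '@', unescaped in paths, while the SigV4 canonical path must percent-encode everything outside the unreserved set and '/'. The self-contained example below repeats the character-class rule to show the difference; the real function also collapses repeated slashes first.

    // Why EscapedPath() is not usable as the SigV4 canonical path.
    package main

    import (
        "fmt"
        "net/url"
    )

    func awsCanonicalPath(s string) string {
        out := ""
        for _, c := range []byte(s) {
            if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
                (c >= '0' && c <= '9') ||
                c == '-' || c == '.' || c == '_' || c == '~' || c == '/' {
                out += string(c)
            } else {
                out += fmt.Sprintf("%%%02X", c)
            }
        }
        return out
    }

    func main() {
        u := url.URL{Path: "/foo,;$@bar"}
        fmt.Println(u.EscapedPath())          // /foo,;$@bar
        fmt.Println(awsCanonicalPath(u.Path)) // /foo%2C%3B%24%40bar
    }
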
index 4f70168b5629ecfbdb06193d75344cb4e27673eb..f411fde871cdba0cc9bbb2584be9a9bb878c6c1e 100644 (file)
@@ -558,12 +558,15 @@ func (s *IntegrationSuite) TestS3NormalizeURIForSignature(c *check.C) {
                rawPath        string
                normalizedPath string
        }{
-               {"/foo", "/foo"},             // boring case
-               {"/foo%5fbar", "/foo_bar"},   // _ must not be escaped
-               {"/foo%2fbar", "/foo/bar"},   // / must not be escaped
-               {"/(foo)", "/%28foo%29"},     // () must be escaped
-               {"/foo%5bbar", "/foo%5Bbar"}, // %XX must be uppercase
+               {"/foo", "/foo"},                           // boring case
+               {"/foo%5fbar", "/foo_bar"},                 // _ must not be escaped
+               {"/foo%2fbar", "/foo/bar"},                 // / must not be escaped
+               {"/(foo)/[];,", "/%28foo%29/%5B%5D%3B%2C"}, // ()[];, must be escaped
+               {"/foo%5bbar", "/foo%5Bbar"},               // %XX must be uppercase
+               {"//foo///.bar", "/foo/.bar"},              // "//" and "///" must be squashed to "/"
        } {
+               c.Logf("trial %q", trial)
+
                date := time.Now().UTC().Format("20060102T150405Z")
                scope := "20200202/zzzzz/S3/aws4_request"
                canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", "GET", trial.normalizedPath, "", "host:host.example.com\n", "host", "")
@@ -1098,6 +1101,15 @@ func (s *IntegrationSuite) TestS3cmd(c *check.C) {
        buf, err := cmd.CombinedOutput()
        c.Check(err, check.IsNil)
        c.Check(string(buf), check.Matches, `.* 3 +s3://`+arvadostest.FooCollection+`/foo\n`)
+
+       // This tests whether s3cmd's path normalization agrees with
+       // keep-web's signature verification with respect to chars like
+       // "|" (neither reserved nor unreserved) and "," (not normally
+       // percent-encoded in a path).
+       cmd = exec.Command("s3cmd", "--no-ssl", "--host="+s.testServer.Addr, "--host-bucket="+s.testServer.Addr, "--access_key="+arvadostest.ActiveTokenUUID, "--secret_key="+arvadostest.ActiveToken, "get", "s3://"+arvadostest.FooCollection+"/foo,;$[|]bar")
+       buf, err = cmd.CombinedOutput()
+       c.Check(err, check.NotNil)
+       c.Check(string(buf), check.Matches, `(?ms).*NoSuchKey.*\n`)
 }
 
 func (s *IntegrationSuite) TestS3BucketInHost(c *check.C) {
index c1af511ad464f5a447a4552e8181dcfff23ffefc..5cd376844dd5ca7ae5f6a198e1bf41a494d374c7 100755 (executable)
@@ -135,11 +135,11 @@ VERSION="latest"
 
 # The arvados-formula version.  For a stable release, this should be a
 # branch name (e.g. X.Y-dev) or tag for the release.
-ARVADOS_TAG="master"
+ARVADOS_TAG="main"
 
 # Other formula versions we depend on
 POSTGRES_TAG="v0.41.6"
-NGINX_TAG="temp-fix-missing-statements-in-pillar"
+NGINX_TAG="v2.7.4"
 DOCKER_TAG="v1.0.0"
 LOCALE_TAG="v0.3.4"
 LETSENCRYPT_TAG="v2.1.0"
@@ -219,8 +219,7 @@ cd ${F_DIR} || exit 1
 git clone --branch "${ARVADOS_TAG}"     https://git.arvados.org/arvados-formula.git
 git clone --branch "${DOCKER_TAG}"      https://github.com/saltstack-formulas/docker-formula.git
 git clone --branch "${LOCALE_TAG}"      https://github.com/saltstack-formulas/locale-formula.git
-# git clone --branch "${NGINX_TAG}"       https://github.com/saltstack-formulas/nginx-formula.git
-git clone --branch "${NGINX_TAG}"       https://github.com/netmanagers/nginx-formula.git
+git clone --branch "${NGINX_TAG}"       https://github.com/saltstack-formulas/nginx-formula.git
 git clone --branch "${POSTGRES_TAG}"    https://github.com/saltstack-formulas/postgres-formula.git
 git clone --branch "${LETSENCRYPT_TAG}" https://github.com/saltstack-formulas/letsencrypt-formula.git