remote: https://rubygems.org/
specs:
RedCloth (4.3.2)
- actioncable (5.2.4.5)
- actionpack (= 5.2.4.5)
+ actioncable (5.2.6)
+ actionpack (= 5.2.6)
nio4r (~> 2.0)
websocket-driver (>= 0.6.1)
- actionmailer (5.2.4.5)
- actionpack (= 5.2.4.5)
- actionview (= 5.2.4.5)
- activejob (= 5.2.4.5)
+ actionmailer (5.2.6)
+ actionpack (= 5.2.6)
+ actionview (= 5.2.6)
+ activejob (= 5.2.6)
mail (~> 2.5, >= 2.5.4)
rails-dom-testing (~> 2.0)
- actionpack (5.2.4.5)
- actionview (= 5.2.4.5)
- activesupport (= 5.2.4.5)
+ actionpack (5.2.6)
+ actionview (= 5.2.6)
+ activesupport (= 5.2.6)
rack (~> 2.0, >= 2.0.8)
rack-test (>= 0.6.3)
rails-dom-testing (~> 2.0)
rails-html-sanitizer (~> 1.0, >= 1.0.2)
- actionview (5.2.4.5)
- activesupport (= 5.2.4.5)
+ actionview (5.2.6)
+ activesupport (= 5.2.6)
builder (~> 3.1)
erubi (~> 1.4)
rails-dom-testing (~> 2.0)
rails-html-sanitizer (~> 1.0, >= 1.0.3)
- activejob (5.2.4.5)
- activesupport (= 5.2.4.5)
+ activejob (5.2.6)
+ activesupport (= 5.2.6)
globalid (>= 0.3.6)
- activemodel (5.2.4.5)
- activesupport (= 5.2.4.5)
- activerecord (5.2.4.5)
- activemodel (= 5.2.4.5)
- activesupport (= 5.2.4.5)
+ activemodel (5.2.6)
+ activesupport (= 5.2.6)
+ activerecord (5.2.6)
+ activemodel (= 5.2.6)
+ activesupport (= 5.2.6)
arel (>= 9.0)
- activestorage (5.2.4.5)
- actionpack (= 5.2.4.5)
- activerecord (= 5.2.4.5)
- marcel (~> 0.3.1)
- activesupport (5.2.4.5)
+ activestorage (5.2.6)
+ actionpack (= 5.2.6)
+ activerecord (= 5.2.6)
+ marcel (~> 1.0.0)
+ activesupport (5.2.6)
concurrent-ruby (~> 1.0, >= 1.0.2)
i18n (>= 0.7, < 2)
minitest (~> 5.1)
execjs
coffee-script-source (1.12.2)
commonjs (0.2.7)
- concurrent-ruby (1.1.8)
+ concurrent-ruby (1.1.9)
crass (1.0.6)
deep_merge (1.2.1)
docile (1.3.1)
railties (>= 4)
request_store (~> 1.0)
logstash-event (1.2.02)
- loofah (2.9.0)
+ loofah (2.10.0)
crass (~> 1.0.2)
nokogiri (>= 1.5.9)
mail (2.7.1)
mini_mime (>= 0.1.1)
- marcel (0.3.3)
- mimemagic (~> 0.3.2)
+ marcel (1.0.1)
memoist (0.16.2)
metaclass (0.0.4)
method_source (1.0.0)
mime-types (3.2.2)
mime-types-data (~> 3.2015)
mime-types-data (3.2019.0331)
- mimemagic (0.3.8)
- nokogiri (~> 1)
- mini_mime (1.0.2)
- mini_portile2 (2.5.1)
+ mini_mime (1.1.0)
+ mini_portile2 (2.5.3)
minitest (5.10.3)
mocha (1.8.0)
metaclass (~> 0.0.1)
net-ssh-gateway (2.0.0)
net-ssh (>= 4.0.0)
nio4r (2.5.7)
- nokogiri (1.11.5)
+ nokogiri (1.11.7)
mini_portile2 (~> 2.5.0)
racc (~> 1.4)
npm-rails (0.2.1)
rack (>= 1.2.0)
rack-test (1.1.0)
rack (>= 1.0, < 3)
- rails (5.2.4.5)
- actioncable (= 5.2.4.5)
- actionmailer (= 5.2.4.5)
- actionpack (= 5.2.4.5)
- actionview (= 5.2.4.5)
- activejob (= 5.2.4.5)
- activemodel (= 5.2.4.5)
- activerecord (= 5.2.4.5)
- activestorage (= 5.2.4.5)
- activesupport (= 5.2.4.5)
+ rails (5.2.6)
+ actioncable (= 5.2.6)
+ actionmailer (= 5.2.6)
+ actionpack (= 5.2.6)
+ actionview (= 5.2.6)
+ activejob (= 5.2.6)
+ activemodel (= 5.2.6)
+ activerecord (= 5.2.6)
+ activestorage (= 5.2.6)
+ activesupport (= 5.2.6)
bundler (>= 1.3.0)
- railties (= 5.2.4.5)
+ railties (= 5.2.6)
sprockets-rails (>= 2.0.0)
rails-controller-testing (1.0.4)
actionpack (>= 5.0.1.x)
rails-html-sanitizer (1.3.0)
loofah (~> 2.3)
rails-perftest (0.0.7)
- railties (5.2.4.5)
- actionpack (= 5.2.4.5)
- activesupport (= 5.2.4.5)
+ railties (5.2.6)
+ actionpack (= 5.2.6)
+ activesupport (= 5.2.6)
method_source
rake (>= 0.8.7)
thor (>= 0.19.0, < 2.0)
uglifier (2.7.2)
execjs (>= 0.3.0)
json (>= 1.8.0)
- websocket-driver (0.7.3)
+ websocket-driver (0.7.4)
websocket-extensions (>= 0.1.0)
websocket-extensions (0.1.5)
xpath (2.1.0)
"git.arvados.org/arvados.git/lib/cmd"
"git.arvados.org/arvados.git/lib/costanalyzer"
"git.arvados.org/arvados.git/lib/deduplicationreport"
+ "git.arvados.org/arvados.git/lib/diagnostics"
"git.arvados.org/arvados.git/lib/mount"
)
"costanalyzer": costanalyzer.Command,
"shell": shellCommand{},
"connect-ssh": connectSSHCommand{},
+ "diagnostics": diagnostics.Command{},
})
)
github.com/prometheus/common v0.7.0
github.com/satori/go.uuid v1.2.1-0.20180103174451-36e9d2ebbde5 // indirect
github.com/sergi/go-diff v1.0.0 // indirect
- github.com/sirupsen/logrus v1.4.2
+ github.com/sirupsen/logrus v1.8.1
github.com/src-d/gcfg v1.3.0 // indirect
github.com/xanzy/ssh-agent v0.1.0 // indirect
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
+ golang.org/x/sys v0.0.0-20210603125802-9665404d3644
+ golang.org/x/tools v0.1.0 // indirect
golang.org/x/sys v0.0.0-20210510120138-977fb7262007
golang.org/x/tools v0.1.2 // indirect
google.golang.org/api v0.13.0
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
"runtime"
"sort"
"strings"
+
+ "github.com/sirupsen/logrus"
)
type Handler interface {
copy(newargs[flagargs+1:], args[flagargs+1:])
return newargs
}
+
+type NoPrefixFormatter struct{}
+
+func (NoPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+ return []byte(entry.Message + "\n"), nil
+}
# In each of the service sections below, the keys under
# InternalURLs are the endpoints where the service should be
- # listening, and reachable from other hosts in the cluster.
- SAMPLE:
- InternalURLs:
- "http://host1.example:12345": {}
- "http://host2.example:12345":
- # Rendezvous is normally empty/omitted. When changing the
- # URL of a Keepstore service, Rendezvous should be set to
- # the old URL (with trailing slash omitted) to preserve
- # rendezvous ordering.
- Rendezvous: ""
- SAMPLE:
- Rendezvous: ""
- ExternalURL: "-"
+ # listening, and reachable from other hosts in the
+ # cluster. Example:
+ #
+ # InternalURLs:
+ # "http://host1.example:12345": {}
+ # "http://host2.example:12345": {}
RailsAPI:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: "-"
Controller:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
Websocket:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
Keepbalance:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: "-"
GitHTTP:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
GitSSH:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
DispatchCloud:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: "-"
SSO:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
Keepproxy:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
WebDAV:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
# Base URL for Workbench inline preview. If blank, use
# WebDAVDownload instead, and disable inline preview.
# If both are empty, downloading collections from workbench
ExternalURL: ""
WebDAVDownload:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
# Base URL for download links. If blank, serve links to WebDAV
# with disposition=attachment query param. Unlike preview links,
# browsers do not render attachments, so there is no risk of XSS.
ExternalURL: ""
Keepstore:
- InternalURLs: {}
+ InternalURLs:
+ SAMPLE:
+ # Rendezvous is normally empty/omitted. When changing the
+ # URL of a Keepstore service, Rendezvous should be set to
+ # the old URL (with trailing slash omitted) to preserve
+ # rendezvous ordering.
+ Rendezvous: ""
ExternalURL: "-"
Composer:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
WebShell:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
# ShellInABox service endpoint URL for a given VM. If empty, do not
# offer web shell logins.
#
# https://*.webshell.uuid_prefix.arvadosapi.com
ExternalURL: ""
Workbench1:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
Workbench2:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
Health:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: "-"
PostgreSQL:
# In each of the service sections below, the keys under
# InternalURLs are the endpoints where the service should be
- # listening, and reachable from other hosts in the cluster.
- SAMPLE:
- InternalURLs:
- "http://host1.example:12345": {}
- "http://host2.example:12345":
- # Rendezvous is normally empty/omitted. When changing the
- # URL of a Keepstore service, Rendezvous should be set to
- # the old URL (with trailing slash omitted) to preserve
- # rendezvous ordering.
- Rendezvous: ""
- SAMPLE:
- Rendezvous: ""
- ExternalURL: "-"
+ # listening, and reachable from other hosts in the
+ # cluster. Example:
+ #
+ # InternalURLs:
+ # "http://host1.example:12345": {}
+ # "http://host2.example:12345": {}
RailsAPI:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: "-"
Controller:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
Websocket:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
Keepbalance:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: "-"
GitHTTP:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
GitSSH:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
DispatchCloud:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: "-"
SSO:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
Keepproxy:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
WebDAV:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
# Base URL for Workbench inline preview. If blank, use
# WebDAVDownload instead, and disable inline preview.
# If both are empty, downloading collections from workbench
ExternalURL: ""
WebDAVDownload:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
# Base URL for download links. If blank, serve links to WebDAV
# with disposition=attachment query param. Unlike preview links,
# browsers do not render attachments, so there is no risk of XSS.
ExternalURL: ""
Keepstore:
- InternalURLs: {}
+ InternalURLs:
+ SAMPLE:
+ # Rendezvous is normally empty/omitted. When changing the
+ # URL of a Keepstore service, Rendezvous should be set to
+ # the old URL (with trailing slash omitted) to preserve
+ # rendezvous ordering.
+ Rendezvous: ""
ExternalURL: "-"
Composer:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
WebShell:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
# ShellInABox service endpoint URL for a given VM. If empty, do not
# offer web shell logins.
#
# https://*.webshell.uuid_prefix.arvadosapi.com
ExternalURL: ""
Workbench1:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
Workbench2:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: ""
Health:
- InternalURLs: {}
+ InternalURLs: {SAMPLE: {}}
ExternalURL: "-"
PostgreSQL:
ldr.configdata = buf
}
+ // FIXME: We should reject YAML if the same key is used twice
+ // in a map/object, like {foo: bar, foo: baz}. Maybe we'll get
+ // this fixed free when we upgrade ghodss/yaml to a version
+ // that uses go-yaml v3.
+
// Load the config into a dummy map to get the cluster ID
// keys, discarding the values; then set up defaults for each
// cluster ID; then load the real config on top of the
checkKeyConflict(fmt.Sprintf("Clusters.%s.PostgreSQL.Connection", id), cc.PostgreSQL.Connection),
ldr.checkEmptyKeepstores(cc),
ldr.checkUnlistedKeepstores(cc),
+ // TODO: check non-empty Rendezvous on
+ // services other than Keepstore
} {
if err != nil {
return nil, err
if ldr.Logger == nil {
return
}
- allowed := map[string]interface{}{}
- for k, v := range expected {
- allowed[strings.ToLower(k)] = v
- }
for k, vsupp := range supplied {
if k == "SAMPLE" {
// entry will be dropped in removeSampleKeys anyway
continue
}
- vexp, ok := allowed[strings.ToLower(k)]
+ vexp, ok := expected[k]
if expected["SAMPLE"] != nil {
+ // use the SAMPLE entry's keys as the
+ // "expected" map when checking vsupp
+ // recursively.
vexp = expected["SAMPLE"]
} else if !ok {
- ldr.Logger.Warnf("deprecated or unknown config entry: %s%s", prefix, k)
+ // check for a case-insensitive match
+ hint := ""
+ for ek := range expected {
+ if strings.EqualFold(k, ek) {
+ hint = " (perhaps you meant " + ek + "?)"
+ // If we don't delete this, it
+ // will end up getting merged,
+ // unpredictably
+ // merging/overriding the
+ // default.
+ delete(supplied, k)
+ break
+ }
+ }
+ ldr.Logger.Warnf("deprecated or unknown config entry: %s%s%s", prefix, k, hint)
continue
}
if vsupp, ok := vsupp.(map[string]interface{}); !ok {
SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
Collections:
BlobSigningKey: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
- postgresql: {}
- BadKey: {}
- Containers: {}
+ PostgreSQL: {}
+ BadKey1: {}
+ Containers:
+ RunTimeEngine: abc
RemoteClusters:
z2222:
Host: z2222.arvadosapi.com
Proxy: true
- BadKey: badValue
+ BadKey2: badValue
+ Services:
+ KeepStore:
+ InternalURLs:
+ "http://host.example:12345": {}
+ Keepstore:
+ InternalURLs:
+ "http://host.example:12345":
+ RendezVous: x
+ ServiceS:
+ Keepstore:
+ InternalURLs:
+ "http://host.example:12345": {}
+ Volumes:
+ zzzzz-nyw5e-aaaaaaaaaaaaaaa: {}
`, &logbuf).Load()
c.Assert(err, check.IsNil)
+ c.Log(logbuf.String())
logs := strings.Split(strings.TrimSuffix(logbuf.String(), "\n"), "\n")
for _, log := range logs {
- c.Check(log, check.Matches, `.*deprecated or unknown config entry:.*BadKey.*`)
+ c.Check(log, check.Matches, `.*deprecated or unknown config entry:.*(RunTimeEngine.*RuntimeEngine|BadKey1|BadKey2|KeepStore|ServiceS|RendezVous).*`)
}
- c.Check(logs, check.HasLen, 2)
+ c.Check(logs, check.HasLen, 6)
}
func (s *LoadSuite) checkSAMPLEKeys(c *check.C, path string, x interface{}) {
_, err := testLoader(c, `
Clusters:
zzzzz:
- postgresql:
- connection:
+ PostgreSQL:
+ Connection:
DBName: dbname
Host: host
`, nil).Load()
"io"
"time"
+ "git.arvados.org/arvados.git/lib/cmd"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/sirupsen/logrus"
)
var Command = command{}
end time.Time
}
-type NoPrefixFormatter struct{}
-
-func (f *NoPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {
- return []byte(entry.Message), nil
-}
-
// RunCommand implements the subcommand "costanalyzer <collection> <collection> ..."
func (c command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
var err error
logger := ctxlog.New(stderr, "text", "info")
+ logger.SetFormatter(cmd.NoPrefixFormatter{})
defer func() {
if err != nil {
- logger.Error("\n" + err.Error() + "\n")
+ logger.Error("\n" + err.Error())
}
}()
- logger.SetFormatter(new(NoPrefixFormatter))
-
exitcode, err := c.costAnalyzer(prog, args, logger, stdout, stderr)
return exitcode
"errors"
"flag"
"fmt"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadosclient"
+ "git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/sirupsen/logrus"
)
}
logger.SetLevel(lvl)
if !c.cache {
- logger.Debug("Caching disabled\n")
+ logger.Debug("Caching disabled")
}
return
}
case *arvados.ContainerRequest:
if v.State == arvados.ContainerRequestStateFinal {
reload = false
- logger.Debugf("Loaded object %s from local cache (%s)\n", uuid, file)
+ logger.Debugf("Loaded object %s from local cache (%s)", uuid, file)
}
case *arvados.Container:
if v.State == arvados.ContainerStateComplete || v.State == arvados.ContainerStateCancelled {
reload = false
- logger.Debugf("Loaded object %s from local cache (%s)\n", uuid, file)
+ logger.Debugf("Loaded object %s from local cache (%s)", uuid, file)
}
}
return
return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
}
if value, ok := childCrs["items"]; ok {
- logger.Infof("Collecting top level container requests in project %s\n", uuid)
+ logger.Infof("Collecting top level container requests in project %s", uuid)
items := value.([]interface{})
for _, item := range items {
itemMap := item.(map[string]interface{})
}
}
} else {
- logger.Infof("No top level container requests found in project %s\n", uuid)
+ logger.Infof("No top level container requests found in project %s", uuid)
}
return
}
var tmpCsv string
var tmpTotalCost float64
var totalCost float64
- fmt.Printf("Processing %s\n", uuid)
+ logger.Debugf("Processing %s", uuid)
var crUUID = uuid
if strings.Contains(uuid, "-4zz18-") {
}
if len(cr.ContainerUUID) == 0 {
// Nothing to do! E.g. a CR in 'Uncommitted' state.
- logger.Infof("No container associated with container request %s, skipping\n", crUUID)
+ logger.Infof("No container associated with container request %s, skipping", crUUID)
return nil, nil
}
var container arvados.Container
return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
}
logger.Infof("Collecting child containers for container request %s (%s)", crUUID, container.FinishedAt)
- for _, cr2 := range childCrs.Items {
- logger.Info(".")
+ progressTicker := time.NewTicker(5 * time.Second)
+ defer progressTicker.Stop()
+ for i, cr2 := range childCrs.Items {
+ select {
+ case <-progressTicker.C:
+ logger.Infof("... %d of %d", i+1, len(childCrs.Items))
+ default:
+ }
node, err := getNode(arv, ac, kc, cr2)
if err != nil {
logger.Errorf("Skipping container request %s: error getting node %s: %s", cr2.UUID, cr2.UUID, err)
continue
}
- logger.Debug("\nChild container: " + cr2.ContainerUUID + "\n")
+ logger.Debug("Child container: " + cr2.ContainerUUID)
var c2 arvados.Container
err = loadObject(logger, ac, cr.UUID, cr2.ContainerUUID, cache, &c2)
if err != nil {
csv += tmpCsv
totalCost += tmpTotalCost
}
- logger.Info(" done\n")
+ logger.Debug("Done collecting child containers")
csv += "TOTAL,,,,,,,,," + strconv.FormatFloat(totalCost, 'f', 8, 64) + "\n"
if err != nil {
return nil, fmt.Errorf("error writing file with path %s: %s", fName, err.Error())
}
- logger.Infof("\nUUID report in %s\n\n", fName)
+ logger.Infof("\nUUID report in %s", fName)
}
return
err := ac.RequestAndDecode(&list, "GET", "arvados/v1/container_requests", nil, params)
if err != nil {
- logger.Errorf("Error getting container request list from Arvados API: %s\n", err)
+ logger.Errorf("Error getting container request list from Arvados API: %s", err)
break
}
if len(list.Items) == 0 {
cost := make(map[string]float64)
for uuid := range uuidChannel {
- fmt.Printf("Considering %s\n", uuid)
+ logger.Debugf("Considering %s", uuid)
if strings.Contains(uuid, "-j7d0g-") {
// This is a project (group)
cost, err = handleProject(logger, uuid, arv, ac, kc, c.resultsDir, c.cache)
// keep going.
logger.Errorf("cost analysis is not supported for the 'Home' project: %s", uuid)
} else {
- logger.Errorf("this argument does not look like a uuid: %s\n", uuid)
+ logger.Errorf("this argument does not look like a uuid: %s", uuid)
exitcode = 3
return
}
}
if len(cost) == 0 {
- logger.Info("Nothing to do!\n")
+ logger.Info("Nothing to do!")
return
}
exitcode = 1
return
}
- logger.Infof("Aggregate cost accounting for all supplied uuids in %s\n", aFile)
+ logger.Infof("Aggregate cost accounting for all supplied uuids in %s", aFile)
}
// Output the total dollar amount on stdout
import (
"io"
+ "git.arvados.org/arvados.git/lib/cmd"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/sirupsen/logrus"
)
var Command command
type command struct{}
-type NoPrefixFormatter struct{}
-
-func (f *NoPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {
- return []byte(entry.Message + "\n"), nil
-}
-
// RunCommand implements the subcommand "deduplication-report <collection> <collection> ..."
func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
var err error
}
}()
- logger.SetFormatter(new(NoPrefixFormatter))
+ logger.SetFormatter(cmd.NoPrefixFormatter{})
exitcode := report(prog, args, logger, stdout, stderr)
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package diagnostics
+
+import (
+ "bytes"
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/sirupsen/logrus"
+)
+
+type Command struct{}
+
+func (cmd Command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+ var diag diagnoser
+ f := flag.NewFlagSet(prog, flag.ContinueOnError)
+ f.StringVar(&diag.projectName, "project-name", "scratch area for diagnostics", "name of project to find/create in home project and use for temporary/test objects")
+ f.StringVar(&diag.logLevel, "log-level", "info", "logging level (debug, info, warning, error)")
+ f.BoolVar(&diag.checkInternal, "internal-client", false, "check that this host is considered an \"internal\" client")
+ f.BoolVar(&diag.checkExternal, "external-client", false, "check that this host is considered an \"external\" client")
+ f.IntVar(&diag.priority, "priority", 500, "priority for test container (1..1000, or 0 to skip)")
+ f.DurationVar(&diag.timeout, "timeout", 10*time.Second, "timeout for http requests")
+ err := f.Parse(args)
+ if err == flag.ErrHelp {
+ return 0
+ } else if err != nil {
+ fmt.Fprintln(stderr, err)
+ return 2
+ }
+ diag.logger = ctxlog.New(stdout, "text", diag.logLevel)
+ diag.logger.SetFormatter(&logrus.TextFormatter{DisableTimestamp: true, DisableLevelTruncation: true, PadLevelText: true})
+ diag.runtests()
+ if len(diag.errors) == 0 {
+ diag.logger.Info("--- no errors ---")
+ return 0
+ } else {
+ if diag.logger.Level > logrus.ErrorLevel {
+ fmt.Fprint(stdout, "\n--- cut here --- error summary ---\n\n")
+ for _, e := range diag.errors {
+ diag.logger.Error(e)
+ }
+ }
+ return 1
+ }
+}
+
+type diagnoser struct {
+ stdout io.Writer
+ stderr io.Writer
+ logLevel string
+ priority int
+ projectName string
+ checkInternal bool
+ checkExternal bool
+ timeout time.Duration
+ logger *logrus.Logger
+ errors []string
+ done map[int]bool
+}
+
+func (diag *diagnoser) debugf(f string, args ...interface{}) {
+ diag.logger.Debugf(" ... "+f, args...)
+}
+
+func (diag *diagnoser) infof(f string, args ...interface{}) {
+ diag.logger.Infof(" ... "+f, args...)
+}
+
+func (diag *diagnoser) warnf(f string, args ...interface{}) {
+ diag.logger.Warnf(" ... "+f, args...)
+}
+
+func (diag *diagnoser) errorf(f string, args ...interface{}) {
+ diag.logger.Errorf(f, args...)
+ diag.errors = append(diag.errors, fmt.Sprintf(f, args...))
+}
+
+// Run the given func, logging appropriate messages before and after,
+// adding timing info, etc.
+//
+// The id argument should be unique among tests, and shouldn't change
+// when other tests are added/removed.
+func (diag *diagnoser) dotest(id int, title string, fn func() error) {
+ if diag.done == nil {
+ diag.done = map[int]bool{}
+ } else if diag.done[id] {
+ diag.errorf("(bug) reused test id %d", id)
+ }
+ diag.done[id] = true
+
+ diag.logger.Infof("%4d: %s", id, title)
+ t0 := time.Now()
+ err := fn()
+ elapsed := fmt.Sprintf("%d ms", time.Now().Sub(t0)/time.Millisecond)
+ if err != nil {
+ diag.errorf("%4d: %s (%s): %s", id, title, elapsed, err)
+ } else {
+ diag.logger.Debugf("%4d: %s (%s): ok", id, title, elapsed)
+ }
+}
+
+func (diag *diagnoser) runtests() {
+ client := arvados.NewClientFromEnv()
+
+ if client.APIHost == "" || client.AuthToken == "" {
+ diag.errorf("ARVADOS_API_HOST and ARVADOS_API_TOKEN environment variables are not set -- aborting without running any tests")
+ return
+ }
+
+ var dd arvados.DiscoveryDocument
+ ddpath := "discovery/v1/apis/arvados/v1/rest"
+ diag.dotest(10, fmt.Sprintf("getting discovery document from https://%s/%s", client.APIHost, ddpath), func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ err := client.RequestAndDecodeContext(ctx, &dd, "GET", ddpath, nil, nil)
+ if err != nil {
+ return err
+ }
+ diag.debugf("BlobSignatureTTL = %d", dd.BlobSignatureTTL)
+ return nil
+ })
+
+ var cluster arvados.Cluster
+ cfgpath := "arvados/v1/config"
+ diag.dotest(20, fmt.Sprintf("getting exported config from https://%s/%s", client.APIHost, cfgpath), func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ err := client.RequestAndDecodeContext(ctx, &cluster, "GET", cfgpath, nil, nil)
+ if err != nil {
+ return err
+ }
+ diag.debugf("Collections.BlobSigning = %v", cluster.Collections.BlobSigning)
+ return nil
+ })
+
+ var user arvados.User
+ diag.dotest(30, "getting current user record", func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ err := client.RequestAndDecodeContext(ctx, &user, "GET", "arvados/v1/users/current", nil, nil)
+ if err != nil {
+ return err
+ }
+ diag.debugf("user uuid = %s", user.UUID)
+ return nil
+ })
+
+ // uncomment to create some spurious errors
+ // cluster.Services.WebDAVDownload.ExternalURL.Host = "0.0.0.0:9"
+
+ // TODO: detect routing errors here, like finding wb2 at the
+ // wb1 address.
+ for i, svc := range []*arvados.Service{
+ &cluster.Services.Keepproxy,
+ &cluster.Services.WebDAV,
+ &cluster.Services.WebDAVDownload,
+ &cluster.Services.Websocket,
+ &cluster.Services.Workbench1,
+ &cluster.Services.Workbench2,
+ } {
+ diag.dotest(40+i, fmt.Sprintf("connecting to service endpoint %s", svc.ExternalURL), func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ u := svc.ExternalURL
+ if strings.HasPrefix(u.Scheme, "ws") {
+ // We can do a real websocket test elsewhere,
+ // but for now we'll just check the https
+ // connection.
+ u.Scheme = "http" + u.Scheme[2:]
+ }
+ if svc == &cluster.Services.WebDAV && strings.HasPrefix(u.Host, "*") {
+ u.Host = "d41d8cd98f00b204e9800998ecf8427e-0" + u.Host[1:]
+ }
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+ if err != nil {
+ return err
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+ })
+ }
+
+ for i, url := range []string{
+ cluster.Services.Controller.ExternalURL.String(),
+ cluster.Services.Keepproxy.ExternalURL.String() + "d41d8cd98f00b204e9800998ecf8427e+0",
+ cluster.Services.WebDAVDownload.ExternalURL.String(),
+ } {
+ diag.dotest(50+i, fmt.Sprintf("checking CORS headers at %s", url), func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Origin", "https://example.com")
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ if hdr := resp.Header.Get("Access-Control-Allow-Origin"); hdr != "*" {
+ return fmt.Errorf("expected \"Access-Control-Allow-Origin: *\", got %q", hdr)
+ }
+ return nil
+ })
+ }
+
+ var keeplist arvados.KeepServiceList
+ diag.dotest(60, "checking internal/external client detection", func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ err := client.RequestAndDecodeContext(ctx, &keeplist, "GET", "arvados/v1/keep_services/accessible", nil, arvados.ListOptions{Limit: 999999})
+ if err != nil {
+ return fmt.Errorf("error getting keep services list: %s", err)
+ } else if len(keeplist.Items) == 0 {
+ return fmt.Errorf("controller did not return any keep services")
+ }
+ found := map[string]int{}
+ for _, ks := range keeplist.Items {
+ found[ks.ServiceType]++
+ }
+ isInternal := found["proxy"] == 0 && len(keeplist.Items) > 0
+ isExternal := found["proxy"] > 0 && found["proxy"] == len(keeplist.Items)
+ if isExternal {
+ diag.debugf("controller returned only proxy services, this host is treated as \"external\"")
+ } else if isInternal {
+ diag.debugf("controller returned only non-proxy services, this host is treated as \"internal\"")
+ }
+ if (diag.checkInternal && !isInternal) || (diag.checkExternal && !isExternal) {
+ return fmt.Errorf("expecting internal=%v external=%v, but found internal=%v external=%v", diag.checkInternal, diag.checkExternal, isInternal, isExternal)
+ }
+ return nil
+ })
+
+ for i, ks := range keeplist.Items {
+ u := url.URL{
+ Scheme: "http",
+ Host: net.JoinHostPort(ks.ServiceHost, fmt.Sprintf("%d", ks.ServicePort)),
+ Path: "/",
+ }
+ if ks.ServiceSSLFlag {
+ u.Scheme = "https"
+ }
+ diag.dotest(61+i, fmt.Sprintf("reading+writing via keep service at %s", u.String()), func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ req, err := http.NewRequestWithContext(ctx, "PUT", u.String()+"d41d8cd98f00b204e9800998ecf8427e", nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("reading response body: %s", err)
+ }
+ loc := strings.TrimSpace(string(body))
+ if !strings.HasPrefix(loc, "d41d8") {
+ return fmt.Errorf("unexpected response from write: %q", body)
+ }
+
+ req, err = http.NewRequestWithContext(ctx, "GET", u.String()+loc, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+ resp, err = http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ body, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("reading response body: %s", err)
+ }
+ if len(body) != 0 {
+ return fmt.Errorf("unexpected response from read: %q", body)
+ }
+
+ return nil
+ })
+ }
+
+ var project arvados.Group
+ diag.dotest(80, fmt.Sprintf("finding/creating %q project", diag.projectName), func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ var grplist arvados.GroupList
+ err := client.RequestAndDecodeContext(ctx, &grplist, "GET", "arvados/v1/groups", nil, arvados.ListOptions{
+ Filters: []arvados.Filter{
+ {"name", "=", diag.projectName},
+ {"group_class", "=", "project"},
+ {"owner_uuid", "=", user.UUID}},
+ Limit: 999999})
+ if err != nil {
+ return fmt.Errorf("list groups: %s", err)
+ }
+ if len(grplist.Items) > 0 {
+ project = grplist.Items[0]
+ diag.debugf("using existing project, uuid = %s", project.UUID)
+ return nil
+ }
+ diag.debugf("list groups: ok, no results")
+ err = client.RequestAndDecodeContext(ctx, &project, "POST", "arvados/v1/groups", nil, map[string]interface{}{"group": map[string]interface{}{
+ "name": diag.projectName,
+ "group_class": "project",
+ }})
+ if err != nil {
+ return fmt.Errorf("create project: %s", err)
+ }
+ diag.debugf("created project, uuid = %s", project.UUID)
+ return nil
+ })
+
+ var collection arvados.Collection
+ diag.dotest(90, "creating temporary collection", func() error {
+ if project.UUID == "" {
+ return fmt.Errorf("skipping, no project to work in")
+ }
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ err := client.RequestAndDecodeContext(ctx, &collection, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+ "ensure_unique_name": true,
+ "collection": map[string]interface{}{
+ "owner_uuid": project.UUID,
+ "name": "test collection",
+ "trash_at": time.Now().Add(time.Hour)}})
+ if err != nil {
+ return err
+ }
+ diag.debugf("ok, uuid = %s", collection.UUID)
+ return nil
+ })
+
+ if collection.UUID != "" {
+ defer func() {
+ diag.dotest(9990, "deleting temporary collection", func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ return client.RequestAndDecodeContext(ctx, nil, "DELETE", "arvados/v1/collections/"+collection.UUID, nil, nil)
+ })
+ }()
+ }
+
+ diag.dotest(100, "uploading file via webdav", func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ if collection.UUID == "" {
+ return fmt.Errorf("skipping, no test collection")
+ }
+ req, err := http.NewRequestWithContext(ctx, "PUT", cluster.Services.WebDAVDownload.ExternalURL.String()+"c="+collection.UUID+"/testfile", bytes.NewBufferString("testfiledata"))
+ if err != nil {
+ return fmt.Errorf("BUG? http.NewRequest: %s", err)
+ }
+ req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error performing http request: %s", err)
+ }
+ resp.Body.Close()
+ if resp.StatusCode != http.StatusCreated {
+ return fmt.Errorf("status %s", resp.Status)
+ }
+ diag.debugf("ok, status %s", resp.Status)
+ err = client.RequestAndDecodeContext(ctx, &collection, "GET", "arvados/v1/collections/"+collection.UUID, nil, nil)
+ if err != nil {
+ return fmt.Errorf("get updated collection: %s", err)
+ }
+ diag.debugf("ok, pdh %s", collection.PortableDataHash)
+ return nil
+ })
+
+ davurl := cluster.Services.WebDAV.ExternalURL
+ diag.dotest(110, fmt.Sprintf("checking WebDAV ExternalURL wildcard (%s)", davurl), func() error {
+ if davurl.Host == "" {
+ return fmt.Errorf("host missing - content previews will not work")
+ }
+ if !strings.HasPrefix(davurl.Host, "*--") && !strings.HasPrefix(davurl.Host, "*.") && !cluster.Collections.TrustAllContent {
+ diag.warnf("WebDAV ExternalURL has no leading wildcard and TrustAllContent==false - content previews will not work")
+ }
+ return nil
+ })
+
+ for i, trial := range []struct {
+ needcoll bool
+ status int
+ fileurl string
+ }{
+ {false, http.StatusNotFound, strings.Replace(davurl.String(), "*", "d41d8cd98f00b204e9800998ecf8427e-0", 1) + "foo"},
+ {false, http.StatusNotFound, strings.Replace(davurl.String(), "*", "d41d8cd98f00b204e9800998ecf8427e-0", 1) + "testfile"},
+ {false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=d41d8cd98f00b204e9800998ecf8427e+0/_/foo"},
+ {false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=d41d8cd98f00b204e9800998ecf8427e+0/_/testfile"},
+ {true, http.StatusOK, strings.Replace(davurl.String(), "*", strings.Replace(collection.PortableDataHash, "+", "-", -1), 1) + "testfile"},
+ {true, http.StatusOK, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=" + collection.UUID + "/_/testfile"},
+ } {
+ diag.dotest(120+i, fmt.Sprintf("downloading from webdav (%s)", trial.fileurl), func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ if trial.needcoll && collection.UUID == "" {
+ return fmt.Errorf("skipping, no test collection")
+ }
+ req, err := http.NewRequestWithContext(ctx, "GET", trial.fileurl, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("reading response: %s", err)
+ }
+ if resp.StatusCode != trial.status {
+ return fmt.Errorf("unexpected response status: %s", resp.Status)
+ }
+ if trial.status == http.StatusOK && string(body) != "testfiledata" {
+ return fmt.Errorf("unexpected response content: %q", body)
+ }
+ return nil
+ })
+ }
+
+ var vm arvados.VirtualMachine
+ diag.dotest(130, "getting list of virtual machines", func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ var vmlist arvados.VirtualMachineList
+ err := client.RequestAndDecodeContext(ctx, &vmlist, "GET", "arvados/v1/virtual_machines", nil, arvados.ListOptions{Limit: 999999})
+ if err != nil {
+ return err
+ }
+ if len(vmlist.Items) < 1 {
+ return fmt.Errorf("no VMs found")
+ }
+ vm = vmlist.Items[0]
+ return nil
+ })
+
+ diag.dotest(140, "getting workbench1 webshell page", func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ if vm.UUID == "" {
+ return fmt.Errorf("skipping, no vm available")
+ }
+ webshelltermurl := cluster.Services.Workbench1.ExternalURL.String() + "virtual_machines/" + vm.UUID + "/webshell/testusername"
+ diag.debugf("url %s", webshelltermurl)
+ req, err := http.NewRequestWithContext(ctx, "GET", webshelltermurl, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("reading response: %s", err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("unexpected response status: %s %q", resp.Status, body)
+ }
+ return nil
+ })
+
+ diag.dotest(150, "connecting to webshell service", func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+ if vm.UUID == "" {
+ return fmt.Errorf("skipping, no vm available")
+ }
+ u := cluster.Services.WebShell.ExternalURL
+ webshellurl := u.String() + vm.Hostname + "?"
+ if strings.HasPrefix(u.Host, "*") {
+ u.Host = vm.Hostname + u.Host[1:]
+ webshellurl = u.String() + "?"
+ }
+ diag.debugf("url %s", webshellurl)
+ req, err := http.NewRequestWithContext(ctx, "POST", webshellurl, bytes.NewBufferString(url.Values{
+ "width": {"80"},
+ "height": {"25"},
+ "session": {"xyzzy"},
+ "rooturl": {webshellurl},
+ }.Encode()))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ diag.debugf("response status %s", resp.Status)
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("reading response: %s", err)
+ }
+ diag.debugf("response body %q", body)
+ // We don't speak the protocol, so we get a 400 error
+ // from the webshell server even if everything is
+ // OK. Anything else (404, 502, ???) indicates a
+ // problem.
+ if resp.StatusCode != http.StatusBadRequest {
+ return fmt.Errorf("unexpected response status: %s, %q", resp.Status, body)
+ }
+ return nil
+ })
+
+ diag.dotest(160, "running a container", func() error {
+ if diag.priority < 1 {
+ diag.infof("skipping (use priority > 0 if you want to run a container)")
+ return nil
+ }
+ if project.UUID == "" {
+ return fmt.Errorf("skipping, no project to work in")
+ }
+
+ var cr arvados.ContainerRequest
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ defer cancel()
+
+ timestamp := time.Now().Format(time.RFC3339)
+ err := client.RequestAndDecodeContext(ctx, &cr, "POST", "arvados/v1/container_requests", nil, map[string]interface{}{"container_request": map[string]interface{}{
+ "owner_uuid": project.UUID,
+ "name": fmt.Sprintf("diagnostics container request %s", timestamp),
+ "container_image": "arvados/jobs",
+ "command": []string{"echo", timestamp},
+ "use_existing": false,
+ "output_path": "/mnt/output",
+ "output_name": fmt.Sprintf("diagnostics output %s", timestamp),
+ "priority": diag.priority,
+ "state": arvados.ContainerRequestStateCommitted,
+ "mounts": map[string]map[string]interface{}{
+ "/mnt/output": {
+ "kind": "collection",
+ "writable": true,
+ },
+ },
+ "runtime_constraints": arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 26,
+ KeepCacheRAM: 1 << 26,
+ },
+ }})
+ if err != nil {
+ return err
+ }
+ diag.debugf("container request uuid = %s", cr.UUID)
+ diag.debugf("container uuid = %s", cr.ContainerUUID)
+
+ timeout := 10 * time.Minute
+ diag.infof("container request submitted, waiting up to %v for container to run", arvados.Duration(timeout))
+ ctx, cancel = context.WithDeadline(context.Background(), time.Now().Add(timeout))
+ defer cancel()
+
+ var c arvados.Container
+ for ; cr.State != arvados.ContainerRequestStateFinal; time.Sleep(2 * time.Second) {
+ ctx, cancel := context.WithDeadline(ctx, time.Now().Add(diag.timeout))
+ defer cancel()
+
+ crStateWas := cr.State
+ err := client.RequestAndDecodeContext(ctx, &cr, "GET", "arvados/v1/container_requests/"+cr.UUID, nil, nil)
+ if err != nil {
+ return err
+ }
+ if cr.State != crStateWas {
+ diag.debugf("container request state = %s", cr.State)
+ }
+
+ cStateWas := c.State
+ err = client.RequestAndDecodeContext(ctx, &c, "GET", "arvados/v1/containers/"+cr.ContainerUUID, nil, nil)
+ if err != nil {
+ return err
+ }
+ if c.State != cStateWas {
+ diag.debugf("container state = %s", c.State)
+ }
+ }
+
+ if c.State != arvados.ContainerStateComplete {
+ return fmt.Errorf("container request %s is final but container %s did not complete: container state = %q", cr.UUID, cr.ContainerUUID, c.State)
+ } else if c.ExitCode != 0 {
+ return fmt.Errorf("container exited %d", c.ExitCode)
+ }
+ return nil
+ })
+}
ErrIsDirectory = errors.New("cannot rename file to overwrite existing directory")
ErrNotADirectory = errors.New("not a directory")
ErrPermission = os.ErrPermission
+ DebugLocksPanicMode = false
)
// syncer is satisfied by any type with a Sync() error method
// (e.g., *os.File). Presumably used to flush buffered state to the
// backing store where available -- confirm at call sites.
type syncer interface {
	Sync() error
}
+func debugPanicIfNotLocked(l sync.Locker) {
+ if !DebugLocksPanicMode {
+ return
+ }
+ race := false
+ go func() {
+ l.Lock()
+ race = true
+ l.Unlock()
+ }()
+ time.Sleep(10)
+ if race {
+ panic("bug: caller-must-have-lock func called, but nobody has lock")
+ }
+}
+
// A File is an *os.File-like interface for reading and writing files
// in a FileSystem.
type File interface {
}
func (n *treenode) Child(name string, replace func(inode) (inode, error)) (child inode, err error) {
+ debugPanicIfNotLocked(n)
child = n.inodes[name]
if name == "" || name == "." || name == ".." {
err = ErrInvalidArgument
func (n *treenode) MemorySize() (size int64) {
n.RLock()
defer n.RUnlock()
+ debugPanicIfNotLocked(n)
for _, inode := range n.inodes {
size += inode.MemorySize()
}
node = node.Parent()
continue
}
+ modtime := node.Parent().FileInfo().ModTime()
+ node.Lock()
+ locked := node
node, err = node.Child(name, func(child inode) (inode, error) {
if child == nil {
- child, err := node.FS().newNode(name, 0755|os.ModeDir, node.Parent().FileInfo().ModTime())
+ child, err := node.FS().newNode(name, 0755|os.ModeDir, modtime)
if err != nil {
return nil, err
}
return child, nil
}
})
+ locked.Unlock()
if err != nil {
return
}
err = fmt.Errorf("invalid file part %q in path %q", basename, path)
return
}
+ modtime := node.FileInfo().ModTime()
+ node.Lock()
+ defer node.Unlock()
_, err = node.Child(basename, func(child inode) (inode, error) {
switch child := child.(type) {
case nil:
- child, err = node.FS().newNode(basename, 0755, node.FileInfo().ModTime())
+ child, err = node.FS().newNode(basename, 0755, modtime)
if err != nil {
return nil, err
}
}
func (fs *customFileSystem) MountByID(mount string) {
+ fs.root.treenode.Lock()
+ defer fs.root.treenode.Unlock()
fs.root.treenode.Child(mount, func(inode) (inode, error) {
return &vdirnode{
treenode: treenode{
}
func (fs *customFileSystem) MountProject(mount, uuid string) {
+ fs.root.treenode.Lock()
+ defer fs.root.treenode.Unlock()
fs.root.treenode.Child(mount, func(inode) (inode, error) {
return fs.newProjectNode(fs.root, mount, uuid), nil
})
}
func (fs *customFileSystem) MountUsers(mount string) {
+ fs.root.treenode.Lock()
+ defer fs.root.treenode.Unlock()
fs.root.treenode.Child(mount, func(inode) (inode, error) {
return &lookupnode{
stale: fs.Stale,
var _ = check.Suite(&SiteFSSuite{})
+func init() {
+ // Enable DebugLocksPanicMode sometimes. Don't enable it all
+ // the time, though -- it adds many calls to time.Sleep(),
+ // which could hide different bugs.
+ if time.Now().Second()&1 == 0 {
+ DebugLocksPanicMode = true
+ }
+}
+
type SiteFSSuite struct {
client *Client
fs CustomFileSystem
""")
_group.add_argument('--no-follow-links', action='store_false', dest='follow_links',
help="""
-Do not follow file and directory symlinks.
+Ignore file and directory symlinks. Even paths given explicitly on the
+command line will be skipped if they are symlinks.
""")
args.paths = ["-" if x == "/dev/stdin" else x for x in args.paths]
- if len(args.paths) != 1 or os.path.isdir(args.paths[0]):
- if args.filename:
- arg_parser.error("""
+ if args.filename and (len(args.paths) != 1 or os.path.isdir(args.paths[0])):
+ arg_parser.error("""
--filename argument cannot be used when storing a directory or
multiple files.
""")
self._write_stdin(self.filename or 'stdin')
elif not os.path.exists(path):
raise PathDoesNotExistError(u"file or directory '{}' does not exist.".format(path))
+ elif (not self.follow_links) and os.path.islink(path):
+ self.logger.warning("Skipping symlink '{}'".format(path))
+ continue
elif os.path.isdir(path):
# Use absolute paths on cache index so CWD doesn't interfere
# with the caching logic.
else:
# The file already exist on remote collection, skip it.
pass
- self._remote_collection.save(storage_classes=self.storage_classes,
- num_retries=self.num_retries,
+ self._remote_collection.save(num_retries=self.num_retries,
trash_at=self._collection_trash_at())
else:
- if self.storage_classes is None:
- self.storage_classes = ['default']
+ if len(self._local_collection) == 0:
+ self.logger.warning("No files were uploaded, skipping collection creation.")
+ return
self._local_collection.save_new(
name=self.name, owner_uuid=self.owner_uuid,
- storage_classes=self.storage_classes,
ensure_unique_name=self.ensure_unique_name,
num_retries=self.num_retries,
trash_at=self._collection_trash_at())
self._remote_collection = arvados.collection.Collection(
update_collection,
api_client=self._api_client,
+ storage_classes_desired=self.storage_classes,
num_retries=self.num_retries)
except arvados.errors.ApiError as error:
raise CollectionUpdateError("Cannot read collection {} ({})".format(update_collection, error))
self._local_collection = arvados.collection.Collection(
self._state['manifest'],
replication_desired=self.replication_desired,
+ storage_classes_desired=(self.storage_classes or ['default']),
put_threads=self.put_threads,
api_client=self._api_client,
num_retries=self.num_retries)
# Split storage-classes argument
storage_classes = None
if args.storage_classes:
- storage_classes = args.storage_classes.strip().split(',')
- if len(storage_classes) > 1:
- logger.error("Multiple storage classes are not supported currently.")
- sys.exit(1)
-
+ storage_classes = args.storage_classes.strip().replace(' ', '').split(',')
# Setup exclude regex from all the --exclude arguments provided
name_patterns = []
output = writer.manifest_text()
elif args.raw:
output = ','.join(writer.data_locators())
- else:
+ elif writer.manifest_locator() is not None:
try:
expiration_notice = ""
if writer.collection_trash_at() is not None:
"arv-put: Error creating Collection on project: {}.".format(
error))
status = 1
+ else:
+ status = 1
# Print the locator (uuid) of the new collection.
if output is None:
shutil.rmtree(self.tempdir_with_symlink)
def test_symlinks_are_followed_by_default(self):
+ self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
+ self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkedfile')))
cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink])
cwriter.start(save_collection=False)
self.assertIn('linkeddir', cwriter.manifest_text())
cwriter.destroy_cache()
def test_symlinks_are_not_followed_when_requested(self):
+ self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
+ self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkedfile')))
cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink],
follow_links=False)
cwriter.start(save_collection=False)
self.assertNotIn('linkeddir', cwriter.manifest_text())
self.assertNotIn('linkedfile', cwriter.manifest_text())
cwriter.destroy_cache()
+ # Check for bug #17800: passed symlinks should also be ignored.
+ linked_dir = os.path.join(self.tempdir_with_symlink, 'linkeddir')
+ cwriter = arv_put.ArvPutUploadJob([linked_dir], follow_links=False)
+ cwriter.start(save_collection=False)
+ self.assertNotIn('linkeddir', cwriter.manifest_text())
+ cwriter.destroy_cache()
+
+ def test_no_empty_collection_saved(self):
+ self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
+ linked_dir = os.path.join(self.tempdir_with_symlink, 'linkeddir')
+ cwriter = arv_put.ArvPutUploadJob([linked_dir], follow_links=False)
+ cwriter.start(save_collection=True)
+ self.assertIsNone(cwriter.manifest_locator())
+ self.assertEqual('', cwriter.manifest_text())
+ cwriter.destroy_cache()
def test_passing_nonexistant_path_raise_exception(self):
uuid_str = str(uuid.uuid4())
self.call_main_with_args,
['--project-uuid', self.Z_UUID, '--stream'])
- def test_error_when_multiple_storage_classes_specified(self):
- self.assertRaises(SystemExit,
- self.call_main_with_args,
- ['--storage-classes', 'hot,cold'])
-
def test_error_when_excluding_absolute_path(self):
tmpdir = self.make_tmpdir()
self.assertRaises(SystemExit,
def test_put_collection_with_storage_classes_specified(self):
collection = self.run_and_find_collection("", ['--storage-classes', 'hot'])
-
self.assertEqual(len(collection['storage_classes_desired']), 1)
self.assertEqual(collection['storage_classes_desired'][0], 'hot')
+ def test_put_collection_with_multiple_storage_classes_specified(self):
+ collection = self.run_and_find_collection("", ['--storage-classes', ' foo, bar ,baz'])
+ self.assertEqual(len(collection['storage_classes_desired']), 3)
+ self.assertEqual(collection['storage_classes_desired'], ['foo', 'bar', 'baz'])
+
def test_put_collection_without_storage_classes_specified(self):
collection = self.run_and_find_collection("")
-
self.assertEqual(len(collection['storage_classes_desired']), 1)
self.assertEqual(collection['storage_classes_desired'][0], 'default')
GEM
remote: https://rubygems.org/
specs:
- actioncable (5.2.4.5)
- actionpack (= 5.2.4.5)
+ actioncable (5.2.6)
+ actionpack (= 5.2.6)
nio4r (~> 2.0)
websocket-driver (>= 0.6.1)
- actionmailer (5.2.4.5)
- actionpack (= 5.2.4.5)
- actionview (= 5.2.4.5)
- activejob (= 5.2.4.5)
+ actionmailer (5.2.6)
+ actionpack (= 5.2.6)
+ actionview (= 5.2.6)
+ activejob (= 5.2.6)
mail (~> 2.5, >= 2.5.4)
rails-dom-testing (~> 2.0)
- actionpack (5.2.4.5)
- actionview (= 5.2.4.5)
- activesupport (= 5.2.4.5)
+ actionpack (5.2.6)
+ actionview (= 5.2.6)
+ activesupport (= 5.2.6)
rack (~> 2.0, >= 2.0.8)
rack-test (>= 0.6.3)
rails-dom-testing (~> 2.0)
rails-html-sanitizer (~> 1.0, >= 1.0.2)
- actionview (5.2.4.5)
- activesupport (= 5.2.4.5)
+ actionview (5.2.6)
+ activesupport (= 5.2.6)
builder (~> 3.1)
erubi (~> 1.4)
rails-dom-testing (~> 2.0)
rails-html-sanitizer (~> 1.0, >= 1.0.3)
- activejob (5.2.4.5)
- activesupport (= 5.2.4.5)
+ activejob (5.2.6)
+ activesupport (= 5.2.6)
globalid (>= 0.3.6)
- activemodel (5.2.4.5)
- activesupport (= 5.2.4.5)
- activerecord (5.2.4.5)
- activemodel (= 5.2.4.5)
- activesupport (= 5.2.4.5)
+ activemodel (5.2.6)
+ activesupport (= 5.2.6)
+ activerecord (5.2.6)
+ activemodel (= 5.2.6)
+ activesupport (= 5.2.6)
arel (>= 9.0)
- activestorage (5.2.4.5)
- actionpack (= 5.2.4.5)
- activerecord (= 5.2.4.5)
- marcel (~> 0.3.1)
- activesupport (5.2.4.5)
+ activestorage (5.2.6)
+ actionpack (= 5.2.6)
+ activerecord (= 5.2.6)
+ marcel (~> 1.0.0)
+ activesupport (5.2.6)
concurrent-ruby (~> 1.0, >= 1.0.2)
i18n (>= 0.7, < 2)
minitest (~> 5.1)
net-sftp (>= 2.0.0)
net-ssh (>= 2.0.14)
net-ssh-gateway (>= 1.1.0)
- concurrent-ruby (1.1.8)
+ concurrent-ruby (1.1.9)
crass (1.0.6)
erubi (1.10.0)
execjs (2.7.0)
railties (>= 4)
request_store (~> 1.0)
logstash-event (1.2.02)
- loofah (2.9.0)
+ loofah (2.10.0)
crass (~> 1.0.2)
nokogiri (>= 1.5.9)
mail (2.7.1)
mini_mime (>= 0.1.1)
- marcel (0.3.3)
- mimemagic (~> 0.3.2)
+ marcel (1.0.1)
memoist (0.16.2)
metaclass (0.0.4)
method_source (1.0.0)
- mimemagic (0.3.8)
- nokogiri (~> 1)
- mini_mime (1.0.2)
- mini_portile2 (2.5.1)
+ mini_mime (1.1.0)
+ mini_portile2 (2.5.3)
minitest (5.10.3)
mocha (1.8.0)
metaclass (~> 0.0.1)
net-ssh-gateway (2.0.0)
net-ssh (>= 4.0.0)
nio4r (2.5.7)
- nokogiri (1.11.5)
+ nokogiri (1.11.7)
mini_portile2 (~> 2.5.0)
racc (~> 1.4)
oauth2 (1.4.1)
rack (2.2.3)
rack-test (1.1.0)
rack (>= 1.0, < 3)
- rails (5.2.4.5)
- actioncable (= 5.2.4.5)
- actionmailer (= 5.2.4.5)
- actionpack (= 5.2.4.5)
- actionview (= 5.2.4.5)
- activejob (= 5.2.4.5)
- activemodel (= 5.2.4.5)
- activerecord (= 5.2.4.5)
- activestorage (= 5.2.4.5)
- activesupport (= 5.2.4.5)
+ rails (5.2.6)
+ actioncable (= 5.2.6)
+ actionmailer (= 5.2.6)
+ actionpack (= 5.2.6)
+ actionview (= 5.2.6)
+ activejob (= 5.2.6)
+ activemodel (= 5.2.6)
+ activerecord (= 5.2.6)
+ activestorage (= 5.2.6)
+ activesupport (= 5.2.6)
bundler (>= 1.3.0)
- railties (= 5.2.4.5)
+ railties (= 5.2.6)
sprockets-rails (>= 2.0.0)
rails-controller-testing (1.0.4)
actionpack (>= 5.0.1.x)
rails-observers (0.1.5)
activemodel (>= 4.0)
rails-perftest (0.0.7)
- railties (5.2.4.5)
- actionpack (= 5.2.4.5)
- activesupport (= 5.2.4.5)
+ railties (5.2.6)
+ actionpack (= 5.2.6)
+ activesupport (= 5.2.6)
method_source
rake (>= 0.8.7)
thor (>= 0.19.0, < 2.0)
uglifier (2.7.2)
execjs (>= 0.3.0)
json (>= 1.8.0)
- websocket-driver (0.7.3)
+ websocket-driver (0.7.4)
websocket-extensions (>= 0.1.0)
websocket-extensions (0.1.5)
if self.new_record? || self.state_was == Uncommitted
# Allow create-and-commit in a single operation.
permitted.push(*AttrsPermittedBeforeCommit)
+ elsif mounts_changed? && mounts_was.keys == mounts.keys
+ # Ignore the updated mounts if the only changes are default/zero
+ # values as added by controller, see 17774
+ only_defaults = true
+ mounts.each do |path, mount|
+ (mount.to_a - mounts_was[path].to_a).each do |k, v|
+ if !["", false, nil].index(v)
+ only_defaults = false
+ end
+ end
+ end
+ if only_defaults
+ clear_attribute_change("mounts")
+ end
end
case self.state
output_path: test
command: ["echo", "hello"]
container_uuid: zzzzz-dz642-runningcontainr
+ mounts:
+ /tmp:
+ kind: tmp
+ capacity: 24000000000
runtime_constraints:
vcpus: 1
ram: 123
- mounts: {}
requester_for_running:
uuid: zzzzz-xvhdp-req4runningcntr
command: ["echo", "hello"]
container_uuid: zzzzz-dz642-logscontainer03
requesting_container_uuid: zzzzz-dz642-runningcontainr
+ mounts:
+ /tmp:
+ kind: tmp
+ capacity: 24000000000
runtime_constraints:
vcpus: 1
ram: 123
- mounts: {}
running_older:
uuid: zzzzz-xvhdp-cr4runningcntn2
output_path: test
command: ["echo", "hello"]
container_uuid: zzzzz-dz642-runningcontain2
+ mounts:
+ /tmp:
+ kind: tmp
+ capacity: 24000000000
runtime_constraints:
vcpus: 1
ram: 123
- mounts: {}
completed:
uuid: zzzzz-xvhdp-cr4completedctr
output_path: test
command: ["echo", "hello"]
container_uuid: zzzzz-dz642-runningcontain2
+ mounts:
+ /tmp:
+ kind: tmp
+ capacity: 24000000000
runtime_constraints:
vcpus: 1
ram: 123
- mounts: {}
cr_for_failed:
uuid: zzzzz-xvhdp-cr4failedcontnr
output_path: test
command: ["echo", "hello"]
container_uuid: zzzzz-dz642-runningcontainr
+ mounts:
+ /tmp:
+ kind: tmp
+ capacity: 24000000000
runtime_constraints:
vcpus: 1
ram: 123
- mounts: {}
running_to_be_deleted:
uuid: zzzzz-xvhdp-cr5runningcntnr
cwd: test
output_path: test
command: ["echo", "hello"]
+ mounts:
+ /tmp:
+ kind: tmp
+ capacity: 24000000000
container_uuid: zzzzz-dz642-runnincntrtodel
runtime_constraints:
vcpus: 1
ram: 123
- mounts: {}
completed_with_input_mounts:
uuid: zzzzz-xvhdp-crwithinputmnts
updated_at: <%= 1.minute.ago.to_s(:db) %>
started_at: <%= 1.minute.ago.to_s(:db) %>
container_image: test
- cwd: test
- output_path: test
+ cwd: /tmp
+ output_path: /tmp
command: ["echo", "hello"]
runtime_constraints:
ram: 12000000000
vcpus: 4
+ mounts:
+ /tmp:
+ kind: tmp
+ capacity: 24000000000
secret_mounts:
/secret/6x9:
kind: text
updated_at: <%= 2.minute.ago.to_s(:db) %>
started_at: <%= 2.minute.ago.to_s(:db) %>
container_image: test
- cwd: test
- output_path: test
+ cwd: /tmp
+ output_path: /tmp
command: ["echo", "hello"]
+ mounts:
+ /tmp:
+ kind: tmp
+ capacity: 24000000000
runtime_constraints:
ram: 12000000000
vcpus: 4
cwd: test
output_path: test
command: ["echo", "hello"]
+ mounts:
+ /tmp:
+ kind: tmp
+ capacity: 24000000000
runtime_constraints:
ram: 12000000000
vcpus: 4
cwd: test
output_path: test
command: ["echo", "hello"]
+ mounts:
+ /tmp:
+ kind: tmp
+ capacity: 24000000000
runtime_constraints:
ram: 12000000000
vcpus: 4
['Committed', false, {container_count: 2}],
['Committed', false, {container_count: 0}],
['Committed', false, {container_count: nil}],
+ ['Committed', true, {priority: 0, mounts: {"/out" => {"kind" => "tmp", "capacity" => 1000000}}}],
+ ['Committed', true, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp"}}}],
+ # Addition of default values for mounts / runtime_constraints /
+ # scheduling_parameters, as happens in a round-trip through
+ # controller, does not have any real effect and should be
+ # accepted/ignored rather than causing an error when the CR state
+ # dictates those attributes are not allowed to change.
+ ['Committed', true, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp", "exclude_from_output": false}}}],
+ ['Committed', true, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp", "repository_name": ""}}}],
+ ['Committed', true, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp", "content": nil}}}],
+ ['Committed', false, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp", "content": {}}}}],
+ ['Committed', false, {priority: 0, mounts: {"/out" => {"capacity" => 1000000, "kind" => "tmp", "repository_name": "foo"}}}],
+ ['Committed', false, {priority: 0, mounts: {"/out" => {"kind" => "tmp", "capacity" => 1234567}}}],
+ ['Committed', false, {priority: 0, mounts: {}}],
+ ['Committed', true, {priority: 0, runtime_constraints: {"vcpus" => 1, "ram" => 2}}],
+ ['Committed', true, {priority: 0, runtime_constraints: {"vcpus" => 1, "ram" => 2, "keep_cache_ram" => 0}}],
+ ['Committed', true, {priority: 0, runtime_constraints: {"vcpus" => 1, "ram" => 2, "API" => false}}],
+ ['Committed', false, {priority: 0, runtime_constraints: {"vcpus" => 1, "ram" => 2, "keep_cache_ram" => 1}}],
+ ['Committed', false, {priority: 0, runtime_constraints: {"vcpus" => 1, "ram" => 2, "API" => true}}],
+ ['Committed', true, {priority: 0, scheduling_parameters: {"preemptible" => false}}],
+ ['Committed', true, {priority: 0, scheduling_parameters: {"partitions" => []}}],
+ ['Committed', true, {priority: 0, scheduling_parameters: {"max_run_time" => 0}}],
+ ['Committed', false, {priority: 0, scheduling_parameters: {"preemptible" => true}}],
+ ['Committed', false, {priority: 0, scheduling_parameters: {"partitions" => ["foo"]}}],
+ ['Committed', false, {priority: 0, scheduling_parameters: {"max_run_time" => 1}}],
['Final', false, {state: ContainerRequest::Committed, name: "foobar"}],
['Final', false, {name: "foobar", priority: 123}],
['Final', false, {name: "foobar", output_uuid: "zzzzz-4zz18-znfnqtbbv4spc3w"}],
var _ = check.Suite(&UnitSuite{})
func init() {
	// Run this package's tests with lock debugging enabled, so
	// SDK code paths that require a lock panic when called
	// without one instead of going unnoticed.
	arvados.DebugLocksPanicMode = true
}
+
// UnitSuite is a gocheck test suite (registered via check.Suite).
// Config holds the arvados configuration the tests run against.
type UnitSuite struct {
	Config *arvados.Config
}
}
}
- normalizedURL := *r.URL
- normalizedURL.RawPath = ""
- normalizedURL.Path = reMultipleSlashChars.ReplaceAllString(normalizedURL.Path, "/")
- ctxlog.FromContext(r.Context()).Infof("escapedPath %s", normalizedURL.EscapedPath())
- canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, normalizedURL.EscapedPath(), s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
+ normalizedPath := normalizePath(r.URL.Path)
+ ctxlog.FromContext(r.Context()).Debugf("normalizedPath %q", normalizedPath)
+ canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, normalizedPath, s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
ctxlog.FromContext(r.Context()).Debugf("s3stringToSign: canonicalRequest %s", canonicalRequest)
return fmt.Sprintf("%s\n%s\n%s\n%s", alg, r.Header.Get("X-Amz-Date"), scope, hashdigest(sha256.New(), canonicalRequest)), nil
}
+func normalizePath(s string) string {
+ // (url.URL).EscapedPath() would be incorrect here. AWS
+ // documentation specifies the URL path should be normalized
+ // according to RFC 3986, i.e., unescaping ALPHA / DIGIT / "-"
+ // / "." / "_" / "~". The implication is that everything other
+ // than those chars (and "/") _must_ be percent-encoded --
+ // even chars like ";" and "," that are not normally
+ // percent-encoded in paths.
+ out := ""
+ for _, c := range []byte(reMultipleSlashChars.ReplaceAllString(s, "/")) {
+ if (c >= 'a' && c <= 'z') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9') ||
+ c == '-' ||
+ c == '.' ||
+ c == '_' ||
+ c == '~' ||
+ c == '/' {
+ out += string(c)
+ } else {
+ out += fmt.Sprintf("%%%02X", c)
+ }
+ }
+ return out
+}
+
func s3signature(secretKey, scope, signedHeaders, stringToSign string) (string, error) {
// scope is {datestamp}/{region}/{service}/aws4_request
drs := strings.Split(scope, "/")
rawPath string
normalizedPath string
}{
- {"/foo", "/foo"}, // boring case
- {"/foo%5fbar", "/foo_bar"}, // _ must not be escaped
- {"/foo%2fbar", "/foo/bar"}, // / must not be escaped
- {"/(foo)", "/%28foo%29"}, // () must be escaped
- {"/foo%5bbar", "/foo%5Bbar"}, // %XX must be uppercase
+ {"/foo", "/foo"}, // boring case
+ {"/foo%5fbar", "/foo_bar"}, // _ must not be escaped
+ {"/foo%2fbar", "/foo/bar"}, // / must not be escaped
+ {"/(foo)/[];,", "/%28foo%29/%5B%5D%3B%2C"}, // ()[];, must be escaped
+ {"/foo%5bbar", "/foo%5Bbar"}, // %XX must be uppercase
+ {"//foo///.bar", "/foo/.bar"}, // "//" and "///" must be squashed to "/"
} {
+ c.Logf("trial %q", trial)
+
date := time.Now().UTC().Format("20060102T150405Z")
scope := "20200202/zzzzz/S3/aws4_request"
canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", "GET", trial.normalizedPath, "", "host:host.example.com\n", "host", "")
buf, err := cmd.CombinedOutput()
c.Check(err, check.IsNil)
c.Check(string(buf), check.Matches, `.* 3 +s3://`+arvadostest.FooCollection+`/foo\n`)
+
+ // This tests whether s3cmd's path normalization agrees with
+ // keep-web's signature verification wrt chars like "|"
+ // (neither reserved nor unreserved) and "," (not normally
+ // percent-encoded in a path).
+ cmd = exec.Command("s3cmd", "--no-ssl", "--host="+s.testServer.Addr, "--host-bucket="+s.testServer.Addr, "--access_key="+arvadostest.ActiveTokenUUID, "--secret_key="+arvadostest.ActiveToken, "get", "s3://"+arvadostest.FooCollection+"/foo,;$[|]bar")
+ buf, err = cmd.CombinedOutput()
+ c.Check(err, check.NotNil)
+ c.Check(string(buf), check.Matches, `(?ms).*NoSuchKey.*\n`)
}
func (s *IntegrationSuite) TestS3BucketInHost(c *check.C) {
# The arvados-formula version. For a stable release, this should be a
# branch name (e.g. X.Y-dev) or tag for the release.
-ARVADOS_TAG="master"
+ARVADOS_TAG="main"
# Other formula versions we depend on
POSTGRES_TAG="v0.41.6"
-NGINX_TAG="temp-fix-missing-statements-in-pillar"
+NGINX_TAG="v2.7.4"
DOCKER_TAG="v1.0.0"
LOCALE_TAG="v0.3.4"
LETSENCRYPT_TAG="v2.1.0"
git clone --branch "${ARVADOS_TAG}" https://git.arvados.org/arvados-formula.git
git clone --branch "${DOCKER_TAG}" https://github.com/saltstack-formulas/docker-formula.git
git clone --branch "${LOCALE_TAG}" https://github.com/saltstack-formulas/locale-formula.git
-# git clone --branch "${NGINX_TAG}" https://github.com/saltstack-formulas/nginx-formula.git
-git clone --branch "${NGINX_TAG}" https://github.com/netmanagers/nginx-formula.git
+git clone --branch "${NGINX_TAG}" https://github.com/saltstack-formulas/nginx-formula.git
git clone --branch "${POSTGRES_TAG}" https://github.com/saltstack-formulas/postgres-formula.git
git clone --branch "${LETSENCRYPT_TAG}" https://github.com/saltstack-formulas/letsencrypt-formula.git