Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <ldipentima@veritasgenetics.com>
all_services_stopped=
fail=1
- # Create config if it hasn't been created already. Normally
- # this happens in install_env because there are downstream
- # steps like workbench install which require a valid
- # config.yml, but when invoked with --skip-install that doesn't
- # happen, so make sure to run it here.
- eval $(python sdk/python/tests/run_test_server.py setup_config)
-
cd "$WORKSPACE" \
&& eval $(python sdk/python/tests/run_test_server.py start --auth admin) \
&& export ARVADOS_TEST_API_HOST="$ARVADOS_API_HOST" \
. "$VENVDIR/bin/activate"
# Needed for run_test_server.py which is used by certain (non-Python) tests.
- pip install --no-cache-dir PyYAML \
+ pip install --no-cache-dir PyYAML future \
|| fatal "pip install PyYAML failed"
- # Create config file. The run_test_server script requires PyYAML,
- # so virtualenv needs to be active. Downstream steps like
- # workbench install which require a valid config.yml.
- eval $(python sdk/python/tests/run_test_server.py setup_config)
-
# Preinstall libcloud if using a fork; otherwise nodemanager "pip
# install" won't pick it up by default.
if [[ -n "$LIBCLOUD_PIN_SRC" ]]; then
}
do_test() {
+ check_arvados_config "$1"
+
case "${1}" in
apps/workbench_units | apps/workbench_functionals | apps/workbench_integration)
suite=apps/workbench
return $result
}
+check_arvados_config() {
+ if [[ "$1" = "env" ]] ; then
+ return
+ fi
+ if [[ -z "$ARVADOS_CONFIG" ]] ; then
+        # Create config file. The run_test_server script requires
+        # PyYAML, so the virtualenv needs to be active. Downstream
+        # steps, like the workbench install, require a valid config.yml.
+ if [[ ! -s "$VENVDIR/bin/activate" ]] ; then
+ install_env
+ fi
+ . "$VENVDIR/bin/activate"
+ eval $(python sdk/python/tests/run_test_server.py setup_config)
+ deactivate
+ fi
+}
+
do_install() {
+ check_arvados_config "$1"
if [[ -n "${skip[install]}" || ( -n "${only_install}" && "${only_install}" != "${1}" && "${only_install}" != "${2}" ) ]]; then
return 0
fi
&& git --git-dir internal.git init \
|| return 1
- cd "$WORKSPACE/services/api" \
- && RAILS_ENV=test bundle exec rails db:environment:set \
- && RAILS_ENV=test bundle exec rake db:drop \
- && RAILS_ENV=test bundle exec rake db:setup \
- && RAILS_ENV=test bundle exec rake db:fixtures:load
+
+ (cd "$WORKSPACE/services/api"
+ export RAILS_ENV=test
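+        # db:environment:set succeeds only when the database already
+        # exists, so db:drop runs only when there is something to drop.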
+ if bundle exec rails db:environment:set ; then
+ bundle exec rake db:drop
+ fi
+ bundle exec rake db:setup \
+ && bundle exec rake db:fixtures:load
+ )
}
declare -a pythonstuff
</code></pre>
</notextile>
-h2. Create a dispatcher token
+h2. Configure the dispatcher (optional)
-Create an Arvados superuser token for use by the dispatcher. If you have multiple dispatch processes, you should give each one a different token.
+Crunch-dispatch-slurm reads the common configuration file at @/etc/arvados/config.yml@. The essential configuration parameters will already be set by previous install steps, so no additional configuration is required. The following sections describe optional configuration parameters.
-{% include 'create_superuser_token' %}
+h3(#PollPeriod). Containers.PollInterval
-h2. Configure the dispatcher
-
-Set up crunch-dispatch-slurm's configuration directory:
+crunch-dispatch-slurm polls the API server periodically for new containers to run. The @PollInterval@ option controls how often this poll happens. Set this to a string of numbers suffixed with one of the time units @ns@, @us@, @ms@, @s@, @m@, or @h@. For example:
<notextile>
-<pre><code>~$ <span class="userinput">sudo mkdir -p /etc/arvados</span>
-~$ <span class="userinput">sudo install -d -o root -g <b>crunch</b> -m 0750 /etc/arvados/crunch-dispatch-slurm</span>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ <code class="userinput">PollInterval: <b>3m30s</b>
</code></pre>
</notextile>
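+
+These duration strings use the same units as Go's @time.ParseDuration@. A quick way to sanity-check a value before putting it in the config (a standalone sketch, not part of the dispatcher):
+
+<notextile>
+<pre><code>package main
+
+import (
+	"fmt"
+	"time"
+)
+
+func main() {
+	// "3m30s" parses to 210 seconds; an invalid string returns an error.
+	d, err := time.ParseDuration("3m30s")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(d.Seconds()) // 210
+}
+</code></pre>
+</notextile>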
-Edit @/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml@ to authenticate to your Arvados API server, using the token you generated in the previous step. Follow this YAML format:
+h3(#ReserveExtraRAM). Containers.ReserveExtraRAM: Extra RAM for jobs
+
+Extra RAM to reserve (in bytes) on each SLURM job submitted by Arvados, added to the amount specified in the container's @runtime_constraints@. If not provided, the default value is zero. This is helpful when using @-cgroup-parent-subsystem@, where @crunch-run@ and @arv-mount@ share the control group memory limit with the user process. In this situation, at least 256MiB is recommended to accommodate each container's @crunch-run@ and @arv-mount@ processes.
+
+Supports suffixes @KB@, @KiB@, @MB@, @MiB@, @GB@, @GiB@, @TB@, @TiB@, @PB@, @PiB@, @EB@, @EiB@ (where @KB@ is 10[^3^], @KiB@ is 2[^10^], @MB@ is 10[^6^], @MiB@ is 2[^20^] and so forth).
<notextile>
-<pre><code class="userinput">Client:
- APIHost: <b>zzzzz.arvadosapi.com</b>
- AuthToken: <b>zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ <code class="userinput">ReserveExtraRAM: <b>256MiB</b></code>
+</pre>
</notextile>
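+
+For reference, @256MiB@ works out to 268435456 bytes. A minimal sketch of the suffix arithmetic (illustrative only, not the SDK's actual parser):
+
+<notextile>
+<pre><code>package main
+
+import "fmt"
+
+func main() {
+	// Binary suffixes multiply by powers of 1024,
+	// decimal suffixes by powers of 1000.
+	const MiB = 1024 * 1024
+	const MB = 1000 * 1000
+	fmt.Println(256 * MiB) // 268435456
+	fmt.Println(256 * MB)  // 256000000
+}
+</code></pre>
+</notextile>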
-This is the only configuration required by crunch-dispatch-slurm. The subsections below describe optional configuration flags you can set inside the main configuration object.
-
-h3(#KeepServiceURIs). Client::KeepServiceURIs
+h3(#MinRetryPeriod). Containers.MinRetryPeriod: Rate-limit repeated attempts to start containers
-Override Keep service discovery with a predefined list of Keep URIs. This can be useful if the compute nodes run a local keepstore that should handle all Keep traffic. Example:
+If SLURM is unable to run a container, the dispatcher will submit it again after the next @PollInterval@. If @PollInterval@ is very short, this can be excessive. If @MinRetryPeriod@ is set, the dispatcher will avoid submitting the same container to SLURM more than once in the given time span.
<notextile>
-<pre><code class="userinput">Client:
- APIHost: zzzzz.arvadosapi.com
- AuthToken: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
- KeepServiceURIs:
- - <b>http://127.0.0.1:25107</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ <code class="userinput">MinRetryPeriod: <b>30s</b></code>
+</pre>
</notextile>
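+
+The effect is a simple per-container rate limit. A minimal sketch of the idea (hypothetical types; not the dispatcher's internals):
+
+<notextile>
+<pre><code>package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// submitter skips resubmitting a container until minRetryPeriod
+// has elapsed since its last submission attempt.
+type submitter struct {
+	minRetryPeriod time.Duration
+	lastAttempt    map[string]time.Time
+}
+
+func (s *submitter) shouldSubmit(uuid string) bool {
+	if t, ok := s.lastAttempt[uuid]; ok {
+		if time.Now().Before(t.Add(s.minRetryPeriod)) {
+			return false
+		}
+	}
+	s.lastAttempt[uuid] = time.Now()
+	return true
+}
+
+func main() {
+	s := submitter{minRetryPeriod: 30 * time.Second, lastAttempt: map[string]time.Time{}}
+	fmt.Println(s.shouldSubmit("zzzzz-dz642-000000000000000")) // true
+	fmt.Println(s.shouldSubmit("zzzzz-dz642-000000000000000")) // false: too soon
+}
+</code></pre>
+</notextile>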
-h3(#PollPeriod). PollPeriod
+h3(#KeepServiceURIs). Containers.SLURM.SbatchEnvironmentVariables
-crunch-dispatch-slurm polls the API server periodically for new containers to run. The @PollPeriod@ option controls how often this poll happens. Set this to a string of numbers suffixed with one of the time units @ns@, @us@, @ms@, @s@, @m@, or @h@. For example:
+Some Arvados installations run a local keepstore on each compute node to handle all Keep traffic. To override Keep service discovery and access the local keepstore instead of the global servers, set @ARVADOS_KEEP_SERVICES@ in @SbatchEnvironmentVariables@:
<notextile>
-<pre><code class="userinput">PollPeriod: <b>3m30s</b>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ SLURM:
+ <span class="userinput">SbatchEnvironmentVariables:
+ ARVADOS_KEEP_SERVICES: "http://127.0.0.1:25107"</span>
-</code></pre>
+</pre>
</notextile>
-h3(#PrioritySpread). PrioritySpread
+h3(#PrioritySpread). Containers.SLURM.PrioritySpread
crunch-dispatch-slurm adjusts the "nice" values of its SLURM jobs to ensure containers are prioritized correctly relative to one another. This option tunes the adjustment mechanism.
* If non-Arvados jobs run on your SLURM cluster, and your Arvados containers are waiting too long in the SLURM queue because their "nice" values are too high for them to compete with other SLURM jobs, you should use a smaller PrioritySpread value.
The smallest usable value is @1@. The default value of @10@ is used if this option is zero or negative. Example:
<notextile>
-<pre><code class="userinput">PrioritySpread: <b>1000</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ SLURM:
+        <code class="userinput">PrioritySpread: <b>1000</b></code>
+</pre>
</notextile>
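+
+For intuition, here is a hedged sketch of one way priorities can map to "nice" values, where each unit of priority difference contributes @PrioritySpread@ to the nice value (illustrative; not the dispatcher's exact algorithm):
+
+<notextile>
+<pre><code>package main
+
+import "fmt"
+
+// niceValues gives the highest-priority job nice 0 and spaces the
+// rest out proportionally to the configured spread.
+func niceValues(priorities []int64, spread int64) []int64 {
+	var max int64
+	for _, p := range priorities {
+		if p > max {
+			max = p
+		}
+	}
+	out := make([]int64, len(priorities))
+	for i, p := range priorities {
+		out[i] = (max - p) * spread
+	}
+	return out
+}
+
+func main() {
+	fmt.Println(niceValues([]int64{500, 400, 100}, 10)) // [0 1000 4000]
+	fmt.Println(niceValues([]int64{500, 400, 100}, 1))  // [0 100 400]
+}
+</code></pre>
+</notextile>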
-h3(#SbatchArguments). SbatchArguments
+h3(#SbatchArguments). Containers.SLURM.SbatchArgumentsList
-When crunch-dispatch-slurm invokes @sbatch@, you can add arguments to the command by specifying @SbatchArguments@. You can use this to send the jobs to specific cluster partitions or add resource requests. Set @SbatchArguments@ to an array of strings. For example:
+When crunch-dispatch-slurm invokes @sbatch@, you can add arguments to the command by specifying @SbatchArgumentsList@. You can use this to send the jobs to specific cluster partitions or add resource requests. Set @SbatchArgumentsList@ to an array of strings. For example:
<notextile>
-<pre><code class="userinput">SbatchArguments:
-- <b>"--partition=PartitionName"</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ SLURM:
+ <code class="userinput">SbatchArgumentsList:
+ - <b>"--partition=PartitionName"</b></code>
+</pre>
</notextile>
-Note: If an argument is supplied multiple times, @slurm@ uses the value of the last occurrence of the argument on the command line. Arguments specified through Arvados are added after the arguments listed in SbatchArguments. This means, for example, an Arvados container with that specifies @partitions@ in @scheduling_parameter@ will override an occurrence of @--partition@ in SbatchArguments. As a result, for container parameters that can be specified through Arvados, SbatchArguments can be used to specify defaults but not enforce specific policy.
+Note: If an argument is supplied multiple times, @slurm@ uses the value of the last occurrence of the argument on the command line. Arguments specified through Arvados are added after the arguments listed in @SbatchArgumentsList@. This means, for example, an Arvados container that specifies @partitions@ in @scheduling_parameters@ will override an occurrence of @--partition@ in @SbatchArgumentsList@. As a result, for container parameters that can be specified through Arvados, @SbatchArgumentsList@ can be used to specify defaults but not enforce specific policy.
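+
+A sketch of that ordering (hypothetical variable names): container-derived arguments are appended after the configured defaults, so the later occurrence wins in @sbatch@:
+
+<notextile>
+<pre><code>package main
+
+import "fmt"
+
+func main() {
+	configured := []string{"--partition=PartitionName"} // SbatchArgumentsList
+	fromContainer := []string{"--partition=gpu"}        // from scheduling_parameters
+	args := append([]string{"sbatch"}, configured...)
+	args = append(args, fromContainer...)
+	fmt.Println(args) // sbatch uses the last --partition, i.e. "gpu"
+}
+</code></pre>
+</notextile>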
-h3(#CrunchRunCommand-cgroups). CrunchRunCommand: Dispatch to SLURM cgroups
+h3(#CrunchRunCommand-cgroups). Containers.CrunchRunArgumentsList: Dispatch to SLURM cgroups
If your SLURM cluster uses the @task/cgroup@ TaskPlugin, you can configure Crunch's Docker containers to be dispatched inside SLURM's cgroups. This provides consistent enforcement of resource constraints. To do this, use a crunch-dispatch-slurm configuration like the following:
<notextile>
-<pre><code class="userinput">CrunchRunCommand:
-- <b>crunch-run</b>
-- <b>"-cgroup-parent-subsystem=memory"</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ <code class="userinput">CrunchRunArgumentsList:
+ - <b>"-cgroup-parent-subsystem=memory"</b></code>
+</pre>
</notextile>
The choice of subsystem ("memory" in this example) must correspond to one of the resource types enabled in SLURM's @cgroup.conf@. Limits for other resource types will also be respected. The specified subsystem is singled out only to let Crunch determine the name of the cgroup provided by SLURM. When doing this, you should also set "ReserveExtraRAM":#ReserveExtraRAM .
{% include 'notebox_end' %}
-h3(#CrunchRunCommand-network). CrunchRunCommand: Using host networking for containers
+h3(#CrunchRunCommand-network). Containers.CrunchRunArgumentsList: Using host networking for containers
Older Linux kernels (prior to 3.18) have bugs in network namespace handling which can lead to compute node lockups. This is indicated by blocked kernel tasks in "Workqueue: netns cleanup_net". If you are experiencing this problem, as a workaround you can disable use of network namespaces by Docker across the cluster. Be aware this reduces container isolation, which may be a security risk.
<notextile>
-<pre><code class="userinput">CrunchRunCommand:
-- <b>crunch-run</b>
-- <b>"-container-enable-networking=always"</b>
-- <b>"-container-network-mode=host"</b>
-</code></pre>
-</notextile>
-
-h3(#MinRetryPeriod). MinRetryPeriod: Rate-limit repeated attempts to start containers
-
-If SLURM is unable to run a container, the dispatcher will submit it again after the next PollPeriod. If PollPeriod is very short, this can be excessive. If MinRetryPeriod is set, the dispatcher will avoid submitting the same container to SLURM more than once in the given time span.
-
-<notextile>
-<pre><code class="userinput">MinRetryPeriod: <b>30s</b>
-</code></pre>
-</notextile>
-
-h3(#ReserveExtraRAM). ReserveExtraRAM: Extra RAM for jobs
-
-Extra RAM to reserve (in bytes) on each SLURM job submitted by Arvados, which is added to the amount specified in the container's @runtime_constraints@. If not provided, the default value is zero. Helpful when using @-cgroup-parent-subsystem@, where @crunch-run@ and @arv-mount@ share the control group memory limit with the user process. In this situation, at least 256MiB is recommended to accomodate each container's @crunch-run@ and @arv-mount@ processes.
-
-<notextile>
-<pre><code class="userinput">ReserveExtraRAM: <b>268435456</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ <code class="userinput">CrunchRunArgumentsList:
+ - <b>"-container-enable-networking=always"</b>
+ - <b>"-container-network-mode=host"</b></code>
+</pre>
</notextile>
h2. Restart the dispatcher
<pre><code>~$ <span class="userinput">arvados-ws -h</span>
Usage of arvados-ws:
-config path
- path to config file (default "/etc/arvados/ws/ws.yml")
+ path to config file (default "/etc/arvados/config.yml")
-dump-config
show current configuration and exit
</code></pre>
</notextile>
-h3. Create a configuration file
+h3. Update cluster config
-Create @/etc/arvados/ws/ws.yml@ using the following template. Replace @xxxxxxxx@ with the "password you generated during database setup":install-postgresql.html#api.
+Edit the cluster config at @/etc/arvados/config.yml@ and set @Services.Websocket.ExternalURL@ and @Services.Websocket.InternalURLs@. Replace @zzzzz@ with your cluster id, and @uuid_prefix.your.domain@ with your cluster's hostname.
<notextile>
-<pre><code>Client:
- APIHost: <span class="userinput">uuid_prefix.your.domain</span>:443
-Listen: ":<span class="userinput">9003</span>"
-Postgres:
- dbname: arvados_production
- host: localhost
- password: <span class="userinput">xxxxxxxx</span>
- user: arvados
-</code></pre>
+<pre><code>Clusters:
+ zzzzz:
+ Services:
+ <span class="userinput">Websocket:
+ ExternalURL: wss://ws.uuid_prefix.your.domain/websocket
+ InternalURLs:
+ "http://localhost:9003": {}
+</span></code></pre>
</notextile>
h3. Start the service (option 1: systemd)
h3. Update API server configuration
-Ensure the websocket server address is correct in the API server configuration file @/etc/arvados/api/application.yml@.
-
-<notextile>
-<pre><code>websocket_address: wss://ws.<span class="userinput">uuid_prefix.your.domain</span>/websocket
-</code></pre>
-</notextile>
-
Restart Nginx to reload the API server configuration.
<notextile>
h3. Verify DNS and proxy setup
-Use a host elsewhere on the Internet to confirm that your DNS, proxy, and SSL are configured correctly.
+Use a host elsewhere on the Internet to confirm that your DNS, proxy, and SSL are configured correctly. In the @Authorization: Bearer xxxx@ header, replace @xxxx@ with the value of @ManagementToken@ from @config.yml@.
<notextile>
-<pre><code>$ <span class="userinput">curl https://ws.<b>uuid_prefix.your.domain</b>/status.json</span>
-{"Clients":1}
+<pre><code>$ <span class="userinput">curl -H "Authorization: Bearer xxxx" https://ws.<b>uuid_prefix.your.domain</b>/_health/ping</span>
+{"health":"OK"}
</code></pre>
</notextile>
SLURM:
PrioritySpread: 0
SbatchArgumentsList: []
+ SbatchEnvironmentVariables:
+ SAMPLE: ""
Managed:
# Path to dns server configuration directory
# (e.g. /etc/unbound.d/conf.d). If false, do not write any config
cluster.SystemRootToken = client.AuthToken
}
cluster.TLS.Insecure = client.Insecure
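+	// Convert the deprecated client.KeepServiceURIs list to a
+	// space-separated string, to be passed to containers via the
+	// ARVADOS_KEEP_SERVICES environment variable.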
+ ks := ""
+ for i, u := range client.KeepServiceURIs {
+ if i > 0 {
+ ks += " "
+ }
+ ks += u
+ }
+ cluster.Containers.SLURM.SbatchEnvironmentVariables = map[string]string{"ARVADOS_KEEP_SERVICES": ks}
}
// update config using values from a crunch-dispatch-slurm config file.
if err != nil {
return err
}
+
+ // ClusterID is not marshalled by default (see `json:"-"`).
+ // Add it back here so it is included in the exported config.
+ m["ClusterID"] = cluster.ClusterID
err = redactUnsafe(m, "", "")
if err != nil {
return err
// exists.
var whitelist = map[string]bool{
// | sort -t'"' -k2,2
+ "ClusterID": true,
"API": true,
"API.AsyncPermissionsUpdateInterval": false,
"API.DisabledAPIs": false,
confdata := strings.Replace(string(DefaultYAML), "SAMPLE", "testkey", -1)
cfg, err := testLoader(c, confdata, nil).Load()
c.Assert(err, check.IsNil)
- cluster := cfg.Clusters["xxxxx"]
+ cluster, err := cfg.GetCluster("xxxxx")
+ c.Assert(err, check.IsNil)
cluster.ManagementToken = "abcdefg"
var exported bytes.Buffer
- err = ExportJSON(&exported, &cluster)
+ err = ExportJSON(&exported, cluster)
c.Check(err, check.IsNil)
if err != nil {
c.Logf("If all the new keys are safe, add these to whitelist in export.go:")
c.Logf("\t%q: true,", strings.Replace(k, `"`, "", -1))
}
}
- c.Check(exported.String(), check.Not(check.Matches), `(?ms).*abcdefg.*`)
+ var exportedStr = exported.String()
+ c.Check(exportedStr, check.Matches, `(?ms).*ClusterID":"xxxxx.*`)
+ c.Check(exportedStr, check.Not(check.Matches), `(?ms).*abcdefg.*`)
}
SLURM:
PrioritySpread: 0
SbatchArgumentsList: []
+ SbatchEnvironmentVariables:
+ SAMPLE: ""
Managed:
# Path to dns server configuration directory
# (e.g. /etc/unbound.d/conf.d). If false, do not write any config
LogUpdateSize ByteSize
}
SLURM struct {
- PrioritySpread int64
- SbatchArgumentsList []string
- Managed struct {
+ PrioritySpread int64
+ SbatchArgumentsList []string
+ SbatchEnvironmentVariables map[string]string
+ Managed struct {
DNSServerConfDir string
DNSServerConfTemplate string
DNSServerReloadCommand string
# filter on that same cutoff time, or
# (once we see our first matching event)
# the ID of the last-seen event.
- self._skip_old_events = [[
+ #
+ # Note: self._skip_old_events must not be
+ # set until the threshold is decided.
+ # Otherwise, tests will be unreliable.
+ filter_by_time = [[
"created_at", ">=",
time.strftime(
"%Y-%m-%dT%H:%M:%SZ",
items = self.api.logs().list(
order="id desc",
limit=1,
- filters=f+self._skip_old_events).execute()
+ filters=f+filter_by_time).execute()
if items["items"]:
self._skip_old_events = [
["id", ">", str(items["items"][0]["id"])]]
"items": [],
"items_available": 0,
}
+ else:
+ # No recent events. We can keep using
+ # the same timestamp threshold until
+ # we receive our first new event.
+ self._skip_old_events = filter_by_time
else:
# In this case, either we know the most
# recent matching ID, or we know there
blobSignatureTtl: Rails.configuration.Collections.BlobSigningTTL,
maxRequestSize: Rails.configuration.API.MaxRequestSize,
maxItemsPerResponse: Rails.configuration.API.MaxItemsPerResponse,
- dockerImageFormats: Rails.configuration.Containers.SupportedDockerImageFormats,
+ dockerImageFormats: Rails.configuration.Containers.SupportedDockerImageFormats.keys,
crunchLogBytesPerEvent: Rails.configuration.Containers.Logging.LogBytesPerEvent,
crunchLogSecondsBetweenEvents: Rails.configuration.Containers.Logging.LogSecondsBetweenEvents,
crunchLogThrottlePeriod: Rails.configuration.Containers.Logging.LogThrottlePeriod,
end
end
Rails.configuration.API.DisabledAPIs.each do |method, _|
- ctrl, action = method.split('.', 2)
+ ctrl, action = method.to_s.split('.', 2)
discovery[:resources][ctrl][:methods].delete(action.to_sym)
end
discovery
if disp.Client.Insecure {
os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
}
- os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(disp.Client.KeepServiceURIs, " "))
os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
+ for k, v := range disp.cluster.Containers.SLURM.SbatchEnvironmentVariables {
+ os.Setenv(k, v)
+ }
} else {
disp.logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
}
"fmt"
"io"
"io/ioutil"
- "log"
"net/http"
"net/http/httptest"
"os"
Client:
APIHost: example.com
AuthToken: abcdefg
+ KeepServiceURIs:
+ - https://example.com/keep1
+ - https://example.com/keep2
SbatchArguments: ["--foo", "bar"]
PollPeriod: 12s
PrioritySpread: 42
`)
tmpfile, err := ioutil.TempFile("", "example")
if err != nil {
- log.Fatal(err)
+		c.Fatal(err)
}
defer os.Remove(tmpfile.Name()) // clean up
if _, err := tmpfile.Write(content); err != nil {
- log.Fatal(err)
+		c.Fatal(err)
}
if err := tmpfile.Close(); err != nil {
- log.Fatal(err)
+		c.Fatal(err)
}
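+	// Clear any ambient value so the check below verifies that
+	// configure() sets ARVADOS_KEEP_SERVICES itself.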
+ os.Setenv("ARVADOS_KEEP_SERVICES", "")
err = s.disp.configure("crunch-dispatch-slurm", []string{"-config", tmpfile.Name()})
c.Check(err, IsNil)
c.Check(s.disp.cluster.Containers.ReserveExtraRAM, Equals, arvados.ByteSize(12345))
c.Check(s.disp.cluster.Containers.MinRetryPeriod, Equals, arvados.Duration(13*time.Second))
c.Check(s.disp.cluster.API.MaxItemsPerResponse, Equals, 99)
+ c.Check(s.disp.cluster.Containers.SLURM.SbatchEnvironmentVariables, DeepEquals, map[string]string{
+ "ARVADOS_KEEP_SERVICES": "https://example.com/keep1 https://example.com/keep2",
+ })
+ c.Check(os.Getenv("ARVADOS_KEEP_SERVICES"), Equals, "https://example.com/keep1 https://example.com/keep2")
}
stripParts = 4
pathToken = true
} else {
- log.Info(" !!!! ATTN: Into /collections/uuid/path with anon token: ", h.Config.cluster.Users.AnonymousUserToken)
// /collections/ID/PATH...
collectionID = parseCollectionIDFromURL(pathParts[1])
- tokens = []string{h.Config.cluster.Users.AnonymousUserToken}
stripParts = 2
+ // This path is only meant to work for public
+ // data. Tokens provided with the request are
+ // ignored.
+ credentialsOK = false
}
}
forceReload = true
}
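+	// Gather tokens supplied with the incoming request once, for
+	// use by all of the code paths below.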
+ if credentialsOK {
+ reqTokens = auth.CredentialsFromRequest(r).Tokens
+ }
+
formToken := r.FormValue("api_token")
if formToken != "" && r.Header.Get("Origin") != "" && attachment && r.URL.Query().Get("api_token") == "" {
// The client provided an explicit token in the POST
//
// * The token isn't embedded in the URL, so we don't
// need to worry about bookmarks and copy/paste.
- tokens = append(tokens, formToken)
+ reqTokens = append(reqTokens, formToken)
} else if formToken != "" && browserMethod[r.Method] {
// The client provided an explicit token in the query
// string, or a form in POST body. We must put the
}
if useSiteFS {
- if tokens == nil {
- tokens = auth.CredentialsFromRequest(r).Tokens
- }
- h.serveSiteFS(w, r, tokens, credentialsOK, attachment)
+ h.serveSiteFS(w, r, reqTokens, credentialsOK, attachment)
return
}
}
if tokens == nil {
- if credentialsOK {
- reqTokens = auth.CredentialsFromRequest(r).Tokens
- }
tokens = append(reqTokens, h.Config.cluster.Users.AnonymousUserToken)
}
return resp
}
-func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
+func (s *IntegrationSuite) TestDirectoryListingWithAnonymousToken(c *check.C) {
+ s.testServer.Config.cluster.Users.AnonymousUserToken = arvadostest.AnonymousToken
+ s.testDirectoryListing(c)
+}
+
+func (s *IntegrationSuite) TestDirectoryListingWithNoAnonymousToken(c *check.C) {
+ s.testServer.Config.cluster.Users.AnonymousUserToken = ""
+ s.testDirectoryListing(c)
+}
+
+func (s *IntegrationSuite) testDirectoryListing(c *check.C) {
s.testServer.Config.cluster.Services.WebDAVDownload.ExternalURL.Host = "download.example.com"
authHeader := http.Header{
"Authorization": {"OAuth2 " + arvadostest.ActiveToken},
expect: []string{"foo", "bar"},
cutDirs: 1,
},
- // This test case fails
{
- uri: "download.example.com/collections/" + arvadostest.FooAndBarFilesInDirUUID + "/",
- header: authHeader,
- expect: []string{"dir1/foo", "dir1/bar"},
- cutDirs: 2,
+ // URLs of this form ignore authHeader, and
+ // FooAndBarFilesInDirUUID isn't public, so
+ // this returns 404.
+ uri: "download.example.com/collections/" + arvadostest.FooAndBarFilesInDirUUID + "/",
+ header: authHeader,
+ expect: nil,
},
{
uri: "download.example.com/users/active/foo_file_in_dir/",
}
func (s *IntegrationSuite) TestMetrics(c *check.C) {
+ s.testServer.Config.cluster.Services.WebDAVDownload.ExternalURL.Host = s.testServer.Addr
origin := "http://" + s.testServer.Addr
req, _ := http.NewRequest("GET", origin+"/notfound", nil)
_, err := http.DefaultClient.Do(req)
set -u
-uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
-
-if ! test -s /var/lib/arvados/api_secret_token ; then
- ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/api_secret_token
-fi
-secret_token=$(cat /var/lib/arvados/api_secret_token)
-
-if ! test -s /var/lib/arvados/blob_signing_key ; then
- ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/blob_signing_key
-fi
-blob_signing_key=$(cat /var/lib/arvados/blob_signing_key)
-
-if ! test -s /var/lib/arvados/management_token ; then
- ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/management_token
-fi
-management_token=$(cat /var/lib/arvados/management_token)
-
-sso_app_secret=$(cat /var/lib/arvados/sso_app_secret)
-
-if test -s /var/lib/arvados/vm-uuid ; then
- vm_uuid=$(cat /var/lib/arvados/vm-uuid)
-else
- vm_uuid=$uuid_prefix-2x53u-$(ruby -e 'puts rand(2**400).to_s(36)[0,15]')
- echo $vm_uuid > /var/lib/arvados/vm-uuid
-fi
-
-if ! test -f /var/lib/arvados/api_database_pw ; then
- ruby -e 'puts rand(2**128).to_s(36)' > /var/lib/arvados/api_database_pw
-fi
-database_pw=$(cat /var/lib/arvados/api_database_pw)
-
-if ! (psql postgres -c "\du" | grep "^ arvados ") >/dev/null ; then
- psql postgres -c "create user arvados with password '$database_pw'"
-fi
-psql postgres -c "ALTER USER arvados WITH SUPERUSER;"
+flock /var/lib/arvados/cluster_config.yml.lock /usr/local/lib/arvbox/cluster-config.sh
if test -a /usr/src/arvados/services/api/config/arvados_config.rb ; then
rm -f config/application.yml config/database.yml
- flock /var/lib/arvados/cluster_config.yml.lock /usr/local/lib/arvbox/cluster-config.sh
else
+ uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
+ secret_token=$(cat /var/lib/arvados/api_secret_token)
+ blob_signing_key=$(cat /var/lib/arvados/blob_signing_key)
+ management_token=$(cat /var/lib/arvados/management_token)
+ sso_app_secret=$(cat /var/lib/arvados/sso_app_secret)
+ database_pw=$(cat /var/lib/arvados/api_database_pw)
+ vm_uuid=$(cat /var/lib/arvados/vm-uuid)
+
cat >config/application.yml <<EOF
$RAILS_ENV:
uuid_prefix: $uuid_prefix
. /usr/local/lib/arvbox/common.sh
+set -u
+
+if ! test -s /var/lib/arvados/api_uuid_prefix ; then
+ ruby -e 'puts "x#{rand(2**64).to_s(36)[0,4]}"' > /var/lib/arvados/api_uuid_prefix
+fi
uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
+
+if ! test -s /var/lib/arvados/api_secret_token ; then
+ ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/api_secret_token
+fi
secret_token=$(cat /var/lib/arvados/api_secret_token)
+
+if ! test -s /var/lib/arvados/blob_signing_key ; then
+ ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/blob_signing_key
+fi
blob_signing_key=$(cat /var/lib/arvados/blob_signing_key)
+
+if ! test -s /var/lib/arvados/management_token ; then
+ ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/management_token
+fi
management_token=$(cat /var/lib/arvados/management_token)
+
+if ! test -s /var/lib/arvados/sso_app_secret ; then
+ ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/sso_app_secret
+fi
sso_app_secret=$(cat /var/lib/arvados/sso_app_secret)
+
+if ! test -s /var/lib/arvados/vm-uuid ; then
+ echo $uuid_prefix-2x53u-$(ruby -e 'puts rand(2**400).to_s(36)[0,15]') > /var/lib/arvados/vm-uuid
+fi
vm_uuid=$(cat /var/lib/arvados/vm-uuid)
+
+if ! test -f /var/lib/arvados/api_database_pw ; then
+ ruby -e 'puts rand(2**128).to_s(36)' > /var/lib/arvados/api_database_pw
+fi
database_pw=$(cat /var/lib/arvados/api_database_pw)
+if ! (psql postgres -c "\du" | grep "^ arvados ") >/dev/null ; then
+ psql postgres -c "create user arvados with password '$database_pw'"
+fi
+psql postgres -c "ALTER USER arvados WITH SUPERUSER;"
+
+if ! test -s /var/lib/arvados/workbench_secret_token ; then
+ ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/workbench_secret_token
+fi
workbench_secret_key_base=$(cat /var/lib/arvados/workbench_secret_token)
if test -s /var/lib/arvados/api_rails_env ; then
. /usr/local/lib/arvbox/common.sh
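+# Regenerate the cluster config under a lock, so concurrent startup
+# scripts don't race while rewriting cluster_config.yml.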
+/usr/local/lib/arvbox/runsu.sh flock /var/lib/arvados/cluster_config.yml.lock /usr/local/lib/arvbox/cluster-config.sh
+
uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
if ! openssl verify -CAfile $root_cert $root_cert ; then
exit
fi
-flock /var/lib/arvados/cluster_config.yml.lock /usr/local/lib/arvbox/cluster-config.sh
+/usr/local/lib/arvbox/runsu.sh flock /var/lib/arvados/cluster_config.yml.lock /usr/local/lib/arvbox/cluster-config.sh
exec /usr/local/lib/arvbox/runsu.sh /usr/local/bin/arvados-controller
set -u
-if ! test -s /var/lib/arvados/api_uuid_prefix ; then
- ruby -e 'puts "x#{rand(2**64).to_s(36)[0,4]}"' > /var/lib/arvados/api_uuid_prefix
-fi
uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
if ! test -s /var/lib/arvados/sso_secret_token ; then
if ! test -f /var/lib/arvados/sso_database_setup ; then
bundle exec rake db:setup
- if ! test -s /var/lib/arvados/sso_app_secret ; then
- ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/sso_app_secret
- fi
app_secret=$(cat /var/lib/arvados/sso_app_secret)
bundle exec rails console <<EOF
set -u
-if ! test -s /var/lib/arvados/workbench_secret_token ; then
- ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/workbench_secret_token
-fi
secret_token=$(cat /var/lib/arvados/workbench_secret_token)
if test -a /usr/src/arvados/apps/workbench/config/arvados_config.rb ; then