From: Ward Vandewege
Date: Sat, 26 Mar 2022 00:32:27 +0000 (-0400)
Subject: 18903: Merge branch 'main' into 18903-fix-activity-script
X-Git-Tag: 2.5.0~232^2~2
X-Git-Url: https://git.arvados.org/arvados.git/commitdiff_plain/c3c1f4261f61bc52dbc8fadad644520797b3f6a6?hp=ba245c7a71e54de5b0a9ad0adb1f8ed0486e686e

18903: Merge branch 'main' into 18903-fix-activity-script

Arvados-DCO-1.1-Signed-off-by: Ward Vandewege
---

diff --git a/.licenseignore b/.licenseignore
index 97ce38af93..d13eee3901 100644
--- a/.licenseignore
+++ b/.licenseignore
@@ -88,4 +88,5 @@ sdk/python/tests/fed-migrate/*.cwlex
 doc/install/*.xlsx
 sdk/cwl/tests/wf/hello.txt
 sdk/cwl/tests/wf/indir1/hello2.txt
-sdk/cwl/tests/chipseq/data/Genomes/*
\ No newline at end of file
+sdk/cwl/tests/chipseq/data/Genomes/*
+CITATION.cff
diff --git a/AUTHORS b/AUTHORS
index b8b75518ff..fa9fa86d34 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -18,7 +18,7 @@ President and Fellows of Harvard College <*@harvard.edu>
 Thomas Mooney
 Chen Chen
 Veritas Genetics, Inc. <*@veritasgenetics.com>
-Curii Corporation, Inc. <*@curii.com>
+Curii Corporation <*@curii.com>
 Dante Tsang
 Codex Genetics Ltd
 Bruno P. Kinoshita
diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 0000000000..df3b35db61
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,37 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+authors:
+- name: "The Arvados Authors"
+- family-names: "Amstutz"
+  given-names: "Peter"
+  orcid: "https://orcid.org/0000-0003-3566-7705"
+- family-names: "Bértoli"
+  given-names: "Javier"
+- family-names: "César"
+  given-names: "Nico"
+- family-names: "Clegg"
+  given-names: "Tom"
+  orcid: "https://orcid.org/0000-0001-6751-2930"
+- family-names: "Di Pentima"
+  given-names: "Lucas"
+  orcid: "https://orcid.org/0000-0002-2807-6854"
+- family-names: "Kutyła"
+  given-names: "Daniel"
+- family-names: "Li"
+  given-names: "Jiayong"
+- family-names: "Smith"
+  given-names: "Stephen"
+- family-names: "Vandewege"
+  given-names: "Ward"
+  orcid: "https://orcid.org/0000-0002-2527-6949"
+- family-names: "Wait Zaranek"
+  given-names: "Alexander"
+  orcid: "https://orcid.org/0000-0002-0415-9655"
+- family-names: "Wait Zaranek"
+  given-names: "Sarah"
+  orcid: "https://orcid.org/0000-0003-4716-9121"
+title: "Arvados"
+abstract: "Arvados is an open source platform for managing, processing, and sharing genomic and other large scientific and biomedical data."
+type: software
+url: "https://github.com/arvados/arvados/"
+doi: 10.5281/zenodo.6382942
diff --git a/doc/admin/spot-instances.html.textile.liquid b/doc/admin/spot-instances.html.textile.liquid
index 3837f30d6d..703e70fb86 100644
--- a/doc/admin/spot-instances.html.textile.liquid
+++ b/doc/admin/spot-instances.html.textile.liquid
@@ -16,31 +16,48 @@ Currently Arvados supports preemptible instances using AWS and Azure spot instan
 h2. Configuration

-Add entries to @InstanceTypes@ that have @Preemptible: true@. Typically you want to add both preemptible and non-preemptible entries for each cloud provider VM type. The @Price@ for preemptible instances is the maximum bid price, the actual price paid is dynamic and will likely be lower. For example:
+First, configure some @InstanceTypes@ that have @Preemptible: true@. For a preemptible instance, @Price@ determines the maximum bid price; the actual price paid is dynamic and will likely be lower.
+
+Typically you want to add both preemptible and non-preemptible entries for each cloud provider VM type.
To do this automatically, use @PreemptiblePriceFactor@ to enable a preemptible version of each listed type, using the given factor to set the maximum bid price relative to the non-preemptible price. Alternatively, you can configure preemptible instance types explicitly. For example, the following two configurations are equivalent:
 Clusters:
   ClusterID:
+    Containers:
+      PreemptiblePriceFactor: 0.8
     InstanceTypes:
       m4.large:
-        Preemptible: false
         ProviderType: m4.large
         VCPUs: 2
         RAM: 8GiB
         AddedScratch: 32GB
         Price: 0.1
-      m4.large.spot:
-        Preemptible: true
+</code></pre>
+
+<pre><code class="yaml">
+Clusters:
+  ClusterID:
+    InstanceTypes:
+      m4.large:
         ProviderType: m4.large
         VCPUs: 2
         RAM: 8GiB
         AddedScratch: 32GB
         Price: 0.1
+      m4.large.preemptible:
+        Preemptible: true
+        ProviderType: m4.large
+        VCPUs: 2
+        RAM: 8GiB
+        AddedScratch: 32GB
+        Price: 0.08
 
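The two configurations above are equivalent because the config loader expands @PreemptiblePriceFactor@ into explicit preemptible entries at load time (the @autofillPreemptible@ helper added to @lib/config/load.go@ later in this patch). The standalone Go sketch below mirrors that derivation with simplified types; the @InstanceType@ struct here is illustrative rather than the SDK's real @arvados.InstanceType@:

<pre><code class="go">
package main

import "fmt"

// InstanceType carries just the fields this sketch needs; the real
// struct is arvados.InstanceType in sdk/go/arvados (RAM and scratch
// sizes are simplified to strings here).
type InstanceType struct {
	Name         string
	ProviderType string
	VCPUs        int
	RAM          string
	Preemptible  bool
	Price        float64
}

// autofillPreemptible mirrors what the config loader does when
// Containers.PreemptiblePriceFactor > 0: for every non-preemptible
// type it adds a variant named name + ".preemptible" whose Price is
// scaled by the factor, unless a conflicting entry already exists.
func autofillPreemptible(factor float64, types map[string]InstanceType) {
	if factor <= 0 {
		return
	}
	for name, it := range types {
		if it.Preemptible {
			continue
		}
		it.Preemptible = true
		it.Price *= factor
		it.Name = name + ".preemptible"
		if existing, ok := types[it.Name]; ok && existing != it {
			// An explicitly configured variant wins; the real loader
			// logs a warning here instead of replacing it.
			continue
		}
		types[it.Name] = it
	}
}

func main() {
	types := map[string]InstanceType{
		"m4.large": {Name: "m4.large", ProviderType: "m4.large", VCPUs: 2, RAM: "8GiB", Price: 0.1},
	}
	autofillPreemptible(0.8, types)
	v := types["m4.large.preemptible"]
	fmt.Printf("%s preemptible=%v price=%.2f\n", v.Name, v.Preemptible, v.Price)
	// prints: m4.large.preemptible preemptible=true price=0.08
}
</code></pre>

An explicitly configured entry such as @m4.large.preemptible@ is never replaced by the generated variant: if it differs from what would have been generated, the loader keeps the explicit entry and logs a warning, as exercised by the new @TestPreemptiblePriceFactor@ test later in this patch.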
Next, you can choose to enable automatic use of preemptible instances:
+Clusters:
+  ClusterID:
     Containers:
       AlwaysUsePreemptibleInstances: true
 
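If @AlwaysUsePreemptibleInstances@ is left at its default of @false@, preemptible instance types can still be used on request: a client opts in for an individual container by setting the @preemptible@ scheduling parameter on its container request. The sketch below only builds the request body, using the @SchedulingParameters@ type from @sdk/go/arvados@; the name, image, command, and output path are placeholder values, and submitting the request to the API server is left out:

<pre><code class="go">
package main

import (
	"encoding/json"
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Attributes for a container request that opts in to a preemptible
	// instance even when AlwaysUsePreemptibleInstances is false.
	// The name, image, command, and output path are placeholders;
	// POSTing the request to the API server is omitted from this sketch.
	attrs := map[string]interface{}{
		"name":            "preemptible-example",
		"container_image": "arvados/jobs",
		"command":         []string{"echo", "hello"},
		"output_path":     "/out",
		"scheduling_parameters": arvados.SchedulingParameters{
			Preemptible: true, // request a preemptible instance type for this container
		},
	}
	body, err := json.MarshalIndent(map[string]interface{}{"container_request": attrs}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
</code></pre>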
diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid index abaa190c8c..1ed3b694ce 100644 --- a/doc/admin/upgrading.html.textile.liquid +++ b/doc/admin/upgrading.html.textile.liquid @@ -28,9 +28,11 @@ TODO: extract this information based on git commit messages and generate changel
-h2(#main). development main (as of 2022-03-08)
+h2(#main). development main (as of 2022-03-??)

-"previous: Upgrading to 2.3.0":#v2_3_0
+h2(#v2_4_0). v2.4.0 (2022-03-??)
+
+"previous: Upgrading to 2.3.1":#v2_3_1

 h3. Ubuntu 18.04 Arvados Python packages now depend on python-3.8

@@ -46,25 +48,33 @@ h3. Anonymous token changes
 The anonymous token configured in @Users.AnonymousUserToken@ must now be 32 characters or longer. This was already the suggestion in the documentation, now it is enforced. The @script/get_anonymous_user_token.rb@ script that was needed to register the anonymous user token in the database has been removed. Registration of the anonymous token is no longer necessary. If the anonymous token in @config.yml@ is specified as a full V2 token, that will now generate a warning - it should be updated to list just the secret (i.e. the part after the last forward slash).

-h3. Preemptible instance types are used automatically, if any are configured
+h3. Preemptible instance support changes

-The default behavior for selecting "preemptible instances":{{site.baseurl}}/admin/spot-instances.html has changed. If your configuration lists any instance types with @Preemptible: true@, all child (non-top-level) containers will automatically be scheduled on preemptible instances. To avoid using preemptible instances except when explicitly requested by clients, add @AlwaysUsePreemptibleInstances: false@ in the @Containers@ config section. (Previously, preemptible instance types were never used unless the configuration specified @UsePreemptibleInstances: true@. That flag has been removed.)
+The @Containers.UsePreemptibleInstances@ option has been renamed to @Containers.AlwaysUsePreemptibleInstances@ and has the same behavior when @true@ and one or more preemptible instance types are configured. However, a value of @false@ no longer disables support for preemptible instances; instead, users can now enable use of preemptible instances at the level of an individual workflow or workflow step.

-h3. Role groups are visible to all users by default
-
-The permission model has changed such that all role groups are visible to all active users. This enables users to share objects with groups they don't belong to. To preserve the previous behavior, where role groups are only visible to members and admins, add @RoleGroupsVisibleToAll: false@ to the @Users@ section of your configuration file.
+In addition, there is a new configuration option @Containers.PreemptiblePriceFactor@ that will automatically add a preemptible instance type corresponding to each regular instance type. See "Using Preemptible instances":spot-instances.html for details.

 h3. Default LSF arguments have changed

 If you use LSF and your configuration specifies @Containers.LSF.BsubArgumentsList@, you should update it to include the new arguments (@"-R", "select[mem>=%MMB]", ...@, see "configuration reference":{{site.baseurl}}/admin/config.html). Otherwise, containers that are too big to run on any LSF host will remain in the LSF queue instead of being cancelled.

-h3. Previously trashed role groups will be deleted
+h3. Support for NVIDIA CUDA GPUs

-Due to a bug in previous versions, the @DELETE@ operation on a role group caused the group to be flagged as trash in the database, but continue to grant permissions regardless. After upgrading, any role groups that had been trashed this way will be deleted. This might surprise some users if they were relying on permissions that were still in effect due to this bug.
Future @DELETE@ operations on a role group will immediately delete the group and revoke the associated permissions. +Arvados now supports requesting NVIDIA CUDA GPUs for cloud and LSF (Slurm is currently not supported). To be able to request GPU nodes, some additional configuration is needed: -h3. Users are visible to other users by default +"Including GPU support in cloud compute node image":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html#nvidia -When a new user is set up (either via @AutoSetupNewUsers@ config or via Workbench admin interface) the user immediately becomes visible to other users. To revert to the previous behavior, where the administrator must add two users to the same group using the Workbench admin interface in order for the users to see each other, change the new @Users.ActivatedUsersAreVisibleToOthers@ config to @false@. +"Configure cloud dispatcher for GPU support":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html#GPUsupport + +"LSF GPU configuration":{{site.baseurl}}/install/crunch2-lsf/install-dispatch.html + +h3. Role groups are visible to all users by default + +The permission model has changed such that all role groups are visible to all active users. This enables users to share objects with groups they don't belong to. To preserve the previous behavior, where role groups are only visible to members and admins, add @RoleGroupsVisibleToAll: false@ to the @Users@ section of your configuration file. + +h3. Previously trashed role groups will be deleted + +Due to a bug in previous versions, the @DELETE@ operation on a role group caused the group to be flagged as trash in the database, but continue to grant permissions regardless. After upgrading, any role groups that had been trashed this way will be deleted. This might surprise some users if they were relying on permissions that were still in effect due to this bug. Future @DELETE@ operations on a role group will immediately delete the group and revoke the associated permissions. h3. Dedicated keepstore process for each container @@ -73,6 +83,14 @@ When Arvados runs a container via @arvados-dispatch-cloud@, the @crunch-run@ sup * If you already have a robust permanent keepstore infrastructure, you can set @Containers.LocalKeepBlobBuffersPerVCPU@ to 0 to disable this feature and preserve the previous behavior of sending container I/O traffic to your separately provisioned keepstore servers. * This feature is enabled only if no volumes use @AccessViaHosts@, and no volumes have underlying @Replication@ less than @Collections.DefaultReplication@. If the feature is configured but cannot be enabled due to an incompatible volume configuration, this will be noted in the @crunch-run.txt@ file in the container log. +h2(#v2_3_1). v2.3.1 (2021-11-24) + +"previous: Upgrading to 2.3.0":#v2_3_0 + +h3. Users are visible to other users by default + +When a new user is set up (either via @AutoSetupNewUsers@ config or via Workbench admin interface) the user immediately becomes visible to other users. To revert to the previous behavior, where the administrator must add two users to the same group using the Workbench admin interface in order for the users to see each other, change the new @Users.ActivatedUsersAreVisibleToOthers@ config to @false@. + h3. 
Backend support for vocabulary checking If your installation uses the vocabulary feature on Workbench2, you will need to update the cluster configuration by moving the vocabulary definition file to the node where @controller@ runs, and set the @API.VocabularyPath@ configuration parameter to the local path where the file was placed. diff --git a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid index ee71d7a3f6..0ed7a599fc 100644 --- a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid +++ b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid @@ -74,7 +74,7 @@ Add or update the following portions of your cluster configuration file, @config -h4. NVIDIA GPU support +h4(#GPUsupport). NVIDIA GPU support To specify instance types with NVIDIA GPUs, you must include an additional @CUDA@ section: diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml index 22e2c58b78..6512389815 100644 --- a/lib/config/config.default.yml +++ b/lib/config/config.default.yml @@ -919,6 +919,15 @@ Clusters: # configured, and has no effect on top-level containers. AlwaysUsePreemptibleInstances: false + # Automatically add a preemptible variant for every + # non-preemptible entry in InstanceTypes below. The maximum bid + # price for the preemptible variant will be the non-preemptible + # price multiplied by PreemptiblePriceFactor. If 0, preemptible + # variants are not added automatically. + # + # A price factor of 1.0 is a reasonable starting point. + PreemptiblePriceFactor: 0 + # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the # cloud dispatcher for executing containers on worker VMs. # Begins with "-----BEGIN RSA PRIVATE KEY-----\n" diff --git a/lib/config/export.go b/lib/config/export.go index db413b97bd..dae749c874 100644 --- a/lib/config/export.go +++ b/lib/config/export.go @@ -133,6 +133,7 @@ var whitelist = map[string]bool{ "Containers.MaxDispatchAttempts": false, "Containers.MaxRetryAttempts": true, "Containers.MinRetryPeriod": true, + "Containers.PreemptiblePriceFactor": false, "Containers.ReserveExtraRAM": true, "Containers.RuntimeEngine": true, "Containers.ShellAccess": true, diff --git a/lib/config/load.go b/lib/config/load.go index 8d498af170..de43b9d2e2 100644 --- a/lib/config/load.go +++ b/lib/config/load.go @@ -285,6 +285,19 @@ func (ldr *Loader) Load() (*arvados.Config, error) { } } + // Preprocess/automate some configs + for id, cc := range cfg.Clusters { + ldr.autofillPreemptible("Clusters."+id, &cc) + + if strings.Count(cc.Users.AnonymousUserToken, "/") == 3 { + // V2 token, strip it to just a secret + tmp := strings.Split(cc.Users.AnonymousUserToken, "/") + cc.Users.AnonymousUserToken = tmp[2] + } + + cfg.Clusters[id] = cc + } + // Check for known mistakes for id, cc := range cfg.Clusters { for remote := range cc.RemoteClusters { @@ -316,11 +329,6 @@ func (ldr *Loader) Load() (*arvados.Config, error) { return nil, err } } - if strings.Count(cc.Users.AnonymousUserToken, "/") == 3 { - // V2 token, strip it to just a secret - tmp := strings.Split(cc.Users.AnonymousUserToken, "/") - cc.Users.AnonymousUserToken = tmp[2] - } } return &cfg, nil } @@ -527,3 +535,21 @@ func (ldr *Loader) logExtraKeys(expected, supplied map[string]interface{}, prefi } } } + +func (ldr *Loader) autofillPreemptible(label string, cc *arvados.Cluster) { + if factor := cc.Containers.PreemptiblePriceFactor; factor > 0 { + for name, it := range cc.InstanceTypes { + if !it.Preemptible { 
+ it.Preemptible = true + it.Price = it.Price * factor + it.Name = name + ".preemptible" + if it2, exists := cc.InstanceTypes[it.Name]; exists && it2 != it { + ldr.Logger.Warnf("%s.InstanceTypes[%s]: already exists, so not automatically adding a preemptible variant of %s", label, it.Name, name) + continue + } + cc.InstanceTypes[it.Name] = it + } + } + } + +} diff --git a/lib/config/load_test.go b/lib/config/load_test.go index 1ede805b00..5270dcccce 100644 --- a/lib/config/load_test.go +++ b/lib/config/load_test.go @@ -305,8 +305,6 @@ func (s *LoadSuite) TestNoUnrecognizedKeysInDefaultConfig(c *check.C) { func (s *LoadSuite) TestNoWarningsForDumpedConfig(c *check.C) { var logbuf bytes.Buffer - logger := logrus.New() - logger.Out = &logbuf cfg, err := testLoader(c, ` Clusters: zzzzz: @@ -695,3 +693,72 @@ Clusters: _, err = ldr.Load() c.Assert(err, check.ErrorMatches, `there is no default storage class.*`) } + +func (s *LoadSuite) TestPreemptiblePriceFactor(c *check.C) { + yaml := ` +Clusters: + z1111: + InstanceTypes: + Type1: + RAM: 12345M + VCPUs: 8 + Price: 1.23 + z2222: + Containers: + PreemptiblePriceFactor: 0.5 + InstanceTypes: + Type1: + RAM: 12345M + VCPUs: 8 + Price: 1.23 + z3333: + Containers: + PreemptiblePriceFactor: 0.5 + InstanceTypes: + Type1: + RAM: 12345M + VCPUs: 8 + Price: 1.23 + Type1.preemptible: # higher price than the auto-added variant would use -- should generate warning + ProviderType: Type1 + RAM: 12345M + VCPUs: 8 + Price: 1.23 + Preemptible: true + Type2: + RAM: 23456M + VCPUs: 16 + Price: 2.46 + Type2.preemptible: # identical to the auto-added variant -- so no warning + ProviderType: Type2 + RAM: 23456M + VCPUs: 16 + Price: 1.23 + Preemptible: true +` + var logbuf bytes.Buffer + cfg, err := testLoader(c, yaml, &logbuf).Load() + c.Assert(err, check.IsNil) + cc, err := cfg.GetCluster("z1111") + c.Assert(err, check.IsNil) + c.Check(cc.InstanceTypes["Type1"].Price, check.Equals, 1.23) + c.Check(cc.InstanceTypes, check.HasLen, 1) + + cc, err = cfg.GetCluster("z2222") + c.Assert(err, check.IsNil) + c.Check(cc.InstanceTypes["Type1"].Preemptible, check.Equals, false) + c.Check(cc.InstanceTypes["Type1"].Price, check.Equals, 1.23) + c.Check(cc.InstanceTypes["Type1.preemptible"].Preemptible, check.Equals, true) + c.Check(cc.InstanceTypes["Type1.preemptible"].Price, check.Equals, 1.23/2) + c.Check(cc.InstanceTypes["Type1.preemptible"].ProviderType, check.Equals, "Type1") + c.Check(cc.InstanceTypes, check.HasLen, 2) + + cc, err = cfg.GetCluster("z3333") + c.Assert(err, check.IsNil) + // Don't overwrite the explicitly configured preemptible variant + c.Check(cc.InstanceTypes["Type1.preemptible"].Price, check.Equals, 1.23) + c.Check(cc.InstanceTypes, check.HasLen, 4) + c.Check(logbuf.String(), check.Matches, `(?ms).*Clusters\.z3333\.InstanceTypes\[Type1\.preemptible\]: already exists, so not automatically adding a preemptible variant of Type1.*`) + c.Check(logbuf.String(), check.Not(check.Matches), `(?ms).*Type2\.preemptible.*`) + c.Check(logbuf.String(), check.Not(check.Matches), `(?ms).*(z1111|z2222)[^\n]*InstanceTypes.*`) +} diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go index e0750bd8c5..6c9324e478 100644 --- a/sdk/go/arvados/config.go +++ b/sdk/go/arvados/config.go @@ -448,6 +448,7 @@ type ContainersConfig struct { StaleLockTimeout Duration SupportedDockerImageFormats StringSet AlwaysUsePreemptibleInstances bool + PreemptiblePriceFactor float64 RuntimeEngine string LocalKeepBlobBuffersPerVCPU int LocalKeepLogsToContainerLog string diff --git 
a/services/api/test/unit/container_request_test.rb b/services/api/test/unit/container_request_test.rb index 9b35769ef2..aa649e9106 100644 --- a/services/api/test/unit/container_request_test.rb +++ b/services/api/test/unit/container_request_test.rb @@ -1126,7 +1126,8 @@ class ContainerRequestTest < ActiveSupport::TestCase end end - test "Having preemptible_instances=true create a committed child container request and verify the scheduling parameter of its container" do + test "AlwaysUsePreemptibleInstances makes child containers preemptible" do + Rails.configuration.Containers.AlwaysUsePreemptibleInstances = true common_attrs = {cwd: "test", priority: 1, command: ["echo", "hello"], diff --git a/tools/salt-install/config_examples/multi_host/aws/states/shell_sudo_passwordless.sls b/tools/salt-install/config_examples/multi_host/aws/states/shell_sudo_passwordless.sls new file mode 100644 index 0000000000..dbcc9c907a --- /dev/null +++ b/tools/salt-install/config_examples/multi_host/aws/states/shell_sudo_passwordless.sls @@ -0,0 +1,27 @@ +# Copyright (C) The Arvados Authors. All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +{%- set curr_tpldir = tpldir %} +{%- set tpldir = 'arvados' %} +{%- from "arvados/map.jinja" import arvados with context %} +{%- set tpldir = curr_tpldir %} + +extra_shell_sudo_passwordless_sudo_pkg_installed: + pkg.installed: + - name: sudo + +extra_shell_sudo_passwordless_config_file_managed: + file.managed: + - name: /etc/sudoers.d/arvados_passwordless + - makedirs: true + - user: root + - group: root + - mode: '0440' + - replace: false + - contents: | + # This file managed by Salt, do not edit by hand!! + # Allow members of group sudo to execute any command without password + %sudo ALL=(ALL:ALL) NOPASSWD:ALL + - require: + - pkg: extra_shell_sudo_passwordless_sudo_pkg_installed diff --git a/tools/salt-install/config_examples/single_host/single_hostname/states/shell_sudo_passwordless.sls b/tools/salt-install/config_examples/single_host/single_hostname/states/shell_sudo_passwordless.sls new file mode 100644 index 0000000000..dbcc9c907a --- /dev/null +++ b/tools/salt-install/config_examples/single_host/single_hostname/states/shell_sudo_passwordless.sls @@ -0,0 +1,27 @@ +# Copyright (C) The Arvados Authors. All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +{%- set curr_tpldir = tpldir %} +{%- set tpldir = 'arvados' %} +{%- from "arvados/map.jinja" import arvados with context %} +{%- set tpldir = curr_tpldir %} + +extra_shell_sudo_passwordless_sudo_pkg_installed: + pkg.installed: + - name: sudo + +extra_shell_sudo_passwordless_config_file_managed: + file.managed: + - name: /etc/sudoers.d/arvados_passwordless + - makedirs: true + - user: root + - group: root + - mode: '0440' + - replace: false + - contents: | + # This file managed by Salt, do not edit by hand!! + # Allow members of group sudo to execute any command without password + %sudo ALL=(ALL:ALL) NOPASSWD:ALL + - require: + - pkg: extra_shell_sudo_passwordless_sudo_pkg_installed diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh index 0f3c9a1411..a26b3feaa3 100755 --- a/tools/salt-install/provision.sh +++ b/tools/salt-install/provision.sh @@ -417,30 +417,34 @@ for f in $(ls "${SOURCE_PILLARS_DIR}"/*); do "${f}" > "${P_DIR}"/$(basename "${f}") done -if [ "x${TEST}" = "xyes" ] && [ ! -d "${SOURCE_TESTS_DIR}" ]; then - echo "You requested to run tests, but ${SOURCE_TESTS_DIR} does not exist or is not a directory. Exiting." 
- exit 1 -fi -mkdir -p ${T_DIR} -# Replace cluster and domain name in the test files -for f in $(ls "${SOURCE_TESTS_DIR}"/*); do - FILTERS="s#__CLUSTER__#${CLUSTER}#g; - s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g; - s#__DOMAIN__#${DOMAIN}#g; - s#__IP_INT__#${IP_INT}#g; - s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g; - s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g - s#__INITIAL_USER__#${INITIAL_USER}#g; - s#__DATABASE_PASSWORD__#${DATABASE_PASSWORD}#g; - s#__SYSTEM_ROOT_TOKEN__#${SYSTEM_ROOT_TOKEN}#g" - if [ "$USE_SINGLE_HOSTNAME" = "yes" ]; then - FILTERS="s#__CLUSTER__.__DOMAIN__#${HOSTNAME_EXT}#g; - $FILTERS" +if [ ! -d "${SOURCE_TESTS_DIR}" ]; then + echo "WARNING: The tests directory was not copied to \"${SOURCE_TESTS_DIR}\"." + if [ "x${TEST}" = "xyes" ]; then + echo "WARNING: Disabling tests for this installation." fi - sed "$FILTERS" \ - "${f}" > ${T_DIR}/$(basename "${f}") -done -chmod 755 ${T_DIR}/run-test.sh + TEST="no" +else + mkdir -p ${T_DIR} + # Replace cluster and domain name in the test files + for f in $(ls "${SOURCE_TESTS_DIR}"/*); do + FILTERS="s#__CLUSTER__#${CLUSTER}#g; + s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g; + s#__DOMAIN__#${DOMAIN}#g; + s#__IP_INT__#${IP_INT}#g; + s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g; + s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g + s#__INITIAL_USER__#${INITIAL_USER}#g; + s#__DATABASE_PASSWORD__#${DATABASE_PASSWORD}#g; + s#__SYSTEM_ROOT_TOKEN__#${SYSTEM_ROOT_TOKEN}#g" + if [ "$USE_SINGLE_HOSTNAME" = "yes" ]; then + FILTERS="s#__CLUSTER__.__DOMAIN__#${HOSTNAME_EXT}#g; + $FILTERS" + fi + sed "$FILTERS" \ + "${f}" > ${T_DIR}/$(basename "${f}") + done + chmod 755 ${T_DIR}/run-test.sh +fi # Replace helper state files that differ from the formula's examples if [ -d "${SOURCE_STATES_DIR}" ]; then @@ -514,7 +518,7 @@ if [ -d "${F_DIR}"/extra/extra ]; then # Same when using self-signed certificates. SKIP_SNAKE_OIL="dont_add_snakeoil_certs" fi - for f in $(ls "${F_DIR}"/extra/extra/*.sls | grep -v ${SKIP_SNAKE_OIL}); do + for f in $(ls "${F_DIR}"/extra/extra/*.sls | egrep -v "${SKIP_SNAKE_OIL}|shell_sudo_passwordless"); do echo " - extra.$(basename ${f} | sed 's/.sls$//g')" >> ${S_DIR}/top.sls done # Use byo or self-signed certificates @@ -544,6 +548,7 @@ if [ -z "${ROLES}" ]; then grep -q "custom_certs" ${S_DIR}/top.sls || echo " - extra.custom_certs" >> ${S_DIR}/top.sls fi + echo " - extra.shell_sudo_passwordless" >> ${S_DIR}/top.sls echo " - postgres" >> ${S_DIR}/top.sls echo " - docker.software" >> ${S_DIR}/top.sls echo " - arvados" >> ${S_DIR}/top.sls @@ -753,6 +758,7 @@ else ;; "shell") # States + echo " - extra.shell_sudo_passwordless" >> ${S_DIR}/top.sls grep -q "docker" ${S_DIR}/top.sls || echo " - docker.software" >> ${S_DIR}/top.sls grep -q "arvados.${R}" ${S_DIR}/top.sls || echo " - arvados.${R}" >> ${S_DIR}/top.sls # Pillars