From: Lucas Di Pentima
Date: Fri, 27 May 2022 14:33:42 +0000 (-0300)
Subject: Merge branch '19144-wb-copy-collections-fix'. Closes #19144
X-Git-Tag: 2.5.0~149
X-Git-Url: https://git.arvados.org/arvados.git/commitdiff_plain/08c300b0d042f4f9bf80656a533e6233fa121c74?hp=12440f46cd197713157e19b44671a6e357d5431b

Merge branch '19144-wb-copy-collections-fix'. Closes #19144

Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima
---

diff --git a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
index e75be0881e..ba24c17ea1 100644
--- a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
@@ -82,52 +82,62 @@ Syntax:
 Options:
-  --json-file (required)
-      Path to the packer json file
-  --arvados-cluster-id (required)
-      The ID of the Arvados cluster, e.g. zzzzz
-  --aws-profile (default: false)
-      AWS profile to use (valid profile from ~/.aws/config
-  --aws-secrets-file (default: false, required if building for AWS)
-      AWS secrets file which will be sourced from this script
-  --aws-source-ami (default: false, required if building for AWS)
-      The AMI to use as base for building the images
-  --aws-region (default: us-east-1)
+  --json-file <path>
+      Path to the packer json file (required)
+  --arvados-cluster-id <xxxxx>
+      The ID of the Arvados cluster, e.g. zzzzz (required)
+  --aws-profile <profile>
+      AWS profile to use (a valid profile from ~/.aws/config) (optional)
+  --aws-secrets-file <path>
+      AWS secrets file which will be sourced from this script (optional)
+      When building for AWS, either an AWS profile or an AWS secrets file
+      must be provided.
+  --aws-source-ami <ami-xxxxxxxxxxxxxxxxx>
+      The AMI to use as base for building the images (required if building for AWS)
+  --aws-region <region> (default: us-east-1)
       The AWS region to use for building the images
-  --aws-vpc-id (optional)
-      VPC id for AWS, otherwise packer will pick the default one
-  --aws-subnet-id
-      Subnet id for AWS otherwise packer will pick the default one for the VPC
-  --aws-ebs-autoscale (default: false)
-      Install the AWS EBS autoscaler daemon.
-  --gcp-project-id (default: false, required if building for GCP)
-      GCP project id
-  --gcp-account-file (default: false, required if building for GCP)
-      GCP account file
-  --gcp-zone (default: us-central1-f)
+  --aws-vpc-id <vpc-id>
+      VPC id for AWS; if not specified, packer will derive it from the subnet id or pick the default one.
+  --aws-subnet-id <subnet-xxxxxxxxxxxxxxxxx>
+      Subnet id for AWS; if not specified, packer will pick the default one for the VPC.
+  --aws-ebs-autoscale
+      Install the AWS EBS autoscaler daemon (default: do not install the AWS EBS autoscaler).
+  --aws-associate-public-ip <true|false>
+      Associate a public IP address with the node used for building the compute image.
+      Required when the machine running packer cannot reach the node used for building
+      the compute image via its private IP. (default: true if building for AWS)
+      Note: if the subnet has "Auto-assign public IPv4 address" enabled, disabling this
+      flag will have no effect.
+  --aws-ena-support <true|false>
+      Enable enhanced networking (default: true if building for AWS)
+  --gcp-project-id <project-id>
+      GCP project id (required if building for GCP)
+  --gcp-account-file <path>
+      GCP account file (required if building for GCP)
+  --gcp-zone <zone> (default: us-central1-f)
       GCP zone
-  --azure-secrets-file (default: false, required if building for Azure)
-      Azure secrets file which will be sourced from this script
-  --azure-resource-group (default: false, required if building for Azure)
-      Azure resource group
-  --azure-location (default: false, required if building for Azure)
-      Azure location, e.g. centralus, eastus, westeurope
-  --azure-sku (default: unset, required if building for Azure, e.g. 16.04-LTS)
+  --azure-secrets-file <path>
+      Azure secrets file which will be sourced from this script (required if building for Azure)
+  --azure-resource-group <resource-group>
+      Azure resource group (required if building for Azure)
+  --azure-location <location>
+      Azure location, e.g. centralus, eastus, westeurope (required if building for Azure)
+  --azure-sku <sku> (required if building for Azure, e.g. 16.04-LTS)
       Azure SKU image to use
-  --ssh_user (default: packer)
+  --ssh_user <user> (default: packer)
       The user packer will use to log into the image
-  --resolver (default: host's network provided)
-      The dns resolver for the machine
-  --reposuffix (default: unset)
+  --resolver <resolver_IP>
+      The dns resolver for the machine (default: host's network provided)
+  --reposuffix <suffix>
       Set this to "-dev" to track the unstable/dev Arvados repositories
-  --public-key-file (required)
-      Path to the public key file that a-d-c will use to log into the compute node
+  --public-key-file <path>
+      Path to the public key file that a-d-c will use to log into the compute node (required)
   --mksquashfs-mem (default: 256M)
       Only relevant when using Singularity. This is the amount of memory
      mksquashfs is allowed to use.
-  --nvidia-gpu-support (default: false)
-      Install all the necessary tooling for Nvidia GPU support
-  --debug (default: false)
-      Output debug information
+  --nvidia-gpu-support
+      Install all the necessary tooling for Nvidia GPU support (default: do not install Nvidia GPU support)
+  --debug
+      Output debug information (default: no debug output is printed)
 
 h2(#dns-resolution). DNS resolution
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index 1778338f53..5d871c4277 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -123,8 +123,6 @@ When you finished customizing the configuration, you are ready to copy the files
scp -r provision.sh local* user@host:
-# if you use custom certificates (not Let's Encrypt), make sure to copy those too:
-# scp -r certs user@host:
 ssh user@host sudo ./provision.sh --roles comma,separated,list,of,roles,to,apply
 
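The hunk above drops the manual certificate-copy step from the multi-host instructions, since provision.sh now picks certificates up from CUSTOM_CERTS_DIR (see the provision.sh change at the end of this commit). A sketch of the resulting workflow follows; the host name and role list are placeholders, not values taken from this commit:

    # Copy the provisioning script and local configuration to the target host,
    # then run it with the roles that host should fulfill (placeholders shown).
    scp -r provision.sh local* user@host.example.com:
    ssh user@host.example.com sudo ./provision.sh --roles database,controller,websocket,keepstore
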
diff --git a/doc/install/salt-single-host.html.textile.liquid b/doc/install/salt-single-host.html.textile.liquid
index 106fab9bd4..4b13dcc11e 100644
--- a/doc/install/salt-single-host.html.textile.liquid
+++ b/doc/install/salt-single-host.html.textile.liquid
@@ -111,7 +111,6 @@ To supply your own certificate, change the configuration like this:
SSL_MODE="bring-your-own"
-CUSTOM_CERTS_DIR="${SCRIPT_DIR}/certs"
 
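With the CUSTOM_CERTS_DIR override removed from the documented snippet, "bring-your-own" mode relies on the script's new default of ${SCRIPT_DIR}/local_config_dir/certs. A minimal sketch of staging certificates there before provisioning; the source paths are hypothetical, and the "controller" basename follows the role-matching convention quoted in the local.params examples later in this commit:

    # Hypothetical staging step: place a cert/key pair for the controller role
    # in the new default CUSTOM_CERTS_DIR before running provision.sh.
    mkdir -p local_config_dir/certs
    cp /path/to/your/controller.crt local_config_dir/certs/controller.crt
    cp /path/to/your/controller.key local_config_dir/certs/controller.key
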
@@ -135,8 +134,6 @@ When you finished customizing the configuration, you are ready to copy the files
scp -r provision.sh local* tests user@host:
-# if you have set SSL_MODE to "bring-your-own", make sure to also copy the certificate files:
-# scp -r certs user@host:
 ssh user@host sudo ./provision.sh
 
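The single-host flow mirrors the multi-host one: the tests directory is copied along so the installation can be verified afterwards, and no role list is passed. A sketch with a placeholder host:

    # Single-host install: run provision.sh without --roles so it applies the
    # full single-host configuration; "host.example.com" is a placeholder.
    scp -r provision.sh local* tests user@host.example.com:
    ssh user@host.example.com sudo ./provision.sh
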
diff --git a/sdk/cwl/arvados_cwl/arvcontainer.py b/sdk/cwl/arvados_cwl/arvcontainer.py
index f3e122e603..5094ea3bf1 100644
--- a/sdk/cwl/arvados_cwl/arvcontainer.py
+++ b/sdk/cwl/arvados_cwl/arvcontainer.py
@@ -37,6 +37,9 @@ from ._version import __version__
 logger = logging.getLogger('arvados.cwl-runner')
 metrics = logging.getLogger('arvados.cwl-runner.metrics')
 
+def cleanup_name_for_collection(name):
+    return name.replace("/", " ")
+
 class ArvadosContainer(JobBase):
     """Submit and manage a Crunch container request for executing a CWL CommandLineTool."""
 
@@ -320,7 +323,7 @@ class ArvadosContainer(JobBase):
         if runtimeContext.submit_runner_cluster:
             extra_submit_params["cluster_id"] = runtimeContext.submit_runner_cluster
 
-        container_request["output_name"] = "Output from step %s" % (self.name)
+        container_request["output_name"] = cleanup_name_for_collection("Output from step %s" % (self.name))
         container_request["output_ttl"] = self.output_ttl
         container_request["mounts"] = mounts
         container_request["secret_mounts"] = secret_mounts
diff --git a/sdk/cwl/arvados_cwl/executor.py b/sdk/cwl/arvados_cwl/executor.py
index fe078e3227..778af58ac3 100644
--- a/sdk/cwl/arvados_cwl/executor.py
+++ b/sdk/cwl/arvados_cwl/executor.py
@@ -31,7 +31,7 @@ from arvados.keep import KeepClient
 from arvados.errors import ApiError
 
 import arvados_cwl.util
-from .arvcontainer import RunnerContainer
+from .arvcontainer import RunnerContainer, cleanup_name_for_collection
 from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, make_builder
 from .arvtool import ArvadosCommandTool, validate_cluster_target, ArvadosExpressionTool
 from .arvworkflow import ArvadosWorkflow, upload_workflow
@@ -630,6 +630,8 @@ The 'jobs' API is no longer supported.
         if not self.output_name:
             self.output_name = "Output from workflow %s" % runtimeContext.name
 
+        self.output_name = cleanup_name_for_collection(self.output_name)
+
         if self.work_api == "containers":
             if self.ignore_docker_for_reuse:
                 raise Exception("--ignore-docker-for-reuse not supported with containers API.")
diff --git a/tools/compute-images/arvados-images-aws.json b/tools/compute-images/arvados-images-aws.json
index c030ea6aff..9e07b461c5 100644
--- a/tools/compute-images/arvados-images-aws.json
+++ b/tools/compute-images/arvados-images-aws.json
@@ -1,12 +1,13 @@
 {
   "variables": {
     "arvados_cluster": "",
-    "associate_public_ip_address": "true",
     "aws_access_key": "",
     "aws_profile": "",
     "aws_secret_key": "",
     "aws_source_ami": "ami-031283ff8a43b021c",
     "aws_ebs_autoscale": "",
+    "aws_associate_public_ip_address": "",
+    "aws_ena_support": "",
     "build_environment": "aws",
     "public_key_file": "",
     "mksquashfs_mem": "",
@@ -24,12 +25,12 @@
     "access_key": "{{user `aws_access_key`}}",
     "secret_key": "{{user `aws_secret_key`}}",
     "region": "{{user `aws_default_region`}}",
-    "ena_support": "true",
+    "ena_support": "{{user `aws_ena_support`}}",
     "source_ami": "{{user `aws_source_ami`}}",
     "instance_type": "m5.large",
     "vpc_id": "{{user `vpc_id`}}",
     "subnet_id": "{{user `subnet_id`}}",
-    "associate_public_ip_address": "{{user `associate_public_ip_address`}}",
+    "associate_public_ip_address": "{{user `aws_associate_public_ip_address`}}",
     "ssh_username": "{{user `ssh_user`}}",
     "ami_name": "arvados-{{user `arvados_cluster`}}-compute-{{isotime \"20060102150405\"}}",
     "launch_block_device_mappings": [{
diff --git a/tools/compute-images/build.sh b/tools/compute-images/build.sh
index c589ffa055..5b3db262c1 100755
--- a/tools/compute-images/build.sh
+++ b/tools/compute-images/build.sh
@@ -17,52 +17,62 @@ Syntax:
 Options:
-  --json-file (required)
-      Path to the packer json file
-  --arvados-cluster-id (required)
-      The ID of the Arvados cluster, e.g. zzzzz
-  --aws-profile (default: false)
-      AWS profile to use (valid profile from ~/.aws/config
-  --aws-secrets-file (default: false, required if building for AWS)
-      AWS secrets file which will be sourced from this script
-  --aws-source-ami (default: false, required if building for AWS)
-      The AMI to use as base for building the images
-  --aws-region (default: us-east-1)
+  --json-file
+      Path to the packer json file (required)
+  --arvados-cluster-id
+      The ID of the Arvados cluster, e.g. zzzzz (required)
+  --aws-profile
+      AWS profile to use (a valid profile from ~/.aws/config) (optional)
+  --aws-secrets-file
+      AWS secrets file which will be sourced from this script (optional)
+      When building for AWS, either an AWS profile or an AWS secrets file
+      must be provided.
+  --aws-source-ami
+      The AMI to use as base for building the images (required if building for AWS)
+  --aws-region (default: us-east-1)
       The AWS region to use for building the images
-  --aws-vpc-id (optional)
-      VPC id for AWS, otherwise packer will pick the default one
-  --aws-subnet-id
-      Subnet id for AWS otherwise packer will pick the default one for the VPC
-  --aws-ebs-autoscale (default: false)
-      Install the AWS EBS autoscaler daemon.
-  --gcp-project-id (default: false, required if building for GCP)
-      GCP project id
-  --gcp-account-file (default: false, required if building for GCP)
-      GCP account file
-  --gcp-zone (default: us-central1-f)
+  --aws-vpc-id
+      VPC id for AWS; if not specified, packer will derive it from the subnet id or pick the default one.
+  --aws-subnet-id
+      Subnet id for AWS; if not specified, packer will pick the default one for the VPC.
+  --aws-ebs-autoscale
+      Install the AWS EBS autoscaler daemon (default: do not install the AWS EBS autoscaler).
+  --aws-associate-public-ip
+      Associate a public IP address with the node used for building the compute image.
+      Required when the machine running packer cannot reach the node used for building
+      the compute image via its private IP. (default: true if building for AWS)
+      Note: if the subnet has "Auto-assign public IPv4 address" enabled, disabling this
+      flag will have no effect.
+  --aws-ena-support
+      Enable enhanced networking (default: true if building for AWS)
+  --gcp-project-id
+      GCP project id (required if building for GCP)
+  --gcp-account-file
+      GCP account file (required if building for GCP)
+  --gcp-zone (default: us-central1-f)
       GCP zone
-  --azure-secrets-file (default: false, required if building for Azure)
-      Azure secrets file which will be sourced from this script
-  --azure-resource-group (default: false, required if building for Azure)
-      Azure resource group
-  --azure-location (default: false, required if building for Azure)
-      Azure location, e.g. centralus, eastus, westeurope
-  --azure-sku (default: unset, required if building for Azure, e.g. 16.04-LTS)
+  --azure-secrets-file
+      Azure secrets file which will be sourced from this script (required if building for Azure)
+  --azure-resource-group
+      Azure resource group (required if building for Azure)
+  --azure-location
+      Azure location, e.g. centralus, eastus, westeurope (required if building for Azure)
+  --azure-sku (required if building for Azure, e.g. 16.04-LTS)
       Azure SKU image to use
-  --ssh_user (default: packer)
+  --ssh_user (default: packer)
       The user packer will use to log into the image
-  --resolver (default: host's network provided)
-      The dns resolver for the machine
-  --reposuffix (default: unset)
+  --resolver
+      The dns resolver for the machine (default: host's network provided)
+  --reposuffix
       Set this to "-dev" to track the unstable/dev Arvados repositories
-  --public-key-file (required)
-      Path to the public key file that a-d-c will use to log into the compute node
+  --public-key-file
+      Path to the public key file that a-d-c will use to log into the compute node (required)
   --mksquashfs-mem (default: 256M)
       Only relevant when using Singularity. This is the amount of memory
       mksquashfs is allowed to use.
-  --nvidia-gpu-support (default: false)
-      Install all the necessary tooling for Nvidia GPU support
-  --debug (default: false)
-      Output debug information
+  --nvidia-gpu-support
+      Install all the necessary tooling for Nvidia GPU support (default: do not install Nvidia GPU support)
+  --debug
+      Output debug information (default: no debug output is printed)
 
 For more information, see the Arvados documentation at https://doc.arvados.org/install/crunch2-cloud/install-compute-node.html
@@ -76,6 +86,8 @@ AWS_SOURCE_AMI=
 AWS_VPC_ID=
 AWS_SUBNET_ID=
 AWS_EBS_AUTOSCALE=
+AWS_ASSOCIATE_PUBLIC_IP=true
+AWS_ENA_SUPPORT=true
 GCP_PROJECT_ID=
 GCP_ACCOUNT_FILE=
 GCP_ZONE=
@@ -91,7 +103,7 @@ MKSQUASHFS_MEM=256M
 NVIDIA_GPU_SUPPORT=
 
 PARSEDOPTS=$(getopt --name "$0" --longoptions \
-    help,json-file:,arvados-cluster-id:,aws-source-ami:,aws-profile:,aws-secrets-file:,aws-region:,aws-vpc-id:,aws-subnet-id:,aws-ebs-autoscale,gcp-project-id:,gcp-account-file:,gcp-zone:,azure-secrets-file:,azure-resource-group:,azure-location:,azure-sku:,azure-cloud-environment:,ssh_user:,resolver:,reposuffix:,public-key-file:,mksquashfs-mem:,nvidia-gpu-support,debug \
+    help,json-file:,arvados-cluster-id:,aws-source-ami:,aws-profile:,aws-secrets-file:,aws-region:,aws-vpc-id:,aws-subnet-id:,aws-ebs-autoscale,aws-associate-public-ip:,aws-ena-support:,gcp-project-id:,gcp-account-file:,gcp-zone:,azure-secrets-file:,azure-resource-group:,azure-location:,azure-sku:,azure-cloud-environment:,ssh_user:,resolver:,reposuffix:,public-key-file:,mksquashfs-mem:,nvidia-gpu-support,debug \
     -- "" "$@")
 if [ $? -ne 0 ]; then
     exit 1
@@ -132,6 +144,12 @@ while [ $# -gt 0 ]; do
         --aws-ebs-autoscale)
             AWS_EBS_AUTOSCALE=1
             ;;
+        --aws-associate-public-ip)
+            AWS_ASSOCIATE_PUBLIC_IP="$2"; shift
+            ;;
+        --aws-ena-support)
+            AWS_ENA_SUPPORT="$2"; shift
+            ;;
         --gcp-project-id)
             GCP_PROJECT_ID="$2"; shift
             ;;
@@ -226,25 +244,36 @@ if [[ ! -z "$AZURE_SECRETS_FILE" ]]; then
 fi
 
+AWS=0
 EXTRA2=""
 
 if [[ -n "$AWS_SOURCE_AMI" ]]; then
   EXTRA2+=" -var aws_source_ami=$AWS_SOURCE_AMI"
+  AWS=1
 fi
 if [[ -n "$AWS_PROFILE" ]]; then
   EXTRA2+=" -var aws_profile=$AWS_PROFILE"
+  AWS=1
 fi
 if [[ -n "$AWS_VPC_ID" ]]; then
-  EXTRA2+=" -var vpc_id=$AWS_VPC_ID -var associate_public_ip_address=true "
+  EXTRA2+=" -var vpc_id=$AWS_VPC_ID"
+  AWS=1
 fi
 if [[ -n "$AWS_SUBNET_ID" ]]; then
-  EXTRA2+=" -var subnet_id=$AWS_SUBNET_ID -var associate_public_ip_address=true "
+  EXTRA2+=" -var subnet_id=$AWS_SUBNET_ID"
+  AWS=1
 fi
 if [[ -n "$AWS_DEFAULT_REGION" ]]; then
   EXTRA2+=" -var aws_default_region=$AWS_DEFAULT_REGION"
+  AWS=1
 fi
 if [[ -n "$AWS_EBS_AUTOSCALE" ]]; then
   EXTRA2+=" -var aws_ebs_autoscale=$AWS_EBS_AUTOSCALE"
+  AWS=1
+fi
+if [[ $AWS -eq 1 ]]; then
+  EXTRA2+=" -var aws_associate_public_ip_address=$AWS_ASSOCIATE_PUBLIC_IP"
+  EXTRA2+=" -var aws_ena_support=$AWS_ENA_SUPPORT"
 fi
 if [[ -n "$GCP_PROJECT_ID" ]]; then
   EXTRA2+=" -var project_id=$GCP_PROJECT_ID"
diff --git a/tools/compute-images/scripts/base.sh b/tools/compute-images/scripts/base.sh
index c9fd0945d2..6ab9de9184 100644
--- a/tools/compute-images/scripts/base.sh
+++ b/tools/compute-images/scripts/base.sh
@@ -50,8 +50,8 @@ echo "deb http://apt.arvados.org/$LSB_RELEASE_CODENAME $LSB_RELEASE_CODENAME${RE
 # Add the arvados signing key
 cat /tmp/1078ECD7.asc | $SUDO apt-key add -
-# Add the debian keys
-wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get install --yes debian-keyring debian-archive-keyring
+# Add the debian keys (but don't abort if we can't find them, e.g. on Ubuntu where we don't need them)
+wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get install --yes debian-keyring debian-archive-keyring 2>/dev/null || true
 
 # Fix locale
 $SUDO /bin/sed -ri 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_passenger.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_passenger.sls
index 2b6b96c3a4..c14fbd1f59 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_passenger.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_passenger.sls
@@ -9,7 +9,7 @@
 {%- set passenger_mod = '/usr/lib64/nginx/modules/ngx_http_passenger_module.so'
                       if grains.osfinger in ('CentOS Linux-7',) else
                       '/usr/lib/nginx/modules/ngx_http_passenger_module.so' %}
-{%- set passenger_ruby = '/usr/local/rvm/rubies/ruby-2.7.2/bin/ruby'
+{%- set passenger_ruby = '/usr/local/rvm/wrappers/default/ruby'
                       if grains.osfinger in ('CentOS Linux-7', 'Ubuntu-18.04', 'Debian-10') else
                       '/usr/bin/ruby' %}
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls
index e51ec21eb9..dfddf3b623 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls
@@ -9,7 +9,7 @@
 {%- set passenger_mod = '/usr/lib64/nginx/modules/ngx_http_passenger_module.so'
                       if grains.osfinger in ('CentOS Linux-7',) else
                       '/usr/lib/nginx/modules/ngx_http_passenger_module.so' %}
-{%- set passenger_ruby = '/usr/local/rvm/rubies/ruby-2.7.2/bin/ruby'
+{%- set passenger_ruby = '/usr/local/rvm/wrappers/default/ruby'
                       if grains.osfinger in ('CentOS Linux-7', 'Ubuntu-18.04', 'Debian-10') else
                       '/usr/bin/ruby' %}
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls
index 2b764eb2c2..21c1510de8 100644
--- a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls
+++ b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls
@@ -9,7 +9,7 @@
 {%- set passenger_mod = '/usr/lib64/nginx/modules/ngx_http_passenger_module.so'
                       if grains.osfinger in ('CentOS Linux-7',) else
                       '/usr/lib/nginx/modules/ngx_http_passenger_module.so' %}
-{%- set passenger_ruby = '/usr/local/rvm/rubies/ruby-2.7.2/bin/ruby'
+{%- set passenger_ruby = '/usr/local/rvm/wrappers/default/ruby'
                       if grains.osfinger in ('CentOS Linux-7', 'Ubuntu-18.04', 'Debian-10') else
                       '/usr/bin/ruby' %}
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 32d1f8bb96..0ddec2c7cc 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -82,7 +82,7 @@ LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
 # Please set it to the FULL PATH to the certs dir if you're going to use a different dir
 # Default is "${SCRIPT_DIR}/certs", where the variable "SCRIPT_DIR" has the path to the
 # directory where the "provision.sh" script was copied in the destination host.
-# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/certs"
+# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
 # The script expects cert/key files with these basenames (matching the role except for
 # keepweb, which is split in both download/collections):
 # "controller"
diff --git a/tools/salt-install/local.params.example.single_host_multiple_hostnames b/tools/salt-install/local.params.example.single_host_multiple_hostnames
index d6bfb102e9..3cdc1ec8f1 100644
--- a/tools/salt-install/local.params.example.single_host_multiple_hostnames
+++ b/tools/salt-install/local.params.example.single_host_multiple_hostnames
@@ -45,7 +45,7 @@ SSL_MODE="self-signed"
 
 # CUSTOM_CERTS_DIR is only used when SSL_MODE is set to "bring-your-own".
 # See https://doc.arvados.org/intall/salt-single-host.html#bring-your-own for more information.
-# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/certs"
+# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
 
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
diff --git a/tools/salt-install/local.params.example.single_host_single_hostname b/tools/salt-install/local.params.example.single_host_single_hostname
index b6c7e5f7a5..45842b3dcc 100644
--- a/tools/salt-install/local.params.example.single_host_single_hostname
+++ b/tools/salt-install/local.params.example.single_host_single_hostname
@@ -53,7 +53,7 @@ SSL_MODE="self-signed"
 
 # CUSTOM_CERTS_DIR is only used when SSL_MODE is set to "bring-your-own".
 # See https://doc.arvados.org/intall/salt-single-host.html#bring-your-own for more information.
-# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/certs"
+# CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
 
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index be1506c620..f1fc700a9e 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -200,7 +200,7 @@ WORKBENCH2_EXT_SSL_PORT=3001
 SSL_MODE="self-signed"
 USE_LETSENCRYPT_ROUTE53="no"
-CUSTOM_CERTS_DIR="${SCRIPT_DIR}/certs"
+CUSTOM_CERTS_DIR="${SCRIPT_DIR}/local_config_dir/certs"
 
 ## These are ARVADOS-related parameters
 
 # For a stable release, change RELEASE "production" and VERSION to the
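
Taken together, the build.sh changes let an AWS image build set both new parameters explicitly instead of relying on hard-coded packer values. A hypothetical invocation under the new option set (the cluster id, profile, VPC/subnet ids, and key path are placeholders; the AMI is the default from arvados-images-aws.json):

    # Build an AWS compute image on a private subnet, keeping ENA enabled but
    # skipping the public IP, since packer can reach the builder node privately.
    ./build.sh --json-file arvados-images-aws.json \
               --arvados-cluster-id zzzzz \
               --aws-profile myprofile \
               --aws-source-ami ami-031283ff8a43b021c \
               --aws-vpc-id vpc-01234567890abcdef \
               --aws-subnet-id subnet-01234567890abcdef \
               --aws-associate-public-ip false \
               --aws-ena-support true \
               --public-key-file ~/.ssh/arvados-compute.pub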