Merge branch '15467-config-fixups'
authorPeter Amstutz <pamstutz@veritasgenetics.com>
Mon, 29 Jul 2019 19:37:32 +0000 (15:37 -0400)
committerPeter Amstutz <pamstutz@veritasgenetics.com>
Mon, 29 Jul 2019 19:37:32 +0000 (15:37 -0400)
Also includes 14713-cds-new-config and 14717-ws-new-config

Updates run_test_server.py and run-tests.sh to use new config.yml

refs #14713
refs #14717
refs #15467

Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz@veritasgenetics.com>

52 files changed:
apps/workbench/config/application.default.yml
apps/workbench/test/controllers/healthcheck_controller_test.rb
apps/workbench/test/integration/anonymous_access_test.rb
apps/workbench/test/integration/collections_test.rb
apps/workbench/test/integration/download_test.rb
apps/workbench/test/integration/jobs_test.rb
apps/workbench/test/integration/repositories_browse_test.rb
apps/workbench/test/integration/user_settings_menu_test.rb
apps/workbench/test/integration_helper.rb
build/run-tests.sh
lib/cloud/ec2/ec2.go
lib/config/config.default.yml
lib/config/deprecated.go
lib/config/export.go
lib/config/generated_config.go
lib/config/load.go
lib/config/load_test.go
sdk/go/arvados/config.go
sdk/go/arvados/config_test.go
sdk/go/dispatch/dispatch.go
sdk/python/tests/run_test_server.py
services/api/app/controllers/application_controller.rb
services/api/app/controllers/arvados/v1/collections_controller.rb
services/api/app/controllers/arvados/v1/groups_controller.rb
services/api/app/controllers/arvados/v1/schema_controller.rb
services/api/app/mailers/admin_notifier.rb
services/api/app/models/arvados_model.rb
services/api/app/models/collection.rb
services/api/app/models/node.rb
services/api/app/models/repository.rb
services/api/app/models/user.rb
services/api/config/application.default.yml
services/api/config/arvados_config.rb
services/api/lib/enable_jobs_api.rb
services/api/test/functional/arvados/v1/groups_controller_test.rb
services/api/test/functional/arvados/v1/healthcheck_controller_test.rb
services/api/test/functional/arvados/v1/jobs_controller_test.rb
services/api/test/functional/arvados/v1/schema_controller_test.rb
services/api/test/unit/container_request_test.rb
services/api/test/unit/job_test.rb
services/api/test/unit/log_test.rb
services/api/test/unit/user_test.rb
services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
services/crunch-run/git_mount_test.go
services/login-sync/Gemfile.lock
services/ws/config.go [deleted file]
services/ws/main.go
services/ws/router.go
services/ws/server.go
services/ws/server_test.go
tools/jenkins/submit-ci-dev.sh [new file with mode: 0755]

index 8d1dff995248d9f98feaea4b77ff5ba723b0a738..9456e61455c306cb7b19db7963366c34a55b1345 100644 (file)
@@ -79,18 +79,6 @@ test:
   profiling_enabled: true
   secret_token: <%= rand(2**256).to_s(36) %>
   secret_key_base: <%= rand(2**256).to_s(36) %>
-  # This setting is to allow workbench start when running tests, it should be
-  # set to a correct value when testing relevant features.
-  keep_web_url: http://example.com/c=%{uuid_or_pdh}
-
-  # When you run the Workbench's integration tests, it starts the API
-  # server as a dependency.  These settings should match the API
-  # server's Rails defaults.  If you adjust those, change these
-  # settings in application.yml to match.
-  arvados_login_base: https://localhost:3000/login
-  arvados_v1_base: https://localhost:3000/arvados/v1
-  arvados_insecure_https: true
-
   site_name: Workbench:test
 
   # Enable user profile with one required field
index 45726e5a646f8aea9f7b8d855ab880cde1135d74..c3a0ddea906886e6b6e556cfb4a73375897d9c05 100644 (file)
@@ -16,7 +16,11 @@ class HealthcheckControllerTest < ActionController::TestCase
     [true, 'Bearer configuredmanagementtoken', 200, '{"health":"OK"}'],
   ].each do |enabled, header, error_code, error_msg|
     test "ping when #{if enabled then 'enabled' else 'disabled' end} with header '#{header}'" do
-      Rails.configuration.ManagementToken = 'configuredmanagementtoken' if enabled
+      if enabled
+        Rails.configuration.ManagementToken = 'configuredmanagementtoken'
+      else
+        Rails.configuration.ManagementToken = ""
+      end
 
       @request.headers['Authorization'] = header
       get :ping
index c414f8a7ad76e4a5c3844c0ce61a088fe5fcd403..0842635f603ff00ad93dbb15581950f860c37567 100644 (file)
@@ -5,8 +5,6 @@
 require 'integration_helper'
 
 class AnonymousAccessTest < ActionDispatch::IntegrationTest
-  include KeepWebConfig
-
   # These tests don't do state-changing API calls. Save some time by
   # skipping the database reset.
   reset_api_fixtures :after_each_test, false
@@ -119,8 +117,6 @@ class AnonymousAccessTest < ActionDispatch::IntegrationTest
   end
 
   test 'view file' do
-    use_keep_web_config
-
     magic = rand(2**512).to_s 36
     owner = api_fixture('groups')['anonymously_accessible_project']['uuid']
     col = upload_data_and_get_collection(magic, 'admin', "Hello\\040world.txt", owner)
index 6dd3c526968f3f574ee4b5fcba5746f78fe57a5e..87d3d678d174c99e03f527c58a6970f05c122f11 100644 (file)
@@ -6,8 +6,6 @@ require 'integration_helper'
 require_relative 'integration_test_utils'
 
 class CollectionsTest < ActionDispatch::IntegrationTest
-  include KeepWebConfig
-
   setup do
     need_javascript
   end
@@ -55,8 +53,6 @@ class CollectionsTest < ActionDispatch::IntegrationTest
   end
 
   test "can download an entire collection with a reader token" do
-    use_keep_web_config
-
     token = api_token('active')
     data = "foo\nfile\n"
     datablock = `echo -n #{data.shellescape} | ARVADOS_API_TOKEN=#{token.shellescape} arv-put --no-progress --raw -`.strip
index b19c00dae99c9b8839ff904dec648e45833149e2..6ae9f29274eb27a135c64a1d9492fc4161ae1534 100644 (file)
@@ -6,13 +6,9 @@ require 'integration_helper'
 require 'helpers/download_helper'
 
 class DownloadTest < ActionDispatch::IntegrationTest
-  include KeepWebConfig
-
   @@wrote_test_data = false
 
   setup do
-    use_keep_web_config
-
     # Make sure Capybara can download files.
     need_selenium 'for downloading', :selenium_with_download
     DownloadHelper.clear
index b54a31380ce93f9cbb0ae726a47a636e4ed57981..a66dfd80301c0ca7cbcf35c4e4075b38da456a38 100644 (file)
@@ -8,8 +8,6 @@ require 'tmpdir'
 require 'integration_helper'
 
 class JobsTest < ActionDispatch::IntegrationTest
-  include KeepWebConfig
-
   setup do
       need_javascript
   end
@@ -41,7 +39,6 @@ class JobsTest < ActionDispatch::IntegrationTest
 
   test 'view partial job log' do
     need_selenium 'to be able to see the CORS response headers (PhantomJS 1.9.8 does not)'
-    use_keep_web_config
 
     # This config will be restored during teardown by ../test_helper.rb:
     Rails.configuration.Workbench.LogViewerMaxBytes = 100
@@ -61,8 +58,6 @@ class JobsTest < ActionDispatch::IntegrationTest
   end
 
   test 'view log via keep-web redirect' do
-    use_keep_web_config
-
     token = api_token('active')
     logdata = fakepipe_with_log_data.read
     logblock = `echo -n #{logdata.shellescape} | ARVADOS_API_TOKEN=#{token.shellescape} arv-put --no-progress --raw -`.strip
index e668b8ca18017ec7076f33e7de643803c10e90ea..056598ef11e1719cf92409947b12be34d6d0485a 100644 (file)
@@ -41,12 +41,9 @@ class RepositoriesTest < ActionDispatch::IntegrationTest
 
   test "browse using arv-git-http" do
     repo = api_fixture('repositories')['foo']
-    portfile =
-      File.expand_path('../../../../../tmp/arv-git-httpd-ssl.port', __FILE__)
-    gitsslport = File.read(portfile)
     Repository.any_instance.
       stubs(:http_fetch_url).
-      returns "https://localhost:#{gitsslport}/#{repo['name']}.git"
+      returns "#{Rails.configuration.Services.GitHTTP.ExternalURL.to_s}/#{repo['name']}.git"
     commit_sha1 = '1de84a854e2b440dc53bf42f8548afa4c17da332'
     visit page_with_token('active', "/repositories/#{repo['uuid']}/commit/#{commit_sha1}")
     assert_text "Date:   Tue Mar 18 15:55:28 2014 -0400"
index 562dc7d1f41cb885aee778aace35ae81d9e2d11f..5f2886c7a3a0bb7277cf3b536dc65fbc25fac136 100644 (file)
@@ -188,7 +188,7 @@ class UserSettingsMenuTest < ActionDispatch::IntegrationTest
     end
     assert_text ":active/workbenchtest.git"
     assert_match /git@git.*:active\/workbenchtest.git/, page.text
-    assert_match /https:\/\/git.*\/active\/workbenchtest.git/, page.text
+    assert_match /#{Rails.configuration.Services.GitHTTP.ExternalURL.to_s}\/active\/workbenchtest.git/, page.text
   end
 
   [
index 34ee1f479a6a2a9a9eacd8402d79dd343bbaf08a..0c66e59c8cce0d3e03ed90e9952be8ff264c647e 100644 (file)
@@ -156,19 +156,6 @@ module HeadlessHelper
   end
 end
 
-module KeepWebConfig
-  def getport service
-    File.read(File.expand_path("../../../../tmp/#{service}.port", __FILE__))
-  end
-
-  def use_keep_web_config
-    @kwport = getport 'keep-web-ssl'
-    @kwdport = getport 'keep-web-dl-ssl'
-    Rails.configuration.Services.WebDAV.ExternalURL = URI("https://localhost:#{@kwport}")
-    Rails.configuration.Services.WebDAVDownload.ExternalURL = URI("https://localhost:#{@kwdport}")
-  end
-end
-
 class ActionDispatch::IntegrationTest
   # Make the Capybara DSL available in all integration tests
   include Capybara::DSL
index a46f2ec7660590cd82a3969e22ebecaceafe1dc2..b19eafa41e39127c87d1ed560a5727518e9ee036 100755 (executable)
@@ -35,8 +35,7 @@ Options:
 --short        Skip (or scale down) some slow tests.
 --interactive  Set up, then prompt for test/install steps to perform.
 WORKSPACE=path Arvados source tree to test.
-CONFIGSRC=path Dir with api server config files to copy into source tree.
-               (If none given, leave config files alone in source tree.)
+CONFIGSRC=path Dir with config.yml file containing PostgreSQL section for use by tests. (required)
 services/api_test="TEST=test/functional/arvados/v1/collections_controller_test.rb"
                Restrict apiserver tests to the given file
 sdk/python_test="--test-suite tests.test_keep_locator"
@@ -197,6 +196,10 @@ sanity_checks() {
     [[ -n "${skip[sanity]}" ]] && return 0
     ( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \
         || fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)"
+    [[ -n "$CONFIGSRC" ]] \
+       || fatal "CONFIGSRC environment not set (see: $0 --help)"
+    [[ -s "$CONFIGSRC/config.yml" ]] \
+       || fatal "'$CONFIGSRC/config.yml' is empty or not found (see: $0 --help)"
     echo Checking dependencies:
     echo "locale: ${LANG}"
     [[ "$(locale charmap)" = "UTF-8" ]] \
@@ -385,12 +388,8 @@ checkpidfile() {
 
 checkhealth() {
     svc="$1"
-    port="$(cat "$WORKSPACE/tmp/${svc}.port")"
-    scheme=http
-    if [[ ${svc} =~ -ssl$ || ${svc} = wss ]]; then
-        scheme=https
-    fi
-    url="$scheme://localhost:${port}/_health/ping"
+    base=$(python -c "import yaml; print list(yaml.safe_load(file('$ARVADOS_CONFIG'))['Clusters']['zzzzz']['Services']['$1']['InternalURLs'].keys())[0]")
+    url="$base/_health/ping"
     if ! curl -Ss -H "Authorization: Bearer e687950a23c3a9bceec28c6223a06c79" "${url}" | tee -a /dev/stderr | grep '"OK"'; then
         echo "${url} failed"
         return 1
@@ -422,28 +421,36 @@ start_services() {
     fi
     all_services_stopped=
     fail=1
+
+    # Create config if it hasn't been created already.  Normally
+    # this happens in install_env because there are downstream
+    # steps like workbench install which require a valid
+    # config.yml, but when invoked with --skip-install that doesn't
+    # happen, so make sure to run it here.
+    eval $(python sdk/python/tests/run_test_server.py setup_config)
+
     cd "$WORKSPACE" \
         && eval $(python sdk/python/tests/run_test_server.py start --auth admin) \
         && export ARVADOS_TEST_API_HOST="$ARVADOS_API_HOST" \
         && export ARVADOS_TEST_API_INSTALLED="$$" \
         && checkpidfile api \
         && checkdiscoverydoc $ARVADOS_API_HOST \
+        && eval $(python sdk/python/tests/run_test_server.py start_nginx) \
+        && checkpidfile nginx \
         && python sdk/python/tests/run_test_server.py start_controller \
         && checkpidfile controller \
-        && checkhealth controller \
+        && checkhealth Controller \
+        && checkdiscoverydoc $ARVADOS_API_HOST \
         && python sdk/python/tests/run_test_server.py start_keep_proxy \
         && checkpidfile keepproxy \
         && python sdk/python/tests/run_test_server.py start_keep-web \
         && checkpidfile keep-web \
-        && checkhealth keep-web \
+        && checkhealth WebDAV \
         && python sdk/python/tests/run_test_server.py start_arv-git-httpd \
         && checkpidfile arv-git-httpd \
-        && checkhealth arv-git-httpd \
+        && checkhealth GitHTTP \
         && python sdk/python/tests/run_test_server.py start_ws \
         && checkpidfile ws \
-        && eval $(python sdk/python/tests/run_test_server.py start_nginx) \
-        && checkdiscoverydoc $ARVADOS_API_HOST \
-        && checkpidfile nginx \
         && export ARVADOS_TEST_PROXY_SERVICES=1 \
         && (env | egrep ^ARVADOS) \
         && fail=0
@@ -579,11 +586,6 @@ initialize() {
 
     echo "WORKSPACE=$WORKSPACE"
 
-    if [[ -z "$CONFIGSRC" ]] && [[ -d "$HOME/arvados-api-server" ]]; then
-        # Jenkins expects us to use this by default.
-        CONFIGSRC="$HOME/arvados-api-server"
-    fi
-
     # Clean up .pyc files that may exist in the workspace
     cd "$WORKSPACE"
     find -name '*.pyc' -delete
@@ -627,26 +629,6 @@ initialize() {
     # whine a lot.
     setup_ruby_environment
 
-    if [[ -s "$CONFIGSRC/config.yml" ]] ; then
-       cp "$CONFIGSRC/config.yml" "$temp/test-config.yml"
-       export ARVADOS_CONFIG="$temp/test-config.yml"
-    else
-       if [[ -s /etc/arvados/config.yml ]] ; then
-           python > "$temp/test-config.yml" <<EOF
-import yaml
-import json
-v = list(yaml.safe_load(open('/etc/arvados/config.yml'))['Clusters'].values())[0]['PostgreSQL']
-v['Connection']['dbname'] = 'arvados_test'
-print(json.dumps({"Clusters": { "zzzzz": {'PostgreSQL': v}}}))
-EOF
-           export ARVADOS_CONFIG="$temp/test-config.yml"
-       else
-           if [[ ! -f "$WORKSPACE/services/api/config/database.yml" ]]; then
-               fatal "Please provide a database.yml file for the test suite"
-           fi
-       fi
-    fi
-
     echo "PATH is $PATH"
 }
 
@@ -682,6 +664,11 @@ install_env() {
     pip install --no-cache-dir PyYAML \
         || fatal "pip install PyYAML failed"
 
+    # Create config file.  The run_test_server script requires PyYAML,
+    # so virtualenv needs to be active.  Downstream steps like
+    # workbench install which require a valid config.yml.
+    eval $(python sdk/python/tests/run_test_server.py setup_config)
+
     # Preinstall libcloud if using a fork; otherwise nodemanager "pip
     # install" won't pick it up by default.
     if [[ -n "$LIBCLOUD_PIN_SRC" ]]; then
@@ -951,19 +938,11 @@ install_services/api() {
     rm -f config/environments/test.rb
     cp config/environments/test.rb.example config/environments/test.rb
 
-    if [ -n "$CONFIGSRC" ]
-    then
-        for f in database.yml
-        do
-            cp "$CONFIGSRC/$f" config/ || fatal "$f"
-        done
-    fi
-
     # Clear out any lingering postgresql connections to the test
     # database, so that we can drop it. This assumes the current user
     # is a postgresql superuser.
     cd "$WORKSPACE/services/api" \
-        && test_database=$(python -c "import yaml; print yaml.safe_load(file('config/database.yml'))['test']['database']") \
+        && test_database=$(python -c "import yaml; print yaml.safe_load(file('$ARVADOS_CONFIG'))['Clusters']['zzzzz']['PostgreSQL']['Connection']['dbname']") \
         && psql "$test_database" -c "SELECT pg_terminate_backend (pg_stat_activity.pid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$test_database';" 2>/dev/null
 
     mkdir -p "$WORKSPACE/services/api/tmp/pids"
index 079c32802ca4d3a038b1a395b5d56188b99a7cce..7ee8138665159874777dd39f98e399effcf1bd73 100644 (file)
@@ -32,7 +32,7 @@ type ec2InstanceSetConfig struct {
        AccessKeyID      string
        SecretAccessKey  string
        Region           string
-       SecurityGroupIDs []string
+       SecurityGroupIDs map[string]interface{}
        SubnetID         string
        AdminUsername    string
        EBSVolumeType    string
@@ -161,6 +161,11 @@ func (instanceSet *ec2InstanceSet) Create(
                })
        }
 
+       var groups []string
+       for sg := range instanceSet.ec2config.SecurityGroupIDs {
+               groups = append(groups, sg)
+       }
+
        rii := ec2.RunInstancesInput{
                ImageId:      aws.String(string(imageID)),
                InstanceType: &instanceType.ProviderType,
@@ -173,7 +178,7 @@ func (instanceSet *ec2InstanceSet) Create(
                                AssociatePublicIpAddress: aws.Bool(false),
                                DeleteOnTermination:      aws.Bool(true),
                                DeviceIndex:              aws.Int64(0),
-                               Groups:                   aws.StringSlice(instanceSet.ec2config.SecurityGroupIDs),
+                               Groups:                   aws.StringSlice(groups),
                                SubnetId:                 &instanceSet.ec2config.SubnetID,
                        }},
                DisableApiTermination:             aws.Bool(false),
index 7e5b47191e95d93d848df9fe56a6ca0762f6bae2..2b1da2f2a86b734d43464867c819a4be81dbbd7c 100644 (file)
@@ -178,8 +178,8 @@ Clusters:
 
       # API methods to disable. Disabled methods are not listed in the
       # discovery document, and respond 404 to all requests.
-      # Example: ["jobs.create", "pipeline_instances.create"]
-      DisabledAPIs: []
+      # Example: {"jobs.create":{}, "pipeline_instances.create": {}}
+      DisabledAPIs: {}
 
       # Interval (seconds) between asynchronous permission view updates. Any
       # permission-updating API called with the 'async' parameter schedules an
@@ -198,6 +198,14 @@ Clusters:
       # Maximum wall clock time to spend handling an incoming request.
       RequestTimeout: 5m
 
+      # Websocket will send a periodic empty event after 'SendTimeout'
+      # if there is no other activity to maintain the connection /
+      # detect dropped connections.
+      SendTimeout: 60s
+
+      WebsocketClientEventQueue: 64
+      WebsocketServerEventQueue: 4
+
     Users:
       # Config parameters to automatically setup new users.  If enabled,
       # these users will be able to self-activate.  Enable this if you want
@@ -209,7 +217,14 @@ Clusters:
       AutoSetupNewUsers: false
       AutoSetupNewUsersWithVmUUID: ""
       AutoSetupNewUsersWithRepository: false
-      AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+      AutoSetupUsernameBlacklist:
+        arvados: {}
+        git: {}
+        gitolite: {}
+        gitolite-admin: {}
+        root: {}
+        syslog: {}
+        SAMPLE: {}
 
       # When new_users_are_active is set to true, new users will be active
       # immediately.  This skips the "self-activate" step which enforces
@@ -233,8 +248,8 @@ Clusters:
       AdminNotifierEmailFrom: arvados@example.com
       EmailSubjectPrefix: "[ARVADOS] "
       UserNotifierEmailFrom: arvados@example.com
-      NewUserNotificationRecipients: []
-      NewInactiveUserNotificationRecipients: []
+      NewUserNotificationRecipients: {}
+      NewInactiveUserNotificationRecipients: {}
 
       # Set anonymous_user_token to enable anonymous user access. You can get
       # the token by running "bundle exec ./script/get_anonymous_user_token.rb"
@@ -261,13 +276,13 @@ Clusters:
       MaxDeleteBatch: 0
 
       # Attributes to suppress in events and audit logs.  Notably,
-      # specifying ["manifest_text"] here typically makes the database
+      # specifying {"manifest_text": {}} here typically makes the database
       # smaller and faster.
       #
       # Warning: Using any non-empty value here can have undesirable side
       # effects for any client or component that relies on event logs.
       # Use at your own risk.
-      UnloggedAttributes: []
+      UnloggedAttributes: {}
 
     SystemLogs:
 
@@ -402,10 +417,12 @@ Clusters:
       # to skip the compatibility check (and display a warning message to
       # that effect).
       #
-      # Example for sites running docker < 1.10: ["v1"]
-      # Example for sites running docker >= 1.10: ["v2"]
-      # Example for disabling check: []
-      SupportedDockerImageFormats: ["v2"]
+      # Example for sites running docker < 1.10: {"v1": {}}
+      # Example for sites running docker >= 1.10: {"v2": {}}
+      # Example for disabling check: {}
+      SupportedDockerImageFormats:
+        "v2": {}
+        SAMPLE: {}
 
       # Include details about job reuse decisions in the server log. This
       # causes additional database queries to run, so it should not be
@@ -449,6 +466,20 @@ Clusters:
       # stale locks from a previous dispatch process.
       StaleLockTimeout: 1m
 
+      # The crunch-run command to manage the container on a node
+      CrunchRunCommand: "crunch-run"
+
+      # Extra arguments to add to crunch-run invocation
+      # Example: ["--cgroup-parent-subsystem=memory"]
+      CrunchRunArgumentsList: []
+
+      # Extra RAM to reserve on the node, in addition to
+      # the amount specified in the container's RuntimeConstraints
+      ReserveExtraRAM: 256MiB
+
+      # Minimum time between two attempts to run the same container
+      MinRetryPeriod: 0s
+
       Logging:
         # When you run the db:delete_old_container_logs task, it will find
         # containers that have been finished for at least this many seconds,
@@ -492,6 +523,8 @@ Clusters:
         LogUpdateSize: 32MiB
 
       SLURM:
+        PrioritySpread: 0
+        SbatchArgumentsList: []
         Managed:
           # Path to dns server configuration directory
           # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
@@ -515,7 +548,8 @@ Clusters:
 
           ComputeNodeDomain: ""
           ComputeNodeNameservers:
-            - 192.168.1.1
+            "192.168.1.1": {}
+            SAMPLE: {}
 
           # Hostname to assign to a compute node when it sends a "ping" and the
           # hostname in its Node record is nil.
@@ -657,7 +691,7 @@ Clusters:
 
           # (ec2) Instance configuration.
           SecurityGroupIDs:
-            - ""
+            "SAMPLE": {}
           SubnetID: ""
           Region: ""
           EBSVolumeType: gp2
index 0b0bb26689902af4fb8c2b668e75cc4cc7e00981..3e1ec7278bcf4662dafbddb0314a8f56432682ac 100644 (file)
@@ -108,29 +108,37 @@ type oldKeepstoreConfig struct {
        Debug *bool
 }
 
-// update config using values from an old-style keepstore config file.
-func (ldr *Loader) loadOldKeepstoreConfig(cfg *arvados.Config) error {
-       path := ldr.KeepstorePath
+func (ldr *Loader) loadOldConfigHelper(component, path string, target interface{}) error {
        if path == "" {
                return nil
        }
        buf, err := ioutil.ReadFile(path)
-       if os.IsNotExist(err) && path == defaultKeepstoreConfigPath {
-               return nil
-       } else if err != nil {
+       if err != nil {
                return err
-       } else {
-               ldr.Logger.Warnf("you should remove the legacy keepstore config file (%s) after migrating all config keys to the cluster configuration file (%s)", path, ldr.Path)
        }
-       cluster, err := cfg.GetCluster("")
+
+       ldr.Logger.Warnf("you should remove the legacy %v config file (%s) after migrating all config keys to the cluster configuration file (%s)", component, path, ldr.Path)
+
+       err = yaml.Unmarshal(buf, target)
        if err != nil {
-               return err
+               return fmt.Errorf("%s: %s", path, err)
        }
+       return nil
+}
 
+// update config using values from an old-style keepstore config file.
+func (ldr *Loader) loadOldKeepstoreConfig(cfg *arvados.Config) error {
        var oc oldKeepstoreConfig
-       err = yaml.Unmarshal(buf, &oc)
+       err := ldr.loadOldConfigHelper("keepstore", ldr.KeepstorePath, &oc)
+       if os.IsNotExist(err) && (ldr.KeepstorePath == defaultKeepstoreConfigPath) {
+               return nil
+       } else if err != nil {
+               return err
+       }
+
+       cluster, err := cfg.GetCluster("")
        if err != nil {
-               return fmt.Errorf("%s: %s", path, err)
+               return err
        }
 
        if v := oc.Debug; v == nil {
@@ -143,3 +151,161 @@ func (ldr *Loader) loadOldKeepstoreConfig(cfg *arvados.Config) error {
        cfg.Clusters[cluster.ClusterID] = *cluster
        return nil
 }
+
+type oldCrunchDispatchSlurmConfig struct {
+       Client *arvados.Client
+
+       SbatchArguments *[]string
+       PollPeriod      *arvados.Duration
+       PrioritySpread  *int64
+
+       // crunch-run command to invoke. The container UUID will be
+       // appended. If nil, []string{"crunch-run"} will be used.
+       //
+       // Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
+       CrunchRunCommand *[]string
+
+       // Extra RAM to reserve (in Bytes) for SLURM job, in addition
+       // to the amount specified in the container's RuntimeConstraints
+       ReserveExtraRAM *int64
+
+       // Minimum time between two attempts to run the same container
+       MinRetryPeriod *arvados.Duration
+
+       // Batch size for container queries
+       BatchSize *int64
+}
+
+const defaultCrunchDispatchSlurmConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
+
+func loadOldClientConfig(cluster *arvados.Cluster, client *arvados.Client) {
+       if client == nil {
+               return
+       }
+       if client.APIHost != "" {
+               cluster.Services.Controller.ExternalURL.Host = client.APIHost
+       }
+       if client.Scheme != "" {
+               cluster.Services.Controller.ExternalURL.Scheme = client.Scheme
+       } else {
+               cluster.Services.Controller.ExternalURL.Scheme = "https"
+       }
+       if client.AuthToken != "" {
+               cluster.SystemRootToken = client.AuthToken
+       }
+       cluster.TLS.Insecure = client.Insecure
+}
+
+// update config using values from a crunch-dispatch-slurm config file.
+func (ldr *Loader) loadOldCrunchDispatchSlurmConfig(cfg *arvados.Config) error {
+       var oc oldCrunchDispatchSlurmConfig
+       err := ldr.loadOldConfigHelper("crunch-dispatch-slurm", ldr.CrunchDispatchSlurmPath, &oc)
+       if os.IsNotExist(err) && (ldr.CrunchDispatchSlurmPath == defaultCrunchDispatchSlurmConfigPath) {
+               return nil
+       } else if err != nil {
+               return err
+       }
+
+       cluster, err := cfg.GetCluster("")
+       if err != nil {
+               return err
+       }
+
+       loadOldClientConfig(cluster, oc.Client)
+
+       if oc.SbatchArguments != nil {
+               cluster.Containers.SLURM.SbatchArgumentsList = *oc.SbatchArguments
+       }
+       if oc.PollPeriod != nil {
+               cluster.Containers.CloudVMs.PollInterval = *oc.PollPeriod
+       }
+       if oc.PrioritySpread != nil {
+               cluster.Containers.SLURM.PrioritySpread = *oc.PrioritySpread
+       }
+       if oc.CrunchRunCommand != nil {
+               if len(*oc.CrunchRunCommand) >= 1 {
+                       cluster.Containers.CrunchRunCommand = (*oc.CrunchRunCommand)[0]
+               }
+               if len(*oc.CrunchRunCommand) >= 2 {
+                       cluster.Containers.CrunchRunArgumentsList = (*oc.CrunchRunCommand)[1:]
+               }
+       }
+       if oc.ReserveExtraRAM != nil {
+               cluster.Containers.ReserveExtraRAM = arvados.ByteSize(*oc.ReserveExtraRAM)
+       }
+       if oc.MinRetryPeriod != nil {
+               cluster.Containers.MinRetryPeriod = *oc.MinRetryPeriod
+       }
+       if oc.BatchSize != nil {
+               cluster.API.MaxItemsPerResponse = int(*oc.BatchSize)
+       }
+
+       cfg.Clusters[cluster.ClusterID] = *cluster
+       return nil
+}
+
+type oldWsConfig struct {
+       Client       *arvados.Client
+       Postgres     *arvados.PostgreSQLConnection
+       PostgresPool *int
+       Listen       *string
+       LogLevel     *string
+       LogFormat    *string
+
+       PingTimeout      *arvados.Duration
+       ClientEventQueue *int
+       ServerEventQueue *int
+
+       ManagementToken *string
+}
+
+const defaultWebsocketConfigPath = "/etc/arvados/ws/ws.yml"
+
+// update config using values from an old-style websocket (arvados-ws) config file.
+func (ldr *Loader) loadOldWebsocketConfig(cfg *arvados.Config) error {
+       var oc oldWsConfig
+       err := ldr.loadOldConfigHelper("arvados-ws", ldr.WebsocketPath, &oc)
+       if os.IsNotExist(err) && ldr.WebsocketPath == defaultWebsocketConfigPath {
+               return nil
+       } else if err != nil {
+               return err
+       }
+
+       cluster, err := cfg.GetCluster("")
+       if err != nil {
+               return err
+       }
+
+       loadOldClientConfig(cluster, oc.Client)
+
+       if oc.Postgres != nil {
+               cluster.PostgreSQL.Connection = *oc.Postgres
+       }
+       if oc.PostgresPool != nil {
+               cluster.PostgreSQL.ConnectionPool = *oc.PostgresPool
+       }
+       if oc.Listen != nil {
+               cluster.Services.Websocket.InternalURLs[arvados.URL{Host: *oc.Listen}] = arvados.ServiceInstance{}
+       }
+       if oc.LogLevel != nil {
+               cluster.SystemLogs.LogLevel = *oc.LogLevel
+       }
+       if oc.LogFormat != nil {
+               cluster.SystemLogs.Format = *oc.LogFormat
+       }
+       if oc.PingTimeout != nil {
+               cluster.API.SendTimeout = *oc.PingTimeout
+       }
+       if oc.ClientEventQueue != nil {
+               cluster.API.WebsocketClientEventQueue = *oc.ClientEventQueue
+       }
+       if oc.ServerEventQueue != nil {
+               cluster.API.WebsocketServerEventQueue = *oc.ServerEventQueue
+       }
+       if oc.ManagementToken != nil {
+               cluster.ManagementToken = *oc.ManagementToken
+       }
+
+       cfg.Clusters[cluster.ClusterID] = *cluster
+       return nil
+}
index b79dec4d9d1532b1f348965e5c657e71df21704e..6ff72afcc1eb709e51cf8746672ce4a5014889da 100644 (file)
@@ -64,6 +64,9 @@ var whitelist = map[string]bool{
        "API.MaxRequestSize":                           true,
        "API.RailsSessionSecretToken":                  false,
        "API.RequestTimeout":                           true,
+       "API.WebsocketClientEventQueue":                false,
+       "API.SendTimeout":                              true,
+       "API.WebsocketServerEventQueue":                false,
        "AuditLogs":                                    false,
        "AuditLogs.MaxAge":                             false,
        "AuditLogs.MaxDeleteBatch":                     false,
@@ -83,6 +86,8 @@ var whitelist = map[string]bool{
        "Collections.TrustAllContent":                  false,
        "Containers":                                   true,
        "Containers.CloudVMs":                          false,
+       "Containers.CrunchRunCommand":                  false,
+       "Containers.CrunchRunArgumentsList":            false,
        "Containers.DefaultKeepCacheRAM":               true,
        "Containers.DispatchPrivateKey":                false,
        "Containers.JobsAPI":                           true,
@@ -98,9 +103,12 @@ var whitelist = map[string]bool{
        "Containers.MaxComputeVMs":                     false,
        "Containers.MaxDispatchAttempts":               false,
        "Containers.MaxRetryAttempts":                  true,
+       "Containers.MinRetryPeriod":                    true,
+       "Containers.ReserveExtraRAM":                   true,
        "Containers.SLURM":                             false,
        "Containers.StaleLockTimeout":                  false,
        "Containers.SupportedDockerImageFormats":       true,
+       "Containers.SupportedDockerImageFormats.*":     true,
        "Containers.UsePreemptibleInstances":           true,
        "EnableBetaController14287":                    false,
        "Git":                                          false,
index 0a9d7a5b6df6ce8ec00d775081c732d3d7b91a50..35edb05bcd683a1b07596d39e39ae441c1b7aa86 100644 (file)
@@ -184,8 +184,8 @@ Clusters:
 
       # API methods to disable. Disabled methods are not listed in the
       # discovery document, and respond 404 to all requests.
-      # Example: ["jobs.create", "pipeline_instances.create"]
-      DisabledAPIs: []
+      # Example: {"jobs.create":{}, "pipeline_instances.create": {}}
+      DisabledAPIs: {}
 
       # Interval (seconds) between asynchronous permission view updates. Any
       # permission-updating API called with the 'async' parameter schedules a an
@@ -204,6 +204,14 @@ Clusters:
       # Maximum wall clock time to spend handling an incoming request.
       RequestTimeout: 5m
 
+      # Websocket will send a periodic empty event after 'SendTimeout'
+      # if there is no other activity to maintain the connection /
+      # detect dropped connections.
+      SendTimeout: 60s
+
+      WebsocketClientEventQueue: 64
+      WebsocketServerEventQueue: 4
+
     Users:
       # Config parameters to automatically setup new users.  If enabled,
       # this users will be able to self-activate.  Enable this if you want
@@ -215,7 +223,14 @@ Clusters:
       AutoSetupNewUsers: false
       AutoSetupNewUsersWithVmUUID: ""
       AutoSetupNewUsersWithRepository: false
-      AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+      AutoSetupUsernameBlacklist:
+        arvados: {}
+        git: {}
+        gitolite: {}
+        gitolite-admin: {}
+        root: {}
+        syslog: {}
+        SAMPLE: {}
 
       # When new_users_are_active is set to true, new users will be active
       # immediately.  This skips the "self-activate" step which enforces
@@ -239,8 +254,8 @@ Clusters:
       AdminNotifierEmailFrom: arvados@example.com
       EmailSubjectPrefix: "[ARVADOS] "
       UserNotifierEmailFrom: arvados@example.com
-      NewUserNotificationRecipients: []
-      NewInactiveUserNotificationRecipients: []
+      NewUserNotificationRecipients: {}
+      NewInactiveUserNotificationRecipients: {}
 
       # Set anonymous_user_token to enable anonymous user access. You can get
       # the token by running "bundle exec ./script/get_anonymous_user_token.rb"
@@ -267,13 +282,13 @@ Clusters:
       MaxDeleteBatch: 0
 
       # Attributes to suppress in events and audit logs.  Notably,
-      # specifying ["manifest_text"] here typically makes the database
+      # specifying {"manifest_text": {}} here typically makes the database
       # smaller and faster.
       #
       # Warning: Using any non-empty value here can have undesirable side
       # effects for any client or component that relies on event logs.
       # Use at your own risk.
-      UnloggedAttributes: []
+      UnloggedAttributes: {}
 
     SystemLogs:
 
@@ -408,10 +423,12 @@ Clusters:
       # to skip the compatibility check (and display a warning message to
       # that effect).
       #
-      # Example for sites running docker < 1.10: ["v1"]
-      # Example for sites running docker >= 1.10: ["v2"]
-      # Example for disabling check: []
-      SupportedDockerImageFormats: ["v2"]
+      # Example for sites running docker < 1.10: {"v1": {}}
+      # Example for sites running docker >= 1.10: {"v2": {}}
+      # Example for disabling check: {}
+      SupportedDockerImageFormats:
+        "v2": {}
+        SAMPLE: {}
 
       # Include details about job reuse decisions in the server log. This
       # causes additional database queries to run, so it should not be
@@ -455,6 +472,20 @@ Clusters:
       # stale locks from a previous dispatch process.
       StaleLockTimeout: 1m
 
+      # The crunch-run command to manage the container on a node
+      CrunchRunCommand: "crunch-run"
+
+      # Extra arguments to add to crunch-run invocation
+      # Example: ["--cgroup-parent-subsystem=memory"]
+      CrunchRunArgumentsList: []
+
+      # Extra RAM to reserve on the node, in addition to
+      # the amount specified in the container's RuntimeConstraints
+      ReserveExtraRAM: 256MiB
+
+      # Minimum time between two attempts to run the same container
+      MinRetryPeriod: 0s
+
       Logging:
         # When you run the db:delete_old_container_logs task, it will find
         # containers that have been finished for at least this many seconds,
@@ -498,6 +529,8 @@ Clusters:
         LogUpdateSize: 32MiB
 
       SLURM:
+        PrioritySpread: 0
+        SbatchArgumentsList: []
         Managed:
           # Path to dns server configuration directory
           # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
@@ -521,7 +554,8 @@ Clusters:
 
           ComputeNodeDomain: ""
           ComputeNodeNameservers:
-            - 192.168.1.1
+            "192.168.1.1": {}
+            SAMPLE: {}
 
           # Hostname to assign to a compute node when it sends a "ping" and the
           # hostname in its Node record is nil.
@@ -663,7 +697,7 @@ Clusters:
 
           # (ec2) Instance configuration.
           SecurityGroupIDs:
-            - ""
+            "SAMPLE": {}
           SubnetID: ""
           Region: ""
           EBSVolumeType: gp2
index 168c1aa22a8554ef649cc65463b10b8437970494..2dacd5c26c423739cbb5e2238d6ef2c9cbbbc370 100644 (file)
@@ -28,8 +28,10 @@ type Loader struct {
        Logger         logrus.FieldLogger
        SkipDeprecated bool // Don't load legacy/deprecated config keys/files
 
-       Path          string
-       KeepstorePath string
+       Path                    string
+       KeepstorePath           string
+       CrunchDispatchSlurmPath string
+       WebsocketPath           string
 
        configdata []byte
 }
@@ -57,6 +59,8 @@ func NewLoader(stdin io.Reader, logger logrus.FieldLogger) *Loader {
 func (ldr *Loader) SetupFlags(flagset *flag.FlagSet) {
        flagset.StringVar(&ldr.Path, "config", arvados.DefaultConfigFile, "Site configuration `file` (default may be overridden by setting an ARVADOS_CONFIG environment variable)")
        flagset.StringVar(&ldr.KeepstorePath, "legacy-keepstore-config", defaultKeepstoreConfigPath, "Legacy keepstore configuration `file`")
+       flagset.StringVar(&ldr.CrunchDispatchSlurmPath, "legacy-crunch-dispatch-slurm-config", defaultCrunchDispatchSlurmConfigPath, "Legacy crunch-dispatch-slurm configuration `file`")
+       flagset.StringVar(&ldr.WebsocketPath, "legacy-ws-config", defaultWebsocketConfigPath, "Legacy arvados-ws configuration `file`")
 }
 
 // MungeLegacyConfigArgs checks args for a -config flag whose argument
@@ -203,8 +207,14 @@ func (ldr *Loader) Load() (*arvados.Config, error) {
                if err != nil {
                        return nil, err
                }
+               // legacy file is required when either:
+               // * a non-default location was specified
+               // * no primary config was loaded, and this is the
+               // legacy config file for the current component
                for _, err := range []error{
                        ldr.loadOldKeepstoreConfig(&cfg),
+                       ldr.loadOldCrunchDispatchSlurmConfig(&cfg),
+                       ldr.loadOldWebsocketConfig(&cfg),
                } {
                        if err != nil {
                                return nil, err
index 340eb0a0a7e6900cc090bd170dea373a9100b664..c7289350ec8c09ca86637edab258cd32874882e7 100644 (file)
@@ -168,7 +168,9 @@ func (s *LoadSuite) TestSampleKeys(c *check.C) {
 }
 
 func (s *LoadSuite) TestMultipleClusters(c *check.C) {
-       cfg, err := testLoader(c, `{"Clusters":{"z1111":{},"z2222":{}}}`, nil).Load()
+       ldr := testLoader(c, `{"Clusters":{"z1111":{},"z2222":{}}}`, nil)
+       ldr.SkipDeprecated = true
+       cfg, err := ldr.Load()
        c.Assert(err, check.IsNil)
        c1, err := cfg.GetCluster("z1111")
        c.Assert(err, check.IsNil)
@@ -373,3 +375,80 @@ func (s *LoadSuite) checkEquivalent(c *check.C, goty, expectedy string) {
                c.Check(err, check.IsNil)
        }
 }
+
+func checkListKeys(path string, x interface{}) (err error) {
+       v := reflect.Indirect(reflect.ValueOf(x))
+       switch v.Kind() {
+       case reflect.Map:
+               iter := v.MapRange()
+               for iter.Next() {
+                       k := iter.Key()
+                       if k.Kind() == reflect.String {
+                               if err = checkListKeys(path+"."+k.String(), iter.Value().Interface()); err != nil {
+                                       return
+                               }
+                       }
+               }
+               return
+
+       case reflect.Struct:
+               for i := 0; i < v.NumField(); i++ {
+                       val := v.Field(i)
+                       structField := v.Type().Field(i)
+                       fieldname := structField.Name
+                       endsWithList := strings.HasSuffix(fieldname, "List")
+                       isAnArray := structField.Type.Kind() == reflect.Slice
+                       if endsWithList != isAnArray {
+                               if endsWithList {
+                                       err = fmt.Errorf("%s.%s ends with 'List' but field is not an array (type %v)", path, fieldname, val.Kind())
+                                       return
+                               }
+                               if isAnArray && structField.Type.Elem().Kind() != reflect.Uint8 {
+                                       err = fmt.Errorf("%s.%s is an array but field name does not end in 'List' (slice of %v)", path, fieldname, structField.Type.Elem().Kind())
+                                       return
+                               }
+                       }
+                       if val.CanInterface() {
+                               checkListKeys(path+"."+fieldname, val.Interface())
+                       }
+               }
+       }
+       return
+}
+
+func (s *LoadSuite) TestListKeys(c *check.C) {
+       v1 := struct {
+               EndInList []string
+       }{[]string{"a", "b"}}
+       var m1 = make(map[string]interface{})
+       m1["c"] = &v1
+       if err := checkListKeys("", m1); err != nil {
+               c.Error(err)
+       }
+
+       v2 := struct {
+               DoesNot []string
+       }{[]string{"a", "b"}}
+       var m2 = make(map[string]interface{})
+       m2["c"] = &v2
+       if err := checkListKeys("", m2); err == nil {
+               c.Errorf("Should have produced an error")
+       }
+
+       v3 := struct {
+               EndInList string
+       }{"a"}
+       var m3 = make(map[string]interface{})
+       m3["c"] = &v3
+       if err := checkListKeys("", m3); err == nil {
+               c.Errorf("Should have produced an error")
+       }
+
+       var logbuf bytes.Buffer
+       loader := testLoader(c, string(DefaultYAML), &logbuf)
+       cfg, err := loader.Load()
+       c.Assert(err, check.IsNil)
+       if err := checkListKeys("", cfg); err != nil {
+               c.Error(err)
+       }
+}
index c8206c7da437c48ff963d563e976cc77cdb4ac3b..bee93046eb8e696f554e31e04a7b6fb0a9fb37dc 100644 (file)
@@ -69,18 +69,21 @@ type Cluster struct {
 
        API struct {
                AsyncPermissionsUpdateInterval Duration
-               DisabledAPIs                   []string
+               DisabledAPIs                   StringSet
                MaxIndexDatabaseRead           int
                MaxItemsPerResponse            int
                MaxRequestAmplification        int
                MaxRequestSize                 int
                RailsSessionSecretToken        string
                RequestTimeout                 Duration
+               SendTimeout                    Duration
+               WebsocketClientEventQueue      int
+               WebsocketServerEventQueue      int
        }
        AuditLogs struct {
                MaxAge             Duration
                MaxDeleteBatch     int
-               UnloggedAttributes []string
+               UnloggedAttributes StringSet
        }
        Collections struct {
                BlobSigning          bool
@@ -132,10 +135,10 @@ type Cluster struct {
                AutoSetupNewUsers                     bool
                AutoSetupNewUsersWithRepository       bool
                AutoSetupNewUsersWithVmUUID           string
-               AutoSetupUsernameBlacklist            []string
+               AutoSetupUsernameBlacklist            StringSet
                EmailSubjectPrefix                    string
-               NewInactiveUserNotificationRecipients []string
-               NewUserNotificationRecipients         []string
+               NewInactiveUserNotificationRecipients StringSet
+               NewUserNotificationRecipients         StringSet
                NewUsersAreActive                     bool
                UserNotifierEmailFrom                 string
                UserProfileNotificationAddress        string
@@ -145,7 +148,7 @@ type Cluster struct {
                APIClientConnectTimeout          Duration
                APIClientReceiveTimeout          Duration
                APIResponseCompression           bool
-               ApplicationMimetypesWithViewIcon map[string]struct{}
+               ApplicationMimetypesWithViewIcon StringSet
                ArvadosDocsite                   string
                ArvadosPublicDataDocURL          string
                DefaultOpenIdPrefix              string
@@ -253,14 +256,18 @@ type InstanceType struct {
 
 type ContainersConfig struct {
        CloudVMs                    CloudVMsConfig
+       CrunchRunCommand            string
+       CrunchRunArgumentsList      []string
        DefaultKeepCacheRAM         ByteSize
        DispatchPrivateKey          string
        LogReuseDecisions           bool
        MaxComputeVMs               int
        MaxDispatchAttempts         int
        MaxRetryAttempts            int
+       MinRetryPeriod              Duration
+       ReserveExtraRAM             ByteSize
        StaleLockTimeout            Duration
-       SupportedDockerImageFormats []string
+       SupportedDockerImageFormats StringSet
        UsePreemptibleInstances     bool
 
        JobsAPI struct {
@@ -285,13 +292,15 @@ type ContainersConfig struct {
                LogUpdateSize                ByteSize
        }
        SLURM struct {
-               Managed struct {
+               PrioritySpread      int64
+               SbatchArgumentsList []string
+               Managed             struct {
                        DNSServerConfDir       string
                        DNSServerConfTemplate  string
                        DNSServerReloadCommand string
                        DNSServerUpdateCommand string
                        ComputeNodeDomain      string
-                       ComputeNodeNameservers []string
+                       ComputeNodeNameservers StringSet
                        AssignNodeHostname     string
                }
        }
@@ -379,6 +388,40 @@ func (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {
        return nil
 }
 
+type StringSet map[string]struct{}
+
+// UnmarshalJSON handles old config files that provide an array of
+// strings instead of a hash.
+func (ss *StringSet) UnmarshalJSON(data []byte) error {
+       if len(data) > 0 && data[0] == '[' {
+               var arr []string
+               err := json.Unmarshal(data, &arr)
+               if err != nil {
+                       return err
+               }
+               if len(arr) == 0 {
+                       *ss = nil
+                       return nil
+               }
+               *ss = make(map[string]struct{}, len(arr))
+               for _, t := range arr {
+                       (*ss)[t] = struct{}{}
+               }
+               return nil
+       }
+       var hash map[string]struct{}
+       err := json.Unmarshal(data, &hash)
+       if err != nil {
+               return err
+       }
+       *ss = make(map[string]struct{}, len(hash))
+       for t, _ := range hash {
+               (*ss)[t] = struct{}{}
+       }
+
+       return nil
+}
+
 type ServiceName string
 
 const (
index 59c7432686c8fb3246c2aab007eb1f01b4d7fdd6..b984cb5669ce851f2ec1f136a9c96bfb0d06b832 100644 (file)
@@ -14,6 +14,16 @@ var _ = check.Suite(&ConfigSuite{})
 type ConfigSuite struct{}
 
 func (s *ConfigSuite) TestInstanceTypesAsArray(c *check.C) {
+       var cluster Cluster
+       yaml.Unmarshal([]byte(`
+API:
+  DisabledAPIs: [jobs.list]`), &cluster)
+       c.Check(len(cluster.API.DisabledAPIs), check.Equals, 1)
+       _, ok := cluster.API.DisabledAPIs["jobs.list"]
+       c.Check(ok, check.Equals, true)
+}
+
+func (s *ConfigSuite) TestStringSetAsArray(c *check.C) {
        var cluster Cluster
        yaml.Unmarshal([]byte("InstanceTypes:\n- Name: foo\n"), &cluster)
        c.Check(len(cluster.InstanceTypes), check.Equals, 1)
index fdb52e510bd34e36ffe7f22b2975fc95bc05bf60..587c9999c4c98d3e68c957ccfd88d6eb130ae5d7 100644 (file)
@@ -38,7 +38,7 @@ type Dispatcher struct {
        Logger Logger
 
        // Batch size for container queries
-       BatchSize int64
+       BatchSize int
 
        // Queue polling frequency
        PollPeriod time.Duration
index 0b86aea13eab61a791fc6e367f1557a8a95f2047..0f8f1c5f818e2768168e4c71094a67c34fb4478e 100644 (file)
@@ -26,6 +26,11 @@ import time
 import unittest
 import yaml
 
+try:
+    from urllib.parse import urlparse
+except ImportError:
+    from urlparse import urlparse
+
 MY_DIRNAME = os.path.dirname(os.path.realpath(__file__))
 if __name__ == '__main__' and os.path.exists(
       os.path.join(MY_DIRNAME, '..', 'arvados', '__init__.py')):
@@ -313,15 +318,9 @@ def run(leave_running_atexit=False):
         os.makedirs(gitdir)
     subprocess.check_output(['tar', '-xC', gitdir, '-f', gittarball])
 
-    # The nginx proxy isn't listening here yet, but we need to choose
-    # the wss:// port now so we can write the API server config file.
-    wss_port = find_available_port()
-    _setport('wss', wss_port)
-
-    port = find_available_port()
+    port = internal_port_from_config("RailsAPI")
     env = os.environ.copy()
     env['RAILS_ENV'] = 'test'
-    env['ARVADOS_TEST_WSS_PORT'] = str(wss_port)
     env.pop('ARVADOS_WEBSOCKETS', None)
     env.pop('ARVADOS_TEST_API_HOST', None)
     env.pop('ARVADOS_API_HOST', None)
@@ -375,10 +374,7 @@ def reset():
 
     os.environ['ARVADOS_API_HOST_INSECURE'] = 'true'
     os.environ['ARVADOS_API_TOKEN'] = token
-    if _wait_until_port_listens(_getport('controller-ssl'), timeout=0.5, warn=False):
-        os.environ['ARVADOS_API_HOST'] = '0.0.0.0:'+str(_getport('controller-ssl'))
-    else:
-        os.environ['ARVADOS_API_HOST'] = existing_api_host
+    os.environ['ARVADOS_API_HOST'] = existing_api_host
 
 def stop(force=False):
     """Stop the API server, if one is running.
@@ -399,58 +395,30 @@ def stop(force=False):
         kill_server_pid(_pidfile('api'))
         my_api_host = None
 
+def get_config():
+    with open(os.environ["ARVADOS_CONFIG"]) as f:
+        return yaml.safe_load(f)
+
+def internal_port_from_config(service):
+    return int(urlparse(
+        list(get_config()["Clusters"]["zzzzz"]["Services"][service]["InternalURLs"].keys())[0]).
+               netloc.split(":")[1])
+
+def external_port_from_config(service):
+    return int(urlparse(get_config()["Clusters"]["zzzzz"]["Services"][service]["ExternalURL"]).netloc.split(":")[1])
+
 def run_controller():
     if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
         return
     stop_controller()
-    rails_api_port = int(string.split(os.environ.get('ARVADOS_TEST_API_HOST', my_api_host), ':')[-1])
-    port = find_available_port()
-    conf = os.path.join(TEST_TMPDIR, 'arvados.yml')
-    with open(conf, 'w') as f:
-        f.write("""
-Clusters:
-  zzzzz:
-    EnableBetaController14287: {beta14287}
-    ManagementToken: e687950a23c3a9bceec28c6223a06c79
-    API:
-      RequestTimeout: 30s
-    Logging:
-        Level: "{loglevel}"
-    HTTPRequestTimeout: 30s
-    PostgreSQL:
-      ConnectionPool: 32
-      Connection:
-        host: {dbhost}
-        dbname: {dbname}
-        user: {dbuser}
-        password: {dbpass}
-    TLS:
-      Insecure: true
-    Services:
-      Controller:
-        InternalURLs:
-          "http://localhost:{controllerport}": {{}}
-      RailsAPI:
-        InternalURLs:
-          "https://localhost:{railsport}": {{}}
-        """.format(
-            beta14287=('true' if '14287' in os.environ.get('ARVADOS_EXPERIMENTAL', '') else 'false'),
-            loglevel=('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug'),
-            dbhost=_dbconfig('host'),
-            dbname=_dbconfig('dbname'),
-            dbuser=_dbconfig('user'),
-            dbpass=_dbconfig('password'),
-            controllerport=port,
-            railsport=rails_api_port,
-        ))
     logf = open(_logfilename('controller'), 'a')
+    port = internal_port_from_config("Controller")
     controller = subprocess.Popen(
-        ["arvados-server", "controller", "-config", conf],
+        ["arvados-server", "controller"],
         stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
     with open(_pidfile('controller'), 'w') as f:
         f.write(str(controller.pid))
     _wait_until_port_listens(port)
-    _setport('controller', port)
     return port
 
 def stop_controller():
@@ -462,36 +430,13 @@ def run_ws():
     if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
         return
     stop_ws()
-    port = find_available_port()
-    conf = os.path.join(TEST_TMPDIR, 'ws.yml')
-    with open(conf, 'w') as f:
-        f.write("""
-Client:
-  APIHost: {}
-  Insecure: true
-Listen: :{}
-LogLevel: {}
-Postgres:
-  host: {}
-  dbname: {}
-  user: {}
-  password: {}
-  sslmode: require
-        """.format(os.environ['ARVADOS_API_HOST'],
-                   port,
-                   ('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug'),
-                   _dbconfig('host'),
-                   _dbconfig('dbname'),
-                   _dbconfig('user'),
-                   _dbconfig('password')))
+    port = internal_port_from_config("Websocket")
     logf = open(_logfilename('ws'), 'a')
-    ws = subprocess.Popen(
-        ["ws", "-config", conf],
+    ws = subprocess.Popen(["ws"],
         stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
     with open(_pidfile('ws'), 'w') as f:
         f.write(str(ws.pid))
     _wait_until_port_listens(port)
-    _setport('ws', port)
     return port
 
 def stop_ws():
@@ -590,11 +535,11 @@ def stop_keep(num_servers=2):
 
 def run_keep_proxy():
     if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
-        os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(_getport('keepproxy'))
+        os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(internal_port_from_config('Keepproxy'))
         return
     stop_keep_proxy()
 
-    port = find_available_port()
+    port = internal_port_from_config("Keepproxy")
     env = os.environ.copy()
     env['ARVADOS_API_TOKEN'] = auth_token('anonymous')
     logf = open(_logfilename('keepproxy'), 'a')
@@ -604,6 +549,7 @@ def run_keep_proxy():
          '-listen=:{}'.format(port)],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
 
+    print("Using API %s token %s" % (os.environ['ARVADOS_API_HOST'], auth_token('admin')), file=sys.stdout)
     api = arvados.api(
         version='v1',
         host=os.environ['ARVADOS_API_HOST'],
@@ -619,7 +565,6 @@ def run_keep_proxy():
         'service_ssl_flag': False,
     }}).execute()
     os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(port)
-    _setport('keepproxy', port)
     _wait_until_port_listens(port)
 
 def stop_keep_proxy():
@@ -633,7 +578,7 @@ def run_arv_git_httpd():
     stop_arv_git_httpd()
 
     gitdir = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'git')
-    gitport = find_available_port()
+    gitport = internal_port_from_config("GitHTTP")
     env = os.environ.copy()
     env.pop('ARVADOS_API_TOKEN', None)
     logf = open(_logfilename('arv-git-httpd'), 'a')
@@ -645,7 +590,6 @@ def run_arv_git_httpd():
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
     with open(_pidfile('arv-git-httpd'), 'w') as f:
         f.write(str(agh.pid))
-    _setport('arv-git-httpd', gitport)
     _wait_until_port_listens(gitport)
 
 def stop_arv_git_httpd():
@@ -658,7 +602,7 @@ def run_keep_web():
         return
     stop_keep_web()
 
-    keepwebport = find_available_port()
+    keepwebport = internal_port_from_config("WebDAV")
     env = os.environ.copy()
     env['ARVADOS_API_TOKEN'] = auth_token('anonymous')
     logf = open(_logfilename('keep-web'), 'a')
@@ -671,7 +615,6 @@ def run_keep_web():
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
     with open(_pidfile('keep-web'), 'w') as f:
         f.write(str(keepweb.pid))
-    _setport('keep-web', keepwebport)
     _wait_until_port_listens(keepwebport)
 
 def stop_keep_web():
@@ -684,17 +627,17 @@ def run_nginx():
         return
     stop_nginx()
     nginxconf = {}
-    nginxconf['CONTROLLERPORT'] = _getport('controller')
-    nginxconf['CONTROLLERSSLPORT'] = find_available_port()
-    nginxconf['KEEPWEBPORT'] = _getport('keep-web')
-    nginxconf['KEEPWEBDLSSLPORT'] = find_available_port()
-    nginxconf['KEEPWEBSSLPORT'] = find_available_port()
-    nginxconf['KEEPPROXYPORT'] = _getport('keepproxy')
-    nginxconf['KEEPPROXYSSLPORT'] = find_available_port()
-    nginxconf['GITPORT'] = _getport('arv-git-httpd')
-    nginxconf['GITSSLPORT'] = find_available_port()
-    nginxconf['WSPORT'] = _getport('ws')
-    nginxconf['WSSPORT'] = _getport('wss')
+    nginxconf['CONTROLLERPORT'] = internal_port_from_config("Controller")
+    nginxconf['CONTROLLERSSLPORT'] = external_port_from_config("Controller")
+    nginxconf['KEEPWEBPORT'] = internal_port_from_config("WebDAV")
+    nginxconf['KEEPWEBDLSSLPORT'] = external_port_from_config("WebDAVDownload")
+    nginxconf['KEEPWEBSSLPORT'] = external_port_from_config("WebDAV")
+    nginxconf['KEEPPROXYPORT'] = internal_port_from_config("Keepproxy")
+    nginxconf['KEEPPROXYSSLPORT'] = external_port_from_config("Keepproxy")
+    nginxconf['GITPORT'] = internal_port_from_config("GitHTTP")
+    nginxconf['GITSSLPORT'] = external_port_from_config("GitHTTP")
+    nginxconf['WSPORT'] = internal_port_from_config("Websocket")
+    nginxconf['WSSPORT'] = external_port_from_config("Websocket")
     nginxconf['SSLCERT'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.pem')
     nginxconf['SSLKEY'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.key')
     nginxconf['ACCESSLOG'] = _logfilename('nginx_access')
@@ -718,11 +661,102 @@ def run_nginx():
          '-g', 'pid '+_pidfile('nginx')+';',
          '-c', conffile],
         env=env, stdin=open('/dev/null'), stdout=sys.stderr)
-    _setport('controller-ssl', nginxconf['CONTROLLERSSLPORT'])
-    _setport('keep-web-dl-ssl', nginxconf['KEEPWEBDLSSLPORT'])
-    _setport('keep-web-ssl', nginxconf['KEEPWEBSSLPORT'])
-    _setport('keepproxy-ssl', nginxconf['KEEPPROXYSSLPORT'])
-    _setport('arv-git-httpd-ssl', nginxconf['GITSSLPORT'])
+
+def setup_config():
+    rails_api_port = find_available_port()
+    controller_port = find_available_port()
+    controller_external_port = find_available_port()
+    websocket_port = find_available_port()
+    websocket_external_port = find_available_port()
+    git_httpd_port = find_available_port()
+    git_httpd_external_port = find_available_port()
+    keepproxy_port = find_available_port()
+    keepproxy_external_port = find_available_port()
+    keep_web_port = find_available_port()
+    keep_web_external_port = find_available_port()
+    keep_web_dl_port = find_available_port()
+    keep_web_dl_external_port = find_available_port()
+
+    dbconf = os.path.join(os.environ["CONFIGSRC"], "config.yml")
+
+    print("Getting config from %s" % dbconf, file=sys.stderr)
+
+    pgconnection = yaml.safe_load(open(dbconf))["Clusters"]["zzzzz"]["PostgreSQL"]["Connection"]
+
+    localhost = "127.0.0.1"
+    services = {
+        "RailsAPI": {
+            "InternalURLs": {
+                "https://%s:%s"%(localhost, rails_api_port): {}
+            }
+        },
+        "Controller": {
+            "ExternalURL": "https://%s:%s" % (localhost, controller_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, controller_port): {}
+            }
+        },
+        "Websocket": {
+            "ExternalURL": "wss://%s:%s/websocket" % (localhost, websocket_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, websocket_port): {}
+            }
+        },
+        "GitHTTP": {
+            "ExternalURL": "https://%s:%s" % (localhost, git_httpd_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, git_httpd_port): {}
+            }
+        },
+        "Keepproxy": {
+            "ExternalURL": "https://%s:%s" % (localhost, keepproxy_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, keepproxy_port): {}
+            }
+        },
+        "WebDAV": {
+            "ExternalURL": "https://%s:%s" % (localhost, keep_web_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, keep_web_port): {}
+            }
+        },
+        "WebDAVDownload": {
+            "ExternalURL": "https://%s:%s" % (localhost, keep_web_dl_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, keep_web_dl_port): {}
+            }
+        }
+    }
+
+    config = {
+        "Clusters": {
+            "zzzzz": {
+                "EnableBetaController14287": ('14287' in os.environ.get('ARVADOS_EXPERIMENTAL', '')),
+                "ManagementToken": "e687950a23c3a9bceec28c6223a06c79",
+                "API": {
+                    "RequestTimeout": "30s"
+                },
+                "SystemLogs": {
+                    "LogLevel": ('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug')
+                },
+                "PostgreSQL": {
+                    "Connection": pgconnection,
+                },
+                "TLS": {
+                    "Insecure": True
+                },
+                "Services": services
+            }
+        }
+    }
+
+    conf = os.path.join(TEST_TMPDIR, 'arvados.yml')
+    with open(conf, 'w') as f:
+        yaml.safe_dump(config, f)
+
+    ex = "export ARVADOS_CONFIG="+conf
+    print(ex)
+
 
 def stop_nginx():
     if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
@@ -732,50 +766,6 @@ def stop_nginx():
 def _pidfile(program):
     return os.path.join(TEST_TMPDIR, program + '.pid')
 
-def _portfile(program):
-    return os.path.join(TEST_TMPDIR, program + '.port')
-
-def _setport(program, port):
-    with open(_portfile(program), 'w') as f:
-        f.write(str(port))
-
-# Returns 9 if program is not up.
-def _getport(program):
-    try:
-        with open(_portfile(program)) as prog:
-            return int(prog.read())
-    except IOError:
-        return 9
-
-def _dbconfig(key):
-    global _cached_db_config
-    if not _cached_db_config:
-        if "ARVADOS_CONFIG" in os.environ:
-            _cached_db_config = list(yaml.safe_load(open(os.environ["ARVADOS_CONFIG"]))["Clusters"].values())[0]["PostgreSQL"]["Connection"]
-        else:
-            _cached_db_config = yaml.safe_load(open(os.path.join(
-                SERVICES_SRC_DIR, 'api', 'config', 'database.yml')))["test"]
-            _cached_db_config["dbname"] = _cached_db_config["database"]
-            _cached_db_config["user"] = _cached_db_config["username"]
-    return _cached_db_config[key]
-
-def _apiconfig(key):
-    global _cached_config
-    if _cached_config:
-        return _cached_config[key]
-    def _load(f, required=True):
-        fullpath = os.path.join(SERVICES_SRC_DIR, 'api', 'config', f)
-        if not required and not os.path.exists(fullpath):
-            return {}
-        return yaml.safe_load(fullpath)
-    cdefault = _load('application.default.yml')
-    csite = _load('application.yml', required=False)
-    _cached_config = {}
-    for section in [cdefault.get('common',{}), cdefault.get('test',{}),
-                    csite.get('common',{}), csite.get('test',{})]:
-        _cached_config.update(section)
-    return _cached_config[key]
-
 def fixture(fix):
     '''load a fixture yaml file'''
     with open(os.path.join(SERVICES_SRC_DIR, 'api', "test", "fixtures",
@@ -868,7 +858,7 @@ if __name__ == "__main__":
         'start_keep_proxy', 'stop_keep_proxy',
         'start_keep-web', 'stop_keep-web',
         'start_arv-git-httpd', 'stop_arv-git-httpd',
-        'start_nginx', 'stop_nginx',
+        'start_nginx', 'stop_nginx', 'setup_config',
     ]
     parser = argparse.ArgumentParser()
     parser.add_argument('action', type=str, help="one of {}".format(actions))
@@ -922,8 +912,10 @@ if __name__ == "__main__":
         stop_keep_web()
     elif args.action == 'start_nginx':
         run_nginx()
-        print("export ARVADOS_API_HOST=0.0.0.0:{}".format(_getport('controller-ssl')))
+        print("export ARVADOS_API_HOST=0.0.0.0:{}".format(external_port_from_config('Controller')))
     elif args.action == 'stop_nginx':
         stop_nginx()
+    elif args.action == 'setup_config':
+        setup_config()
     else:
         raise Exception("action recognized but not implemented!?")
index b23515dda4528b1e7f44433a8fcfa006d7ee3486..369043e780863a4f84c4c9667fda9bdff2597afb 100644 (file)
@@ -433,7 +433,7 @@ class ApplicationController < ActionController::Base
   end
 
   def disable_api_methods
-    if Rails.configuration.API.DisabledAPIs.include?(controller_name + "." + action_name)
+    if Rails.configuration.API.DisabledAPIs[controller_name + "." + action_name]
       send_error("Disabled", status: 404)
     end
   end
index a8ef4b91b4b018f863c0a156e784e05622995893..81b9ca9e5bec3d74c5b524cb351752845017b1e7 100644 (file)
@@ -152,7 +152,7 @@ class Arvados::V1::CollectionsController < ApplicationController
 
       if direction == :search_up
         # Search upstream for jobs where this locator is the output of some job
-        if !Rails.configuration.API.DisabledAPIs.include?("jobs.list")
+        if !Rails.configuration.API.DisabledAPIs["jobs.list"]
           Job.readable_by(*@read_users).where(output: loc.to_s).each do |job|
             search_edges(visited, job.uuid, :search_up)
           end
@@ -176,7 +176,7 @@ class Arvados::V1::CollectionsController < ApplicationController
         end
 
         # Search downstream for jobs where this locator is in script_parameters
-        if !Rails.configuration.API.DisabledAPIs.include?("jobs.list")
+        if !Rails.configuration.API.DisabledAPIs["jobs.list"]
           Job.readable_by(*@read_users).where(["jobs.script_parameters like ?", "%#{loc.to_s}%"]).each do |job|
             search_edges(visited, job.uuid, :search_down)
           end
@@ -245,7 +245,7 @@ class Arvados::V1::CollectionsController < ApplicationController
           if direction == :search_up
             visited[c.uuid] = c.as_api_response
 
-            if !Rails.configuration.API.DisabledAPIs.include?("jobs.list")
+            if !Rails.configuration.API.DisabledAPIs["jobs.list"]
               Job.readable_by(*@read_users).where(output: c.portable_data_hash).each do |job|
                 search_edges(visited, job.uuid, :search_up)
               end
index d502d5a698e647c7806a6be0ff5497aa6b0f43a8..46d3a75a3a24407ac8ecb1541f2e646b89daf946 100644 (file)
@@ -204,7 +204,7 @@ class Arvados::V1::GroupsController < ApplicationController
     table_names = Hash[klasses.collect { |k| [k, k.table_name] }]
 
     disabled_methods = Rails.configuration.API.DisabledAPIs
-    avail_klasses = table_names.select{|k, t| !disabled_methods.include?(t+'.index')}
+    avail_klasses = table_names.select{|k, t| !disabled_methods[t+'.index']}
     klasses = avail_klasses.keys
 
     request_filters.each do |col, op, val|
index 313fe5d0a086241ba9ca9cef95c8e29db60ad843..90d3db685ef6e7320dabb7206bad9bcf4ad7015b 100644 (file)
@@ -401,7 +401,7 @@ class Arvados::V1::SchemaController < ApplicationController
           end
         end
       end
-      Rails.configuration.API.DisabledAPIs.each do |method|
+      Rails.configuration.API.DisabledAPIs.each do |method, _|
         ctrl, action = method.split('.', 2)
         discovery[:resources][ctrl][:methods].delete(action.to_sym)
       end
index 45e329030f6bfd54e7aea559d011029ab932bdb1..2c39a3924e7a65b3c3b105abff3e01224f25ac5a 100644 (file)
@@ -10,7 +10,7 @@ class AdminNotifier < ActionMailer::Base
   def new_user(user)
     @user = user
     if not Rails.configuration.Users.NewUserNotificationRecipients.empty? then
-      @recipients = Rails.configuration.Users.NewUserNotificationRecipients
+      @recipients = Rails.configuration.Users.NewUserNotificationRecipients.keys
       logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
 
       add_to_subject = ''
@@ -27,7 +27,7 @@ class AdminNotifier < ActionMailer::Base
   def new_inactive_user(user)
     @user = user
     if not Rails.configuration.Users.NewInactiveUserNotificationRecipients.empty? then
-      @recipients = Rails.configuration.Users.NewInactiveUserNotificationRecipients
+      @recipients = Rails.configuration.Users.NewInactiveUserNotificationRecipients.keys
       logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
       mail(to: @recipients,
            subject: "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification"
index 8c8ad8e254467cadbb5874c3f7519a574ccfe21c..f93312693307c39f1c39a85694cfe51011ecdc09 100644 (file)
@@ -422,7 +422,7 @@ class ArvadosModel < ApplicationRecord
   end
 
   def logged_attributes
-    attributes.except(*Rails.configuration.AuditLogs.UnloggedAttributes)
+    attributes.except(*Rails.configuration.AuditLogs.UnloggedAttributes.keys)
   end
 
   def self.full_text_searchable_columns
index ac00e5d39c2cce78c1dd26cfca3fba285810b80e..292cf34ccb4da99e6a8fcca06cfe69dd2c637d69 100644 (file)
@@ -521,7 +521,7 @@ class Collection < ArvadosModel
       joins("JOIN collections ON links.head_uuid = collections.uuid").
       order("links.created_at DESC")
 
-    docker_image_formats = Rails.configuration.Containers.SupportedDockerImageFormats
+    docker_image_formats = Rails.configuration.Containers.SupportedDockerImageFormats.keys.map(&:to_s)
 
     if (docker_image_formats.include? 'v1' and
         docker_image_formats.include? 'v2') or filter_compatible_format == false
index 044d83c287969cbcc61f7cec7724b8be630037de..d200bb80110869ade17386d3ebbac9cf9b8de979 100644 (file)
@@ -39,7 +39,7 @@ class Node < ArvadosModel
   api_accessible :superuser, :extend => :user do |t|
     t.add :first_ping_at
     t.add :info
-    t.add lambda { |x| Rails.configuration.Containers.SLURM.Managed.ComputeNodeNameservers }, :as => :nameservers
+    t.add lambda { |x| Rails.configuration.Containers.SLURM.Managed.ComputeNodeNameservers.keys }, :as => :nameservers
   end
 
   after_initialize do
index 5e0e39f9bedb090d7b566b3ddf9389c8f10f95dd..e6a0795402b36415cc6bc4019a6b760fd4396435 100644 (file)
@@ -115,6 +115,9 @@ class Repository < ArvadosModel
     else
       base = URI(default_base_fmt % prefix)
     end
+    if base.path == ""
+      base.path = "/"
+    end
     if base.scheme == "ssh"
       '%s@%s:%s.git' % [base.user, base.host, name]
     else
index fc5ae0a49db5b2fb05036a0587759d92378358cf..ee44812e0075aaa1eb0b71a66a8c60ec73907930 100644 (file)
@@ -381,7 +381,7 @@ class User < ArvadosModel
     quoted_name = self.class.connection.quote_string(basename)
     next_username = basename
     next_suffix = 1
-    while Rails.configuration.Users.AutoSetupUsernameBlacklist.include?(next_username)
+    while Rails.configuration.Users.AutoSetupUsernameBlacklist[next_username]
       next_suffix += 1
       next_username = "%s%i" % [basename, next_suffix]
     end
index 3ad3cac2b27e3f750c8607856d3092d72ff6e669..4e1936b7716c65561d7592debbe9a3d5c5cfb51e 100644 (file)
@@ -86,6 +86,5 @@ test:
   workbench_address: https://localhost:3001/
   git_repositories_dir: <%= Rails.root.join 'tmp', 'git', 'test' %>
   git_internal_dir: <%= Rails.root.join 'tmp', 'internal.git' %>
-  websocket_address: "wss://0.0.0.0:<%= ENV['ARVADOS_TEST_WSS_PORT'] %>/websocket"
   trash_sweep_interval: -1
   docker_image_formats: ["v1"]
index cf4b842c4a93cef5900c66aa67cbfd259bb031db..39d50cbbb33c62acf03c7340cc9afb05b4229a6c 100644 (file)
@@ -73,12 +73,20 @@ end
 # Now make a copy
 $arvados_config = $arvados_config_global.deep_dup
 
+def arrayToHash cfg, k, v
+  val = {}
+  v.each do |entry|
+    val[entry.to_s] = {}
+  end
+  ConfigLoader.set_cfg cfg, k, val
+end
+
 # Declare all our configuration items.
 arvcfg = ConfigLoader.new
 arvcfg.declare_config "ClusterID", NonemptyString, :uuid_prefix
 arvcfg.declare_config "ManagementToken", String, :ManagementToken
 arvcfg.declare_config "Git.Repositories", String, :git_repositories_dir
-arvcfg.declare_config "API.DisabledAPIs", Array, :disable_api_methods
+arvcfg.declare_config "API.DisabledAPIs", Hash, :disable_api_methods, ->(cfg, k, v) { arrayToHash cfg, "API.DisabledAPIs", v }
 arvcfg.declare_config "API.MaxRequestSize", Integer, :max_request_size
 arvcfg.declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
 arvcfg.declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response
@@ -87,7 +95,7 @@ arvcfg.declare_config "API.RailsSessionSecretToken", NonemptyString, :secret_tok
 arvcfg.declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users
 arvcfg.declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid
 arvcfg.declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository
-arvcfg.declare_config "Users.AutoSetupUsernameBlacklist", Array, :auto_setup_name_blacklist
+arvcfg.declare_config "Users.AutoSetupUsernameBlacklist", Hash, :auto_setup_name_blacklist, ->(cfg, k, v) { arrayToHash cfg, "Users.AutoSetupUsernameBlacklist", v }
 arvcfg.declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active
 arvcfg.declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user
 arvcfg.declare_config "Users.AutoAdminFirstUser", Boolean, :auto_admin_first_user
@@ -95,15 +103,15 @@ arvcfg.declare_config "Users.UserProfileNotificationAddress", String, :user_prof
 arvcfg.declare_config "Users.AdminNotifierEmailFrom", String, :admin_notifier_email_from
 arvcfg.declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix
 arvcfg.declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from
-arvcfg.declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients
-arvcfg.declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients
+arvcfg.declare_config "Users.NewUserNotificationRecipients", Hash, :new_user_notification_recipients, ->(cfg, k, v) { arrayToHash cfg, "Users.NewUserNotificationRecipients", v }
+arvcfg.declare_config "Users.NewInactiveUserNotificationRecipients", Hash, :new_inactive_user_notification_recipients, ->(cfg, k, v) { arrayToHash cfg, "Users.NewInactiveUserNotificationRecipients", v }
 arvcfg.declare_config "Login.ProviderAppSecret", NonemptyString, :sso_app_secret
 arvcfg.declare_config "Login.ProviderAppID", NonemptyString, :sso_app_id
 arvcfg.declare_config "TLS.Insecure", Boolean, :sso_insecure
 arvcfg.declare_config "Services.SSO.ExternalURL", NonemptyString, :sso_provider_url
 arvcfg.declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age
 arvcfg.declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch
-arvcfg.declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes
+arvcfg.declare_config "AuditLogs.UnloggedAttributes", Hash, :unlogged_attributes, ->(cfg, k, v) { arrayToHash cfg, "AuditLogs.UnloggedAttributes", v }
 arvcfg.declare_config "SystemLogs.MaxRequestLogParamsSize", Integer, :max_request_log_params_size
 arvcfg.declare_config "Collections.DefaultReplication", Integer, :default_collection_replication
 arvcfg.declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :default_trash_lifetime
@@ -113,7 +121,7 @@ arvcfg.declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration,
 arvcfg.declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key
 arvcfg.declare_config "Collections.BlobSigningTTL", ActiveSupport::Duration, :blob_signature_ttl
 arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Collections.BlobSigning", !v }
-arvcfg.declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
+arvcfg.declare_config "Containers.SupportedDockerImageFormats", Hash, :docker_image_formats, ->(cfg, k, v) { arrayToHash cfg, "Containers.SupportedDockerImageFormats", v }
 arvcfg.declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
 arvcfg.declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
 arvcfg.declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
@@ -135,7 +143,7 @@ arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", Pathname
 arvcfg.declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
 arvcfg.declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
 arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
-arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
+arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Hash, :compute_node_nameservers, ->(cfg, k, v) { arrayToHash cfg, "Containers.SLURM.Managed.ComputeNodeNameservers", v }
 arvcfg.declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
 arvcfg.declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Containers.JobsAPI.Enable", v.to_s }
 arvcfg.declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
index c909ae92276480d38f6a0b5ada592c4efb2ea5a7..a4fdc5a1e785119d943a0db21d5f69f11a0ae701 100644 (file)
@@ -2,38 +2,37 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-Disable_jobs_api_method_list = ["jobs.create",
-                                               "pipeline_instances.create",
-                                               "pipeline_templates.create",
-                                               "jobs.get",
-                                               "pipeline_instances.get",
-                                               "pipeline_templates.get",
-                                               "jobs.list",
-                                               "pipeline_instances.list",
-                                               "pipeline_templates.list",
-                                               "jobs.index",
-                                               "pipeline_instances.index",
-                                               "pipeline_templates.index",
-                                               "jobs.update",
-                                               "pipeline_instances.update",
-                                               "pipeline_templates.update",
-                                               "jobs.queue",
-                                               "jobs.queue_size",
-                                               "job_tasks.create",
-                                               "job_tasks.get",
-                                               "job_tasks.list",
-                                               "job_tasks.index",
-                                               "job_tasks.update",
-                                               "jobs.show",
-                                               "pipeline_instances.show",
-                                               "pipeline_templates.show",
-                                               "jobs.show",
-                                               "job_tasks.show"]
+Disable_jobs_api_method_list = {"jobs.create"=>{},
+                                "pipeline_instances.create"=>{},
+                                "pipeline_templates.create"=>{},
+                                "jobs.get"=>{},
+                                "pipeline_instances.get"=>{},
+                                "pipeline_templates.get"=>{},
+                                "jobs.list"=>{},
+                                "pipeline_instances.list"=>{},
+                                "pipeline_templates.list"=>{},
+                                "jobs.index"=>{},
+                                "pipeline_instances.index"=>{},
+                                "pipeline_templates.index"=>{},
+                                "jobs.update"=>{},
+                                "pipeline_instances.update"=>{},
+                                "pipeline_templates.update"=>{},
+                                "jobs.queue"=>{},
+                                "jobs.queue_size"=>{},
+                                "job_tasks.create"=>{},
+                                "job_tasks.get"=>{},
+                                "job_tasks.list"=>{},
+                                "job_tasks.index"=>{},
+                                "job_tasks.update"=>{},
+                                "jobs.show"=>{},
+                                "pipeline_instances.show"=>{},
+                                "pipeline_templates.show"=>{},
+                                "job_tasks.show"=>{}}
 
 def check_enable_legacy_jobs_api
   if Rails.configuration.Containers.JobsAPI.Enable == "false" ||
      (Rails.configuration.Containers.JobsAPI.Enable == "auto" &&
       Job.count == 0)
-    Rails.configuration.API.DisabledAPIs += Disable_jobs_api_method_list
+    Rails.configuration.API.DisabledAPIs.merge! Disable_jobs_api_method_list
   end
 end
index 4618305b3239dece9c30f223ac8850b47369335f..30ab89c7e2aa4527960e518cdf63a95bbaef4550 100644 (file)
@@ -431,7 +431,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
   end
 
   test 'get contents with jobs and pipeline instances disabled' do
-    Rails.configuration.API.DisabledAPIs = ['jobs.index', 'pipeline_instances.index']
+    Rails.configuration.API.DisabledAPIs = {'jobs.index'=>{}, 'pipeline_instances.index'=>{}}
 
     authorize_with :active
     get :contents, params: {
index 551eefa8787baf10e3507a8d6768484de06fb8df..76fdb0426d35e11dd7ae1f63795d2a7dce3d6909 100644 (file)
@@ -13,7 +13,11 @@ class Arvados::V1::HealthcheckControllerTest < ActionController::TestCase
     [true, 'Bearer configuredmanagementtoken', 200, '{"health":"OK"}'],
   ].each do |enabled, header, error_code, error_msg|
     test "ping when #{if enabled then 'enabled' else 'disabled' end} with header '#{header}'" do
-      Rails.configuration.ManagementToken = 'configuredmanagementtoken' if enabled
+      if enabled
+        Rails.configuration.ManagementToken = 'configuredmanagementtoken'
+      else
+        Rails.configuration.ManagementToken = ""
+      end
 
       @request.headers['Authorization'] = header
       get :ping
index b3e10bf4a4fc38eff96d181ecc77715ad0512a6f..3803a0dc45d8928d16e7174fdcb55f4e618dfb04 100644 (file)
@@ -480,8 +480,8 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
   end
 
   test 'jobs.create disabled in config' do
-    Rails.configuration.API.DisabledAPIs = ["jobs.create",
-                                               "pipeline_instances.create"]
+    Rails.configuration.API.DisabledAPIs = {"jobs.create"=>{},
+                                               "pipeline_instances.create"=>{}}
     authorize_with :active
     post :create, params: {
       job: {
index 53421a4cbcf249871b09389fc618c1790f22a776..3dd343b13cd29ac567f8244b9399c534651fbceb 100644 (file)
@@ -66,7 +66,7 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
 
   test "non-empty disable_api_methods" do
     Rails.configuration.API.DisabledAPIs =
-      ['jobs.create', 'pipeline_instances.create', 'pipeline_templates.create']
+      {'jobs.create'=>{}, 'pipeline_instances.create'=>{}, 'pipeline_templates.create'=>{}}
     get :index
     assert_response :success
     discovery_doc = JSON.parse(@response.body)
index 69e277cc2c000ae627c4b1193c629ac8cf99e7dc..2637e77937b1cb8b5954abed52d66c382fe4f24a 100644 (file)
@@ -515,7 +515,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   test "Container.resolve_container_image(pdh)" do
     set_user_from_auth :active
     [[:docker_image, 'v1'], [:docker_image_1_12, 'v2']].each do |coll, ver|
-      Rails.configuration.Containers.SupportedDockerImageFormats = [ver]
+      Rails.configuration.Containers.SupportedDockerImageFormats = {ver=>{}}
       pdh = collections(coll).portable_data_hash
       resolved = Container.resolve_container_image(pdh)
       assert_equal resolved, pdh
@@ -541,7 +541,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "migrated docker image" do
-    Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
+    Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
     add_docker19_migration_link
 
     # Test that it returns only v2 images even though request is for v1 image.
@@ -559,7 +559,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "use unmigrated docker image" do
-    Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
+    Rails.configuration.Containers.SupportedDockerImageFormats = {'v1'=>{}}
     add_docker19_migration_link
 
     # Test that it returns only supported v1 images even though there is a
@@ -578,7 +578,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "incompatible docker image v1" do
-    Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
+    Rails.configuration.Containers.SupportedDockerImageFormats = {'v1'=>{}}
     add_docker19_migration_link
 
     # Don't return unsupported v2 image even if we ask for it directly.
@@ -591,7 +591,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "incompatible docker image v2" do
-    Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
+    Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
     # No migration link, don't return unsupported v1 image,
 
     set_user_from_auth :active
index f47a1c10f9b0d1b9d9cf6031c3d5b87446a9a29b..764aac3e4734dc94d6cdbe66b03f39305df51d1c 100644 (file)
@@ -426,7 +426,7 @@ class JobTest < ActiveSupport::TestCase
   end
 
   test "use migrated docker image if requesting old-format image by tag" do
-    Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
+    Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
     add_docker19_migration_link
     job = Job.create!(
       job_attrs(
@@ -438,7 +438,7 @@ class JobTest < ActiveSupport::TestCase
   end
 
   test "use migrated docker image if requesting old-format image by pdh" do
-    Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
+    Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
     add_docker19_migration_link
     job = Job.create!(
       job_attrs(
@@ -455,7 +455,7 @@ class JobTest < ActiveSupport::TestCase
    [:docker_image_1_12, :docker_image_1_12, :docker_image_1_12],
   ].each do |existing_image, request_image, expect_image|
     test "if a #{existing_image} job exists, #{request_image} yields #{expect_image} after migration" do
-      Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
+      Rails.configuration.Containers.SupportedDockerImageFormats = {'v1'=>{}}
 
       if existing_image == :docker_image
         oldjob = Job.create!(
@@ -477,7 +477,7 @@ class JobTest < ActiveSupport::TestCase
         end
       end
 
-      Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
+      Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
       add_docker19_migration_link
 
       # Check that both v1 and v2 images get resolved to v2.
@@ -650,7 +650,7 @@ class JobTest < ActiveSupport::TestCase
   test 'enable legacy api configuration option = true' do
     Rails.configuration.Containers.JobsAPI.Enable = "true"
     check_enable_legacy_jobs_api
-    assert_equal [], Rails.configuration.API.DisabledAPIs
+    assert_equal({}, Rails.configuration.API.DisabledAPIs)
   end
 
   test 'enable legacy api configuration option = false' do
@@ -663,7 +663,7 @@ class JobTest < ActiveSupport::TestCase
     Rails.configuration.Containers.JobsAPI.Enable = "auto"
     assert Job.count > 0
     check_enable_legacy_jobs_api
-    assert_equal [], Rails.configuration.API.DisabledAPIs
+    assert_equal({}, Rails.configuration.API.DisabledAPIs)
   end
 
   test 'enable legacy api configuration option = auto, no jobs' do
@@ -672,7 +672,7 @@ class JobTest < ActiveSupport::TestCase
       Job.destroy_all
     end
     assert_equal 0, Job.count
-    assert_equal [], Rails.configuration.API.DisabledAPIs
+    assert_equal({}, Rails.configuration.API.DisabledAPIs)
     check_enable_legacy_jobs_api
     assert_equal Disable_jobs_api_method_list, Rails.configuration.API.DisabledAPIs
   end
index 8a878ada91a9b9f0afe6da60f60036eaca3450e6..a1c8ff8a921d214d2ea27608708c0b3d19caa8f9 100644 (file)
@@ -282,7 +282,7 @@ class LogTest < ActiveSupport::TestCase
   end
 
   test "non-empty configuration.unlogged_attributes" do
-    Rails.configuration.AuditLogs.UnloggedAttributes = ["manifest_text"]
+    Rails.configuration.AuditLogs.UnloggedAttributes = {"manifest_text"=>{}}
     txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
 
     act_as_system_user do
@@ -297,7 +297,7 @@ class LogTest < ActiveSupport::TestCase
   end
 
   test "empty configuration.unlogged_attributes" do
-    Rails.configuration.AuditLogs.UnloggedAttributes = []
+    Rails.configuration.AuditLogs.UnloggedAttributes = {}
     txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
 
     act_as_system_user do
index 185653e873811d8b79e18de40c0f589b5763557a..6d2157b144d689b54534b6cd71a7de37e30b3791 100644 (file)
@@ -110,7 +110,7 @@ class UserTest < ActiveSupport::TestCase
   end
 
   test "new username set avoiding blacklist" do
-    Rails.configuration.Users.AutoSetupUsernameBlacklist = ["root"]
+    Rails.configuration.Users.AutoSetupUsernameBlacklist = {"root"=>{}}
     check_new_username_setting("root", "root2")
   end
 
@@ -341,45 +341,45 @@ class UserTest < ActiveSupport::TestCase
   test "create new user with notifications" do
     set_user_from_auth :admin
 
-    create_user_and_verify_setup_and_notifications true, 'active-notify-address@example.com', 'inactive-notify-address@example.com', nil, nil
-    create_user_and_verify_setup_and_notifications true, 'active-notify-address@example.com', [], nil, nil
-    create_user_and_verify_setup_and_notifications true, [], [], nil, nil
-    create_user_and_verify_setup_and_notifications false, 'active-notify-address@example.com', 'inactive-notify-address@example.com', nil, nil
-    create_user_and_verify_setup_and_notifications false, [], 'inactive-notify-address@example.com', nil, nil
-    create_user_and_verify_setup_and_notifications false, [], [], nil, nil
+    create_user_and_verify_setup_and_notifications true, {'active-notify-address@example.com'=>{}}, {'inactive-notify-address@example.com'=>{}}, nil, nil
+    create_user_and_verify_setup_and_notifications true, {'active-notify-address@example.com'=>{}}, {}, nil, nil
+    create_user_and_verify_setup_and_notifications true, {}, {}, nil, nil
+    create_user_and_verify_setup_and_notifications false, {'active-notify-address@example.com'=>{}}, {'inactive-notify-address@example.com'=>{}}, nil, nil
+    create_user_and_verify_setup_and_notifications false, {}, {'inactive-notify-address@example.com'=>{}}, nil, nil
+    create_user_and_verify_setup_and_notifications false, {}, {}, nil, nil
   end
 
   [
     # Easy inactive user tests.
-    [false, [], [], "inactive-none@example.com", false, false, "inactivenone"],
-    [false, [], [], "inactive-vm@example.com", true, false, "inactivevm"],
-    [false, [], [], "inactive-repo@example.com", false, true, "inactiverepo"],
-    [false, [], [], "inactive-both@example.com", true, true, "inactiveboth"],
+    [false, {}, {}, "inactive-none@example.com", false, false, "inactivenone"],
+    [false, {}, {}, "inactive-vm@example.com", true, false, "inactivevm"],
+    [false, {}, {}, "inactive-repo@example.com", false, true, "inactiverepo"],
+    [false, {}, {}, "inactive-both@example.com", true, true, "inactiveboth"],
 
     # Easy active user tests.
-    [true, "active-notify@example.com", "inactive-notify@example.com", "active-none@example.com", false, false, "activenone"],
-    [true, "active-notify@example.com", "inactive-notify@example.com", "active-vm@example.com", true, false, "activevm"],
-    [true, "active-notify@example.com", "inactive-notify@example.com", "active-repo@example.com", false, true, "activerepo"],
-    [true, "active-notify@example.com", "inactive-notify@example.com", "active-both@example.com", true, true, "activeboth"],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-none@example.com", false, false, "activenone"],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-vm@example.com", true, false, "activevm"],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-repo@example.com", false, true, "activerepo"],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-both@example.com", true, true, "activeboth"],
 
     # Test users with malformed e-mail addresses.
-    [false, [], [], nil, true, true, nil],
-    [false, [], [], "arvados", true, true, nil],
-    [false, [], [], "@example.com", true, true, nil],
-    [true, "active-notify@example.com", "inactive-notify@example.com", "*!*@example.com", true, false, nil],
-    [true, "active-notify@example.com", "inactive-notify@example.com", "*!*@example.com", false, false, nil],
+    [false, {}, {}, nil, true, true, nil],
+    [false, {}, {}, "arvados", true, true, nil],
+    [false, {}, {}, "@example.com", true, true, nil],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "*!*@example.com", true, false, nil],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "*!*@example.com", false, false, nil],
 
     # Test users with various username transformations.
-    [false, [], [], "arvados@example.com", false, false, "arvados2"],
-    [true, "active-notify@example.com", "inactive-notify@example.com", "arvados@example.com", false, false, "arvados2"],
-    [true, "active-notify@example.com", "inactive-notify@example.com", "root@example.com", true, false, "root2"],
-    [false, "active-notify@example.com", "inactive-notify@example.com", "root@example.com", true, false, "root2"],
-    [true, "active-notify@example.com", "inactive-notify@example.com", "roo_t@example.com", false, true, "root2"],
-    [false, [], [], "^^incorrect_format@example.com", true, true, "incorrectformat"],
-    [true, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", true, true, "ad9"],
-    [true, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", false, false, "ad9"],
-    [false, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", true, true, "ad9"],
-    [false, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", false, false, "ad9"],
+    [false, {}, {}, "arvados@example.com", false, false, "arvados2"],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "arvados@example.com", false, false, "arvados2"],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "root@example.com", true, false, "root2"],
+    [false, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "root@example.com", true, false, "root2"],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "roo_t@example.com", false, true, "root2"],
+    [false, {}, {}, "^^incorrect_format@example.com", true, true, "incorrectformat"],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", true, true, "ad9"],
+    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", false, false, "ad9"],
+    [false, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", true, true, "ad9"],
+    [false, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", false, false, "ad9"],
   ].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, auto_setup_repo, expect_username|
     test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
       set_user_from_auth :admin
@@ -686,7 +686,7 @@ class UserTest < ActiveSupport::TestCase
     if not new_user_recipients.empty? then
       assert_not_nil new_user_email, 'Expected new user email after setup'
       assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_user_email.from[0]
-      assert_equal new_user_recipients, new_user_email.to[0]
+      assert_equal new_user_recipients.keys.first, new_user_email.to[0]
       assert_equal new_user_email_subject, new_user_email.subject
     else
       assert_nil new_user_email, 'Did not expect new user email after setup'
@@ -696,7 +696,7 @@ class UserTest < ActiveSupport::TestCase
       if not inactive_recipients.empty? then
         assert_not_nil new_inactive_user_email, 'Expected new inactive user email after setup'
         assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_inactive_user_email.from[0]
-        assert_equal inactive_recipients, new_inactive_user_email.to[0]
+        assert_equal inactive_recipients.keys.first, new_inactive_user_email.to[0]
         assert_equal "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification", new_inactive_user_email.subject
       else
         assert_nil new_inactive_user_email, 'Did not expect new inactive user email after setup'
index 889e41095e8c6fb859dfbb45219657381433478c..9f69c44460caf7e03733acf619817f31e3a72327 100644 (file)
@@ -18,12 +18,13 @@ import (
        "strings"
        "time"
 
+       "git.curoverse.com/arvados.git/lib/config"
        "git.curoverse.com/arvados.git/lib/dispatchcloud"
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
-       "git.curoverse.com/arvados.git/sdk/go/config"
        "git.curoverse.com/arvados.git/sdk/go/dispatch"
        "github.com/coreos/go-systemd/daemon"
+       "github.com/ghodss/yaml"
        "github.com/sirupsen/logrus"
 )
 
@@ -35,8 +36,7 @@ type logger interface {
 const initialNiceValue int64 = 10000
 
 var (
-       version           = "dev"
-       defaultConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
+       version = "dev"
 )
 
 type Dispatcher struct {
@@ -47,26 +47,6 @@ type Dispatcher struct {
        slurm   Slurm
 
        Client arvados.Client
-
-       SbatchArguments []string
-       PollPeriod      arvados.Duration
-       PrioritySpread  int64
-
-       // crunch-run command to invoke. The container UUID will be
-       // appended. If nil, []string{"crunch-run"} will be used.
-       //
-       // Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
-       CrunchRunCommand []string
-
-       // Extra RAM to reserve (in Bytes) for SLURM job, in addition
-       // to the amount specified in the container's RuntimeConstraints
-       ReserveExtraRAM int64
-
-       // Minimum time between two attempts to run the same container
-       MinRetryPeriod arvados.Duration
-
-       // Batch size for container queries
-       BatchSize int64
 }
 
 func main() {
@@ -94,13 +74,15 @@ func (disp *Dispatcher) Run(prog string, args []string) error {
 
 // configure() loads config files. Tests skip this.
 func (disp *Dispatcher) configure(prog string, args []string) error {
+       if disp.logger == nil {
+               disp.logger = logrus.StandardLogger()
+       }
        flags := flag.NewFlagSet(prog, flag.ExitOnError)
        flags.Usage = func() { usage(flags) }
 
-       configPath := flags.String(
-               "config",
-               defaultConfigPath,
-               "`path` to JSON or YAML configuration file")
+       loader := config.NewLoader(nil, disp.logger)
+       loader.SetupFlags(flags)
+
        dumpConfig := flag.Bool(
                "dump-config",
                false,
@@ -109,8 +91,15 @@ func (disp *Dispatcher) configure(prog string, args []string) error {
                "version",
                false,
                "Print version information and exit.")
+
+       args = loader.MungeLegacyConfigArgs(logrus.StandardLogger(), args, "-legacy-crunch-dispatch-slurm-config")
+
        // Parse args; omit the first arg which is the command name
-       flags.Parse(args)
+       err := flags.Parse(args)
+
+       if err == flag.ErrHelp {
+               return nil
+       }
 
        // Print version information if requested
        if *getVersion {
@@ -120,18 +109,18 @@ func (disp *Dispatcher) configure(prog string, args []string) error {
 
        disp.logger.Printf("crunch-dispatch-slurm %s started", version)
 
-       err := disp.readConfig(*configPath)
+       cfg, err := loader.Load()
        if err != nil {
                return err
        }
 
-       if disp.CrunchRunCommand == nil {
-               disp.CrunchRunCommand = []string{"crunch-run"}
+       if disp.cluster, err = cfg.GetCluster(""); err != nil {
+               return fmt.Errorf("config error: %s", err)
        }
 
-       if disp.PollPeriod == 0 {
-               disp.PollPeriod = arvados.Duration(10 * time.Second)
-       }
+       disp.Client.APIHost = disp.cluster.Services.Controller.ExternalURL.Host
+       disp.Client.AuthToken = disp.cluster.SystemRootToken
+       disp.Client.Insecure = disp.cluster.TLS.Insecure
 
        if disp.Client.APIHost != "" || disp.Client.AuthToken != "" {
                // Copy real configs into env vars so [a]
@@ -150,16 +139,14 @@ func (disp *Dispatcher) configure(prog string, args []string) error {
        }
 
        if *dumpConfig {
-               return config.DumpAndExit(disp)
-       }
-
-       siteConfig, err := arvados.GetConfig(arvados.DefaultConfigFile)
-       if os.IsNotExist(err) {
-               disp.logger.Warnf("no cluster config (%s), proceeding with no node types defined", err)
-       } else if err != nil {
-               return fmt.Errorf("error loading config: %s", err)
-       } else if disp.cluster, err = siteConfig.GetCluster(""); err != nil {
-               return fmt.Errorf("config error: %s", err)
+               out, err := yaml.Marshal(cfg)
+               if err != nil {
+                       return err
+               }
+               _, err = os.Stdout.Write(out)
+               if err != nil {
+                       return err
+               }
        }
 
        return nil
@@ -167,9 +154,6 @@ func (disp *Dispatcher) configure(prog string, args []string) error {
 
 // setup() initializes private fields after configure().
 func (disp *Dispatcher) setup() {
-       if disp.logger == nil {
-               disp.logger = logrus.StandardLogger()
-       }
        arv, err := arvadosclient.MakeArvadosClient()
        if err != nil {
                disp.logger.Fatalf("Error making Arvados client: %v", err)
@@ -179,17 +163,17 @@ func (disp *Dispatcher) setup() {
        disp.slurm = NewSlurmCLI()
        disp.sqCheck = &SqueueChecker{
                Logger:         disp.logger,
-               Period:         time.Duration(disp.PollPeriod),
-               PrioritySpread: disp.PrioritySpread,
+               Period:         time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
+               PrioritySpread: disp.cluster.Containers.SLURM.PrioritySpread,
                Slurm:          disp.slurm,
        }
        disp.Dispatcher = &dispatch.Dispatcher{
                Arv:            arv,
                Logger:         disp.logger,
-               BatchSize:      disp.BatchSize,
+               BatchSize:      disp.cluster.API.MaxItemsPerResponse,
                RunContainer:   disp.runContainer,
-               PollPeriod:     time.Duration(disp.PollPeriod),
-               MinRetryPeriod: time.Duration(disp.MinRetryPeriod),
+               PollPeriod:     time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
+               MinRetryPeriod: time.Duration(disp.cluster.Containers.MinRetryPeriod),
        }
 }
 
@@ -227,7 +211,9 @@ func (disp *Dispatcher) checkSqueueForOrphans() {
 }
 
 func (disp *Dispatcher) slurmConstraintArgs(container arvados.Container) []string {
-       mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM+disp.ReserveExtraRAM) / float64(1048576)))
+       mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+
+               container.RuntimeConstraints.KeepCacheRAM+
+               int64(disp.cluster.Containers.ReserveExtraRAM)) / float64(1048576)))
 
        disk := dispatchcloud.EstimateScratchSpace(&container)
        disk = int64(math.Ceil(float64(disk) / float64(1048576)))
@@ -240,7 +226,7 @@ func (disp *Dispatcher) slurmConstraintArgs(container arvados.Container) []strin
 
 func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
        var args []string
-       args = append(args, disp.SbatchArguments...)
+       args = append(args, disp.cluster.Containers.SLURM.SbatchArgumentsList...)
        args = append(args, "--job-name="+container.UUID, fmt.Sprintf("--nice=%d", initialNiceValue), "--no-requeue")
 
        if disp.cluster == nil {
@@ -288,7 +274,9 @@ func (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Contain
 
        if ctr.State == dispatch.Locked && !disp.sqCheck.HasUUID(ctr.UUID) {
                log.Printf("Submitting container %s to slurm", ctr.UUID)
-               if err := disp.submit(ctr, disp.CrunchRunCommand); err != nil {
+               cmd := []string{disp.cluster.Containers.CrunchRunCommand}
+               cmd = append(cmd, disp.cluster.Containers.CrunchRunArgumentsList...)
+               if err := disp.submit(ctr, cmd); err != nil {
                        var text string
                        if err, ok := err.(dispatchcloud.ConstraintsNotSatisfiableError); ok {
                                var logBuf bytes.Buffer
@@ -379,12 +367,3 @@ func (disp *Dispatcher) scancel(ctr arvados.Container) {
                time.Sleep(time.Second)
        }
 }
-
-func (disp *Dispatcher) readConfig(path string) error {
-       err := config.LoadFile(disp, path)
-       if err != nil && os.IsNotExist(err) && path == defaultConfigPath {
-               log.Printf("Config not specified. Continue with default configuration.")
-               err = nil
-       }
-       return err
-}
index eea102012befe3c09dbb22a21c5b2a5ad532af4e..6007c6d4a80c5e3d151ed96485fdfabf56c92d1b 100644 (file)
@@ -11,6 +11,7 @@ import (
        "fmt"
        "io"
        "io/ioutil"
+       "log"
        "net/http"
        "net/http/httptest"
        "os"
@@ -45,6 +46,7 @@ func (s *IntegrationSuite) SetUpTest(c *C) {
        arvadostest.StartAPI()
        os.Setenv("ARVADOS_API_TOKEN", arvadostest.Dispatch1Token)
        s.disp = Dispatcher{}
+       s.disp.cluster = &arvados.Cluster{}
        s.disp.setup()
        s.slurm = slurmFake{}
 }
@@ -118,7 +120,7 @@ func (s *IntegrationSuite) integrationTest(c *C,
        c.Check(err, IsNil)
        c.Assert(len(containers.Items), Equals, 1)
 
-       s.disp.CrunchRunCommand = []string{"echo"}
+       s.disp.cluster.Containers.CrunchRunCommand = "echo"
 
        ctx, cancel := context.WithCancel(context.Background())
        doneRun := make(chan struct{})
@@ -243,6 +245,7 @@ type StubbedSuite struct {
 
 func (s *StubbedSuite) SetUpTest(c *C) {
        s.disp = Dispatcher{}
+       s.disp.cluster = &arvados.Cluster{}
        s.disp.setup()
 }
 
@@ -272,7 +275,7 @@ func (s *StubbedSuite) testWithServerStub(c *C, apiStubResponses map[string]arva
        logrus.SetOutput(io.MultiWriter(buf, os.Stderr))
        defer logrus.SetOutput(os.Stderr)
 
-       s.disp.CrunchRunCommand = []string{crunchCmd}
+       s.disp.cluster.Containers.CrunchRunCommand = "crunchCmd"
 
        ctx, cancel := context.WithCancel(context.Background())
        dispatcher := dispatch.Dispatcher{
@@ -302,51 +305,6 @@ func (s *StubbedSuite) testWithServerStub(c *C, apiStubResponses map[string]arva
        c.Check(buf.String(), Matches, `(?ms).*`+expected+`.*`)
 }
 
-func (s *StubbedSuite) TestNoSuchConfigFile(c *C) {
-       err := s.disp.readConfig("/nosuchdir89j7879/8hjwr7ojgyy7")
-       c.Assert(err, NotNil)
-}
-
-func (s *StubbedSuite) TestBadSbatchArgsConfig(c *C) {
-       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
-       c.Check(err, IsNil)
-       defer os.Remove(tmpfile.Name())
-
-       _, err = tmpfile.Write([]byte(`{"SbatchArguments": "oops this is not a string array"}`))
-       c.Check(err, IsNil)
-
-       err = s.disp.readConfig(tmpfile.Name())
-       c.Assert(err, NotNil)
-}
-
-func (s *StubbedSuite) TestNoSuchArgInConfigIgnored(c *C) {
-       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
-       c.Check(err, IsNil)
-       defer os.Remove(tmpfile.Name())
-
-       _, err = tmpfile.Write([]byte(`{"NoSuchArg": "Nobody loves me, not one tiny hunk."}`))
-       c.Check(err, IsNil)
-
-       err = s.disp.readConfig(tmpfile.Name())
-       c.Assert(err, IsNil)
-       c.Check(0, Equals, len(s.disp.SbatchArguments))
-}
-
-func (s *StubbedSuite) TestReadConfig(c *C) {
-       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
-       c.Check(err, IsNil)
-       defer os.Remove(tmpfile.Name())
-
-       args := []string{"--arg1=v1", "--arg2", "--arg3=v3"}
-       argsS := `{"SbatchArguments": ["--arg1=v1",  "--arg2", "--arg3=v3"]}`
-       _, err = tmpfile.Write([]byte(argsS))
-       c.Check(err, IsNil)
-
-       err = s.disp.readConfig(tmpfile.Name())
-       c.Assert(err, IsNil)
-       c.Check(args, DeepEquals, s.disp.SbatchArguments)
-}
-
 func (s *StubbedSuite) TestSbatchArgs(c *C) {
        container := arvados.Container{
                UUID:               "123",
@@ -360,7 +318,7 @@ func (s *StubbedSuite) TestSbatchArgs(c *C) {
                {"--arg1=v1", "--arg2"},
        } {
                c.Logf("%#v", defaults)
-               s.disp.SbatchArguments = defaults
+               s.disp.cluster.Containers.SLURM.SbatchArgumentsList = defaults
 
                args, err := s.disp.sbatchArgs(container)
                c.Check(args, DeepEquals, append(defaults, "--job-name=123", "--nice=10000", "--no-requeue", "--mem=239", "--cpus-per-task=2", "--tmp=0"))
@@ -432,3 +390,45 @@ func (s *StubbedSuite) TestSbatchPartition(c *C) {
        })
        c.Check(err, IsNil)
 }
+
+func (s *StubbedSuite) TestLoadLegacyConfig(c *C) {
+       content := []byte(`
+Client:
+  APIHost: example.com
+  AuthToken: abcdefg
+SbatchArguments: ["--foo", "bar"]
+PollPeriod: 12s
+PrioritySpread: 42
+CrunchRunCommand: ["x-crunch-run", "--cgroup-parent-subsystem=memory"]
+ReserveExtraRAM: 12345
+MinRetryPeriod: 13s
+BatchSize: 99
+`)
+       tmpfile, err := ioutil.TempFile("", "example")
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       defer os.Remove(tmpfile.Name()) // clean up
+
+       if _, err := tmpfile.Write(content); err != nil {
+               log.Fatal(err)
+       }
+       if err := tmpfile.Close(); err != nil {
+               log.Fatal(err)
+
+       }
+       err = s.disp.configure("crunch-dispatch-slurm", []string{"-config", tmpfile.Name()})
+       c.Check(err, IsNil)
+
+       c.Check(s.disp.cluster.Services.Controller.ExternalURL, Equals, arvados.URL{Scheme: "https", Host: "example.com"})
+       c.Check(s.disp.cluster.SystemRootToken, Equals, "abcdefg")
+       c.Check(s.disp.cluster.Containers.SLURM.SbatchArgumentsList, DeepEquals, []string{"--foo", "bar"})
+       c.Check(s.disp.cluster.Containers.CloudVMs.PollInterval, Equals, arvados.Duration(12*time.Second))
+       c.Check(s.disp.cluster.Containers.SLURM.PrioritySpread, Equals, int64(42))
+       c.Check(s.disp.cluster.Containers.CrunchRunCommand, Equals, "x-crunch-run")
+       c.Check(s.disp.cluster.Containers.CrunchRunArgumentsList, DeepEquals, []string{"--cgroup-parent-subsystem=memory"})
+       c.Check(s.disp.cluster.Containers.ReserveExtraRAM, Equals, arvados.ByteSize(12345))
+       c.Check(s.disp.cluster.Containers.MinRetryPeriod, Equals, arvados.Duration(13*time.Second))
+       c.Check(s.disp.cluster.API.MaxItemsPerResponse, Equals, 99)
+}
index 4dc95bc3dfae529069595cc996ef23ab1b83cb09..5796ad7a278cfbb79df34210d3c81880e979a67b 100644 (file)
@@ -6,9 +6,11 @@ package main
 
 import (
        "io/ioutil"
+       "net/url"
        "os"
        "path/filepath"
 
+       "git.curoverse.com/arvados.git/lib/config"
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        check "gopkg.in/check.v1"
@@ -203,7 +205,11 @@ func (s *GitMountSuite) checkTmpdirContents(c *check.C, expect []string) {
 func (*GitMountSuite) useTestGitServer(c *check.C) {
        git_client.InstallProtocol("https", git_http.NewClient(arvados.InsecureHTTPClient))
 
-       port, err := ioutil.ReadFile("../../tmp/arv-git-httpd-ssl.port")
+       loader := config.NewLoader(nil, nil)
+       cfg, err := loader.Load()
        c.Assert(err, check.IsNil)
-       discoveryMap["gitUrl"] = "https://localhost:" + string(port)
+       cluster, err := cfg.GetCluster("")
+       c.Assert(err, check.IsNil)
+
+       discoveryMap["gitUrl"] = (*url.URL)(&cluster.Services.GitHTTP.ExternalURL).String()
 }
index eff59d2f72104407615302109bd6967e3a9b3fff..f0283b6114a05fcf7c29eb72b2307671fc0b499c 100644 (file)
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    arvados-login-sync (1.4.0.20190701162225)
+    arvados-login-sync (1.4.0.20190709140013)
       arvados (~> 1.3.0, >= 1.3.0)
 
 GEM
diff --git a/services/ws/config.go b/services/ws/config.go
deleted file mode 100644 (file)
index ead1ec2..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package main
-
-import (
-       "time"
-
-       "git.curoverse.com/arvados.git/sdk/go/arvados"
-)
-
-type wsConfig struct {
-       Client       arvados.Client
-       Postgres     arvados.PostgreSQLConnection
-       PostgresPool int
-       Listen       string
-       LogLevel     string
-       LogFormat    string
-
-       PingTimeout      arvados.Duration
-       ClientEventQueue int
-       ServerEventQueue int
-
-       ManagementToken string
-}
-
-func defaultConfig() wsConfig {
-       return wsConfig{
-               Client: arvados.Client{
-                       APIHost: "localhost:443",
-               },
-               Postgres: arvados.PostgreSQLConnection{
-                       "dbname":                    "arvados_production",
-                       "user":                      "arvados",
-                       "password":                  "xyzzy",
-                       "host":                      "localhost",
-                       "connect_timeout":           "30",
-                       "sslmode":                   "require",
-                       "fallback_application_name": "arvados-ws",
-               },
-               PostgresPool:     64,
-               LogLevel:         "info",
-               LogFormat:        "json",
-               PingTimeout:      arvados.Duration(time.Minute),
-               ClientEventQueue: 64,
-               ServerEventQueue: 4,
-       }
-}
index a0006a4f8a8e0e70e7488f6ce4dee4ac4359984c..de8e6328dbaadb0ba5568a229f6e62193ecd28cb 100644 (file)
@@ -7,47 +7,71 @@ package main
 import (
        "flag"
        "fmt"
+       "os"
 
-       "git.curoverse.com/arvados.git/sdk/go/config"
+       "git.curoverse.com/arvados.git/lib/config"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "github.com/ghodss/yaml"
+       "github.com/sirupsen/logrus"
 )
 
 var logger = ctxlog.FromContext
 var version = "dev"
 
-func main() {
-       log := logger(nil)
+func configure(log logrus.FieldLogger, args []string) *arvados.Cluster {
+       flags := flag.NewFlagSet(args[0], flag.ExitOnError)
+       dumpConfig := flags.Bool("dump-config", false, "show current configuration and exit")
+       getVersion := flags.Bool("version", false, "Print version information and exit.")
+
+       loader := config.NewLoader(nil, log)
+       loader.SetupFlags(flags)
+       args = loader.MungeLegacyConfigArgs(log, args[1:], "-legacy-ws-config")
 
-       configPath := flag.String("config", "/etc/arvados/ws/ws.yml", "`path` to config file")
-       dumpConfig := flag.Bool("dump-config", false, "show current configuration and exit")
-       getVersion := flag.Bool("version", false, "Print version information and exit.")
-       cfg := defaultConfig()
-       flag.Parse()
+       flags.Parse(args)
 
        // Print version information if requested
        if *getVersion {
                fmt.Printf("arvados-ws %s\n", version)
-               return
+               return nil
        }
 
-       err := config.LoadFile(&cfg, *configPath)
+       cfg, err := loader.Load()
        if err != nil {
                log.Fatal(err)
        }
 
-       ctxlog.SetLevel(cfg.LogLevel)
-       ctxlog.SetFormat(cfg.LogFormat)
+       cluster, err := cfg.GetCluster("")
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       ctxlog.SetLevel(cluster.SystemLogs.LogLevel)
+       ctxlog.SetFormat(cluster.SystemLogs.Format)
 
        if *dumpConfig {
-               txt, err := config.Dump(&cfg)
+               out, err := yaml.Marshal(cfg)
                if err != nil {
                        log.Fatal(err)
                }
-               fmt.Print(string(txt))
+               _, err = os.Stdout.Write(out)
+               if err != nil {
+                       log.Fatal(err)
+               }
+               return nil
+       }
+       return cluster
+}
+
+func main() {
+       log := logger(nil)
+
+       cluster := configure(log, os.Args)
+       if cluster == nil {
                return
        }
 
        log.Printf("arvados-ws %s started", version)
-       srv := &server{wsConfig: &cfg}
+       srv := &server{cluster: cluster}
        log.Fatal(srv.Run())
 }
index a408b58bddf31b5483f799c779823cdfcd98902d..14dc63ec37b12d5524885f476a6912643847ce1c 100644 (file)
@@ -13,6 +13,7 @@ import (
        "sync/atomic"
        "time"
 
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/ctxlog"
        "git.curoverse.com/arvados.git/sdk/go/health"
        "github.com/sirupsen/logrus"
@@ -27,7 +28,8 @@ type wsConn interface {
 }
 
 type router struct {
-       Config         *wsConfig
+       client         arvados.Client
+       cluster        *arvados.Cluster
        eventSource    eventSource
        newPermChecker func() permChecker
 
@@ -52,8 +54,8 @@ type debugStatuser interface {
 
 func (rtr *router) setup() {
        rtr.handler = &handler{
-               PingTimeout: rtr.Config.PingTimeout.Duration(),
-               QueueSize:   rtr.Config.ClientEventQueue,
+               PingTimeout: time.Duration(rtr.cluster.API.SendTimeout),
+               QueueSize:   rtr.cluster.API.WebsocketClientEventQueue,
        }
        rtr.mux = http.NewServeMux()
        rtr.mux.Handle("/websocket", rtr.makeServer(newSessionV0))
@@ -62,7 +64,7 @@ func (rtr *router) setup() {
        rtr.mux.Handle("/status.json", rtr.jsonHandler(rtr.Status))
 
        rtr.mux.Handle("/_health/", &health.Handler{
-               Token:  rtr.Config.ManagementToken,
+               Token:  rtr.cluster.ManagementToken,
                Prefix: "/_health/",
                Routes: health.Routes{
                        "db": rtr.eventSource.DBHealth,
@@ -87,7 +89,7 @@ func (rtr *router) makeServer(newSession sessionFactory) *websocket.Server {
 
                        stats := rtr.handler.Handle(ws, rtr.eventSource,
                                func(ws wsConn, sendq chan<- interface{}) (session, error) {
-                                       return newSession(ws, sendq, rtr.eventSource.DB(), rtr.newPermChecker(), &rtr.Config.Client)
+                                       return newSession(ws, sendq, rtr.eventSource.DB(), rtr.newPermChecker(), &rtr.client)
                                })
 
                        log.WithFields(logrus.Fields{
index eda7ff2a486a0f9ae59ddc12bf696e3e7a8059c5..081ff53b300b38113259ea3d853821579491a9b5 100644 (file)
@@ -10,13 +10,14 @@ import (
        "sync"
        "time"
 
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
        "github.com/coreos/go-systemd/daemon"
 )
 
 type server struct {
        httpServer  *http.Server
        listener    net.Listener
-       wsConfig    *wsConfig
+       cluster     *arvados.Cluster
        eventSource *pgEventSource
        setupOnce   sync.Once
 }
@@ -40,27 +41,38 @@ func (srv *server) Run() error {
 func (srv *server) setup() {
        log := logger(nil)
 
-       ln, err := net.Listen("tcp", srv.wsConfig.Listen)
+       var listen arvados.URL
+       for listen, _ = range srv.cluster.Services.Websocket.InternalURLs {
+               break
+       }
+       ln, err := net.Listen("tcp", listen.Host)
        if err != nil {
-               log.WithField("Listen", srv.wsConfig.Listen).Fatal(err)
+               log.WithField("Listen", listen).Fatal(err)
        }
        log.WithField("Listen", ln.Addr().String()).Info("listening")
 
+       client := arvados.Client{}
+       client.APIHost = srv.cluster.Services.Controller.ExternalURL.Host
+       client.AuthToken = srv.cluster.SystemRootToken
+       client.Insecure = srv.cluster.TLS.Insecure
+
        srv.listener = ln
        srv.eventSource = &pgEventSource{
-               DataSource:   srv.wsConfig.Postgres.String(),
-               MaxOpenConns: srv.wsConfig.PostgresPool,
-               QueueSize:    srv.wsConfig.ServerEventQueue,
+               DataSource:   srv.cluster.PostgreSQL.Connection.String(),
+               MaxOpenConns: srv.cluster.PostgreSQL.ConnectionPool,
+               QueueSize:    srv.cluster.API.WebsocketServerEventQueue,
        }
+
        srv.httpServer = &http.Server{
-               Addr:           srv.wsConfig.Listen,
+               Addr:           listen.Host,
                ReadTimeout:    time.Minute,
                WriteTimeout:   time.Minute,
                MaxHeaderBytes: 1 << 20,
                Handler: &router{
-                       Config:         srv.wsConfig,
+                       cluster:        srv.cluster,
+                       client:         client,
                        eventSource:    srv.eventSource,
-                       newPermChecker: func() permChecker { return newPermChecker(srv.wsConfig.Client) },
+                       newPermChecker: func() permChecker { return newPermChecker(client) },
                },
        }
 
index b1f943857a18f495f2777c24aa3627855aa0d9f6..7eef27258b58b1c2b9a3d81b8ece6f4b35ffa019 100644 (file)
@@ -8,9 +8,11 @@ import (
        "encoding/json"
        "io/ioutil"
        "net/http"
+       "os"
        "sync"
        "time"
 
+       "git.curoverse.com/arvados.git/lib/config"
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        check "gopkg.in/check.v1"
@@ -19,29 +21,42 @@ import (
 var _ = check.Suite(&serverSuite{})
 
 type serverSuite struct {
-       cfg *wsConfig
-       srv *server
-       wg  sync.WaitGroup
+       cluster *arvados.Cluster
+       srv     *server
+       wg      sync.WaitGroup
 }
 
 func (s *serverSuite) SetUpTest(c *check.C) {
-       s.cfg = s.testConfig()
-       s.srv = &server{wsConfig: s.cfg}
+       var err error
+       s.cluster, err = s.testConfig()
+       c.Assert(err, check.IsNil)
+       s.srv = &server{cluster: s.cluster}
 }
 
-func (*serverSuite) testConfig() *wsConfig {
-       cfg := defaultConfig()
-       cfg.Client = *(arvados.NewClientFromEnv())
-       cfg.Postgres = testDBConfig()
-       cfg.Listen = ":"
-       cfg.ManagementToken = arvadostest.ManagementToken
-       return &cfg
+func (*serverSuite) testConfig() (*arvados.Cluster, error) {
+       ldr := config.NewLoader(nil, nil)
+       cfg, err := ldr.Load()
+       if err != nil {
+               return nil, err
+       }
+       cluster, err := cfg.GetCluster("")
+       if err != nil {
+               return nil, err
+       }
+       client := arvados.NewClientFromEnv()
+       cluster.Services.Controller.ExternalURL.Host = client.APIHost
+       cluster.SystemRootToken = client.AuthToken
+       cluster.TLS.Insecure = client.Insecure
+       cluster.PostgreSQL.Connection = testDBConfig()
+       cluster.Services.Websocket.InternalURLs = map[arvados.URL]arvados.ServiceInstance{arvados.URL{Host: ":"}: arvados.ServiceInstance{}}
+       cluster.ManagementToken = arvadostest.ManagementToken
+       return cluster, nil
 }
 
 // TestBadDB ensures Run() returns an error (instead of panicking or
 // deadlocking) if it can't connect to the database server at startup.
 func (s *serverSuite) TestBadDB(c *check.C) {
-       s.cfg.Postgres["password"] = "1234"
+       s.cluster.PostgreSQL.Connection["password"] = "1234"
 
        var wg sync.WaitGroup
        wg.Add(1)
@@ -72,7 +87,7 @@ func (s *serverSuite) TestHealth(c *check.C) {
        go s.srv.Run()
        defer s.srv.Close()
        s.srv.WaitReady()
-       for _, token := range []string{"", "foo", s.cfg.ManagementToken} {
+       for _, token := range []string{"", "foo", s.cluster.ManagementToken} {
                req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/_health/ping", nil)
                c.Assert(err, check.IsNil)
                if token != "" {
@@ -80,7 +95,7 @@ func (s *serverSuite) TestHealth(c *check.C) {
                }
                resp, err := http.DefaultClient.Do(req)
                c.Check(err, check.IsNil)
-               if token == s.cfg.ManagementToken {
+               if token == s.cluster.ManagementToken {
                        c.Check(resp.StatusCode, check.Equals, http.StatusOK)
                        buf, err := ioutil.ReadAll(resp.Body)
                        c.Check(err, check.IsNil)
@@ -107,7 +122,7 @@ func (s *serverSuite) TestStatus(c *check.C) {
 }
 
 func (s *serverSuite) TestHealthDisabled(c *check.C) {
-       s.cfg.ManagementToken = ""
+       s.cluster.ManagementToken = ""
 
        go s.srv.Run()
        defer s.srv.Close()
@@ -122,3 +137,63 @@ func (s *serverSuite) TestHealthDisabled(c *check.C) {
                c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
        }
 }
+
+func (s *serverSuite) TestLoadLegacyConfig(c *check.C) {
+       content := []byte(`
+Client:
+  APIHost: example.com
+  AuthToken: abcdefg
+Postgres:
+  "dbname": "arvados_production"
+  "user": "arvados"
+  "password": "xyzzy"
+  "host": "localhost"
+  "connect_timeout": "30"
+  "sslmode": "require"
+  "fallback_application_name": "arvados-ws"
+PostgresPool: 63
+Listen: ":8765"
+LogLevel: "debug"
+LogFormat: "text"
+PingTimeout: 61s
+ClientEventQueue: 62
+ServerEventQueue:  5
+ManagementToken: qqqqq
+`)
+       tmpfile, err := ioutil.TempFile("", "example")
+       if err != nil {
+               c.Error(err)
+       }
+
+       defer os.Remove(tmpfile.Name()) // clean up
+
+       if _, err := tmpfile.Write(content); err != nil {
+               c.Error(err)
+       }
+       if err := tmpfile.Close(); err != nil {
+               c.Error(err)
+
+       }
+       cluster := configure(logger(nil), []string{"arvados-ws", "-config", tmpfile.Name()})
+       c.Check(cluster, check.NotNil)
+
+       c.Check(cluster.Services.Controller.ExternalURL, check.Equals, arvados.URL{Scheme: "https", Host: "example.com"})
+       c.Check(cluster.SystemRootToken, check.Equals, "abcdefg")
+
+       c.Check(cluster.PostgreSQL.Connection, check.DeepEquals, arvados.PostgreSQLConnection{
+               "connect_timeout":           "30",
+               "dbname":                    "arvados_production",
+               "fallback_application_name": "arvados-ws",
+               "host":                      "localhost",
+               "password":                  "xyzzy",
+               "sslmode":                   "require",
+               "user":                      "arvados"})
+       c.Check(cluster.PostgreSQL.ConnectionPool, check.Equals, 63)
+       c.Check(cluster.Services.Websocket.InternalURLs[arvados.URL{Host: ":8765"}], check.NotNil)
+       c.Check(cluster.SystemLogs.LogLevel, check.Equals, "debug")
+       c.Check(cluster.SystemLogs.Format, check.Equals, "text")
+       c.Check(cluster.API.SendTimeout, check.Equals, arvados.Duration(61*time.Second))
+       c.Check(cluster.API.WebsocketClientEventQueue, check.Equals, 62)
+       c.Check(cluster.API.WebsocketServerEventQueue, check.Equals, 5)
+       c.Check(cluster.ManagementToken, check.Equals, "qqqqq")
+}
diff --git a/tools/jenkins/submit-ci-dev.sh b/tools/jenkins/submit-ci-dev.sh
new file mode 100755 (executable)
index 0000000..7188c15
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+head=$(git log --first-parent --max-count=1 --format=%H)
+curl -X POST https://ci.curoverse.com/job/developer-run-tests/build \
+  --user $(cat ~/.jenkins.ci.curoverse.com) \
+  --data-urlencode json='{"parameter": [{"name":"git_hash", "value":"'$head'"}]}'