16636: Merge branch 'master' into 16636-more-metrics
author    Ward Vandewege <ward@curii.com>
          Tue, 15 Sep 2020 19:32:15 +0000 (15:32 -0400)
committer Ward Vandewege <ward@curii.com>
          Tue, 15 Sep 2020 19:32:30 +0000 (15:32 -0400)
Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

24 files changed:
lib/config/config.default.yml
lib/config/generated_config.go
lib/crunchrun/background.go
lib/dispatchcloud/dispatcher_test.go
lib/dispatchcloud/test/stub_driver.go
lib/dispatchcloud/worker/pool.go
lib/dispatchcloud/worker/worker.go
sdk/go/arvados/config.go
sdk/python/tests/run_test_server.py
services/api/app/controllers/arvados/v1/users_controller.rb
services/api/app/models/arvados_model.rb
services/api/lib/config_loader.rb
services/api/lib/enable_jobs_api.rb
services/api/test/functional/arvados/v1/groups_controller_test.rb
services/api/test/functional/arvados/v1/schema_controller_test.rb
services/api/test/functional/arvados/v1/users_controller_test.rb
services/api/test/functional/user_sessions_controller_test.rb
services/api/test/test_helper.rb
services/api/test/unit/application_test.rb
services/api/test/unit/collection_test.rb
services/api/test/unit/container_request_test.rb
services/api/test/unit/job_test.rb
services/api/test/unit/log_test.rb
services/api/test/unit/user_test.rb

diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index b1865a2217ce99c48a13ae8a17e4cf10d336cbf0..15e7c7c06ce723114cafc86d8e4c6ec0c2b99dff 100644
@@ -938,6 +938,11 @@ Clusters:
         # Time before repeating SIGTERM when killing a container.
         TimeoutSignal: 5s
 
+        # Time to give up on a process (most likely arv-mount) that
+        # still holds a container lockfile after its main supervisor
+        # process has exited, and declare the instance broken.
+        TimeoutStaleRunLock: 5s
+
         # Time to give up on SIGTERM and write off the worker.
         TimeoutTERM: 2m
 
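For context, a minimal sketch of how a timeout like TimeoutStaleRunLock is typically enforced: the first probe that reports a stale run lock starts a clock, and the instance is declared broken only once the stale report has persisted past the configured window. The type and field names below are illustrative, not the actual dispatcher code (the real logic is in the lib/dispatchcloud/worker changes further down).

    // Illustrative sketch only -- names here are hypothetical.
    package main

    import (
        "fmt"
        "time"
    )

    type staleLockTracker struct {
        staleSince          time.Time
        timeoutStaleRunLock time.Duration
    }

    // observe records one probe result and reports whether the instance
    // should now be treated as broken.
    func (t *staleLockTracker) observe(probeReportedStale bool, now time.Time) bool {
        switch {
        case !probeReportedStale:
            t.staleSince = time.Time{} // reset the timer
            return false
        case t.staleSince.IsZero():
            t.staleSince = now // first stale report starts the clock
            return false
        default:
            return now.Sub(t.staleSince) > t.timeoutStaleRunLock
        }
    }

    func main() {
        tr := staleLockTracker{timeoutStaleRunLock: 5 * time.Second}
        start := time.Now()
        fmt.Println(tr.observe(true, start))                    // false: clock starts
        fmt.Println(tr.observe(true, start.Add(6*time.Second))) // true: past the 5s window
    }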
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index 201ae3604537f9f44a9e788320b7262685944f98..7ed332151b8bbacaa81fa7352e8251058296b8c8 100644
@@ -944,6 +944,11 @@ Clusters:
         # Time before repeating SIGTERM when killing a container.
         TimeoutSignal: 5s
 
+        # Time to give up on a process (most likely arv-mount) that
+        # still holds a container lockfile after its main supervisor
+        # process has exited, and declare the instance broken.
+        TimeoutStaleRunLock: 5s
+
         # Time to give up on SIGTERM and write off the worker.
         TimeoutTERM: 2m
 
diff --git a/lib/crunchrun/background.go b/lib/crunchrun/background.go
index bf039afa0ad53799183607fe9795b5556f615bad..8cdba72c10d3c5902225456de9389bcc70b6dbca 100644
@@ -218,6 +218,24 @@ func ListProcesses(stdout, stderr io.Writer) int {
                        return nil
                }
 
+               proc, err := os.FindProcess(pi.PID)
+               if err != nil {
+                       // FindProcess should have succeeded, even if the
+                       // process does not exist.
+                       fmt.Fprintf(stderr, "%s: find process %d: %s", path, pi.PID, err)
+                       return nil
+               }
+               err = proc.Signal(syscall.Signal(0))
+               if err != nil {
+                       // Process is dead, even though lockfile was
+                       // still locked. Most likely a stuck arv-mount
+                       // process that inherited the lock from
+                       // crunch-run. Report container UUID as
+                       // "stale".
+                       fmt.Fprintln(stdout, pi.UUID, "stale")
+                       return nil
+               }
+
                fmt.Fprintln(stdout, pi.UUID)
                return nil
        }))
diff --git a/lib/dispatchcloud/dispatcher_test.go b/lib/dispatchcloud/dispatcher_test.go
index 80cb28f3508e28a810da019c913bb820ca5f3f09..9f1eb098e01aa029ebe735f2f8a7a956bf56c236 100644
@@ -66,6 +66,7 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
                                ProbeInterval:        arvados.Duration(5 * time.Millisecond),
                                MaxProbesPerSecond:   1000,
                                TimeoutSignal:        arvados.Duration(3 * time.Millisecond),
+                               TimeoutStaleRunLock:  arvados.Duration(3 * time.Millisecond),
                                TimeoutTERM:          arvados.Duration(20 * time.Millisecond),
                                ResourceTags:         map[string]string{"testtag": "test value"},
                                TagKeyPrefix:         "test:",
@@ -169,6 +170,7 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
                        stubvm.ReportBroken = time.Now().Add(time.Duration(rand.Int63n(200)) * time.Millisecond)
                default:
                        stubvm.CrunchRunCrashRate = 0.1
+                       stubvm.ArvMountDeadlockRate = 0.1
                }
        }
        s.stubDriver.Bugf = c.Errorf
diff --git a/lib/dispatchcloud/test/stub_driver.go b/lib/dispatchcloud/test/stub_driver.go
index 132bd4d695f0ef88095951b151be592029c31328..4d32cf221ce49461e092a834ad192460bc37a49d 100644
@@ -131,7 +131,7 @@ func (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID,
                tags:         copyTags(tags),
                providerType: it.ProviderType,
                initCommand:  cmd,
-               running:      map[string]int64{},
+               running:      map[string]stubProcess{},
                killing:      map[string]bool{},
        }
        svm.SSHService = SSHService{
@@ -189,6 +189,8 @@ type StubVM struct {
        CrunchRunMissing      bool
        CrunchRunCrashRate    float64
        CrunchRunDetachDelay  time.Duration
+       ArvMountMaxExitLag    time.Duration
+       ArvMountDeadlockRate  float64
        ExecuteContainer      func(arvados.Container) int
        CrashRunningContainer func(arvados.Container)
 
@@ -198,12 +200,21 @@ type StubVM struct {
        initCommand  cloud.InitCommand
        providerType string
        SSHService   SSHService
-       running      map[string]int64
+       running      map[string]stubProcess
        killing      map[string]bool
        lastPID      int64
+       deadlocked   string
        sync.Mutex
 }
 
+type stubProcess struct {
+       pid int64
+
+       // crunch-run has exited, but an arv-mount process (or something
+       // else) still holds the container's lock file in /var/run/
+       exited bool
+}
+
 func (svm *StubVM) Instance() stubInstance {
        svm.Lock()
        defer svm.Unlock()
@@ -256,7 +267,7 @@ func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader,
                svm.Lock()
                svm.lastPID++
                pid := svm.lastPID
-               svm.running[uuid] = pid
+               svm.running[uuid] = stubProcess{pid: pid}
                svm.Unlock()
                time.Sleep(svm.CrunchRunDetachDelay)
                fmt.Fprintf(stderr, "starting %s\n", uuid)
@@ -273,14 +284,13 @@ func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader,
                                logger.Print("[test] exiting crunch-run stub")
                                svm.Lock()
                                defer svm.Unlock()
-                               if svm.running[uuid] != pid {
+                               if svm.running[uuid].pid != pid {
                                        bugf := svm.sis.driver.Bugf
                                        if bugf == nil {
                                                bugf = logger.Warnf
                                        }
-                                       bugf("[test] StubDriver bug or caller bug: pid %d exiting, running[%s]==%d", pid, uuid, svm.running[uuid])
-                               } else {
-                                       delete(svm.running, uuid)
+                                       bugf("[test] StubDriver bug or caller bug: pid %d exiting, running[%s].pid==%d", pid, uuid, svm.running[uuid].pid)
+                                       return
                                }
                                if !completed {
                                        logger.WithField("State", ctr.State).Print("[test] crashing crunch-run stub")
@@ -288,6 +298,15 @@ func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader,
                                                svm.CrashRunningContainer(ctr)
                                        }
                                }
+                               sproc := svm.running[uuid]
+                               sproc.exited = true
+                               svm.running[uuid] = sproc
+                               svm.Unlock()
+                               time.Sleep(svm.ArvMountMaxExitLag * time.Duration(math_rand.Float64()))
+                               svm.Lock()
+                               if math_rand.Float64() >= svm.ArvMountDeadlockRate {
+                                       delete(svm.running, uuid)
+                               }
                        }()
 
                        crashluck := math_rand.Float64()
@@ -333,26 +352,31 @@ func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader,
        if command == "crunch-run --list" {
                svm.Lock()
                defer svm.Unlock()
-               for uuid := range svm.running {
-                       fmt.Fprintf(stdout, "%s\n", uuid)
+               for uuid, sproc := range svm.running {
+                       if sproc.exited {
+                               fmt.Fprintf(stdout, "%s stale\n", uuid)
+                       } else {
+                               fmt.Fprintf(stdout, "%s\n", uuid)
+                       }
                }
                if !svm.ReportBroken.IsZero() && svm.ReportBroken.Before(time.Now()) {
                        fmt.Fprintln(stdout, "broken")
                }
+               fmt.Fprintln(stdout, svm.deadlocked)
                return 0
        }
        if strings.HasPrefix(command, "crunch-run --kill ") {
                svm.Lock()
-               _, running := svm.running[uuid]
-               if running {
+               sproc, running := svm.running[uuid]
+               if running && !sproc.exited {
                        svm.killing[uuid] = true
                        svm.Unlock()
                        time.Sleep(time.Duration(math_rand.Float64()*2) * time.Millisecond)
                        svm.Lock()
-                       _, running = svm.running[uuid]
+                       sproc, running = svm.running[uuid]
                }
                svm.Unlock()
-               if running {
+               if running && !sproc.exited {
                        fmt.Fprintf(stderr, "%s: container is running\n", uuid)
                        return 1
                }
diff --git a/lib/dispatchcloud/worker/pool.go b/lib/dispatchcloud/worker/pool.go
index 4c90c4e6f42199606ea7df9ceda09e87e1360c11..c6eaeae2b618b11423512cc2be1cb5cfc454c20c 100644
@@ -64,15 +64,16 @@ type Executor interface {
 }
 
 const (
-       defaultSyncInterval       = time.Minute
-       defaultProbeInterval      = time.Second * 10
-       defaultMaxProbesPerSecond = 10
-       defaultTimeoutIdle        = time.Minute
-       defaultTimeoutBooting     = time.Minute * 10
-       defaultTimeoutProbe       = time.Minute * 10
-       defaultTimeoutShutdown    = time.Second * 10
-       defaultTimeoutTERM        = time.Minute * 2
-       defaultTimeoutSignal      = time.Second * 5
+       defaultSyncInterval        = time.Minute
+       defaultProbeInterval       = time.Second * 10
+       defaultMaxProbesPerSecond  = 10
+       defaultTimeoutIdle         = time.Minute
+       defaultTimeoutBooting      = time.Minute * 10
+       defaultTimeoutProbe        = time.Minute * 10
+       defaultTimeoutShutdown     = time.Second * 10
+       defaultTimeoutTERM         = time.Minute * 2
+       defaultTimeoutSignal       = time.Second * 5
+       defaultTimeoutStaleRunLock = time.Second * 5
 
        // Time after a quota error to try again anyway, even if no
        // instances have been shutdown.
@@ -115,6 +116,7 @@ func NewPool(logger logrus.FieldLogger, arvClient *arvados.Client, reg *promethe
                timeoutShutdown:                duration(cluster.Containers.CloudVMs.TimeoutShutdown, defaultTimeoutShutdown),
                timeoutTERM:                    duration(cluster.Containers.CloudVMs.TimeoutTERM, defaultTimeoutTERM),
                timeoutSignal:                  duration(cluster.Containers.CloudVMs.TimeoutSignal, defaultTimeoutSignal),
+               timeoutStaleRunLock:            duration(cluster.Containers.CloudVMs.TimeoutStaleRunLock, defaultTimeoutStaleRunLock),
                installPublicKey:               installPublicKey,
                tagKeyPrefix:                   cluster.Containers.CloudVMs.TagKeyPrefix,
                stop:                           make(chan bool),
@@ -152,6 +154,7 @@ type Pool struct {
        timeoutShutdown                time.Duration
        timeoutTERM                    time.Duration
        timeoutSignal                  time.Duration
+       timeoutStaleRunLock            time.Duration
        installPublicKey               ssh.PublicKey
        tagKeyPrefix                   string
 
diff --git a/lib/dispatchcloud/worker/worker.go b/lib/dispatchcloud/worker/worker.go
index 95794d0b36e5d1d552be1c751091e472b562f3b0..5b145d7c6599b75bb6e8b30f6c65e65d82186d84 100644
@@ -110,6 +110,7 @@ type worker struct {
        probing             chan struct{}
        bootOutcomeReported bool
        timeToReadyReported bool
+       staleRunLockSince   time.Time
 }
 
 func (wkr *worker) onUnkillable(uuid string) {
@@ -385,13 +386,43 @@ func (wkr *worker) probeRunning() (running []string, reportsBroken, ok bool) {
                return
        }
        ok = true
+
+       staleRunLock := false
        for _, s := range strings.Split(string(stdout), "\n") {
-               if s == "broken" {
+               // Each line of the "crunch-run --list" output is one
+               // of the following:
+               //
+               // * a container UUID, indicating that processes
+               //   related to that container are currently running.
+               //   Optionally followed by " stale", indicating that
+               //   the crunch-run process itself has exited (the
+               //   remaining process is probably arv-mount).
+               //
+               // * the string "broken", indicating that the instance
+               //   appears incapable of starting containers.
+               //
+               // See ListProcesses() in lib/crunchrun/background.go.
+               if s == "" {
+                       // empty string following final newline
+               } else if s == "broken" {
                        reportsBroken = true
-               } else if s != "" {
+               } else if toks := strings.Split(s, " "); len(toks) == 1 {
                        running = append(running, s)
+               } else if toks[1] == "stale" {
+                       wkr.logger.WithField("ContainerUUID", toks[0]).Info("probe reported stale run lock")
+                       staleRunLock = true
                }
        }
+       wkr.mtx.Lock()
+       defer wkr.mtx.Unlock()
+       if !staleRunLock {
+               wkr.staleRunLockSince = time.Time{}
+       } else if wkr.staleRunLockSince.IsZero() {
+               wkr.staleRunLockSince = time.Now()
+       } else if dur := time.Now().Sub(wkr.staleRunLockSince); dur > wkr.wp.timeoutStaleRunLock {
+               wkr.logger.WithField("Duration", dur).Warn("reporting broken after reporting stale run lock for too long")
+               reportsBroken = true
+       }
        return
 }
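To make the line grammar described in the comment above concrete, here is a tiny standalone parser covering the three kinds of lines probeRunning handles; parseListLine is a hypothetical helper, not part of the worker package:

    // Toy parser for "crunch-run --list" output lines. Illustrative only.
    package main

    import (
        "fmt"
        "strings"
    )

    func parseListLine(s string) (uuid string, stale, broken bool) {
        switch {
        case s == "":
            // blank line after the final newline -- ignore
        case s == "broken":
            broken = true
        default:
            toks := strings.Split(s, " ")
            uuid = toks[0]
            stale = len(toks) > 1 && toks[1] == "stale"
        }
        return
    }

    func main() {
        for _, line := range []string{
            "zzzzz-dz642-queuedcontainer",
            "zzzzz-dz642-queuedcontainer stale",
            "broken",
            "",
        } {
            uuid, stale, broken := parseListLine(line)
            fmt.Printf("%q -> uuid=%q stale=%v broken=%v\n", line, uuid, stale, broken)
        }
    }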
 
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index 363d09dafb5452b350077d6933ed2698689fb513..394e30a737e2aff13128df90d96decfc12428863 100644
@@ -462,6 +462,7 @@ type CloudVMsConfig struct {
        TimeoutProbe                   Duration
        TimeoutShutdown                Duration
        TimeoutSignal                  Duration
+       TimeoutStaleRunLock            Duration
        TimeoutTERM                    Duration
        ResourceTags                   map[string]string
        TagKeyPrefix                   string
diff --git a/sdk/python/tests/run_test_server.py b/sdk/python/tests/run_test_server.py
index 358608d63c5718f2161973808e0233bc5f6b74a8..0cb4151ac3b040292c0af4b5c8b5eef888b48096 100644
@@ -43,6 +43,14 @@ import arvados.config
 
 ARVADOS_DIR = os.path.realpath(os.path.join(MY_DIRNAME, '../../..'))
 SERVICES_SRC_DIR = os.path.join(ARVADOS_DIR, 'services')
+
+# Work around https://bugs.python.org/issue27805; this should no longer
+# be necessary starting sometime in the Python 3.8.x series.
+if not os.environ.get('ARVADOS_DEBUG', ''):
+    WRITE_MODE = 'a'
+else:
+    WRITE_MODE = 'w'
+
 if 'GOPATH' in os.environ:
     # Add all GOPATH bin dirs to PATH -- but insert them after the
     # ruby gems bin dir, to ensure "bundle" runs the Ruby bundler
@@ -327,10 +335,7 @@ def run(leave_running_atexit=False):
     env.pop('ARVADOS_API_HOST', None)
     env.pop('ARVADOS_API_HOST_INSECURE', None)
     env.pop('ARVADOS_API_TOKEN', None)
-    if not os.environ.get('ARVADOS_DEBUG', ''):
-        logf = open(_logfilename('railsapi'), 'a')
-    else:
-        logf = open(_logfilename('railsapi'), 'w')
+    logf = open(_logfilename('railsapi'), WRITE_MODE)
     railsapi = subprocess.Popen(
         ['bundle', 'exec',
          'passenger', 'start', '-p{}'.format(port),
@@ -412,7 +417,7 @@ def run_controller():
     if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
         return
     stop_controller()
-    logf = open(_logfilename('controller'), 'a')
+    logf = open(_logfilename('controller'), WRITE_MODE)
     port = internal_port_from_config("Controller")
     controller = subprocess.Popen(
         ["arvados-server", "controller"],
@@ -432,7 +437,7 @@ def run_ws():
         return
     stop_ws()
     port = internal_port_from_config("Websocket")
-    logf = open(_logfilename('ws'), 'a')
+    logf = open(_logfilename('ws'), WRITE_MODE)
     ws = subprocess.Popen(
         ["arvados-server", "ws"],
         stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
@@ -465,7 +470,7 @@ def _start_keep(n, blob_signing=False):
         yaml.safe_dump(confdata, f)
     keep_cmd = ["keepstore", "-config", conf]
 
-    with open(_logfilename('keep{}'.format(n)), 'a') as logf:
+    with open(_logfilename('keep{}'.format(n)), WRITE_MODE) as logf:
         with open('/dev/null') as _stdin:
             child = subprocess.Popen(
                 keep_cmd, stdin=_stdin, stdout=logf, stderr=logf, close_fds=True)
@@ -532,7 +537,7 @@ def run_keep_proxy():
     port = internal_port_from_config("Keepproxy")
     env = os.environ.copy()
     env['ARVADOS_API_TOKEN'] = auth_token('anonymous')
-    logf = open(_logfilename('keepproxy'), 'a')
+    logf = open(_logfilename('keepproxy'), WRITE_MODE)
     kp = subprocess.Popen(
         ['keepproxy'], env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
 
@@ -571,7 +576,7 @@ def run_arv_git_httpd():
     gitport = internal_port_from_config("GitHTTP")
     env = os.environ.copy()
     env.pop('ARVADOS_API_TOKEN', None)
-    logf = open(_logfilename('arv-git-httpd'), 'a')
+    logf = open(_logfilename('arv-git-httpd'), WRITE_MODE)
     agh = subprocess.Popen(['arv-git-httpd'],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
     with open(_pidfile('arv-git-httpd'), 'w') as f:
@@ -590,7 +595,7 @@ def run_keep_web():
 
     keepwebport = internal_port_from_config("WebDAV")
     env = os.environ.copy()
-    logf = open(_logfilename('keep-web'), 'a')
+    logf = open(_logfilename('keep-web'), WRITE_MODE)
     keepweb = subprocess.Popen(
         ['keep-web'],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
diff --git a/services/api/app/controllers/arvados/v1/users_controller.rb b/services/api/app/controllers/arvados/v1/users_controller.rb
index 867b9a6e6abfdf0ae050a668f4340d1664608586..cd23706d08140f2176fccb063ed5b4070202536a 100644
@@ -22,7 +22,15 @@ class Arvados::V1::UsersController < ApplicationController
       rescue ActiveRecord::RecordNotUnique
         retry
       end
-      u.update_attributes!(nullify_attrs(attrs))
+      needupdate = {}
+      nullify_attrs(attrs).each do |k,v|
+        if !v.nil? && u.send(k) != v
+          needupdate[k] = v
+        end
+      end
+      if needupdate.length > 0
+        u.update_attributes!(needupdate)
+      end
       @objects << u
     end
     @offset = 0
diff --git a/services/api/app/models/arvados_model.rb b/services/api/app/models/arvados_model.rb
index 6fb8ff2b33549af8e4e512a1374363f8dee8fa64..709b4b4d1c1304e456c494a4079ca2ea0cccc8fb 100644
@@ -454,7 +454,7 @@ class ArvadosModel < ApplicationRecord
   end
 
   def logged_attributes
-    attributes.except(*Rails.configuration.AuditLogs.UnloggedAttributes.keys)
+    attributes.except(*Rails.configuration.AuditLogs.UnloggedAttributes.stringify_keys.keys)
   end
 
   def self.full_text_searchable_columns
diff --git a/services/api/lib/config_loader.rb b/services/api/lib/config_loader.rb
index cf16993ca51054e220713c24cca1a33510e2a423..f421fb5b2a07817905c28bbd1a463da9be936ac5 100644
@@ -147,14 +147,14 @@ class ConfigLoader
             'Ki' => 1 << 10,
             'M' => 1000000,
             'Mi' => 1 << 20,
-           "G" =>  1000000000,
-           "Gi" => 1 << 30,
-           "T" =>  1000000000000,
-           "Ti" => 1 << 40,
-           "P" =>  1000000000000000,
-           "Pi" => 1 << 50,
-           "E" =>  1000000000000000000,
-           "Ei" => 1 << 60,
+            "G" =>  1000000000,
+            "Gi" => 1 << 30,
+            "T" =>  1000000000000,
+            "Ti" => 1 << 40,
+            "P" =>  1000000000000000,
+            "Pi" => 1 << 50,
+            "E" =>  1000000000000000000,
+            "Ei" => 1 << 60,
           }[mt[2]]
         end
       end
diff --git a/services/api/lib/enable_jobs_api.rb b/services/api/lib/enable_jobs_api.rb
index 1a96a81ad66708f8e45c032737a821d3a5d12ebc..cef76f08a5c93342f2ba02bf4ec699d2bf98bdd3 100644
@@ -2,16 +2,19 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-Disable_update_jobs_api_method_list = {"jobs.create"=>{},
-                                "pipeline_instances.create"=>{},
-                                "pipeline_templates.create"=>{},
-                                "jobs.update"=>{},
-                                "pipeline_instances.update"=>{},
-                                "pipeline_templates.update"=>{},
-                                "job_tasks.create"=>{},
-                                "job_tasks.update"=>{}}
+Disable_update_jobs_api_method_list = ConfigLoader.to_OrderedOptions({
+                                        "jobs.create"=>{},
+                                        "pipeline_instances.create"=>{},
+                                        "pipeline_templates.create"=>{},
+                                        "jobs.update"=>{},
+                                        "pipeline_instances.update"=>{},
+                                        "pipeline_templates.update"=>{},
+                                        "job_tasks.create"=>{},
+                                        "job_tasks.update"=>{}
+                                      })
 
-Disable_jobs_api_method_list = {"jobs.create"=>{},
+Disable_jobs_api_method_list = ConfigLoader.to_OrderedOptions({
+                                "jobs.create"=>{},
                                 "pipeline_instances.create"=>{},
                                 "pipeline_templates.create"=>{},
                                 "jobs.get"=>{},
@@ -36,7 +39,7 @@ Disable_jobs_api_method_list = {"jobs.create"=>{},
                                 "jobs.show"=>{},
                                 "pipeline_instances.show"=>{},
                                 "pipeline_templates.show"=>{},
-                                "job_tasks.show"=>{}}
+                                "job_tasks.show"=>{}})
 
 def check_enable_legacy_jobs_api
   # Create/update is permanently disabled (legacy functionality has been removed)
diff --git a/services/api/test/functional/arvados/v1/groups_controller_test.rb b/services/api/test/functional/arvados/v1/groups_controller_test.rb
index ff89cd2129b31ad5357d54066262a80cd702e41c..f413188b54b2ba54236a8ea5ce2ab51c002cb434 100644
@@ -430,7 +430,8 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
   end
 
   test 'get contents with jobs and pipeline instances disabled' do
-    Rails.configuration.API.DisabledAPIs = {'jobs.index'=>{}, 'pipeline_instances.index'=>{}}
+    Rails.configuration.API.DisabledAPIs = ConfigLoader.to_OrderedOptions(
+      {'jobs.index'=>{}, 'pipeline_instances.index'=>{}})
 
     authorize_with :active
     get :contents, params: {
diff --git a/services/api/test/functional/arvados/v1/schema_controller_test.rb b/services/api/test/functional/arvados/v1/schema_controller_test.rb
index 3dd343b13cd29ac567f8244b9399c534651fbceb..764f3a8d1dd395a7bb84b365144ca114bf3c92ff 100644
@@ -65,8 +65,8 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
   end
 
   test "non-empty disable_api_methods" do
-    Rails.configuration.API.DisabledAPIs =
-      {'jobs.create'=>{}, 'pipeline_instances.create'=>{}, 'pipeline_templates.create'=>{}}
+    Rails.configuration.API.DisabledAPIs = ConfigLoader.to_OrderedOptions(
+      {'jobs.create'=>{}, 'pipeline_instances.create'=>{}, 'pipeline_templates.create'=>{}})
     get :index
     assert_response :success
     discovery_doc = JSON.parse(@response.body)
diff --git a/services/api/test/functional/arvados/v1/users_controller_test.rb b/services/api/test/functional/arvados/v1/users_controller_test.rb
index 0ce9f1137f3fad8592e16318720c3b13d00406d3..ea5d5b1436bd256506cc1c23061dfd81eb8a763f 100644
@@ -1039,9 +1039,12 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
   test "batch update" do
     existinguuid = 'remot-tpzed-foobarbazwazqux'
     newuuid = 'remot-tpzed-newnarnazwazqux'
+    unchanginguuid = 'remot-tpzed-nochangingattrs'
     act_as_system_user do
       User.create!(uuid: existinguuid, email: 'root@existing.example.com')
+      User.create!(uuid: unchanginguuid, email: 'root@unchanging.example.com', prefs: {'foo' => {'bar' => 'baz'}})
     end
+    assert_equal(1, Log.where(object_uuid: unchanginguuid).count)
 
     authorize_with(:admin)
     patch(:batch_update,
@@ -1059,6 +1062,10 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
                 'email' => 'root@remot.example.com',
                 'username' => '',
               },
+              unchanginguuid => {
+                'email' => 'root@unchanging.example.com',
+                'prefs' => {'foo' => {'bar' => 'baz'}},
+              },
             }})
     assert_response(:success)
 
@@ -1070,6 +1077,8 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
 
     assert_equal('noot', User.find_by_uuid(newuuid).first_name)
     assert_equal('root@remot.example.com', User.find_by_uuid(newuuid).email)
+
+    assert_equal(1, Log.where(object_uuid: unchanginguuid).count)
   end
 
   NON_ADMIN_USER_DATA = ["uuid", "kind", "is_active", "email", "first_name",
diff --git a/services/api/test/functional/user_sessions_controller_test.rb b/services/api/test/functional/user_sessions_controller_test.rb
index cd475dea4d1849f6d99374fa3976068767ef1fcb..d979208d381b1a28d5f6ead099e45e6e7d4f9302 100644
@@ -68,7 +68,7 @@ class UserSessionsControllerTest < ActionController::TestCase
 
   test "login to LoginCluster" do
     Rails.configuration.Login.LoginCluster = 'zbbbb'
-    Rails.configuration.RemoteClusters['zbbbb'] = {'Host' => 'zbbbb.example.com'}
+    Rails.configuration.RemoteClusters['zbbbb'] = ConfigLoader.to_OrderedOptions({'Host' => 'zbbbb.example.com'})
     api_client_page = 'http://client.example.com/home'
     get :login, params: {return_to: api_client_page}
     assert_response :redirect
diff --git a/services/api/test/test_helper.rb b/services/api/test/test_helper.rb
index c99a57aaff49b24910df17c4a745088a4903ce22..5dc77cb98aeaa9cb7758576042e6705d29ef7f22 100644
@@ -62,7 +62,7 @@ class ActiveSupport::TestCase
   include ArvadosTestSupport
   include CurrentApiClient
 
-  teardown do
+  setup do
     Thread.current[:api_client_ip_address] = nil
     Thread.current[:api_client_authorization] = nil
     Thread.current[:api_client_uuid] = nil
@@ -72,6 +72,14 @@ class ActiveSupport::TestCase
     restore_configuration
   end
 
+  teardown do
+    # Confirm that any changed configuration doesn't include non-symbol keys
+    $arvados_config.keys.each do |conf_name|
+      conf = Rails.configuration.send(conf_name)
+      confirm_keys_as_symbols(conf, conf_name) if conf.respond_to?('keys')
+    end
+  end
+
   def assert_equal(expect, *args)
     if expect.nil?
       assert_nil(*args)
@@ -99,6 +107,14 @@ class ActiveSupport::TestCase
     end
   end
 
+  def confirm_keys_as_symbols(conf, conf_name)
+    assert(conf.is_a?(ActiveSupport::OrderedOptions), "#{conf_name} should be an OrderedOptions object")
+    conf.keys.each do |k|
+      assert(k.is_a?(Symbol), "Key '#{k}' on section '#{conf_name}' should be a Symbol")
+      confirm_keys_as_symbols(conf[k], "#{conf_name}.#{k}") if conf[k].respond_to?('keys')
+    end
+  end
+
   def restore_configuration
     # Restore configuration settings changed during tests
     ConfigLoader.copy_into_config $arvados_config, Rails.configuration
diff --git a/services/api/test/unit/application_test.rb b/services/api/test/unit/application_test.rb
index 679dddf223fa72b9d89f94258f8415c75992143f..e1565ec627c42ec53bbba875d8760d7637302e8c 100644
@@ -7,7 +7,7 @@ require 'test_helper'
 class ApplicationTest < ActiveSupport::TestCase
   include CurrentApiClient
 
-  test "test act_as_system_user" do
+  test "act_as_system_user" do
     Thread.current[:user] = users(:active)
     assert_equal users(:active), Thread.current[:user]
     act_as_system_user do
@@ -17,7 +17,7 @@ class ApplicationTest < ActiveSupport::TestCase
     assert_equal users(:active), Thread.current[:user]
   end
 
-  test "test act_as_system_user is exception safe" do
+  test "act_as_system_user is exception safe" do
     Thread.current[:user] = users(:active)
     assert_equal users(:active), Thread.current[:user]
     caught = false
@@ -33,4 +33,12 @@ class ApplicationTest < ActiveSupport::TestCase
     assert caught
     assert_equal users(:active), Thread.current[:user]
   end
+
+  test "config maps' keys are returned as symbols" do
+    assert Rails.configuration.Users.AutoSetupUsernameBlacklist.is_a? ActiveSupport::OrderedOptions
+    assert Rails.configuration.Users.AutoSetupUsernameBlacklist.keys.size > 0
+    Rails.configuration.Users.AutoSetupUsernameBlacklist.keys.each do |k|
+      assert k.is_a? Symbol
+    end
+  end
 end
diff --git a/services/api/test/unit/collection_test.rb b/services/api/test/unit/collection_test.rb
index addea83062404b84baf1911d6b81a262d582ce05..48cae5afee92622fcda41c2b48e89761f54ac80d 100644
@@ -1044,10 +1044,10 @@ class CollectionTest < ActiveSupport::TestCase
   end
 
   test "create collections with managed properties" do
-    Rails.configuration.Collections.ManagedProperties = {
+    Rails.configuration.Collections.ManagedProperties = ConfigLoader.to_OrderedOptions({
       'default_prop1' => {'Value' => 'prop1_value'},
       'responsible_person_uuid' => {'Function' => 'original_owner'}
-    }
+    })
     # Test collection without initial properties
     act_as_user users(:active) do
       c = create_collection 'foo', Encoding::US_ASCII
@@ -1076,9 +1076,9 @@ class CollectionTest < ActiveSupport::TestCase
   end
 
   test "update collection with protected managed properties" do
-    Rails.configuration.Collections.ManagedProperties = {
+    Rails.configuration.Collections.ManagedProperties = ConfigLoader.to_OrderedOptions({
       'default_prop1' => {'Value' => 'prop1_value', 'Protected' => true},
-    }
+    })
     act_as_user users(:active) do
       c = create_collection 'foo', Encoding::US_ASCII
       assert c.valid?
diff --git a/services/api/test/unit/container_request_test.rb b/services/api/test/unit/container_request_test.rb
index b91910d2d66f89081c5bd8ee44ecca20a03da046..90de800b2fa472e05e711a73a56fe97bb5c67572 100644
@@ -576,7 +576,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   test "Container.resolve_container_image(pdh)" do
     set_user_from_auth :active
     [[:docker_image, 'v1'], [:docker_image_1_12, 'v2']].each do |coll, ver|
-      Rails.configuration.Containers.SupportedDockerImageFormats = {ver=>{}}
+      Rails.configuration.Containers.SupportedDockerImageFormats = ConfigLoader.to_OrderedOptions({ver=>{}})
       pdh = collections(coll).portable_data_hash
       resolved = Container.resolve_container_image(pdh)
       assert_equal resolved, pdh
@@ -602,7 +602,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "migrated docker image" do
-    Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
+    Rails.configuration.Containers.SupportedDockerImageFormats = ConfigLoader.to_OrderedOptions({'v2'=>{}})
     add_docker19_migration_link
 
     # Test that it returns only v2 images even though request is for v1 image.
@@ -620,7 +620,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "use unmigrated docker image" do
-    Rails.configuration.Containers.SupportedDockerImageFormats = {'v1'=>{}}
+    Rails.configuration.Containers.SupportedDockerImageFormats = ConfigLoader.to_OrderedOptions({'v1'=>{}})
     add_docker19_migration_link
 
     # Test that it returns only supported v1 images even though there is a
@@ -639,7 +639,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "incompatible docker image v1" do
-    Rails.configuration.Containers.SupportedDockerImageFormats = {'v1'=>{}}
+    Rails.configuration.Containers.SupportedDockerImageFormats = ConfigLoader.to_OrderedOptions({'v1'=>{}})
     add_docker19_migration_link
 
     # Don't return unsupported v2 image even if we ask for it directly.
@@ -652,7 +652,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "incompatible docker image v2" do
-    Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
+    Rails.configuration.Containers.SupportedDockerImageFormats = ConfigLoader.to_OrderedOptions({'v2'=>{}})
     # No migration link, don't return unsupported v1 image,
 
     set_user_from_auth :active
diff --git a/services/api/test/unit/job_test.rb b/services/api/test/unit/job_test.rb
index 0e8cc48538e0f00acc0650d80910774876bc7822..c529aab8b653947ef0d8f64db7fed64c5000c523 100644
@@ -117,7 +117,7 @@ class JobTest < ActiveSupport::TestCase
     'locator' => BAD_COLLECTION,
   }.each_pair do |spec_type, image_spec|
     test "Job validation fails with nonexistent Docker image #{spec_type}" do
-      Rails.configuration.RemoteClusters = {}
+      Rails.configuration.RemoteClusters = ConfigLoader.to_OrderedOptions({})
       job = Job.new job_attrs(runtime_constraints:
                               {'docker_image' => image_spec})
       assert(job.invalid?, "nonexistent Docker image #{spec_type} #{image_spec} was valid")
diff --git a/services/api/test/unit/log_test.rb b/services/api/test/unit/log_test.rb
index 016a0e4eb4a9b6a59717de2c75a634b3182dd82f..58f0cddb709064ad2f8af9df6c20aa20f0c77d6a 100644
@@ -282,7 +282,7 @@ class LogTest < ActiveSupport::TestCase
   end
 
   test "non-empty configuration.unlogged_attributes" do
-    Rails.configuration.AuditLogs.UnloggedAttributes = {"manifest_text"=>{}}
+    Rails.configuration.AuditLogs.UnloggedAttributes = ConfigLoader.to_OrderedOptions({"manifest_text"=>{}})
     txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
 
     act_as_system_user do
@@ -297,7 +297,7 @@ class LogTest < ActiveSupport::TestCase
   end
 
   test "empty configuration.unlogged_attributes" do
-    Rails.configuration.AuditLogs.UnloggedAttributes = {}
+    Rails.configuration.AuditLogs.UnloggedAttributes = ConfigLoader.to_OrderedOptions({})
     txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
 
     act_as_system_user do
diff --git a/services/api/test/unit/user_test.rb b/services/api/test/unit/user_test.rb
index 7fcd36d7091a4c7a00a9af6ae50f10f5a413871d..b6d66230db6e2bbb0a06bbe0add823fdb397642d 100644
@@ -110,7 +110,7 @@ class UserTest < ActiveSupport::TestCase
   end
 
   test "new username set avoiding blacklist" do
-    Rails.configuration.Users.AutoSetupUsernameBlacklist = {"root"=>{}}
+    Rails.configuration.Users.AutoSetupUsernameBlacklist = ConfigLoader.to_OrderedOptions({"root"=>{}})
     check_new_username_setting("root", "root2")
   end
 
@@ -340,48 +340,52 @@ class UserTest < ActiveSupport::TestCase
     assert_equal(user.first_name, 'first_name_for_newly_created_user_updated')
   end
 
+  active_notify_list = ConfigLoader.to_OrderedOptions({"active-notify@example.com"=>{}})
+  inactive_notify_list = ConfigLoader.to_OrderedOptions({"inactive-notify@example.com"=>{}})
+  empty_notify_list = ConfigLoader.to_OrderedOptions({})
+
   test "create new user with notifications" do
     set_user_from_auth :admin
 
-    create_user_and_verify_setup_and_notifications true, {'active-notify-address@example.com'=>{}}, {'inactive-notify-address@example.com'=>{}}, nil, nil
-    create_user_and_verify_setup_and_notifications true, {'active-notify-address@example.com'=>{}}, {}, nil, nil
-    create_user_and_verify_setup_and_notifications true, {}, [], nil, nil
-    create_user_and_verify_setup_and_notifications false, {'active-notify-address@example.com'=>{}}, {'inactive-notify-address@example.com'=>{}}, nil, nil
-    create_user_and_verify_setup_and_notifications false, {}, {'inactive-notify-address@example.com'=>{}}, nil, nil
-    create_user_and_verify_setup_and_notifications false, {}, {}, nil, nil
+    create_user_and_verify_setup_and_notifications true, active_notify_list, inactive_notify_list, nil, nil
+    create_user_and_verify_setup_and_notifications true, active_notify_list, empty_notify_list, nil, nil
+    create_user_and_verify_setup_and_notifications true, empty_notify_list, empty_notify_list, nil, nil
+    create_user_and_verify_setup_and_notifications false, active_notify_list, inactive_notify_list, nil, nil
+    create_user_and_verify_setup_and_notifications false, empty_notify_list, inactive_notify_list, nil, nil
+    create_user_and_verify_setup_and_notifications false, empty_notify_list, empty_notify_list, nil, nil
   end
 
   [
     # Easy inactive user tests.
-    [false, {}, {}, "inactive-none@example.com", false, false, "inactivenone"],
-    [false, {}, {}, "inactive-vm@example.com", true, false, "inactivevm"],
-    [false, {}, {}, "inactive-repo@example.com", false, true, "inactiverepo"],
-    [false, {}, {}, "inactive-both@example.com", true, true, "inactiveboth"],
+    [false, empty_notify_list, empty_notify_list, "inactive-none@example.com", false, false, "inactivenone"],
+    [false, empty_notify_list, empty_notify_list, "inactive-vm@example.com", true, false, "inactivevm"],
+    [false, empty_notify_list, empty_notify_list, "inactive-repo@example.com", false, true, "inactiverepo"],
+    [false, empty_notify_list, empty_notify_list, "inactive-both@example.com", true, true, "inactiveboth"],
 
     # Easy active user tests.
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-none@example.com", false, false, "activenone"],
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-vm@example.com", true, false, "activevm"],
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-repo@example.com", false, true, "activerepo"],
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-both@example.com", true, true, "activeboth"],
+    [true, active_notify_list, inactive_notify_list, "active-none@example.com", false, false, "activenone"],
+    [true, active_notify_list, inactive_notify_list, "active-vm@example.com", true, false, "activevm"],
+    [true, active_notify_list, inactive_notify_list, "active-repo@example.com", false, true, "activerepo"],
+    [true, active_notify_list, inactive_notify_list, "active-both@example.com", true, true, "activeboth"],
 
     # Test users with malformed e-mail addresses.
-    [false, {}, {}, nil, true, true, nil],
-    [false, {}, {}, "arvados", true, true, nil],
-    [false, {}, {}, "@example.com", true, true, nil],
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "*!*@example.com", true, false, nil],
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "*!*@example.com", false, false, nil],
+    [false, empty_notify_list, empty_notify_list, nil, true, true, nil],
+    [false, empty_notify_list, empty_notify_list, "arvados", true, true, nil],
+    [false, empty_notify_list, empty_notify_list, "@example.com", true, true, nil],
+    [true, active_notify_list, inactive_notify_list, "*!*@example.com", true, false, nil],
+    [true, active_notify_list, inactive_notify_list, "*!*@example.com", false, false, nil],
 
     # Test users with various username transformations.
-    [false, {}, {}, "arvados@example.com", false, false, "arvados2"],
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "arvados@example.com", false, false, "arvados2"],
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "root@example.com", true, false, "root2"],
-    [false, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "root@example.com", true, false, "root2"],
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "roo_t@example.com", false, true, "root2"],
-    [false, {}, {}, "^^incorrect_format@example.com", true, true, "incorrectformat"],
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", true, true, "ad9"],
-    [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", false, false, "ad9"],
-    [false, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", true, true, "ad9"],
-    [false, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", false, false, "ad9"],
+    [false, empty_notify_list, empty_notify_list, "arvados@example.com", false, false, "arvados2"],
+    [true, active_notify_list, inactive_notify_list, "arvados@example.com", false, false, "arvados2"],
+    [true, active_notify_list, inactive_notify_list, "root@example.com", true, false, "root2"],
+    [false, active_notify_list, inactive_notify_list, "root@example.com", true, false, "root2"],
+    [true, active_notify_list, inactive_notify_list, "roo_t@example.com", false, true, "root2"],
+    [false, empty_notify_list, empty_notify_list, "^^incorrect_format@example.com", true, true, "incorrectformat"],
+    [true, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", true, true, "ad9"],
+    [true, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", false, false, "ad9"],
+    [false, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", true, true, "ad9"],
+    [false, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", false, false, "ad9"],
   ].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, auto_setup_repo, expect_username|
     test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
       set_user_from_auth :admin
@@ -569,7 +573,6 @@ class UserTest < ActiveSupport::TestCase
     assert_not_nil resp_user, 'expected user object'
     assert_not_nil resp_user['uuid'], 'expected user object'
     assert_equal email, resp_user['email'], 'expected email not found'
-
   end
 
   def verify_link (link_object, link_class, link_name, tail_uuid, head_uuid)
@@ -648,7 +651,7 @@ class UserTest < ActiveSupport::TestCase
     if not new_user_recipients.empty? then
       assert_not_nil new_user_email, 'Expected new user email after setup'
       assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_user_email.from[0]
-      assert_equal new_user_recipients.keys.first, new_user_email.to[0]
+      assert_equal new_user_recipients.stringify_keys.keys.first, new_user_email.to[0]
       assert_equal new_user_email_subject, new_user_email.subject
     else
       assert_nil new_user_email, 'Did not expect new user email after setup'
@@ -658,7 +661,7 @@ class UserTest < ActiveSupport::TestCase
       if not inactive_recipients.empty? then
         assert_not_nil new_inactive_user_email, 'Expected new inactive user email after setup'
         assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_inactive_user_email.from[0]
-        assert_equal inactive_recipients.keys.first, new_inactive_user_email.to[0]
+        assert_equal inactive_recipients.stringify_keys.keys.first, new_inactive_user_email.to[0]
         assert_equal "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification", new_inactive_user_email.subject
       else
         assert_nil new_inactive_user_email, 'Did not expect new inactive user email after setup'
@@ -667,7 +670,6 @@ class UserTest < ActiveSupport::TestCase
       assert_nil new_inactive_user_email, 'Expected no inactive user email after setting up active user'
     end
     ActionMailer::Base.deliveries = []
-
   end
 
   def verify_link_exists link_exists, head_uuid, tail_uuid, link_class, link_name, property_name=nil, property_value=nil