14484: Adds functionality and test for pdh grouping in the container model
[arvados.git] / services / api / app / models / container.rb
index 15a9c501f71758dc85fa3af7a3327e1f9a9130e1..b3328d9c76942949f8153449ec1368e4eb8418ba 100644 (file)
@@ -1,29 +1,48 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'log_reuse_info'
 require 'whitelist_update'
 require 'safe_json'
+require 'update_priority'
 
 class Container < ArvadosModel
+  include ArvadosModelUpdates
   include HasUuid
   include KindAndEtag
   include CommonApiTemplate
   include WhitelistUpdate
   extend CurrentApiClient
+  extend DbCurrentTime
+  extend LogReuseInfo
 
   serialize :environment, Hash
   serialize :mounts, Hash
   serialize :runtime_constraints, Hash
   serialize :command, Array
   serialize :scheduling_parameters, Hash
+  serialize :secret_mounts, Hash
+  serialize :runtime_status, Hash
 
   before_validation :fill_field_defaults, :if => :new_record?
   before_validation :set_timestamps
-  validates :command, :container_image, :output_path, :cwd, :priority, :presence => true
+  validates :command, :container_image, :output_path, :cwd, :priority, { presence: true }
+  validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
+  validate :validate_runtime_status
   validate :validate_state_change
   validate :validate_change
   validate :validate_lock
   validate :validate_output
   after_validation :assign_auth
   before_save :sort_serialized_attrs
+  before_save :update_secret_mounts_md5
+  before_save :scrub_secrets
+  before_save :clear_runtime_status_when_queued
+  after_save :update_cr_logs
   after_save :handle_completed
+  after_save :propagate_priority
+  after_commit { UpdatePriority.run_update_thread }
 
   has_many :container_requests, :foreign_key => :container_uuid, :class_name => 'ContainerRequest', :primary_key => :uuid
   belongs_to :auth, :class_name => 'ApiClientAuthorization', :foreign_key => :auth_uuid, :primary_key => :uuid
@@ -43,10 +62,13 @@ class Container < ArvadosModel
     t.add :priority
     t.add :progress
     t.add :runtime_constraints
+    t.add :runtime_status
     t.add :started_at
     t.add :state
     t.add :auth_uuid
     t.add :scheduling_parameters
+    t.add :runtime_user_uuid
+    t.add :runtime_auth_scopes
   end
 
   # Supported states for a container
@@ -70,35 +92,93 @@ class Container < ArvadosModel
     ["mounts"]
   end
 
+  def self.full_text_searchable_columns
+    super - ["secret_mounts", "secret_mounts_md5", "runtime_token"]
+  end
+
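+  # Secret mounts and runtime tokens are excluded from searchable
+  # columns so secrets can't be probed or leaked via API queries.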
+  def self.searchable_columns *args
+    super - ["secret_mounts_md5", "runtime_token"]
+  end
+
+  def logged_attributes
+    super.except('secret_mounts', 'runtime_token')
+  end
+
   def state_transitions
     State_transitions
   end
 
+  # Container priority is the highest "computed priority" of any
+  # matching request. The computed priority of a container-submitted
+  # request is the priority of the submitting container. The computed
+  # priority of a user-submitted request is a function of
+  # user-assigned priority and request creation time.
   def update_priority!
-    if [Queued, Locked, Running].include? self.state
-      # Update the priority of this container to the maximum priority of any of
-      # its committed container requests and save the record.
-      self.priority = ContainerRequest.
-        where(container_uuid: uuid,
+    return if ![Queued, Locked, Running].include?(state)
+    p = ContainerRequest.
+        where('container_uuid=? and priority>0', uuid).
+        includes(:requesting_container).
+        lock(true).
+        map do |cr|
+      if cr.requesting_container
+        cr.requesting_container.priority
+      else
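+        # Pack the user-assigned priority into the high-order bits and
+        # subtract the creation time in milliseconds, so that among
+        # requests with equal priority the earliest-created one wins.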
+        (cr.priority << 50) - (cr.created_at.to_time.to_f * 1000).to_i
+      end
+    end.max || 0
+    update_attributes!(priority: p)
+  end
+
+  def propagate_priority
+    return true unless priority_changed?
+    act_as_system_user do
+      # Update the priority of the child containers (the containers
+      # of requests submitted by this container) to match this
+      # container's new priority, ignoring requests with no container
+      # assigned.
+      ContainerRequest.
+        where(requesting_container_uuid: self.uuid,
               state: ContainerRequest::Committed).
-        maximum('priority')
-      self.save!
+        where('container_uuid is not null').
+        includes(:container).
+        map(&:container).
+        map(&:update_priority!)
     end
   end
 
   # Create a new container (or find an existing one) to satisfy the
   # given container request.
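+  #
+  # Illustrative use (hypothetical caller, roughly what a committed
+  # ContainerRequest does):
+  #
+  #   container = Container.resolve(container_request)
+  #   container_request.container_uuid = container.uuid
+  #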
   def self.resolve(req)
-    c_attrs = {
-      command: req.command,
-      cwd: req.cwd,
-      environment: req.environment,
-      output_path: req.output_path,
-      container_image: resolve_container_image(req.container_image),
-      mounts: resolve_mounts(req.mounts),
-      runtime_constraints: resolve_runtime_constraints(req.runtime_constraints),
-      scheduling_parameters: req.scheduling_parameters,
-    }
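+    # Decide which user identity (and token scopes) the container will
+    # run as: the user behind the request's runtime_token if one was
+    # supplied, otherwise the user who submitted the request, with
+    # full ("all") scopes.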
+    if req.runtime_token.nil?
+      runtime_user = if req.modified_by_user_uuid.nil?
+                       current_user
+                     else
+                       User.find_by_uuid(req.modified_by_user_uuid)
+                     end
+      runtime_auth_scopes = ["all"]
+    else
+      auth = ApiClientAuthorization.validate(token: req.runtime_token)
+      if auth.nil?
+        raise ArgumentError.new "Invalid runtime token"
+      end
+      runtime_user = User.find_by_id(auth.user_id)
+      runtime_auth_scopes = auth.scopes
+    end
+    c_attrs = act_as_user runtime_user do
+      {
+        command: req.command,
+        cwd: req.cwd,
+        environment: req.environment,
+        output_path: req.output_path,
+        container_image: resolve_container_image(req.container_image),
+        mounts: resolve_mounts(req.mounts),
+        runtime_constraints: resolve_runtime_constraints(req.runtime_constraints),
+        scheduling_parameters: req.scheduling_parameters,
+        secret_mounts: req.secret_mounts,
+        runtime_token: req.runtime_token,
+        runtime_user_uuid: runtime_user.uuid,
+        runtime_auth_scopes: runtime_auth_scopes
+      }
+    end
     act_as_system_user do
       if req.use_existing && (reusable = find_reusable(c_attrs))
         reusable
@@ -143,7 +223,11 @@ class Container < ArvadosModel
       if mount['kind'] != 'collection'
         next
       end
-      if (uuid = mount.delete 'uuid')
+
+      uuid = mount.delete 'uuid'
+
+      if mount['portable_data_hash'].nil? and !uuid.nil?
+        # PDH not supplied, try by UUID
         c = Collection.
           readable_by(current_user).
           where(uuid: uuid).
@@ -152,13 +236,7 @@ class Container < ArvadosModel
         if !c
           raise ArvadosModel::UnresolvableContainerError.new "cannot mount collection #{uuid.inspect}: not found"
         end
-        if mount['portable_data_hash'].nil?
-          # PDH not supplied by client
-          mount['portable_data_hash'] = c.portable_data_hash
-        elsif mount['portable_data_hash'] != c.portable_data_hash
-          # UUID and PDH supplied by client, but they don't agree
-          raise ArgumentError.new "cannot mount collection #{uuid.inspect}: current portable_data_hash #{c.portable_data_hash.inspect} does not match #{c['portable_data_hash'].inspect} in request"
-        end
+        mount['portable_data_hash'] = c.portable_data_hash
       end
     end
     return c_mounts
@@ -174,82 +252,220 @@ class Container < ArvadosModel
   end
 
   def self.find_reusable(attrs)
-    candidates = Container.
-      where_serialized(:command, attrs[:command]).
-      where('cwd = ?', attrs[:cwd]).
-      where_serialized(:environment, attrs[:environment]).
-      where('output_path = ?', attrs[:output_path]).
-      where('container_image = ?', resolve_container_image(attrs[:container_image])).
-      where_serialized(:mounts, resolve_mounts(attrs[:mounts])).
-      where_serialized(:runtime_constraints, resolve_runtime_constraints(attrs[:runtime_constraints]))
-
-    # Check for Completed candidates whose output and log are both readable.
+    log_reuse_info { "starting with #{Container.all.count} container records in database" }
+    candidates = Container.where_serialized(:command, attrs[:command], md5: true)
+    log_reuse_info(candidates) { "after filtering on command #{attrs[:command].inspect}" }
+
+    candidates = candidates.where('cwd = ?', attrs[:cwd])
+    log_reuse_info(candidates) { "after filtering on cwd #{attrs[:cwd].inspect}" }
+
+    candidates = candidates.where_serialized(:environment, attrs[:environment], md5: true)
+    log_reuse_info(candidates) { "after filtering on environment #{attrs[:environment].inspect}" }
+
+    candidates = candidates.where('output_path = ?', attrs[:output_path])
+    log_reuse_info(candidates) { "after filtering on output_path #{attrs[:output_path].inspect}" }
+
+    image = resolve_container_image(attrs[:container_image])
+    candidates = candidates.where('container_image = ?', image)
+    log_reuse_info(candidates) { "after filtering on container_image #{image.inspect} (resolved from #{attrs[:container_image].inspect})" }
+
+    candidates = candidates.where_serialized(:mounts, resolve_mounts(attrs[:mounts]), md5: true)
+    log_reuse_info(candidates) { "after filtering on mounts #{attrs[:mounts].inspect}" }
+
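+    # Secret mounts are compared by the MD5 of their canonical
+    # (deep-sorted) JSON form, matching what update_secret_mounts_md5
+    # stores in the secret_mounts_md5 column.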
+    secret_mounts_md5 = Digest::MD5.hexdigest(SafeJSON.dump(self.deep_sort_hash(attrs[:secret_mounts])))
+    candidates = candidates.where('secret_mounts_md5 = ?', secret_mounts_md5)
+    log_reuse_info(candidates) { "after filtering on secret_mounts_md5 #{secret_mounts_md5.inspect}" }
+
+    candidates = candidates.where_serialized(:runtime_constraints, resolve_runtime_constraints(attrs[:runtime_constraints]), md5: true)
+    log_reuse_info(candidates) { "after filtering on runtime_constraints #{attrs[:runtime_constraints].inspect}" }
+
+    log_reuse_info { "checking for state=Complete with readable output and log..." }
+
     select_readable_pdh = Collection.
       readable_by(current_user).
       select(:portable_data_hash).
       to_sql
-    usable = candidates.
-      where(state: Complete).
-      where(exit_code: 0).
-      where("log IN (#{select_readable_pdh})").
-      where("output IN (#{select_readable_pdh})").
-      order('finished_at ASC').
-      limit(1).
-      first
-    return usable if usable
-
-    # Check for Running candidates and return the most likely to finish sooner.
+
+    usable = candidates.where(state: Complete, exit_code: 0)
+    log_reuse_info(usable) { "with state=Complete, exit_code=0" }
+
+    usable = usable.where("log IN (#{select_readable_pdh})")
+    log_reuse_info(usable) { "with readable log" }
+
+    usable = usable.where("output IN (#{select_readable_pdh})")
+    log_reuse_info(usable) { "with readable output" }
+
+    usable = usable.order('finished_at ASC').limit(1).first
+    if usable
+      log_reuse_info { "done, reusing container #{usable.uuid} with state=Complete" }
+      return usable
+    end
+
+    # Check for non-failing Running candidates and return the most likely to finish sooner.
+    log_reuse_info { "checking for state=Running..." }
     running = candidates.where(state: Running).
-      order('progress desc, started_at asc').limit(1).first
-    return running if not running.nil?
+              where("(runtime_status->'error') is null").
+              order('progress desc, started_at asc').
+              limit(1).first
+    if running
+      log_reuse_info { "done, reusing container #{running.uuid} with state=Running" }
+      return running
+    else
+      log_reuse_info { "have no containers in Running state" }
+    end
 
     # Check for Locked or Queued ones and return the most likely to start first.
-    locked_or_queued = candidates.where("state IN (?)", [Locked, Queued]).
-      order('state asc, priority desc, created_at asc').limit(1).first
-    return locked_or_queued if not locked_or_queued.nil?
+    locked_or_queued = candidates.
+                       where("state IN (?)", [Locked, Queued]).
+                       order('state asc, priority desc, created_at asc').
+                       limit(1).first
+    if locked_or_queued
+      log_reuse_info { "done, reusing container #{locked_or_queued.uuid} with state=#{locked_or_queued.state}" }
+      return locked_or_queued
+    else
+      log_reuse_info { "have no containers in Locked or Queued state" }
+    end
 
-    # No suitable candidate found.
+    log_reuse_info { "done, no reusable container found" }
     nil
   end
 
+  def check_lock_fail
+    if self.state != Queued
+      raise LockFailedError.new("cannot lock when #{self.state}")
+    elsif self.priority <= 0
+      raise LockFailedError.new("cannot lock when priority<=0")
+    end
+  end
+
   def lock
-    with_lock do
-      if self.state == Locked
-        raise AlreadyLockedError
-      end
-      self.state = Locked
-      self.save!
+    # Check invalid state transitions once before getting the lock
+    # (because it's cheaper that way) and once after getting the lock
+    # (because state might have changed while acquiring the lock).
+    check_lock_fail
+    transaction do
+      reload
+      check_lock_fail
+      update_attributes!(state: Locked, lock_count: self.lock_count+1)
+    end
+  end
+
+  def check_unlock_fail
+    if self.state != Locked
+      raise InvalidStateTransitionError.new("cannot unlock when #{self.state}")
+    elsif self.locked_by_uuid != current_api_client_authorization.uuid
+      raise InvalidStateTransitionError.new("locked by a different token")
     end
   end
 
   def unlock
-    with_lock do
-      if self.state == Queued
-        raise InvalidStateTransitionError
+    # Check invalid state transitions twice (see lock)
+    check_unlock_fail
+    transaction do
+      reload(lock: 'FOR UPDATE')
+      check_unlock_fail
+      if self.lock_count < Rails.configuration.max_container_dispatch_attempts
+        update_attributes!(state: Queued)
+      else
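+        # Too many dispatch attempts: cancel instead of re-queueing,
+        # recording the reason in runtime_status.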
+        update_attributes!(state: Cancelled,
+                           runtime_status: {
+                             error: "Container exceeded 'max_container_dispatch_attempts' (lock_count=#{self.lock_count})."
+                           })
       end
-      self.state = Queued
-      self.save!
     end
   end
 
   def self.readable_by(*users_list)
+    # Load optional keyword arguments, if they exist.
+    if users_list.last.is_a? Hash
+      kwargs = users_list.pop
+    else
+      kwargs = {}
+    end
     if users_list.select { |u| u.is_admin }.any?
-      return self
+      return super
     end
-    user_uuids = users_list.map { |u| u.uuid }
-    uuid_list = user_uuids + users_list.flat_map { |u| u.groups_i_can(:read) }
-    uuid_list.uniq!
-    permitted = "(SELECT head_uuid FROM links WHERE link_class='permission' AND tail_uuid IN (:uuids))"
-    joins(:container_requests).
-      where("container_requests.uuid IN #{permitted} OR "+
-            "container_requests.owner_uuid IN (:uuids)",
-            uuids: uuid_list)
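+    # A container is readable by a user if any container request that
+    # refers to it is readable by that user.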
+    Container.where(ContainerRequest.readable_by(*users_list).where("containers.uuid = container_requests.container_uuid").exists)
   end
 
   def final?
     [Complete, Cancelled].include?(self.state)
   end
 
+  def self.for_current_token
+    return if !current_api_client_authorization
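+    # A container runtime token may carry a fourth slash-separated
+    # component naming the container it was issued for.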
+    _, _, _, container_uuid = Thread.current[:token].split('/')
+    if container_uuid.nil?
+      Container.where(auth_uuid: current_api_client_authorization.uuid).first
+    else
+      Container.where('auth_uuid=? or (uuid=? and runtime_token=?)',
+                      current_api_client_authorization.uuid,
+                      container_uuid,
+                      current_api_client_authorization.token).first
+    end
+  end
+
+  # NOTE: Migration 20190322174136_add_file_info_to_collection.rb relies on this function.
+  #
+  # Change with caution!
+  #
+  # Groups PDHs into batches suitable for separate database
+  # transactions, to avoid updating too many rows at once.
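+  #
+  # Illustrative use (hypothetical log prefix; the block body is up to
+  # the caller):
+  #
+  #   Container.group_pdhs_for_multiple_transactions('my-migration') do |pdhs|
+  #     # process this batch of PDHs in its own transaction
+  #   end
+  #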
+  def self.group_pdhs_for_multiple_transactions(log_prefix)
+    batch_size_max = 1 << 28 # 256 MiB
+    last_pdh = '0'
+    done = 0
+    any = true
+
+    total = ActiveRecord::Base.connection.exec_query(
+      'SELECT DISTINCT portable_data_hash FROM collections'
+    ).rows.count
+
+    while any
+      any = false
+      # Page through distinct PDHs in order, extracting the PDH string
+      # from each row so group_pdhs_by_manifest_size and the
+      # "> last_pdh" pagination below operate on plain strings.
+      pdhs = ActiveRecord::Base.connection.exec_query(
+        'SELECT DISTINCT portable_data_hash FROM collections '\
+        "WHERE portable_data_hash > '#{last_pdh}' "\
+        'ORDER BY portable_data_hash LIMIT 1000'
+      ).rows.map(&:first)
+      if pdhs.empty?
+        break
+      end
+
+      Container.group_pdhs_by_manifest_size(pdhs, batch_size_max) do |grouped_pdhs|
+        any = true
+        yield grouped_pdhs
+        done += grouped_pdhs.size
+        last_pdh = pdhs[-1]
+        Rails.logger.info(log_prefix + ": #{done}/#{total}")
+      end
+    end
+    Rails.logger.info(log_prefix + ': finished')
+  end
+
+  # NOTE: Migration 20190322174136_add_file_info_to_collection.rb relies on this function.
+  #
+  # Change with caution!
+  #
+  # Given an array of PDHs, yield subset arrays of PDHs whose total
+  # manifest_text size is no more than batch_size_max. A PDH whose
+  # manifest_text is bigger than batch_size_max is yielded by itself.
+  def self.group_pdhs_by_manifest_size(pdhs, batch_size_max)
+    batch_size = 0
+    batch_pdhs = {}
+    pdhs.each do |pdh|
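+      # A portable data hash has the form "<md5>+<manifest size in
+      # bytes>", so the manifest size can be read from the PDH itself.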
+      manifest_size = pdh.split('+')[1].to_i
+      if batch_size > 0 && batch_size + manifest_size > batch_size_max
+        yield batch_pdhs.keys
+        batch_pdhs = {}
+        batch_size = 0
+      end
+      batch_pdhs[pdh] = true
+      batch_size += manifest_size
+    end
+    yield batch_pdhs.keys
+  end
+
   protected
 
   def fill_field_defaults
@@ -258,7 +474,7 @@ class Container < ArvadosModel
     self.runtime_constraints ||= {}
     self.mounts ||= {}
     self.cwd ||= "."
-    self.priority ||= 1
+    self.priority ||= 0
     self.scheduling_parameters ||= {}
   end
 
@@ -266,24 +482,11 @@ class Container < ArvadosModel
     current_user.andand.is_admin
   end
 
-  def permission_to_update
-    # Override base permission check to allow auth_uuid to set progress and
-    # output (only).  Whether it is legal to set progress and output in the current
-    # state has already been checked in validate_change.
-    current_user.andand.is_admin ||
-      (!current_api_client_authorization.nil? and
-       [self.auth_uuid, self.locked_by_uuid].include? current_api_client_authorization.uuid)
-  end
-
   def ensure_owner_uuid_is_permitted
-    # Override base permission check to allow auth_uuid to set progress and
-    # output (only).  Whether it is legal to set progress and output in the current
-    # state has already been checked in validate_change.
-    if !current_api_client_authorization.nil? and self.auth_uuid == current_api_client_authorization.uuid
-      check_update_whitelist [:progress, :output]
-    else
-      super
-    end
+    # validate_change ensures owner_uuid can't be changed at all --
+    # except during create, which requires admin privileges. Checking
+    # permission here would be superfluous.
+    true
   end
 
   def set_timestamps
@@ -296,36 +499,54 @@ class Container < ArvadosModel
     end
   end
 
+  # Check that well-known runtime status keys have desired data types
+  def validate_runtime_status
+    [
+      'error', 'errorDetail', 'warning', 'warningDetail', 'activity'
+    ].each do |k|
+      if self.runtime_status.andand.include?(k) && !self.runtime_status[k].is_a?(String)
+        errors.add(:runtime_status, "'#{k}' value must be a string")
+      end
+    end
+  end
+
   def validate_change
     permitted = [:state]
+    progress_attrs = [:progress, :runtime_status, :log, :output]
+    final_attrs = [:exit_code, :finished_at]
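+    # progress_attrs may be updated while a container runs;
+    # final_attrs are normally set only when it finishes.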
 
     if self.new_record?
       permitted.push(:owner_uuid, :command, :container_image, :cwd,
                      :environment, :mounts, :output_path, :priority,
-                     :runtime_constraints, :scheduling_parameters)
+                     :runtime_constraints, :scheduling_parameters,
+                     :secret_mounts, :runtime_token,
+                     :runtime_user_uuid, :runtime_auth_scopes)
     end
 
     case self.state
-    when Queued, Locked
+    when Locked
+      permitted.push :priority, :runtime_status, :log, :lock_count
+
+    when Queued
       permitted.push :priority
 
     when Running
-      permitted.push :priority, :progress, :output
+      permitted.push :priority, *progress_attrs
       if self.state_changed?
         permitted.push :started_at
       end
 
     when Complete
       if self.state_was == Running
-        permitted.push :finished_at, :output, :log, :exit_code
+        permitted.push *final_attrs, *progress_attrs
       end
 
     when Cancelled
       case self.state_was
       when Running
-        permitted.push :finished_at, :output, :log
+        permitted.push :finished_at, *progress_attrs
       when Queued, Locked
-        permitted.push :finished_at
+        permitted.push :finished_at, :log, :runtime_status
       end
 
     else
@@ -333,6 +554,19 @@ class Container < ArvadosModel
       return false
     end
 
+    if self.state == Running &&
+       !current_api_client_authorization.nil? &&
+       (current_api_client_authorization.uuid == self.auth_uuid ||
+        current_api_client_authorization.token == self.runtime_token)
+      # The contained process itself can write final attrs but can't
+      # change priority or log.
+      permitted.push *final_attrs
+      permitted = permitted - [:log, :priority]
+    elsif self.locked_by_uuid && self.locked_by_uuid != current_api_client_authorization.andand.uuid
+      # When locked, progress fields cannot be updated by the wrong
+      # dispatcher, even though it has admin privileges.
+      permitted = permitted - progress_attrs
+    end
     check_update_whitelist permitted
   end
 
@@ -361,21 +595,32 @@ class Container < ArvadosModel
     # that a container cannot "claim" a collection that it doesn't otherwise
     # have access to just by setting the output field to the collection PDH.
     if output_changed?
-      c = Collection.unscoped do
-        Collection.
-            readable_by(current_user).
+      c = Collection.
+            readable_by(current_user, {include_trash: true}).
             where(portable_data_hash: self.output).
             first
-      end
       if !c
         errors.add :output, "collection must exist and be readable by current user."
       end
     end
   end
 
+  def update_cr_logs
+    # If self.final?, this update is superfluous: the final log/output
+    # update will be done when handle_completed calls finalize! on
+    # each requesting CR.
+    return if self.final? || !self.log_changed?
+    leave_modified_by_user_alone do
+      ContainerRequest.where(container_uuid: self.uuid).each do |cr|
+        cr.update_collections(container: self, collections: ['log'])
+        cr.save!
+      end
+    end
+  end
+
   def assign_auth
     if self.auth_uuid_changed?
-      return errors.add :auth_uuid, 'is readonly'
+         return errors.add :auth_uuid, 'is readonly'
     end
     if not [Locked, Running].include? self.state
       # don't need one
@@ -386,16 +631,29 @@ class Container < ArvadosModel
       # already have one
       return
     end
-    cr = ContainerRequest.
-      where('container_uuid=? and priority>0', self.uuid).
-      order('priority desc').
-      first
-    if !cr
-      return errors.add :auth_uuid, "cannot be assigned because priority <= 0"
+    if self.runtime_token.nil?
+      if self.runtime_user_uuid.nil?
+        # Legacy behavior: we don't have a runtime_user_uuid, so take
+        # the user from the highest-priority container request. This is
+        # needed when upgrading while containers are still queued, and
+        # by some tests.
+        cr = ContainerRequest.
+               where('container_uuid=? and priority>0', self.uuid).
+               order('priority desc').
+               first
+        if !cr
+          return errors.add :auth_uuid, "cannot be assigned because priority <= 0"
+        end
+        self.runtime_user_uuid = cr.modified_by_user_uuid
+        self.runtime_auth_scopes = ["all"]
+      end
+
+      # generate a new token
+      self.auth = ApiClientAuthorization.
+                    create!(user_id: User.find_by_uuid(self.runtime_user_uuid).id,
+                            api_client_id: 0,
+                            scopes: self.runtime_auth_scopes)
     end
-    self.auth = ApiClientAuthorization.
-      create!(user_id: User.find_by_uuid(cr.modified_by_user_uuid).id,
-              api_client_id: 0)
   end
 
   def sort_serialized_attrs
@@ -411,6 +669,33 @@ class Container < ArvadosModel
     if self.scheduling_parameters_changed?
       self.scheduling_parameters = self.class.deep_sort_hash(self.scheduling_parameters)
     end
+    if self.runtime_auth_scopes_changed?
+      self.runtime_auth_scopes = self.runtime_auth_scopes.sort
+    end
+  end
+
+  def update_secret_mounts_md5
+    if self.secret_mounts_changed?
+      self.secret_mounts_md5 = Digest::MD5.hexdigest(
+        SafeJSON.dump(self.class.deep_sort_hash(self.secret_mounts)))
+    end
+  end
+
+  def scrub_secrets
+    # This runs after update_secret_mounts_md5, so secret_mounts_md5
+    # still reflects the secrets being scrubbed here.
+    if self.state_changed? && self.final?
+      self.secret_mounts = {}
+      self.runtime_token = nil
+    end
+  end
+
+  def clear_runtime_status_when_queued
+    # Avoid leaking status messages between different dispatch attempts
+    if self.state_was == Locked && self.state == Queued
+      self.runtime_status = {}
+    end
   end
 
   def handle_completed
@@ -434,14 +719,20 @@ class Container < ArvadosModel
             container_image: self.container_image,
             mounts: self.mounts,
             runtime_constraints: self.runtime_constraints,
-            scheduling_parameters: self.scheduling_parameters
+            scheduling_parameters: self.scheduling_parameters,
+            secret_mounts: self.secret_mounts_was,
+            runtime_token: self.runtime_token_was,
+            runtime_user_uuid: self.runtime_user_uuid,
+            runtime_auth_scopes: self.runtime_auth_scopes
           }
           c = Container.create! c_attrs
           retryable_requests.each do |cr|
             cr.with_lock do
-              # Use row locking because this increments container_count
-              cr.container_uuid = c.uuid
-              cr.save
+              leave_modified_by_user_alone do
+                # Use row locking because this increments container_count
+                cr.container_uuid = c.uuid
+                cr.save!
+              end
             end
           end
         end
@@ -449,17 +740,30 @@ class Container < ArvadosModel
         # Notify container requests associated with this container
         ContainerRequest.where(container_uuid: uuid,
                                state: ContainerRequest::Committed).each do |cr|
-          cr.finalize!
+          leave_modified_by_user_alone do
+            cr.finalize!
+          end
         end
 
-        # Try to cancel any outstanding container requests made by this container.
-        ContainerRequest.where(requesting_container_uuid: uuid,
-                               state: ContainerRequest::Committed).each do |cr|
-          cr.priority = 0
-          cr.save
+        # Cancel outstanding container requests made by this container.
+        ContainerRequest.
+          includes(:container).
+          where(requesting_container_uuid: uuid,
+                state: ContainerRequest::Committed).each do |cr|
+          leave_modified_by_user_alone do
+            cr.update_attributes!(priority: 0)
+            cr.container.reload
+            if cr.container.state == Container::Queued || cr.container.state == Container::Locked
+              # If the child container hasn't started yet, finalize the
+              # child CR now instead of leaving it "on hold", i.e.,
+              # Queued with priority 0.  (OTOH, if the child is already
+              # running, leave it alone so it can get cancelled the
+              # usual way, get a copy of the log collection, etc.)
+              cr.update_attributes!(state: ContainerRequest::Final)
+            end
+          end
         end
       end
     end
   end
-
 end