# already know how to properly treat them.
attribute :secret_mounts, :jsonbHash, default: {}
attribute :runtime_status, :jsonbHash, default: {}
- attribute :runtime_auth_scopes, :jsonbHash, default: {}
+ attribute :runtime_auth_scopes, :jsonbArray, default: []
+ attribute :output_storage_classes, :jsonbArray, default: lambda { Rails.configuration.DefaultStorageClasses }
+ attribute :output_properties, :jsonbHash, default: {}
serialize :environment, Hash
serialize :mounts, Hash
t.add :lock_count
t.add :gateway_address
t.add :interactive_session_started
+ t.add :output_storage_classes
+ t.add :output_properties
+ t.add :cost
+ t.add :subrequests_cost
end
# Supported states for a container
end
# Columns eligible for full text search. Excludes secrets
# (secret_mounts and its md5 digest, runtime_token), the internal
# gateway_address, and the non-textual output_storage_classes list.
def self.full_text_searchable_columns
  super - ["secret_mounts", "secret_mounts_md5", "runtime_token", "gateway_address", "output_storage_classes"]
end
# Columns usable in search filters. Excludes the secret_mounts md5
# digest, the runtime_token secret, the internal gateway_address, and
# the output_storage_classes list (not meaningfully filterable).
def self.searchable_columns *args
  super - ["secret_mounts_md5", "runtime_token", "gateway_address", "output_storage_classes"]
end
def logged_attributes
secret_mounts: req.secret_mounts,
runtime_token: req.runtime_token,
runtime_user_uuid: runtime_user.uuid,
- runtime_auth_scopes: runtime_auth_scopes
+ runtime_auth_scopes: runtime_auth_scopes,
+ output_storage_classes: req.output_storage_classes,
}
end
act_as_system_user do
if rc['keep_cache_ram'] == 0
rc['keep_cache_ram'] = Rails.configuration.Containers.DefaultKeepCacheRAM
end
+ if rc['keep_cache_disk'] == 0 and rc['keep_cache_ram'] == 0
+ # If neither ram nor disk cache was specified and
+ # DefaultKeepCacheRAM==0, default to disk cache with size equal
+ # to RAM constraint (but at least 2 GiB and at most 32 GiB).
+ rc['keep_cache_disk'] = [[rc['ram'] || 0, 2 << 30].max, 32 << 30].min
+ end
rc
end
candidates = candidates.where('secret_mounts_md5 = ?', secret_mounts_md5)
log_reuse_info(candidates) { "after filtering on secret_mounts_md5 #{secret_mounts_md5.inspect}" }
- candidates = candidates.where_serialized(:runtime_constraints, resolve_runtime_constraints(attrs[:runtime_constraints]), md5: true)
+ if attrs[:runtime_constraints]['cuda'].nil?
+ attrs[:runtime_constraints]['cuda'] = {
+ 'device_count' => 0,
+ 'driver_version' => '',
+ 'hardware_capability' => '',
+ }
+ end
+ resolved_runtime_constraints = [resolve_runtime_constraints(attrs[:runtime_constraints])]
+ if resolved_runtime_constraints[0]['cuda']['device_count'] == 0
+ # If no CUDA requested, extend search to include older container
+ # records that don't have a 'cuda' section in runtime_constraints
+ resolved_runtime_constraints << resolved_runtime_constraints[0].except('cuda')
+ end
+ if resolved_runtime_constraints[0]['keep_cache_disk'] == 0
+ # If no disk cache requested, extend search to include older container
+ # records that don't have a 'keep_cache_disk' field in runtime_constraints
+ if resolved_runtime_constraints.length == 2
+ # exclude the one that also excludes CUDA
+ resolved_runtime_constraints << resolved_runtime_constraints[1].except('keep_cache_disk')
+ end
+ resolved_runtime_constraints << resolved_runtime_constraints[0].except('keep_cache_disk')
+ end
+
+ candidates = candidates.where_serialized(:runtime_constraints, resolved_runtime_constraints, md5: true, multivalue: true)
log_reuse_info(candidates) { "after filtering on runtime_constraints #{attrs[:runtime_constraints].inspect}" }
log_reuse_info { "checking for state=Complete with readable output and log..." }
def validate_change
permitted = [:state]
- progress_attrs = [:progress, :runtime_status, :log, :output]
- final_attrs = [:exit_code, :finished_at]
+ final_attrs = [:finished_at]
+ progress_attrs = [:progress, :runtime_status, :subrequests_cost, :cost,
+ :log, :output, :output_properties, :exit_code]
if self.new_record?
permitted.push(:owner_uuid, :command, :container_image, :cwd,
:environment, :mounts, :output_path, :priority,
:runtime_constraints, :scheduling_parameters,
:secret_mounts, :runtime_token,
- :runtime_user_uuid, :runtime_auth_scopes)
+ :runtime_user_uuid, :runtime_auth_scopes,
+ :output_storage_classes)
end
case self.state
permitted.push :priority
when Running
- permitted.push :priority, *progress_attrs
+ permitted.push :priority, :output_properties, :gateway_address, *progress_attrs
if self.state_changed?
- permitted.push :started_at, :gateway_address
+ permitted.push :started_at
end
if !self.interactive_session_started_was
permitted.push :interactive_session_started
when Running
permitted.push :finished_at, *progress_attrs
when Queued, Locked
- permitted.push :finished_at, :log, :runtime_status
+ permitted.push :finished_at, :log, :runtime_status, :cost
end
else
self.runtime_auth_scopes = ["all"]
end
- # generate a new token
+ # Generate a new token. This runs with admin credentials as it's done by a
+ # dispatcher user, so expires_at isn't enforced by API.MaxTokenLifetime.
self.auth = ApiClientAuthorization.
create!(user_id: User.find_by_uuid(self.runtime_user_uuid).id,
api_client_id: 0,
cr.with_lock do
leave_modified_by_user_alone do
# Use row locking because this increments container_count
+ cr.cumulative_cost += self.cost + self.subrequests_cost
cr.container_uuid = c.uuid
cr.save!
end