# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0

require 'log_reuse_info'

class Job < ArvadosModel
  include CommonApiTemplate
  extend CurrentApiClient
  extend LogReuseInfo
  serialize :components, Hash
  serialize :script_parameters, Hash
  serialize :runtime_constraints, Hash
  serialize :tasks_summary, Hash
  before_create :ensure_unique_submit_id
  after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
  before_validation :set_priority
  before_validation :update_state_from_old_state_attrs
  before_validation :update_script_parameters_digest
  validate :ensure_script_version_is_commit
  validate :find_docker_image_locator
  validate :find_arvados_sdk_version
  validate :validate_status
  validate :validate_state_change
  validate :ensure_no_collection_uuids_in_script_params
  before_save :tag_version_in_internal_repository
  before_save :update_timestamps_when_state_changes

  has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
  has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)

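  # Raised when a client submits a new job with a submit_id that has
  # already been used (see ensure_unique_submit_id below).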
  class SubmitIdReused < RequestError
  end

  api_accessible :user, extend: :common do |t|
    t.add :script_parameters
    t.add :cancelled_by_client_uuid
    t.add :cancelled_by_user_uuid
    t.add :is_locked_by_uuid
    t.add :runtime_constraints
    t.add :nondeterministic
    t.add :supplied_script_version
    t.add :arvados_sdk_version
    t.add :docker_image_locator
  end

  # Supported states for a job
  States = [
    (Queued = 'Queued'),
    (Running = 'Running'),
    (Cancelled = 'Cancelled'),
    (Failed = 'Failed'),
    (Complete = 'Complete'),
  ]

  after_initialize do
    @need_crunch_dispatch_trigger = false
  end

  def self.limit_index_columns_read
    ["components"]
  end

  def self.protected_attributes
    [:arvados_sdk_version, :docker_image_locator]
  end

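  # Force this job into a finished state: stamp finished_at if it is
  # not already set, default a nil success to false, and clear the
  # legacy running flag.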
  def assert_finished
    update_attributes(finished_at: finished_at || db_current_time,
                      success: success.nil? ? false : success,
                      running: false)
  end

  def self.queue
    self.where('state = ?', Queued).order('priority desc, created_at')
  end

  def queue_position
    # We used to report this accurately, but the implementation made queue
    # API requests O(n**2) for the size of the queue. See #8800.
    # We've soft-disabled it because it's not clear we even want this
    # functionality: now that we have Node Manager with support for multiple
    # node sizes, "queue position" tells you very little about when a job
    # will run.
    state == Queued ? 0 : nil
  end

  def self.running
    self.where('running = ?', true).
      order('priority desc, created_at')
  end

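  # Take exclusive ownership of a queued job on behalf of
  # locked_by_uuid. Raises AlreadyLockedError unless the job is still
  # Queued and unlocked; otherwise marks it Running and saves.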
  def lock locked_by_uuid
    with_lock do
      unless self.state == Queued and self.is_locked_by_uuid.nil?
        raise AlreadyLockedError
      end
      self.state = Running
      self.is_locked_by_uuid = locked_by_uuid
      self.save!
    end
  end

  def update_script_parameters_digest
    self.script_parameters_digest = self.class.sorted_hash_digest(script_parameters)
  end

  def self.searchable_columns operator
    super - ["script_parameters_digest"]
  end

  def self.full_text_searchable_columns
    super - ["script_parameters_digest"]
  end

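  # A sketch of the translation performed by load_job_specific_filters
  # below (operands are illustrative, not from the original source):
  #
  #   ["script_version", "in git", "main"]
  #     => ["script_version", "in", [<commit hashes reachable from main>]]
  #   ["docker_image_locator", "in docker", "debian:jessie"]
  #     => ["docker_image_locator", "in", [<portable data hashes>]]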
  def self.load_job_specific_filters attrs, orig_filters, read_users
    # Convert Job-specific @filters entries into general SQL filters.
    script_info = {"repository" => nil, "script" => nil}
    git_filters = Hash.new do |hash, key|
      hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
    end
    filters = []
    orig_filters.each do |attr, operator, operand|
      if (script_info.has_key? attr) and (operator == "=")
        if script_info[attr].nil?
          script_info[attr] = operand
        elsif script_info[attr] != operand
          raise ArgumentError.new("incompatible #{attr} filters")
        end
      end
      case operator
      when "in git"
        git_filters[attr]["min_version"] = operand
      when "not in git"
        git_filters[attr]["exclude_versions"] += Array.wrap(operand)
      when "in docker", "not in docker"
        image_hashes = Array.wrap(operand).flat_map do |search_term|
          image_search, image_tag = search_term.split(':', 2)
          Collection.
            find_all_for_docker_image(image_search, image_tag, read_users, filter_compatible_format: false).
            map(&:portable_data_hash)
        end
        filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
      else
        filters << [attr, operator, operand]
      end
    end

    # Build a real script_version filter from any "not? in git" filters.
    git_filters.each_pair do |attr, filter|
      case attr
      when "script_version"
        script_info.each_pair do |key, value|
          if value.nil?
            raise ArgumentError.new("script_version filter needs #{key} filter")
          end
        end
        filter["repository"] = script_info["repository"]
        if attrs[:script_version]
          filter["max_version"] = attrs[:script_version]
        else
          # Using HEAD, set earlier by the hash default, is fine.
        end
      when "arvados_sdk_version"
        filter["repository"] = "arvados"
      else
        raise ArgumentError.new("unknown attribute for git filter: #{attr}")
      end
      revisions = Commit.find_commit_range(filter["repository"],
                                           filter["min_version"],
                                           filter["max_version"],
                                           filter["exclude_versions"])
      if revisions.empty?
        raise ArgumentError.
          new("error searching #{filter['repository']} from " +
              "'#{filter['min_version']}' to '#{filter['max_version']}', " +
              "excluding #{filter['exclude_versions']}")
      end
      filters.append([attr, "in", revisions])
    end

    filters
  end

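  # find_reusable returns a Job whose work can satisfy a new job
  # request: a Complete job with readable, consistent output, or,
  # failing that, an equivalent job that is already Queued/Running.
  # Returns nil if nothing suitable exists. A hedged usage sketch
  # (attribute values are illustrative only):
  #
  #   Job.find_reusable(
  #     {repository: "myrepo", script: "hash", script_version: "main",
  #      script_parameters: {"input" => "..."}},
  #     {}, [], [current_user])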
  def self.find_reusable attrs, params, filters, read_users
    if filters.empty? # Translate older creation parameters into filters.
      filters =
        [["repository", "=", attrs[:repository]],
         ["script", "=", attrs[:script]],
         ["script_version", "not in git", params[:exclude_script_versions]],
        ].reject { |filter| filter.last.nil? or filter.last.empty? }
      if !params[:minimum_script_version].blank?
        filters << ["script_version", "in git",
                    params[:minimum_script_version]]
      else
        filters += default_git_filters("script_version", attrs[:repository],
                                       attrs[:script_version])
      end
      if image_search = attrs[:runtime_constraints].andand["docker_image"]
        if image_tag = attrs[:runtime_constraints]["docker_image_tag"]
          image_search += ":#{image_tag}"
        end
        image_locator = Collection.
          for_latest_docker_image(image_search).andand.portable_data_hash
      else
        image_locator = nil
      end
      filters << ["docker_image_locator", "=", image_locator]
      if sdk_version = attrs[:runtime_constraints].andand["arvados_sdk_version"]
        filters += default_git_filters("arvados_sdk_version", "arvados", sdk_version)
      end
      filters = load_job_specific_filters(attrs, filters, read_users)
    end

    # Check specified filters for some reasonableness.
    filter_names = filters.map { |f| f.first }.uniq
    ["repository", "script"].each do |req_filter|
      if not filter_names.include?(req_filter)
        return send_error("#{req_filter} filter required")
      end
    end

    # Search for a reusable Job, and return it if found.
    candidates = Job.readable_by(current_user)
    log_reuse_info { "starting with #{candidates.count} jobs readable by current user #{current_user.uuid}" }

    candidates = candidates.where(
      'state = ? or (owner_uuid = ? and state in (?))',
      Job::Complete, current_user.uuid, [Job::Queued, Job::Running])
    log_reuse_info(candidates) { "after filtering on job state ((state=Complete) or (state=Queued/Running and (submitted by current user)))" }

    digest = Job.sorted_hash_digest(attrs[:script_parameters])
    candidates = candidates.where('script_parameters_digest = ?', digest)
    log_reuse_info(candidates) { "after filtering on script_parameters_digest #{digest}" }

    candidates = candidates.where('nondeterministic is distinct from ?', true)
    log_reuse_info(candidates) { "after filtering on !nondeterministic" }

    # prefer Running jobs over Queued
    candidates = candidates.order('state desc, created_at')

    candidates = apply_filters candidates, filters
    log_reuse_info(candidates) { "after filtering on repo, script, and custom filters #{filters.inspect}" }

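    # Select among the candidates. The loop tracks three values:
    # incomplete_job (a Queued/Running equivalent to fall back on),
    # chosen (a Complete job we may reuse; false means "reuse no
    # completed job"), and chosen_output (the agreed output, used to
    # detect candidates that disagree).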
    chosen = nil
    chosen_output = nil
    incomplete_job = nil
    candidates.each do |j|
      if j.state != Job::Complete
        if !incomplete_job
          # We'll use this if we don't find a job that has completed
          log_reuse_info { "job #{j.uuid} is reusable, but unfinished; continuing search for completed jobs" }
          incomplete_job = j
        else
          log_reuse_info { "job #{j.uuid} is unfinished and we already have #{incomplete_job.uuid}; ignoring" }
        end
      elsif chosen == false
        # Ignore: we have already decided not to reuse any completed
        # job.
        log_reuse_info { "job #{j.uuid} with output #{j.output} ignored, see above" }
      elsif j.output.nil?
        log_reuse_info { "job #{j.uuid} has nil output" }
      elsif j.log.nil?
        log_reuse_info { "job #{j.uuid} has nil log" }
      elsif Rails.configuration.Containers.JobsAPI.ReuseJobIfOutputsDiffer
        if !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
          # Ignore: keep looking for an incomplete job or one whose
          # output is readable.
          log_reuse_info { "job #{j.uuid} output #{j.output} unavailable to user; continuing search" }
        elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.log)
          # Ignore: keep looking for an incomplete job or one whose
          # log is readable.
          log_reuse_info { "job #{j.uuid} log #{j.log} unavailable to user; continuing search" }
        else
          log_reuse_info { "job #{j.uuid} with output #{j.output} is reusable; decision is final." }
          return j
        end
      elsif chosen_output
        if chosen_output != j.output
          # If two matching jobs produced different outputs, run a new
          # job (or use one that's already running/queued) instead of
          # choosing one arbitrarily.
          log_reuse_info { "job #{j.uuid} output #{j.output} disagrees; forgetting about #{chosen.uuid} and ignoring any other finished jobs (see reuse_job_if_outputs_differ in application.default.yml)" }
          chosen = false
        else
          log_reuse_info { "job #{j.uuid} output #{j.output} agrees with chosen #{chosen.uuid}; continuing search in case other candidates have different outputs" }
        end
        # ...and that's the only thing we need to do once we've chosen
        # a job to reuse.
      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
        # This user cannot read the output of this job. Any other
        # completed job will have either the same output (making it
        # unusable) or a different output (making it unusable because
        # reuse_job_if_outputs_differ is turned off). Therefore,
        # any further investigation of reusable jobs is futile.
        log_reuse_info { "job #{j.uuid} output #{j.output} is unavailable to user; this means no finished job can be reused (see reuse_job_if_outputs_differ in application.default.yml)" }
        chosen = false
      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.log)
        # This user cannot read the log of this job; don't try to reuse
        # the job, but consider whether its output is consistent.
        log_reuse_info { "job #{j.uuid} log #{j.log} is unavailable to user; continuing search" }
        chosen_output = j.output
      else
        log_reuse_info { "job #{j.uuid} with output #{j.output} can be reused; continuing search in case other candidates have different outputs" }
        chosen = j
        chosen_output = j.output
      end
    end
    j = chosen || incomplete_job
    if j
      log_reuse_info { "done, #{j.uuid} was selected" }
    else
      log_reuse_info { "done, nothing suitable" }
    end
    j
  end

  def self.default_git_filters(attr_name, repo_name, refspec)
    # Add a filter to @filters for `attr_name` = the latest commit available
    # in `repo_name` at `refspec`. No filter is added if refspec can't be
    # resolved.
    commits = Commit.find_commit_range(repo_name, nil, refspec, nil)
    if commit_hash = commits.first
      [[attr_name, "=", commit_hash]]
    else
      []
    end
  end

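  # Cancel this job. When cascade is true, also cancel any
  # still-active child jobs and pipeline instances listed in
  # components. The need_transaction flag wraps the whole cascade in
  # a single database transaction.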
  def cancel(cascade: false, need_transaction: true)
    if need_transaction
      ActiveRecord::Base.transaction do
        cancel(cascade: cascade, need_transaction: false)
      end
      return
    end

    if self.state.in?([Queued, Running])
      self.state = Cancelled
      self.save!
    elsif self.state != Cancelled
      raise InvalidStateTransitionError
    end

    return if !cascade

    # cancel all children; they could be jobs or pipeline instances
    children = self.components.andand.collect{|_, u| u}.compact

    return if children.empty?

    # cancel any child jobs
    Job.where(uuid: children, state: [Queued, Running]).each do |job|
      job.cancel(cascade: cascade, need_transaction: false)
    end

    # cancel any child pipelines
    PipelineInstance.where(uuid: children, state: [PipelineInstance::RunningOnServer, PipelineInstance::RunningOnClient]).each do |pi|
      pi.cancel(cascade: cascade, need_transaction: false)
    end
  end

  protected

  def self.sorted_hash_digest h
    Digest::MD5.hexdigest(Oj.dump(deep_sort_hash(h)))
  end

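  # Illustrative example (not from the original source): deep_sort_hash
  # normalizes key order before serialization, so
  #
  #   sorted_hash_digest({"a" => 1, "b" => 2}) ==
  #   sorted_hash_digest({"b" => 2, "a" => 1})
  #
  # which is what makes script_parameters_digest a stable lookup key
  # for job reuse.
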
  def foreign_key_attributes
    super + %w(output log)
  end

  def skip_uuid_read_permission_check
    super + %w(cancelled_by_client_uuid)
  end

  def skip_uuid_existence_check
    super + %w(output log)
  end

  def set_priority
    if self.priority.nil?
      self.priority = 0
    end
    true
  end

  def ensure_script_version_is_commit
    if state == Running
      # Apparently client has already decided to go for it. This is
      # needed to run a local job using a local working directory
      # instead of a commit-ish.
      return true
    end
    if new_record? or repository_changed? or script_version_changed?
      sha1 = Commit.find_commit_range(repository,
                                      nil, script_version, nil).first
      if not sha1
        errors.add :script_version, "#{script_version} does not resolve to a commit"
        return false
      end
      if supplied_script_version.nil? or supplied_script_version.empty?
        self.supplied_script_version = script_version
      end
      self.script_version = sha1
    end
    true
  end

  def tag_version_in_internal_repository
    if state == Running
      # No point now. See ensure_script_version_is_commit.
      true
    elsif errors.any?
      # Won't be saved, and script_version might not even be valid.
      true
    elsif new_record? or repository_changed? or script_version_changed?
      uuid_was = uuid
      begin
        assign_uuid
        Commit.tag_in_internal_repository repository, script_version, uuid
      rescue
        self.uuid = uuid_was
        raise
      end
    end
  end

  def ensure_unique_submit_id
    if !submit_id.nil?
      if Job.where('submit_id=?',self.submit_id).first
        raise SubmitIdReused.new
      end
    end
    true
  end

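  # resolve_runtime_constraint looks up `key` in runtime_constraints
  # and yields its value to the given block, which must return a
  # two-element [ok, result] array: on success, `result` is written to
  # the attribute named by attr_sym; on failure, it is recorded as a
  # validation error on that attribute.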
  def resolve_runtime_constraint(key, attr_sym)
    if ((runtime_constraints.is_a? Hash) and
        (search = runtime_constraints[key]))
      ok, result = yield search
    else
      ok, result = true, nil
    end
    if ok
      send("#{attr_sym}=".to_sym, result)
    else
      errors.add(attr_sym, result)
    end
    ok
  end

  def find_arvados_sdk_version
    resolve_runtime_constraint("arvados_sdk_version",
                               :arvados_sdk_version) do |git_search|
      commits = Commit.find_commit_range("arvados",
                                         nil, git_search, nil)
      if commits.empty?
        [false, "#{git_search} does not resolve to a commit"]
      elsif not runtime_constraints["docker_image"]
        [false, "cannot be specified without a Docker image constraint"]
      else
        [true, commits.first]
      end
    end
  end

  def find_docker_image_locator
    if runtime_constraints.is_a? Hash and Rails.configuration.Containers.JobsAPI.DefaultDockerImage != ""
      runtime_constraints['docker_image'] ||=
        Rails.configuration.Containers.JobsAPI.DefaultDockerImage
    end

    resolve_runtime_constraint("docker_image",
                               :docker_image_locator) do |image_search|
      image_tag = runtime_constraints['docker_image_tag']
      if coll = Collection.for_latest_docker_image(image_search, image_tag)
        [true, coll.portable_data_hash]
      else
        [false, "not found for #{image_search}"]
      end
    end
  end

  def permission_to_update
    if is_locked_by_uuid_was and !(current_user and
                                   (current_user.uuid == is_locked_by_uuid_was or
                                    current_user.uuid == system_user.uuid))
      if script_changed? or
          script_parameters_changed? or
          script_version_changed? or
          (!cancelled_at_was.nil? and
           (cancelled_by_client_uuid_changed? or
            cancelled_by_user_uuid_changed? or
            cancelled_at_changed?)) or
          started_at_changed? or
          finished_at_changed? or
          running_changed? or
          success_changed? or
          output_changed? or
          log_changed? or
          tasks_summary_changed? or
          (state_changed? && state != Cancelled) or
          components_changed?
        logger.warn "User #{current_user.uuid if current_user} tried to change protected job attributes on locked #{self.class.to_s} #{uuid_was}"
        return false
      end
    end
    if !is_locked_by_uuid_changed?
      super
    else
      if !current_user
        logger.warn "Anonymous user tried to change lock on #{self.class.to_s} #{uuid_was}"
        false
      elsif is_locked_by_uuid_was and is_locked_by_uuid_was != current_user.uuid
        logger.warn "User #{current_user.uuid} tried to steal lock on #{self.class.to_s} #{uuid_was} from #{is_locked_by_uuid_was}"
        false
      elsif !is_locked_by_uuid.nil? and is_locked_by_uuid != current_user.uuid
        logger.warn "User #{current_user.uuid} tried to lock #{self.class.to_s} #{uuid_was} with uuid #{is_locked_by_uuid}"
        false
      else
        super
      end
    end
  end

  def update_modified_by_fields
    if self.cancelled_at_changed?
      # Ensure cancelled_at cannot be set to arbitrary non-now times,
      # or changed once it is set.
      if self.cancelled_at and not self.cancelled_at_was
        self.cancelled_at = db_current_time
        self.cancelled_by_user_uuid = current_user.uuid
        self.cancelled_by_client_uuid = current_api_client.andand.uuid
        @need_crunch_dispatch_trigger = true
      else
        self.cancelled_at = self.cancelled_at_was
        self.cancelled_by_user_uuid = self.cancelled_by_user_uuid_was
        self.cancelled_by_client_uuid = self.cancelled_by_client_uuid_was
      end
    end
    super
  end

  def trigger_crunch_dispatch_if_cancelled
    if @need_crunch_dispatch_trigger
      File.open(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger, 'wb') do
        # That's all, just create/touch a file for crunch-job to see.
      end
    end
  end

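  # Keeps started_at/finished_at/cancelled_at in step with state, and
  # maintains the legacy flags for older clients:
  #   Queued           -> running=false, success=nil
  #   Running          -> running=true,  success=nil
  #   Cancelled/Failed -> running=false, success=false
  #   Complete         -> running=false, success=true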
  def update_timestamps_when_state_changes
    return if not (state_changed? or new_record?)

    case state
    when Running
      self.started_at ||= db_current_time
    when Failed, Complete
      self.finished_at ||= db_current_time
    when Cancelled
      self.cancelled_at ||= db_current_time
    end

    # TODO: Remove the following case block when old "success" and
    # "running" attrs go away. Until then, this ensures we still
    # expose correct success/running flags to older clients, even if
    # some new clients are writing only the new state attribute.
    case state
    when Queued
      self.running = false
      self.success = nil
    when Running
      self.running = true
      self.success = nil
    when Cancelled, Failed
      self.running = false
      self.success = false
    when Complete
      self.running = false
      self.success = true
    end
    self.running ||= false # Default to false instead of nil.

    @need_crunch_dispatch_trigger = true

    true
  end

  def update_state_from_old_state_attrs
    # If a client has touched the legacy state attrs, update the
    # "state" attr to agree with the updated values of the legacy
    # attrs.
    #
    # TODO: Remove this method when old "success" and "running" attrs
    # go away.
    if cancelled_at_changed? or
        success_changed? or
        running_changed? or
        state.nil?
      if cancelled_at
        self.state = Cancelled
      elsif success == false
        self.state = Failed
      elsif success == true
        self.state = Complete
      elsif running == true
        self.state = Running
      else
        self.state = Queued
      end
    end
    true
  end

  def validate_status
    if self.state.in?(States)
      true
    else
      errors.add :state, "#{state.inspect} must be one of: #{States.inspect}"
      false
    end
  end

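  # Legal state transitions, as enforced below:
  #   (unset)  -> any state
  #   Queued   -> any state
  #   Running  -> Complete | Failed | Cancelled
  #   Complete | Failed | Cancelled -> (terminal; no further changes)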
  def validate_state_change
    ok = true
    if self.state_changed?
      ok = case self.state_was
           when nil
             # state isn't set yet
             true
           when Queued
             # Permit going from queued to any state
             true
           when Running
             # From running, may only transition to a finished state
             [Complete, Failed, Cancelled].include? self.state
           when Complete, Failed, Cancelled
             # Once in a finished state, don't permit any more state changes
             false
           else
             # Any other state transition is also invalid
             false
           end
      if not ok
        errors.add :state, "invalid change from #{self.state_was} to #{self.state}"
      end
    end
    ok
  end

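  # For example (illustrative uuid): a script_parameters value such as
  # {"input" => "zzzzz-4zz18-012345678901234"} fails validation;
  # clients must reference collections by portable_data_hash instead.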
  def ensure_no_collection_uuids_in_script_params
    # Fail validation if any script_parameters field includes a string
    # containing a collection uuid pattern.
    if self.script_parameters_changed?
      if recursive_hash_search(self.script_parameters, Collection.uuid_regex)
        self.errors.add :script_parameters, "must use portable_data_hash instead of collection uuid"
        return false
      end
    end
    true
  end

  # recursive_hash_search searches recursively through hashes and
  # arrays in 'thing' for string fields matching regular expression
  # 'pattern'. Returns true if pattern is found, false otherwise.
  def recursive_hash_search thing, pattern
    if thing.is_a? Hash
      thing.each do |k, v|
        return true if recursive_hash_search v, pattern
      end
    elsif thing.is_a? Array
      thing.each do |elem|
        return true if recursive_hash_search elem, pattern
      end
    elsif thing.is_a? String
      return true if thing.match pattern
    end
    false
  end
end