13996: Updating API server to use new config object WIP
[arvados.git] / services / api / app / models / job.rb
1 # Copyright (C) The Arvados Authors. All rights reserved.
2 #
3 # SPDX-License-Identifier: AGPL-3.0
4
5 require 'log_reuse_info'
6 require 'safe_json'
7
8 class Job < ArvadosModel
9   include HasUuid
10   include KindAndEtag
11   include CommonApiTemplate
12   extend CurrentApiClient
13   extend LogReuseInfo
14   serialize :components, Hash
15   serialize :script_parameters, Hash
16   serialize :runtime_constraints, Hash
17   serialize :tasks_summary, Hash
18   before_create :ensure_unique_submit_id
19   after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
20   before_validation :set_priority
21   before_validation :update_state_from_old_state_attrs
22   before_validation :update_script_parameters_digest
23   validate :ensure_script_version_is_commit
24   validate :find_docker_image_locator
25   validate :find_arvados_sdk_version
26   validate :validate_status
27   validate :validate_state_change
28   validate :ensure_no_collection_uuids_in_script_params
29   before_save :tag_version_in_internal_repository
30   before_save :update_timestamps_when_state_changes
31
32   has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
33   has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)
34
35   class SubmitIdReused < RequestError
36   end
37
38   api_accessible :user, extend: :common do |t|
39     t.add :submit_id
40     t.add :priority
41     t.add :script
42     t.add :script_parameters
43     t.add :script_version
44     t.add :cancelled_at
45     t.add :cancelled_by_client_uuid
46     t.add :cancelled_by_user_uuid
47     t.add :started_at
48     t.add :finished_at
49     t.add :output
50     t.add :success
51     t.add :running
52     t.add :state
53     t.add :is_locked_by_uuid
54     t.add :log
55     t.add :runtime_constraints
56     t.add :tasks_summary
57     t.add :nondeterministic
58     t.add :repository
59     t.add :supplied_script_version
60     t.add :arvados_sdk_version
61     t.add :docker_image_locator
62     t.add :queue_position
63     t.add :node_uuids
64     t.add :description
65     t.add :components
66   end
67
68   # Supported states for a job
69   States = [
70             (Queued = 'Queued'),
71             (Running = 'Running'),
72             (Cancelled = 'Cancelled'),
73             (Failed = 'Failed'),
74             (Complete = 'Complete'),
75            ]
76
77   after_initialize do
78     @need_crunch_dispatch_trigger = false
79   end
80
81   def self.limit_index_columns_read
82     ["components"]
83   end
84
85   def self.protected_attributes
86     [:arvados_sdk_version, :docker_image_locator]
87   end
88
  # Force the legacy "finished" attributes into a consistent terminal
  # state: stamp finished_at if unset, coerce a nil success flag to false
  # (preserving an explicit true/false), and clear running.
  def assert_finished
    update_attributes(finished_at: finished_at || db_current_time,
                      success: success.nil? ? false : success,
                      running: false)
  end
94
95   def node_uuids
96     nodes.map(&:uuid)
97   end
98
99   def self.queue
100     self.where('state = ?', Queued).order('priority desc, created_at')
101   end
102
103   def queue_position
104     # We used to report this accurately, but the implementation made queue
105     # API requests O(n**2) for the size of the queue.  See #8800.
106     # We've soft-disabled it because it's not clear we even want this
107     # functionality: now that we have Node Manager with support for multiple
108     # node sizes, "queue position" tells you very little about when a job will
109     # run.
110     state == Queued ? 0 : nil
111   end
112
113   def self.running
114     self.where('running = ?', true).
115       order('priority desc, created_at')
116   end
117
  # Atomically transition this job from Queued to Running on behalf of
  # `locked_by_uuid` (presumably a dispatcher token — the caller supplies
  # it).  with_lock takes a row-level DB lock and reloads, so two
  # concurrent callers cannot both succeed; raises AlreadyLockedError if
  # the job is no longer Queued or already carries a lock.
  def lock locked_by_uuid
    with_lock do
      unless self.state == Queued and self.is_locked_by_uuid.nil?
        raise AlreadyLockedError
      end
      self.state = Running
      self.is_locked_by_uuid = locked_by_uuid
      self.save!
    end
  end
128
  # before_validation: keep script_parameters_digest in sync with
  # script_parameters so find_reusable can match candidate jobs on an
  # indexed digest instead of comparing full parameter hashes.  Returns
  # the (truthy) digest string, so the callback chain is never halted.
  def update_script_parameters_digest
    self.script_parameters_digest = self.class.sorted_hash_digest(script_parameters)
  end
132
133   def self.searchable_columns operator
134     super - ["script_parameters_digest"]
135   end
136
137   def self.full_text_searchable_columns
138     super - ["script_parameters_digest"]
139   end
140
  # Convert Job-specific filter operators into ordinary SQL-able filters:
  #
  #   "in git" / "not in git"       - git ref constraints on
  #       script_version or arvados_sdk_version, resolved via
  #       Commit.find_commit_range into explicit commit lists and emitted
  #       as ["attr", "in", revisions].
  #   "in docker" / "not in docker" - Docker image names (optionally
  #       "name:tag"), resolved to portable data hashes of collections
  #       readable by read_users, emitted with plain "in"/"not in".
  #
  # All other filters pass through unchanged.  attrs supplies request
  # attributes (attrs[:script_version] caps the git range).  Raises
  # ArgumentError on contradictory repository/script filters, git filters
  # on unknown attributes, or unresolvable commit ranges.
  def self.load_job_specific_filters attrs, orig_filters, read_users
    # Convert Job-specific @filters entries into general SQL filters.
    script_info = {"repository" => nil, "script" => nil}
    # Per-attribute git range; the block supplies defaults lazily on first
    # access ("HEAD" max, nothing excluded).
    git_filters = Hash.new do |hash, key|
      hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
    end
    filters = []
    orig_filters.each do |attr, operator, operand|
      if (script_info.has_key? attr) and (operator == "=")
        if script_info[attr].nil?
          script_info[attr] = operand
        elsif script_info[attr] != operand
          raise ArgumentError.new("incompatible #{attr} filters")
        end
      end
      case operator
      when "in git"
        git_filters[attr]["min_version"] = operand
      when "not in git"
        git_filters[attr]["exclude_versions"] += Array.wrap(operand)
      when "in docker", "not in docker"
        image_hashes = Array.wrap(operand).flat_map do |search_term|
          image_search, image_tag = search_term.split(':', 2)
          Collection.
            find_all_for_docker_image(image_search, image_tag, read_users, filter_compatible_format: false).
            map(&:portable_data_hash)
        end
        filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
      else
        filters << [attr, operator, operand]
      end
    end

    # Build a real script_version filter from any "not? in git" filters.
    git_filters.each_pair do |attr, filter|
      case attr
      when "script_version"
        # A git range on script_version is meaningless without knowing
        # which repository (and script) it applies to.
        script_info.each_pair do |key, value|
          if value.nil?
            raise ArgumentError.new("script_version filter needs #{key} filter")
          end
        end
        filter["repository"] = script_info["repository"]
        if attrs[:script_version]
          filter["max_version"] = attrs[:script_version]
        else
          # Using HEAD, set earlier by the hash default, is fine.
        end
      when "arvados_sdk_version"
        # The SDK always comes from the "arvados" repository.
        filter["repository"] = "arvados"
      else
        raise ArgumentError.new("unknown attribute for git filter: #{attr}")
      end
      revisions = Commit.find_commit_range(filter["repository"],
                                           filter["min_version"],
                                           filter["max_version"],
                                           filter["exclude_versions"])
      if revisions.empty?
        raise ArgumentError.
          new("error searching #{filter['repository']} from " +
              "'#{filter['min_version']}' to '#{filter['max_version']}', " +
              "excluding #{filter['exclude_versions']}")
      end
      filters.append([attr, "in", revisions])
    end

    filters
  end
209
  # Look for an existing job that can satisfy a job-creation request so
  # it can be reused instead of running the work again.  attrs/params come
  # from the client; filters (synthesized here from attrs/params when
  # empty) narrow the candidates; read_users scopes Docker image lookups.
  #
  # Returns: a Complete job with readable log and unambiguous readable
  # output when one exists; otherwise a matching Queued/Running job
  # submitted by the current user; otherwise nil.  Every decision is
  # recorded via log_reuse_info for debugging reuse behavior.
  def self.find_reusable attrs, params, filters, read_users
    if filters.empty?  # Translate older creation parameters into filters.
      filters =
        [["repository", "=", attrs[:repository]],
         ["script", "=", attrs[:script]],
         ["script_version", "not in git", params[:exclude_script_versions]],
        ].reject { |filter| filter.last.nil? or filter.last.empty? }
      if !params[:minimum_script_version].blank?
        filters << ["script_version", "in git",
                     params[:minimum_script_version]]
      else
        filters += default_git_filters("script_version", attrs[:repository],
                                       attrs[:script_version])
      end
      if image_search = attrs[:runtime_constraints].andand["docker_image"]
        if image_tag = attrs[:runtime_constraints]["docker_image_tag"]
          image_search += ":#{image_tag}"
        end
        image_locator = Collection.
          for_latest_docker_image(image_search).andand.portable_data_hash
      else
        image_locator = nil
      end
      filters << ["docker_image_locator", "=", image_locator]
      if sdk_version = attrs[:runtime_constraints].andand["arvados_sdk_version"]
        filters += default_git_filters("arvados_sdk_version", "arvados", sdk_version)
      end
      filters = load_job_specific_filters(attrs, filters, read_users)
    end

    # Check specified filters for some reasonableness.
    filter_names = filters.map { |f| f.first }.uniq
    ["repository", "script"].each do |req_filter|
      if not filter_names.include?(req_filter)
        return send_error("#{req_filter} filter required")
      end
    end

    # Search for a reusable Job, and return it if found.
    candidates = Job.readable_by(current_user)
    log_reuse_info { "starting with #{candidates.count} jobs readable by current user #{current_user.uuid}" }

    # Completed jobs by anyone are candidates; unfinished jobs only if
    # owned by the requesting user.
    candidates = candidates.where(
      'state = ? or (owner_uuid = ? and state in (?))',
      Job::Complete, current_user.uuid, [Job::Queued, Job::Running])
    log_reuse_info(candidates) { "after filtering on job state ((state=Complete) or (state=Queued/Running and (submitted by current user)))" }

    digest = Job.sorted_hash_digest(attrs[:script_parameters])
    candidates = candidates.where('script_parameters_digest = ?', digest)
    log_reuse_info(candidates) { "after filtering on script_parameters_digest #{digest}" }

    # "is distinct from true" also accepts NULL, unlike "!= true".
    candidates = candidates.where('nondeterministic is distinct from ?', true)
    log_reuse_info(candidates) { "after filtering on !nondeterministic" }

    # prefer Running jobs over Queued
    candidates = candidates.order('state desc, created_at')

    candidates = apply_filters candidates, filters
    log_reuse_info(candidates) { "after filtering on repo, script, and custom filters #{filters.inspect}" }

    # chosen == false is a sentinel meaning "no completed job may be
    # reused"; chosen_output tracks the output all completed candidates
    # must agree on.
    chosen = nil
    chosen_output = nil
    incomplete_job = nil
    candidates.each do |j|
      if j.state != Job::Complete
        if !incomplete_job
          # We'll use this if we don't find a job that has completed
          log_reuse_info { "job #{j.uuid} is reusable, but unfinished; continuing search for completed jobs" }
          incomplete_job = j
        else
          log_reuse_info { "job #{j.uuid} is unfinished and we already have #{incomplete_job.uuid}; ignoring" }
        end
      elsif chosen == false
        # Ignore: we have already decided not to reuse any completed
        # job.
        log_reuse_info { "job #{j.uuid} with output #{j.output} ignored, see above" }
      elsif j.output.nil?
        log_reuse_info { "job #{j.uuid} has nil output" }
      elsif j.log.nil?
        log_reuse_info { "job #{j.uuid} has nil log" }
      elsif Rails.configuration.Containers["JobsAPI"]["ReuseJobIfOutputsDiffer"]
        # Output agreement is not required: take the first completed job
        # whose output and log are both readable.
        if !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
          # Ignore: keep looking for an incomplete job or one whose
          # output is readable.
          log_reuse_info { "job #{j.uuid} output #{j.output} unavailable to user; continuing search" }
        elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.log)
          # Ignore: keep looking for an incomplete job or one whose
          # log is readable.
          log_reuse_info { "job #{j.uuid} log #{j.log} unavailable to user; continuing search" }
        else
          log_reuse_info { "job #{j.uuid} with output #{j.output} is reusable; decision is final." }
          return j
        end
      elsif chosen_output
        if chosen_output != j.output
          # If two matching jobs produced different outputs, run a new
          # job (or use one that's already running/queued) instead of
          # choosing one arbitrarily.
          log_reuse_info { "job #{j.uuid} output #{j.output} disagrees; forgetting about #{chosen.uuid} and ignoring any other finished jobs (see reuse_job_if_outputs_differ in application.default.yml)" }
          chosen = false
        else
          log_reuse_info { "job #{j.uuid} output #{j.output} agrees with chosen #{chosen.uuid}; continuing search in case other candidates have different outputs" }
        end
        # ...and that's the only thing we need to do once we've chosen
        # a job to reuse.
      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
        # This user cannot read the output of this job. Any other
        # completed job will have either the same output (making it
        # unusable) or a different output (making it unusable because
        # reuse_job_if_outputs_different is turned off). Therefore,
        # any further investigation of reusable jobs is futile.
        log_reuse_info { "job #{j.uuid} output #{j.output} is unavailable to user; this means no finished job can be reused (see reuse_job_if_outputs_differ in application.default.yml)" }
        chosen = false
      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.log)
        # This user cannot read the log of this job, don't try to reuse the
        # job but consider if the output is consistent.
        log_reuse_info { "job #{j.uuid} log #{j.log} is unavailable to user; continuing search" }
        chosen_output = j.output
      else
        log_reuse_info { "job #{j.uuid} with output #{j.output} can be reused; continuing search in case other candidates have different outputs" }
        chosen = j
        chosen_output = j.output
      end
    end
    j = chosen || incomplete_job
    if j
      log_reuse_info { "done, #{j.uuid} was selected" }
    else
      log_reuse_info { "done, nothing suitable" }
    end
    return j
  end
342
343   def self.default_git_filters(attr_name, repo_name, refspec)
344     # Add a filter to @filters for `attr_name` = the latest commit available
345     # in `repo_name` at `refspec`.  No filter is added if refspec can't be
346     # resolved.
347     commits = Commit.find_commit_range(repo_name, nil, refspec, nil)
348     if commit_hash = commits.first
349       [[attr_name, "=", commit_hash]]
350     else
351       []
352     end
353   end
354
  # Cancel this job; with cascade: true, also cancel any child jobs and
  # pipeline instances referenced from `components`.  The entire cascade
  # runs inside one transaction: the public entry point opens it, then
  # recursive/child calls pass need_transaction: false to avoid nesting.
  # Cancelling an already-Cancelled job is a no-op; any other finished
  # state raises InvalidStateTransitionError.
  def cancel(cascade: false, need_transaction: true)
    if need_transaction
      ActiveRecord::Base.transaction do
        cancel(cascade: cascade, need_transaction: false)
      end
      return
    end

    if self.state.in?([Queued, Running])
      self.state = Cancelled
      self.save!
    elsif self.state != Cancelled
      raise InvalidStateTransitionError
    end

    return if !cascade

    # cancel all children; they could be jobs or pipeline instances
    children = self.components.andand.collect{|_, u| u}.compact

    return if children.empty?

    # cancel any child jobs
    Job.where(uuid: children, state: [Queued, Running]).each do |job|
      job.cancel(cascade: cascade, need_transaction: false)
    end

    # cancel any child pipelines
    PipelineInstance.where(uuid: children, state: [PipelineInstance::RunningOnServer, PipelineInstance::RunningOnClient]).each do |pi|
      pi.cancel(cascade: cascade, need_transaction: false)
    end
  end
387
388   protected
389
  # MD5 digest of hash `h` after deep-sorting its keys (deep_sort_hash,
  # from ArvadosModel), so logically equal hashes always yield the same
  # digest regardless of key insertion order.  Backs
  # script_parameters_digest.
  def self.sorted_hash_digest h
    Digest::MD5.hexdigest(Oj.dump(deep_sort_hash(h)))
  end
393
394   def foreign_key_attributes
395     super + %w(output log)
396   end
397
398   def skip_uuid_read_permission_check
399     super + %w(cancelled_by_client_uuid)
400   end
401
402   def skip_uuid_existence_check
403     super + %w(output log)
404   end
405
406   def set_priority
407     if self.priority.nil?
408       self.priority = 0
409     end
410     true
411   end
412
  # Validation: resolve script_version to a full commit sha1, keeping the
  # client's original value in supplied_script_version.  Skipped for
  # Running jobs, which may point at a local working directory instead of
  # a commit-ish.  Adds a validation error (and returns false) when the
  # version does not resolve.
  def ensure_script_version_is_commit
    if state == Running
      # Apparently client has already decided to go for it. This is
      # needed to run a local job using a local working directory
      # instead of a commit-ish.
      return true
    end
    if new_record? or repository_changed? or script_version_changed?
      sha1 = Commit.find_commit_range(repository,
                                      nil, script_version, nil).first
      if not sha1
        errors.add :script_version, "#{script_version} does not resolve to a commit"
        return false
      end
      if supplied_script_version.nil? or supplied_script_version.empty?
        self.supplied_script_version = script_version
      end
      self.script_version = sha1
    end
    true
  end
434
  # before_save: tag the resolved script_version in the internal git
  # repository so the exact code being run is preserved even if the
  # source branch later moves.  The uuid is assigned early (assign_uuid)
  # because it is used as the tag name; if tagging fails, the previous
  # uuid is restored before re-raising.
  def tag_version_in_internal_repository
    if state == Running
      # No point now. See ensure_script_version_is_commit.
      true
    elsif errors.any?
      # Won't be saved, and script_version might not even be valid.
      true
    elsif new_record? or repository_changed? or script_version_changed?
      uuid_was = uuid
      begin
        assign_uuid
        Commit.tag_in_internal_repository repository, script_version, uuid
      rescue
        self.uuid = uuid_was
        raise
      end
    end
  end
453
454   def ensure_unique_submit_id
455     if !submit_id.nil?
456       if Job.where('submit_id=?',self.submit_id).first
457         raise SubmitIdReused.new
458       end
459     end
460     true
461   end
462
  # Shared helper for validators that resolve a runtime_constraints entry
  # into a model attribute.  If runtime_constraints[key] is present, it is
  # yielded to the block, which must return [ok, result]; otherwise
  # [true, nil] is assumed (constraint absent => attribute cleared).  On
  # ok, result is assigned to attr_sym; on failure, result is recorded as
  # a validation error message on attr_sym.  Returns ok.
  def resolve_runtime_constraint(key, attr_sym)
    if ((runtime_constraints.is_a? Hash) and
        (search = runtime_constraints[key]))
      ok, result = yield search
    else
      ok, result = true, nil
    end
    if ok
      send("#{attr_sym}=".to_sym, result)
    else
      errors.add(attr_sym, result)
    end
    ok
  end
477
  # Validation: resolve runtime_constraints["arvados_sdk_version"] (a git
  # ref in the "arvados" repository) to a commit and store it in
  # arvados_sdk_version.  Fails validation if the ref does not resolve,
  # or if an SDK version is requested without a Docker image constraint.
  def find_arvados_sdk_version
    resolve_runtime_constraint("arvados_sdk_version",
                               :arvados_sdk_version) do |git_search|
      commits = Commit.find_commit_range("arvados",
                                         nil, git_search, nil)
      if commits.empty?
        [false, "#{git_search} does not resolve to a commit"]
      elsif not runtime_constraints["docker_image"]
        [false, "cannot be specified without a Docker image constraint"]
      else
        [true, commits.first]
      end
    end
  end
492
  # Validation: resolve runtime_constraints["docker_image"] (defaulted
  # from cluster config if unset) plus optional "docker_image_tag" to the
  # portable data hash of the matching collection, stored in
  # docker_image_locator.  Fails validation when no matching image is
  # found.
  def find_docker_image_locator
    if runtime_constraints.is_a? Hash
      # Side effect: writes the configured default back into
      # runtime_constraints when no image was requested.
      runtime_constraints['docker_image'] ||=
        Rails.configuration.Containers["JobsAPI"]["DefaultDockerImage"]
    end

    resolve_runtime_constraint("docker_image",
                               :docker_image_locator) do |image_search|
      image_tag = runtime_constraints['docker_image_tag']
      if coll = Collection.for_latest_docker_image(image_search, image_tag)
        [true, coll.portable_data_hash]
      else
        [false, "not found for #{image_search}"]
      end
    end
  end
509
  # Authorization hook: while a job is locked, only the lock holder or
  # the system user may change its work-related attributes (a state
  # change to Cancelled is still allowed for other writers).  Changes to
  # the lock itself are limited to the current user acquiring a free
  # lock or releasing their own; anything else is rejected with a
  # warning in the log.
  def permission_to_update
    if is_locked_by_uuid_was and !(current_user and
                                   (current_user.uuid == is_locked_by_uuid_was or
                                    current_user.uuid == system_user.uuid))
      if script_changed? or
          script_parameters_changed? or
          script_version_changed? or
          (!cancelled_at_was.nil? and
           (cancelled_by_client_uuid_changed? or
            cancelled_by_user_uuid_changed? or
            cancelled_at_changed?)) or
          started_at_changed? or
          finished_at_changed? or
          running_changed? or
          success_changed? or
          output_changed? or
          log_changed? or
          tasks_summary_changed? or
          (state_changed? && state != Cancelled) or
          components_changed?
        logger.warn "User #{current_user.uuid if current_user} tried to change protected job attributes on locked #{self.class.to_s} #{uuid_was}"
        return false
      end
    end
    if !is_locked_by_uuid_changed?
      super
    else
      if !current_user
        logger.warn "Anonymous user tried to change lock on #{self.class.to_s} #{uuid_was}"
        false
      elsif is_locked_by_uuid_was and is_locked_by_uuid_was != current_user.uuid
        logger.warn "User #{current_user.uuid} tried to steal lock on #{self.class.to_s} #{uuid_was} from #{is_locked_by_uuid_was}"
        false
      elsif !is_locked_by_uuid.nil? and is_locked_by_uuid != current_user.uuid
        logger.warn "User #{current_user.uuid} tried to lock #{self.class.to_s} #{uuid_was} with uuid #{is_locked_by_uuid}"
        false
      else
        super
      end
    end
  end
551
  # Extends the usual modified_by bookkeeping to police cancelled_at: it
  # may only be set (never to an arbitrary client-chosen time — the
  # server clock is used) when previously nil, and the cancelling
  # user/client are recorded alongside.  Any other attempted change is
  # silently reverted to the stored values.  A fresh cancellation flags a
  # crunch-dispatch trigger for after commit.
  def update_modified_by_fields
    if self.cancelled_at_changed?
      # Ensure cancelled_at cannot be set to arbitrary non-now times,
      # or changed once it is set.
      if self.cancelled_at and not self.cancelled_at_was
        self.cancelled_at = db_current_time
        self.cancelled_by_user_uuid = current_user.uuid
        self.cancelled_by_client_uuid = current_api_client.andand.uuid
        @need_crunch_dispatch_trigger = true
      else
        self.cancelled_at = self.cancelled_at_was
        self.cancelled_by_user_uuid = self.cancelled_by_user_uuid_was
        self.cancelled_by_client_uuid = self.cancelled_by_client_uuid_was
      end
    end
    super
  end
569
  # after_commit (on update): if this save flagged a dispatch trigger
  # (state change or cancellation), create/touch the configured trigger
  # file so crunch-dispatch notices promptly instead of waiting for its
  # next poll.
  def trigger_crunch_dispatch_if_cancelled
    if @need_crunch_dispatch_trigger
      File.open(Rails.configuration.Containers["JobsAPI"]["CrunchRefreshTrigger"], 'wb') do
        # That's all, just create/touch a file for crunch-job to see.
      end
    end
  end
577
  # before_save: when state changes (or on create), stamp the timestamp
  # matching the new state (without clobbering one already set) and keep
  # the legacy running/success flags consistent for older clients.  Also
  # flags a crunch-dispatch trigger for after commit.
  def update_timestamps_when_state_changes
    return if not (state_changed? or new_record?)

    case state
    when Running
      self.started_at ||= db_current_time
    when Failed, Complete
      self.finished_at ||= db_current_time
    when Cancelled
      self.cancelled_at ||= db_current_time
    end

    # TODO: Remove the following case block when old "success" and
    # "running" attrs go away. Until then, this ensures we still
    # expose correct success/running flags to older clients, even if
    # some new clients are writing only the new state attribute.
    case state
    when Queued
      self.running = false
      self.success = nil
    when Running
      self.running = true
      self.success = nil
    when Cancelled, Failed
      self.running = false
      self.success = false
    when Complete
      self.running = false
      self.success = true
    end
    self.running ||= false # Default to false instead of nil.

    @need_crunch_dispatch_trigger = true

    # Explicit true so this before_save callback never halts the save.
    true
  end
614
  # before_validation: derive the canonical "state" attribute from the
  # legacy cancelled_at/success/running attributes whenever a client has
  # touched one of them (or state is still unset).  Precedence:
  # cancelled > failed > complete > running > queued.
  def update_state_from_old_state_attrs
    # If a client has touched the legacy state attrs, update the
    # "state" attr to agree with the updated values of the legacy
    # attrs.
    #
    # TODO: Remove this method when old "success" and "running" attrs
    # go away.
    if cancelled_at_changed? or
        success_changed? or
        running_changed? or
        state.nil?
      if cancelled_at
        self.state = Cancelled
      elsif success == false
        self.state = Failed
      elsif success == true
        self.state = Complete
      elsif running == true
        self.state = Running
      else
        self.state = Queued
      end
    end
    true
  end
640
641   def validate_status
642     if self.state.in?(States)
643       true
644     else
645       errors.add :state, "#{state.inspect} must be one of: #{States.inspect}"
646       false
647     end
648   end
649
  # Validation: enforce the job state machine on changes to `state`.
  # Queued may transition anywhere; Running only to a terminal state
  # (Complete/Failed/Cancelled); terminal states are final.  A nil
  # previous state (not yet set) is always allowed.  Records a
  # validation error and returns false on an illegal transition.
  def validate_state_change
    ok = true
    if self.state_changed?
      ok = case self.state_was
           when nil
             # state isn't set yet
             true
           when Queued
             # Permit going from queued to any state
             true
           when Running
             # From running, may only transition to a finished state
             [Complete, Failed, Cancelled].include? self.state
           when Complete, Failed, Cancelled
             # Once in a finished state, don't permit any more state changes
             false
           else
             # Any other state transition is also invalid
             false
           end
      if not ok
        errors.add :state, "invalid change from #{self.state_was} to #{self.state}"
      end
    end
    ok
  end
676
677   def ensure_no_collection_uuids_in_script_params
678     # Fail validation if any script_parameters field includes a string containing a
679     # collection uuid pattern.
680     if self.script_parameters_changed?
681       if recursive_hash_search(self.script_parameters, Collection.uuid_regex)
682         self.errors.add :script_parameters, "must use portable_data_hash instead of collection uuid"
683         return false
684       end
685     end
686     true
687   end
688
689   # recursive_hash_search searches recursively through hashes and
690   # arrays in 'thing' for string fields matching regular expression
691   # 'pattern'.  Returns true if pattern is found, false otherwise.
692   def recursive_hash_search thing, pattern
693     if thing.is_a? Hash
694       thing.each do |k, v|
695         return true if recursive_hash_search v, pattern
696       end
697     elsif thing.is_a? Array
698       thing.each do |k|
699         return true if recursive_hash_search k, pattern
700       end
701     elsif thing.is_a? String
702       return true if thing.match pattern
703     end
704     false
705   end
706 end