Merge branch '8784-dir-listings'
[arvados.git] / services / api / app / models / job.rb
1 # Copyright (C) The Arvados Authors. All rights reserved.
2 #
3 # SPDX-License-Identifier: AGPL-3.0
4
5 require 'log_reuse_info'
6 require 'safe_json'
7
8 class Job < ArvadosModel
9   include HasUuid
10   include KindAndEtag
11   include CommonApiTemplate
12   extend CurrentApiClient
13   extend LogReuseInfo
14   serialize :components, Hash
15   attr_protected :arvados_sdk_version, :docker_image_locator
16   serialize :script_parameters, Hash
17   serialize :runtime_constraints, Hash
18   serialize :tasks_summary, Hash
19   before_create :ensure_unique_submit_id
20   after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
21   before_validation :set_priority
22   before_validation :update_state_from_old_state_attrs
23   before_validation :update_script_parameters_digest
24   validate :ensure_script_version_is_commit
25   validate :find_docker_image_locator
26   validate :find_arvados_sdk_version
27   validate :validate_status
28   validate :validate_state_change
29   validate :ensure_no_collection_uuids_in_script_params
30   before_save :tag_version_in_internal_repository
31   before_save :update_timestamps_when_state_changes
32
33   has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
34   has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)
35
36   class SubmitIdReused < StandardError
37   end
38
39   api_accessible :user, extend: :common do |t|
40     t.add :submit_id
41     t.add :priority
42     t.add :script
43     t.add :script_parameters
44     t.add :script_version
45     t.add :cancelled_at
46     t.add :cancelled_by_client_uuid
47     t.add :cancelled_by_user_uuid
48     t.add :started_at
49     t.add :finished_at
50     t.add :output
51     t.add :success
52     t.add :running
53     t.add :state
54     t.add :is_locked_by_uuid
55     t.add :log
56     t.add :runtime_constraints
57     t.add :tasks_summary
58     t.add :nondeterministic
59     t.add :repository
60     t.add :supplied_script_version
61     t.add :arvados_sdk_version
62     t.add :docker_image_locator
63     t.add :queue_position
64     t.add :node_uuids
65     t.add :description
66     t.add :components
67   end
68
69   # Supported states for a job
70   States = [
71             (Queued = 'Queued'),
72             (Running = 'Running'),
73             (Cancelled = 'Cancelled'),
74             (Failed = 'Failed'),
75             (Complete = 'Complete'),
76            ]
77
78   after_initialize do
79     @need_crunch_dispatch_trigger = false
80   end
81
82   def self.limit_index_columns_read
83     ["components"]
84   end
85
86   def assert_finished
87     update_attributes(finished_at: finished_at || db_current_time,
88                       success: success.nil? ? false : success,
89                       running: false)
90   end
91
92   def node_uuids
93     nodes.map(&:uuid)
94   end
95
96   def self.queue
97     self.where('state = ?', Queued).order('priority desc, created_at')
98   end
99
100   def queue_position
101     # We used to report this accurately, but the implementation made queue
102     # API requests O(n**2) for the size of the queue.  See #8800.
103     # We've soft-disabled it because it's not clear we even want this
104     # functionality: now that we have Node Manager with support for multiple
105     # node sizes, "queue position" tells you very little about when a job will
106     # run.
107     state == Queued ? 0 : nil
108   end
109
110   def self.running
111     self.where('running = ?', true).
112       order('priority desc, created_at')
113   end
114
115   def lock locked_by_uuid
116     with_lock do
117       unless self.state == Queued and self.is_locked_by_uuid.nil?
118         raise AlreadyLockedError
119       end
120       self.state = Running
121       self.is_locked_by_uuid = locked_by_uuid
122       self.save!
123     end
124   end
125
126   def update_script_parameters_digest
127     self.script_parameters_digest = self.class.sorted_hash_digest(script_parameters)
128   end
129
130   def self.searchable_columns operator
131     super - ["script_parameters_digest"]
132   end
133
134   def self.full_text_searchable_columns
135     super - ["script_parameters_digest"]
136   end
137
138   def self.load_job_specific_filters attrs, orig_filters, read_users
139     # Convert Job-specific @filters entries into general SQL filters.
140     script_info = {"repository" => nil, "script" => nil}
141     git_filters = Hash.new do |hash, key|
142       hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
143     end
144     filters = []
145     orig_filters.each do |attr, operator, operand|
146       if (script_info.has_key? attr) and (operator == "=")
147         if script_info[attr].nil?
148           script_info[attr] = operand
149         elsif script_info[attr] != operand
150           raise ArgumentError.new("incompatible #{attr} filters")
151         end
152       end
153       case operator
154       when "in git"
155         git_filters[attr]["min_version"] = operand
156       when "not in git"
157         git_filters[attr]["exclude_versions"] += Array.wrap(operand)
158       when "in docker", "not in docker"
159         image_hashes = Array.wrap(operand).flat_map do |search_term|
160           image_search, image_tag = search_term.split(':', 2)
161           Collection.
162             find_all_for_docker_image(image_search, image_tag, read_users, filter_compatible_format: false).
163             map(&:portable_data_hash)
164         end
165         filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
166       else
167         filters << [attr, operator, operand]
168       end
169     end
170
171     # Build a real script_version filter from any "not? in git" filters.
172     git_filters.each_pair do |attr, filter|
173       case attr
174       when "script_version"
175         script_info.each_pair do |key, value|
176           if value.nil?
177             raise ArgumentError.new("script_version filter needs #{key} filter")
178           end
179         end
180         filter["repository"] = script_info["repository"]
181         if attrs[:script_version]
182           filter["max_version"] = attrs[:script_version]
183         else
184           # Using HEAD, set earlier by the hash default, is fine.
185         end
186       when "arvados_sdk_version"
187         filter["repository"] = "arvados"
188       else
189         raise ArgumentError.new("unknown attribute for git filter: #{attr}")
190       end
191       revisions = Commit.find_commit_range(filter["repository"],
192                                            filter["min_version"],
193                                            filter["max_version"],
194                                            filter["exclude_versions"])
195       if revisions.empty?
196         raise ArgumentError.
197           new("error searching #{filter['repository']} from " +
198               "'#{filter['min_version']}' to '#{filter['max_version']}', " +
199               "excluding #{filter['exclude_versions']}")
200       end
201       filters.append([attr, "in", revisions])
202     end
203
204     filters
205   end
206
207   def self.find_reusable attrs, params, filters, read_users
208     if filters.empty?  # Translate older creation parameters into filters.
209       filters =
210         [["repository", "=", attrs[:repository]],
211          ["script", "=", attrs[:script]],
212          ["script_version", "not in git", params[:exclude_script_versions]],
213         ].reject { |filter| filter.last.nil? or filter.last.empty? }
214       if !params[:minimum_script_version].blank?
215         filters << ["script_version", "in git",
216                      params[:minimum_script_version]]
217       else
218         filters += default_git_filters("script_version", attrs[:repository],
219                                        attrs[:script_version])
220       end
221       if image_search = attrs[:runtime_constraints].andand["docker_image"]
222         if image_tag = attrs[:runtime_constraints]["docker_image_tag"]
223           image_search += ":#{image_tag}"
224         end
225         image_locator = Collection.
226           for_latest_docker_image(image_search).andand.portable_data_hash
227       else
228         image_locator = nil
229       end
230       filters << ["docker_image_locator", "=", image_locator]
231       if sdk_version = attrs[:runtime_constraints].andand["arvados_sdk_version"]
232         filters += default_git_filters("arvados_sdk_version", "arvados", sdk_version)
233       end
234       filters = load_job_specific_filters(attrs, filters, read_users)
235     end
236
237     # Check specified filters for some reasonableness.
238     filter_names = filters.map { |f| f.first }.uniq
239     ["repository", "script"].each do |req_filter|
240       if not filter_names.include?(req_filter)
241         return send_error("#{req_filter} filter required")
242       end
243     end
244
245     # Search for a reusable Job, and return it if found.
246     candidates = Job.readable_by(current_user)
247     log_reuse_info { "starting with #{candidates.count} jobs readable by current user #{current_user.uuid}" }
248
249     candidates = candidates.where(
250       'state = ? or (owner_uuid = ? and state in (?))',
251       Job::Complete, current_user.uuid, [Job::Queued, Job::Running])
252     log_reuse_info(candidates) { "after filtering on job state ((state=Complete) or (state=Queued/Running and (submitted by current user)))" }
253
254     digest = Job.sorted_hash_digest(attrs[:script_parameters])
255     candidates = candidates.where('script_parameters_digest = ?', digest)
256     log_reuse_info(candidates) { "after filtering on script_parameters_digest #{digest}" }
257
258     candidates = candidates.where('nondeterministic is distinct from ?', true)
259     log_reuse_info(candidates) { "after filtering on !nondeterministic" }
260
261     # prefer Running jobs over Queued
262     candidates = candidates.order('state desc, created_at')
263
264     candidates = apply_filters candidates, filters
265     log_reuse_info(candidates) { "after filtering on repo, script, and custom filters #{filters.inspect}" }
266
267     chosen = nil
268     chosen_output = nil
269     incomplete_job = nil
270     candidates.each do |j|
271       if j.state != Job::Complete
272         if !incomplete_job
273           # We'll use this if we don't find a job that has completed
274           log_reuse_info { "job #{j.uuid} is reusable, but unfinished; continuing search for completed jobs" }
275           incomplete_job = j
276         else
277           log_reuse_info { "job #{j.uuid} is unfinished and we already have #{incomplete_job.uuid}; ignoring" }
278         end
279       elsif chosen == false
280         # Ignore: we have already decided not to reuse any completed
281         # job.
282         log_reuse_info { "job #{j.uuid} with output #{j.output} ignored, see above" }
283       elsif j.output.nil?
284         log_reuse_info { "job #{j.uuid} has nil output" }
285       elsif j.log.nil?
286         log_reuse_info { "job #{j.uuid} has nil log" }
287       elsif Rails.configuration.reuse_job_if_outputs_differ
288         if !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
289           # Ignore: keep looking for an incomplete job or one whose
290           # output is readable.
291           log_reuse_info { "job #{j.uuid} output #{j.output} unavailable to user; continuing search" }
292         elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.log)
293           # Ignore: keep looking for an incomplete job or one whose
294           # log is readable.
295           log_reuse_info { "job #{j.uuid} log #{j.log} unavailable to user; continuing search" }
296         else
297           log_reuse_info { "job #{j.uuid} with output #{j.output} is reusable; decision is final." }
298           return j
299         end
300       elsif chosen_output
301         if chosen_output != j.output
302           # If two matching jobs produced different outputs, run a new
303           # job (or use one that's already running/queued) instead of
304           # choosing one arbitrarily.
305           log_reuse_info { "job #{j.uuid} output #{j.output} disagrees; forgetting about #{chosen.uuid} and ignoring any other finished jobs (see reuse_job_if_outputs_differ in application.default.yml)" }
306           chosen = false
307         else
308           log_reuse_info { "job #{j.uuid} output #{j.output} agrees with chosen #{chosen.uuid}; continuing search in case other candidates have different outputs" }
309         end
310         # ...and that's the only thing we need to do once we've chosen
311         # a job to reuse.
312       elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
313         # This user cannot read the output of this job. Any other
314         # completed job will have either the same output (making it
315         # unusable) or a different output (making it unusable because
316         # reuse_job_if_outputs_different is turned off). Therefore,
317         # any further investigation of reusable jobs is futile.
318         log_reuse_info { "job #{j.uuid} output #{j.output} is unavailable to user; this means no finished job can be reused (see reuse_job_if_outputs_differ in application.default.yml)" }
319         chosen = false
320       elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.log)
321         # This user cannot read the log of this job, don't try to reuse the
322         # job but consider if the output is consistent.
323         log_reuse_info { "job #{j.uuid} log #{j.log} is unavailable to user; continuing search" }
324         chosen_output = j.output
325       else
326         log_reuse_info { "job #{j.uuid} with output #{j.output} can be reused; continuing search in case other candidates have different outputs" }
327         chosen = j
328         chosen_output = j.output
329       end
330     end
331     j = chosen || incomplete_job
332     if j
333       log_reuse_info { "done, #{j.uuid} was selected" }
334     else
335       log_reuse_info { "done, nothing suitable" }
336     end
337     return j
338   end
339
340   def self.default_git_filters(attr_name, repo_name, refspec)
341     # Add a filter to @filters for `attr_name` = the latest commit available
342     # in `repo_name` at `refspec`.  No filter is added if refspec can't be
343     # resolved.
344     commits = Commit.find_commit_range(repo_name, nil, refspec, nil)
345     if commit_hash = commits.first
346       [[attr_name, "=", commit_hash]]
347     else
348       []
349     end
350   end
351
352   def cancel(cascade: false, need_transaction: true)
353     if need_transaction
354       ActiveRecord::Base.transaction do
355         cancel(cascade: cascade, need_transaction: false)
356       end
357       return
358     end
359
360     if self.state.in?([Queued, Running])
361       self.state = Cancelled
362       self.save!
363     elsif self.state != Cancelled
364       raise InvalidStateTransitionError
365     end
366
367     return if !cascade
368
369     # cancel all children; they could be jobs or pipeline instances
370     children = self.components.andand.collect{|_, u| u}.compact
371
372     return if children.empty?
373
374     # cancel any child jobs
375     Job.where(uuid: children, state: [Queued, Running]).each do |job|
376       job.cancel(cascade: cascade, need_transaction: false)
377     end
378
379     # cancel any child pipelines
380     PipelineInstance.where(uuid: children, state: [PipelineInstance::RunningOnServer, PipelineInstance::RunningOnClient]).each do |pi|
381       pi.cancel(cascade: cascade, need_transaction: false)
382     end
383   end
384
385   protected
386
387   def self.sorted_hash_digest h
388     Digest::MD5.hexdigest(Oj.dump(deep_sort_hash(h)))
389   end
390
391   def foreign_key_attributes
392     super + %w(output log)
393   end
394
395   def skip_uuid_read_permission_check
396     super + %w(cancelled_by_client_uuid)
397   end
398
399   def skip_uuid_existence_check
400     super + %w(output log)
401   end
402
403   def set_priority
404     if self.priority.nil?
405       self.priority = 0
406     end
407     true
408   end
409
410   def ensure_script_version_is_commit
411     if state == Running
412       # Apparently client has already decided to go for it. This is
413       # needed to run a local job using a local working directory
414       # instead of a commit-ish.
415       return true
416     end
417     if new_record? or repository_changed? or script_version_changed?
418       sha1 = Commit.find_commit_range(repository,
419                                       nil, script_version, nil).first
420       if not sha1
421         errors.add :script_version, "#{script_version} does not resolve to a commit"
422         return false
423       end
424       if supplied_script_version.nil? or supplied_script_version.empty?
425         self.supplied_script_version = script_version
426       end
427       self.script_version = sha1
428     end
429     true
430   end
431
432   def tag_version_in_internal_repository
433     if state == Running
434       # No point now. See ensure_script_version_is_commit.
435       true
436     elsif errors.any?
437       # Won't be saved, and script_version might not even be valid.
438       true
439     elsif new_record? or repository_changed? or script_version_changed?
440       uuid_was = uuid
441       begin
442         assign_uuid
443         Commit.tag_in_internal_repository repository, script_version, uuid
444       rescue
445         self.uuid = uuid_was
446         raise
447       end
448     end
449   end
450
451   def ensure_unique_submit_id
452     if !submit_id.nil?
453       if Job.where('submit_id=?',self.submit_id).first
454         raise SubmitIdReused.new
455       end
456     end
457     true
458   end
459
460   def resolve_runtime_constraint(key, attr_sym)
461     if ((runtime_constraints.is_a? Hash) and
462         (search = runtime_constraints[key]))
463       ok, result = yield search
464     else
465       ok, result = true, nil
466     end
467     if ok
468       send("#{attr_sym}=".to_sym, result)
469     else
470       errors.add(attr_sym, result)
471     end
472     ok
473   end
474
475   def find_arvados_sdk_version
476     resolve_runtime_constraint("arvados_sdk_version",
477                                :arvados_sdk_version) do |git_search|
478       commits = Commit.find_commit_range("arvados",
479                                          nil, git_search, nil)
480       if commits.empty?
481         [false, "#{git_search} does not resolve to a commit"]
482       elsif not runtime_constraints["docker_image"]
483         [false, "cannot be specified without a Docker image constraint"]
484       else
485         [true, commits.first]
486       end
487     end
488   end
489
490   def find_docker_image_locator
491     if runtime_constraints.is_a? Hash
492       runtime_constraints['docker_image'] ||=
493         Rails.configuration.default_docker_image_for_jobs
494     end
495
496     resolve_runtime_constraint("docker_image",
497                                :docker_image_locator) do |image_search|
498       image_tag = runtime_constraints['docker_image_tag']
499       if coll = Collection.for_latest_docker_image(image_search, image_tag)
500         [true, coll.portable_data_hash]
501       else
502         [false, "not found for #{image_search}"]
503       end
504     end
505   end
506
507   def permission_to_update
508     if is_locked_by_uuid_was and !(current_user and
509                                    (current_user.uuid == is_locked_by_uuid_was or
510                                     current_user.uuid == system_user.uuid))
511       if script_changed? or
512           script_parameters_changed? or
513           script_version_changed? or
514           (!cancelled_at_was.nil? and
515            (cancelled_by_client_uuid_changed? or
516             cancelled_by_user_uuid_changed? or
517             cancelled_at_changed?)) or
518           started_at_changed? or
519           finished_at_changed? or
520           running_changed? or
521           success_changed? or
522           output_changed? or
523           log_changed? or
524           tasks_summary_changed? or
525           (state_changed? && state != Cancelled) or
526           components_changed?
527         logger.warn "User #{current_user.uuid if current_user} tried to change protected job attributes on locked #{self.class.to_s} #{uuid_was}"
528         return false
529       end
530     end
531     if !is_locked_by_uuid_changed?
532       super
533     else
534       if !current_user
535         logger.warn "Anonymous user tried to change lock on #{self.class.to_s} #{uuid_was}"
536         false
537       elsif is_locked_by_uuid_was and is_locked_by_uuid_was != current_user.uuid
538         logger.warn "User #{current_user.uuid} tried to steal lock on #{self.class.to_s} #{uuid_was} from #{is_locked_by_uuid_was}"
539         false
540       elsif !is_locked_by_uuid.nil? and is_locked_by_uuid != current_user.uuid
541         logger.warn "User #{current_user.uuid} tried to lock #{self.class.to_s} #{uuid_was} with uuid #{is_locked_by_uuid}"
542         false
543       else
544         super
545       end
546     end
547   end
548
549   def update_modified_by_fields
550     if self.cancelled_at_changed?
551       # Ensure cancelled_at cannot be set to arbitrary non-now times,
552       # or changed once it is set.
553       if self.cancelled_at and not self.cancelled_at_was
554         self.cancelled_at = db_current_time
555         self.cancelled_by_user_uuid = current_user.uuid
556         self.cancelled_by_client_uuid = current_api_client.andand.uuid
557         @need_crunch_dispatch_trigger = true
558       else
559         self.cancelled_at = self.cancelled_at_was
560         self.cancelled_by_user_uuid = self.cancelled_by_user_uuid_was
561         self.cancelled_by_client_uuid = self.cancelled_by_client_uuid_was
562       end
563     end
564     super
565   end
566
567   def trigger_crunch_dispatch_if_cancelled
568     if @need_crunch_dispatch_trigger
569       File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do
570         # That's all, just create/touch a file for crunch-job to see.
571       end
572     end
573   end
574
575   def update_timestamps_when_state_changes
576     return if not (state_changed? or new_record?)
577
578     case state
579     when Running
580       self.started_at ||= db_current_time
581     when Failed, Complete
582       self.finished_at ||= db_current_time
583     when Cancelled
584       self.cancelled_at ||= db_current_time
585     end
586
587     # TODO: Remove the following case block when old "success" and
588     # "running" attrs go away. Until then, this ensures we still
589     # expose correct success/running flags to older clients, even if
590     # some new clients are writing only the new state attribute.
591     case state
592     when Queued
593       self.running = false
594       self.success = nil
595     when Running
596       self.running = true
597       self.success = nil
598     when Cancelled, Failed
599       self.running = false
600       self.success = false
601     when Complete
602       self.running = false
603       self.success = true
604     end
605     self.running ||= false # Default to false instead of nil.
606
607     @need_crunch_dispatch_trigger = true
608
609     true
610   end
611
612   def update_state_from_old_state_attrs
613     # If a client has touched the legacy state attrs, update the
614     # "state" attr to agree with the updated values of the legacy
615     # attrs.
616     #
617     # TODO: Remove this method when old "success" and "running" attrs
618     # go away.
619     if cancelled_at_changed? or
620         success_changed? or
621         running_changed? or
622         state.nil?
623       if cancelled_at
624         self.state = Cancelled
625       elsif success == false
626         self.state = Failed
627       elsif success == true
628         self.state = Complete
629       elsif running == true
630         self.state = Running
631       else
632         self.state = Queued
633       end
634     end
635     true
636   end
637
638   def validate_status
639     if self.state.in?(States)
640       true
641     else
642       errors.add :state, "#{state.inspect} must be one of: #{States.inspect}"
643       false
644     end
645   end
646
647   def validate_state_change
648     ok = true
649     if self.state_changed?
650       ok = case self.state_was
651            when nil
652              # state isn't set yet
653              true
654            when Queued
655              # Permit going from queued to any state
656              true
657            when Running
658              # From running, may only transition to a finished state
659              [Complete, Failed, Cancelled].include? self.state
660            when Complete, Failed, Cancelled
661              # Once in a finished state, don't permit any more state changes
662              false
663            else
664              # Any other state transition is also invalid
665              false
666            end
667       if not ok
668         errors.add :state, "invalid change from #{self.state_was} to #{self.state}"
669       end
670     end
671     ok
672   end
673
674   def ensure_no_collection_uuids_in_script_params
675     # Fail validation if any script_parameters field includes a string containing a
676     # collection uuid pattern.
677     if self.script_parameters_changed?
678       if recursive_hash_search(self.script_parameters, Collection.uuid_regex)
679         self.errors.add :script_parameters, "must use portable_data_hash instead of collection uuid"
680         return false
681       end
682     end
683     true
684   end
685
686   # recursive_hash_search searches recursively through hashes and
687   # arrays in 'thing' for string fields matching regular expression
688   # 'pattern'.  Returns true if pattern is found, false otherwise.
689   def recursive_hash_search thing, pattern
690     if thing.is_a? Hash
691       thing.each do |k, v|
692         return true if recursive_hash_search v, pattern
693       end
694     elsif thing.is_a? Array
695       thing.each do |k|
696         return true if recursive_hash_search k, pattern
697       end
698     elsif thing.is_a? String
699       return true if thing.match pattern
700     end
701     false
702   end
703 end