11642: Jobs where log is unreadable (but not null) are not offered for reuse
[arvados.git] / services / api / app / models / job.rb
1 require 'log_reuse_info'
2 require 'safe_json'
3
class Job < ArvadosModel
  include HasUuid
  include KindAndEtag
  include CommonApiTemplate
  extend CurrentApiClient
  extend LogReuseInfo
  serialize :components, Hash
  # Clients may not write these directly; they are derived from
  # runtime_constraints by the find_* validations below.
  attr_protected :arvados_sdk_version, :docker_image_locator
  serialize :script_parameters, Hash
  serialize :runtime_constraints, Hash
  serialize :tasks_summary, Hash
  before_create :ensure_unique_submit_id
  after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
  # Order matters: legacy state attrs are reconciled before the digest
  # is recomputed and validations run.
  before_validation :set_priority
  before_validation :update_state_from_old_state_attrs
  before_validation :update_script_parameters_digest
  validate :ensure_script_version_is_commit
  validate :find_docker_image_locator
  validate :find_arvados_sdk_version
  validate :validate_status
  validate :validate_state_change
  validate :ensure_no_collection_uuids_in_script_params
  before_save :tag_version_in_internal_repository
  before_save :update_timestamps_when_state_changes

  has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
  has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)

  # Raised by ensure_unique_submit_id when a client-supplied submit_id
  # has already been used by another job.
  class SubmitIdReused < StandardError
  end
34
  # Attributes exposed in API responses, in addition to the common set
  # provided by CommonApiTemplate.
  api_accessible :user, extend: :common do |t|
    t.add :submit_id
    t.add :priority
    t.add :script
    t.add :script_parameters
    t.add :script_version
    t.add :cancelled_at
    t.add :cancelled_by_client_uuid
    t.add :cancelled_by_user_uuid
    t.add :started_at
    t.add :finished_at
    t.add :output
    t.add :success
    t.add :running
    t.add :state
    t.add :is_locked_by_uuid
    t.add :log
    t.add :runtime_constraints
    t.add :tasks_summary
    t.add :nondeterministic
    t.add :repository
    t.add :supplied_script_version
    t.add :arvados_sdk_version
    t.add :docker_image_locator
    t.add :queue_position
    t.add :node_uuids
    t.add :description
    t.add :components
  end
64
  # Supported states for a job. The parenthesized assignments also
  # define each state as a class constant (Job::Queued etc.).
  States = [
            (Queued = 'Queued'),
            (Running = 'Running'),
            (Cancelled = 'Cancelled'),
            (Failed = 'Failed'),
            (Complete = 'Complete'),
           ]

  # Per-instance bookkeeping flag, set when a change needs
  # crunch-dispatch's attention (see trigger_crunch_dispatch_if_cancelled).
  after_initialize do
    @need_crunch_dispatch_trigger = false
  end
77
78   def self.limit_index_columns_read
79     ["components"]
80   end
81
82   def assert_finished
83     update_attributes(finished_at: finished_at || db_current_time,
84                       success: success.nil? ? false : success,
85                       running: false)
86   end
87
88   def node_uuids
89     nodes.map(&:uuid)
90   end
91
92   def self.queue
93     self.where('state = ?', Queued).order('priority desc, created_at')
94   end
95
96   def queue_position
97     # We used to report this accurately, but the implementation made queue
98     # API requests O(n**2) for the size of the queue.  See #8800.
99     # We've soft-disabled it because it's not clear we even want this
100     # functionality: now that we have Node Manager with support for multiple
101     # node sizes, "queue position" tells you very little about when a job will
102     # run.
103     state == Queued ? 0 : nil
104   end
105
106   def self.running
107     self.where('running = ?', true).
108       order('priority desc, created_at')
109   end
110
111   def lock locked_by_uuid
112     with_lock do
113       unless self.state == Queued and self.is_locked_by_uuid.nil?
114         raise AlreadyLockedError
115       end
116       self.state = Running
117       self.is_locked_by_uuid = locked_by_uuid
118       self.save!
119     end
120   end
121
122   def update_script_parameters_digest
123     self.script_parameters_digest = self.class.sorted_hash_digest(script_parameters)
124   end
125
126   def self.searchable_columns operator
127     super - ["script_parameters_digest"]
128   end
129
130   def self.full_text_searchable_columns
131     super - ["script_parameters_digest"]
132   end
133
  # Translate Job-specific filter operators ("in git", "not in git",
  # "in docker", "not in docker") from orig_filters into plain filters
  # the generic SQL machinery can apply, and return the new filter list.
  #
  # attrs:        job attributes from the request; attrs[:script_version]
  #               caps the git range for script_version filters.
  # orig_filters: [[attr, operator, operand], ...] as supplied by client.
  # read_users:   users whose readable collections may satisfy docker
  #               image searches.
  #
  # Raises ArgumentError on contradictory or unresolvable filters.
  def self.load_job_specific_filters attrs, orig_filters, read_users
    # Convert Job-specific @filters entries into general SQL filters.
    script_info = {"repository" => nil, "script" => nil}
    # Each git-filtered attr lazily gets a spec hash; the default upper
    # bound is HEAD with nothing excluded.
    git_filters = Hash.new do |hash, key|
      hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
    end
    filters = []
    orig_filters.each do |attr, operator, operand|
      # Remember the single repository/script equality operand; a second,
      # different operand for the same attr is a contradiction.
      if (script_info.has_key? attr) and (operator == "=")
        if script_info[attr].nil?
          script_info[attr] = operand
        elsif script_info[attr] != operand
          raise ArgumentError.new("incompatible #{attr} filters")
        end
      end
      case operator
      when "in git"
        git_filters[attr]["min_version"] = operand
      when "not in git"
        git_filters[attr]["exclude_versions"] += Array.wrap(operand)
      when "in docker", "not in docker"
        # Resolve docker image names (optionally "name:tag") to
        # portable data hashes, then rewrite as a plain "in"/"not in"
        # filter over those hashes.
        image_hashes = Array.wrap(operand).flat_map do |search_term|
          image_search, image_tag = search_term.split(':', 2)
          Collection.
            find_all_for_docker_image(image_search, image_tag, read_users, filter_compatible_format: false).
            map(&:portable_data_hash)
        end
        filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
      else
        filters << [attr, operator, operand]
      end
    end

    # Build a real script_version filter from any "not? in git" filters.
    git_filters.each_pair do |attr, filter|
      case attr
      when "script_version"
        # A git range is meaningless without knowing which repository
        # (and script) it applies to.
        script_info.each_pair do |key, value|
          if value.nil?
            raise ArgumentError.new("script_version filter needs #{key} filter")
          end
        end
        filter["repository"] = script_info["repository"]
        if attrs[:script_version]
          filter["max_version"] = attrs[:script_version]
        else
          # Using HEAD, set earlier by the hash default, is fine.
        end
      when "arvados_sdk_version"
        filter["repository"] = "arvados"
      else
        raise ArgumentError.new("unknown attribute for git filter: #{attr}")
      end
      revisions = Commit.find_commit_range(filter["repository"],
                                           filter["min_version"],
                                           filter["max_version"],
                                           filter["exclude_versions"])
      if revisions.empty?
        raise ArgumentError.
          new("error searching #{filter['repository']} from " +
              "'#{filter['min_version']}' to '#{filter['max_version']}', " +
              "excluding #{filter['exclude_versions']}")
      end
      # Expand the git range into an explicit list of acceptable commits.
      filters.append([attr, "in", revisions])
    end

    filters
  end
202
  # Search for an existing job that can satisfy this submission instead
  # of running a new one; return it, or nil if none qualifies.
  #
  # A completed job is a reuse candidate when its script_parameters
  # digest matches, it is not marked nondeterministic, and its output
  # and log are both non-nil. Unless reuse_job_if_outputs_differ is
  # enabled, all matching completed jobs must also agree on the output
  # and the output/log must be readable by the current user. A
  # queued/running job owned by the current user serves as a fallback
  # when no completed job is chosen.
  def self.find_reusable attrs, params, filters, read_users
    if filters.empty?  # Translate older creation parameters into filters.
      filters =
        [["repository", "=", attrs[:repository]],
         ["script", "=", attrs[:script]],
         ["script_version", "not in git", params[:exclude_script_versions]],
        ].reject { |filter| filter.last.nil? or filter.last.empty? }
      if !params[:minimum_script_version].blank?
        filters << ["script_version", "in git",
                     params[:minimum_script_version]]
      else
        filters += default_git_filters("script_version", attrs[:repository],
                                       attrs[:script_version])
      end
      if image_search = attrs[:runtime_constraints].andand["docker_image"]
        if image_tag = attrs[:runtime_constraints]["docker_image_tag"]
          image_search += ":#{image_tag}"
        end
        image_locator = Collection.
          for_latest_docker_image(image_search).andand.portable_data_hash
      else
        image_locator = nil
      end
      filters << ["docker_image_locator", "=", image_locator]
      if sdk_version = attrs[:runtime_constraints].andand["arvados_sdk_version"]
        filters += default_git_filters("arvados_sdk_version", "arvados", sdk_version)
      end
      filters = load_job_specific_filters(attrs, filters, read_users)
    end

    # Check specified filters for some reasonableness.
    filter_names = filters.map { |f| f.first }.uniq
    ["repository", "script"].each do |req_filter|
      if not filter_names.include?(req_filter)
        return send_error("#{req_filter} filter required")
      end
    end

    # Search for a reusable Job, and return it if found.
    candidates = Job.readable_by(current_user)
    log_reuse_info { "starting with #{candidates.count} jobs readable by current user #{current_user.uuid}" }

    candidates = candidates.where(
      'state = ? or (owner_uuid = ? and state in (?))',
      Job::Complete, current_user.uuid, [Job::Queued, Job::Running])
    log_reuse_info(candidates) { "after filtering on job state ((state=Complete) or (state=Queued/Running and (submitted by current user)))" }

    digest = Job.sorted_hash_digest(attrs[:script_parameters])
    candidates = candidates.where('script_parameters_digest = ?', digest)
    log_reuse_info(candidates) { "after filtering on script_parameters_digest #{digest}" }

    # "is distinct from" also matches NULL, unlike "!=".
    candidates = candidates.where('nondeterministic is distinct from ?', true)
    log_reuse_info(candidates) { "after filtering on !nondeterministic" }

    # prefer Running jobs over Queued
    candidates = candidates.order('state desc, created_at')

    candidates = apply_filters candidates, filters
    log_reuse_info(candidates) { "after filtering on repo, script, and custom filters #{filters.inspect}" }

    # chosen == nil: still looking. chosen == false: decided no
    # completed job may be reused. chosen == a Job: current pick.
    chosen = nil
    chosen_output = nil
    incomplete_job = nil
    candidates.each do |j|
      if j.state != Job::Complete
        if !incomplete_job
          # We'll use this if we don't find a job that has completed
          log_reuse_info { "job #{j.uuid} is reusable, but unfinished; continuing search for completed jobs" }
          incomplete_job = j
        else
          log_reuse_info { "job #{j.uuid} is unfinished and we already have #{incomplete_job.uuid}; ignoring" }
        end
      elsif chosen == false
        # Ignore: we have already decided not to reuse any completed
        # job.
        log_reuse_info { "job #{j.uuid} with output #{j.output} ignored, see above" }
      elsif j.output.nil?
        log_reuse_info { "job #{j.uuid} has nil output" }
      elsif j.log.nil?
        log_reuse_info { "job #{j.uuid} has nil log" }
      elsif Rails.configuration.reuse_job_if_outputs_differ
        if !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
          # Ignore: keep looking for an incomplete job or one whose
          # output is readable.
          log_reuse_info { "job #{j.uuid} output #{j.output} unavailable to user; continuing search" }
        elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.log)
          # Ignore: keep looking for an incomplete job or one whose
          # log is readable.
          log_reuse_info { "job #{j.uuid} log #{j.log} unavailable to user; continuing search" }
        else
          log_reuse_info { "job #{j.uuid} with output #{j.output} is reusable; decision is final." }
          return j
        end
      elsif chosen_output
        if chosen_output != j.output
          # If two matching jobs produced different outputs, run a new
          # job (or use one that's already running/queued) instead of
          # choosing one arbitrarily.
          log_reuse_info { "job #{j.uuid} output #{j.output} disagrees; forgetting about #{chosen.uuid} and ignoring any other finished jobs (see reuse_job_if_outputs_differ in application.default.yml)" }
          chosen = false
        else
          # NOTE(review): chosen_output can be set with chosen still nil
          # (unreadable-log branch below); in that case this log block
          # would raise on chosen.uuid when reuse logging is enabled,
          # and a later fully-readable agreeing job can no longer be
          # chosen — confirm this is intended.
          log_reuse_info { "job #{j.uuid} output #{j.output} agrees with chosen #{chosen.uuid}; continuing search in case other candidates have different outputs" }
        end
        # ...and that's the only thing we need to do once we've chosen
        # a job to reuse.
      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
        # This user cannot read the output of this job. Any other
        # completed job will have either the same output (making it
        # unusable) or a different output (making it unusable because
        # reuse_job_if_outputs_different is turned off). Therefore,
        # any further investigation of reusable jobs is futile.
        log_reuse_info { "job #{j.uuid} output #{j.output} is unavailable to user; this means no finished job can be reused (see reuse_job_if_outputs_differ in application.default.yml)" }
        chosen = false
      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.log)
        # This user cannot read the log of this job, don't try to reuse the
        # job but consider if the output is consistent.
        log_reuse_info { "job #{j.uuid} log #{j.log} is unavailable to user; continuing search" }
        chosen_output = j.output
      else
        log_reuse_info { "job #{j.uuid} with output #{j.output} can be reused; continuing search in case other candidates have different outputs" }
        chosen = j
        chosen_output = j.output
      end
    end
    j = chosen || incomplete_job
    if j
      log_reuse_info { "done, #{j.uuid} was selected" }
    else
      log_reuse_info { "done, nothing suitable" }
    end
    return j
  end
335
336   def self.default_git_filters(attr_name, repo_name, refspec)
337     # Add a filter to @filters for `attr_name` = the latest commit available
338     # in `repo_name` at `refspec`.  No filter is added if refspec can't be
339     # resolved.
340     commits = Commit.find_commit_range(repo_name, nil, refspec, nil)
341     if commit_hash = commits.first
342       [[attr_name, "=", commit_hash]]
343     else
344       []
345     end
346   end
347
  # Cancel this job. With cascade: true, also cancels queued/running
  # child jobs and running child pipeline instances named in the
  # components hash. The whole operation runs inside one transaction;
  # recursive/self calls pass need_transaction: false to reuse it.
  # Raises InvalidStateTransitionError if the job is already finished
  # in a non-cancelled state. Cancelling an already-cancelled job is a
  # no-op (except for the cascade).
  def cancel(cascade: false, need_transaction: true)
    if need_transaction
      ActiveRecord::Base.transaction do
        cancel(cascade: cascade, need_transaction: false)
      end
      return
    end

    if self.state.in?([Queued, Running])
      self.state = Cancelled
      self.save!
    elsif self.state != Cancelled
      raise InvalidStateTransitionError
    end

    return if !cascade

    # cancel all children; they could be jobs or pipeline instances
    children = self.components.andand.collect{|_, u| u}.compact

    return if children.empty?

    # cancel any child jobs
    Job.where(uuid: children, state: [Queued, Running]).each do |job|
      job.cancel(cascade: cascade, need_transaction: false)
    end

    # cancel any child pipelines
    PipelineInstance.where(uuid: children, state: [PipelineInstance::RunningOnServer, PipelineInstance::RunningOnClient]).each do |pi|
      pi.cancel(cascade: cascade, need_transaction: false)
    end
  end
380
  protected

  # Canonical MD5 digest of hash h: keys are deep-sorted before
  # serializing so equal hashes with different key order yield equal
  # digests. (MD5 here is a matching fingerprint, not a security hash.)
  def self.sorted_hash_digest h
    Digest::MD5.hexdigest(Oj.dump(deep_sort_hash(h)))
  end
386
387   def foreign_key_attributes
388     super + %w(output log)
389   end
390
391   def skip_uuid_read_permission_check
392     super + %w(cancelled_by_client_uuid)
393   end
394
395   def skip_uuid_existence_check
396     super + %w(output log)
397   end
398
399   def set_priority
400     if self.priority.nil?
401       self.priority = 0
402     end
403     true
404   end
405
  # Validation: resolve script_version to a full commit sha1, keeping
  # the client's original commit-ish in supplied_script_version. Skipped
  # for jobs already in Running state (see below). Adds a validation
  # error and returns false when the version cannot be resolved.
  def ensure_script_version_is_commit
    if state == Running
      # Apparently client has already decided to go for it. This is
      # needed to run a local job using a local working directory
      # instead of a commit-ish.
      return true
    end
    if new_record? or repository_changed? or script_version_changed?
      sha1 = Commit.find_commit_range(repository,
                                      nil, script_version, nil).first
      if not sha1
        errors.add :script_version, "#{script_version} does not resolve to a commit"
        return false
      end
      # Preserve what the client actually asked for, if not already set.
      if supplied_script_version.nil? or supplied_script_version.empty?
        self.supplied_script_version = script_version
      end
      self.script_version = sha1
    end
    true
  end
427
  # before_save hook: tag the resolved script_version with this job's
  # UUID in the internal git repository so the exact code is retained
  # even if the source branch later moves. The UUID is assigned early
  # (assign_uuid) so the tag name is known; on failure the previous
  # UUID is restored and the error re-raised.
  def tag_version_in_internal_repository
    if state == Running
      # No point now. See ensure_script_version_is_commit.
      true
    elsif errors.any?
      # Won't be saved, and script_version might not even be valid.
      true
    elsif new_record? or repository_changed? or script_version_changed?
      uuid_was = uuid
      begin
        assign_uuid
        Commit.tag_in_internal_repository repository, script_version, uuid
      rescue
        self.uuid = uuid_was
        raise
      end
    end
  end
446
447   def ensure_unique_submit_id
448     if !submit_id.nil?
449       if Job.where('submit_id=?',self.submit_id).first
450         raise SubmitIdReused.new
451       end
452     end
453     true
454   end
455
  # Shared helper for attributes derived from runtime_constraints. If
  # `key` is present (and truthy) in runtime_constraints, yields its
  # value to the block, which must return [ok, result]. On ok, assigns
  # result to the attribute `attr_sym`; otherwise records result as a
  # validation error message on `attr_sym`. When the key is absent the
  # attribute is reset to nil. Returns ok (validation outcome).
  def resolve_runtime_constraint(key, attr_sym)
    if ((runtime_constraints.is_a? Hash) and
        (search = runtime_constraints[key]))
      ok, result = yield search
    else
      ok, result = true, nil
    end
    if ok
      # e.g. attr_sym :docker_image_locator -> self.docker_image_locator=
      send("#{attr_sym}=".to_sym, result)
    else
      errors.add(attr_sym, result)
    end
    ok
  end
470
  # Validation: resolve the "arvados_sdk_version" runtime constraint to
  # a commit in the "arvados" repository and store it in
  # arvados_sdk_version. Fails validation if the refspec does not
  # resolve, or if no docker_image constraint accompanies it.
  def find_arvados_sdk_version
    resolve_runtime_constraint("arvados_sdk_version",
                               :arvados_sdk_version) do |git_search|
      commits = Commit.find_commit_range("arvados",
                                         nil, git_search, nil)
      if commits.empty?
        [false, "#{git_search} does not resolve to a commit"]
      elsif not runtime_constraints["docker_image"]
        [false, "cannot be specified without a Docker image constraint"]
      else
        [true, commits.first]
      end
    end
  end
485
  # Validation: resolve the "docker_image" runtime constraint (with
  # optional "docker_image_tag") to a collection portable data hash and
  # store it in docker_image_locator. When no image is requested, the
  # site-wide default image (if configured) is filled in first.
  def find_docker_image_locator
    if runtime_constraints.is_a? Hash
      runtime_constraints['docker_image'] ||=
        Rails.configuration.default_docker_image_for_jobs
    end

    resolve_runtime_constraint("docker_image",
                               :docker_image_locator) do |image_search|
      image_tag = runtime_constraints['docker_image_tag']
      if coll = Collection.for_latest_docker_image(image_search, image_tag)
        [true, coll.portable_data_hash]
      else
        [false, "not found for #{image_search}"]
      end
    end
  end
502
  # While a job is locked, only the lock holder or the system user may
  # change its protected attributes (everyone else is limited to
  # cancelling). Also enforces lock handover: a lock may only be taken,
  # kept, or released by the user named in is_locked_by_uuid. Returns
  # false (denying the update) on any violation; defers to super
  # otherwise.
  def permission_to_update
    if is_locked_by_uuid_was and !(current_user and
                                   (current_user.uuid == is_locked_by_uuid_was or
                                    current_user.uuid == system_user.uuid))
      # Locked by someone else: only a state change to Cancelled (and
      # the first-time cancelled_* stamps) is permitted.
      if script_changed? or
          script_parameters_changed? or
          script_version_changed? or
          (!cancelled_at_was.nil? and
           (cancelled_by_client_uuid_changed? or
            cancelled_by_user_uuid_changed? or
            cancelled_at_changed?)) or
          started_at_changed? or
          finished_at_changed? or
          running_changed? or
          success_changed? or
          output_changed? or
          log_changed? or
          tasks_summary_changed? or
          (state_changed? && state != Cancelled) or
          components_changed?
        logger.warn "User #{current_user.uuid if current_user} tried to change protected job attributes on locked #{self.class.to_s} #{uuid_was}"
        return false
      end
    end
    if !is_locked_by_uuid_changed?
      super
    else
      if !current_user
        logger.warn "Anonymous user tried to change lock on #{self.class.to_s} #{uuid_was}"
        false
      elsif is_locked_by_uuid_was and is_locked_by_uuid_was != current_user.uuid
        logger.warn "User #{current_user.uuid} tried to steal lock on #{self.class.to_s} #{uuid_was} from #{is_locked_by_uuid_was}"
        false
      elsif !is_locked_by_uuid.nil? and is_locked_by_uuid != current_user.uuid
        logger.warn "User #{current_user.uuid} tried to lock #{self.class.to_s} #{uuid_was} with uuid #{is_locked_by_uuid}"
        false
      else
        super
      end
    end
  end
544
  # Guard the cancellation fields: cancelled_at is always the server
  # clock's "now" when first set, and immutable (together with the
  # cancelled_by_* fields) once set. A fresh cancellation also schedules
  # a crunch-dispatch trigger (see trigger_crunch_dispatch_if_cancelled).
  def update_modified_by_fields
    if self.cancelled_at_changed?
      # Ensure cancelled_at cannot be set to arbitrary non-now times,
      # or changed once it is set.
      if self.cancelled_at and not self.cancelled_at_was
        self.cancelled_at = db_current_time
        self.cancelled_by_user_uuid = current_user.uuid
        self.cancelled_by_client_uuid = current_api_client.andand.uuid
        @need_crunch_dispatch_trigger = true
      else
        # Revert any attempted change to an already-set cancellation.
        self.cancelled_at = self.cancelled_at_was
        self.cancelled_by_user_uuid = self.cancelled_by_user_uuid_was
        self.cancelled_by_client_uuid = self.cancelled_by_client_uuid_was
      end
    end
    super
  end
562
563   def trigger_crunch_dispatch_if_cancelled
564     if @need_crunch_dispatch_trigger
565       File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do
566         # That's all, just create/touch a file for crunch-job to see.
567       end
568     end
569   end
570
  # before_save hook: when state changes (or on create), stamp the
  # matching timestamp if unset, mirror the new state into the legacy
  # success/running flags, and schedule a crunch-dispatch trigger.
  def update_timestamps_when_state_changes
    return if not (state_changed? or new_record?)

    case state
    when Running
      self.started_at ||= db_current_time
    when Failed, Complete
      self.finished_at ||= db_current_time
    when Cancelled
      self.cancelled_at ||= db_current_time
    end

    # TODO: Remove the following case block when old "success" and
    # "running" attrs go away. Until then, this ensures we still
    # expose correct success/running flags to older clients, even if
    # some new clients are writing only the new state attribute.
    case state
    when Queued
      self.running = false
      self.success = nil
    when Running
      self.running = true
      self.success = nil
    when Cancelled, Failed
      self.running = false
      self.success = false
    when Complete
      self.running = false
      self.success = true
    end
    self.running ||= false # Default to false instead of nil.

    @need_crunch_dispatch_trigger = true

    true
  end
607
  # before_validation hook: derive "state" from the legacy attrs when a
  # client has touched them (precedence: cancelled_at, then success,
  # then running, else Queued). Always returns true.
  def update_state_from_old_state_attrs
    # If a client has touched the legacy state attrs, update the
    # "state" attr to agree with the updated values of the legacy
    # attrs.
    #
    # TODO: Remove this method when old "success" and "running" attrs
    # go away.
    if cancelled_at_changed? or
        success_changed? or
        running_changed? or
        state.nil?
      if cancelled_at
        self.state = Cancelled
      elsif success == false
        self.state = Failed
      elsif success == true
        self.state = Complete
      elsif running == true
        self.state = Running
      else
        self.state = Queued
      end
    end
    true
  end
633
634   def validate_status
635     if self.state.in?(States)
636       true
637     else
638       errors.add :state, "#{state.inspect} must be one of: #{States.inspect}"
639       false
640     end
641   end
642
  # Validation: enforce the job state machine. Queued may go anywhere;
  # Running may only move to a finished state (Complete/Failed/
  # Cancelled); finished states are terminal. Adds a validation error
  # and returns false on an illegal transition.
  def validate_state_change
    ok = true
    if self.state_changed?
      ok = case self.state_was
           when nil
             # state isn't set yet
             true
           when Queued
             # Permit going from queued to any state
             true
           when Running
             # From running, may only transition to a finished state
             [Complete, Failed, Cancelled].include? self.state
           when Complete, Failed, Cancelled
             # Once in a finished state, don't permit any more state changes
             false
           else
             # Any other state transition is also invalid
             false
           end
      if not ok
        errors.add :state, "invalid change from #{self.state_was} to #{self.state}"
      end
    end
    ok
  end
669
670   def ensure_no_collection_uuids_in_script_params
671     # Fail validation if any script_parameters field includes a string containing a
672     # collection uuid pattern.
673     if self.script_parameters_changed?
674       if recursive_hash_search(self.script_parameters, Collection.uuid_regex)
675         self.errors.add :script_parameters, "must use portable_data_hash instead of collection uuid"
676         return false
677       end
678     end
679     true
680   end
681
682   # recursive_hash_search searches recursively through hashes and
683   # arrays in 'thing' for string fields matching regular expression
684   # 'pattern'.  Returns true if pattern is found, false otherwise.
685   def recursive_hash_search thing, pattern
686     if thing.is_a? Hash
687       thing.each do |k, v|
688         return true if recursive_hash_search v, pattern
689       end
690     elsif thing.is_a? Array
691       thing.each do |k|
692         return true if recursive_hash_search k, pattern
693       end
694     elsif thing.is_a? String
695       return true if thing.match pattern
696     end
697     false
698   end
699 end