class Job < ArvadosModel
  include HasUuid
  include KindAndEtag
  include CommonApiTemplate
  extend CurrentApiClient
  serialize :components, Hash
  attr_protected :arvados_sdk_version, :docker_image_locator
  serialize :script_parameters, Hash
  serialize :runtime_constraints, Hash
  serialize :tasks_summary, Hash
  before_create :ensure_unique_submit_id
  after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
  before_validation :set_priority
  before_validation :update_state_from_old_state_attrs
  before_validation :update_script_parameters_digest
  validate :ensure_script_version_is_commit
  validate :find_docker_image_locator
  validate :find_arvados_sdk_version
  validate :validate_status
  validate :validate_state_change
  validate :ensure_no_collection_uuids_in_script_params
  before_save :tag_version_in_internal_repository
  before_save :update_timestamps_when_state_changes

  has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
  has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)

  class SubmitIdReused < StandardError
  end

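  # Attributes returned to API clients, in addition to the common
  # template.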
  api_accessible :user, extend: :common do |t|
    t.add :submit_id
    t.add :priority
    t.add :script
    t.add :script_parameters
    t.add :script_version
    t.add :cancelled_at
    t.add :cancelled_by_client_uuid
    t.add :cancelled_by_user_uuid
    t.add :started_at
    t.add :finished_at
    t.add :output
    t.add :success
    t.add :running
    t.add :state
    t.add :is_locked_by_uuid
    t.add :log
    t.add :runtime_constraints
    t.add :tasks_summary
    t.add :nondeterministic
    t.add :repository
    t.add :supplied_script_version
    t.add :arvados_sdk_version
    t.add :docker_image_locator
    t.add :queue_position
    t.add :node_uuids
    t.add :description
    t.add :components
  end

  # Supported states for a job
  States = [
            (Queued = 'Queued'),
            (Running = 'Running'),
            (Cancelled = 'Cancelled'),
            (Failed = 'Failed'),
            (Complete = 'Complete'),
           ]

  after_initialize do
    @need_crunch_dispatch_trigger = false
  end

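  # Mark the job finished: default finished_at to the current time,
  # treat a nil success as failure, and clear the running flag.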
  def assert_finished
    update_attributes(finished_at: finished_at || db_current_time,
                      success: success.nil? ? false : success,
                      running: false)
  end

  def node_uuids
    nodes.map(&:uuid)
  end

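  # Queued jobs, highest priority first (ties broken by creation time).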
  def self.queue
    self.where('state = ?', Queued).order('priority desc, created_at')
  end

  def queue_position
    # We used to report this accurately, but the implementation made queue
    # API requests O(n**2) for the size of the queue.  See #8800.
    # We've soft-disabled it because it's not clear we even want this
    # functionality: now that we have Node Manager with support for multiple
    # node sizes, "queue position" tells you very little about when a job will
    # run.
    state == Queued ? 0 : nil
  end

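  # Running jobs, ordered the same way as the queue.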
  def self.running
    self.where('running = ?', true).
      order('priority desc, created_at')
  end

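  # Atomically move a queued, unlocked job to the Running state and
  # record the locker's UUID. Raises AlreadyLockedError if the job has
  # already left the queue or been locked.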
  def lock locked_by_uuid
    with_lock do
      unless self.state == Queued and self.is_locked_by_uuid.nil?
        raise AlreadyLockedError
      end
      self.state = Running
      self.is_locked_by_uuid = locked_by_uuid
      self.save!
    end
  end

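  # Keep script_parameters_digest in sync with script_parameters so
  # equivalent parameter sets can be matched cheaply (see find_reusable).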
  def update_script_parameters_digest
    self.script_parameters_digest = self.class.sorted_hash_digest(script_parameters)
  end

  def self.searchable_columns operator
    super - ["script_parameters_digest"]
  end

  def self.full_text_searchable_columns
    super - ["script_parameters_digest"]
  end

  def self.load_job_specific_filters attrs, orig_filters, read_users
    # Convert Job-specific @filters entries into general SQL filters.
    script_info = {"repository" => nil, "script" => nil}
    git_filters = Hash.new do |hash, key|
      hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
    end
    filters = []
    orig_filters.each do |attr, operator, operand|
      if (script_info.has_key? attr) and (operator == "=")
        if script_info[attr].nil?
          script_info[attr] = operand
        elsif script_info[attr] != operand
          raise ArgumentError.new("incompatible #{attr} filters")
        end
      end
      case operator
      when "in git"
        git_filters[attr]["min_version"] = operand
      when "not in git"
        git_filters[attr]["exclude_versions"] += Array.wrap(operand)
      when "in docker", "not in docker"
        image_hashes = Array.wrap(operand).flat_map do |search_term|
          image_search, image_tag = search_term.split(':', 2)
          Collection.
            find_all_for_docker_image(image_search, image_tag, read_users).
            map(&:portable_data_hash)
        end
        filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
      else
        filters << [attr, operator, operand]
      end
    end

    # Build a real script_version filter from any "not? in git" filters.
    git_filters.each_pair do |attr, filter|
      case attr
      when "script_version"
        script_info.each_pair do |key, value|
          if value.nil?
            raise ArgumentError.new("script_version filter needs #{key} filter")
          end
        end
        filter["repository"] = script_info["repository"]
        if attrs[:script_version]
          filter["max_version"] = attrs[:script_version]
        else
          # Using HEAD, set earlier by the hash default, is fine.
        end
      when "arvados_sdk_version"
        filter["repository"] = "arvados"
      else
        raise ArgumentError.new("unknown attribute for git filter: #{attr}")
      end
      revisions = Commit.find_commit_range(filter["repository"],
                                           filter["min_version"],
                                           filter["max_version"],
                                           filter["exclude_versions"])
      if revisions.empty?
        raise ArgumentError.
          new("error searching #{filter['repository']} from " +
              "'#{filter['min_version']}' to '#{filter['max_version']}', " +
              "excluding #{filter['exclude_versions']}")
      end
      filters.append([attr, "in", revisions])
    end

    filters
  end

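  # Look for an existing job whose results can be reused for the given
  # creation attributes/filters. Returns a completed job when all
  # matching completed jobs agree on their output and that output is
  # readable by the current user; otherwise falls back to a matching
  # queued or running job owned by the current user, or nil.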
  def self.find_reusable attrs, params, filters, read_users
    if filters.empty?  # Translate older creation parameters into filters.
      filters =
        [["repository", "=", attrs[:repository]],
         ["script", "=", attrs[:script]],
         ["script_version", "not in git", params[:exclude_script_versions]],
        ].reject { |filter| filter.last.nil? or filter.last.empty? }
      if !params[:minimum_script_version].blank?
        filters << ["script_version", "in git",
                     params[:minimum_script_version]]
      else
        filters += default_git_filters("script_version", attrs[:repository],
                                       attrs[:script_version])
      end
      if image_search = attrs[:runtime_constraints].andand["docker_image"]
        if image_tag = attrs[:runtime_constraints]["docker_image_tag"]
          image_search += ":#{image_tag}"
        end
        image_locator = Collection.
          for_latest_docker_image(image_search).andand.portable_data_hash
      else
        image_locator = nil
      end
      filters << ["docker_image_locator", "=", image_locator]
      if sdk_version = attrs[:runtime_constraints].andand["arvados_sdk_version"]
        filters += default_git_filters("arvados_sdk_version", "arvados", sdk_version)
      end
      filters = load_job_specific_filters(attrs, filters, read_users)
    end

    # Check specified filters for some reasonableness.
    filter_names = filters.map { |f| f.first }.uniq
    ["repository", "script"].each do |req_filter|
      if not filter_names.include?(req_filter)
        return send_error("#{req_filter} filter required")
      end
    end

    # Search for a reusable Job, and return it if found.
    candidates = Job.
      readable_by(current_user).
      where('state = ? or (owner_uuid = ? and state in (?))',
            Job::Complete, current_user.uuid, [Job::Queued, Job::Running]).
      where('script_parameters_digest = ?', Job.sorted_hash_digest(attrs[:script_parameters])).
      where('nondeterministic is distinct from ?', true).
      order('state desc, created_at') # prefer Running jobs over Queued
    candidates = apply_filters candidates, filters
    chosen = nil
    incomplete_job = nil
    candidates.each do |j|
      if j.state != Job::Complete
        # We'll use this if we don't find a job that has completed
        incomplete_job ||= j
        next
      end

      if chosen == false
        # We have already decided not to reuse any completed job
        next
      elsif chosen
        if chosen.output != j.output
          # If two matching jobs produced different outputs, run a new
          # job (or use one that's already running/queued) instead of
          # choosing one arbitrarily.
          chosen = false
        end
        # ...and that's the only thing we need to do once we've chosen
        # a job to reuse.
      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
        # As soon as the output we will end up returning (if any) is
        # decided, check whether it will be visible to the user; if
        # not, any further investigation of reusable jobs is futile.
        chosen = false
      else
        chosen = j
      end
    end
    chosen || incomplete_job
  end

  def self.default_git_filters(attr_name, repo_name, refspec)
    # Add a filter to @filters for `attr_name` = the latest commit available
    # in `repo_name` at `refspec`.  No filter is added if refspec can't be
    # resolved.
    commits = Commit.find_commit_range(repo_name, nil, refspec, nil)
    if commit_hash = commits.first
      [[attr_name, "=", commit_hash]]
    else
      []
    end
  end

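  # Cancel this job. With need_transaction: true (the default), the
  # entire operation, including any cascade, runs inside a single
  # database transaction. With cascade: true, queued or running child
  # jobs and running child pipeline instances listed in components are
  # cancelled as well.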
  def cancel(cascade: false, need_transaction: true)
    if need_transaction
      ActiveRecord::Base.transaction do
        cancel(cascade: cascade, need_transaction: false)
      end
      return
    end

    if self.state.in?([Queued, Running])
      self.state = Cancelled
      self.save!
    elsif self.state != Cancelled
      raise InvalidStateTransitionError
    end

    return if !cascade

    # cancel all children; they could be jobs or pipeline instances
    children = self.components.andand.collect{|_, u| u}.compact

    return if children.empty?

    # cancel any child jobs
    Job.where(uuid: children, state: [Queued, Running]).each do |job|
      job.cancel(cascade: cascade, need_transaction: false)
    end

    # cancel any child pipelines
    PipelineInstance.where(uuid: children, state: [PipelineInstance::RunningOnServer, PipelineInstance::RunningOnClient]).each do |pi|
      pi.cancel(cascade: cascade, need_transaction: false)
    end
  end

  protected

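  # Digest a hash after sorting its keys recursively, so logically
  # equal hashes always produce the same digest.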
  def self.sorted_hash_digest h
    Digest::MD5.hexdigest(Oj.dump(deep_sort_hash(h)))
  end

  def foreign_key_attributes
    super + %w(output log)
  end

  def skip_uuid_read_permission_check
    super + %w(cancelled_by_client_uuid)
  end

  def skip_uuid_existence_check
    super + %w(output log)
  end

  def set_priority
    if self.priority.nil?
      self.priority = 0
    end
    true
  end

  def ensure_script_version_is_commit
    if state == Running
      # Apparently client has already decided to go for it. This is
      # needed to run a local job using a local working directory
      # instead of a commit-ish.
      return true
    end
    if new_record? or repository_changed? or script_version_changed?
      sha1 = Commit.find_commit_range(repository,
                                      nil, script_version, nil).first
      if not sha1
        errors.add :script_version, "#{script_version} does not resolve to a commit"
        return false
      end
      if supplied_script_version.nil? or supplied_script_version.empty?
        self.supplied_script_version = script_version
      end
      self.script_version = sha1
    end
    true
  end

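  # Tag the resolved script_version commit in the internal repository
  # with this job's UUID.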
  def tag_version_in_internal_repository
    if state == Running
      # No point now. See ensure_script_version_is_commit.
      true
    elsif errors.any?
      # Won't be saved, and script_version might not even be valid.
      true
    elsif new_record? or repository_changed? or script_version_changed?
      uuid_was = uuid
      begin
        assign_uuid
        Commit.tag_in_internal_repository repository, script_version, uuid
      rescue
        self.uuid = uuid_was
        raise
      end
    end
  end

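  # Refuse to create a job whose submit_id has already been used.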
  def ensure_unique_submit_id
    if !submit_id.nil?
      if Job.where('submit_id=?', self.submit_id).first
        raise SubmitIdReused.new
      end
    end
    true
  end

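  # Resolve runtime_constraints[key] with the given block and store the
  # result in the attribute named by attr_sym. The block returns
  # [ok, result]; on failure, result is recorded as a validation error
  # on that attribute instead.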
  def resolve_runtime_constraint(key, attr_sym)
    if ((runtime_constraints.is_a? Hash) and
        (search = runtime_constraints[key]))
      ok, result = yield search
    else
      ok, result = true, nil
    end
    if ok
      send("#{attr_sym}=".to_sym, result)
    else
      errors.add(attr_sym, result)
    end
    ok
  end

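  # Resolve the arvados_sdk_version runtime constraint to a commit in
  # the "arvados" repository.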
  def find_arvados_sdk_version
    resolve_runtime_constraint("arvados_sdk_version",
                               :arvados_sdk_version) do |git_search|
      commits = Commit.find_commit_range("arvados",
                                         nil, git_search, nil)
      if commits.empty?
        [false, "#{git_search} does not resolve to a commit"]
      elsif not runtime_constraints["docker_image"]
        [false, "cannot be specified without a Docker image constraint"]
      else
        [true, commits.first]
      end
    end
  end

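  # Resolve the docker_image runtime constraint (or the configured
  # default image, if none was given) to a collection portable data hash.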
  def find_docker_image_locator
    if (runtime_constraints.is_a? Hash) and
        runtime_constraints['docker_image'].nil? and
        Rails.configuration.default_docker_image_for_jobs
      runtime_constraints['docker_image'] =
        Rails.configuration.default_docker_image_for_jobs
    end
    resolve_runtime_constraint("docker_image",
                               :docker_image_locator) do |image_search|
      image_tag = runtime_constraints['docker_image_tag']
      if coll = Collection.for_latest_docker_image(image_search, image_tag)
        [true, coll.portable_data_hash]
      else
        [false, "not found for #{image_search}"]
      end
    end
  end

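  # While a job is locked, only the locking user and the system user
  # may modify its work-related attributes; other users may still
  # cancel it. A job may only be locked with the requesting user's own
  # UUID, and only the current lock holder may change or release the lock.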
  def permission_to_update
    if is_locked_by_uuid_was and !(current_user and
                                   (current_user.uuid == is_locked_by_uuid_was or
                                    current_user.uuid == system_user.uuid))
      if script_changed? or
          script_parameters_changed? or
          script_version_changed? or
          (!cancelled_at_was.nil? and
           (cancelled_by_client_uuid_changed? or
            cancelled_by_user_uuid_changed? or
            cancelled_at_changed?)) or
          started_at_changed? or
          finished_at_changed? or
          running_changed? or
          success_changed? or
          output_changed? or
          log_changed? or
          tasks_summary_changed? or
          (state_changed? && state != Cancelled) or
          components_changed?
        logger.warn "User #{current_user.uuid if current_user} tried to change protected job attributes on locked #{self.class.to_s} #{uuid_was}"
        return false
      end
    end
    if !is_locked_by_uuid_changed?
      super
    else
      if !current_user
        logger.warn "Anonymous user tried to change lock on #{self.class.to_s} #{uuid_was}"
        false
      elsif is_locked_by_uuid_was and is_locked_by_uuid_was != current_user.uuid
        logger.warn "User #{current_user.uuid} tried to steal lock on #{self.class.to_s} #{uuid_was} from #{is_locked_by_uuid_was}"
        false
      elsif !is_locked_by_uuid.nil? and is_locked_by_uuid != current_user.uuid
        logger.warn "User #{current_user.uuid} tried to lock #{self.class.to_s} #{uuid_was} with uuid #{is_locked_by_uuid}"
        false
      else
        super
      end
    end
  end

  def update_modified_by_fields
    if self.cancelled_at_changed?
      # Ensure cancelled_at cannot be set to arbitrary non-now times,
      # or changed once it is set.
      if self.cancelled_at and not self.cancelled_at_was
        self.cancelled_at = db_current_time
        self.cancelled_by_user_uuid = current_user.uuid
        self.cancelled_by_client_uuid = current_api_client.andand.uuid
        @need_crunch_dispatch_trigger = true
      else
        self.cancelled_at = self.cancelled_at_was
        self.cancelled_by_user_uuid = self.cancelled_by_user_uuid_was
        self.cancelled_by_client_uuid = self.cancelled_by_client_uuid_was
      end
    end
    super
  end

  def trigger_crunch_dispatch_if_cancelled
    if @need_crunch_dispatch_trigger
      File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do
        # That's all, just create/touch a file for crunch-job to see.
      end
    end
  end

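  # Keep started_at/finished_at/cancelled_at and the legacy "running"
  # and "success" flags consistent with the state attribute.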
  def update_timestamps_when_state_changes
    return if not (state_changed? or new_record?)

    case state
    when Running
      self.started_at ||= db_current_time
    when Failed, Complete
      self.finished_at ||= db_current_time
    when Cancelled
      self.cancelled_at ||= db_current_time
    end

    # TODO: Remove the following case block when old "success" and
    # "running" attrs go away. Until then, this ensures we still
    # expose correct success/running flags to older clients, even if
    # some new clients are writing only the new state attribute.
    case state
    when Queued
      self.running = false
      self.success = nil
    when Running
      self.running = true
      self.success = nil
    when Cancelled, Failed
      self.running = false
      self.success = false
    when Complete
      self.running = false
      self.success = true
    end
    self.running ||= false # Default to false instead of nil.

    @need_crunch_dispatch_trigger = true

    true
  end

  def update_state_from_old_state_attrs
    # If a client has touched the legacy state attrs, update the
    # "state" attr to agree with the updated values of the legacy
    # attrs.
    #
    # TODO: Remove this method when old "success" and "running" attrs
    # go away.
    if cancelled_at_changed? or
        success_changed? or
        running_changed? or
        state.nil?
      if cancelled_at
        self.state = Cancelled
      elsif success == false
        self.state = Failed
      elsif success == true
        self.state = Complete
      elsif running == true
        self.state = Running
      else
        self.state = Queued
      end
    end
    true
  end

  def validate_status
    if self.state.in?(States)
      true
    else
      errors.add :state, "#{state.inspect} must be one of: #{States.inspect}"
      false
    end
  end

  def validate_state_change
    ok = true
    if self.state_changed?
      ok = case self.state_was
           when nil
             # state isn't set yet
             true
           when Queued
             # Permit going from queued to any state
             true
           when Running
             # From running, may only transition to a finished state
             [Complete, Failed, Cancelled].include? self.state
           when Complete, Failed, Cancelled
             # Once in a finished state, don't permit any more state changes
             false
           else
             # Any other state transition is also invalid
             false
           end
      if not ok
        errors.add :state, "invalid change from #{self.state_was} to #{self.state}"
      end
    end
    ok
  end

  def ensure_no_collection_uuids_in_script_params
    # Fail validation if any script_parameters field includes a string containing a
    # collection uuid pattern.
    if self.script_parameters_changed?
      if recursive_hash_search(self.script_parameters, Collection.uuid_regex)
        self.errors.add :script_parameters, "must use portable_data_hash instead of collection uuid"
        return false
      end
    end
    true
  end

  # recursive_hash_search searches recursively through hashes and
  # arrays in 'thing' for string fields matching regular expression
  # 'pattern'.  Returns true if pattern is found, false otherwise.
  def recursive_hash_search thing, pattern
    if thing.is_a? Hash
      thing.each do |k, v|
        return true if recursive_hash_search v, pattern
      end
    elsif thing.is_a? Array
      thing.each do |k|
        return true if recursive_hash_search k, pattern
      end
    elsif thing.is_a? String
      return true if thing.match pattern
    end
    false
  end
end