Merge branch 'master' into 10979-cancelled-job-nodes
[arvados.git] / services / api / app / models / job.rb
class Job < ArvadosModel
  include HasUuid
  include KindAndEtag
  include CommonApiTemplate
  extend CurrentApiClient
  serialize :components, Hash
  # These are resolved server-side by the find_* validations below;
  # clients may not mass-assign them.
  attr_protected :arvados_sdk_version, :docker_image_locator
  serialize :script_parameters, Hash
  serialize :runtime_constraints, Hash
  serialize :tasks_summary, Hash
  before_create :ensure_unique_submit_id
  # Wake up crunch-dispatch after a cancellation has actually committed.
  after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
  before_validation :set_priority
  # Keep the new "state" attr consistent with the legacy
  # success/running/cancelled_at attrs written by older clients.
  before_validation :update_state_from_old_state_attrs
  before_validation :update_script_parameters_digest
  validate :ensure_script_version_is_commit
  validate :find_docker_image_locator
  validate :find_arvados_sdk_version
  validate :validate_status
  validate :validate_state_change
  validate :ensure_no_collection_uuids_in_script_params
  before_save :tag_version_in_internal_repository
  before_save :update_timestamps_when_state_changes

  has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
  has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)

  # Raised by ensure_unique_submit_id when a job is created with a
  # submit_id that some job already has.
  class SubmitIdReused < StandardError
  end
30
  # Attributes exposed to API clients, in addition to the common set.
  api_accessible :user, extend: :common do |t|
    t.add :submit_id
    t.add :priority
    t.add :script
    t.add :script_parameters
    t.add :script_version
    t.add :cancelled_at
    t.add :cancelled_by_client_uuid
    t.add :cancelled_by_user_uuid
    t.add :started_at
    t.add :finished_at
    t.add :output
    t.add :success
    t.add :running
    t.add :state
    t.add :is_locked_by_uuid
    t.add :log
    t.add :runtime_constraints
    t.add :tasks_summary
    t.add :nondeterministic
    t.add :repository
    t.add :supplied_script_version
    t.add :arvados_sdk_version
    t.add :docker_image_locator
    t.add :queue_position
    t.add :node_uuids
    t.add :description
    t.add :components
  end
60
  # Supported states for a job.  Each element assignment also defines a
  # same-named constant (Job::Queued etc.) used throughout this class.
  States = [
            (Queued = 'Queued'),
            (Running = 'Running'),
            (Cancelled = 'Cancelled'),
            (Failed = 'Failed'),
            (Complete = 'Complete'),
           ]

  after_initialize do
    # Set by update_modified_by_fields / update_timestamps_when_state_changes;
    # read by trigger_crunch_dispatch_if_cancelled after commit.
    @need_crunch_dispatch_trigger = false
  end
73
74   def assert_finished
75     update_attributes(finished_at: finished_at || db_current_time,
76                       success: success.nil? ? false : success,
77                       running: false)
78   end
79
80   def node_uuids
81     nodes.map(&:uuid)
82   end
83
84   def self.queue
85     self.where('state = ?', Queued).order('priority desc, created_at')
86   end
87
88   def queue_position
89     # We used to report this accurately, but the implementation made queue
90     # API requests O(n**2) for the size of the queue.  See #8800.
91     # We've soft-disabled it because it's not clear we even want this
92     # functionality: now that we have Node Manager with support for multiple
93     # node sizes, "queue position" tells you very little about when a job will
94     # run.
95     state == Queued ? 0 : nil
96   end
97
98   def self.running
99     self.where('running = ?', true).
100       order('priority desc, created_at')
101   end
102
103   def lock locked_by_uuid
104     with_lock do
105       unless self.state == Queued and self.is_locked_by_uuid.nil?
106         raise AlreadyLockedError
107       end
108       self.state = Running
109       self.is_locked_by_uuid = locked_by_uuid
110       self.save!
111     end
112   end
113
114   def update_script_parameters_digest
115     self.script_parameters_digest = self.class.sorted_hash_digest(script_parameters)
116   end
117
118   def self.searchable_columns operator
119     super - ["script_parameters_digest"]
120   end
121
122   def self.full_text_searchable_columns
123     super - ["script_parameters_digest"]
124   end
125
  # Convert Job-specific filter operators ("in git", "not in git",
  # "in docker", "not in docker") in orig_filters into plain filters the
  # generic SQL filter machinery understands, resolving git refspecs to
  # commit lists and docker image names to portable data hashes readable
  # by read_users.  Returns the rewritten filter list; raises
  # ArgumentError on contradictory or unresolvable filters.
  def self.load_job_specific_filters attrs, orig_filters, read_users
    # Convert Job-specific @filters entries into general SQL filters.
    script_info = {"repository" => nil, "script" => nil}
    # Default git range per attribute: anything up to HEAD, no exclusions.
    git_filters = Hash.new do |hash, key|
      hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
    end
    filters = []
    orig_filters.each do |attr, operator, operand|
      if (script_info.has_key? attr) and (operator == "=")
        if script_info[attr].nil?
          script_info[attr] = operand
        elsif script_info[attr] != operand
          raise ArgumentError.new("incompatible #{attr} filters")
        end
      end
      case operator
      when "in git"
        git_filters[attr]["min_version"] = operand
      when "not in git"
        git_filters[attr]["exclude_versions"] += Array.wrap(operand)
      when "in docker", "not in docker"
        # Resolve each "repo:tag" search term to the matching collections'
        # portable data hashes, then filter on docker_image_locator.
        image_hashes = Array.wrap(operand).flat_map do |search_term|
          image_search, image_tag = search_term.split(':', 2)
          Collection.
            find_all_for_docker_image(image_search, image_tag, read_users).
            map(&:portable_data_hash)
        end
        filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
      else
        filters << [attr, operator, operand]
      end
    end

    # Build a real script_version filter from any "not? in git" filters.
    git_filters.each_pair do |attr, filter|
      case attr
      when "script_version"
        script_info.each_pair do |key, value|
          if value.nil?
            raise ArgumentError.new("script_version filter needs #{key} filter")
          end
        end
        filter["repository"] = script_info["repository"]
        if attrs[:script_version]
          filter["max_version"] = attrs[:script_version]
        else
          # Using HEAD, set earlier by the hash default, is fine.
        end
      when "arvados_sdk_version"
        filter["repository"] = "arvados"
      else
        raise ArgumentError.new("unknown attribute for git filter: #{attr}")
      end
      revisions = Commit.find_commit_range(filter["repository"],
                                           filter["min_version"],
                                           filter["max_version"],
                                           filter["exclude_versions"])
      if revisions.empty?
        raise ArgumentError.
          new("error searching #{filter['repository']} from " +
              "'#{filter['min_version']}' to '#{filter['max_version']}', " +
              "excluding #{filter['exclude_versions']}")
      end
      filters.append([attr, "in", revisions])
    end

    filters
  end
194
  # Look for an existing job whose results can be reused instead of
  # running a new job described by attrs/params/filters.  Returns a
  # Complete job whose output is readable and consistent across all
  # matching completed jobs; failing that, a matching Queued/Running job
  # owned by the current user; otherwise nil.
  def self.find_reusable attrs, params, filters, read_users
    if filters.empty?  # Translate older creation parameters into filters.
      filters =
        [["repository", "=", attrs[:repository]],
         ["script", "=", attrs[:script]],
         ["script_version", "not in git", params[:exclude_script_versions]],
        ].reject { |filter| filter.last.nil? or filter.last.empty? }
      if !params[:minimum_script_version].blank?
        filters << ["script_version", "in git",
                     params[:minimum_script_version]]
      else
        filters += default_git_filters("script_version", attrs[:repository],
                                       attrs[:script_version])
      end
      if image_search = attrs[:runtime_constraints].andand["docker_image"]
        if image_tag = attrs[:runtime_constraints]["docker_image_tag"]
          image_search += ":#{image_tag}"
        end
        image_locator = Collection.
          for_latest_docker_image(image_search).andand.portable_data_hash
      else
        image_locator = nil
      end
      filters << ["docker_image_locator", "=",
                  Collection.docker_migration_pdh(read_users, image_locator)]
      if sdk_version = attrs[:runtime_constraints].andand["arvados_sdk_version"]
        filters += default_git_filters("arvados_sdk_version", "arvados", sdk_version)
      end
      filters = load_job_specific_filters(attrs, filters, read_users)
    end

    # Check specified filters for some reasonableness.
    filter_names = filters.map { |f| f.first }.uniq
    ["repository", "script"].each do |req_filter|
      if not filter_names.include?(req_filter)
        return send_error("#{req_filter} filter required")
      end
    end

    # Search for a reusable Job, and return it if found.
    candidates = Job.
      readable_by(current_user).
      where('state = ? or (owner_uuid = ? and state in (?))',
            Job::Complete, current_user.uuid, [Job::Queued, Job::Running]).
      where('script_parameters_digest = ?', Job.sorted_hash_digest(attrs[:script_parameters])).
      where('nondeterministic is distinct from ?', true).
      order('state desc, created_at') # prefer Running jobs over Queued
    candidates = apply_filters candidates, filters
    chosen = nil         # nil = undecided, false = do not reuse, Job = reuse it
    incomplete_job = nil
    candidates.each do |j|
      if j.state != Job::Complete
        # We'll use this if we don't find a job that has completed
        incomplete_job ||= j
        next
      end

      if chosen == false
        # We have already decided not to reuse any completed job
        next
      elsif chosen
        if chosen.output != j.output
          # If two matching jobs produced different outputs, run a new
          # job (or use one that's already running/queued) instead of
          # choosing one arbitrarily.
          chosen = false
        end
        # ...and that's the only thing we need to do once we've chosen
        # a job to reuse.
      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
        # As soon as the output we will end up returning (if any) is
        # decided, check whether it will be visible to the user; if
        # not, any further investigation of reusable jobs is futile.
        chosen = false
      else
        chosen = j
      end
    end
    chosen || incomplete_job
  end
275
276   def self.default_git_filters(attr_name, repo_name, refspec)
277     # Add a filter to @filters for `attr_name` = the latest commit available
278     # in `repo_name` at `refspec`.  No filter is added if refspec can't be
279     # resolved.
280     commits = Commit.find_commit_range(repo_name, nil, refspec, nil)
281     if commit_hash = commits.first
282       [[attr_name, "=", commit_hash]]
283     else
284       []
285     end
286   end
287
  # Cancel this job.  With cascade: true, also cancels any queued/running
  # child jobs and running pipeline instances listed in self.components.
  # The whole operation runs in one transaction unless the caller already
  # opened one (need_transaction: false).  Raises
  # InvalidStateTransitionError if the job is in a terminal state other
  # than Cancelled; cancelling an already-cancelled job is a no-op.
  def cancel(cascade: false, need_transaction: true)
    if need_transaction
      ActiveRecord::Base.transaction do
        cancel(cascade: cascade, need_transaction: false)
      end
      return
    end

    if self.state.in?([Queued, Running])
      self.state = Cancelled
      self.save!
    elsif self.state != Cancelled
      raise InvalidStateTransitionError
    end

    return if !cascade

    # cancel all children; they could be jobs or pipeline instances
    children = self.components.andand.collect{|_, u| u}.compact

    return if children.empty?

    # cancel any child jobs
    Job.where(uuid: children, state: [Queued, Running]).each do |job|
      job.cancel(cascade: cascade, need_transaction: false)
    end

    # cancel any child pipelines
    PipelineInstance.where(uuid: children, state: [PipelineInstance::RunningOnServer, PipelineInstance::RunningOnClient]).each do |pi|
      pi.cancel(cascade: cascade, need_transaction: false)
    end
  end
320
321   protected
322
323   def self.sorted_hash_digest h
324     Digest::MD5.hexdigest(Oj.dump(deep_sort_hash(h)))
325   end
326
327   def foreign_key_attributes
328     super + %w(output log)
329   end
330
331   def skip_uuid_read_permission_check
332     super + %w(cancelled_by_client_uuid)
333   end
334
335   def skip_uuid_existence_check
336     super + %w(output log)
337   end
338
339   def set_priority
340     if self.priority.nil?
341       self.priority = 0
342     end
343     true
344   end
345
  # Validation: resolve script_version (a commit-ish) to a full commit
  # sha1 whenever the job is new or repository/script_version changed,
  # preserving the original request in supplied_script_version.  Adds a
  # validation error when the refspec does not resolve.
  def ensure_script_version_is_commit
    if state == Running
      # Apparently client has already decided to go for it. This is
      # needed to run a local job using a local working directory
      # instead of a commit-ish.
      return true
    end
    if new_record? or repository_changed? or script_version_changed?
      sha1 = Commit.find_commit_range(repository,
                                      nil, script_version, nil).first
      if not sha1
        errors.add :script_version, "#{script_version} does not resolve to a commit"
        return false
      end
      if supplied_script_version.nil? or supplied_script_version.empty?
        self.supplied_script_version = script_version
      end
      self.script_version = sha1
    end
    true
  end
367
  # before_save hook: tag the resolved script_version commit with this
  # job's uuid in the internal git repository, so the exact code can be
  # fetched later.  The uuid is assigned early (assign_uuid) so the tag
  # name is known; if tagging fails the uuid assignment is rolled back
  # and the exception propagates, aborting the save.
  def tag_version_in_internal_repository
    if state == Running
      # No point now. See ensure_script_version_is_commit.
      true
    elsif errors.any?
      # Won't be saved, and script_version might not even be valid.
      true
    elsif new_record? or repository_changed? or script_version_changed?
      uuid_was = uuid
      begin
        assign_uuid
        Commit.tag_in_internal_repository repository, script_version, uuid
      rescue
        self.uuid = uuid_was
        raise
      end
    end
  end
386
387   def ensure_unique_submit_id
388     if !submit_id.nil?
389       if Job.where('submit_id=?',self.submit_id).first
390         raise SubmitIdReused.new
391       end
392     end
393     true
394   end
395
396   def resolve_runtime_constraint(key, attr_sym)
397     if ((runtime_constraints.is_a? Hash) and
398         (search = runtime_constraints[key]))
399       ok, result = yield search
400     else
401       ok, result = true, nil
402     end
403     if ok
404       send("#{attr_sym}=".to_sym, result)
405     else
406       errors.add(attr_sym, result)
407     end
408     ok
409   end
410
  # Validation: resolve the "arvados_sdk_version" runtime constraint (a
  # commit-ish in the "arvados" repository) to a full commit sha1 stored
  # in arvados_sdk_version.  Pinning an SDK version is only allowed when
  # a docker_image constraint is also present.
  def find_arvados_sdk_version
    resolve_runtime_constraint("arvados_sdk_version",
                               :arvados_sdk_version) do |git_search|
      commits = Commit.find_commit_range("arvados",
                                         nil, git_search, nil)
      if commits.empty?
        [false, "#{git_search} does not resolve to a commit"]
      elsif not runtime_constraints["docker_image"]
        [false, "cannot be specified without a Docker image constraint"]
      else
        [true, commits.first]
      end
    end
  end
425
426   def find_docker_image_locator
427     if runtime_constraints.is_a? Hash
428       runtime_constraints['docker_image'] ||=
429         Rails.configuration.default_docker_image_for_jobs
430     end
431
432     resolve_runtime_constraint("docker_image",
433                                :docker_image_locator) do |image_search|
434       image_tag = runtime_constraints['docker_image_tag']
435       if coll = Collection.for_latest_docker_image(image_search, image_tag)
436         [true, coll.portable_data_hash]
437       else
438         [false, "not found for #{image_search}"]
439       end
440     end
441     Rails.logger.info("docker_image_locator is #{docker_image_locator}")
442     if docker_image_locator && docker_image_locator_changed?
443       self.docker_image_locator =
444         Collection.docker_migration_pdh([current_user], docker_image_locator)
445     end
446     Rails.logger.info("docker_image_locator is #{docker_image_locator}")
447   end
448
  # Authorization hook: while a job is locked, only the lock holder or
  # the system user may modify the run-related ("protected") attributes;
  # transitioning to Cancelled remains allowed for others.  Lock changes
  # themselves are restricted so a user can only take a free lock for
  # themselves or release their own.
  def permission_to_update
    if is_locked_by_uuid_was and !(current_user and
                                   (current_user.uuid == is_locked_by_uuid_was or
                                    current_user.uuid == system_user.uuid))
      if script_changed? or
          script_parameters_changed? or
          script_version_changed? or
          (!cancelled_at_was.nil? and
           (cancelled_by_client_uuid_changed? or
            cancelled_by_user_uuid_changed? or
            cancelled_at_changed?)) or
          started_at_changed? or
          finished_at_changed? or
          running_changed? or
          success_changed? or
          output_changed? or
          log_changed? or
          tasks_summary_changed? or
          (state_changed? && state != Cancelled) or
          components_changed?
        logger.warn "User #{current_user.uuid if current_user} tried to change protected job attributes on locked #{self.class.to_s} #{uuid_was}"
        return false
      end
    end
    if !is_locked_by_uuid_changed?
      super
    else
      if !current_user
        logger.warn "Anonymous user tried to change lock on #{self.class.to_s} #{uuid_was}"
        false
      elsif is_locked_by_uuid_was and is_locked_by_uuid_was != current_user.uuid
        logger.warn "User #{current_user.uuid} tried to steal lock on #{self.class.to_s} #{uuid_was} from #{is_locked_by_uuid_was}"
        false
      elsif !is_locked_by_uuid.nil? and is_locked_by_uuid != current_user.uuid
        logger.warn "User #{current_user.uuid} tried to lock #{self.class.to_s} #{uuid_was} with uuid #{is_locked_by_uuid}"
        false
      else
        super
      end
    end
  end
490
  # Overrides the ArvadosModel hook to additionally pin cancelled_at:
  # it can only be set to the current server time (recording the
  # cancelling user and API client as a side effect), and once set it
  # cannot be altered.  Any other attempted change is silently reverted.
  def update_modified_by_fields
    if self.cancelled_at_changed?
      # Ensure cancelled_at cannot be set to arbitrary non-now times,
      # or changed once it is set.
      if self.cancelled_at and not self.cancelled_at_was
        self.cancelled_at = db_current_time
        self.cancelled_by_user_uuid = current_user.uuid
        self.cancelled_by_client_uuid = current_api_client.andand.uuid
        @need_crunch_dispatch_trigger = true
      else
        # Revert the attempted modification.
        self.cancelled_at = self.cancelled_at_was
        self.cancelled_by_user_uuid = self.cancelled_by_user_uuid_was
        self.cancelled_by_client_uuid = self.cancelled_by_client_uuid_was
      end
    end
    super
  end
508
509   def trigger_crunch_dispatch_if_cancelled
510     if @need_crunch_dispatch_trigger
511       File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do
512         # That's all, just create/touch a file for crunch-job to see.
513       end
514     end
515   end
516
517   def update_timestamps_when_state_changes
518     return if not (state_changed? or new_record?)
519
520     case state
521     when Running
522       self.started_at ||= db_current_time
523     when Failed, Complete
524       self.finished_at ||= db_current_time
525     when Cancelled
526       self.cancelled_at ||= db_current_time
527     end
528
529     # TODO: Remove the following case block when old "success" and
530     # "running" attrs go away. Until then, this ensures we still
531     # expose correct success/running flags to older clients, even if
532     # some new clients are writing only the new state attribute.
533     case state
534     when Queued
535       self.running = false
536       self.success = nil
537     when Running
538       self.running = true
539       self.success = nil
540     when Cancelled, Failed
541       self.running = false
542       self.success = false
543     when Complete
544       self.running = false
545       self.success = true
546     end
547     self.running ||= false # Default to false instead of nil.
548
549     @need_crunch_dispatch_trigger = true
550
551     true
552   end
553
554   def update_state_from_old_state_attrs
555     # If a client has touched the legacy state attrs, update the
556     # "state" attr to agree with the updated values of the legacy
557     # attrs.
558     #
559     # TODO: Remove this method when old "success" and "running" attrs
560     # go away.
561     if cancelled_at_changed? or
562         success_changed? or
563         running_changed? or
564         state.nil?
565       if cancelled_at
566         self.state = Cancelled
567       elsif success == false
568         self.state = Failed
569       elsif success == true
570         self.state = Complete
571       elsif running == true
572         self.state = Running
573       else
574         self.state = Queued
575       end
576     end
577     true
578   end
579
580   def validate_status
581     if self.state.in?(States)
582       true
583     else
584       errors.add :state, "#{state.inspect} must be one of: #{States.inspect}"
585       false
586     end
587   end
588
589   def validate_state_change
590     ok = true
591     if self.state_changed?
592       ok = case self.state_was
593            when nil
594              # state isn't set yet
595              true
596            when Queued
597              # Permit going from queued to any state
598              true
599            when Running
600              # From running, may only transition to a finished state
601              [Complete, Failed, Cancelled].include? self.state
602            when Complete, Failed, Cancelled
603              # Once in a finished state, don't permit any more state changes
604              false
605            else
606              # Any other state transition is also invalid
607              false
608            end
609       if not ok
610         errors.add :state, "invalid change from #{self.state_was} to #{self.state}"
611       end
612     end
613     ok
614   end
615
616   def ensure_no_collection_uuids_in_script_params
617     # Fail validation if any script_parameters field includes a string containing a
618     # collection uuid pattern.
619     if self.script_parameters_changed?
620       if recursive_hash_search(self.script_parameters, Collection.uuid_regex)
621         self.errors.add :script_parameters, "must use portable_data_hash instead of collection uuid"
622         return false
623       end
624     end
625     true
626   end
627
628   # recursive_hash_search searches recursively through hashes and
629   # arrays in 'thing' for string fields matching regular expression
630   # 'pattern'.  Returns true if pattern is found, false otherwise.
631   def recursive_hash_search thing, pattern
632     if thing.is_a? Hash
633       thing.each do |k, v|
634         return true if recursive_hash_search v, pattern
635       end
636     elsif thing.is_a? Array
637       thing.each do |k|
638         return true if recursive_hash_search k, pattern
639       end
640     elsif thing.is_a? String
641       return true if thing.match pattern
642     end
643     false
644   end
645 end