(ARGV.any? ? ARGV : ['--jobs', '--pipelines']).each do |arg|
  case arg
  when '--jobs'
    $options[:jobs] = true
  when '--pipelines'
    $options[:pipelines] = true
  else
    abort "Unrecognized command line option '#{arg}'"
  end
end
if not ($options[:jobs] or $options[:pipelines])
  abort "Nothing to do. Please specify at least one of: --jobs, --pipelines."
end
ARGV.reject! { |a| a =~ /--jobs|--pipelines/ }
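
# Illustrative invocations (not part of the original source):
#   crunch-dispatch.rb --jobs        # dispatch queued Jobs only
#   crunch-dispatch.rb --pipelines   # dispatch queued PipelineInstances only
#   crunch-dispatch.rb               # no flags: same as --jobs --pipelines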
$signal = {}
%w{TERM INT}.each do |sig|
  signame = sig
  Signal.trap(sig) do
    $stderr.puts "Received #{signame} signal"
    $signal[sig.downcase.to_sym] = true
  end
end
if ENV["CRUNCH_DISPATCH_LOCKFILE"]
  lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
  lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
  unless lockfile.flock File::LOCK_EX|File::LOCK_NB
    abort "Lock unavailable on #{lockfilename} - exit"
  end
end
41 ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
43 require File.dirname(__FILE__) + '/../config/boot'
44 require File.dirname(__FILE__) + '/../config/environment'
class LogTime < Time
  def to_s
    self.utc.strftime "%Y-%m-%d_%H:%M:%S"
  end
end
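
# Illustrative: LogTime.now.to_s renders a UTC timestamp like
# "2015-06-01_17:30:00", the format used in the log lines below.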
class Dispatcher
  include ApplicationHelper

  EXIT_TEMPFAIL = 75

  def initialize
    @crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
    if @crunch_job_bin.empty?
      raise "No CRUNCH_JOB_BIN env var, and crunch-job not in path."
    end

    @arvados_internal = Rails.configuration.git_internal_dir
    if not File.exists? @arvados_internal
      $stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
      raise "No internal git repository available" unless ($? == 0)
    end

    @repo_root = Rails.configuration.git_repositories_dir
    @arvados_repo_path = Repository.where(name: "arvados").first.server_path
    @authorizations = {}
    @did_recently = {}
    @fetched_commits = {}
    @git_tags = {}
    @node_state = {}
    @pipe_auth_tokens = {}
    @running = {}
    @todo = []
    @todo_pipelines = []
  end
  def sysuser
    return act_as_system_user
  end
  def refresh_todo
    if $options[:jobs]
      @todo = Job.queue.select(&:repository)
    end
    if $options[:pipelines]
      @todo_pipelines = PipelineInstance.queue
    end
  end
  def each_slurm_line(cmd, outfmt, max_fields=nil)
    max_fields ||= outfmt.split(":").size
    max_fields += 1  # To accommodate the node field we add
    @@slurm_version ||= Gem::Version.new(`sinfo --version`.match(/\b[\d\.]+\b/)[0])
    if Gem::Version.new('2.3') <= @@slurm_version
      `#{cmd} --noheader -o '%n:#{outfmt}'`.each_line do |line|
        yield line.chomp.split(":", max_fields)
      end
    else
      # Expand rows with hostname ranges (like "foo[1-3,5,9-12]:idle")
      # into multiple rows with one hostname each.
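      # Illustrative: "compute[1-3,5]:idle" yields compute1:idle,
      # compute2:idle, compute3:idle, and compute5:idle.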
      `#{cmd} --noheader -o '%N:#{outfmt}'`.each_line do |line|
        tokens = line.chomp.split(":", max_fields)
        if (re = tokens[0].match /^(.*?)\[([-,\d]+)\]$/)
          tokens.shift
          re[2].split(",").each do |range|
            range = range.split("-").collect(&:to_i)
            (range[0]..range[-1]).each do |n|
              yield [re[1] + n.to_s] + tokens
            end
          end
        else
          yield tokens
        end
      end
    end
  end
126 each_slurm_line("sinfo", "%t") do |hostname, state|
127 # Treat nodes in idle* state as down, because the * means that slurm
128 # hasn't been able to communicate with it recently.
129 state.sub!(/^idle\*/, "down")
130 state.sub!(/\W+$/, "")
131 state = "down" unless %w(idle alloc down).include?(state)
132 slurm_nodes[hostname] = {state: state, job: nil}
134 each_slurm_line("squeue", "%j") do |hostname, job_uuid|
135 slurm_nodes[hostname][:job] = job_uuid if slurm_nodes[hostname]
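
  # Illustrative return value from slurm_status (hostnames and uuid are
  # hypothetical):
  #   {"compute0" => {state: "alloc", job: "zzzzz-8i9sb-0123456789abcde"},
  #    "compute1" => {state: "idle",  job: nil}}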
  def update_node_status
    return unless Server::Application.config.crunch_job_wrapper.to_s.match /^slurm/
    slurm_status.each_pair do |hostname, slurmdata|
      next if @node_state[hostname] == slurmdata
      begin
        node = Node.where('hostname=?', hostname).order(:last_ping_at).last
        if node
          $stderr.puts "dispatch: update #{hostname} state to #{slurmdata}"
          node.info["slurm_state"] = slurmdata[:state]
          node.job_uuid = slurmdata[:job]
          if node.save
            @node_state[hostname] = slurmdata
          else
            $stderr.puts "dispatch: failed to update #{node.uuid}: #{node.errors.messages}"
          end
        elsif slurmdata[:state] != 'down'
          $stderr.puts "dispatch: SLURM reports '#{hostname}' is not down, but no node has that name"
        end
      rescue => error
        $stderr.puts "dispatch: error updating #{hostname} node status: #{error}"
      end
    end
  end
  def positive_int(raw_value, default=nil)
    value = begin raw_value.to_i rescue 0 end
    if value > 0
      value
    else
      default
    end
  end
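
  # Illustrative (String#to_i never raises, so non-numeric input falls
  # back to the default):
  #   positive_int("4")        # => 4
  #   positive_int("junk", 1)  # => 1
  #   positive_int(nil, 1)     # => 1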
  NODE_CONSTRAINT_MAP = {
    # Map Job runtime_constraints keys to the corresponding Node info key.
    'min_ram_mb_per_node' => 'total_ram_mb',
    'min_scratch_mb_per_node' => 'total_scratch_mb',
    'min_cores_per_node' => 'total_cpu_cores',
  }
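
  # Illustrative: a job with runtime_constraints['min_ram_mb_per_node']
  # of 8192 only counts nodes whose info['total_ram_mb'] is >= 8192.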
  def nodes_available_for_job_now(job)
    # Find Nodes that satisfy a Job's runtime constraints (by building
    # a list of Procs and using them to test each Node). If there are
    # enough to run the Job, return an array of their names.
    # Otherwise, return nil.
    need_procs = NODE_CONSTRAINT_MAP.each_pair.map do |job_key, node_key|
      Proc.new do |node|
        positive_int(node.info[node_key], 0) >=
          positive_int(job.runtime_constraints[job_key], 0)
      end
    end
    min_node_count = positive_int(job.runtime_constraints['min_nodes'], 1)
    usable_nodes = []
    Node.find_each do |node|
      good_node = (node.info['slurm_state'] == 'idle')
      need_procs.each { |node_test| good_node &&= node_test.call(node) }
      if good_node
        usable_nodes << node
        if usable_nodes.count >= min_node_count
          return usable_nodes.map { |node| node.hostname }
        end
      end
    end
    nil
  end
  def nodes_available_for_job(job)
    # Check if there are enough idle nodes with the Job's minimum
    # hardware requirements to run it. If so, return an array of
    # their names. If not, up to once per hour, signal start_jobs to
    # hold off launching Jobs. This delay is meant to give the Node
    # Manager an opportunity to make new resources available for new
    # Jobs.
    #
    # The exact timing parameters here might need to be adjusted for
    # the best balance between helping the longest-waiting Jobs run,
    # and making efficient use of immediately available resources.
    # These are all just first efforts until we have more data to work
    # with.
    nodelist = nodes_available_for_job_now(job)
    if nodelist.nil? and not did_recently(:wait_for_available_nodes, 3600)
      $stderr.puts "dispatch: waiting for nodes for #{job.uuid}"
      @node_wait_deadline = Time.now + 5.minutes
    end
    nodelist
  end
  def fail_job job, message
    $stderr.puts "dispatch: #{job.uuid}: #{message}"
    begin
      Log.new(object_uuid: job.uuid,
              event_type: 'dispatch',
              owner_uuid: job.owner_uuid,
              summary: message,
              properties: {"text" => message}).save!
    rescue
      $stderr.puts "dispatch: log.create failed"
    end

    begin
      job.lock @authorizations[job.uuid].user.uuid
      job.state = "Failed"
      if not job.save
        $stderr.puts "dispatch: save failed setting job #{job.uuid} to failed"
      end
    rescue ArvadosModel::AlreadyLockedError
      $stderr.puts "dispatch: tried to mark job #{job.uuid} as failed but it was already locked by someone else"
    end
  end
  def stdout_s(cmd_a, opts={})
    IO.popen(cmd_a, "r", opts) do |pipe|
      return pipe.read.chomp
    end
  end

  def git_cmd(*cmd_a)
    ["git", "--git-dir=#{@arvados_internal}"] + cmd_a
  end
  def get_authorization(job)
    if @authorizations[job.uuid] and
        @authorizations[job.uuid].user.uuid != job.modified_by_user_uuid
      # We already made a token for this job, but we need a new one
      # because modified_by_user_uuid has changed (the job will run
      # as a different user).
      @authorizations[job.uuid].update_attributes expires_at: Time.now
      @authorizations[job.uuid] = nil
    end
    if not @authorizations[job.uuid]
      auth = ApiClientAuthorization.
        new(user: User.where('uuid=?', job.modified_by_user_uuid).first,
            api_client_id: 0)
      if not auth.save
        $stderr.puts "dispatch: auth.save failed for #{job.uuid}"
      else
        @authorizations[job.uuid] = auth
      end
    end
    @authorizations[job.uuid]
  end
  def internal_repo_has_commit? sha1
    if (not @fetched_commits[sha1] and
        sha1 == stdout_s(git_cmd("rev-list", "-n1", sha1), err: "/dev/null") and
        $? == 0)
      @fetched_commits[sha1] = true
    end
    return @fetched_commits[sha1]
  end
  def get_commit src_repo, sha1
    return true if internal_repo_has_commit? sha1

    # commit does not exist in internal repository, so import the
    # source repository using git fetch-pack
    cmd = git_cmd("fetch-pack", "--no-progress", "--all", src_repo)
    $stderr.puts "dispatch: #{cmd}"
    $stderr.puts(stdout_s(cmd))
    @fetched_commits[sha1] = ($? == 0)
  end
  def tag_commit(job, commit_hash, tag_name)
    # @git_tags[T]==V if we know commit V has been tagged T in the
    # arvados_internal repository. (Take "job" as a parameter so
    # fail_job can be called; the original code referenced an
    # out-of-scope "job" here.)
    if not @git_tags[tag_name]
      cmd = git_cmd("tag", tag_name, commit_hash)
      $stderr.puts "dispatch: #{cmd}"
      $stderr.puts(stdout_s(cmd, err: "/dev/null"))
      unless $? == 0
        # git tag failed. This may be because the tag already exists, so check for that.
        tag_rev = stdout_s(git_cmd("rev-list", "-n1", tag_name))
        if $? == 0
          # We got a revision back
          if tag_rev != commit_hash
            # Uh oh, the tag doesn't point to the revision we were expecting.
            # Someone has been monkeying with the job record and/or git.
            fail_job job, "Existing tag #{tag_name} points to commit #{tag_rev} but expected commit #{commit_hash}"
            return nil
          end
          # we're okay (fall through to setting @git_tags below)
        else
          # git rev-list failed for some reason.
          fail_job job, "'git tag' for #{tag_name} failed but did not find any existing tag using 'git rev-list'"
          return nil
        end
      end
      # 'git tag' was successful, or there is an existing tag that points to the same revision.
      @git_tags[tag_name] = commit_hash
    elsif @git_tags[tag_name] != commit_hash
      fail_job job, "Existing tag #{tag_name} points to commit #{@git_tags[tag_name]} but this job uses commit #{commit_hash}"
      return nil
    end
    true
  end
  def start_jobs
    @todo.each do |job|
      next if @running[job.uuid]

      cmd_args = nil
      case Server::Application.config.crunch_job_wrapper
      when :none
        if @running.size > 0
          # Don't run more than one at a time.
          return
        end
        cmd_args = []
      when :slurm_immediate
        nodelist = nodes_available_for_job(job)
        if nodelist.nil?
          if Time.now < @node_wait_deadline
            break
          else
            next
          end
        end
        cmd_args = ["salloc",
                    "--chdir=/",
                    "--immediate",
                    "--exclusive",
                    "--no-kill",
                    "--job-name=#{job.uuid}",
                    "--nodelist=#{nodelist.join(',')}"]
      else
        raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"
      end

      if Server::Application.config.crunch_job_user
        cmd_args.unshift("sudo", "-E", "-u",
                         Server::Application.config.crunch_job_user,
                         "PATH=#{ENV['PATH']}",
                         "PERLLIB=#{ENV['PERLLIB']}",
                         "PYTHONPATH=#{ENV['PYTHONPATH']}",
                         "RUBYLIB=#{ENV['RUBYLIB']}",
                         "GEM_PATH=#{ENV['GEM_PATH']}")
      end

      next unless get_authorization job

      ready = internal_repo_has_commit? job.script_version

      if not ready
        # Import the commit from the specified repository into the
        # internal repository. This should have been done already when
        # the job was created/updated; this code is obsolete except to
        # avoid deployment races. Failing the job would be a
        # reasonable thing to do at this point.
        repo = Repository.where(name: job.repository).first
        if repo.nil? or repo.server_path.nil?
          fail_job job, "Repository #{job.repository} not found under #{@repo_root}"
          next
        end
        ready &&= get_commit repo.server_path, job.script_version
        ready &&= tag_commit job, job.script_version, job.uuid
      end

      # This should be unnecessary, because API server does it during
      # job create/update, but it's still not a bad idea to verify the
      # tag is correct before starting the job:
      ready &&= tag_commit job, job.script_version, job.uuid

      # The arvados_sdk_version doesn't support use of arbitrary
      # remote URLs, so the requested version isn't necessarily copied
      # into the internal repository yet.
      if job.arvados_sdk_version
        ready &&= get_commit @arvados_repo_path, job.arvados_sdk_version
        ready &&= tag_commit job, job.arvados_sdk_version, "#{job.uuid}-arvados-sdk"
      end

      if not ready
        fail_job job, "commit not present in internal repository"
        next
      end

      cmd_args += [@crunch_job_bin,
                   '--job-api-token', @authorizations[job.uuid].api_token,
                   '--job', job.uuid,
                   '--git-dir', @arvados_internal]

      $stderr.puts "dispatch: #{cmd_args.join ' '}"

      begin
        i, o, e, t = Open3.popen3(*cmd_args)
      rescue
        $stderr.puts "dispatch: popen3: #{$!}"
        sleep 1
        next
      end

      $stderr.puts "dispatch: job #{job.uuid}"
      start_banner = "dispatch: child #{t.pid} start #{LogTime.now}"
      $stderr.puts start_banner

      @running[job.uuid] = {
        stdin: i,
        stdout: o,
        stderr: e,
        wait_thr: t,
        job: job,
        buf: {stderr: '', stdout: ''},
        started: false,
        sent_int: 0,
        job_auth: @authorizations[job.uuid],
        stderr_buf_to_flush: '',
        stderr_flushed_at: Time.new(0),
        bytes_logged: 0,
        events_logged: 0,
        log_throttle_is_open: true,
        log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
        log_throttle_bytes_so_far: 0,
        log_throttle_lines_so_far: 0,
        log_throttle_bytes_skipped: 0,
      }
      i.close
      update_node_status
    end
  end
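
  # Illustrative: each @running entry tracks one crunch-job child, e.g.
  #   j = @running[job.uuid]
  #   j[:wait_thr].pid   # child pid
  #   j[:buf][:stderr]   # trailing partial line from the last read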
  # Test for hard cap on total output and for log throttling. Returns whether
  # the log line should go to output or not. Modifies "line" in place to
  # replace it with an error if a logging limit is tripped.
  def rate_limit running_job, line
    message = false
    linesize = line.size
    if running_job[:log_throttle_is_open]
      running_job[:log_throttle_lines_so_far] += 1
      running_job[:log_throttle_bytes_so_far] += linesize
      running_job[:bytes_logged] += linesize

      if (running_job[:bytes_logged] >
          Rails.configuration.crunch_limit_log_bytes_per_job)
        message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated."
        running_job[:log_throttle_reset_time] = Time.now + 100.years
        running_job[:log_throttle_is_open] = false

      elsif (running_job[:log_throttle_bytes_so_far] >
             Rails.configuration.crunch_log_throttle_bytes)
        remaining_time = running_job[:log_throttle_reset_time] - Time.now
        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds.\n"
        running_job[:log_throttle_is_open] = false

      elsif (running_job[:log_throttle_lines_so_far] >
             Rails.configuration.crunch_log_throttle_lines)
        remaining_time = running_job[:log_throttle_reset_time] - Time.now
        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds.\n"
        running_job[:log_throttle_is_open] = false
      end
    end

    if not running_job[:log_throttle_is_open]
      # Don't log anything if any limit has been exceeded. Just count lossage.
      running_job[:log_throttle_bytes_skipped] += linesize
    end

    if message
      # Yes, write to logs, but use our "rate exceeded" message
      # instead of the log message that exceeded the limit.
      line.replace message
      true
    else
      running_job[:log_throttle_is_open]
    end
  end
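
  # Illustrative behavior, assuming crunch_log_throttle_lines is 1024:
  # the 1025th line within one throttle period is replaced with a "rate
  # exceeded" message, and later lines are counted as skipped bytes but
  # dropped until log_throttle_reset_time reopens the throttle.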
  def read_pipes
    @running.each do |job_uuid, j|
      now = Time.now
      if now > j[:log_throttle_reset_time]
        # It has been more than throttle_period seconds since the last
        # checkpoint so reset the throttle
        if j[:log_throttle_bytes_skipped] > 0
          message = "#{job_uuid} ! Skipped #{j[:log_throttle_bytes_skipped]} bytes of log"
          $stderr.puts message
          j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"
        end

        j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
        j[:log_throttle_bytes_so_far] = 0
        j[:log_throttle_lines_so_far] = 0
        j[:log_throttle_bytes_skipped] = 0
        j[:log_throttle_is_open] = true
      end

      j[:buf].each do |stream, streambuf|
        # Read some data from the child stream
        buf = ''
        begin
          # It's important to use a big enough buffer here. When we're
          # being flooded with logs, we must read and discard many
          # bytes at once. Otherwise, we can easily peg a CPU with
          # time-checking and other loop overhead. (Quick tests show a
          # 1MiB buffer working 2.5x as fast as a 64 KiB buffer.)
          #
          # So don't reduce this buffer size!
          buf = j[stream].read_nonblock(2**20)
        rescue Errno::EAGAIN, EOFError
        end

        if buf
          # Short circuit the counting code if we're just going to throw
          # away the data anyway.
          if not j[:log_throttle_is_open]
            j[:log_throttle_bytes_skipped] += streambuf.size + buf.size
            streambuf.replace ''
            next
          elsif buf == ''
            next
          end

          # Append to incomplete line from previous read, if any
          streambuf << buf

          bufend = ''
          streambuf.each_line do |line|
            if not line.end_with? $/
              if line.size > Rails.configuration.crunch_log_throttle_bytes
                # Without a limit here, we'll use 2x an arbitrary amount
                # of memory, and waste a lot of time copying strings
                # around, all without providing any feedback to anyone
                # about what's going on _or_ hitting any of our throttle
                # limits.
                #
                # Here we leave "line" alone, knowing it will never be
                # sent anywhere: rate_limit() will reach
                # crunch_log_throttle_bytes immediately. However, we'll
                # leave [...] in bufend: if the trailing end of the long
                # line does end up getting sent anywhere, it will have
                # some indication that it is incomplete.
                bufend = "[...]"
              else
                # If line length is sane, we'll wait for the rest of the
                # line to appear in the next read_pipes() call.
                bufend = line
                break
              end
            end
            # rate_limit returns true or false as to whether to actually log
            # the line or not. It also modifies "line" in place to replace
            # it with an error if a logging limit is tripped.
            if rate_limit j, line
              $stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
              $stderr.puts line
              pub_msg = "#{LogTime.now} #{line.strip}\n"
              j[:stderr_buf_to_flush] << pub_msg
            end
          end

          # Leave the trailing incomplete line (if any) in streambuf for
          # next time.
          streambuf.replace bufend
        end
        # Flush buffered logs to the logs table, if appropriate. We have
        # to do this even if we didn't collect any new logs this time:
        # otherwise, buffered data older than seconds_between_events
        # won't get flushed until new data arrives.
        write_log j
      end
    end
  end
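
  # Illustrative: if one read returns "step 1 done\nstep 2 of ", then
  # "step 1 done\n" is logged now and "step 2 of " stays in streambuf
  # until the next read_pipes() call completes the line.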
  def reap_children
    return if 0 == @running.size
    pid_done = nil
    j_done = nil

    if false
      begin
        pid_done = waitpid(-1, Process::WNOHANG | Process::WUNTRACED)
        if pid_done
          j_done = @running.values.
            select { |j| j[:wait_thr].pid == pid_done }.
            first
        end
      rescue SystemCallError
        # I have @running processes but system reports I have no
        # children. This is likely to happen repeatedly if it happens at
        # all; I will log this no more than once per child process I
        # start.
        if 0 < @running.select { |uuid,j| j[:warned_waitpid_error].nil? }.size
          children = @running.values.collect { |j| j[:wait_thr].pid }.join ' '
          $stderr.puts "dispatch: IPC bug: waitpid() error (#{$!}), but I have children #{children}"
        end
        @running.each do |uuid,j| j[:warned_waitpid_error] = true end
      end
    else
      @running.each do |uuid, j|
        if j[:wait_thr].status == false
          pid_done = j[:wait_thr].pid
          j_done = j
        end
      end
    end

    return if !pid_done
    job_done = j_done[:job]
    $stderr.puts "dispatch: child #{pid_done} exit"
    $stderr.puts "dispatch: job #{job_done.uuid} end"

    # Ensure every last drop of stdout and stderr is consumed.
    read_pipes
    # Reset flush timestamp to make sure log gets written.
    j_done[:stderr_flushed_at] = Time.new(0)
    # Write any remaining logs.
    write_log j_done

    j_done[:buf].each do |stream, streambuf|
      if streambuf != ''
        $stderr.puts streambuf + "\n"
      end
    end

    # Wait the thread (returns a Process::Status)
    exit_status = j_done[:wait_thr].value.exitstatus

    jobrecord = Job.find_by_uuid(job_done.uuid)
    if exit_status != EXIT_TEMPFAIL and jobrecord.state == "Running"
      # crunch-job did not return exit code 75 (see below) and left the job in
      # the "Running" state, which means there was an unhandled error. Fail
      # the job.
      jobrecord.state = "Failed"
      if not jobrecord.save
        $stderr.puts "dispatch: jobrecord.save failed"
      end
    else
      # Don't fail the job if crunch-job didn't even get as far as
      # starting it. If the job failed to run due to an infrastructure
      # issue with crunch-job or slurm, we want the job to stay in the
      # queue. If crunch-job exited after losing a race to another
      # crunch-job process, it exits 75 and we should leave the job
      # record alone so the winner of the race can do its thing.
      #
      # There is still an unhandled race condition: if our crunch-job
      # process is about to lose a race with another crunch-job
      # process, but crashes before getting to its "exit 75" (for
      # example, "cannot fork" or "cannot reach API server") then we
      # will assume, incorrectly, that it's our process's fault that
      # jobrecord.started_at is non-nil, and mark the job as failed
      # even though the winner of the race is probably still doing
      # fine.
    end

    # Invalidate the per-job auth token, unless the job is still queued and we
    # might want to try it again.
    if jobrecord.state != "Queued"
      j_done[:job_auth].update_attributes expires_at: Time.now
    end

    @running.delete job_done.uuid
  end
  def update_pipelines
    expire_tokens = @pipe_auth_tokens.dup
    @todo_pipelines.each do |p|
      pipe_auth = (@pipe_auth_tokens[p.uuid] ||= ApiClientAuthorization.
                   create(user: User.where('uuid=?', p.modified_by_user_uuid).first,
                          api_client_id: 0))
      puts `export ARVADOS_API_TOKEN=#{pipe_auth.api_token} && arv-run-pipeline-instance --run-pipeline-here --no-wait --instance #{p.uuid}`
      expire_tokens.delete p.uuid
    end

    expire_tokens.each do |k, v|
      v.update_attributes expires_at: Time.now
      @pipe_auth_tokens.delete k
    end
  end
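
  # Illustrative: a pipeline that leaves the queue between two
  # update_pipelines calls is still listed in expire_tokens, so its
  # token is expired and dropped from @pipe_auth_tokens above.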
710 $stderr.puts "dispatch: ready"
711 while !$signal[:term] or @running.size > 0
714 @running.each do |uuid, j|
715 if !j[:started] and j[:sent_int] < 2
717 Process.kill 'INT', j[:wait_thr].pid
719 # No such pid = race condition + desired result is
726 refresh_todo unless did_recently(:refresh_todo, 1.0)
727 update_node_status unless did_recently(:update_node_status, 1.0)
728 unless @todo.empty? or did_recently(:start_jobs, 1.0) or $signal[:term]
731 unless (@todo_pipelines.empty? and @pipe_auth_tokens.empty?) or did_recently(:update_pipelines, 5.0)
736 select(@running.values.collect { |j| [j[:stdout], j[:stderr]] }.flatten,
  def did_recently(thing, min_interval)
    if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
      @did_recently[thing] = Time.now
      false
    else
      true
    end
  end
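
  # Illustrative: the first did_recently(:refresh_todo, 1.0) in any
  # one-second window returns false (and records the time); later calls
  # in that window return true, so callers use it as a "skip if done
  # recently" guard.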
  # send message to log table. we want these records to be transient
  def write_log running_job
    return if running_job[:stderr_buf_to_flush] == ''

    # Send out to log event if buffer size exceeds the bytes per event or if
    # it has been at least crunch_log_seconds_between_events seconds since
    # the last flush.
    if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
      begin
        log = Log.new(object_uuid: running_job[:job].uuid,
                      event_type: 'stderr',
                      owner_uuid: running_job[:job].owner_uuid,
                      properties: {"text" => running_job[:stderr_buf_to_flush]})
        log.save!
        running_job[:events_logged] += 1
      rescue => exception
        $stderr.puts "Failed to write logs"
        $stderr.puts exception.backtrace
      end
      running_job[:stderr_buf_to_flush] = ''
      running_job[:stderr_flushed_at] = Time.now
    end
  end
end
# This is how crunch-job child procs know where the "refresh" trigger file is
ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger

# If salloc can't allocate resources immediately, make it use our temporary
# failure exit code. This ensures crunch-dispatch won't mark a job failed
# because of an issue with node allocation. This often happens when
# another dispatcher wins the race to allocate nodes.
ENV["SLURM_EXIT_IMMEDIATE"] = Dispatcher::EXIT_TEMPFAIL.to_s