$options = {}
(ARGV.any? ? ARGV : ['--jobs', '--pipelines']).each do |arg|
  case arg
  when '--jobs'
    $options[:jobs] = true
  when '--pipelines'
    $options[:pipelines] = true
  else
    abort "Unrecognized command line option '#{arg}'"
  end
end
if not ($options[:jobs] or $options[:pipelines])
  abort "Nothing to do. Please specify at least one of: --jobs, --pipelines."
end

ARGV.reject! { |a| a =~ /--jobs|--pipelines/ }
$signal = {}
%w{TERM INT}.each do |sig|
  signame = sig
  Signal.trap(sig) do
    $stderr.puts "Received #{signame} signal"
    $signal[:term] = true
  end
end
if ENV["CRUNCH_DISPATCH_LOCKFILE"]
  lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
  lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
  unless lockfile.flock File::LOCK_EX|File::LOCK_NB
    abort "Lock unavailable on #{lockfilename} - exit"
  end
end
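# For example (hypothetical path), a second dispatcher started with
#   CRUNCH_DISPATCH_LOCKFILE=/var/lock/crunch-dispatch.lock script/crunch-dispatch.rb
# aborts immediately instead of dispatching the same jobs twice.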
ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
require File.dirname(__FILE__) + '/../config/boot'
require File.dirname(__FILE__) + '/../config/environment'
require 'open3'  # Open3.popen3 is used below and needs an explicit require.
class Dispatcher
  include ApplicationHelper

  def sysuser
    return act_as_system_user
  end
  def refresh_todo
    @todo = []
    if $options[:jobs]
      @todo = Job.queue.select(&:repository)
    end
    @todo_pipelines = []
    if $options[:pipelines]
      @todo_pipelines = PipelineInstance.queue
    end
  end
  def each_slurm_line(cmd, outfmt, max_fields=nil)
    max_fields ||= outfmt.split(":").size
    max_fields += 1 # To accommodate the node field we add
    @@slurm_version ||= Gem::Version.new(`sinfo --version`.match(/\b[\d\.]+\b/)[0])
    if Gem::Version.new('2.3') <= @@slurm_version
      `#{cmd} --noheader -o '%n:#{outfmt}'`.each_line do |line|
        yield line.chomp.split(":", max_fields)
      end
    else
      # Expand rows with hostname ranges (like "foo[1-3,5,9-12]:idle")
      # into multiple rows with one hostname each.
      `#{cmd} --noheader -o '%N:#{outfmt}'`.each_line do |line|
        tokens = line.chomp.split(":", max_fields)
        if (re = tokens[0].match(/^(.*?)\[([-,\d]+)\]$/))
          tokens.shift
          re[2].split(",").each do |range|
            range = range.split("-").collect(&:to_i)
            (range[0]..range[-1]).each do |n|
              yield [re[1] + n.to_s] + tokens
            end
          end
        else
          yield tokens
        end
      end
    end
  end
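  # For illustration: with SLURM < 2.3, a single sinfo row like
  #   "compute[1-3,5]:idle"
  # yields four calls to the block:
  #   ["compute1", "idle"], ["compute2", "idle"],
  #   ["compute3", "idle"], ["compute5", "idle"]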
  def slurm_status
    slurm_nodes = {}
    each_slurm_line("sinfo", "%t") do |hostname, state|
      # Strip trailing punctuation like the "*" in "idle*", which means
      # SLURM has not heard from the node recently; treat unknown states
      # as down.
      state.sub!(/\W+$/, "")
      state = "down" unless %w(idle alloc down).include?(state)
      slurm_nodes[hostname] = {state: state, job: nil}
    end
    each_slurm_line("squeue", "%j") do |hostname, job_uuid|
      slurm_nodes[hostname][:job] = job_uuid if slurm_nodes[hostname]
    end
    slurm_nodes
  end
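  # A hypothetical return value, for illustration:
  #   {"compute1" => {state: "alloc", job: "zzzzz-8i9sb-0123456789abcde"},
  #    "compute2" => {state: "idle", job: nil}}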
  def update_node_status
    return unless Server::Application.config.crunch_job_wrapper.to_s.match(/^slurm/)
    @node_state ||= {}
    slurm_status.each_pair do |hostname, slurmdata|
      next if @node_state[hostname] == slurmdata
      begin
        node = Node.where('hostname=?', hostname).order(:last_ping_at).last
        if node
          $stderr.puts "dispatch: update #{hostname} state to #{slurmdata}"
          node.info["slurm_state"] = slurmdata[:state]
          node.job_uuid = slurmdata[:job]
          if node.save
            @node_state[hostname] = slurmdata
          else
            $stderr.puts "dispatch: failed to update #{node.uuid}: #{node.errors.messages}"
          end
        elsif slurmdata[:state] != 'down'
          $stderr.puts "dispatch: SLURM reports '#{hostname}' is not down, but no node has that name"
        end
      rescue => error
        $stderr.puts "dispatch: error updating #{hostname} node status: #{error}"
      end
    end
  end
  def positive_int(raw_value, default=nil)
    value = begin raw_value.to_i rescue 0 end
    if value > 0
      value
    else
      default
    end
  end
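  # For example: positive_int("8") => 8, positive_int(nil, 1) => 1,
  # positive_int(-3, 1) => 1.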
  NODE_CONSTRAINT_MAP = {
    # Map Job runtime_constraints keys to the corresponding Node info key.
    'min_ram_mb_per_node' => 'total_ram_mb',
    'min_scratch_mb_per_node' => 'total_scratch_mb',
    'min_cores_per_node' => 'total_cpu_cores',
  }
  def nodes_available_for_job_now(job)
    # Find Nodes that satisfy a Job's runtime constraints (by building
    # a list of Procs and using them to test each Node). If there are
    # enough to run the Job, return an array of their names.
    # Otherwise, return nil.
    need_procs = NODE_CONSTRAINT_MAP.each_pair.map do |job_key, node_key|
      Proc.new do |node|
        positive_int(node.info[node_key], 0) >=
          positive_int(job.runtime_constraints[job_key], 0)
      end
    end
    min_node_count = positive_int(job.runtime_constraints['min_nodes'], 1)
    usable_nodes = []
    Node.find_each do |node|
      good_node = (node.info['slurm_state'] == 'idle')
      need_procs.each { |node_test| good_node &&= node_test.call(node) }
      if good_node
        usable_nodes << node
        if usable_nodes.count >= min_node_count
          return usable_nodes.map { |node| node.hostname }
        end
      end
    end
    nil
  end
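  # For example, a job whose runtime_constraints include
  #   {"min_nodes" => 2, "min_ram_mb_per_node" => 7000}
  # matches only when at least two idle nodes report
  # info["total_ram_mb"] >= 7000.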
  def nodes_available_for_job(job)
    # Check if there are enough idle nodes with the Job's minimum
    # hardware requirements to run it. If so, return an array of
    # their names. If not, up to once per hour, signal start_jobs to
    # hold off launching Jobs. This delay is meant to give the Node
    # Manager an opportunity to make new resources available for new
    # Jobs.
    #
    # The exact timing parameters here might need to be adjusted for
    # the best balance between helping the longest-waiting Jobs run,
    # and making efficient use of immediately available resources.
    # These are all just first efforts until we have more data to work
    # with.
    nodelist = nodes_available_for_job_now(job)
    if nodelist.nil? and not did_recently(:wait_for_available_nodes, 3600)
      $stderr.puts "dispatch: waiting for nodes for #{job.uuid}"
      @node_wait_deadline = Time.now + 5.minutes
    end
    nodelist
  end
  def fail_job job, message
    $stderr.puts "dispatch: #{job.uuid}: #{message}"
    begin
      Log.new(object_uuid: job.uuid,
              event_type: 'dispatch',
              owner_uuid: job.owner_uuid,
              summary: message,
              properties: {"text" => message}).save!
    rescue
      $stderr.puts "dispatch: log.create failed"
    end

    begin
      job.lock @authorizations[job.uuid].user.uuid
      job.state = "Failed"
      if not job.save
        $stderr.puts "dispatch: save failed setting job #{job.uuid} to failed"
      end
    rescue ArvadosModel::AlreadyLockedError
      $stderr.puts "dispatch: tried to mark job #{job.uuid} as failed but it was already locked by someone else"
    end
  end
  def start_jobs
    @todo.each do |job|
      next if @running[job.uuid]

      cmd_args = nil
      case Server::Application.config.crunch_job_wrapper
      when :none
        if @running.size > 0
          # Don't run more than one at a time.
          return
        end
        cmd_args = []
      when :slurm_immediate
        nodelist = nodes_available_for_job(job)
        if nodelist.nil?
          if Time.now < @node_wait_deadline
            break
          else
            next
          end
        end
        cmd_args = ["salloc",
                    "--job-name=#{job.uuid}",
                    "--nodelist=#{nodelist.join(',')}"]
      else
        raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"
      end
      if Server::Application.config.crunch_job_user
        cmd_args.unshift("sudo", "-E", "-u",
                         Server::Application.config.crunch_job_user,
                         "PATH=#{ENV['PATH']}",
                         "PERLLIB=#{ENV['PERLLIB']}",
                         "PYTHONPATH=#{ENV['PYTHONPATH']}",
                         "RUBYLIB=#{ENV['RUBYLIB']}",
                         "GEM_PATH=#{ENV['GEM_PATH']}")
      end
      @authorizations ||= {}
      if @authorizations[job.uuid] and
          @authorizations[job.uuid].user.uuid != job.modified_by_user_uuid
        # We already made a token for this job, but we need a new one
        # because modified_by_user_uuid has changed (the job will run
        # as a different user).
        @authorizations[job.uuid].update_attributes expires_at: Time.now
        @authorizations[job.uuid] = nil
      end
      if not @authorizations[job.uuid]
        auth = ApiClientAuthorization.
          new(user: User.where('uuid=?', job.modified_by_user_uuid).first,
              api_client_id: 0)
        if not auth.save
          $stderr.puts "dispatch: auth.save failed"
          next
        end
        @authorizations[job.uuid] = auth
      end
      crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
      if crunch_job_bin == ''
        raise "No CRUNCH_JOB_BIN env var, and crunch-job not in path."
      end
      arvados_internal = Rails.configuration.git_internal_dir
      if not File.exists? arvados_internal
        $stderr.puts `mkdir -p #{arvados_internal.shellescape} && cd #{arvados_internal.shellescape} && git init --bare`
      end

      git = "git --git-dir=#{arvados_internal.shellescape}"
      # @fetched_commits[V]==true if we know commit V exists in the
      # arvados_internal git repository.
      @fetched_commits ||= {}
      if !@fetched_commits[job.script_version]
        repo_root = Rails.configuration.git_repositories_dir
        src_repo = File.join(repo_root, job.repository + '.git')
        if not File.exists? src_repo
          src_repo = File.join(repo_root, job.repository, '.git')
          if not File.exists? src_repo
            fail_job job, "No #{job.repository}.git or #{job.repository}/.git at #{repo_root}"
            next
          end
        end

        # Check whether the commit needs to be fetched.
        commit_rev = `#{git} rev-list -n1 #{job.script_version.shellescape} 2>/dev/null`.chomp
        unless $? == 0 and commit_rev == job.script_version
          # The commit does not exist in the internal repository, so import
          # the source repository using git fetch-pack.
          cmd = "#{git} fetch-pack --no-progress --all #{src_repo.shellescape}"
          $stderr.puts "dispatch: #{cmd}"
          $stderr.puts `#{cmd}`
          unless $? == 0
            fail_job job, "git fetch-pack failed"
            next
          end
        end
        @fetched_commits[job.script_version] = true
      end
      # @job_tags[J]==V if we know commit V has been tagged J in the
      # arvados_internal repository. (J is a job UUID, V is a commit
      # sha1.)
      @job_tags ||= {}
      if not @job_tags[job.uuid]
        cmd = "#{git} tag #{job.uuid.shellescape} #{job.script_version.shellescape} 2>/dev/null"
        $stderr.puts "dispatch: #{cmd}"
        $stderr.puts `#{cmd}`
        unless $? == 0
          # git tag failed. This may be because the tag already exists, so check for that.
          tag_rev = `#{git} rev-list -n1 #{job.uuid.shellescape}`.chomp
          if $? == 0
            # We got a revision back
            if tag_rev != job.script_version
              # Uh oh, the tag doesn't point to the revision we were expecting.
              # Someone has been monkeying with the job record and/or git.
              fail_job job, "Existing tag #{job.uuid} points to commit #{tag_rev} but expected commit #{job.script_version}"
              next
            end
            # We're okay (fall through to setting @job_tags below).
          else
            # git rev-list failed for some reason.
            fail_job job, "'git tag' for #{job.uuid} failed but did not find any existing tag using 'git rev-list'"
            next
          end
        end
        # 'git tag' was successful, or there is an existing tag that points to the same revision.
        @job_tags[job.uuid] = job.script_version
      elsif @job_tags[job.uuid] != job.script_version
        fail_job job, "Existing tag #{job.uuid} points to commit #{@job_tags[job.uuid]} but this job uses commit #{job.script_version}"
        next
      end
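      # For illustration (hypothetical paths and values), the tag command
      # assembled above expands to something like:
      #   git --git-dir=/var/lib/arvados/internal.git tag zzzzz-8i9sb-0123456789abcde <sha1>
      # so the job UUID permanently names the exact commit it ran.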
      cmd_args << crunch_job_bin
      cmd_args << '--job-api-token'
      cmd_args << @authorizations[job.uuid].api_token
      cmd_args << '--job'
      cmd_args << job.uuid
      cmd_args << '--git-dir'
      cmd_args << arvados_internal

      $stderr.puts "dispatch: #{cmd_args.join ' '}"
      begin
        i, o, e, t = Open3.popen3(*cmd_args)
      rescue
        $stderr.puts "dispatch: popen3: #{$!}"
        sleep 1
        next
      end

      $stderr.puts "dispatch: job #{job.uuid}"
      start_banner = "dispatch: child #{t.pid} start #{Time.now.ctime.to_s}"
      $stderr.puts start_banner

      @running[job.uuid] = {
        stdin: i, stdout: o, stderr: e, wait_thr: t, job: job,
        buf: {stderr: '', stdout: ''},
        started: false, sent_int: 0,
        job_auth: @authorizations[job.uuid],
        stderr_buf_to_flush: '',
        stderr_flushed_at: Time.new(0),
        bytes_logged: 0, events_logged: 0,
        log_throttle_is_open: true,
        log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
        log_throttle_bytes_so_far: 0,
        log_throttle_lines_so_far: 0,
        log_throttle_bytes_skipped: 0,
      }
      i.close
      update_node_status
    end
  end
  # Test for hard cap on total output and for log throttling. Returns whether
  # the log line should go to output or not. Modifies "line" in place to
  # replace it with an error if a logging limit is tripped.
  def rate_limit running_job, line
    message = false
    linesize = line.size
    if running_job[:log_throttle_is_open]
      running_job[:log_throttle_lines_so_far] += 1
      running_job[:log_throttle_bytes_so_far] += linesize
      running_job[:bytes_logged] += linesize

      if (running_job[:bytes_logged] >
          Rails.configuration.crunch_limit_log_bytes_per_job)
        message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated."
        running_job[:log_throttle_reset_time] = Time.now + 100.years
        running_job[:log_throttle_is_open] = false

      elsif (running_job[:log_throttle_bytes_so_far] >
             Rails.configuration.crunch_log_throttle_bytes)
        remaining_time = running_job[:log_throttle_reset_time] - Time.now
        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds.\n"
        running_job[:log_throttle_is_open] = false

      elsif (running_job[:log_throttle_lines_so_far] >
             Rails.configuration.crunch_log_throttle_lines)
        remaining_time = running_job[:log_throttle_reset_time] - Time.now
        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds.\n"
        running_job[:log_throttle_is_open] = false
      end
    end

    if not running_job[:log_throttle_is_open]
      # Don't log anything if any limit has been exceeded. Just count lossage.
      running_job[:log_throttle_bytes_skipped] += linesize
    end

    if message
      # Yes, write to logs, but use our "rate exceeded" message
      # instead of the log message that exceeded the limit.
      line.replace message
      true
    else
      running_job[:log_throttle_is_open]
    end
  end
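  # For example, with a hypothetical crunch_log_throttle_lines of 1024:
  # the 1025th line inside one throttle period is replaced with the
  # "Exceeded rate" notice, and subsequent bytes are only counted in
  # log_throttle_bytes_skipped until read_pipes resets the throttle.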
  def read_pipes
    @running.each do |job_uuid, j|
      now = Time.now
      if now > j[:log_throttle_reset_time]
        # It has been more than throttle_period seconds since the last
        # checkpoint, so reset the throttle.
        if j[:log_throttle_bytes_skipped] > 0
          message = "#{job_uuid} ! Skipped #{j[:log_throttle_bytes_skipped]} bytes of log"
          $stderr.puts message
          j[:stderr_buf_to_flush] << "#{Time.now.ctime.to_s} #{message}\n"
        end

        j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
        j[:log_throttle_bytes_so_far] = 0
        j[:log_throttle_lines_so_far] = 0
        j[:log_throttle_bytes_skipped] = 0
        j[:log_throttle_is_open] = true
      end
      j[:buf].each do |stream, streambuf|
        # Read some data from the child stream
        buf = ''
        begin
          # It's important to use a big enough buffer here. When we're
          # being flooded with logs, we must read and discard many
          # bytes at once. Otherwise, we can easily peg a CPU with
          # time-checking and other loop overhead. (Quick tests show a
          # 1MiB buffer working 2.5x as fast as a 64 KiB buffer.)
          #
          # So don't reduce this buffer size!
          buf = j[stream].read_nonblock(2**20)
        rescue Errno::EAGAIN, EOFError
        end

        # Short circuit the counting code if we're just going to throw
        # away the data anyway.
        if not j[:log_throttle_is_open]
          j[:log_throttle_bytes_skipped] += streambuf.size + buf.size
          streambuf.replace ''
          next
        elsif buf == ''
          next
        end
        # Append to incomplete line from previous read, if any
        streambuf << buf

        bufend = ''
        streambuf.each_line do |line|
          if not line.end_with? $/
            if line.size > Rails.configuration.crunch_log_throttle_bytes
              # Without a limit here, we'll use 2x an arbitrary amount
              # of memory, and waste a lot of time copying strings
              # around, all without providing any feedback to anyone
              # about what's going on _or_ hitting any of our throttle
              # limits.
              #
              # Here we leave "line" alone, knowing it will never be
              # sent anywhere: rate_limit() will reach
              # crunch_log_throttle_bytes immediately. However, we'll
              # leave [...] in bufend: if the trailing end of the long
              # line does end up getting sent anywhere, it will have
              # some indication that it is incomplete.
              bufend = "[...]"
            else
              # If line length is sane, we'll wait for the rest of the
              # line to appear in the next read_pipes() call.
              bufend = line
              break
            end
          end
          # rate_limit returns true or false as to whether to actually log
          # the line or not. It also modifies "line" in place to replace
          # it with an error if a logging limit is tripped.
          if rate_limit j, line
            $stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
            $stderr.puts line
            pub_msg = "#{Time.now.ctime.to_s} #{line.strip}\n"
            j[:stderr_buf_to_flush] << pub_msg
          end
        end

        # Leave the trailing incomplete line (if any) in streambuf for
        # next time.
        streambuf.replace bufend
      end
      # Flush buffered logs to the logs table, if appropriate. We have
      # to do this even if we didn't collect any new logs this time:
      # otherwise, buffered data older than seconds_between_events
      # won't get flushed until new data arrives.
      write_log j
    end
  end
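  # For illustration: if a child writes "partial output" with no trailing
  # newline, it is kept in j[:buf] (via bufend) and logged only after a
  # later read completes the line, or when the child exits and
  # reap_children drains the remaining buffers.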
  def reap_children
    return if 0 == @running.size

    pid_done = nil
    j_done = nil

    if false
      # (Disabled) reap via waitpid().
      begin
        pid_done = Process.waitpid(-1, Process::WNOHANG | Process::WUNTRACED)
        if pid_done
          j_done = @running.values.
            select { |j| j[:wait_thr].pid == pid_done }.
            first
        end
      rescue SystemCallError
        # I have @running processes but system reports I have no
        # children. This is likely to happen repeatedly if it happens at
        # all; I will log this no more than once per child process I
        # start.
        if 0 < @running.select { |uuid,j| j[:warned_waitpid_error].nil? }.size
          children = @running.values.collect { |j| j[:wait_thr].pid }.join ' '
          $stderr.puts "dispatch: IPC bug: waitpid() error (#{$!}), but I have children #{children}"
        end
        @running.each do |uuid,j| j[:warned_waitpid_error] = true end
      end
    else
      # Reap via the popen3 wait thread's status instead.
      @running.each do |uuid, j|
        if j[:wait_thr].status == false
          pid_done = j[:wait_thr].pid
          j_done = j
        end
      end
    end

    return if !pid_done
    job_done = j_done[:job]
    $stderr.puts "dispatch: child #{pid_done} exit"
    $stderr.puts "dispatch: job #{job_done.uuid} end"

    # Ensure every last drop of stdout and stderr is consumed.
    read_pipes
    # Reset flush timestamp to make sure log gets written.
    j_done[:stderr_flushed_at] = Time.new(0)
    # Write any remaining logs.
    write_log j_done

    j_done[:buf].each do |stream, streambuf|
      if streambuf != ''
        $stderr.puts streambuf + "\n"
      end
    end

    # Wait for the thread to finish; its value is a Process::Status.
    exit_status = j_done[:wait_thr].value.exitstatus
    jobrecord = Job.find_by_uuid(job_done.uuid)
    if exit_status != 75 and jobrecord.state == "Running"
      # crunch-job did not return exit code 75 (see below) and left the job in
      # the "Running" state, which means there was an unhandled error. Fail
      # the job.
      jobrecord.state = "Failed"
      if not jobrecord.save
        $stderr.puts "dispatch: jobrecord.save failed"
      end
    else
      # Don't fail the job if crunch-job didn't even get as far as
      # starting it. If the job failed to run due to an infrastructure
      # issue with crunch-job or slurm, we want the job to stay in the
      # queue. If crunch-job exited after losing a race to another
      # crunch-job process, it exits 75 and we should leave the job
      # record alone so the winner of the race can do its thing.
      #
      # There is still an unhandled race condition: if our crunch-job
      # process is about to lose a race with another crunch-job
      # process, but crashes before getting to its "exit 75" (for
      # example, "cannot fork" or "cannot reach API server"), then we
      # will incorrectly treat the non-nil jobrecord.started_at as our
      # process's doing and mark the job as failed, even though the
      # winner of the race is probably still doing fine.
    end
    # Invalidate the per-job auth token, unless the job is still queued and we
    # might want to try it again.
    if jobrecord.state != "Queued"
      j_done[:job_auth].update_attributes expires_at: Time.now
    end

    @running.delete job_done.uuid
  end
  def update_pipelines
    expire_tokens = @pipe_auth_tokens.dup
    @todo_pipelines.each do |p|
      pipe_auth = (@pipe_auth_tokens[p.uuid] ||= ApiClientAuthorization.
                   create(user: User.where('uuid=?', p.modified_by_user_uuid).first,
                          api_client_id: 0))
      puts `export ARVADOS_API_TOKEN=#{pipe_auth.api_token} && arv-run-pipeline-instance --run-pipeline-here --no-wait --instance #{p.uuid}`
      expire_tokens.delete p.uuid
    end

    expire_tokens.each do |k, v|
      v.update_attributes expires_at: Time.now
      @pipe_auth_tokens.delete k
    end
  end
  def run
    act_as_system_user
    @running ||= {}
    @pipe_auth_tokens ||= { }
    $stderr.puts "dispatch: ready"
    while !$signal[:term] or @running.size > 0
      if $signal[:term]
        @running.each do |uuid, j|
          if !j[:started] and j[:sent_int] < 2
            begin
              Process.kill 'INT', j[:wait_thr].pid
            rescue Errno::ESRCH
              # No such pid = race condition + desired result is
              # already achieved
            end
            j[:sent_int] += 1
          end
        end
      else
        refresh_todo unless did_recently(:refresh_todo, 1.0)
        update_node_status unless did_recently(:update_node_status, 1.0)
        unless @todo.empty? or did_recently(:start_jobs, 1.0) or $signal[:term]
          start_jobs
        end
        unless (@todo_pipelines.empty? and @pipe_auth_tokens.empty?) or did_recently(:update_pipelines, 5.0)
          update_pipelines
        end
      end
      reap_children
      select(@running.values.collect { |j| [j[:stdout], j[:stderr]] }.flatten,
             [], [], 1)
    end
  end
  def did_recently(thing, min_interval)
    @did_recently ||= {}
    if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
      @did_recently[thing] = Time.now
      false
    else
      true
    end
  end
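  # For example, did_recently(:refresh_todo, 1.0) returns false (and
  # records the timestamp) at most once per second, so callers can write:
  #   refresh_todo unless did_recently(:refresh_todo, 1.0)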
  # Send a message to the log table. We want these records to be transient.
  def write_log running_job
    return if running_job[:stderr_buf_to_flush] == ''

    # Send a log event if the buffer exceeds crunch_log_bytes_per_event, or
    # if it has been at least crunch_log_seconds_between_events seconds since
    # the last flush.
    if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
      begin
        log = Log.new(object_uuid: running_job[:job].uuid,
                      event_type: 'stderr',
                      owner_uuid: running_job[:job].owner_uuid,
                      properties: {"text" => running_job[:stderr_buf_to_flush]})
        log.save!
        running_job[:events_logged] += 1
      rescue => exception
        $stderr.puts "Failed to write logs"
        $stderr.puts exception.backtrace
      end
      running_job[:stderr_buf_to_flush] = ''
      running_job[:stderr_flushed_at] = Time.now
    end
  end
end

# This is how crunch-job child procs know where the "refresh" trigger file is.
ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger

Dispatcher.new.run