(ARGV.any? ? ARGV : ['--jobs', '--pipelines']).each do |arg|
    $options[:jobs] = true
    $options[:pipelines] = true
    abort "Unrecognized command line option '#{arg}'"

if not ($options[:jobs] or $options[:pipelines])
  abort "Nothing to do. Please specify at least one of: --jobs, --pipelines."

ARGV.reject! { |a| a =~ /--jobs|--pipelines/ }

%w{TERM INT}.each do |sig|
    $stderr.puts "Received #{signame} signal"

if ENV["CRUNCH_DISPATCH_LOCKFILE"]
  lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
  lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
  unless lockfile.flock File::LOCK_EX|File::LOCK_NB
    abort "Lock unavailable on #{lockfilename} - exit"
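    # Illustrative sketch (comment only, not executed): LOCK_EX|LOCK_NB
    # takes an exclusive advisory lock without blocking, so a second
    # dispatcher started with the same CRUNCH_DISPATCH_LOCKFILE aborts
    # immediately instead of queueing behind the first. Assuming a
    # hypothetical path /tmp/example.lock:
    #   f = File.open("/tmp/example.lock", File::RDWR|File::CREAT, 0644)
    #   f.flock(File::LOCK_EX|File::LOCK_NB) #=> 0 in the first process,
    #                                        #   false in any other process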
ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"

require File.dirname(__FILE__) + '/../config/boot'
require File.dirname(__FILE__) + '/../config/environment'

    self.utc.strftime "%Y-%m-%d_%H:%M:%S"
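    # For example (hypothetical instant): LogTime.now.to_s
    #   #=> "2014-06-17_21:04:05"
    # Always UTC, with an underscore instead of a space so the timestamp
    # stays a single token in log lines.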
  include ApplicationHelper

    return act_as_system_user

      @todo = Job.queue.select(&:repository)
    if $options[:pipelines]
      @todo_pipelines = PipelineInstance.queue

  def each_slurm_line(cmd, outfmt, max_fields=nil)
    max_fields ||= outfmt.split(":").size
    max_fields += 1 # To accommodate the node field we add
    @@slurm_version ||= Gem::Version.new(`sinfo --version`.match(/\b[\d\.]+\b/)[0])
    if Gem::Version.new('2.3') <= @@slurm_version
      `#{cmd} --noheader -o '%n:#{outfmt}'`.each_line do |line|
        yield line.chomp.split(":", max_fields)
      # Expand rows with hostname ranges (like "foo[1-3,5,9-12]:idle")
      # into multiple rows with one hostname each.
      `#{cmd} --noheader -o '%N:#{outfmt}'`.each_line do |line|
        tokens = line.chomp.split(":", max_fields)
        if (re = tokens[0].match /^(.*?)\[([-,\d]+)\]$/)
          re[2].split(",").each do |range|
            range = range.split("-").collect(&:to_i)
            (range[0]..range[-1]).each do |n|
              yield [re[1] + n.to_s] + tokens
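      # Worked example (hypothetical sinfo output): a line such as
      #   compute[1-3,5]:idle
      # yields one row per hostname (compute1, compute2, compute3,
      # compute5), each paired with the remaining fields ("idle"),
      # matching the one-host-per-row shape of the newer-sinfo branch above.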
    each_slurm_line("sinfo", "%t") do |hostname, state|
      # Treat nodes in idle* state as down, because the * means that slurm
      # hasn't been able to communicate with it recently.
      state.sub!(/^idle\*/, "down")
      state.sub!(/\W+$/, "")
      state = "down" unless %w(idle alloc down).include?(state)
      slurm_nodes[hostname] = {state: state, job: nil}

    each_slurm_line("squeue", "%j") do |hostname, job_uuid|
      slurm_nodes[hostname][:job] = job_uuid if slurm_nodes[hostname]
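    # Sketch of the structure built here (hypothetical hostnames and job
    # UUID), combining sinfo state with the squeue job name:
    #   slurm_nodes #=> {
    #     "compute1" => {state: "idle",  job: nil},
    #     "compute2" => {state: "alloc", job: "zzzzz-8i9sb-0123456789abcde"},
    #   }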
  def update_node_status
    return unless Server::Application.config.crunch_job_wrapper.to_s.match /^slurm/
    slurm_status.each_pair do |hostname, slurmdata|
      next if @node_state[hostname] == slurmdata
        node = Node.where('hostname=?', hostname).order(:last_ping_at).last
          $stderr.puts "dispatch: update #{hostname} state to #{slurmdata}"
          node.info["slurm_state"] = slurmdata[:state]
          node.job_uuid = slurmdata[:job]
            @node_state[hostname] = slurmdata
            $stderr.puts "dispatch: failed to update #{node.uuid}: #{node.errors.messages}"
        elsif slurmdata[:state] != 'down'
          $stderr.puts "dispatch: SLURM reports '#{hostname}' is not down, but no node has that name"
        $stderr.puts "dispatch: error updating #{hostname} node status: #{error}"

  def positive_int(raw_value, default=nil)
    value = begin raw_value.to_i rescue 0 end
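    # Usage sketch, assuming the elided remainder of this method returns
    # `value` when it is positive and `default` otherwise:
    #   positive_int("8", 1)    #=> 8
    #   positive_int(nil, 1)    #=> 1
    #   positive_int("oops", 1) #=> 1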
  NODE_CONSTRAINT_MAP = {
    # Map Job runtime_constraints keys to the corresponding Node info key.
    'min_ram_mb_per_node' => 'total_ram_mb',
    'min_scratch_mb_per_node' => 'total_scratch_mb',
    'min_cores_per_node' => 'total_cpu_cores',

  def nodes_available_for_job_now(job)
    # Find Nodes that satisfy a Job's runtime constraints (by building
    # a list of Procs and using them to test each Node). If there are
    # enough to run the Job, return an array of their names.
    # Otherwise, return nil.
    need_procs = NODE_CONSTRAINT_MAP.each_pair.map do |job_key, node_key|
        positive_int(node.info[node_key], 0) >=
          positive_int(job.runtime_constraints[job_key], 0)

    min_node_count = positive_int(job.runtime_constraints['min_nodes'], 1)
    Node.find_each do |node|
      good_node = (node.info['slurm_state'] == 'idle')
      need_procs.each { |node_test| good_node &&= node_test.call(node) }
        if usable_nodes.count >= min_node_count
          return usable_nodes.map { |node| node.hostname }
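    # Worked example (hypothetical values): given runtime_constraints
    # {'min_nodes' => 2, 'min_ram_mb_per_node' => 4096}, a node whose
    # info has 'slurm_state' => 'idle' and 'total_ram_mb' => 8000 passes
    # every Proc in need_procs; as soon as two such nodes have been seen,
    # their hostnames are returned.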
  def nodes_available_for_job(job)
    # Check if there are enough idle nodes with the Job's minimum
    # hardware requirements to run it. If so, return an array of
    # their names. If not, up to once per hour, signal start_jobs to
    # hold off launching Jobs. This delay is meant to give the Node
    # Manager an opportunity to make new resources available for new
    # Jobs.
    #
    # The exact timing parameters here might need to be adjusted for
    # the best balance between helping the longest-waiting Jobs run,
    # and making efficient use of immediately available resources.
    # These are all just first efforts until we have more data to work
    # with.
    nodelist = nodes_available_for_job_now(job)
    if nodelist.nil? and not did_recently(:wait_for_available_nodes, 3600)
      $stderr.puts "dispatch: waiting for nodes for #{job.uuid}"
      @node_wait_deadline = Time.now + 5.minutes

  def fail_job job, message
    $stderr.puts "dispatch: #{job.uuid}: #{message}"
      Log.new(object_uuid: job.uuid,
              event_type: 'dispatch',
              owner_uuid: job.owner_uuid,
              properties: {"text" => message}).save!
      $stderr.puts "dispatch: log.create failed"

      job.lock @authorizations[job.uuid].user.uuid
        $stderr.puts "dispatch: save failed setting job #{job.uuid} to failed"
    rescue ArvadosModel::AlreadyLockedError
      $stderr.puts "dispatch: tried to mark job #{job.uuid} as failed but it was already locked by someone else"

      next if @running[job.uuid]

      case Server::Application.config.crunch_job_wrapper
        # Don't run more than one at a time.
      when :slurm_immediate
        nodelist = nodes_available_for_job(job)
          if Time.now < @node_wait_deadline
        cmd_args = ["salloc",
                    "--job-name=#{job.uuid}",
                    "--nodelist=#{nodelist.join(',')}"]
        raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"

      if Server::Application.config.crunch_job_user
        cmd_args.unshift("sudo", "-E", "-u",
                         Server::Application.config.crunch_job_user,
                         "PATH=#{ENV['PATH']}",
                         "PERLLIB=#{ENV['PERLLIB']}",
                         "PYTHONPATH=#{ENV['PYTHONPATH']}",
                         "RUBYLIB=#{ENV['RUBYLIB']}",
                         "GEM_PATH=#{ENV['GEM_PATH']}")
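      # Sketch of the command assembled so far (hypothetical values;
      # other salloc flags are elided above), before crunch-job and its
      # arguments are appended below:
      #   sudo -E -u <crunch_job_user> PATH=... salloc ... \
      #     --job-name=zzzzz-8i9sb-0123456789abcde --nodelist=compute1,compute2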
      @authorizations ||= {}
      if @authorizations[job.uuid] and
          @authorizations[job.uuid].user.uuid != job.modified_by_user_uuid
        # We already made a token for this job, but we need a new one
        # because modified_by_user_uuid has changed (the job will run
        # as a different user).
        @authorizations[job.uuid].update_attributes expires_at: Time.now
        @authorizations[job.uuid] = nil

      if not @authorizations[job.uuid]
        auth = ApiClientAuthorization.
          new(user: User.where('uuid=?', job.modified_by_user_uuid).first,
          $stderr.puts "dispatch: auth.save failed"
          @authorizations[job.uuid] = auth

      crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
      if crunch_job_bin == ''
        raise "No CRUNCH_JOB_BIN env var, and crunch-job not in path."

      arvados_internal = Rails.configuration.git_internal_dir
      if not File.exists? arvados_internal
        $stderr.puts `mkdir -p #{arvados_internal.shellescape} && cd #{arvados_internal.shellescape} && git init --bare`

      git = "git --git-dir=#{arvados_internal.shellescape}"

      # @fetched_commits[V]==true if we know commit V exists in the
      # arvados_internal git repository.
      @fetched_commits ||= {}
      if !@fetched_commits[job.script_version]
        repo_root = Rails.configuration.git_repositories_dir
        src_repo = File.join(repo_root, job.repository + '.git')
        if not File.exists? src_repo
          src_repo = File.join(repo_root, job.repository, '.git')
          if not File.exists? src_repo
            fail_job job, "No #{job.repository}.git or #{job.repository}/.git at #{repo_root}"

        # check if the commit needs to be fetched or not
        commit_rev = `#{git} rev-list -n1 #{job.script_version.shellescape} 2>/dev/null`.chomp
        unless $? == 0 and commit_rev == job.script_version
          # commit does not exist in internal repository, so import the source repository using git fetch-pack
          cmd = "#{git} fetch-pack --no-progress --all #{src_repo.shellescape}"
          $stderr.puts "dispatch: #{cmd}"
          $stderr.puts `#{cmd}`
            fail_job job, "git fetch-pack failed"
        @fetched_commits[job.script_version] = true

      # @job_tags[J]==V if we know commit V has been tagged J in the
      # arvados_internal repository. (J is a job UUID, V is a commit
      if not @job_tags[job.uuid]
        cmd = "#{git} tag #{job.uuid.shellescape} #{job.script_version.shellescape} 2>/dev/null"
        $stderr.puts "dispatch: #{cmd}"
        $stderr.puts `#{cmd}`
          # git tag failed. This may be because the tag already exists, so check for that.
          tag_rev = `#{git} rev-list -n1 #{job.uuid.shellescape}`.chomp
            # We got a revision back
            if tag_rev != job.script_version
              # Uh oh, the tag doesn't point to the revision we were expecting.
              # Someone has been monkeying with the job record and/or git.
              fail_job job, "Existing tag #{job.uuid} points to commit #{tag_rev} but expected commit #{job.script_version}"
            # we're okay (fall through to setting @job_tags below)
            # git rev-list failed for some reason.
            fail_job job, "'git tag' for #{job.uuid} failed but did not find any existing tag using 'git rev-list'"
        # 'git tag' was successful, or there is an existing tag that points to the same revision.
        @job_tags[job.uuid] = job.script_version
      elsif @job_tags[job.uuid] != job.script_version
        fail_job job, "Existing tag #{job.uuid} points to commit #{@job_tags[job.uuid]} but this job uses commit #{job.script_version}"
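      # Net effect, sketched as plain git commands (hypothetical job
      # UUID; $INTERNAL stands for the git_internal_dir path):
      #   git --git-dir=$INTERNAL fetch-pack --no-progress --all <src_repo>
      #   git --git-dir=$INTERNAL tag zzzzz-8i9sb-0123456789abcde <script_version>
      # so the internal repository keeps a per-job tag recording exactly
      # which commit the job was dispatched with.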
      cmd_args << crunch_job_bin
      cmd_args << '--job-api-token'
      cmd_args << @authorizations[job.uuid].api_token
      cmd_args << '--git-dir'
      cmd_args << arvados_internal

      $stderr.puts "dispatch: #{cmd_args.join ' '}"

        i, o, e, t = Open3.popen3(*cmd_args)
        $stderr.puts "dispatch: popen3: #{$!}"

      $stderr.puts "dispatch: job #{job.uuid}"
      start_banner = "dispatch: child #{t.pid} start #{LogTime.now}"
      $stderr.puts start_banner

      @running[job.uuid] = {
        buf: {stderr: '', stdout: ''},
        job_auth: @authorizations[job.uuid],
        stderr_buf_to_flush: '',
        stderr_flushed_at: Time.new(0),
        log_throttle_is_open: true,
        log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
        log_throttle_bytes_so_far: 0,
        log_throttle_lines_so_far: 0,
        log_throttle_bytes_skipped: 0,

  # Test for hard cap on total output and for log throttling. Returns whether
  # the log line should go to output or not. Modifies "line" in place to
  # replace it with an error if a logging limit is tripped.
  def rate_limit running_job, line
    if running_job[:log_throttle_is_open]
      running_job[:log_throttle_lines_so_far] += 1
      running_job[:log_throttle_bytes_so_far] += linesize
      running_job[:bytes_logged] += linesize

      if (running_job[:bytes_logged] >
          Rails.configuration.crunch_limit_log_bytes_per_job)
        message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated."
        running_job[:log_throttle_reset_time] = Time.now + 100.years
        running_job[:log_throttle_is_open] = false
      elsif (running_job[:log_throttle_bytes_so_far] >
             Rails.configuration.crunch_log_throttle_bytes)
        remaining_time = running_job[:log_throttle_reset_time] - Time.now
        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds.\n"
        running_job[:log_throttle_is_open] = false
      elsif (running_job[:log_throttle_lines_so_far] >
             Rails.configuration.crunch_log_throttle_lines)
        remaining_time = running_job[:log_throttle_reset_time] - Time.now
        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds.\n"
        running_job[:log_throttle_is_open] = false

    if not running_job[:log_throttle_is_open]
      # Don't log anything if any limit has been exceeded. Just count lossage.
      running_job[:log_throttle_bytes_skipped] += linesize
      # Yes, write to logs, but use our "rate exceeded" message
      # instead of the log message that exceeded the limit.

    running_job[:log_throttle_is_open]
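    # Behaviour sketch (hypothetical config): with
    # crunch_log_throttle_lines = 1024 and crunch_log_throttle_period = 60,
    # the 1025th line seen inside one 60-second window is replaced by the
    # "Exceeded rate" message, later lines are only counted as skipped
    # bytes, and the reset block in read_pipes below reopens the throttle
    # when the period expires.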
    @running.each do |job_uuid, j|
      if now > j[:log_throttle_reset_time]
        # It has been more than throttle_period seconds since the last
        # checkpoint so reset the throttle
        if j[:log_throttle_bytes_skipped] > 0
          message = "#{job_uuid} ! Skipped #{j[:log_throttle_bytes_skipped]} bytes of log"
          j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"

        j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
        j[:log_throttle_bytes_so_far] = 0
        j[:log_throttle_lines_so_far] = 0
        j[:log_throttle_bytes_skipped] = 0
        j[:log_throttle_is_open] = true

      j[:buf].each do |stream, streambuf|
        # Read some data from the child stream
          # It's important to use a big enough buffer here. When we're
          # being flooded with logs, we must read and discard many
          # bytes at once. Otherwise, we can easily peg a CPU with
          # time-checking and other loop overhead. (Quick tests show a
          # 1MiB buffer working 2.5x as fast as a 64 KiB buffer.)
          # So don't reduce this buffer size!
          buf = j[stream].read_nonblock(2**20)
        rescue Errno::EAGAIN, EOFError

        # Short circuit the counting code if we're just going to throw
        # away the data anyway.
        if not j[:log_throttle_is_open]
          j[:log_throttle_bytes_skipped] += streambuf.size + buf.size

        # Append to incomplete line from previous read, if any

        streambuf.each_line do |line|
          if not line.end_with? $/
            if line.size > Rails.configuration.crunch_log_throttle_bytes
              # Without a limit here, we'll use 2x an arbitrary amount
              # of memory, and waste a lot of time copying strings
              # around, all without providing any feedback to anyone
              # about what's going on _or_ hitting any of our throttle
              # Here we leave "line" alone, knowing it will never be
              # sent anywhere: rate_limit() will reach
              # crunch_log_throttle_bytes immediately. However, we'll
              # leave [...] in bufend: if the trailing end of the long
              # line does end up getting sent anywhere, it will have
              # some indication that it is incomplete.
              # If line length is sane, we'll wait for the rest of the
              # line to appear in the next read_pipes() call.

          # rate_limit returns true or false as to whether to actually log
          # the line or not. It also modifies "line" in place to replace
          # it with an error if a logging limit is tripped.
          if rate_limit j, line
            $stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
            pub_msg = "#{LogTime.now} #{line.strip}\n"
            j[:stderr_buf_to_flush] << pub_msg

        # Leave the trailing incomplete line (if any) in streambuf for
        streambuf.replace bufend
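        # Worked example (hypothetical read): if read_nonblock returned
        # "line one\nline tw", then "line one\n" is logged above and
        # streambuf is left holding "line tw" so the next read can
        # complete the partial line.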
      # Flush buffered logs to the logs table, if appropriate. We have
      # to do this even if we didn't collect any new logs this time:
      # otherwise, buffered data older than seconds_between_events
      # won't get flushed until new data arrives.

    return if 0 == @running.size

      pid_done = waitpid(-1, Process::WNOHANG | Process::WUNTRACED)
        j_done = @running.values.
          select { |j| j[:wait_thr].pid == pid_done }.
    rescue SystemCallError
      # I have @running processes but system reports I have no
      # children. This is likely to happen repeatedly if it happens at
      # all; I will log this no more than once per child process I
      # start.
      if 0 < @running.select { |uuid,j| j[:warned_waitpid_error].nil? }.size
        children = @running.values.collect { |j| j[:wait_thr].pid }.join ' '
        $stderr.puts "dispatch: IPC bug: waitpid() error (#{$!}), but I have children #{children}"
      @running.each do |uuid,j| j[:warned_waitpid_error] = true end

      @running.each do |uuid, j|
        if j[:wait_thr].status == false
          pid_done = j[:wait_thr].pid

    job_done = j_done[:job]
    $stderr.puts "dispatch: child #{pid_done} exit"
    $stderr.puts "dispatch: job #{job_done.uuid} end"

    # Ensure every last drop of stdout and stderr is consumed.
    # Reset flush timestamp to make sure log gets written.
    j_done[:stderr_flushed_at] = Time.new(0)
    # Write any remaining logs.
    j_done[:buf].each do |stream, streambuf|
        $stderr.puts streambuf + "\n"

    # Wait on the thread; its value is a Process::Status.
    exit_status = j_done[:wait_thr].value.exitstatus

    jobrecord = Job.find_by_uuid(job_done.uuid)
    if exit_status != 75 and jobrecord.state == "Running"
      # crunch-job did not return exit code 75 (see below) and left the job in
      # the "Running" state, which means there was an unhandled error. Fail
      # the job.
      jobrecord.state = "Failed"
      if not jobrecord.save
        $stderr.puts "dispatch: jobrecord.save failed"
      # Don't fail the job if crunch-job didn't even get as far as
      # starting it. If the job failed to run due to an infrastructure
      # issue with crunch-job or slurm, we want the job to stay in the
      # queue. If crunch-job exited after losing a race to another
      # crunch-job process, it exits 75 and we should leave the job
      # record alone so the winner of the race can do its thing.
      # There is still an unhandled race condition: If our crunch-job
      # process is about to lose a race with another crunch-job
      # process, but crashes before getting to its "exit 75" (for
      # example, "cannot fork" or "cannot reach API server") then we
      # will incorrectly assume that it was our process that set
      # jobrecord.started_at (non-nil), and mark the job as failed even
      # though the winner of the race is probably still doing fine.
    # Invalidate the per-job auth token, unless the job is still queued and we
    # might want to try it again.
    if jobrecord.state != "Queued"
      j_done[:job_auth].update_attributes expires_at: Time.now

    @running.delete job_done.uuid

    expire_tokens = @pipe_auth_tokens.dup
    @todo_pipelines.each do |p|
      pipe_auth = (@pipe_auth_tokens[p.uuid] ||= ApiClientAuthorization.
                   create(user: User.where('uuid=?', p.modified_by_user_uuid).first,
      puts `export ARVADOS_API_TOKEN=#{pipe_auth.api_token} && arv-run-pipeline-instance --run-pipeline-here --no-wait --instance #{p.uuid}`
      expire_tokens.delete p.uuid

    expire_tokens.each do |k, v|
      v.update_attributes expires_at: Time.now
      @pipe_auth_tokens.delete k

    @pipe_auth_tokens ||= { }
    $stderr.puts "dispatch: ready"
    while !$signal[:term] or @running.size > 0
        @running.each do |uuid, j|
          if !j[:started] and j[:sent_int] < 2
              Process.kill 'INT', j[:wait_thr].pid
              # No such pid = race condition + desired result is
        refresh_todo unless did_recently(:refresh_todo, 1.0)
        update_node_status unless did_recently(:update_node_status, 1.0)
        unless @todo.empty? or did_recently(:start_jobs, 1.0) or $signal[:term]
        unless (@todo_pipelines.empty? and @pipe_auth_tokens.empty?) or did_recently(:update_pipelines, 5.0)

      select(@running.values.collect { |j| [j[:stdout], j[:stderr]] }.flatten,

  def did_recently(thing, min_interval)
    if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
      @did_recently[thing] = Time.now
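    # Usage sketch (return values inferred from the call sites above,
    # e.g. `refresh_todo unless did_recently(:refresh_todo, 1.0)`):
    #   did_recently(:refresh_todo, 1.0) #=> false  first call; records Time.now, caller runs the task
    #   did_recently(:refresh_todo, 1.0) #=> true   again within 1 second; caller skips the task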
  # Send a message to the log table. We want these records to be transient.
  def write_log running_job
    return if running_job[:stderr_buf_to_flush] == ''

    # Send out a log event if the buffer size exceeds the bytes-per-event
    # limit, or if it has been at least crunch_log_seconds_between_events
    # seconds since the last flush.
    if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
      log = Log.new(object_uuid: running_job[:job].uuid,
                    event_type: 'stderr',
                    owner_uuid: running_job[:job].owner_uuid,
                    properties: {"text" => running_job[:stderr_buf_to_flush]})
      running_job[:events_logged] += 1

      $stderr.puts "Failed to write logs"
      $stderr.puts exception.backtrace

    running_job[:stderr_buf_to_flush] = ''
    running_job[:stderr_flushed_at] = Time.now
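    # Flush-policy sketch (hypothetical config values): with
    # crunch_log_bytes_per_event = 4096 and
    # crunch_log_seconds_between_events = 1, buffered stderr becomes an
    # 'stderr' Log record as soon as roughly 4 KiB has accumulated, or
    # about once per second while smaller amounts trickle in.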
# This is how crunch-job child procs know where the "refresh" trigger file is
ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger