(ARGV.any? ? ARGV : ['--jobs', '--pipelines']).each do |arg|
  case arg
  when '--jobs'
    $options[:jobs] = true
  when '--pipelines'
    $options[:pipelines] = true
  else
    abort "Unrecognized command line option '#{arg}'"
  end
end
if not ($options[:jobs] or $options[:pipelines])
  abort "Nothing to do. Please specify at least one of: --jobs, --pipelines."
end
ARGV.reject! { |a| a =~ /--jobs|--pipelines/ }
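# Example invocation (when no flags are given, both default to on):
#   RAILS_ENV=production script/crunch-dispatch.rb --jobs --pipelines
# After the flags are rejected above, any remaining ARGV[0] selects the
# Rails environment below.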
%w{TERM INT}.each do |sig|
  signame = sig
  Signal.trap(sig) do
    $stderr.puts "Received #{signame} signal"
    # Either signal requests a graceful shutdown of the main loop.
    $signal[:term] = true
  end
end
if ENV["CRUNCH_DISPATCH_LOCKFILE"]
  lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
  lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
  unless lockfile.flock File::LOCK_EX|File::LOCK_NB
    abort "Lock unavailable on #{lockfilename} - exit"
  end
end
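# LOCK_EX|LOCK_NB makes a second dispatcher pointed at the same lockfile
# abort immediately instead of blocking, so at most one instance runs per
# lockfile. The descriptor stays open for the life of the process, and the
# lock is released automatically at exit.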
ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"

require File.dirname(__FILE__) + '/../config/boot'
require File.dirname(__FILE__) + '/../config/environment'
include ApplicationHelper

def sysuser
  return act_as_system_user
end
def refresh_todo
  @todo = Job.queue.select(&:repository)
  @todo_pipelines = []
  if $options[:pipelines]
    @todo_pipelines = PipelineInstance.queue
  end
end
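# Jobs without a repository cannot be dispatched: the script has to be
# checked out of git before crunch-job can run it (see the git import
# logic in start_jobs below).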
def each_slurm_line(cmd, outfmt, max_fields=nil)
  max_fields ||= outfmt.split(":").size
  max_fields += 1 # To accommodate the hostname field we prepend
  @@slurm_version ||= Gem::Version.new(`sinfo --version`.match(/\b[\d\.]+\b/)[0])
  if Gem::Version.new('2.3') <= @@slurm_version
    `#{cmd} --noheader -o '%n:#{outfmt}'`.each_line do |line|
      yield line.chomp.split(":", max_fields)
    end
  else
    # Expand rows with hostname ranges (like "foo[1-3,5,9-12]:idle")
    # into multiple rows with one hostname each.
    `#{cmd} --noheader -o '%N:#{outfmt}'`.each_line do |line|
      tokens = line.chomp.split(":", max_fields)
      if (re = tokens[0].match /^(.*?)\[([-,\d]+)\]$/)
        # Drop the unexpanded "name[...]" token; each expanded hostname
        # takes its place.
        tokens.shift
        re[2].split(",").each do |range|
          range = range.split("-").collect(&:to_i)
          (range[0]..range[-1]).each do |n|
            yield [re[1] + n.to_s] + tokens
          end
        end
      else
        yield tokens
      end
    end
  end
end
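# Example: on SLURM < 2.3, each_slurm_line("sinfo", "%t") turns the line
# "compute[1-3,5]:idle" into ["compute1", "idle"], ["compute2", "idle"],
# ["compute3", "idle"], ["compute5", "idle"]; newer SLURM emits one
# hostname per line via %n, so no expansion is needed.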
def slurm_status
  slurm_nodes = {}
  each_slurm_line("sinfo", "%t") do |hostname, state|
    # Strip trailing annotations (e.g. the "*" sinfo appends to the
    # state of unresponsive nodes).
    state.sub!(/\W+$/, "")
    state = "down" unless %w(idle alloc down).include?(state)
    slurm_nodes[hostname] = {state: state, job: nil}
  end
  each_slurm_line("squeue", "%j") do |hostname, job_uuid|
    slurm_nodes[hostname][:job] = job_uuid if slurm_nodes[hostname]
  end
  slurm_nodes
end
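# slurm_status therefore returns something like (hostnames and job uuid
# illustrative):
#   {"compute0" => {state: "idle", job: nil},
#    "compute1" => {state: "alloc", job: "zzzzz-8i9sb-0123456789abcde"}}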
def update_node_status
  return unless Server::Application.config.crunch_job_wrapper.to_s.match(/^slurm/)
  @node_state ||= {}
  slurm_status.each_pair do |hostname, slurmdata|
    next if @node_state[hostname] == slurmdata
    begin
      node = Node.where('hostname=?', hostname).order(:last_ping_at).last
      if node
        $stderr.puts "dispatch: update #{hostname} state to #{slurmdata}"
        node.info["slurm_state"] = slurmdata[:state]
        node.job_uuid = slurmdata[:job]
        if node.save
          @node_state[hostname] = slurmdata
        else
          $stderr.puts "dispatch: failed to update #{node.uuid}: #{node.errors.messages}"
        end
      elsif slurmdata[:state] != 'down'
        $stderr.puts "dispatch: SLURM reports '#{hostname}' is not down, but no node has that name"
      end
    rescue => error
      $stderr.puts "dispatch: error updating #{hostname} node status: #{error}"
    end
  end
end
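# Because @node_state caches the last state seen per host, a Node row is
# only written when SLURM's view of that host actually changes.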
def positive_int(raw_value, default=nil)
  value = begin raw_value.to_i rescue 0 end
  value > 0 ? value : default
end
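# e.g. positive_int("8", 0) => 8, positive_int(nil, 1) => 1,
# positive_int("-2", 1) => 1: missing or malformed values fall back to
# the caller's default.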
NODE_CONSTRAINT_MAP = {
  # Map Job runtime_constraints keys to the corresponding Node info key.
  'min_ram_mb_per_node' => 'total_ram_mb',
  'min_scratch_mb_per_node' => 'total_scratch_mb',
  'min_cores_per_node' => 'total_cpu_cores',
}
def nodes_available_for_job_now(job)
  # Find Nodes that satisfy a Job's runtime constraints (by building
  # a list of Procs and using them to test each Node). If there are
  # enough to run the Job, return an array of their names.
  # Otherwise, return nil.
  need_procs = NODE_CONSTRAINT_MAP.each_pair.map do |job_key, node_key|
    Proc.new do |node|
      positive_int(node.info[node_key], 0) >=
        positive_int(job.runtime_constraints[job_key], 0)
    end
  end
  min_node_count = positive_int(job.runtime_constraints['min_nodes'], 1)
  usable_nodes = []
  Node.find_each do |node|
    good_node = (node.info['slurm_state'] == 'idle')
    need_procs.each { |node_test| good_node &&= node_test.call(node) }
    if good_node
      usable_nodes << node
      if usable_nodes.count >= min_node_count
        return usable_nodes.map { |node| node.hostname }
      end
    end
  end
  nil
end
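# Example: a job whose runtime_constraints are {"min_nodes" => 2,
# "min_ram_mb_per_node" => 7168} matches only idle nodes with
# info["total_ram_mb"] >= 7168, and needs two of them before the
# hostname list is returned.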
def nodes_available_for_job(job)
  # Check if there are enough idle nodes with the Job's minimum
  # hardware requirements to run it. If so, return an array of
  # their names. If not, up to once per hour, signal start_jobs to
  # hold off launching Jobs. This delay is meant to give the Node
  # Manager an opportunity to make new resources available for new
  # Jobs.
  #
  # The exact timing parameters here might need to be adjusted for
  # the best balance between helping the longest-waiting Jobs run,
  # and making efficient use of immediately available resources.
  # These are all just first efforts until we have more data to work
  # with.
  nodelist = nodes_available_for_job_now(job)
  if nodelist.nil? and not did_recently(:wait_for_available_nodes, 3600)
    $stderr.puts "dispatch: waiting for nodes for #{job.uuid}"
    @node_wait_deadline = Time.now + 5.minutes
  end
  nodelist
end
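# The warning is throttled to once per hour; while the resulting
# 5-minute @node_wait_deadline is in the future, start_jobs breaks out
# early instead of trying to salloc jobs that cannot get nodes yet.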
def start_jobs
  @todo.each do |job|
    next if @running[job.uuid]

    cmd_args = nil
    case Server::Application.config.crunch_job_wrapper
    when :none
      # Don't run more than one at a time.
      return if @running.size > 0
      cmd_args = []
    when :slurm_immediate
      nodelist = nodes_available_for_job(job)
      if nodelist.nil?
        break if Time.now < @node_wait_deadline
        next
      end
      cmd_args = ["salloc",
                  "--job-name=#{job.uuid}",
                  "--nodelist=#{nodelist.join(',')}"]
    else
      raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"
    end
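    # With two matched nodes this builds, e.g.:
    #   salloc --job-name=zzzzz-8i9sb-0123456789abcde --nodelist=compute0,compute1
    # (illustrative uuid and hostnames; crunch-job and its arguments are
    # appended below).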
    if Server::Application.config.crunch_job_user
      cmd_args.unshift("sudo", "-E", "-u",
                       Server::Application.config.crunch_job_user,
                       "PATH=#{ENV['PATH']}",
                       "PERLLIB=#{ENV['PERLLIB']}",
                       "PYTHONPATH=#{ENV['PYTHONPATH']}",
                       "RUBYLIB=#{ENV['RUBYLIB']}",
                       "GEM_PATH=#{ENV['GEM_PATH']}")
    end
    job_auth = ApiClientAuthorization.
      new(user: User.where('uuid=?', job.modified_by_user_uuid).first,
          api_client_id: 0)
    unless job_auth.save
      $stderr.puts "dispatch: job_auth.save failed"
      next
    end
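    # This fresh token lets the child crunch-job act as the user who
    # submitted the job; it is expired again when the child exits (see
    # the update_attributes call at the end of the reaping code below).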
    crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
    if crunch_job_bin == ''
      raise "No CRUNCH_JOB_BIN env var, and arv-crunch-job not in path."
    end
    arvados_internal = Rails.configuration.git_internal_dir
    if not File.exist? arvados_internal
      $stderr.puts `mkdir -p #{arvados_internal.shellescape} && cd #{arvados_internal.shellescape} && git init --bare`
    end
    repo_root = Rails.configuration.git_repositories_dir
    src_repo = File.join(repo_root, job.repository + '.git')
    if not File.exist? src_repo
      # Fall back to a non-bare repository layout.
      src_repo = File.join(repo_root, job.repository, '.git')
      if not File.exist? src_repo
        $stderr.puts "dispatch: No #{job.repository}.git or #{job.repository}/.git at #{repo_root}"
        next
      end
    end
    git = "git --git-dir=#{arvados_internal.shellescape}"

    # Check whether the commit needs to be fetched.
    commit_rev = `#{git} rev-list -n1 #{job.script_version.shellescape} 2>/dev/null`.chomp
    unless $?.success? and commit_rev == job.script_version
      # The commit does not exist in the internal repository, so import
      # the source repository using git fetch-pack.
      cmd = "#{git} fetch-pack --no-progress --all #{src_repo.shellescape}"
      $stderr.puts `#{cmd}`
      unless $?.success?
        $stderr.puts "dispatch: git fetch-pack failed"
        next
      end
    end
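    # Every user repository gets mirrored into the single internal bare
    # repository, so crunch-job (pointed at it with --git-dir below) can
    # check out any job's script_version from one place.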
    # Check whether the commit is already tagged with this job uuid.
    tag_rev = `#{git} rev-list -n1 #{job.uuid.shellescape} 2>/dev/null`.chomp
    if !$?.success?
      # No job tag found, so create one.
      cmd = "#{git} tag #{job.uuid.shellescape} #{job.script_version.shellescape}"
      $stderr.puts `#{cmd}`
      unless $?.success?
        $stderr.puts "dispatch: git tag failed"
        next
      end
    else
      # Job tag found; check that it has the expected revision.
      unless tag_rev == job.script_version
        # Uh oh, the tag doesn't point to the revision we were expecting.
        # Someone has been monkeying with the job record and/or git.
        $stderr.puts "dispatch: Already a tag #{job.uuid} pointing to commit #{tag_rev} but expected commit #{job.script_version}"
        job.state = "Failed"
        if not job.save
          $stderr.puts "dispatch: job.save failed"
        end
        next
      end
    end
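    # The uuid tag pins exactly which commit this job ran; the mismatch
    # branch above catches anyone who rewrites the tag or the job record
    # after the fact.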
    cmd_args << crunch_job_bin
    cmd_args << '--job-api-token'
    cmd_args << job_auth.api_token
    cmd_args << '--git-dir'
    cmd_args << arvados_internal

    $stderr.puts "dispatch: #{cmd_args.join ' '}"
    begin
      i, o, e, t = Open3.popen3(*cmd_args)
    rescue
      $stderr.puts "dispatch: popen3: #{$!}"
      next
    end
    $stderr.puts "dispatch: job #{job.uuid}"
    start_banner = "dispatch: child #{t.pid} start #{Time.now.ctime}"
    $stderr.puts start_banner
    @running[job.uuid] = {
      stdin: i, stdout: o, stderr: e,
      wait_thr: t, job: job, job_auth: job_auth,
      started: false, sent_int: 0,
      buf: {stderr: '', stdout: ''},
      stderr_buf_to_flush: '',
      stderr_flushed_at: 0,
      bytes_logged: 0, events_logged: 0,
      log_truncated: false,
    }
  end
end
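# Each @running entry carries everything the polling loop needs: the
# child's pipes for select()/read_nonblock, the wait thread for reaping,
# and the log buffering and throttling counters used by write_log.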
def read_pipes
  @running.each do |job_uuid, j|
    j[:buf].each do |stream, streambuf|
      # Read some data from the child stream
      buf = false
      begin
        buf = j[stream].read_nonblock(2**16)
      rescue Errno::EAGAIN, EOFError
      end

      if buf
        # Append the new data to the buffer
        streambuf << buf

        # Check for at least one complete line
        if streambuf.index "\n"
          lines = streambuf.lines("\n").to_a

          # Check whether the last line is partial.
          j[:buf][stream] = if streambuf[-1] == "\n"
                              ''
                            else
                              # Put the partial line back into the buffer
                              lines.pop
                            end

          # Now spool the lines to the log output buffer
          lines.each do |line|
            $stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
            $stderr.puts line
            pub_msg = "#{Time.now.ctime} #{line.strip} \n"
            if not j[:log_truncated]
              j[:stderr_buf_to_flush] << pub_msg
            end
          end

          # Now actually send the log output to the logs table
          if not j[:log_truncated]
            if (Rails.configuration.crunch_log_bytes_per_event < j[:stderr_buf_to_flush].size or
                (j[:stderr_flushed_at] + Rails.configuration.crunch_log_seconds_between_events < Time.now.to_i))
              write_log j
            end
          end
        end
      end
    end
  end
end
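# For instance, with crunch_log_bytes_per_event = 4096 and
# crunch_log_seconds_between_events = 1 (illustrative settings), a chatty
# job flushes roughly every 4 KiB, and a quiet one flushes whatever it
# has about once per second.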
def reap_children
  return if @running.empty?
  pid_done = nil
  j_done = nil

  begin
    pid_done = Process.waitpid(-1, Process::WNOHANG | Process::WUNTRACED)
    if pid_done
      j_done = @running.values.
        select { |j| j[:wait_thr].pid == pid_done }.
        first
    end
  rescue SystemCallError
    # I have @running processes but system reports I have no
    # children. This is likely to happen repeatedly if it happens at
    # all; I will log this no more than once per child process I
    # start.
    if @running.any? { |uuid, j| j[:warned_waitpid_error].nil? }
      children = @running.values.collect { |j| j[:wait_thr].pid }.join ' '
      $stderr.puts "dispatch: IPC bug: waitpid() error (#{$!}), but I have children #{children}"
    end
    @running.each do |uuid, j| j[:warned_waitpid_error] = true end
  end
  if pid_done.nil?
    # Also check the wait threads: Thread#status is false once the child
    # has exited and its thread has finished.
    @running.each do |uuid, j|
      if j[:wait_thr].status == false
        pid_done = j[:wait_thr].pid
        j_done = j
      end
    end
  end

  return if j_done.nil?
  job_done = j_done[:job]
  $stderr.puts "dispatch: child #{pid_done} exit"
  $stderr.puts "dispatch: job #{job_done.uuid} end"

  # Ensure every last drop of stdout and stderr is consumed
  read_pipes
  write_log j_done # write any remaining logs

  j_done[:buf].each do |stream, streambuf|
    if streambuf != ''
      $stderr.puts streambuf + "\n"
    end
  end

  # Wait for the child (wait_thr.value returns a Process::Status).
  exit_status = j_done[:wait_thr].value.exitstatus
  jobrecord = Job.find_by_uuid(job_done.uuid)
  if exit_status != 75 and jobrecord.state == "Running"
    # crunch-job did not return exit code 75 (see below) and left the job
    # in the "Running" state, which means there was an unhandled error.
    # Fail the job.
    jobrecord.state = "Failed"
    if not jobrecord.save
      $stderr.puts "dispatch: jobrecord.save failed"
    end
  else
    # Don't fail the job if crunch-job didn't even get as far as
    # starting it. If the job failed to run due to an infrastructure
    # issue with crunch-job or slurm, we want the job to stay in the
    # queue. If crunch-job exited after losing a race to another
    # crunch-job process, it exits 75 and we should leave the job
    # record alone so the winner of the race can do its thing.
    #
    # There is still an unhandled race condition: if our crunch-job
    # process is about to lose a race with another crunch-job process,
    # but crashes before getting to its "exit 75" (for example, "cannot
    # fork" or "cannot reach API server"), then we will incorrectly
    # assume that our process is the one that set jobrecord.started_at,
    # and mark the job as failed even though the winner of the race is
    # probably still doing fine.
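    #
    # (Exit code 75 follows the sysexits.h EX_TEMPFAIL convention: a
    # temporary failure that is safe to retry.)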
  end

  # Invalidate the per-job auth token
  j_done[:job_auth].update_attributes expires_at: Time.now

  @running.delete job_done.uuid
end
def update_pipelines
  expire_tokens = @pipe_auth_tokens.dup
  @todo_pipelines.each do |p|
    pipe_auth = (@pipe_auth_tokens[p.uuid] ||= ApiClientAuthorization.
                 create(user: User.where('uuid=?', p.modified_by_user_uuid).first,
                        api_client_id: 0))
    puts `export ARVADOS_API_TOKEN=#{pipe_auth.api_token} && arv-run-pipeline-instance --run-pipeline-here --no-wait --instance #{p.uuid}`
    expire_tokens.delete p.uuid
  end

  expire_tokens.each do |k, v|
    v.update_attributes expires_at: Time.now
    @pipe_auth_tokens.delete k
  end
end
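# Tokens belonging to pipelines that are no longer queued are expired
# and forgotten, so each queued pipeline instance holds exactly one live
# token at a time.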
def run
  @running ||= {}
  @pipe_auth_tokens ||= { }
  $stderr.puts "dispatch: ready"
  while !$signal[:term] or @running.size > 0
    read_pipes
    if $signal[:term]
      @running.each do |uuid, j|
        if !j[:started] and j[:sent_int] < 2
          begin
            Process.kill 'INT', j[:wait_thr].pid
          rescue Errno::ESRCH
            # No such pid = race condition + desired result is
            # already achieved
          end
          j[:sent_int] += 1
        end
      end
    else
      refresh_todo unless did_recently(:refresh_todo, 1.0)
      update_node_status
      unless @todo.empty? or did_recently(:start_jobs, 1.0) or $signal[:term]
        start_jobs
      end
      unless (@todo_pipelines.empty? and @pipe_auth_tokens.empty?) or did_recently(:update_pipelines, 5.0)
        update_pipelines
      end
    end
    reap_children
    select(@running.values.collect { |j| [j[:stdout], j[:stderr]] }.flatten,
           [], [], 1)
  end
end
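# The select() call doubles as the loop's sleep: it wakes as soon as any
# child produces output, or after one second, whichever comes first.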
def too_many_bytes_logged_for_job(j)
  return (j[:bytes_logged] + j[:stderr_buf_to_flush].size >
          Rails.configuration.crunch_limit_log_event_bytes_per_job)
end

def too_many_events_logged_for_job(j)
  return (j[:events_logged] >= Rails.configuration.crunch_limit_log_events_per_job)
end
def did_recently(thing, min_interval)
  # Returns true if `thing` already happened within the last
  # min_interval seconds; otherwise records the current time and
  # returns false, so the caller proceeds and the interval restarts.
  @did_recently ||= {}
  if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
    @did_recently[thing] = Time.now
    false
  else
    true
  end
end
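# e.g. `refresh_todo unless did_recently(:refresh_todo, 1.0)` in the main
# loop runs refresh_todo at most once per second.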
# Send buffered output to the Log table. We want these records to be
# transient.
def write_log running_job
  return if running_job[:log_truncated]
  return if running_job[:stderr_buf_to_flush] == ''
  begin
    # Truncate logs if they exceed crunch_limit_log_event_bytes_per_job
    # or crunch_limit_log_events_per_job.
    if too_many_bytes_logged_for_job(running_job)
      running_job[:log_truncated] = true
      running_job[:stderr_buf_to_flush] =
        "Server configured limit reached (crunch_limit_log_event_bytes_per_job: #{Rails.configuration.crunch_limit_log_event_bytes_per_job}). Subsequent logs truncated"
    elsif too_many_events_logged_for_job(running_job)
      running_job[:log_truncated] = true
      running_job[:stderr_buf_to_flush] =
        "Server configured limit reached (crunch_limit_log_events_per_job: #{Rails.configuration.crunch_limit_log_events_per_job}). Subsequent logs truncated"
    end
    log = Log.new(object_uuid: running_job[:job].uuid,
                  event_type: 'stderr',
                  owner_uuid: running_job[:job].owner_uuid,
                  properties: {"text" => running_job[:stderr_buf_to_flush]})
    log.save!
    running_job[:bytes_logged] += running_job[:stderr_buf_to_flush].size
    running_job[:events_logged] += 1
  rescue
    running_job[:buf][:stderr] = "Failed to write logs\n" + running_job[:buf][:stderr]
  end
  running_job[:stderr_buf_to_flush] = ''
  running_job[:stderr_flushed_at] = Time.now.to_i
end
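# On a failed save only the "Failed to write logs" marker survives
# (prepended to the in-memory stderr buffer); the text that failed to
# flush is discarded when write_log clears the buffer.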
# This is how crunch-job child procs know where the "refresh" trigger file is.
ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger