include Process
+$warned = {}
$signal = {}
%w{TERM INT}.each do |sig|
  signame = sig
  Signal.trap(signame) do
    # Tell the main loop to stop starting new jobs
    $signal[:term] = true
  end
end
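+# If CRUNCH_DISPATCH_LOCKFILE is set, take an exclusive non-blocking flock on it
+# so only one dispatcher runs at a time; a second copy aborts immediately.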
+if ENV["CRUNCH_DISPATCH_LOCKFILE"]
+ lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
+ lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
+ unless lockfile.flock File::LOCK_EX|File::LOCK_NB
+ abort "Lock unavailable on #{lockfilename} - exit"
+ end
+end
+
ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
require File.dirname(__FILE__) + '/../config/boot'
require 'open3'
$redis ||= Redis.new
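+# Cap the per-job log buffer kept in Redis at ~1 MiB; once it grows past this,
+# the oldest half is dropped (see the getrange trim in the log reader below).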
+LOG_BUFFER_SIZE = 2**20
class Dispatcher
include ApplicationHelper
def refresh_todo
@todo = Job.queue
+ @todo_pipelines = PipelineInstance.queue
end
- def start_jobs
+ def sinfo
+ @@slurm_version ||= Gem::Version.new(`sinfo --version`.match(/\b[\d\.]+\b/)[0])
+ if Gem::Version.new('2.3') <= @@slurm_version
+ `sinfo --noheader -o '%n:%t'`.strip
+ else
+ # Expand rows with hostname ranges (like "foo[1-3,5,9-12]:idle")
+ # into multiple rows with one hostname each.
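+ # e.g. "foo[1-3]:idle" expands to "foo1:idle\nfoo2:idle\nfoo3:idle".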
+ `sinfo --noheader -o '%N:%t'`.split("\n").collect do |line|
+ tokens = line.split ":"
+ if (re = tokens[0].match /^(.*?)\[([-,\d]+)\]$/)
+ re[2].split(",").collect do |range|
+ range = range.split("-").collect(&:to_i)
+ (range[0]..range[-1]).collect do |n|
+ [re[1] + n.to_s, tokens[1..-1]].join ":"
+ end
+ end
+ else
+ tokens.join ":"
+ end
+ end.flatten.join "\n"
+ end
+ end
+
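+ # Poll sinfo for each node's state, tally idle/alloc/down counts, and record
+ # any state change in the corresponding Node's info[:slurm_state].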
+ def update_node_status
if Server::Application.config.crunch_job_wrapper.to_s.match /^slurm/
- @idle_slurm_nodes = 0
+ @nodes_in_state = {idle: 0, alloc: 0, down: 0}
+ @node_state ||= {}
+ node_seen = {}
begin
- `sinfo`.
- split("\n").
- collect { |line| line.match /(\d+) +idle/ }.
- each do |re|
- @idle_slurm_nodes = re[1].to_i if re
+ sinfo.split("\n").
+ each do |line|
+ re = line.match /(\S+?):+(idle|alloc|down)/
+ next if !re
+
+ # sinfo tells us about a node N times if it is shared by N partitions
+ next if node_seen[re[1]]
+ node_seen[re[1]] = true
+
+ # count nodes in each state
+ @nodes_in_state[re[2].to_sym] += 1
+
+ # update our database (and cache) when a node's state changes
+ if @node_state[re[1]] != re[2]
+ @node_state[re[1]] = re[2]
+ node = Node.where('hostname=?', re[1]).first
+ if node
+ $stderr.puts "dispatch: update #{re[1]} state to #{re[2]}"
+ node.info[:slurm_state] = re[2]
+ node.save
+ elsif re[2] != 'down'
+ $stderr.puts "dispatch: sinfo reports '#{re[1]}' is not down, but no node has that name"
+ end
+ end
end
rescue
end
end
+ end
+ def start_jobs
@todo.each do |job|
min_nodes = 1
begin
- if job.resource_limits['min_nodes']
- min_nodes = begin job.resource_limits['min_nodes'].to_i rescue 1 end
+ if job.runtime_constraints['min_nodes']
+ min_nodes = begin job.runtime_constraints['min_nodes'].to_i rescue 1 end
end
end
- next if @idle_slurm_nodes and @idle_slurm_nodes < min_nodes
+
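+ # Skip the job if fewer idle nodes than min_nodes are available; the bare
+ # rescue covers the non-slurm case where @nodes_in_state is still nil.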
+ begin
+ next if @nodes_in_state[:idle] < min_nodes
+ rescue
+ end
next if @running[job.uuid]
next if !take(job)
api_client_id: 0)
job_auth.save
- cmd_args << (ENV['CRUNCH_JOB_BIN'] || `which crunch-job`.strip)
+ crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
+ if crunch_job_bin == ''
+ raise "No CRUNCH_JOB_BIN env var, and arv-crunch-job not in path."
+ end
+
+ cmd_args << crunch_job_bin
cmd_args << '--job-api-token'
cmd_args << job_auth.api_token
cmd_args << '--job'
untake(job)
next
end
- $stderr.puts "dispatch: job #{job.uuid} start"
- $stderr.puts "dispatch: child #{t.pid} start"
+
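+ # Seed the per-job Redis log key with a start banner and publish it on both
+ # the job's and the owner's channels so live log watchers see the start.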
+ $stderr.puts "dispatch: job #{job.uuid}"
+ start_banner = "dispatch: child #{t.pid} start #{Time.now.ctime.to_s}"
+ $stderr.puts start_banner
+ $redis.set job.uuid, start_banner + "\n"
+ $redis.publish job.uuid, start_banner
+ $redis.publish job.owner_uuid, start_banner
+
@running[job.uuid] = {
stdin: i,
stdout: o,
lines.each do |line|
$stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
$stderr.puts line
- $redis.publish job_uuid, "#{Time.now.ctime.to_s} #{line}"
+ pub_msg = "#{Time.now.ctime.to_s} #{line.strip}"
+ $redis.publish job.owner_uuid, pub_msg
+ $redis.publish job_uuid, pub_msg
+ $redis.append job_uuid, pub_msg + "\n"
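+ # Trim the Redis log buffer once it exceeds LOG_BUFFER_SIZE: keep the newest
+ # half and drop any partial line left at the front.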
+ if LOG_BUFFER_SIZE < $redis.strlen(job_uuid)
+ $redis.set(job_uuid,
+ $redis
+ .getrange(job_uuid, (LOG_BUFFER_SIZE >> 1), -1)
+ .sub(/^.*?\n/, ''))
+ end
end
end
end
job_done = j_done[:job]
$stderr.puts "dispatch: child #{pid_done} exit"
$stderr.puts "dispatch: job #{job_done.uuid} end"
- $redis.publish job_done.uuid, "end"
# Ensure every last drop of stdout and stderr is consumed
read_pipes
# Wait for the wait thread to return the child's exit status
j_done[:wait_thr].value
+ jobrecord = Job.find_by_uuid(job_done.uuid)
+ jobrecord.running = false
+ jobrecord.finished_at ||= Time.now
+ # Don't set 'jobrecord.success = false' because if the job failed to run due to an
+ # issue with crunch-job or slurm, we want the job to stay in the queue.
+ jobrecord.save!
+
# Invalidate the per-job auth token
j_done[:job_auth].update_attributes expires_at: Time.now
+ $redis.publish job_done.uuid, "end"
+
@running.delete job_done.uuid
end
+ def update_pipelines
+ @todo_pipelines.each do |p|
+ pipe_auth = ApiClientAuthorization.
+ new(user: User.where('uuid=?', p.modified_by_user_uuid).first,
+ api_client_id: 0)
+ pipe_auth.save
+
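+ # Start the pipeline via arv-run-pipeline-instance using the freshly minted
+ # per-pipeline token; --no-wait returns without waiting for it to finish.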
+ puts `export ARVADOS_API_TOKEN=#{pipe_auth.api_token} && arv-run-pipeline-instance --run-here --no-wait --instance #{p.uuid}`
+ end
+ end
+
def run
act_as_system_user
@running ||= {}
end
else
refresh_todo unless did_recently(:refresh_todo, 1.0)
- start_jobs unless @todo.empty? or did_recently(:start_jobs, 1.0)
+ update_node_status
+ unless @todo.empty? or did_recently(:start_jobs, 1.0) or $signal[:term]
+ start_jobs
+ end
+ unless @todo_pipelines.empty? or did_recently(:update_pipelines, 5.0)
+ update_pipelines
+ end
end
reap_children
select(@running.values.collect { |j| [j[:stdout], j[:stderr]] }.flatten,
end
end
+# This is how crunch-job child procs know where the "refresh" trigger file is
+ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger
+
Dispatcher.new.run