$options = {}
(ARGV.any? ? ARGV : ['--jobs', '--pipelines']).each do |arg|
  case arg
  when '--jobs'
    $options[:jobs] = true
  when '--pipelines'
    $options[:pipelines] = true
  else
    abort "Unrecognized command line option '#{arg}'"
  end
end
if not ($options[:jobs] or $options[:pipelines])
  abort "Nothing to do. Please specify at least one of: --jobs, --pipelines."
end

ARGV.reject! { |a| a =~ /--jobs|--pipelines/ }
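
# Usage sketch (invocation paths are illustrative, not prescribed by this
# script): `script/crunch-dispatch.rb production --jobs` dispatches jobs only
# in the production Rails environment; with no arguments it handles both jobs
# and pipelines. The flags are stripped from ARGV above so that the remaining
# first argument can select RAILS_ENV below.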
$signal = {}
%w{TERM INT}.each do |sig|
  signame = sig
  Signal.trap(sig) do
    $stderr.puts "Received #{signame} signal"
    $signal[:term] = true
  end
end
if ENV["CRUNCH_DISPATCH_LOCKFILE"]
  lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
  lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
  unless lockfile.flock File::LOCK_EX|File::LOCK_NB
    abort "Lock unavailable on #{lockfilename} - exit"
  end
end
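
# Illustrative single-instance invocation (lockfile path hypothetical):
#   CRUNCH_DISPATCH_LOCKFILE=/var/lock/crunch-dispatch.lock script/crunch-dispatch.rb
# A second dispatcher started with the same lockfile aborts immediately
# rather than competing with the first, because the non-blocking
# flock(LOCK_EX|LOCK_NB) fails fast when the lock is already held.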
ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"

require File.dirname(__FILE__) + '/../config/boot'
require File.dirname(__FILE__) + '/../config/environment'
require 'open3'
class LogTime < Time
  def to_s
    self.utc.strftime "%Y-%m-%d_%H:%M:%S"
  end
end
class Dispatcher
  include ApplicationHelper

  EXIT_TEMPFAIL = 75
  EXIT_RETRY_UNLOCKED = 93
  RETRY_UNLOCKED_LIMIT = 3
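
  # EXIT_TEMPFAIL matches EX_TEMPFAIL (75) from BSD sysexits.h, the
  # conventional "temporary failure, retry later" code; crunch-job uses it
  # when it loses a race for a job to another dispatcher. EXIT_RETRY_UNLOCKED
  # (93) is crunch-specific: it means every node allocated to the job failed,
  # and only this dispatcher can retry the job (see reap_children below).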
  def initialize
    @crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
    if @crunch_job_bin.empty?
      raise "No CRUNCH_JOB_BIN env var, and crunch-job not in path."
    end

    @arvados_internal = Rails.configuration.git_internal_dir
    if not File.exist? @arvados_internal
      $stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
      raise "No internal git repository available" unless ($? == 0)
    end

    @repo_root = Rails.configuration.git_repositories_dir
    @arvados_repo_path = Repository.where(name: "arvados").first.server_path
    @authorizations = {}
    @did_recently = {}
    @fetched_commits = {}
    @git_tags = {}
    @node_state = {}
    @pipe_auth_tokens = {}
    @running = {}
    @todo = []
    @todo_pipelines = []
    @todo_job_retries = {}
    @job_retry_counts = Hash.new(0)
  end
  def sysuser
    return act_as_system_user
  end
  def refresh_todo
    if $options[:jobs]
      @todo = @todo_job_retries.values + Job.queue.select(&:repository)
    end
    if $options[:pipelines]
      @todo_pipelines = PipelineInstance.queue
    end
  end
  def each_slurm_line(cmd, outfmt, max_fields=nil)
    max_fields ||= outfmt.split(":").size
    max_fields += 1  # To accommodate the node field we add
    @@slurm_version ||= Gem::Version.new(`sinfo --version`.match(/\b[\d\.]+\b/)[0])
    if Gem::Version.new('2.3') <= @@slurm_version
      `#{cmd} --noheader -o '%n:#{outfmt}'`.each_line do |line|
        yield line.chomp.split(":", max_fields)
      end
    else
      # Expand rows with hostname ranges (like "foo[1-3,5,9-12]:idle")
      # into multiple rows with one hostname each.
      `#{cmd} --noheader -o '%N:#{outfmt}'`.each_line do |line|
        tokens = line.chomp.split(":", max_fields)
        if (re = tokens[0].match(/^(.*?)\[([-,\d]+)\]$/))
          tokens.shift
          re[2].split(",").each do |range|
            range = range.split("-").collect(&:to_i)
            (range[0]..range[-1]).each do |n|
              yield [re[1] + n.to_s] + tokens
            end
          end
        else
          yield tokens
        end
      end
    end
  end
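
  # Illustrative expansion (hostnames hypothetical): with SLURM < 2.3, an
  # sinfo row "compute[1-3,9]:idle" yields four separate rows:
  #   ["compute1", "idle"], ["compute2", "idle"],
  #   ["compute3", "idle"], ["compute9", "idle"]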
  def slurm_status
    slurm_nodes = {}
    each_slurm_line("sinfo", "%t") do |hostname, state|
      # Treat nodes in idle* state as down, because the * means that slurm
      # hasn't been able to communicate with it recently.
      state.sub!(/^idle\*/, "down")
      state.sub!(/\W+$/, "")
      state = "down" unless %w(idle alloc down).include?(state)
      slurm_nodes[hostname] = {state: state, job: nil}
    end
    each_slurm_line("squeue", "%j") do |hostname, job_uuid|
      slurm_nodes[hostname][:job] = job_uuid if slurm_nodes[hostname]
    end
    slurm_nodes
  end
  def update_node_status
    return unless Server::Application.config.crunch_job_wrapper.to_s.match(/^slurm/)
    slurm_status.each_pair do |hostname, slurmdata|
      next if @node_state[hostname] == slurmdata
      begin
        node = Node.where('hostname=?', hostname).order(:last_ping_at).last
        if node
          $stderr.puts "dispatch: update #{hostname} state to #{slurmdata}"
          node.info["slurm_state"] = slurmdata[:state]
          node.job_uuid = slurmdata[:job]
          if node.save
            @node_state[hostname] = slurmdata
          else
            $stderr.puts "dispatch: failed to update #{node.uuid}: #{node.errors.messages}"
          end
        elsif slurmdata[:state] != 'down'
          $stderr.puts "dispatch: SLURM reports '#{hostname}' is not down, but no node has that name"
        end
      rescue => error
        $stderr.puts "dispatch: error updating #{hostname} node status: #{error}"
      end
    end
  end
  def positive_int(raw_value, default=nil)
    value = begin raw_value.to_i rescue 0 end
    if value > 0
      value
    else
      default
    end
  end
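
  # For example: positive_int("4", 1) => 4, positive_int("0", 1) => 1, and
  # positive_int(nil, 1) => 1 (nil.to_i is 0, which is not positive).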
  NODE_CONSTRAINT_MAP = {
    # Map Job runtime_constraints keys to the corresponding Node info key.
    'min_ram_mb_per_node' => 'total_ram_mb',
    'min_scratch_mb_per_node' => 'total_scratch_mb',
    'min_cores_per_node' => 'total_cpu_cores',
  }
  def nodes_available_for_job_now(job)
    # Find Nodes that satisfy a Job's runtime constraints (by building
    # a list of Procs and using them to test each Node). If there are
    # enough to run the Job, return an array of their names.
    # Otherwise, return nil.
    need_procs = NODE_CONSTRAINT_MAP.each_pair.map do |job_key, node_key|
      Proc.new do |node|
        positive_int(node.info[node_key], 0) >=
          positive_int(job.runtime_constraints[job_key], 0)
      end
    end
    min_node_count = positive_int(job.runtime_constraints['min_nodes'], 1)
    usable_nodes = []
    Node.find_each do |node|
      good_node = (node.info['slurm_state'] == 'idle')
      need_procs.each { |node_test| good_node &&= node_test.call(node) }
      if good_node
        usable_nodes << node
        if usable_nodes.count >= min_node_count
          return usable_nodes.map { |node| node.hostname }
        end
      end
    end
    nil
  end
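
  # E.g. (values hypothetical): a job whose runtime_constraints are
  # {"min_ram_mb_per_node" => 4096, "min_nodes" => 2} matches only idle
  # nodes whose info reports total_ram_mb >= 4096, and this returns nil
  # until at least two such nodes exist.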
  def nodes_available_for_job(job)
    # Check if there are enough idle nodes with the Job's minimum
    # hardware requirements to run it. If so, return an array of
    # their names. If not, up to once per hour, signal start_jobs to
    # hold off launching Jobs. This delay is meant to give the Node
    # Manager an opportunity to make new resources available for new
    # Jobs.
    #
    # The exact timing parameters here might need to be adjusted for
    # the best balance between helping the longest-waiting Jobs run,
    # and making efficient use of immediately available resources.
    # These are all just first efforts until we have more data to work
    # with.
    nodelist = nodes_available_for_job_now(job)
    if nodelist.nil? and not did_recently(:wait_for_available_nodes, 3600)
      $stderr.puts "dispatch: waiting for nodes for #{job.uuid}"
      @node_wait_deadline = Time.now + 5.minutes
    end
    nodelist
  end
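
  # While @node_wait_deadline is in the future, the :slurm_immediate branch
  # of start_jobs stops trying to launch queued jobs, giving Node Manager a
  # five-minute window to bring new capacity online.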
  def fail_job job, message
    $stderr.puts "dispatch: #{job.uuid}: #{message}"
    begin
      Log.new(object_uuid: job.uuid,
              event_type: 'dispatch',
              owner_uuid: job.owner_uuid,
              summary: message,
              properties: {"text" => message}).save!
    rescue
      $stderr.puts "dispatch: log.create failed"
    end

    begin
      job.lock @authorizations[job.uuid].user.uuid
      job.state = "Failed"
      if not job.save
        $stderr.puts "dispatch: save failed setting job #{job.uuid} to failed"
      end
    rescue ArvadosModel::AlreadyLockedError
      $stderr.puts "dispatch: tried to mark job #{job.uuid} as failed but it was already locked by someone else"
    end
  end
  def stdout_s(cmd_a, opts={})
    IO.popen(cmd_a, "r", opts) do |pipe|
      return pipe.read.chomp
    end
  end

  def git_cmd(*cmd_a)
    ["git", "--git-dir=#{@arvados_internal}"] + cmd_a
  end
  def get_authorization(job)
    if @authorizations[job.uuid] and
        @authorizations[job.uuid].user.uuid != job.modified_by_user_uuid
      # We already made a token for this job, but we need a new one
      # because modified_by_user_uuid has changed (the job will run
      # as a different user).
      @authorizations[job.uuid].update_attributes expires_at: Time.now
      @authorizations[job.uuid] = nil
    end
    if not @authorizations[job.uuid]
      auth = ApiClientAuthorization.
        new(user: User.where('uuid=?', job.modified_by_user_uuid).first,
            api_client_id: 0)
      if not auth.save
        $stderr.puts "dispatch: auth.save failed for #{job.uuid}"
      else
        @authorizations[job.uuid] = auth
      end
    end
    @authorizations[job.uuid]
  end
  def internal_repo_has_commit? sha1
    if (not @fetched_commits[sha1] and
        sha1 == stdout_s(git_cmd("rev-list", "-n1", sha1), err: "/dev/null") and
        $? == 0)
      @fetched_commits[sha1] = true
    end
    return @fetched_commits[sha1]
  end
  def get_commit src_repo, sha1
    return true if internal_repo_has_commit? sha1

    # The commit does not exist in the internal repository, so import the
    # source repository using git fetch-pack.
    cmd = git_cmd("fetch-pack", "--no-progress", "--all", src_repo)
    $stderr.puts "dispatch: #{cmd}"
    $stderr.puts(stdout_s(cmd))
    @fetched_commits[sha1] = ($? == 0)
  end
  def tag_commit(job, commit_hash, tag_name)
    # @git_tags[T]==V if we know commit V has been tagged T in the
    # arvados_internal repository. (The job argument is needed so we can
    # fail the job when the tag state is inconsistent.)
    if not @git_tags[tag_name]
      cmd = git_cmd("tag", tag_name, commit_hash)
      $stderr.puts "dispatch: #{cmd}"
      $stderr.puts(stdout_s(cmd, err: "/dev/null"))
      unless $? == 0
        # git tag failed. This may be because the tag already exists, so check for that.
        tag_rev = stdout_s(git_cmd("rev-list", "-n1", tag_name))
        if $? == 0
          # We got a revision back
          if tag_rev != commit_hash
            # Uh oh, the tag doesn't point to the revision we were expecting.
            # Someone has been monkeying with the job record and/or git.
            fail_job job, "Existing tag #{tag_name} points to commit #{tag_rev} but expected commit #{commit_hash}"
            return nil
          end
          # We're okay (fall through to setting @git_tags below).
        else
          # git rev-list failed for some reason.
          fail_job job, "'git tag' for #{tag_name} failed but did not find any existing tag using 'git rev-list'"
          return nil
        end
      end
      # 'git tag' was successful, or there is an existing tag that points to the same revision.
      @git_tags[tag_name] = commit_hash
    elsif @git_tags[tag_name] != commit_hash
      fail_job job, "Existing tag #{tag_name} points to commit #{@git_tags[tag_name]} but this job uses commit #{commit_hash}"
      return nil
    end
    return true
  end
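
  # Example flow (commit hash illustrative): the first
  # tag_commit(job, "abc123", job.uuid) call runs `git tag <job uuid> abc123`
  # in the internal repository; a later call with the same pair hits the
  # @git_tags cache; a call with a different commit for the same tag fails
  # the job rather than silently retagging.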
  def start_jobs
    @todo.each do |job|
      next if @running[job.uuid]

      cmd_args = nil
      case Server::Application.config.crunch_job_wrapper
      when :none
        if @running.size > 0
          # Don't run more than one at a time.
          return
        end
        cmd_args = []
      when :slurm_immediate
        nodelist = nodes_available_for_job(job)
        if nodelist.nil?
          if Time.now < @node_wait_deadline
            break
          else
            next
          end
        end
        cmd_args = ["salloc",
                    "--chdir=/",
                    "--immediate",
                    "--exclusive",
                    "--no-kill",
                    "--job-name=#{job.uuid}",
                    "--nodelist=#{nodelist.join(',')}"]
      else
        raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"
      end

      if Server::Application.config.crunch_job_user
        cmd_args.unshift("sudo", "-E", "-u",
                         Server::Application.config.crunch_job_user,
                         "PATH=#{ENV['PATH']}",
                         "PERLLIB=#{ENV['PERLLIB']}",
                         "PYTHONPATH=#{ENV['PYTHONPATH']}",
                         "RUBYLIB=#{ENV['RUBYLIB']}",
                         "GEM_PATH=#{ENV['GEM_PATH']}")
      end

      next unless get_authorization job

      ready = internal_repo_has_commit? job.script_version

      if not ready
        # Import the commit from the specified repository into the
        # internal repository. This should have been done already when
        # the job was created/updated; this code is obsolete except to
        # avoid deployment races. Failing the job would be a
        # reasonable thing to do at this point.
        repo = Repository.where(name: job.repository).first
        if repo.nil? or repo.server_path.nil?
          fail_job job, "Repository #{job.repository} not found under #{@repo_root}"
          next
        end
        ready &&= get_commit repo.server_path, job.script_version
        ready &&= tag_commit job, job.script_version, job.uuid
      end

      # This should be unnecessary, because API server does it during
      # job create/update, but it's still not a bad idea to verify the
      # tag is correct before starting the job:
      ready &&= tag_commit job, job.script_version, job.uuid

      # The arvados_sdk_version doesn't support use of arbitrary
      # remote URLs, so the requested version isn't necessarily copied
      # into the internal repository yet.
      if job.arvados_sdk_version
        ready &&= get_commit @arvados_repo_path, job.arvados_sdk_version
        ready &&= tag_commit job, job.arvados_sdk_version, "#{job.uuid}-arvados-sdk"
      end

      if not ready
        fail_job job, "commit not present in internal repository"
        next
      end

      cmd_args += [@crunch_job_bin,
                   '--job-api-token', @authorizations[job.uuid].api_token,
                   '--job', job.uuid,
                   '--git-dir', @arvados_internal]

      if @todo_job_retries.include?(job.uuid)
        cmd_args << "--force-unlock"
      end

      $stderr.puts "dispatch: #{cmd_args.join ' '}"

      begin
        i, o, e, t = Open3.popen3(*cmd_args)
      rescue
        $stderr.puts "dispatch: popen3: #{$!}"
        sleep 1
        next
      end

      $stderr.puts "dispatch: job #{job.uuid}"
      start_banner = "dispatch: child #{t.pid} start #{LogTime.now}"
      $stderr.puts start_banner

      @running[job.uuid] = {
        stdin: i,
        stdout: o,
        stderr: e,
        wait_thr: t,
        job: job,
        buf: {stderr: '', stdout: ''},
        started: false,
        sent_int: 0,
        job_auth: @authorizations[job.uuid],
        stderr_buf_to_flush: '',
        stderr_flushed_at: Time.new(0),
        bytes_logged: 0,
        events_logged: 0,
        log_throttle_is_open: true,
        log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
        log_throttle_bytes_so_far: 0,
        log_throttle_lines_so_far: 0,
        log_throttle_bytes_skipped: 0,
      }
      i.close
      @todo_job_retries.delete(job.uuid)
      update_node_status
    end
  end
  # Test for hard cap on total output and for log throttling. Returns whether
  # the log line should go to output or not. Modifies "line" in place to
  # replace it with an error if a logging limit is tripped.
  def rate_limit running_job, line
    message = false
    linesize = line.size
    if running_job[:log_throttle_is_open]
      running_job[:log_throttle_lines_so_far] += 1
      running_job[:log_throttle_bytes_so_far] += linesize
      running_job[:bytes_logged] += linesize

      if (running_job[:bytes_logged] >
          Rails.configuration.crunch_limit_log_bytes_per_job)
        message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated.\n"
        running_job[:log_throttle_reset_time] = Time.now + 100.years
        running_job[:log_throttle_is_open] = false

      elsif (running_job[:log_throttle_bytes_so_far] >
             Rails.configuration.crunch_log_throttle_bytes)
        remaining_time = running_job[:log_throttle_reset_time] - Time.now
        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds.\n"
        running_job[:log_throttle_is_open] = false

      elsif (running_job[:log_throttle_lines_so_far] >
             Rails.configuration.crunch_log_throttle_lines)
        remaining_time = running_job[:log_throttle_reset_time] - Time.now
        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines). Logging will be silenced for the next #{remaining_time.round} seconds.\n"
        running_job[:log_throttle_is_open] = false
      end
    end

    if not running_job[:log_throttle_is_open]
      # Don't log anything if any limit has been exceeded. Just count lossage.
      running_job[:log_throttle_bytes_skipped] += linesize
    end

    if message
      # Yes, write to logs, but use our "rate exceeded" message
      # instead of the log message that exceeded the limit.
      line.replace message
      true
    else
      running_job[:log_throttle_is_open]
    end
  end
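
  # Worked example (config values hypothetical, not shipped defaults): with
  # crunch_log_throttle_bytes = 65536 and crunch_log_throttle_period = 60, a
  # job that writes 100 KiB of stderr within one minute is silenced until
  # read_pipes resets the counters at the next one-minute checkpoint; with
  # crunch_limit_log_bytes_per_job = 67108864, passing 64 MiB of total
  # output truncates the log for the rest of the job's lifetime.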
  def read_pipes
    @running.each do |job_uuid, j|
      now = Time.now
      if now > j[:log_throttle_reset_time]
        # It has been more than throttle_period seconds since the last
        # checkpoint, so reset the throttle.
        if j[:log_throttle_bytes_skipped] > 0
          message = "#{job_uuid} ! Skipped #{j[:log_throttle_bytes_skipped]} bytes of log"
          $stderr.puts message
          j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"
        end

        j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
        j[:log_throttle_bytes_so_far] = 0
        j[:log_throttle_lines_so_far] = 0
        j[:log_throttle_bytes_skipped] = 0
        j[:log_throttle_is_open] = true
      end

      j[:buf].each do |stream, streambuf|
        # Read some data from the child stream
        buf = ''
        begin
          # It's important to use a big enough buffer here. When we're
          # being flooded with logs, we must read and discard many
          # bytes at once. Otherwise, we can easily peg a CPU with
          # time-checking and other loop overhead. (Quick tests show a
          # 1MiB buffer working 2.5x as fast as a 64 KiB buffer.)
          #
          # So don't reduce this buffer size!
          buf = j[stream].read_nonblock(2**20)
        rescue Errno::EAGAIN, EOFError
        end

        # Short circuit the counting code if we're just going to throw
        # away the data anyway.
        if not j[:log_throttle_is_open]
          j[:log_throttle_bytes_skipped] += streambuf.size + buf.size
          streambuf.replace ''
          next
        elsif buf == ''
          next
        end

        # Append to incomplete line from previous read, if any
        streambuf << buf

        bufend = ''
        streambuf.each_line do |line|
          if not line.end_with? $/
            if line.size > Rails.configuration.crunch_log_throttle_bytes
              # Without a limit here, we'll use 2x an arbitrary amount
              # of memory, and waste a lot of time copying strings
              # around, all without providing any feedback to anyone
              # about what's going on _or_ hitting any of our throttle
              # limits.
              #
              # Here we leave "line" alone, knowing it will never be
              # sent anywhere: rate_limit() will reach
              # crunch_log_throttle_bytes immediately. However, we'll
              # leave [...] in bufend: if the trailing end of the long
              # line does end up getting sent anywhere, it will have
              # some indication that it is incomplete.
              bufend = "[...]"
            else
              # If line length is sane, we'll wait for the rest of the
              # line to appear in the next read_pipes() call.
              bufend = line
              break
            end
          end
          # rate_limit returns true or false as to whether to actually log
          # the line or not. It also modifies "line" in place to replace
          # it with an error if a logging limit is tripped.
          if rate_limit j, line
            $stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
            $stderr.puts line
            pub_msg = "#{LogTime.now} #{line.strip}\n"
            j[:stderr_buf_to_flush] << pub_msg
          end
        end

        # Leave the trailing incomplete line (if any) in streambuf for
        # next time.
        streambuf.replace bufend
      end
      # Flush buffered logs to the logs table, if appropriate. We have
      # to do this even if we didn't collect any new logs this time:
      # otherwise, buffered data older than seconds_between_events
      # won't get flushed until new data arrives.
      write_log j
    end
  end
  def reap_children
    return if 0 == @running.size

    pid_done = nil
    j_done = nil

    if false
      begin
        pid_done = waitpid(-1, Process::WNOHANG | Process::WUNTRACED)
        if pid_done
          j_done = @running.values.
            select { |j| j[:wait_thr].pid == pid_done }.
            first
        end
      rescue SystemCallError
        # I have @running processes but the system reports I have no
        # children. This is likely to happen repeatedly if it happens at
        # all; I will log this no more than once per child process I
        # start.
        if 0 < @running.select { |uuid, j| j[:warned_waitpid_error].nil? }.size
          children = @running.values.collect { |j| j[:wait_thr].pid }.join ' '
          $stderr.puts "dispatch: IPC bug: waitpid() error (#{$!}), but I have children #{children}"
        end
        @running.each do |uuid, j| j[:warned_waitpid_error] = true end
      end
    else
      @running.each do |uuid, j|
        if j[:wait_thr].status == false
          pid_done = j[:wait_thr].pid
          j_done = j
        end
      end
    end

    return if !pid_done
    job_done = j_done[:job]

    # Ensure every last drop of stdout and stderr is consumed.
    read_pipes
    # Reset flush timestamp to make sure log gets written.
    j_done[:stderr_flushed_at] = Time.new(0)
    # Write any remaining logs.
    write_log j_done

    j_done[:buf].each do |stream, streambuf|
      if streambuf != ''
        $stderr.puts streambuf + "\n"
      end
    end

    # Wait the thread (returns a Process::Status)
    exit_status = j_done[:wait_thr].value.exitstatus
    exit_tempfail = exit_status == EXIT_TEMPFAIL

    $stderr.puts "dispatch: child #{pid_done} exit #{exit_status}"
    $stderr.puts "dispatch: job #{job_done.uuid} end"

    jobrecord = Job.find_by_uuid(job_done.uuid)

    if exit_status == EXIT_RETRY_UNLOCKED
      # The job failed because all of the nodes allocated to it
      # failed. Only this crunch-dispatch process can retry the job:
      # it's already locked, and there's no way to put it back in the
      # Queued state. Put it in our internal todo list unless the job
      # has failed this way excessively.
      @job_retry_counts[jobrecord.uuid] += 1
      exit_tempfail = @job_retry_counts[jobrecord.uuid] <= RETRY_UNLOCKED_LIMIT
      if exit_tempfail
        @todo_job_retries[jobrecord.uuid] = jobrecord
      else
        $stderr.puts("dispatch: job #{jobrecord.uuid} exceeded node failure retry limit -- giving up")
      end
    end

    if !exit_tempfail
      @job_retry_counts.delete(jobrecord.uuid)
      if jobrecord.state == "Running"
        # Apparently there was an unhandled error. That could potentially
        # include "all allocated nodes failed" when we don't want to retry
        # because the job has already been retried RETRY_UNLOCKED_LIMIT
        # times. Fail the job.
        jobrecord.state = "Failed"
        if not jobrecord.save
          $stderr.puts "dispatch: jobrecord.save failed"
        end
      end
    else
      # If the job failed to run due to an infrastructure
      # issue with crunch-job or slurm, we want the job to stay in the
      # queue. If crunch-job exited after losing a race to another
      # crunch-job process, it exits 75 and we should leave the job
      # record alone so the winner of the race can do its thing.
      # If crunch-job exited after all of its allocated nodes failed,
      # it exits 93, and we want to retry it later (see the
      # EXIT_RETRY_UNLOCKED `if` block).
      #
      # There is still an unhandled race condition: if our crunch-job
      # process is about to lose a race with another crunch-job
      # process, but crashes before getting to its "exit 75" (for
      # example, "cannot fork" or "cannot reach API server"), then we
      # will assume incorrectly that it's our process's fault that
      # jobrecord.started_at is non-nil, and mark the job as failed
      # even though the winner of the race is probably still doing
      # fine.
    end

    # Invalidate the per-job auth token, unless the job is still queued and we
    # might want to try it again.
    if jobrecord.state != "Queued" and !@todo_job_retries.include?(jobrecord.uuid)
      j_done[:job_auth].update_attributes expires_at: Time.now
    end

    @running.delete job_done.uuid
  end
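
  # Exit-code summary for the child crunch-job process: 75 (EXIT_TEMPFAIL)
  # leaves the job record untouched; 93 (EXIT_RETRY_UNLOCKED) queues an
  # internal retry, up to RETRY_UNLOCKED_LIMIT times; anything else while
  # the job record is still "Running" marks it "Failed".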
  def update_pipelines
    expire_tokens = @pipe_auth_tokens.dup
    @todo_pipelines.each do |p|
      pipe_auth = (@pipe_auth_tokens[p.uuid] ||= ApiClientAuthorization.
                   create(user: User.where('uuid=?', p.modified_by_user_uuid).first,
                          api_client_id: 0))
      puts `export ARVADOS_API_TOKEN=#{pipe_auth.api_token} && arv-run-pipeline-instance --run-pipeline-here --no-wait --instance #{p.uuid}`
      expire_tokens.delete p.uuid
    end

    expire_tokens.each do |k, v|
      v.update_attributes expires_at: Time.now
      @pipe_auth_tokens.delete k
    end
  end
  def run
    act_as_system_user
    $stderr.puts "dispatch: ready"
    while !$signal[:term] or @running.size > 0
      read_pipes
      if $signal[:term]
        @running.each do |uuid, j|
          if !j[:started] and j[:sent_int] < 2
            begin
              Process.kill 'INT', j[:wait_thr].pid
            rescue Errno::ESRCH
              # No such pid = race condition + desired result is
              # already achieved
            end
            j[:sent_int] += 1
          end
        end
      else
        refresh_todo unless did_recently(:refresh_todo, 1.0)
        update_node_status unless did_recently(:update_node_status, 1.0)
        unless @todo.empty? or did_recently(:start_jobs, 1.0) or $signal[:term]
          start_jobs
        end
        unless (@todo_pipelines.empty? and @pipe_auth_tokens.empty?) or did_recently(:update_pipelines, 5.0)
          update_pipelines
        end
      end
      reap_children
      select(@running.values.collect { |j| [j[:stdout], j[:stderr]] }.flatten,
             [], [], 1)
    end
    # If there are jobs we wanted to retry, we have to mark them as failed now.
    # Other dispatchers can't pick them up because we hold their lock.
    @todo_job_retries.each_key do |job_uuid|
      job = Job.find_by_uuid(job_uuid)
      if job.state == "Running"
        fail_job(job, "crunch-dispatch was stopped during job's tempfail retry loop")
      end
    end
  end
  protected

  def did_recently(thing, min_interval)
    if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
      @did_recently[thing] = Time.now
      false
    else
      true
    end
  end
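
  # Note the inverted sense: did_recently(:thing, 1.0) returns false (and
  # records the timestamp) when the action has NOT been done within the
  # last interval, so callers write e.g.
  #   refresh_todo unless did_recently(:refresh_todo, 1.0)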
  # Send a message to the log table. We want these records to be transient.
  def write_log running_job
    return if running_job[:stderr_buf_to_flush] == ''

    # Send out a log event if the buffer size exceeds the bytes-per-event
    # limit, or if it has been at least crunch_log_seconds_between_events
    # seconds since the last flush.
    if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
      begin
        log = Log.new(object_uuid: running_job[:job].uuid,
                      event_type: 'stderr',
                      owner_uuid: running_job[:job].owner_uuid,
                      properties: {"text" => running_job[:stderr_buf_to_flush]})
        log.save!
        running_job[:events_logged] += 1
      rescue => exception
        $stderr.puts "Failed to write logs"
        $stderr.puts exception.backtrace
      end
      running_job[:stderr_buf_to_flush] = ''
      running_job[:stderr_flushed_at] = Time.now
    end
  end
end
# This is how crunch-job child procs know where the "refresh" trigger file is.
ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger

# If salloc can't allocate resources immediately, make it use our temporary
# failure exit code. This ensures crunch-dispatch won't mark a job failed
# because of an issue with node allocation. This often happens when
# another dispatcher wins the race to allocate nodes.
ENV["SLURM_EXIT_IMMEDIATE"] = Dispatcher::EXIT_TEMPFAIL.to_s

Dispatcher.new.run