  include ApplicationHelper
  EXIT_RETRY_UNLOCKED = 93
  RETRY_UNLOCKED_LIMIT = 3
      self.utc.strftime "%Y-%m-%d_%H:%M:%S"
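      # Renders as, e.g., "2015-07-28_14:31:59" (UTC); this is the
      # timestamp format used in the dispatch log lines below.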
    @crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
    if @crunch_job_bin.empty?
      raise "No CRUNCH_JOB_BIN env var, and crunch-job not in path."
    @docker_bin = ENV['CRUNCH_JOB_DOCKER_BIN']
    @arvados_internal = Rails.configuration.git_internal_dir
    if not File.exist? @arvados_internal
      $stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
      raise "No internal git repository available" unless ($? == 0)
    @repo_root = Rails.configuration.git_repositories_dir
    @arvados_repo_path = Repository.where(name: "arvados").first.server_path
    @pipe_auth_tokens = {}
    @todo_job_retries = {}
    @job_retry_counts = Hash.new(0)
    return act_as_system_user
    @todo = @todo_job_retries.values + Job.queue.select(&:repository)
    if @runoptions[:pipelines]
      @todo_pipelines = PipelineInstance.queue
  def each_slurm_line(cmd, outfmt, max_fields=nil)
    max_fields ||= outfmt.split(":").size
    max_fields += 1  # To accommodate the node field we add
    @@slurm_version ||= Gem::Version.new(`sinfo --version`.match(/\b[\d\.]+\b/)[0])
    if Gem::Version.new('2.3') <= @@slurm_version
      `#{cmd} --noheader -o '%n:#{outfmt}'`.each_line do |line|
        yield line.chomp.split(":", max_fields)
      # Expand rows with hostname ranges (like "foo[1-3,5,9-12]:idle")
      # into multiple rows with one hostname each.
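      # For example, "foo[1-3,5]:idle" becomes four rows, one per
      # hostname: foo1, foo2, foo3, and foo5, each paired with "idle".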
      `#{cmd} --noheader -o '%N:#{outfmt}'`.each_line do |line|
        tokens = line.chomp.split(":", max_fields)
        if (re = tokens[0].match /^(.*?)\[([-,\d]+)\]$/)
          re[2].split(",").each do |range|
            range = range.split("-").collect(&:to_i)
            (range[0]..range[-1]).each do |n|
              yield [re[1] + n.to_s] + tokens
    each_slurm_line("sinfo", "%t") do |hostname, state|
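      # sinfo's %t prints each node's state in compact form, e.g. "idle",
      # "alloc", or "down", with a "*" suffix when SLURM considers the
      # node unresponsive.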
      # Treat nodes in idle* state as down, because the * means that slurm
      # hasn't been able to communicate with them recently.
      state.sub!(/^idle\*/, "down")
      state.sub!(/\W+$/, "")
      state = "down" unless %w(idle alloc down).include?(state)
      slurm_nodes[hostname] = {state: state, job: nil}
    each_slurm_line("squeue", "%j") do |hostname, job_uuid|
      slurm_nodes[hostname][:job] = job_uuid if slurm_nodes[hostname]
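      # squeue's %j prints the job name. crunch-dispatch names its SLURM
      # jobs after the Arvados job UUID (see --job-name in start_jobs),
      # so this records which job each busy node is working on.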
  def update_node_status
    return unless Server::Application.config.crunch_job_wrapper.to_s.match /^slurm/
    slurm_status.each_pair do |hostname, slurmdata|
      next if @node_state[hostname] == slurmdata
        node = Node.where('hostname=?', hostname).order(:last_ping_at).last
          $stderr.puts "dispatch: update #{hostname} state to #{slurmdata}"
          node.info["slurm_state"] = slurmdata[:state]
          node.job_uuid = slurmdata[:job]
            @node_state[hostname] = slurmdata
            $stderr.puts "dispatch: failed to update #{node.uuid}: #{node.errors.messages}"
        elsif slurmdata[:state] != 'down'
          $stderr.puts "dispatch: SLURM reports '#{hostname}' is not down, but no node has that name"
        $stderr.puts "dispatch: error updating #{hostname} node status: #{error}"
  def positive_int(raw_value, default=nil)
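    # Coerce raw_value to an integer; the value is returned when it is
    # positive, otherwise default (e.g. positive_int("8") => 8,
    # positive_int(nil, 4) => 4), judging from the call sites below.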
    value = begin raw_value.to_i rescue 0 end
  NODE_CONSTRAINT_MAP = {
    # Map Job runtime_constraints keys to the corresponding Node info key.
    'min_ram_mb_per_node' => 'total_ram_mb',
    'min_scratch_mb_per_node' => 'total_scratch_mb',
    'min_cores_per_node' => 'total_cpu_cores',
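    # e.g. a job with runtime_constraints {"min_ram_mb_per_node" => 8192}
    # only matches nodes whose properties report total_ram_mb >= 8192
    # (enforced by need_procs in nodes_available_for_job_now).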
  def nodes_available_for_job_now(job)
    # Find Nodes that satisfy a Job's runtime constraints (by building
    # a list of Procs and using them to test each Node). If there are
    # enough to run the Job, return an array of their names.
    # Otherwise, return nil.
    need_procs = NODE_CONSTRAINT_MAP.each_pair.map do |job_key, node_key|
        positive_int(node.properties[node_key], 0) >=
          positive_int(job.runtime_constraints[job_key], 0)
    min_node_count = positive_int(job.runtime_constraints['min_nodes'], 1)
    Node.all.select do |node|
      node.info['slurm_state'] == 'idle'
    end.sort_by do |node|
      # Prefer nodes with no price, then cheap nodes, then expensive nodes
      node.properties['cloud_node']['price'].to_f rescue 0
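      # (The rescue means nodes with no cloud_node/price property sort
      # first, as price 0.)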
      if need_procs.select { |need_proc| not need_proc.call(node) }.any?
        # At least one runtime constraint is not satisfied by this node
      if usable_nodes.count >= min_node_count
        return usable_nodes.map { |node| node.hostname }
  def nodes_available_for_job(job)
    # Check if there are enough idle nodes with the Job's minimum
    # hardware requirements to run it. If so, return an array of
    # their names. If not, up to once per hour, signal start_jobs to
    # hold off launching Jobs. This delay is meant to give the Node
    # Manager an opportunity to make new resources available for new
    # Jobs.
    # The exact timing parameters here might need to be adjusted for
    # the best balance between helping the longest-waiting Jobs run,
    # and making efficient use of immediately available resources.
    # These are all just first efforts until we have more data to work
    # with.
    nodelist = nodes_available_for_job_now(job)
    if nodelist.nil? and not did_recently(:wait_for_available_nodes, 3600)
      $stderr.puts "dispatch: waiting for nodes for #{job.uuid}"
      @node_wait_deadline = Time.now + 5.minutes
  def fail_job job, message
    $stderr.puts "dispatch: #{job.uuid}: #{message}"
      Log.new(object_uuid: job.uuid,
              event_type: 'dispatch',
              owner_uuid: job.owner_uuid,
              properties: {"text" => message}).save!
      $stderr.puts "dispatch: log.create failed"
      job.lock @authorizations[job.uuid].user.uuid
        $stderr.puts "dispatch: save failed setting job #{job.uuid} to failed"
    rescue ArvadosModel::AlreadyLockedError
      $stderr.puts "dispatch: tried to mark job #{job.uuid} as failed but it was already locked by someone else"
  def stdout_s(cmd_a, opts={})
    IO.popen(cmd_a, "r", opts) do |pipe|
      return pipe.read.chomp
    ["git", "--git-dir=#{@arvados_internal}"] + cmd_a
  def get_authorization(job)
    if @authorizations[job.uuid] and
        @authorizations[job.uuid].user.uuid != job.modified_by_user_uuid
      # We already made a token for this job, but we need a new one
      # because modified_by_user_uuid has changed (the job will run
      # as a different user).
      @authorizations[job.uuid].update_attributes expires_at: Time.now
      @authorizations[job.uuid] = nil
    if not @authorizations[job.uuid]
      auth = ApiClientAuthorization.
        new(user: User.where('uuid=?', job.modified_by_user_uuid).first,
        $stderr.puts "dispatch: auth.save failed for #{job.uuid}"
        @authorizations[job.uuid] = auth
    @authorizations[job.uuid]
  def internal_repo_has_commit? sha1
    if (not @fetched_commits[sha1] and
        sha1 == stdout_s(git_cmd("rev-list", "-n1", sha1), err: "/dev/null") and
      @fetched_commits[sha1] = true
    return @fetched_commits[sha1]
  def get_commit src_repo, sha1
    return true if internal_repo_has_commit? sha1
    # commit does not exist in internal repository, so import the
    # source repository using git fetch-pack
    cmd = git_cmd("fetch-pack", "--no-progress", "--all", src_repo)
    $stderr.puts "dispatch: #{cmd}"
    $stderr.puts(stdout_s(cmd))
    @fetched_commits[sha1] = ($? == 0)
  def tag_commit(commit_hash, tag_name)
    # @git_tags[T]==V if we know commit V has been tagged T in the
    # arvados_internal repository.
    if not @git_tags[tag_name]
      cmd = git_cmd("tag", tag_name, commit_hash)
      $stderr.puts "dispatch: #{cmd}"
      $stderr.puts(stdout_s(cmd, err: "/dev/null"))
        # git tag failed. This may be because the tag already exists, so check for that.
        tag_rev = stdout_s(git_cmd("rev-list", "-n1", tag_name))
          # We got a revision back
          if tag_rev != commit_hash
            # Uh oh, the tag doesn't point to the revision we were expecting.
            # Someone has been monkeying with the job record and/or git.
            fail_job job, "Existing tag #{tag_name} points to commit #{tag_rev} but expected commit #{commit_hash}"
          # we're okay (fall through to setting @git_tags below)
          # git rev-list failed for some reason.
          fail_job job, "'git tag' for #{tag_name} failed but did not find any existing tag using 'git rev-list'"
      # 'git tag' was successful, or there is an existing tag that points to the same revision.
      @git_tags[tag_name] = commit_hash
    elsif @git_tags[tag_name] != commit_hash
      fail_job job, "Existing tag #{tag_name} points to commit #{@git_tags[tag_name]} but this job uses commit #{commit_hash}"
      next if @running[job.uuid]
      case Server::Application.config.crunch_job_wrapper
          # Don't run more than one at a time.
      when :slurm_immediate
        nodelist = nodes_available_for_job(job)
          if Time.now < @node_wait_deadline
        cmd_args = ["salloc",
                    "--job-name=#{job.uuid}",
                    "--nodelist=#{nodelist.join(',')}"]
        raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"
      if Server::Application.config.crunch_job_user
        cmd_args.unshift("sudo", "-E", "-u",
                         Server::Application.config.crunch_job_user,
                         "LD_LIBRARY_PATH=#{ENV['LD_LIBRARY_PATH']}",
                         "PATH=#{ENV['PATH']}",
                         "PERLLIB=#{ENV['PERLLIB']}",
                         "PYTHONPATH=#{ENV['PYTHONPATH']}",
                         "RUBYLIB=#{ENV['RUBYLIB']}",
                         "GEM_PATH=#{ENV['GEM_PATH']}")
      next unless get_authorization job
      ready = internal_repo_has_commit? job.script_version
        # Import the commit from the specified repository into the
        # internal repository. This should have been done already when
        # the job was created/updated; this code is obsolete except to
        # avoid deployment races. Failing the job would be a
        # reasonable thing to do at this point.
        repo = Repository.where(name: job.repository).first
        if repo.nil? or repo.server_path.nil?
          fail_job job, "Repository #{job.repository} not found under #{@repo_root}"
        ready &&= get_commit repo.server_path, job.script_version
        ready &&= tag_commit job.script_version, job.uuid
      # This should be unnecessary, because API server does it during
      # job create/update, but it's still not a bad idea to verify the
      # tag is correct before starting the job:
      ready &&= tag_commit job.script_version, job.uuid
      # The arvados_sdk_version feature doesn't support use of arbitrary
      # remote URLs, so the requested version isn't necessarily copied
      # into the internal repository yet.
      if job.arvados_sdk_version
        ready &&= get_commit @arvados_repo_path, job.arvados_sdk_version
        ready &&= tag_commit job.arvados_sdk_version, "#{job.uuid}-arvados-sdk"
        fail_job job, "commit not present in internal repository"
      cmd_args += [@crunch_job_bin,
                   '--job-api-token', @authorizations[job.uuid].api_token,
                   '--git-dir', @arvados_internal]
        cmd_args += ['--docker-bin', @docker_bin]
      if @todo_job_retries.include?(job.uuid)
        cmd_args << "--force-unlock"
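        # This dispatcher still holds the lock on this job from its
        # previous attempt (see the EXIT_RETRY_UNLOCKED handling in
        # reap_children), so crunch-job is told to proceed despite the
        # existing lock rather than treat it as fatal.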
      $stderr.puts "dispatch: #{cmd_args.join ' '}"
        i, o, e, t = Open3.popen3(*cmd_args)
        $stderr.puts "dispatch: popen3: #{$!}"
      $stderr.puts "dispatch: job #{job.uuid}"
      start_banner = "dispatch: child #{t.pid} start #{LogTime.now}"
      $stderr.puts start_banner
      @running[job.uuid] = {
        buf: {stderr: '', stdout: ''},
        job_auth: @authorizations[job.uuid],
        stderr_buf_to_flush: '',
        stderr_flushed_at: Time.new(0),
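        # Time.new(0) is the distant past, so the first interval check
        # in write_log always triggers a flush.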
        log_throttle_is_open: true,
        log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
        log_throttle_bytes_so_far: 0,
        log_throttle_lines_so_far: 0,
        log_throttle_bytes_skipped: 0,
      @todo_job_retries.delete(job.uuid)
  # Test for hard cap on total output and for log throttling. Returns whether
  # the log line should go to output or not. Modifies "line" in place to
  # replace it with an error if a logging limit is tripped.
  def rate_limit running_job, line
    if running_job[:log_throttle_is_open]
      running_job[:log_throttle_lines_so_far] += 1
      running_job[:log_throttle_bytes_so_far] += linesize
      running_job[:bytes_logged] += linesize
      if (running_job[:bytes_logged] >
          Rails.configuration.crunch_limit_log_bytes_per_job)
        message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated."
        running_job[:log_throttle_reset_time] = Time.now + 100.years
        running_job[:log_throttle_is_open] = false
      elsif (running_job[:log_throttle_bytes_so_far] >
             Rails.configuration.crunch_log_throttle_bytes)
        remaining_time = running_job[:log_throttle_reset_time] - Time.now
        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds.\n"
        running_job[:log_throttle_is_open] = false
      elsif (running_job[:log_throttle_lines_so_far] >
             Rails.configuration.crunch_log_throttle_lines)
        remaining_time = running_job[:log_throttle_reset_time] - Time.now
        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds.\n"
        running_job[:log_throttle_is_open] = false
    if not running_job[:log_throttle_is_open]
      # Don't log anything if any limit has been exceeded. Just count lossage.
      running_job[:log_throttle_bytes_skipped] += linesize
      # Yes, write to logs, but use our "rate exceeded" message
      # instead of the log message that exceeded the limit.
    running_job[:log_throttle_is_open]
    @running.each do |job_uuid, j|
      if now > j[:log_throttle_reset_time]
        # It has been more than throttle_period seconds since the last
        # checkpoint, so reset the throttle.
        if j[:log_throttle_bytes_skipped] > 0
          message = "#{job_uuid} ! Skipped #{j[:log_throttle_bytes_skipped]} bytes of log"
          j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"
        j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
        j[:log_throttle_bytes_so_far] = 0
        j[:log_throttle_lines_so_far] = 0
        j[:log_throttle_bytes_skipped] = 0
        j[:log_throttle_is_open] = true
      j[:buf].each do |stream, streambuf|
        # Read some data from the child stream
          # It's important to use a big enough buffer here. When we're
          # being flooded with logs, we must read and discard many
          # bytes at once. Otherwise, we can easily peg a CPU with
          # time-checking and other loop overhead. (Quick tests show a
          # 1 MiB buffer working 2.5x as fast as a 64 KiB buffer.)
          # So don't reduce this buffer size!
          buf = j[stream].read_nonblock(2**20)
        rescue Errno::EAGAIN, EOFError
        # Short circuit the counting code if we're just going to throw
        # away the data anyway.
        if not j[:log_throttle_is_open]
          j[:log_throttle_bytes_skipped] += streambuf.size + buf.size
        # Append to incomplete line from previous read, if any
        streambuf.each_line do |line|
          if not line.end_with? $/
            if line.size > Rails.configuration.crunch_log_throttle_bytes
              # Without a limit here, we'll use 2x an arbitrary amount
              # of memory, and waste a lot of time copying strings
              # around, all without providing any feedback to anyone
              # about what's going on _or_ hitting any of our throttle
              # limits.
              # Here we leave "line" alone, knowing it will never be
              # sent anywhere: rate_limit() will reach
              # crunch_log_throttle_bytes immediately. However, we'll
              # leave [...] in bufend: if the trailing end of the long
              # line does end up getting sent anywhere, it will have
              # some indication that it is incomplete.
              # If line length is sane, we'll wait for the rest of the
              # line to appear in the next read_pipes() call.
          # rate_limit returns true or false as to whether to actually log
          # the line or not. It also modifies "line" in place to replace
          # it with an error if a logging limit is tripped.
          if rate_limit j, line
            $stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
            pub_msg = "#{LogTime.now} #{line.strip}\n"
            j[:stderr_buf_to_flush] << pub_msg
        # Leave the trailing incomplete line (if any) in streambuf for
        # next time.
        streambuf.replace bufend
      # Flush buffered logs to the logs table, if appropriate. We have
      # to do this even if we didn't collect any new logs this time:
      # otherwise, buffered data older than seconds_between_events
      # won't get flushed until new data arrives.
    return if 0 == @running.size
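        # With WNOHANG, waitpid returns a pid only if a child has already
        # exited (nil otherwise), so this check never blocks the dispatch
        # loop.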
        pid_done = waitpid(-1, Process::WNOHANG | Process::WUNTRACED)
          j_done = @running.values.
            select { |j| j[:wait_thr].pid == pid_done }.
      rescue SystemCallError
        # I have @running processes but system reports I have no
        # children. This is likely to happen repeatedly if it happens at
        # all; I will log this no more than once per child process I
        if 0 < @running.select { |uuid,j| j[:warned_waitpid_error].nil? }.size
          children = @running.values.collect { |j| j[:wait_thr].pid }.join ' '
          $stderr.puts "dispatch: IPC bug: waitpid() error (#{$!}), but I have children #{children}"
        @running.each do |uuid,j| j[:warned_waitpid_error] = true end
      @running.each do |uuid, j|
        if j[:wait_thr].status == false
          pid_done = j[:wait_thr].pid
    job_done = j_done[:job]
    # Ensure every last drop of stdout and stderr is consumed.
    # Reset flush timestamp to make sure log gets written.
    j_done[:stderr_flushed_at] = Time.new(0)
    # Write any remaining logs.
    j_done[:buf].each do |stream, streambuf|
        $stderr.puts streambuf + "\n"
    # Wait on the thread (returns a Process::Status).
    exit_status = j_done[:wait_thr].value.exitstatus
    exit_tempfail = exit_status == EXIT_TEMPFAIL
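    # EXIT_TEMPFAIL is 75 (EX_TEMPFAIL from sysexits.h): the child is
    # signalling a temporary failure, so the job record should be left
    # alone for a later retry rather than marked Failed (see the
    # comments below).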
    $stderr.puts "dispatch: child #{pid_done} exit #{exit_status}"
    $stderr.puts "dispatch: job #{job_done.uuid} end"
    jobrecord = Job.find_by_uuid(job_done.uuid)
    if exit_status == EXIT_RETRY_UNLOCKED
      # The job failed because all of the nodes allocated to it
      # failed. Only this crunch-dispatch process can retry the job:
      # it's already locked, and there's no way to put it back in the
      # Queued state. Put it in our internal todo list unless the job
      # has failed this way excessively.
      @job_retry_counts[jobrecord.uuid] += 1
      exit_tempfail = @job_retry_counts[jobrecord.uuid] <= RETRY_UNLOCKED_LIMIT
        @todo_job_retries[jobrecord.uuid] = jobrecord
        $stderr.puts("dispatch: job #{jobrecord.uuid} exceeded node failure retry limit -- giving up")
      @job_retry_counts.delete(jobrecord.uuid)
      if jobrecord.state == "Running"
        # Apparently there was an unhandled error. That could potentially
        # include "all allocated nodes failed" when we don't want to retry
        # because the job has already been retried RETRY_UNLOCKED_LIMIT
        # times. Fail the job.
        jobrecord.state = "Failed"
        if not jobrecord.save
          $stderr.puts "dispatch: jobrecord.save failed"
      # If the job failed to run due to an infrastructure
      # issue with crunch-job or slurm, we want the job to stay in the
      # queue. If crunch-job exited after losing a race to another
      # crunch-job process, it exits 75 and we should leave the job
      # record alone so the winner of the race can do its thing.
      # If crunch-job exited after all of its allocated nodes failed,
      # it exits 93, and we want to retry it later (see the
      # EXIT_RETRY_UNLOCKED `if` block).
      # There is still an unhandled race condition: If our crunch-job
      # process is about to lose a race with another crunch-job
      # process, but crashes before getting to its "exit 75" (for
      # example, "cannot fork" or "cannot reach API server") then we
      # will assume incorrectly that it's our process's fault that
      # jobrecord.started_at is non-nil, and mark the job as failed
      # even though the winner of the race is probably still doing
      # fine.
    # Invalidate the per-job auth token, unless the job is still queued and we
    # might want to try it again.
    if jobrecord.state != "Queued" and !@todo_job_retries.include?(jobrecord.uuid)
      j_done[:job_auth].update_attributes expires_at: Time.now
    @running.delete job_done.uuid
    expire_tokens = @pipe_auth_tokens.dup
    @todo_pipelines.each do |p|
      pipe_auth = (@pipe_auth_tokens[p.uuid] ||= ApiClientAuthorization.
                   create(user: User.where('uuid=?', p.modified_by_user_uuid).first,
      puts `export ARVADOS_API_TOKEN=#{pipe_auth.api_token} && arv-run-pipeline-instance --run-pipeline-here --no-wait --instance #{p.uuid}`
      expire_tokens.delete p.uuid
    expire_tokens.each do |k, v|
      v.update_attributes expires_at: Time.now
      @pipe_auth_tokens.delete k
    (argv.any? ? argv : ['--jobs', '--pipelines']).each do |arg|
        @runoptions[:jobs] = true
        @runoptions[:pipelines] = true
        abort "Unrecognized command line option '#{arg}'"
    if not (@runoptions[:jobs] or @runoptions[:pipelines])
      abort "Nothing to do. Please specify at least one of: --jobs, --pipelines."
    # We want files written by crunch-dispatch to be writable by other
    # processes with the same GID, see bug #7228
    # This is how crunch-job child procs know where the "refresh"
    # trigger file is.
    ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger
    # If salloc can't allocate resources immediately, make it use our
    # temporary failure exit code. This ensures crunch-dispatch won't
    # mark a job failed because of an issue with node allocation.
    # This often happens when another dispatcher wins the race to
    # allocate nodes.
    ENV["SLURM_EXIT_IMMEDIATE"] = CrunchDispatch::EXIT_TEMPFAIL.to_s
    if ENV["CRUNCH_DISPATCH_LOCKFILE"]
      lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
      lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
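      # A non-blocking exclusive lock: flock returns false instead of
      # waiting when another process already holds the lock, so a second
      # dispatcher started with the same lockfile aborts immediately.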
      unless lockfile.flock File::LOCK_EX|File::LOCK_NB
        abort "Lock unavailable on #{lockfilename} - exit"
    %w{TERM INT}.each do |sig|
        $stderr.puts "Received #{signame} signal"
        @signal[:term] = true
    User.first.group_permissions
    $stderr.puts "dispatch: ready"
    while !@signal[:term] or @running.size > 0
        @running.each do |uuid, j|
          if !j[:started] and j[:sent_int] < 2
              Process.kill 'INT', j[:wait_thr].pid
              # No such pid = race condition + desired result is
              # already achieved
        refresh_todo unless did_recently(:refresh_todo, 1.0)
        update_node_status unless did_recently(:update_node_status, 1.0)
        unless @todo.empty? or did_recently(:start_jobs, 1.0) or @signal[:term]
        unless (@todo_pipelines.empty? and @pipe_auth_tokens.empty?) or did_recently(:update_pipelines, 5.0)
      select(@running.values.collect { |j| [j[:stdout], j[:stderr]] }.flatten,
    # If there are jobs we wanted to retry, we have to mark them as failed now.
    # Other dispatchers can't pick them up because we hold their lock.
    @todo_job_retries.each_key do |job_uuid|
      job = Job.find_by_uuid(job_uuid)
      if job.state == "Running"
        fail_job(job, "crunch-dispatch was stopped during job's tempfail retry loop")
  def did_recently(thing, min_interval)
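    # Returns true (and records the time) at most once per min_interval
    # seconds for a given `thing`; e.g. did_recently(:refresh_todo, 1.0)
    # is true at most once per second.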
    if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
      @did_recently[thing] = Time.now
  # Send the message to the log table. We want these records to be transient.
  def write_log running_job
    return if running_job[:stderr_buf_to_flush] == ''
    # Send out a log event if the buffer size exceeds the bytes-per-event
    # limit, or if it has been at least crunch_log_seconds_between_events
    # seconds since the last flush.
    if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
        log = Log.new(object_uuid: running_job[:job].uuid,
                      event_type: 'stderr',
                      owner_uuid: running_job[:job].owner_uuid,
                      properties: {"text" => running_job[:stderr_buf_to_flush]})
        running_job[:events_logged] += 1
        $stderr.puts "Failed to write logs"
        $stderr.puts exception.backtrace
      running_job[:stderr_buf_to_flush] = ''
      running_job[:stderr_flushed_at] = Time.now