@new_resource_attrs ||= params[model_class.to_s.underscore.singularize]
@new_resource_attrs ||= {}
@new_resource_attrs.reject! { |k,v| k.to_s == 'uuid' }
- @object ||= model_class.new @new_resource_attrs
- @object.save!
- show
+ @object ||= model_class.new @new_resource_attrs, params["options"]
+ if @object.save
+ respond_to do |f|
+ f.json { render json: @object.attributes.merge(href: url_for(@object)) }
+ f.html {
+ redirect_to @object
+ }
+ f.js { render }
+ end
+ else
+ self.render_error status: 422
+ end
end
def destroy
@svg = ProvenanceHelper::create_provenance_graph nodes, "provenance_svg", {
:request => request,
- :all_script_parameters => true,
+ :all_script_parameters => true,
:script_version_nodes => true}
end
end
end
+  # Ask the API server to cancel this job (delegates to @object.cancel),
+  # then redirect back to the job's show page.
+  def cancel
+    @object.cancel
+    redirect_to @object
+  end
+
# Render the show page; the provenance graph for this object is built
# up front (generate_provenance) so the view can embed it.
def show
  generate_provenance([@object])
end
end
def show_pane_list
- %w(Attributes Provenance Metadata JSON API)
+ %w(Status Attributes Provenance Metadata JSON API)
end
end
class ArvadosBase < ActiveRecord::Base
self.abstract_class = true
attr_accessor :attribute_sortkey
+ attr_accessor :create_params
def self.arvados_api_client
ArvadosApiClient.new_or_current
end
end
- def initialize raw_params={}
+ def initialize raw_params={}, create_params={}
super self.class.permit_attribute_params(raw_params)
+ @create_params = create_params
@attribute_sortkey ||= {
'id' => nil,
'name' => '000',
ActionController::Parameters.new(raw_params).permit!
end
- def self.create raw_params={}
- super(permit_attribute_params(raw_params))
+ def self.create raw_params={}, create_params={}
+ x = super(permit_attribute_params(raw_params))
+ x.create_params = create_params
+ x
end
def update_attributes raw_params={}
obdata.delete :uuid
resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
else
+ postdata.merge!(@create_params) if @create_params
resp = arvados_api_client.api(self.class, '', postdata)
end
return false if !resp[:etag] || !resp[:uuid]
# NOTE(review): returns false so the generic "create new object" UI is
# presumably not offered for this class — confirm against callers of
# creatable? elsewhere in the workbench.
def self.creatable?
  false
end
+
+  # Ask the API server to cancel this job via its jobs/<uuid>/cancel action.
+  # NOTE(review): the first argument here is a path string, while other
+  # visible call sites pass (model_class, action_path) — confirm
+  # arvados_api_client.api accepts both forms.
+  def cancel
+    arvados_api_client.api "jobs/#{self.uuid}/", "cancel", {}
+  end
end
--- /dev/null
+
+<div class="pull-right">
+ <% if @object.running %>
+ <%= form_tag "/jobs/#{@object.uuid}/cancel", style: "display:inline; padding-left: 1em" do |f| %>
+ <%= button_tag "Cancel running job", {class: 'btn btn-danger', id: "cancel-job-button"} %>
+ <% end %>
+ <% else %>
+ Re-run job using script version:
+ <%= form_tag '/jobs', style: "display:inline; padding-left: 1em" do |f| %>
+ <% [:script, :script_version, :repository, :output_is_persistent, :supplied_script_version, :nondeterministic].each do |d| %>
+ <%= hidden_field :job, d, :value => @object[d] %>
+ <% end %>
+ <% [:script_parameters, :runtime_constraints].each do |d| %>
+ <%= hidden_field :job, d, :value => JSON.dump(@object[d]) %>
+ <% end %>
+ <%= button_tag "Same as this run", {class: 'btn btn-primary', id: "re-run-same-job-button"} %>
+ <% end %>
+ <% if !@object.supplied_script_version.nil? and !@object.supplied_script_version.empty? and @object.script_version != @object.supplied_script_version%>
+ <%= form_tag '/jobs', style: "display:inline" do |f| %>
+ <% [:script, :repository, :output_is_persistent, :supplied_script_version, :nondeterministic].each do |d| %>
+ <%= hidden_field :job, d, :value => @object[d] %>
+ <% end %>
+ <%= hidden_field :job, :script_version, :value => @object[:supplied_script_version] %>
+ <% [:script_parameters, :runtime_constraints].each do |d| %>
+ <%= hidden_field :job, d, :value => JSON.dump(@object[d]) %>
+ <% end %>
+ <%= button_tag "Latest (#{@object.repository}/#{@object.supplied_script_version})", {class: 'btn btn-primary', id: "re-run-latest-job-button"} %>
+ <% end %>
+ <% end %>
+<% end %>
+</div>
+
+<table class="table pipeline-components-table">
+ <colgroup>
+ <col style="width: 20%" />
+ <col style="width: 24%" />
+ <col style="width: 12%" />
+ <col style="width: 45%" />
+ </colgroup>
+ <thead>
+ <tr><th>
+ script, version
+ </th><th>
+ progress
+ <%# format:'js' here helps browsers avoid using the cached js
+ content in html context (e.g., duplicate tab -> see
+ javascript) %>
+ <%= link_to '(refresh)', {format: :js}, {class: 'refresh hide', remote: true, method: 'get'} %>
+ </th>
+ <th></th>
+ <th>
+ output
+ </th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>
+ <%= @object[:script] %><br>
+ <span class="deemphasize"><%= @object[:script_version] %></span>
+ </td>
+ <td>
+ <%= render partial: 'job_progress', locals: {:j => @object} %>
+ <% if @object.running == false %>
+ <% if @object[:job].andand[:uuid] %>
+ <span class="deemphasize">
+ <%= link_to("..."+@object[:job][:uuid].last(15), job_url(id: @object[:job][:uuid])) %>
+ </span>
+
+ <% current_job = @object %>
+ <% if current_job.andand[:log] %>
+ <% fixup = /([a-f0-9]{32}\+\d+)(\+?.*)/.match(current_job[:log])%>
+ <% Collection.limit(1).where(uuid: fixup[1]).each do |c| %>
+ <% c.files.each do |file| %>
+ <br/><span class="deemphasize">
+ <a href="<%= collection_path(current_job[:log]) %>/<%= file[1] %>?disposition=inline&size=<%= file[2] %>">log</a>
+ </span>
+ <% end %>
+ <% end %>
+ <% end %>
+ <% end %>
+ <% end %>
+ </td><td>
+ <%= render(partial: 'job_status_label',
+ locals: { :j => @object }) %>
+ </td><td>
+ <%= link_to_if_arvados_object @object[:output], {:thumbnail => true} %>
+ </td>
+ </tr>
+ <tfoot>
+ <tr><td colspan="5"></td></tr>
+ </tfoot>
+</table>
resources :virtual_machines
resources :authorized_keys
resources :job_tasks
- resources :jobs
+ resources :jobs do
+ post 'cancel', :on => :member
+ end
match '/logout' => 'sessions#destroy', via: [:get, :post]
get '/logged_out' => 'sessions#index'
resources :users do
failed = 0
@components.each do |cname, c|
if c[:job]
- if c[:job][:finished_at]
+ if c[:job][:finished_at] or c[:job][:cancelled_at] or (c[:job][:running] == false and c[:job][:success] == false)
ended += 1
if c[:job][:success] == true
succeeded += 1
$command .= "&& exec arv-mount --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
if ($docker_image)
{
- $command .= "$docker_bin run -i -a stdin -a stdout -a stderr ";
+ $command .= "crunchstat -cgroup-parent=/sys/fs/cgroup/lxc -cgroup-cid=$ENV{TASK_WORK}/docker.cid -poll=1000 ";
+ $command .= "$docker_bin run -i -a stdin -a stdout -a stderr -cidfile=$ENV{TASK_WORK}/docker.cid ";
# Dynamically configure the container to use the host system as its
# DNS server. Get the host's global addresses from the ip command,
# and turn them into docker --dns options using gawk.
}
while (my ($env_key, $env_val) = each %ENV)
{
- $command .= "-e \Q$env_key=$env_val\E ";
+ if ($env_key =~ /^(JOB|TASK)_/) {
+ $command .= "-e \Q$env_key=$env_val\E ";
+ }
}
$command .= "\Q$docker_image\E ";
+ } else {
+ $command .= "crunchstat -cgroup-path=/sys/fs/cgroup "
}
$command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
my @execargs = ('bash', '-c', $command);
if !@object
return render_not_found
end
- @object.ping({ ip: params[:local_ipv4] || request.env['REMOTE_ADDR'],
- ping_secret: params[:ping_secret],
- ec2_instance_id: params[:instance_id] })
+ ping_data = {
+ ip: params[:local_ipv4] || request.env['REMOTE_ADDR'],
+ ec2_instance_id: params[:instance_id]
+ }
+ [:ping_secret, :total_cpu_cores, :total_ram_mb, :total_scratch_mb]
+ .each do |key|
+ ping_data[key] = params[key] if params[key]
+ end
+ @object.ping(ping_data)
if @object.info['ping_secret'] == params[:ping_secret]
render json: @object.as_api_response(:superuser)
else
t.add :dependencies
t.add :nondeterministic
t.add :repository
+ t.add :supplied_script_version
end
def assert_finished
if new_record? or script_version_changed?
sha1 = Commit.find_commit_range(current_user, self.repository, nil, self.script_version, nil)[0] rescue nil
if sha1
+ self.supplied_script_version = self.script_version if self.supplied_script_version.nil? or self.supplied_script_version.empty?
self.script_version = sha1
else
raise ArgumentError.new("Specified script_version does not resolve to a commit")
end
end
+ # Record other basic stats
+ ['total_cpu_cores', 'total_ram_mb', 'total_scratch_mb'].each do |key|
+ if value = (o[key] or o[key.to_sym])
+ self.info[key] = value
+ else
+ self.info.delete(key)
+ end
+ end
+
save!
end
--- /dev/null
+class AddSuppliedScriptVersion < ActiveRecord::Migration
+ def up
+ add_column :jobs, :supplied_script_version, :string
+ end
+
+ def down
+ remove_column :jobs, :supplied_script_version, :string
+ end
+end
#
# It's strongly recommended to check this file into your version control system.
-ActiveRecord::Schema.define(:version => 20140602143352) do
-
+ActiveRecord::Schema.define(:version => 20140602143352) do
create_table "api_client_authorizations", :force => true do |t|
t.string "api_token", :null => false
t.boolean "nondeterministic"
t.string "repository"
t.boolean "output_is_persistent", :default => false, :null => false
+ t.string "supplied_script_version"
end
add_index "jobs", ["created_at"], :name => "index_jobs_on_created_at"
--- /dev/null
+#!/usr/bin/env ruby
+
+if ENV["CRUNCH_DISPATCH_LOCKFILE"]
+ lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
+ lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
+ unless lockfile.flock File::LOCK_EX|File::LOCK_NB
+ abort "Lock unavailable on #{lockfilename} - exit"
+ end
+end
+
+ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
+
+require File.dirname(__FILE__) + '/../config/boot'
+require File.dirname(__FILE__) + '/../config/environment'
+
+# Fail orphaned jobs: any job still marked running whose most recent log
+# entry is more than 300 seconds old is marked cancelled/finished, with
+# success=false unless success was already set.
+# NOTE(review): a running job with NO log entries at all is skipped (f is
+# nil) and will never be reaped here — confirm that is intended.
+def cancel_stale_jobs
+  Job.running.each do |jobrecord|
+    # Most recent log entry for this job, if any.
+    f = Log.where("object_uuid=?", jobrecord.uuid).limit(1).order("created_at desc").first
+    if f
+      age = (Time.now - f.created_at)
+      if age > 300
+        $stderr.puts "dispatch: failing orphan job #{jobrecord.uuid}, last log is #{age} seconds old"
+        # job is marked running, but not known to crunch-dispatcher, and
+        # hasn't produced any log entries for 5 minutes, so mark it as failed.
+        jobrecord.running = false
+        jobrecord.cancelled_at ||= Time.now
+        jobrecord.finished_at ||= Time.now
+        if jobrecord.success.nil?
+          jobrecord.success = false
+        end
+        jobrecord.save!
+      end
+    end
+  end
+end
+
+cancel_stale_jobs
if Server::Application.config.crunch_job_user
cmd_args.unshift("sudo", "-E", "-u",
Server::Application.config.crunch_job_user,
- "PERLLIB=#{ENV['PERLLIB']}")
+ "PATH=#{ENV['PATH']}",
+ "PERLLIB=#{ENV['PERLLIB']}",
+ "PYTHONPATH=#{ENV['PYTHONPATH']}",
+ "RUBYLIB=#{ENV['RUBYLIB']}",
+ "GEM_PATH=#{ENV['GEM_PATH']}")
end
job_auth = ApiClientAuthorization.
info:
:ping_secret: "69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0"
:slurm_state: "idle"
+ total_cpu_cores: 16
assert_not_nil json_response['info']['ping_secret']
end
+  # Hardware stats submitted with a ping should be stored in node.info
+  # and echoed back in the ping response (compared here via .to_i since
+  # the response may carry them as strings).
+  test "ping adds node stats to info" do
+    node = nodes(:idle)
+    post :ping, {
+      id: node.uuid,
+      ping_secret: node.info['ping_secret'],
+      total_cpu_cores: 32,
+      total_ram_mb: 1024,
+      total_scratch_mb: 2048
+    }
+    assert_response :success
+    info = JSON.parse(@response.body)['info']
+    assert_equal(node.info['ping_secret'], info['ping_secret'])
+    assert_equal(32, info['total_cpu_cores'].to_i)
+    assert_equal(1024, info['total_ram_mb'].to_i)
+    assert_equal(2048, info['total_scratch_mb'].to_i)
+  end
end
require 'test_helper'
class NodeTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
+  # Ping the named node fixture as admin, merging ping_data into a
+  # baseline payload containing the correct ping_secret and the node's
+  # IP address.  Returns the node object for further assertions.
+  def ping_node(node_name, ping_data)
+    set_user_from_auth :admin
+    node = nodes(node_name)
+    node.ping({ping_secret: node.info['ping_secret'],
+               ip: node.ip_address}.merge(ping_data))
+    node
+  end
+
+  # Stats included in a ping are recorded on the node.
+  test "pinging a node can add and update stats" do
+    node = ping_node(:idle, {total_cpu_cores: '12', total_ram_mb: '512'})
+    assert_equal(12, node.info['total_cpu_cores'].to_i)
+    assert_equal(512, node.info['total_ram_mb'].to_i)
+  end
+
+  # Stats omitted from a subsequent ping are removed from node.info.
+  test "stats disappear if not in a ping" do
+    node = ping_node(:idle, {total_ram_mb: '256'})
+    refute_includes(node.info, 'total_cpu_cores')
+    assert_equal(256, node.info['total_ram_mb'].to_i)
+  end
end
--- /dev/null
+#! /bin/sh
+
+# Wraps the 'go' executable with some environment setup.  Sets GOPATH, creates
+# 'pkg' and 'bin' directories, automatically installs dependencies, then runs
+# the underlying 'go' executable with any command line parameters provided to
+# the script.
+
+# Directory containing this script, with symlinks resolved; quoted so
+# paths containing whitespace survive word splitting.
+rootdir=$(readlink -f "$(dirname "$0")")
+GOPATH=$rootdir:$rootdir/../../sdk/go:$GOPATH
+export GOPATH
+
+mkdir -p "$rootdir/pkg"
+mkdir -p "$rootdir/bin"
+
+# "$@" forwards each original argument as a separate word; $* would
+# re-split any argument containing spaces.
+go "$@"
--- /dev/null
+package main
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "os/signal"
+ "strings"
+ "syscall"
+ "time"
+)
+
+// ReadLineByLine scans inp one line at a time, sending each line on out,
+// then signals completion by sending true on finish.  The reader is not
+// closed here; the caller retains ownership of inp.
+func ReadLineByLine(inp io.ReadCloser, out chan string, finish chan bool) {
+	s := bufio.NewScanner(inp)
+	for s.Scan() {
+		out <- s.Text()
+	}
+	finish <- true
+}
+
+// OutputChannel multiplexes two string channels onto the process's own
+// stdout and stderr, one line per message.  It returns as soon as either
+// channel is closed (the other channel is then no longer drained).
+func OutputChannel(stdout chan string, stderr chan string) {
+	for {
+		select {
+		case s, ok := <-stdout:
+			if ok {
+				fmt.Fprintln(os.Stdout, s)
+			} else {
+				return
+			}
+		case s, ok := <-stderr:
+			if ok {
+				fmt.Fprintln(os.Stderr, s)
+			} else {
+				return
+			}
+		}
+	}
+}
+
+func PollCgroupStats(cgroup_path string, stderr chan string, poll int64) {
+ //var last_usage int64 = 0
+ var last_user int64 = 0
+ var last_sys int64 = 0
+ var last_cpucount int64 = 0
+
+ type Disk struct {
+ last_read int64
+ next_read int64
+ last_write int64
+ next_write int64
+ }
+
+ disk := make(map[string]*Disk)
+
+ //cpuacct_usage := fmt.Sprintf("%s/cpuacct.usage", cgroup_path)
+ cpuacct_stat := fmt.Sprintf("%s/cpuacct.stat", cgroup_path)
+ blkio_io_service_bytes := fmt.Sprintf("%s/blkio.io_service_bytes", cgroup_path)
+ cpuset_cpus := fmt.Sprintf("%s/cpuset.cpus", cgroup_path)
+ memory_stat := fmt.Sprintf("%s/memory.stat", cgroup_path)
+
+ var elapsed int64 = poll
+
+ for {
+ /*{
+ c, _ := os.Open(cpuacct_usage)
+ b, _ := ioutil.ReadAll(c)
+ var next int64
+ fmt.Sscanf(string(b), "%d", &next)
+ if last_usage != 0 {
+ stderr <- fmt.Sprintf("crunchstat: cpuacct.usage %v", (next-last_usage)/10000000)
+ }
+ //fmt.Printf("usage %d %d %d %d%%\n", last_usage, next, next-last_usage, (next-last_usage)/10000000)
+ last_usage = next
+ c.Close()
+ }*/
+ var cpus int64 = 0
+ {
+ c, _ := os.Open(cpuset_cpus)
+ b, _ := ioutil.ReadAll(c)
+ sp := strings.Split(string(b), ",")
+ for _, v := range sp {
+ var min, max int64
+ n, _ := fmt.Sscanf(v, "%d-%d", &min, &max)
+ if n == 2 {
+ cpus += (max - min) + 1
+ } else {
+ cpus += 1
+ }
+ }
+
+ if cpus != last_cpucount {
+ stderr <- fmt.Sprintf("crunchstat: cpuset.cpus %v", cpus)
+ }
+ last_cpucount = cpus
+
+ c.Close()
+ }
+ if cpus == 0 {
+ cpus = 1
+ }
+ {
+ c, _ := os.Open(cpuacct_stat)
+ b, _ := ioutil.ReadAll(c)
+ var next_user int64
+ var next_sys int64
+ fmt.Sscanf(string(b), "user %d\nsystem %d", &next_user, &next_sys)
+ c.Close()
+
+ if last_user != 0 {
+ user_diff := next_user - last_user
+ sys_diff := next_sys - last_sys
+			// Assume we're reading stats based on 100
+			// jiffies per second. Because the elapsed
+			// time is in milliseconds, we need to boost
+			// that to 1000 jiffies per second, then boost
+			// it by another 100x to get a percentage, then
+			// finally divide by the actual elapsed time
+			// and the number of cpus to get average load
+			// over the polling period.
+ user_pct := (user_diff * 10 * 100) / (elapsed * cpus)
+ sys_pct := (sys_diff * 10 * 100) / (elapsed * cpus)
+
+ stderr <- fmt.Sprintf("crunchstat: cpuacct.stat user %v", user_pct)
+ stderr <- fmt.Sprintf("crunchstat: cpuacct.stat sys %v", sys_pct)
+ }
+
+ /*fmt.Printf("user %d %d %d%%\n", last_user, next_user, next_user-last_user)
+ fmt.Printf("sys %d %d %d%%\n", last_sys, next_sys, next_sys-last_sys)
+ fmt.Printf("sum %d%%\n", (next_user-last_user)+(next_sys-last_sys))*/
+ last_user = next_user
+ last_sys = next_sys
+ }
+ {
+ c, _ := os.Open(blkio_io_service_bytes)
+ b := bufio.NewScanner(c)
+ var device, op string
+ var next int64
+ for b.Scan() {
+ if _, err := fmt.Sscanf(string(b.Text()), "%s %s %d", &device, &op, &next); err == nil {
+ if disk[device] == nil {
+ disk[device] = new(Disk)
+ }
+ if op == "Read" {
+ disk[device].last_read = disk[device].next_read
+ disk[device].next_read = next
+ if disk[device].last_read > 0 {
+ stderr <- fmt.Sprintf("crunchstat: blkio.io_service_bytes %s read %v", device, disk[device].next_read-disk[device].last_read)
+ }
+ }
+ if op == "Write" {
+ disk[device].last_write = disk[device].next_write
+ disk[device].next_write = next
+ if disk[device].last_write > 0 {
+ stderr <- fmt.Sprintf("crunchstat: blkio.io_service_bytes %s write %v", device, disk[device].next_write-disk[device].last_write)
+ }
+ }
+ }
+ }
+ c.Close()
+ }
+
+ {
+ c, _ := os.Open(memory_stat)
+ b := bufio.NewScanner(c)
+ var stat string
+ var val int64
+ for b.Scan() {
+ if _, err := fmt.Sscanf(string(b.Text()), "%s %d", &stat, &val); err == nil {
+ if stat == "rss" {
+ stderr <- fmt.Sprintf("crunchstat: memory.stat rss %v", val)
+ }
+ }
+ }
+ c.Close()
+ }
+
+ bedtime := time.Now()
+ time.Sleep(time.Duration(poll) * time.Millisecond)
+ morning := time.Now()
+ elapsed = morning.Sub(bedtime).Nanoseconds() / int64(time.Millisecond)
+ }
+}
+
+// main parses flags, optionally launches a subprocess whose stdout/stderr
+// are forwarded line-by-line through the output channels, resolves which
+// cgroup to monitor (a direct path, a docker cid file, and/or a parent
+// prefix), starts the stats poller, and finally propagates the
+// subprocess's exit status as its own.
+func main() {
+
+	var (
+		cgroup_path    string
+		cgroup_parent  string
+		cgroup_cidfile string
+		wait           int64
+		poll           int64
+	)
+
+	flag.StringVar(&cgroup_path, "cgroup-path", "", "Direct path to cgroup")
+	flag.StringVar(&cgroup_parent, "cgroup-parent", "", "Path to parent cgroup")
+	flag.StringVar(&cgroup_cidfile, "cgroup-cid", "", "Path to container id file")
+	flag.Int64Var(&wait, "wait", 5, "Maximum time (in seconds) to wait for cid file to show up")
+	flag.Int64Var(&poll, "poll", 1000, "Polling frequency, in milliseconds")
+
+	flag.Parse()
+
+	logger := log.New(os.Stderr, "crunchstat: ", 0)
+
+	if cgroup_path == "" && cgroup_cidfile == "" {
+		logger.Fatal("Must provide either -cgroup-path or -cgroup-cid")
+	}
+
+	// Make output channel
+	stdout_chan := make(chan string)
+	stderr_chan := make(chan string)
+	finish_chan := make(chan bool)
+	defer close(stdout_chan)
+	defer close(stderr_chan)
+	defer close(finish_chan)
+
+	go OutputChannel(stdout_chan, stderr_chan)
+
+	var cmd *exec.Cmd
+
+	if len(flag.Args()) > 0 {
+		// Set up subprocess
+		cmd = exec.Command(flag.Args()[0], flag.Args()[1:]...)
+
+		logger.Print("Running ", flag.Args())
+
+		// Forward SIGINT and SIGTERM to inner process
+		term := make(chan os.Signal, 1)
+		go func(sig <-chan os.Signal) {
+			catch := <-sig
+			if cmd.Process != nil {
+				cmd.Process.Signal(catch)
+			}
+			logger.Print("caught signal:", catch)
+		}(term)
+		signal.Notify(term, syscall.SIGTERM)
+		signal.Notify(term, syscall.SIGINT)
+
+		// Funnel stdout and stderr from subprocess to output channels
+		stdout_pipe, err := cmd.StdoutPipe()
+		if err != nil {
+			logger.Fatal(err)
+		}
+		go ReadLineByLine(stdout_pipe, stdout_chan, finish_chan)
+
+		stderr_pipe, err := cmd.StderrPipe()
+		if err != nil {
+			logger.Fatal(err)
+		}
+		go ReadLineByLine(stderr_pipe, stderr_chan, finish_chan)
+
+		// Run subprocess
+		if err := cmd.Start(); err != nil {
+			logger.Fatal(err)
+		}
+	}
+
+	// Read the cid file
+	if cgroup_cidfile != "" {
+		// wait up to 'wait' seconds for the cid file to appear
+		// NOTE(review): f is only closed on the successful-read path; a
+		// file that exists but reads empty leaks one descriptor per retry.
+		var i time.Duration
+		for i = 0; i < time.Duration(wait)*time.Second; i += (100 * time.Millisecond) {
+			f, err := os.Open(cgroup_cidfile)
+			if err == nil {
+				cid, err2 := ioutil.ReadAll(f)
+				if err2 == nil && len(cid) > 0 {
+					cgroup_path = string(cid)
+					f.Close()
+					break
+				}
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+		if cgroup_path == "" {
+			// Non-fatal: polling proceeds with whatever path (possibly
+			// empty) we have at this point.
+			logger.Printf("Could not read cid file %s", cgroup_cidfile)
+		}
+	}
+
+	// add the parent prefix
+	if cgroup_parent != "" {
+		cgroup_path = fmt.Sprintf("%s/%s", cgroup_parent, cgroup_path)
+	}
+
+	logger.Print("Using cgroup ", cgroup_path)
+
+	go PollCgroupStats(cgroup_path, stderr_chan, poll)
+
+	// Wait for each of stdout and stderr to drain
+	// NOTE(review): when no command was given, cmd is nil, no goroutine
+	// ever sends on finish_chan, and main blocks here forever while the
+	// poller keeps logging — an accidental "monitor-only" mode.  If these
+	// receives ever completed in that state, cmd.Wait() below would
+	// nil-pointer panic.  Confirm whether monitor-only mode is intended.
+	<-finish_chan
+	<-finish_chan
+
+	if err := cmd.Wait(); err != nil {
+		if exiterr, ok := err.(*exec.ExitError); ok {
+			// The program has exited with an exit code != 0
+
+			// This works on both Unix and Windows. Although package
+			// syscall is generally platform dependent, WaitStatus is
+			// defined for both Unix and Windows and in both cases has
+			// an ExitStatus() method with the same signature.
+			if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+				os.Exit(status.ExitStatus())
+			}
+		} else {
+			logger.Fatalf("cmd.Wait: %v", err)
+		}
+	}
+}
import subprocess
import argparse
import daemon
+import signal
if __name__ == '__main__':
# Handle command line parameters
rc = 255
try:
- rc = subprocess.call(args.exec_args, shell=False)
+ sp = subprocess.Popen(args.exec_args, shell=False)
+
+ # forward signals to the process.
+ signal.signal(signal.SIGINT, lambda signum, frame: sp.send_signal(signum))
+ signal.signal(signal.SIGTERM, lambda signum, frame: sp.send_signal(signum))
+ signal.signal(signal.SIGQUIT, lambda signum, frame: sp.send_signal(signum))
+
+ # wait for process to complete.
+ rc = sp.wait()
+
+ # restore default signal handlers.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ signal.signal(signal.SIGQUIT, signal.SIG_DFL)
except OSError as e:
sys.stderr.write('arv-mount: %s -- exec %s\n' % (str(e), args.exec_args))
rc = e.errno