Merge branch 'master' into 2871-preload-objects
author radhika <radhika@curoverse.com>
Fri, 6 Jun 2014 19:57:19 +0000 (15:57 -0400)
committer radhika <radhika@curoverse.com>
Fri, 6 Jun 2014 19:57:19 +0000 (15:57 -0400)
21 files changed:
apps/workbench/app/controllers/application_controller.rb
apps/workbench/app/controllers/jobs_controller.rb
apps/workbench/app/models/arvados_base.rb
apps/workbench/app/models/job.rb
apps/workbench/app/views/jobs/_show_status.html.erb [new file with mode: 0644]
apps/workbench/config/routes.rb
sdk/cli/bin/arv-run-pipeline-instance
sdk/cli/bin/crunch-job
services/api/app/controllers/arvados/v1/nodes_controller.rb
services/api/app/models/job.rb
services/api/app/models/node.rb
services/api/db/migrate/20140530200539_add_supplied_script_version.rb [new file with mode: 0644]
services/api/db/schema.rb
services/api/script/cancel_stale_jobs.rb [new file with mode: 0755]
services/api/script/crunch-dispatch.rb
services/api/test/fixtures/nodes.yml
services/api/test/functional/arvados/v1/nodes_controller_test.rb
services/api/test/unit/node_test.rb
services/crunch/crunchstat/go.sh [new file with mode: 0755]
services/crunch/crunchstat/src/arvados.org/crunchstat/crunchstat.go [new file with mode: 0644]
services/fuse/bin/arv-mount

index 99bb2ca866e415a414f442ef973e9740c6e04d0c..c9a761fe232d2a73a3a0a729f288eb4e63c056fa 100644 (file)
@@ -145,9 +145,18 @@ class ApplicationController < ActionController::Base
     @new_resource_attrs ||= params[model_class.to_s.underscore.singularize]
     @new_resource_attrs ||= {}
     @new_resource_attrs.reject! { |k,v| k.to_s == 'uuid' }
-    @object ||= model_class.new @new_resource_attrs
-    @object.save!
-    show
+    @object ||= model_class.new @new_resource_attrs, params["options"]
+    if @object.save
+      respond_to do |f|
+        f.json { render json: @object.attributes.merge(href: url_for(@object)) }
+        f.html {
+          redirect_to @object
+        }
+        f.js { render }
+      end
+    else
+      self.render_error status: 422
+    end
   end
 
   def destroy
index 4746635c72a3ea141b64648a1efc675620be2657..841d3a9fdc6827486a7f54948e6c63ee4b3b5b1a 100644 (file)
@@ -16,7 +16,7 @@ class JobsController < ApplicationController
 
     @svg = ProvenanceHelper::create_provenance_graph nodes, "provenance_svg", {
       :request => request,
-      :all_script_parameters => true, 
+      :all_script_parameters => true,
       :script_version_nodes => true}
   end
 
@@ -31,6 +31,11 @@ class JobsController < ApplicationController
     end
   end
 
+  def cancel
+    @object.cancel
+    redirect_to @object
+  end
+
   def show
     generate_provenance([@object])
   end
@@ -44,6 +49,6 @@ class JobsController < ApplicationController
   end
 
   def show_pane_list
-    %w(Attributes Provenance Metadata JSON API)
+    %w(Status Attributes Provenance Metadata JSON API)
   end
 end
index 2eb0b625775a5b249c547eb221849ba7fe9459bc..33e107e3693c94b4954f4e155312b060ab50205e 100644 (file)
@@ -1,6 +1,7 @@
 class ArvadosBase < ActiveRecord::Base
   self.abstract_class = true
   attr_accessor :attribute_sortkey
+  attr_accessor :create_params
 
   def self.arvados_api_client
     ArvadosApiClient.new_or_current
@@ -29,8 +30,9 @@ class ArvadosBase < ActiveRecord::Base
       end
   end
 
-  def initialize raw_params={}
+  def initialize raw_params={}, create_params={}
     super self.class.permit_attribute_params(raw_params)
+    @create_params = create_params
     @attribute_sortkey ||= {
       'id' => nil,
       'name' => '000',
@@ -144,8 +146,10 @@ class ArvadosBase < ActiveRecord::Base
     ActionController::Parameters.new(raw_params).permit!
   end
 
-  def self.create raw_params={}
-    super(permit_attribute_params(raw_params))
+  def self.create raw_params={}, create_params={}
+    x = super(permit_attribute_params(raw_params))
+    x.create_params = create_params
+    x
   end
 
   def update_attributes raw_params={}
@@ -164,6 +168,7 @@ class ArvadosBase < ActiveRecord::Base
       obdata.delete :uuid
       resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
     else
+      postdata.merge!(@create_params) if @create_params
       resp = arvados_api_client.api(self.class, '', postdata)
     end
     return false if !resp[:etag] || !resp[:uuid]
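The net effect of the ArvadosBase changes above: a second hash passed to new or create is kept in @create_params and merged into the request body only when the record is first created; updates leave it alone. A minimal sketch of the flow for any ArvadosBase subclass, using a placeholder option key rather than any documented API parameter:

    # Sketch only; "some_api_option" is hypothetical, not a real API parameter.
    obj = Link.new({link_class: 'tag', name: 'example'}, {"some_api_option" => true})
    obj.save              # create path: POST body is the attributes merged with some_api_option
    obj.name = 'renamed'
    obj.save              # update path: @create_params is not merged in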
index 92f3910293695bb537959fbee02c2abd1c972e0e..173d3a06964fb5667b9546aee4bab518baf3c190 100644 (file)
@@ -10,4 +10,8 @@ class Job < ArvadosBase
   def self.creatable?
     false
   end
+
+  def cancel
+    arvados_api_client.api "jobs/#{self.uuid}/", "cancel", {}
+  end
 end
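Workbench's Job#cancel simply forwards to the API server's cancel endpoint for that job; the controller action added above calls it and then redirects back to the job page. A sketch of the round trip (the UUID is illustrative):

    job = Job.find('zzzzz-8i9sb-0123456789abcde')   # hypothetical UUID
    job.cancel   # issues a POST to .../arvados/v1/jobs/zzzzz-8i9sb-0123456789abcde/cancel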
diff --git a/apps/workbench/app/views/jobs/_show_status.html.erb b/apps/workbench/app/views/jobs/_show_status.html.erb
new file mode 100644 (file)
index 0000000..a3f38d8
--- /dev/null
@@ -0,0 +1,93 @@
+
+<div class="pull-right">
+  <% if @object.running %>
+    <%= form_tag "/jobs/#{@object.uuid}/cancel", style: "display:inline; padding-left: 1em" do |f| %>
+      <%= button_tag "Cancel running job", {class: 'btn btn-danger', id: "cancel-job-button"} %>
+    <% end %>
+  <% else %>
+    Re-run job using script version:
+    <%= form_tag '/jobs', style: "display:inline; padding-left: 1em" do |f| %>
+      <% [:script, :script_version, :repository, :output_is_persistent, :supplied_script_version, :nondeterministic].each do |d| %>
+        <%= hidden_field :job, d, :value => @object[d] %>
+      <% end %>
+      <% [:script_parameters, :runtime_constraints].each do |d| %>
+        <%= hidden_field :job, d, :value => JSON.dump(@object[d]) %>
+      <% end %>
+      <%= button_tag "Same as this run", {class: 'btn btn-primary', id: "re-run-same-job-button"} %>
+    <% end %>
+  <% if !@object.supplied_script_version.nil? and !@object.supplied_script_version.empty? and @object.script_version != @object.supplied_script_version%>
+      <%= form_tag '/jobs', style: "display:inline" do |f| %>
+      <% [:script, :repository, :output_is_persistent, :supplied_script_version, :nondeterministic].each do |d| %>
+        <%= hidden_field :job, d, :value => @object[d] %>
+      <% end %>
+      <%= hidden_field :job, :script_version, :value => @object[:supplied_script_version] %>
+      <% [:script_parameters, :runtime_constraints].each do |d| %>
+        <%= hidden_field :job, d, :value => JSON.dump(@object[d]) %>
+      <% end %>
+      <%= button_tag "Latest (#{@object.repository}/#{@object.supplied_script_version})", {class: 'btn btn-primary', id: "re-run-latest-job-button"} %>
+    <% end %>
+  <% end %>
+<% end %>
+</div>
+
+<table class="table pipeline-components-table">
+  <colgroup>
+    <col style="width: 20%" />
+    <col style="width: 24%" />
+    <col style="width: 12%" />
+    <col style="width: 45%" />
+  </colgroup>
+  <thead>
+    <tr><th>
+        script, version
+      </th><th>
+        progress
+        <%# format:'js' here helps browsers avoid using the cached js
+            content in html context (e.g., duplicate tab -> see
+                                     javascript) %>
+        <%= link_to '(refresh)', {format: :js}, {class: 'refresh hide', remote: true, method: 'get'} %>
+      </th>
+      <th></th>
+      <th>
+        output
+      </th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <td>
+        <%= @object[:script] %><br>
+        <span class="deemphasize"><%= @object[:script_version] %></span>
+      </td>
+      <td>
+        <%= render partial: 'job_progress', locals: {:j => @object} %>
+        <% if @object.running == false %>
+          <% if @object[:job].andand[:uuid] %>
+            <span class="deemphasize">
+              <%= link_to("..."+@object[:job][:uuid].last(15), job_url(id: @object[:job][:uuid])) %>
+            </span>
+
+            <% current_job = @object %>
+            <% if current_job.andand[:log] %>
+              <% fixup = /([a-f0-9]{32}\+\d+)(\+?.*)/.match(current_job[:log])%>
+              <% Collection.limit(1).where(uuid: fixup[1]).each do |c| %>
+                <% c.files.each do |file| %>
+                  <br/><span class="deemphasize">
+                    <a href="<%= collection_path(current_job[:log]) %>/<%= file[1] %>?disposition=inline&size=<%= file[2] %>">log</a>
+                  </span>
+                <% end %>
+              <% end %>
+            <% end %>
+          <% end %>
+        <% end %>
+      </td><td>
+        <%= render(partial: 'job_status_label',
+                   locals: { :j => @object }) %>
+      </td><td>
+        <%= link_to_if_arvados_object @object[:output], {:thumbnail => true} %>
+      </td>
+    </tr>
+    <tfoot>
+      <tr><td colspan="5"></td></tr>
+    </tfoot>
+</table>
index b4da656bc3a42208764c9a7dfc01f1fd14f82f40..383d4421e2eb0e84e7cd1d14b94ea1e51e739e2a 100644 (file)
@@ -18,7 +18,9 @@ ArvadosWorkbench::Application.routes.draw do
   resources :virtual_machines
   resources :authorized_keys
   resources :job_tasks
-  resources :jobs
+  resources :jobs do
+    post 'cancel', :on => :member
+  end
   match '/logout' => 'sessions#destroy', via: [:get, :post]
   get '/logged_out' => 'sessions#index'
   resources :users do
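The member route above maps POST /jobs/:id/cancel to JobsController#cancel and generates a cancel_job_path helper; the new _show_status partial builds the same URL by hand in its form_tag. Sketch (UUID illustrative):

    # e.g. from a view:
    cancel_job_path('zzzzz-8i9sb-0123456789abcde')
    # => "/jobs/zzzzz-8i9sb-0123456789abcde/cancel", handled by JobsController#cancel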
index fc636dff507213ac09bdfc2a9c90c4d7fd62a126..e9b3f00b61e84004a2820cedb6f472e96126c3b2 100755 (executable)
@@ -578,7 +578,7 @@ class WhRunPipelineInstance
     failed = 0
     @components.each do |cname, c|
       if c[:job]
-        if c[:job][:finished_at]
+        if c[:job][:finished_at] or c[:job][:cancelled_at] or (c[:job][:running] == false and c[:job][:success] == false)
           ended += 1
           if c[:job][:success] == true
             succeeded += 1
index 0befdd5dbf9e6c8af770c187824ad90978a05c71..167d3ddbe9074e3edba2716d6247f03bd642aba7 100755 (executable)
@@ -641,7 +641,8 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
     $command .= "&& exec arv-mount --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
     if ($docker_image)
     {
-      $command .= "$docker_bin run -i -a stdin -a stdout -a stderr ";
+      $command .= "crunchstat -cgroup-parent=/sys/fs/cgroup/lxc -cgroup-cid=$ENV{TASK_WORK}/docker.cid -poll=1000 ";
+      $command .= "$docker_bin run -i -a stdin -a stdout -a stderr -cidfile=$ENV{TASK_WORK}/docker.cid ";
       # Dynamically configure the container to use the host system as its
       # DNS server.  Get the host's global addresses from the ip command,
       # and turn them into docker --dns options using gawk.
@@ -654,9 +655,13 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
       }
       while (my ($env_key, $env_val) = each %ENV)
       {
-        $command .= "-e \Q$env_key=$env_val\E ";
+        if ($env_key =~ /^(JOB|TASK)_/) {
+          $command .= "-e \Q$env_key=$env_val\E ";
+        }
       }
       $command .= "\Q$docker_image\E ";
+    } else {
+      $command .= "crunchstat -cgroup-path=/sys/fs/cgroup "
     }
     $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
     my @execargs = ('bash', '-c', $command);
index 3fbf5fcc6bda25a9a2aedf4b3a72cb149619e31c..5bfeff06f5c11200c08258eba53489b594aa1e3f 100644 (file)
@@ -20,9 +20,15 @@ class Arvados::V1::NodesController < ApplicationController
       if !@object
         return render_not_found
       end
-      @object.ping({ ip: params[:local_ipv4] || request.env['REMOTE_ADDR'],
-                     ping_secret: params[:ping_secret],
-                     ec2_instance_id: params[:instance_id] })
+      ping_data = {
+        ip: params[:local_ipv4] || request.env['REMOTE_ADDR'],
+        ec2_instance_id: params[:instance_id]
+      }
+      [:ping_secret, :total_cpu_cores, :total_ram_mb, :total_scratch_mb]
+        .each do |key|
+        ping_data[key] = params[key] if params[key]
+      end
+      @object.ping(ping_data)
       if @object.info['ping_secret'] == params[:ping_secret]
         render json: @object.as_api_response(:superuser)
       else
index 7bca409d0b664d13179152a577adbc26f4d656cd..51fb7c27832a21eec4193886e720d1c2c3363e23 100644 (file)
@@ -37,6 +37,7 @@ class Job < ArvadosModel
     t.add :dependencies
     t.add :nondeterministic
     t.add :repository
+    t.add :supplied_script_version
   end
 
   def assert_finished
@@ -80,6 +81,7 @@ class Job < ArvadosModel
     if new_record? or script_version_changed?
       sha1 = Commit.find_commit_range(current_user, self.repository, nil, self.script_version, nil)[0] rescue nil
       if sha1
+        self.supplied_script_version = self.script_version if self.supplied_script_version.nil? or self.supplied_script_version.empty?
         self.script_version = sha1
       else
         raise ArgumentError.new("Specified script_version does not resolve to a commit")
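This hunk preserves whatever symbolic version the user supplied: when script_version resolves to a commit sha, the original string is copied into supplied_script_version (unless it is already set), which the new re-run buttons in Workbench rely on. An illustrative sketch, assuming 'master' resolves to a commit:

    job = Job.create(repository: 'arvados', script: 'hash',
                     script_version: 'master', script_parameters: {})
    job.script_version           # => "c9a761fe..." (resolved sha1; value illustrative)
    job.supplied_script_version  # => "master"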
index 2ca05f62d59cc620d4e1b4fc8f48799c69c10266..71d4dea2c0cc815c7b29c30c8d0d7dac40c31cf1 100644 (file)
@@ -115,6 +115,15 @@ class Node < ArvadosModel
       end
     end
 
+    # Record other basic stats
+    ['total_cpu_cores', 'total_ram_mb', 'total_scratch_mb'].each do |key|
+      if value = (o[key] or o[key.to_sym])
+        self.info[key] = value
+      else
+        self.info.delete(key)
+      end
+    end
+
     save!
   end
 
diff --git a/services/api/db/migrate/20140530200539_add_supplied_script_version.rb b/services/api/db/migrate/20140530200539_add_supplied_script_version.rb
new file mode 100644 (file)
index 0000000..c054235
--- /dev/null
@@ -0,0 +1,9 @@
+class AddSuppliedScriptVersion < ActiveRecord::Migration
+  def up
+    add_column :jobs, :supplied_script_version, :string
+  end
+
+  def down
+    remove_column :jobs, :supplied_script_version, :string
+  end
+end
index b026dff554e0064820665af8e23b35240465fa44..1ef80ab670389f3a7f87bc9e9581e44cd618a656 100644 (file)
@@ -11,9 +11,8 @@
 #
 # It's strongly recommended to check this file into your version control system.
 
-ActiveRecord::Schema.define(:version => 20140602143352) do
-
 
+ActiveRecord::Schema.define(:version => 20140602143352) do
 
   create_table "api_client_authorizations", :force => true do |t|
     t.string   "api_token",                                           :null => false
@@ -195,6 +194,7 @@ ActiveRecord::Schema.define(:version => 20140602143352) do
     t.boolean  "nondeterministic"
     t.string   "repository"
     t.boolean  "output_is_persistent",     :default => false, :null => false
+    t.string   "supplied_script_version"
   end
 
   add_index "jobs", ["created_at"], :name => "index_jobs_on_created_at"
diff --git a/services/api/script/cancel_stale_jobs.rb b/services/api/script/cancel_stale_jobs.rb
new file mode 100755 (executable)
index 0000000..dde4cbe
--- /dev/null
@@ -0,0 +1,37 @@
+#!/usr/bin/env ruby
+
+if ENV["CRUNCH_DISPATCH_LOCKFILE"]
+  lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
+  lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
+  unless lockfile.flock File::LOCK_EX|File::LOCK_NB
+    abort "Lock unavailable on #{lockfilename} - exit"
+  end
+end
+
+ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
+
+require File.dirname(__FILE__) + '/../config/boot'
+require File.dirname(__FILE__) + '/../config/environment'
+
+def cancel_stale_jobs
+  Job.running.each do |jobrecord|
+    f = Log.where("object_uuid=?", jobrecord.uuid).limit(1).order("created_at desc").first
+    if f
+      age = (Time.now - f.created_at)
+      if age > 300
+        $stderr.puts "dispatch: failing orphan job #{jobrecord.uuid}, last log is #{age} seconds old"
+        # job is marked running, but not known to crunch-dispatcher, and
+        # hasn't produced any log entries for 5 minutes, so mark it as failed.
+        jobrecord.running = false
+        jobrecord.cancelled_at ||= Time.now
+        jobrecord.finished_at ||= Time.now
+        if jobrecord.success.nil?
+          jobrecord.success = false
+        end
+        jobrecord.save!
+      end
+    end
+  end
+end
+
+cancel_stale_jobs
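The core of the script is its staleness test: a job still marked running whose newest log entry is more than 300 seconds old gets marked as failed. The same test restated as a standalone predicate (a sketch; the threshold and query come from the script above):

    def stale?(jobrecord, now = Time.now)
      last_log = Log.where("object_uuid=?", jobrecord.uuid).order("created_at desc").first
      last_log && (now - last_log.created_at) > 300
    end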
index a9b75982036669471f7ee1c8cdb05879aa315a37..3ddf83da18ad878bc8f84efb5ab0810ecf3f6552 100755 (executable)
@@ -136,7 +136,11 @@ class Dispatcher
       if Server::Application.config.crunch_job_user
         cmd_args.unshift("sudo", "-E", "-u",
                          Server::Application.config.crunch_job_user,
-                         "PERLLIB=#{ENV['PERLLIB']}")
+                         "PATH=#{ENV['PATH']}",
+                         "PERLLIB=#{ENV['PERLLIB']}",
+                         "PYTHONPATH=#{ENV['PYTHONPATH']}",
+                         "RUBYLIB=#{ENV['RUBYLIB']}",
+                         "GEM_PATH=#{ENV['GEM_PATH']}")
       end
 
       job_auth = ApiClientAuthorization.
index 398bdf5cb06b9c6344acdab4bb107bfd3a7ecaf5..92e78da6c18e0f412a848b243b50b18b92e0555a 100644 (file)
@@ -32,3 +32,4 @@ idle:
   info:
     :ping_secret: "69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0"
     :slurm_state: "idle"
+    total_cpu_cores: 16
index e096a045c60c81b9aef6bf1fcc08d714a48077e9..06695aa6a762a9871deef9f820dec14c33eefedb 100644 (file)
@@ -75,4 +75,20 @@ class Arvados::V1::NodesControllerTest < ActionController::TestCase
     assert_not_nil json_response['info']['ping_secret']
   end
 
+  test "ping adds node stats to info" do
+    node = nodes(:idle)
+    post :ping, {
+      id: node.uuid,
+      ping_secret: node.info['ping_secret'],
+      total_cpu_cores: 32,
+      total_ram_mb: 1024,
+      total_scratch_mb: 2048
+    }
+    assert_response :success
+    info = JSON.parse(@response.body)['info']
+    assert_equal(node.info['ping_secret'], info['ping_secret'])
+    assert_equal(32, info['total_cpu_cores'].to_i)
+    assert_equal(1024, info['total_ram_mb'].to_i)
+    assert_equal(2048, info['total_scratch_mb'].to_i)
+  end
 end
index ccc3765dd94060d98bed4b59f5f55b111292e8e4..5a9a057696041ab76a505977bdd3659b6e2daa94 100644 (file)
@@ -1,7 +1,23 @@
 require 'test_helper'
 
 class NodeTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
+  def ping_node(node_name, ping_data)
+    set_user_from_auth :admin
+    node = nodes(node_name)
+    node.ping({ping_secret: node.info['ping_secret'],
+                ip: node.ip_address}.merge(ping_data))
+    node
+  end
+
+  test "pinging a node can add and update stats" do
+    node = ping_node(:idle, {total_cpu_cores: '12', total_ram_mb: '512'})
+    assert_equal(12, node.info['total_cpu_cores'].to_i)
+    assert_equal(512, node.info['total_ram_mb'].to_i)
+  end
+
+  test "stats disappear if not in a ping" do
+    node = ping_node(:idle, {total_ram_mb: '256'})
+    refute_includes(node.info, 'total_cpu_cores')
+    assert_equal(256, node.info['total_ram_mb'].to_i)
+  end
 end
diff --git a/services/crunch/crunchstat/go.sh b/services/crunch/crunchstat/go.sh
new file mode 100755 (executable)
index 0000000..640a0d2
--- /dev/null
@@ -0,0 +1,15 @@
+#! /bin/sh
+
+# Wraps the 'go' executable with some environment setup.  Sets GOPATH, creates
+# 'pkg' and 'bin' directories, automatically installs dependencies, then runs
+# the underlying 'go' executable with any command line parameters provided to
+# the script.
+
+rootdir=$(readlink -f $(dirname $0))
+GOPATH=$rootdir:$rootdir/../../sdk/go:$GOPATH
+export GOPATH
+
+mkdir -p $rootdir/pkg
+mkdir -p $rootdir/bin
+
+go $*
diff --git a/services/crunch/crunchstat/src/arvados.org/crunchstat/crunchstat.go b/services/crunch/crunchstat/src/arvados.org/crunchstat/crunchstat.go
new file mode 100644 (file)
index 0000000..7528485
--- /dev/null
@@ -0,0 +1,311 @@
+package main
+
+import (
+       "bufio"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "os/exec"
+       "os/signal"
+       "strings"
+       "syscall"
+       "time"
+)
+
+func ReadLineByLine(inp io.ReadCloser, out chan string, finish chan bool) {
+       s := bufio.NewScanner(inp)
+       for s.Scan() {
+               out <- s.Text()
+       }
+       finish <- true
+}
+
+func OutputChannel(stdout chan string, stderr chan string) {
+       for {
+               select {
+               case s, ok := <-stdout:
+                       if ok {
+                               fmt.Fprintln(os.Stdout, s)
+                       } else {
+                               return
+                       }
+               case s, ok := <-stderr:
+                       if ok {
+                               fmt.Fprintln(os.Stderr, s)
+                       } else {
+                               return
+                       }
+               }
+       }
+}
+
+func PollCgroupStats(cgroup_path string, stderr chan string, poll int64) {
+       //var last_usage int64 = 0
+       var last_user int64 = 0
+       var last_sys int64 = 0
+       var last_cpucount int64 = 0
+
+       type Disk struct {
+               last_read  int64
+               next_read  int64
+               last_write int64
+               next_write int64
+       }
+
+       disk := make(map[string]*Disk)
+
+       //cpuacct_usage := fmt.Sprintf("%s/cpuacct.usage", cgroup_path)
+       cpuacct_stat := fmt.Sprintf("%s/cpuacct.stat", cgroup_path)
+       blkio_io_service_bytes := fmt.Sprintf("%s/blkio.io_service_bytes", cgroup_path)
+       cpuset_cpus := fmt.Sprintf("%s/cpuset.cpus", cgroup_path)
+       memory_stat := fmt.Sprintf("%s/memory.stat", cgroup_path)
+
+       var elapsed int64 = poll
+
+       for {
+               /*{
+                       c, _ := os.Open(cpuacct_usage)
+                       b, _ := ioutil.ReadAll(c)
+                       var next int64
+                       fmt.Sscanf(string(b), "%d", &next)
+                       if last_usage != 0 {
+                               stderr <- fmt.Sprintf("crunchstat: cpuacct.usage %v", (next-last_usage)/10000000)
+                       }
+                       //fmt.Printf("usage %d %d %d %d%%\n", last_usage, next, next-last_usage, (next-last_usage)/10000000)
+                       last_usage = next
+                       c.Close()
+               }*/
+               var cpus int64 = 0
+               {
+                       c, _ := os.Open(cpuset_cpus)
+                       b, _ := ioutil.ReadAll(c)
+                       sp := strings.Split(string(b), ",")
+                       for _, v := range sp {
+                               var min, max int64
+                               n, _ := fmt.Sscanf(v, "%d-%d", &min, &max)
+                               if n == 2 {
+                                       cpus += (max - min) + 1
+                               } else {
+                                       cpus += 1
+                               }
+                       }
+
+                       if cpus != last_cpucount {
+                               stderr <- fmt.Sprintf("crunchstat: cpuset.cpus %v", cpus)
+                       }
+                       last_cpucount = cpus
+
+                       c.Close()
+               }
+               if cpus == 0 {
+                       cpus = 1
+               }
+               {
+                       c, _ := os.Open(cpuacct_stat)
+                       b, _ := ioutil.ReadAll(c)
+                       var next_user int64
+                       var next_sys int64
+                       fmt.Sscanf(string(b), "user %d\nsystem %d", &next_user, &next_sys)
+                       c.Close()
+
+                       if last_user != 0 {
+                               user_diff := next_user - last_user
+                               sys_diff := next_sys - last_sys
+                               // Assume we're reading stats based on 100
+                               // jiffies per second.  Because the elapsed
+                               // time is in milliseconds, we need to boost
+                               // that to 1000 jiffies per second, then boost
+                               // it by another 100x to get a percentage, then
+                               // finally divide by the actual elapsed time
+                               // and the number of cpus to get average load
+                               // over the polling period.
+                               user_pct := (user_diff * 10 * 100) / (elapsed * cpus)
+                               sys_pct := (sys_diff * 10 * 100) / (elapsed * cpus)
+
+                               stderr <- fmt.Sprintf("crunchstat: cpuacct.stat user %v", user_pct)
+                               stderr <- fmt.Sprintf("crunchstat: cpuacct.stat sys %v", sys_pct)
+                       }
+
+                       /*fmt.Printf("user %d %d %d%%\n", last_user, next_user, next_user-last_user)
+                       fmt.Printf("sys %d %d %d%%\n", last_sys, next_sys, next_sys-last_sys)
+                       fmt.Printf("sum %d%%\n", (next_user-last_user)+(next_sys-last_sys))*/
+                       last_user = next_user
+                       last_sys = next_sys
+               }
+               {
+                       c, _ := os.Open(blkio_io_service_bytes)
+                       b := bufio.NewScanner(c)
+                       var device, op string
+                       var next int64
+                       for b.Scan() {
+                               if _, err := fmt.Sscanf(string(b.Text()), "%s %s %d", &device, &op, &next); err == nil {
+                                       if disk[device] == nil {
+                                               disk[device] = new(Disk)
+                                       }
+                                       if op == "Read" {
+                                               disk[device].last_read = disk[device].next_read
+                                               disk[device].next_read = next
+                                               if disk[device].last_read > 0 {
+                                                       stderr <- fmt.Sprintf("crunchstat: blkio.io_service_bytes %s read %v", device, disk[device].next_read-disk[device].last_read)
+                                               }
+                                       }
+                                       if op == "Write" {
+                                               disk[device].last_write = disk[device].next_write
+                                               disk[device].next_write = next
+                                               if disk[device].last_write > 0 {
+                                                       stderr <- fmt.Sprintf("crunchstat: blkio.io_service_bytes %s write %v", device, disk[device].next_write-disk[device].last_write)
+                                               }
+                                       }
+                               }
+                       }
+                       c.Close()
+               }
+
+               {
+                       c, _ := os.Open(memory_stat)
+                       b := bufio.NewScanner(c)
+                       var stat string
+                       var val int64
+                       for b.Scan() {
+                               if _, err := fmt.Sscanf(string(b.Text()), "%s %d", &stat, &val); err == nil {
+                                       if stat == "rss" {
+                                               stderr <- fmt.Sprintf("crunchstat: memory.stat rss %v", val)
+                                       }
+                               }
+                       }
+                       c.Close()
+               }
+
+               bedtime := time.Now()
+               time.Sleep(time.Duration(poll) * time.Millisecond)
+               morning := time.Now()
+               elapsed = morning.Sub(bedtime).Nanoseconds() / int64(time.Millisecond)
+       }
+}
+
+func main() {
+
+       var (
+               cgroup_path    string
+               cgroup_parent  string
+               cgroup_cidfile string
+               wait           int64
+               poll           int64
+       )
+
+       flag.StringVar(&cgroup_path, "cgroup-path", "", "Direct path to cgroup")
+       flag.StringVar(&cgroup_parent, "cgroup-parent", "", "Path to parent cgroup")
+       flag.StringVar(&cgroup_cidfile, "cgroup-cid", "", "Path to container id file")
+       flag.Int64Var(&wait, "wait", 5, "Maximum time (in seconds) to wait for cid file to show up")
+       flag.Int64Var(&poll, "poll", 1000, "Polling frequency, in milliseconds")
+
+       flag.Parse()
+
+       logger := log.New(os.Stderr, "crunchstat: ", 0)
+
+       if cgroup_path == "" && cgroup_cidfile == "" {
+               logger.Fatal("Must provide either -cgroup-path or -cgroup-cid")
+       }
+
+       // Make output channel
+       stdout_chan := make(chan string)
+       stderr_chan := make(chan string)
+       finish_chan := make(chan bool)
+       defer close(stdout_chan)
+       defer close(stderr_chan)
+       defer close(finish_chan)
+
+       go OutputChannel(stdout_chan, stderr_chan)
+
+       var cmd *exec.Cmd
+
+       if len(flag.Args()) > 0 {
+               // Set up subprocess
+               cmd = exec.Command(flag.Args()[0], flag.Args()[1:]...)
+
+               logger.Print("Running ", flag.Args())
+
+               // Forward SIGINT and SIGTERM to inner process
+               term := make(chan os.Signal, 1)
+               go func(sig <-chan os.Signal) {
+                       catch := <-sig
+                       if cmd.Process != nil {
+                               cmd.Process.Signal(catch)
+                       }
+                       logger.Print("caught signal:", catch)
+               }(term)
+               signal.Notify(term, syscall.SIGTERM)
+               signal.Notify(term, syscall.SIGINT)
+
+               // Funnel stdout and stderr from subprocess to output channels
+               stdout_pipe, err := cmd.StdoutPipe()
+               if err != nil {
+                       logger.Fatal(err)
+               }
+               go ReadLineByLine(stdout_pipe, stdout_chan, finish_chan)
+
+               stderr_pipe, err := cmd.StderrPipe()
+               if err != nil {
+                       logger.Fatal(err)
+               }
+               go ReadLineByLine(stderr_pipe, stderr_chan, finish_chan)
+
+               // Run subprocess
+               if err := cmd.Start(); err != nil {
+                       logger.Fatal(err)
+               }
+       }
+
+       // Read the cid file
+       if cgroup_cidfile != "" {
+               // wait up to 'wait' seconds for the cid file to appear
+               var i time.Duration
+               for i = 0; i < time.Duration(wait)*time.Second; i += (100 * time.Millisecond) {
+                       f, err := os.Open(cgroup_cidfile)
+                       if err == nil {
+                               cid, err2 := ioutil.ReadAll(f)
+                               if err2 == nil && len(cid) > 0 {
+                                       cgroup_path = string(cid)
+                                       f.Close()
+                                       break
+                               }
+                       }
+                       time.Sleep(100 * time.Millisecond)
+               }
+               if cgroup_path == "" {
+                       logger.Printf("Could not read cid file %s", cgroup_cidfile)
+               }
+       }
+
+       // add the parent prefix
+       if cgroup_parent != "" {
+               cgroup_path = fmt.Sprintf("%s/%s", cgroup_parent, cgroup_path)
+       }
+
+       logger.Print("Using cgroup ", cgroup_path)
+
+       go PollCgroupStats(cgroup_path, stderr_chan, poll)
+
+       // Wait for each of stdout and stderr to drain
+       <-finish_chan
+       <-finish_chan
+
+       if err := cmd.Wait(); err != nil {
+               if exiterr, ok := err.(*exec.ExitError); ok {
+                       // The program has exited with an exit code != 0
+
+                       // This works on both Unix and Windows. Although package
+                       // syscall is generally platform dependent, WaitStatus is
+                       // defined for both Unix and Windows and in both cases has
+                       // an ExitStatus() method with the same signature.
+                       if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+                               os.Exit(status.ExitStatus())
+                       }
+               } else {
+                       logger.Fatalf("cmd.Wait: %v", err)
+               }
+       }
+}
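For reference, the cpuacct.stat math in PollCgroupStats above (user_pct = user_diff * 10 * 100 / (elapsed * cpus)) assumes 100 jiffies per second and an elapsed time in milliseconds. A worked example with illustrative numbers:

    user_diff  = 400    # jiffies of user CPU consumed since the previous sample
    elapsed_ms = 1000   # one poll interval, in milliseconds
    cpus       = 4
    (user_diff * 10 * 100) / (elapsed_ms * cpus)
    # => 100: four seconds of user CPU in one second of wall time across four
    #    CPUs, i.e. the container kept all of its CPUs fully busy.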
index 904fbf1b8d8958804575a0495b35616acc6f79e0..b4afffab061fc2ceaf56bc7dcb93a105fa3d93cb 100755 (executable)
@@ -5,6 +5,7 @@ import arvados
 import subprocess
 import argparse
 import daemon
+import signal
 
 if __name__ == '__main__':
     # Handle command line parameters
@@ -66,7 +67,20 @@ collections on the server.""")
 
         rc = 255
         try:
-            rc = subprocess.call(args.exec_args, shell=False)
+            sp = subprocess.Popen(args.exec_args, shell=False)
+
+            # forward signals to the process.
+            signal.signal(signal.SIGINT, lambda signum, frame: sp.send_signal(signum))
+            signal.signal(signal.SIGTERM, lambda signum, frame: sp.send_signal(signum))
+            signal.signal(signal.SIGQUIT, lambda signum, frame: sp.send_signal(signum))
+
+            # wait for process to complete.
+            rc = sp.wait()
+
+            # restore default signal handlers.
+            signal.signal(signal.SIGINT, signal.SIG_DFL)
+            signal.signal(signal.SIGTERM, signal.SIG_DFL)
+            signal.signal(signal.SIGQUIT, signal.SIG_DFL)
         except OSError as e:
             sys.stderr.write('arv-mount: %s -- exec %s\n' % (str(e), args.exec_args))
             rc = e.errno