Merge branch 'master' into origin-2883-job-log-viewer
author     Peter Amstutz <peter.amstutz@curoverse.com>
           Wed, 18 Jun 2014 14:05:00 +0000 (10:05 -0400)
committer  Peter Amstutz <peter.amstutz@curoverse.com>
           Wed, 18 Jun 2014 14:05:00 +0000 (10:05 -0400)
32 files changed:
apps/workbench/app/assets/javascripts/infinite_scroll.js
apps/workbench/app/assets/javascripts/sizing.js
apps/workbench/app/assets/stylesheets/application.css.scss
apps/workbench/app/controllers/pipeline_instances_controller.rb
apps/workbench/app/views/application/_content.html.erb
apps/workbench/app/views/application/_job_progress.html.erb
apps/workbench/app/views/application/_svg_div.html.erb
sdk/cli/bin/arv
sdk/cli/bin/arv-ws [new symlink]
sdk/cli/bin/crunch-job
sdk/cli/test/test_arv-ws.rb [new file with mode: 0644]
sdk/python/arvados/commands/keepdocker.py
sdk/python/arvados/events.py
sdk/python/bin/arv-ws [new file with mode: 0755]
sdk/python/setup.py
services/api/app/models/arvados_model.rb
services/api/app/models/collection.rb
services/api/app/models/job.rb
services/api/config/application.default.yml
services/api/db/migrate/20140611173003_add_docker_locator_to_jobs.rb [new file with mode: 0644]
services/api/db/schema.rb
services/api/script/crunch-dispatch.rb
services/api/test/fixtures/collections.yml
services/api/test/fixtures/links.yml
services/api/test/fixtures/repositories.yml
services/api/test/functional/arvados/v1/commits_controller_test.rb
services/api/test/integration/crunch_dispatch_test.rb [new file with mode: 0644]
services/api/test/integration/select_test.rb
services/api/test/integration/serialized_encoding_test.rb
services/api/test/test.git.tar
services/api/test/unit/job_test.rb
services/keep/src/arvados.org/keepproxy/keepproxy.go

index 3e15c9d970c3aac09bbc8840f2f04733e1ce484d..64a90e2255d117ba7416e5d362269e53aba5ee70 100644 (file)
@@ -42,7 +42,8 @@ $(document).
     on('ready ajax:complete', function() {
         $('[data-infinite-scroller]').each(function() {
             var $scroller = $($(this).attr('data-infinite-scroller'));
-            if (!$scroller.hasClass('smart-scroll'))
+            if (!$scroller.hasClass('smart-scroll') &&
+               'scroll' != $scroller.css('overflow-y'))
                 $scroller = $(window);
             $scroller.
                 addClass('infinite-scroller').
index 640893fe0ca04f36aa4388f96da60c9834920b94..3d60274439f51232982d6737ed3c23c40b0f0372 100644 (file)
@@ -11,20 +11,19 @@ function graph_zoom(divId, svgId, scale) {
 }
 
 function smart_scroll_fixup(s) {
-    //console.log(s);
+
     if (s != null && s.type == 'shown.bs.tab') {
         s = [s.target];
     }
     else {
         s = $(".smart-scroll");
     }
-    //console.log(s);
-    for (var i = 0; i < s.length; i++) {
-        a = s[i];
-        var h = window.innerHeight - a.getBoundingClientRect().top - 20;
+
+    s.each(function(i, a) {
+        var h = window.innerHeight - $(a).offset().top;
         height = String(h) + "px";
-        a.style['max-height'] = height;
-    }
+        $(a).css('max-height', height);
+    });
 }
 
-$(window).on('load resize scroll', smart_scroll_fixup);
+$(window).on('load ready resize scroll ajax:complete', smart_scroll_fixup);
index 4fea7aebc09261df394dc754437157c9abe24e08..da958e1564ae43e4974ac697cdb585e5b02c00b4 100644 (file)
@@ -134,6 +134,7 @@ nav.navbar-fixed-top .navbar-nav.navbar-right > li > a:hover {
 
 .smart-scroll {
     overflow: auto;
+    margin-bottom: -15px;
 }
 
 .inline-progress-container div.progress {
index 97c5e17662919ce17702f13273c0d186639c2153..e84d0e45975c7207386cb46fb2d43e71534bda5d 100644 (file)
@@ -115,73 +115,80 @@ class PipelineInstancesController < ApplicationController
 
     @rows = []          # each is {name: S, components: [...]}
 
-    # Build a table: x=pipeline y=component
-    @objects.each_with_index do |pi, pi_index|
-      pipeline_jobs(pi).each do |component|
-        # Find a cell with the same name as this component but no
-        # entry for this pipeline
-        target_row = nil
-        @rows.each_with_index do |row, row_index|
-          if row[:name] == component[:name] and !row[:components][pi_index]
-            target_row = row
+    if params['tab_pane'] == "Compare" or params['tab_pane'].nil?
+      # Build a table: x=pipeline y=component
+      @objects.each_with_index do |pi, pi_index|
+        pipeline_jobs(pi).each do |component|
+          # Find a cell with the same name as this component but no
+          # entry for this pipeline
+          target_row = nil
+          @rows.each_with_index do |row, row_index|
+            if row[:name] == component[:name] and !row[:components][pi_index]
+              target_row = row
+            end
           end
+          if !target_row
+            target_row = {name: component[:name], components: []}
+            @rows << target_row
+          end
+          target_row[:components][pi_index] = component
         end
-        if !target_row
-          target_row = {name: component[:name], components: []}
-          @rows << target_row
-        end
-        target_row[:components][pi_index] = component
       end
-    end
 
-    @rows.each do |row|
-      # Build a "normal" pseudo-component for this row by picking the
-      # most common value for each attribute. If all values are
-      # equally common, there is no "normal".
-      normal = {}              # attr => most common value
-      highscore = {}           # attr => how common "normal" is
-      score = {}               # attr => { value => how common }
-      row[:components].each do |pj|
-        next if pj.nil?
-        pj.each do |k,v|
-          vstr = for_comparison v
-          score[k] ||= {}
-          score[k][vstr] = (score[k][vstr] || 0) + 1
-          highscore[k] ||= 0
-          if score[k][vstr] == highscore[k]
-            # tie for first place = no "normal"
-            normal.delete k
-          elsif score[k][vstr] == highscore[k] + 1
-            # more pipelines have v than anything else
-            highscore[k] = score[k][vstr]
-            normal[k] = vstr
+      @rows.each do |row|
+        # Build a "normal" pseudo-component for this row by picking the
+        # most common value for each attribute. If all values are
+        # equally common, there is no "normal".
+        normal = {}              # attr => most common value
+        highscore = {}           # attr => how common "normal" is
+        score = {}               # attr => { value => how common }
+        row[:components].each do |pj|
+          next if pj.nil?
+          pj.each do |k,v|
+            vstr = for_comparison v
+            score[k] ||= {}
+            score[k][vstr] = (score[k][vstr] || 0) + 1
+            highscore[k] ||= 0
+            if score[k][vstr] == highscore[k]
+              # tie for first place = no "normal"
+              normal.delete k
+            elsif score[k][vstr] == highscore[k] + 1
+              # more pipelines have v than anything else
+              highscore[k] = score[k][vstr]
+              normal[k] = vstr
+            end
           end
         end
-      end
 
-      # Add a hash in component[:is_normal]: { attr => is_the_value_normal? }
-      row[:components].each do |pj|
-        next if pj.nil?
-        pj[:is_normal] = {}
-        pj.each do |k,v|
-          pj[:is_normal][k] = (normal.has_key?(k) && normal[k] == for_comparison(v))
+        # Add a hash in component[:is_normal]: { attr => is_the_value_normal? }
+        row[:components].each do |pj|
+          next if pj.nil?
+          pj[:is_normal] = {}
+          pj.each do |k,v|
+            pj[:is_normal][k] = (normal.has_key?(k) && normal[k] == for_comparison(v))
+          end
         end
       end
     end
 
-    provenance, pips = graph(@objects)
+    if params['tab_pane'] == "Graph"
+      provenance, pips = graph(@objects)
 
-    @pipelines = @objects
+      @pipelines = @objects
 
-    if provenance
-      @prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
-        :request => request,
-        :all_script_parameters => true,
-        :combine_jobs => :script_and_version,
-        :script_version_nodes => true,
-        :pips => pips }
+      if provenance
+        @prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
+          :request => request,
+          :all_script_parameters => true,
+          :combine_jobs => :script_and_version,
+          :script_version_nodes => true,
+          :pips => pips }
+      end
     end
+
     @object = @objects.first
+
+    show
   end
 
   def show_pane_list
index 33ec16f5ae69b5f720ef070d71ea5066f2362acc..418923c85abf033f9b988245f0efa573df5ed4bd 100644 (file)
@@ -26,7 +26,7 @@
     if (!tab_pane_valid_state[pane]) {
       tab_pane_valid_state[pane] = true;
       $(document).trigger('ajax:send');
-      $.ajax('<%=j url_for() %>?tab_pane='+pane, {dataType: 'html', type: 'GET'}).
+      $.ajax('<%=j url_for() %>?<%= raw(controller.request.query_string) %>&tab_pane='+pane, {dataType: 'html', type: 'GET'}).
         done(function(data, status, jqxhr) {
           $('#' + pane + ' > div > div').html(data);
           $(document).trigger('ajax:complete');
@@ -68,7 +68,7 @@
 <% else %>
        data-object-uuid="<%= @object.uuid %>"
 <% end %>
->
+  >
 
 <% content_for :js do %>
   <% if i == 0 %>
index a25acc3a048b29e31fe8020c880e88b534cd76f8..5c19779cccfb29dc418597e60b1a498d5af61bcd 100644 (file)
@@ -1,18 +1,41 @@
-<% percent_total_tasks = 100 / (j[:tasks_summary][:done] + j[:tasks_summary][:running] + j[:tasks_summary][:failed] + j[:tasks_summary][:todo]) rescue 0 %>
+<%
+   failed = j[:tasks_summary][:failed] || 0 rescue 0
+   done = j[:tasks_summary][:done] || 0 rescue 0
+   running = j[:tasks_summary][:running] || 0 rescue 0
+   todo = j[:tasks_summary][:todo] || 0 rescue 0
 
-<% if defined? scaleby %>
-  <% percent_total_tasks *= scaleby %>
-<% end %>
+   if j[:success] == false and done + running + failed == 0
+     # The job failed but no tasks were ever started (i.e. crunch-dispatch
+     # was unable to start the job). Display a full 100% failed progress bar.
+     failed_percent = 100
+     success_percent = 0
+     running_percent = 0
+   elsif done + running + failed + todo == 0
+     # No tasks were ever created for this job;
+     # render an empty progress bar.
+     failed_percent = 0
+     success_percent = 0
+     running_percent = 0
+   else
+     percent_total_tasks = 100.0 / (done + running + failed + todo)
+     if defined? scaleby
+       percent_total_tasks *= scaleby
+     end
+     failed_percent = (failed * percent_total_tasks).ceil
+     success_percent = (done * percent_total_tasks).ceil
+     running_percent = (running * percent_total_tasks).ceil
+   end
+%>
 
 <% if not defined? scaleby %>
   <div class="progress">
 <% end %>
 
-<span class="progress-bar progress-bar-success" style="width: <%= j[:tasks_summary][:done] * percent_total_tasks rescue 0 %>%;">
+<span class="progress-bar progress-bar-success" style="width: <%= success_percent %>%;">
 </span>
-<span class="progress-bar progress-bar-danger" style="width: <%= j[:tasks_summary][:failed] * percent_total_tasks rescue 0 %>%;">
+<span class="progress-bar progress-bar-danger" style="width: <%= failed_percent %>%;">
 </span>
-<span class="progress-bar" style="width: <%= j[:tasks_summary][:running] * percent_total_tasks rescue 0 %>%;">
+<span class="progress-bar" style="width: <%= running_percent %>%;">
 </span>
 
 <% if not defined? scaleby %>
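
For reference, a minimal Python sketch (not part of this commit) of the percentage logic the template above now implements; the tasks_summary shape matches the job records used throughout this diff, and the function and argument names are illustrative only.

    import math

    def job_progress_percents(tasks_summary, success=None, scaleby=None):
        """Return (success, failed, running) widths for a job progress bar."""
        done = tasks_summary.get('done', 0) or 0
        running = tasks_summary.get('running', 0) or 0
        failed = tasks_summary.get('failed', 0) or 0
        todo = tasks_summary.get('todo', 0) or 0
        if success is False and done + running + failed == 0:
            # Job failed before any task started: render a full failed bar.
            return 0, 100, 0
        if done + running + failed + todo == 0:
            # No tasks were ever created: render an empty bar.
            return 0, 0, 0
        per_task = 100.0 / (done + running + failed + todo)
        if scaleby:
            per_task *= scaleby
        return (int(math.ceil(done * per_task)),
                int(math.ceil(failed * per_task)),
                int(math.ceil(running * per_task)))
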
index b35d7068bc7bfc814039bae80f7f598d136234b1..76bedba65bca94ce8b28cc19a1d03da6a9f7825c 100644 (file)
@@ -24,7 +24,7 @@ stroke-linecap: round;
     });
 <% end %>
 
-<div id="_<%= divId %>_container" style="padding-top: 41px; margin-top: -41px">
+<div id="_<%= divId %>_container">
   <div style="text-align: right">
     <a style="cursor: pointer"><span class="glyphicon glyphicon-zoom-out" onclick="graph_zoom('<%= divId %>', '<%= svgId %>', .9)"></span></a>
     <a style="cursor: pointer"><span class="glyphicon glyphicon-zoom-in" onclick="graph_zoom('<%= divId %>', '<%= svgId %>', 1./.9)"></span></a>
@@ -34,5 +34,4 @@ stroke-linecap: round;
     <span id="_<%= divId %>_center" style="padding-left: 0px"></span>
     <%= raw(svg) %>
   </div>
-  <div id="_<%= divId %>_padding" style="padding-bottom: 1em"></div>
 </div>
index b485b7b10f7286fd0fd242d466a3002e5c1319ce..d4aef2c0887ca22149df10e92ae54bf8ae39ef68 100755 (executable)
@@ -68,6 +68,9 @@ when 'pipeline'
 when 'tag'
   ARGV.shift
   exec `which arv-tag`.strip, *ARGV
+when 'ws'
+  ARGV.shift
+  exec `which arv-ws`.strip, *ARGV
 end
 
 ENV['ARVADOS_API_VERSION'] ||= 'v1'
diff --git a/sdk/cli/bin/arv-ws b/sdk/cli/bin/arv-ws
new file mode 120000 (symlink)
index 0000000..622916b
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-ws
\ No newline at end of file
index 8b4717734a0450f2f25d652eeb35589e4fa2ebc9..6224a64afe16f23e14d7c09f2aeeae7e24368c8e 100755 (executable)
@@ -501,13 +501,23 @@ if (!$have_slurm)
 
 # If this job requires a Docker image, install that.
 my $docker_bin = "/usr/bin/docker.io";
-my $docker_image = $Job->{runtime_constraints}->{docker_image} || "";
-if ($docker_image) {
+my ($docker_locator, $docker_hash);
+if ($docker_locator = $Job->{docker_image_locator}) {
+  $docker_hash = find_docker_hash($docker_locator);
+  if (!$docker_hash)
+  {
+    croak("No Docker image hash found from locator $docker_locator");
+  }
+  my $docker_install_script = qq{
+if ! $docker_bin images -q --no-trunc | grep -qxF \Q$docker_hash\E; then
+    arv-get \Q$docker_locator/$docker_hash.tar\E | $docker_bin load
+fi
+};
   my $docker_pid = fork();
   if ($docker_pid == 0)
   {
-    srun (["srun", "--nodelist=" . join(' ', @node)],
-          [$docker_bin, 'pull', $docker_image]);
+    srun (["srun", "--nodelist=" . join(',', @node)],
+          ["/bin/sh", "-ec", $docker_install_script]);
     exit ($?);
   }
   while (1)
@@ -516,11 +526,9 @@ if ($docker_image) {
     freeze_if_want_freeze ($docker_pid);
     select (undef, undef, undef, 0.1);
   }
-  # If the Docker image was specified as a hash, pull will fail.
-  # Ignore that error.  We'll see what happens when we try to run later.
-  if (($? != 0) && ($docker_image !~ /^[0-9a-fA-F]{5,64}$/))
+  if ($? != 0)
   {
-    croak("Installing Docker image $docker_image returned exit code $?");
+    croak("Installing Docker image from $docker_locator returned exit code $?");
   }
 }
 
@@ -639,7 +647,7 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
          "&& perl -";
     }
     $command .= "&& exec arv-mount --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
-    if ($docker_image)
+    if ($docker_hash)
     {
       $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$ENV{TASK_WORK}/docker.cid -poll=10000 ";
       $command .= "$docker_bin run -i -a stdin -a stdout -a stderr --cidfile=$ENV{TASK_WORK}/docker.cid ";
@@ -655,11 +663,11 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
       }
       while (my ($env_key, $env_val) = each %ENV)
       {
-        if ($env_key =~ /^(JOB|TASK)_/) {
+        if ($env_key =~ /^(ARVADOS|JOB|TASK)_/) {
           $command .= "-e \Q$env_key=$env_val\E ";
         }
       }
-      $command .= "\Q$docker_image\E ";
+      $command .= "\Q$docker_hash\E ";
     } else {
       $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 "
     }
@@ -1437,6 +1445,21 @@ sub must_lock_now
   }
 }
 
+sub find_docker_hash {
+  # Given a Keep locator, search for a matching link to find the Docker hash
+  # of the stored image.
+  my $locator = shift;
+  my $links_result = $arv->{links}->{list}->execute(
+    filters => [["head_uuid", "=", $locator],
+                ["link_class", "=", "docker_image_hash"]],
+    limit => 1);
+  my $docker_hash;
+  foreach my $link (@{$links_result->{items}}) {
+    $docker_hash = lc($link->{name});
+  }
+  return $docker_hash;
+}
+
 __DATA__
 #!/usr/bin/perl
 
diff --git a/sdk/cli/test/test_arv-ws.rb b/sdk/cli/test/test_arv-ws.rb
new file mode 100644 (file)
index 0000000..d972122
--- /dev/null
@@ -0,0 +1,21 @@
+require 'minitest/autorun'
+
+class TestArvWs < Minitest::Test
+  def setup
+  end
+
+  def test_arv_ws_get_help
+    out, err = capture_subprocess_io do
+      system ('arv-ws -h')
+    end
+    assert_equal '', err
+  end
+
+  def test_arv_ws_such_option
+    out, err = capture_subprocess_io do
+      system ('arv-ws --junk')
+    end
+    refute_equal '', err
+  end
+
+end
index abf60f2a8eb5201ac00719451a97371881953000..236d9fc0043c3438401d496116cd33e7ef7f8da8 100644 (file)
@@ -204,7 +204,8 @@ def main(arguments=None):
     make_link('docker_image_hash', image_hash, **link_base)
     if not image_hash.startswith(args.image.lower()):
         make_link('docker_image_repository', args.image, **link_base)
-        make_link('docker_image_tag', args.tag, **link_base)
+        make_link('docker_image_repo+tag', '{}:{}'.format(args.image, args.tag),
+                  **link_base)
 
     # Clean up.
     image_file.close()
index e61b20c70851f8079137c540680842592a3dcb9b..06f3b34410531a8a1f76bcf32c660d496f80d6cd 100644 (file)
@@ -6,6 +6,7 @@ import time
 import ssl
 import re
 import config
+import logging
 
 class EventClient(WebSocketClient):
     def __init__(self, url, filters, on_event):
@@ -26,8 +27,22 @@ class EventClient(WebSocketClient):
     def received_message(self, m):
         self.on_event(json.loads(str(m)))
 
+    def close_connection(self):
+        try:
+            self.sock.shutdown(socket.SHUT_RDWR)
+            self.sock.close()
+        except:
+            pass
+
 def subscribe(api, filters, on_event):
-    url = "{}?api_token={}".format(api._rootDesc['websocketUrl'], config.get('ARVADOS_API_TOKEN'))
-    ws = EventClient(url, filters, on_event)
-    ws.connect()
-    return ws
+    ws = None
+    try:
+        url = "{}?api_token={}".format(api._rootDesc['websocketUrl'], config.get('ARVADOS_API_TOKEN'))
+        ws = EventClient(url, filters, on_event)
+        ws.connect()
+        return ws
+    except:
+        logging.exception('')
+        if (ws):
+          ws.close_connection()        
+        raise
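
Since this branch feeds a job log viewer, here is a brief, hedged usage sketch of the revised subscribe(): filter events for a single job and print its stderr text. The subscribe() signature and filter form come from the new bin/arv-ws script below; the event field names (event_type, properties['text']) follow the Log records written by crunch-dispatch elsewhere in this diff and are an assumption about the websocket payload, and the job UUID is a placeholder.

    import arvados
    from arvados.events import subscribe

    api = arvados.api('v1', cache=False)
    job_uuid = 'zzzzz-8i9sb-xxxxxxxxxxxxxxx'  # placeholder

    def on_event(ev):
        # Assumed payload shape: log records arrive with event_type and properties.
        if ev.get('event_type') == 'stderr':
            print ev['properties']['text']

    ws = subscribe(api, [['object_uuid', '=', job_uuid]], on_event)
    try:
        ws.run_forever()
    finally:
        ws.close_connection()
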
diff --git a/sdk/python/bin/arv-ws b/sdk/python/bin/arv-ws
new file mode 100755 (executable)
index 0000000..58b6281
--- /dev/null
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+import sys
+import logging
+import argparse
+import arvados
+from arvados.events import subscribe
+
+parser = argparse.ArgumentParser()
+parser.add_argument('-u', '--uuid', type=str, default="")
+args = parser.parse_args()
+
+filters = []
+if len(args.uuid)>0: filters = [ ['object_uuid', '=', args.uuid] ]
+
+api = arvados.api('v1', cache=False)
+
+def on_message(ev):
+  print "\n", ev
+
+ws = None
+try:
+  ws = subscribe(api, filters, lambda ev: on_message(ev))
+  ws.run_forever()
+except KeyboardInterrupt:
+  print '' # don't log it
+except:
+  logging.exception('')
+  if (ws):
+    ws.close_connection()
index a2098630f9521532bf4610e26acd5bb50ed8e5f8..9f9c96284e3ae4a56ac587f917bfca6f72b811e1 100644 (file)
@@ -15,6 +15,7 @@ setup(name='arvados-python-client',
         'bin/arv-ls',
         'bin/arv-normalize',
         'bin/arv-put',
+        'bin/arv-ws',
         ],
       install_requires=[
         'python-gflags',
index 95fd055d4939880967e79fdec0d982088e2f70aa..2df6686f2883edd81adde92e9748b3fa95010614 100644 (file)
@@ -91,6 +91,13 @@ class ArvadosModel < ActiveRecord::Base
     # Get rid of troublesome nils
     users_list.compact!
 
+    # Load optional keyword arguments, if they exist.
+    if users_list.last.is_a? Hash
+      kwargs = users_list.pop
+    else
+      kwargs = {}
+    end
+
     # Check if any of the users are admin.  If so, we're done.
     if users_list.select { |u| u.is_admin }.empty?
 
@@ -101,6 +108,7 @@ class ArvadosModel < ActiveRecord::Base
         collect { |uuid| sanitize(uuid) }.join(', ')
       sql_conds = []
       sql_params = []
+      sql_table = kwargs.fetch(:table_name, table_name)
       or_object_uuid = ''
 
       # This row is owned by a member of users_list, or owned by a group
@@ -113,25 +121,25 @@ class ArvadosModel < ActiveRecord::Base
       # to this row, or to the owner of this row (see join() below).
       permitted_uuids = "(SELECT head_uuid FROM links WHERE link_class='permission' AND tail_uuid IN (#{sanitized_uuid_list}))"
 
-      sql_conds += ["#{table_name}.owner_uuid in (?)",
-                    "#{table_name}.uuid in (?)",
-                    "#{table_name}.uuid IN #{permitted_uuids}"]
+      sql_conds += ["#{sql_table}.owner_uuid in (?)",
+                    "#{sql_table}.uuid in (?)",
+                    "#{sql_table}.uuid IN #{permitted_uuids}"]
       sql_params += [uuid_list, user_uuids]
 
-      if self == Link and users_list.any?
+      if sql_table == "links" and users_list.any?
         # This row is a 'permission' or 'resources' link class
         # The uuid for a member of users_list is referenced in either the head
         # or tail of the link
-        sql_conds += ["(#{table_name}.link_class in (#{sanitize 'permission'}, #{sanitize 'resources'}) AND (#{table_name}.head_uuid IN (?) OR #{table_name}.tail_uuid IN (?)))"]
+        sql_conds += ["(#{sql_table}.link_class in (#{sanitize 'permission'}, #{sanitize 'resources'}) AND (#{sql_table}.head_uuid IN (?) OR #{sql_table}.tail_uuid IN (?)))"]
         sql_params += [user_uuids, user_uuids]
       end
 
-      if self == Log and users_list.any?
+      if sql_table == "logs" and users_list.any?
         # Link head points to the object described by this row
-        sql_conds += ["#{table_name}.object_uuid IN #{permitted_uuids}"]
+        sql_conds += ["#{sql_table}.object_uuid IN #{permitted_uuids}"]
 
         # This object described by this row is owned by this user, or owned by a group readable by this user
-        sql_conds += ["#{table_name}.object_owner_uuid in (?)"]
+        sql_conds += ["#{sql_table}.object_owner_uuid in (?)"]
         sql_params += [uuid_list]
       end
 
index 745f0bf38b610d7d276bfae3c87c8fd8f6c87f1a..64a6bb05304b84877bb4f6d55231c3e561bc8ee6 100644 (file)
@@ -147,4 +147,49 @@ class Collection < ArvadosModel
     raise "uuid #{uuid} has no hash part" if !hash_part
     [hash_part, size_part].compact.join '+'
   end
+
+  def self.for_latest_docker_image(search_term, search_tag=nil, readers=nil)
+    readers ||= [Thread.current[:user]]
+    base_search = Link.
+      readable_by(*readers).
+      readable_by(*readers, table_name: "collections").
+      joins("JOIN collections ON links.head_uuid = collections.uuid").
+      order("links.created_at DESC")
+
+    # If the search term is a Collection locator with an associated
+    # Docker image hash link, return that Collection.
+    coll_matches = base_search.
+      where(link_class: "docker_image_hash", collections: {uuid: search_term})
+    if match = coll_matches.first
+      return find_by_uuid(match.head_uuid)
+    end
+
+    # Find Collections with matching Docker image repository+tag pairs.
+    matches = base_search.
+      where(link_class: "docker_image_repo+tag",
+            name: "#{search_term}:#{search_tag || 'latest'}")
+
+    # If that didn't work, find Collections with matching Docker image hashes.
+    if matches.empty?
+      matches = base_search.
+        where("link_class = ? and name LIKE ?",
+              "docker_image_hash", "#{search_term}%")
+    end
+
+    # Select the image that was created most recently.  Note that the
+    # SQL search order and fallback timestamp values are chosen so
+    # that if image timestamps are missing, we use the image with the
+    # newest link.
+    latest_image_link = nil
+    latest_image_timestamp = "1900-01-01T00:00:00Z"
+    matches.find_each do |link|
+      link_timestamp = link.properties.fetch("image_timestamp",
+                                             "1900-01-01T00:00:01Z")
+      if link_timestamp > latest_image_timestamp
+        latest_image_link = link
+        latest_image_timestamp = link_timestamp
+      end
+    end
+    latest_image_link.nil? ? nil : find_by_uuid(latest_image_link.head_uuid)
+  end
 end
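
A hedged Python sketch of the same resolution done client-side with the Python SDK: list docker_image_repo+tag links for repo:tag, pick the newest image_timestamp (falling back to the link's created_at, which only roughly mirrors the Ruby fallback above), and fetch the linked collection. The repository name is a fixture value from this diff; the helper name is illustrative.

    import arvados

    def latest_docker_collection(api, repo, tag='latest'):
        links = api.links().list(
            filters=[['link_class', '=', 'docker_image_repo+tag'],
                     ['name', '=', '{}:{}'.format(repo, tag)]]).execute()['items']
        if not links:
            return None
        best = max(links,
                   key=lambda ln: ln['properties'].get('image_timestamp',
                                                       ln['created_at']))
        return api.collections().get(uuid=best['head_uuid']).execute()

    coll = latest_docker_collection(arvados.api('v1'), 'arvados/apitestfixture')
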
index 51fb7c27832a21eec4193886e720d1c2c3363e23..db0734f226f5a38aa4b7e71b8e77ceeca82a2ae9 100644 (file)
@@ -2,9 +2,11 @@ class Job < ArvadosModel
   include HasUuid
   include KindAndEtag
   include CommonApiTemplate
+  attr_protected :docker_image_locator
   serialize :script_parameters, Hash
   serialize :runtime_constraints, Hash
   serialize :tasks_summary, Hash
+  before_validation :find_docker_image_locator
   before_create :ensure_unique_submit_id
   before_create :ensure_script_version_is_commit
   before_update :ensure_script_version_is_commit
@@ -38,6 +40,7 @@ class Job < ArvadosModel
     t.add :nondeterministic
     t.add :repository
     t.add :supplied_script_version
+    t.add :docker_image_locator
   end
 
   def assert_finished
@@ -98,6 +101,21 @@ class Job < ArvadosModel
     true
   end
 
+  def find_docker_image_locator
+    # Find the Collection that holds the Docker image specified in the
+    # runtime constraints, and store its locator in docker_image_locator.
+    image_search = runtime_constraints['docker_image']
+    image_tag = runtime_constraints['docker_image_tag']
+    if image_search.nil?
+      self.docker_image_locator = nil
+    elsif coll = Collection.for_latest_docker_image(image_search, image_tag)
+      self.docker_image_locator = coll.uuid
+    else
+      errors.add(:docker_image_locator, "Docker image not found")
+      false
+    end
+  end
+
   def dependencies
     deps = {}
     queue = self.script_parameters.values
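
As a usage sketch of the new validation hook (assuming the generated Python client's jobs().create call; the script, repository, and constraint values are the fixture values used by the tests in this diff), a job submitted with only a docker_image runtime constraint comes back with docker_image_locator resolved server-side:

    import arvados

    api = arvados.api('v1')
    job = api.jobs().create(body={'job': {
        'script': 'hash',
        'repository': 'foo',
        'script_version': 'master',
        'script_parameters': {'pattern': 'foobar'},
        'runtime_constraints': {'docker_image': 'arvados/apitestfixture'},
    }}).execute()
    print job['docker_image_locator']  # filled in by find_docker_image_locator
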
index f18c89f732f12a5927630ddc3f9cb5983bea5c68..827dfe158662e5c22109e8b7acc5ae922a5330b4 100644 (file)
@@ -68,6 +68,19 @@ common:
   # crunch-job must be able to stat() it.
   crunch_refresh_trigger: /tmp/crunch_refresh_trigger
 
+  # Maximum number of log events that may be generated by a single job.
+  crunch_limit_log_events_per_job: 65536
+
+  # Maximum number of total bytes that may be logged by a single job.
+  crunch_limit_log_event_bytes_per_job: 67108864
+
+  # These two settings control how frequently log events are flushed
+  # to the database.  If a job generates two or more events within
+  # crunch_log_seconds_between_events, the log data is not flushed
+  # until crunch_log_bytes_per_event has been reached.
+  crunch_log_bytes_per_event: 4096
+  crunch_log_seconds_between_events: 1
+
   # Path to /etc/dnsmasq.d, or false = do not update dnsmasq data.
   dnsmasq_conf_dir: false
 
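
A minimal sketch (an assumed standalone helper, not dispatcher code) of the flush rule these two settings describe, matching the check crunch-dispatch.rb applies later in this diff: flush buffered stderr once it exceeds the byte threshold, or once enough time has passed since the previous flush.

    import time

    def should_flush(buf, last_flush_at,
                     bytes_per_event=4096, seconds_between_events=1):
        return (len(buf) > bytes_per_event or
                time.time() > last_flush_at + seconds_between_events)
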
diff --git a/services/api/db/migrate/20140611173003_add_docker_locator_to_jobs.rb b/services/api/db/migrate/20140611173003_add_docker_locator_to_jobs.rb
new file mode 100644 (file)
index 0000000..3186e56
--- /dev/null
@@ -0,0 +1,5 @@
+class AddDockerLocatorToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :docker_image_locator, :string
+  end
+end
index 1ef80ab670389f3a7f87bc9e9581e44cd618a656..1f9b5019ab60a528ba72434d4d91190783195bef 100644 (file)
@@ -11,8 +11,9 @@
 #
 # It's strongly recommended to check this file into your version control system.
 
+ActiveRecord::Schema.define(:version => 20140611173003) do
+
 
-ActiveRecord::Schema.define(:version => 20140602143352) do
 
   create_table "api_client_authorizations", :force => true do |t|
     t.string   "api_token",                                           :null => false
@@ -195,6 +196,7 @@ ActiveRecord::Schema.define(:version => 20140602143352) do
     t.string   "repository"
     t.boolean  "output_is_persistent",     :default => false, :null => false
     t.string   "supplied_script_version"
+    t.string   "docker_image_locator"
   end
 
   add_index "jobs", ["created_at"], :name => "index_jobs_on_created_at"
index 59e3aff31e692c4003479615017f768624e49f9e..843fc0db66c2550917b10c42b81065ecaf226100 100755 (executable)
@@ -26,8 +26,6 @@ require File.dirname(__FILE__) + '/../config/boot'
 require File.dirname(__FILE__) + '/../config/environment'
 require 'open3'
 
-LOG_BUFFER_SIZE = 4096
-
 class Dispatcher
   include ApplicationHelper
 
@@ -264,7 +262,10 @@ class Dispatcher
         sent_int: 0,
         job_auth: job_auth,
         stderr_buf_to_flush: '',
-        stderr_flushed_at: 0
+        stderr_flushed_at: 0,
+        bytes_logged: 0,
+        events_logged: 0,
+        log_truncated: false
       }
       i.close
       update_node_status
@@ -314,7 +315,8 @@ class Dispatcher
             j[:stderr_buf_to_flush] << pub_msg
           end
 
-          if (LOG_BUFFER_SIZE < j[:stderr_buf_to_flush].size) || ((j[:stderr_flushed_at]+1) < Time.now.to_i)
+          if (Rails.configuration.crunch_log_bytes_per_event < j[:stderr_buf_to_flush].size or
+              (j[:stderr_flushed_at] + Rails.configuration.crunch_log_seconds_between_events < Time.now.to_i))
             write_log j
           end
         end
@@ -448,6 +450,15 @@ class Dispatcher
 
   protected
 
+  def too_many_bytes_logged_for_job(j)
+    return (j[:bytes_logged] + j[:stderr_buf_to_flush].size >
+            Rails.configuration.crunch_limit_log_event_bytes_per_job)
+  end
+
+  def too_many_events_logged_for_job(j)
+    return (j[:events_logged] >= Rails.configuration.crunch_limit_log_events_per_job)
+  end
+
   def did_recently(thing, min_interval)
     @did_recently ||= {}
     if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
@@ -462,11 +473,26 @@ class Dispatcher
   def write_log running_job
     begin
       if (running_job && running_job[:stderr_buf_to_flush] != '')
+        # Truncate logs if they exceed crunch_limit_log_event_bytes_per_job
+        # or crunch_limit_log_events_per_job.
+        if (too_many_bytes_logged_for_job(running_job))
+          return if running_job[:log_truncated]
+          running_job[:log_truncated] = true
+          running_job[:stderr_buf_to_flush] =
+              "Server configured limit reached (crunch_limit_log_event_bytes_per_job: #{Rails.configuration.crunch_limit_log_event_bytes_per_job}). Subsequent logs truncated"
+        elsif (too_many_events_logged_for_job(running_job))
+          return if running_job[:log_truncated]
+          running_job[:log_truncated] = true
+          running_job[:stderr_buf_to_flush] =
+              "Server configured limit reached (crunch_limit_log_events_per_job: #{Rails.configuration.crunch_limit_log_events_per_job}). Subsequent logs truncated"
+        end
         log = Log.new(object_uuid: running_job[:job].uuid,
                       event_type: 'stderr',
                       owner_uuid: running_job[:job].owner_uuid,
                       properties: {"text" => running_job[:stderr_buf_to_flush]})
         log.save!
+        running_job[:bytes_logged] += running_job[:stderr_buf_to_flush].size
+        running_job[:events_logged] += 1
         running_job[:stderr_buf_to_flush] = ''
         running_job[:stderr_flushed_at] = Time.now.to_i
       end
index 201299b8231fa39d4756b8a4c0272f414701d05b..bce7df13ddb25caf3846b3517f2b5bb9af325d3f 100644 (file)
@@ -58,3 +58,14 @@ multilevel_collection_2:
   modified_at: 2014-02-03T17:22:54Z
   updated_at: 2014-02-03T17:22:54Z
   manifest_text: "./dir1/sub1 0:0:a 0:0:b\n./dir2/sub2 0:0:c 0:0:d\n"
+
+docker_image:
+  # This Collection has links with Docker image metadata.
+  uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  owner_uuid: qr1hi-tpzed-000000000000000
+  created_at: 2014-06-11T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-06-11T17:22:54Z
+  updated_at: 2014-06-11T17:22:54Z
+  manifest_text: ". d21353cfe035e3e384563ee55eadbb2f+67108864 5c77a43e329b9838cbec18ff42790e57+55605760 0:122714624:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\n"
index 1729bd227413c1d410b3a3be24825beecf723be2..ae2511b51295b42b642bd64ace9b138d570360d9 100644 (file)
@@ -408,7 +408,7 @@ has_symbol_keys_in_database_somehow:
   uuid: zzzzz-o0j2j-enl1wg58310loc6
   owner_uuid: zzzzz-tpzed-000000000000000
   created_at: 2014-05-28 16:24:02.314722162 Z
-  modified_by_client_uuid: 
+  modified_by_client_uuid:
   modified_by_user_uuid: zzzzz-tpzed-000000000000000
   modified_at: 2014-05-28 16:24:02.314484982 Z
   tail_uuid: ~
@@ -440,3 +440,96 @@ bug2931_link_with_null_head_uuid:
   tail_uuid: ~
   head_uuid: ~
   properties: {}
+
+active_user_permission_to_docker_image_collection:
+  uuid: zzzzz-o0j2j-dp1d8395ldqw33s
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  properties: {}
+
+docker_image_collection_hash:
+  uuid: zzzzz-o0j2j-dockercollhasha
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_hash
+  name: d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678
+  tail_uuid: ~
+  head_uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  properties:
+    image_timestamp: 2014-06-10T14:30:00.184019565Z
+
+docker_image_collection_repository:
+  uuid: zzzzz-o0j2j-dockercollrepos
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_repository
+  name: arvados/apitestfixture
+  tail_uuid: ~
+  head_uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  properties:
+    image_timestamp: 2014-06-10T14:30:00.184019565Z
+
+docker_image_collection_tag:
+  uuid: zzzzz-o0j2j-dockercolltagbb
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_repo+tag
+  name: arvados/apitestfixture:latest
+  tail_uuid: ~
+  head_uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  properties:
+    image_timestamp: 2014-06-10T14:30:00.184019565Z
+
+docker_image_collection_tag2:
+  uuid: zzzzz-o0j2j-dockercolltagbc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_repo+tag
+  name: arvados/apitestfixture:june10
+  tail_uuid: ~
+  head_uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  properties:
+    image_timestamp: 2014-06-10T14:30:00.184019565Z
+
+ancient_docker_image_collection_hash:
+  # This image helps test that searches for Docker images find
+  # the latest available image: the hash is the same as
+  # docker_image_collection_hash, but it points to a different
+  # Collection and has an older image timestamp.
+  uuid: zzzzz-o0j2j-dockercollhashz
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-12 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-12 14:30:00.184019565 Z
+  updated_at: 2014-06-12 14:30:00.183829316 Z
+  link_class: docker_image_hash
+  name: d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678
+  tail_uuid: ~
+  head_uuid: b519d9cb706a29fc7ea24dbea2f05851+249025
+  properties:
+    image_timestamp: 2010-06-10T14:30:00.184019565Z
index 13222401eff7107e5816262d63217263c122b779..80682a371cf4b790be95454f0589773610c7e569 100644 (file)
@@ -1,3 +1,8 @@
+crunch_dispatch_test:
+  uuid: zzzzz-s0uqq-382brsig8rp3665
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  name: crunch_dispatch_test
+
 foo:
   uuid: zzzzz-s0uqq-382brsig8rp3666
   owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
index 955b1f12a531779cd651e97457833096d9763868..788cd83c797be3a8d7b9940d30f42f454becbea7 100644 (file)
@@ -1,6 +1,14 @@
 require 'test_helper'
 load 'test/functional/arvados/v1/git_setup.rb'
 
+# NOTE: calling Commit.find_commit_range(user, nil, nil, 'rev') will produce
+# an error message "fatal: bad object 'rev'" on stderr if 'rev' does not exist
+# in a given repository.  Many of these tests report such errors; their presence
+# does not represent a fatal condition.
+#
+# TODO(twp): consider better error handling of these messages, or
+# decide to abandon it.
+
 class Arvados::V1::CommitsControllerTest < ActionController::TestCase
   fixtures :repositories, :users
 
@@ -15,8 +23,9 @@ class Arvados::V1::CommitsControllerTest < ActionController::TestCase
     assert_equal ['31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
 
   #test "test_branch1" do
+    # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
     a = Commit.find_commit_range(users(:active), nil, nil, 'master', nil)
-    assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57'], a
+    assert_equal ['f35f99b7d32bac257f5989df02b9f12ee1a9b0d6', '077ba2ad3ea24a929091a9e6ce545c93199b8e57'], a
 
   #test "test_branch2" do
     a = Commit.find_commit_range(users(:active), 'foo', nil, 'b1', nil)
@@ -33,10 +42,13 @@ class Arvados::V1::CommitsControllerTest < ActionController::TestCase
     assert_equal nil, a
 
   #test "test_multi_revision" do
+    # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
     a = Commit.find_commit_range(users(:active), nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', nil)
     assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '4fe459abe02d9b365932b8f5dc419439ab4e2577', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
 
   #test "test_tag" do
+    # complains "fatal: ambiguous argument 'tag1': unknown revision or path
+    # not in the working tree."
     a = Commit.find_commit_range(users(:active), nil, 'tag1', 'master', nil)
     assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '4fe459abe02d9b365932b8f5dc419439ab4e2577'], a
 
@@ -45,6 +57,7 @@ class Arvados::V1::CommitsControllerTest < ActionController::TestCase
     assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
 
   #test "test_multi_revision_tagged_exclude" do
+    # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
     a = Commit.find_commit_range(users(:active), nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', ['tag1'])
     assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
 
@@ -70,11 +83,13 @@ class Arvados::V1::CommitsControllerTest < ActionController::TestCase
       assert_equal nil, a
 
       # invalid input to 'excludes'
+      # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
       a = Commit.find_commit_range(users(:active), nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["4fe459abe02d9b365932b8f5dc419439ab4e2577 ; touch #{touchdir}/uh_oh"])
       assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
       assert_equal nil, a
 
       # invalid input to 'excludes'
+      # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
       a = Commit.find_commit_range(users(:active), nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["$(uname>#{touchdir}/uh_oh)"])
       assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
       assert_equal nil, a
diff --git a/services/api/test/integration/crunch_dispatch_test.rb b/services/api/test/integration/crunch_dispatch_test.rb
new file mode 100644 (file)
index 0000000..ee1bd9f
--- /dev/null
@@ -0,0 +1,38 @@
+require 'test_helper'
+load 'test/functional/arvados/v1/git_setup.rb'
+
+class CrunchDispatchTest < ActionDispatch::IntegrationTest
+  include GitSetup
+
+  fixtures :all
+
+  @@crunch_dispatch_pid = nil
+
+  def launch_crunch_dispatch
+    @@crunch_dispatch_pid = Process.fork {
+      ENV['PATH'] = ENV['HOME'] + '/arvados/services/crunch:' + ENV['PATH']
+      exec(ENV['HOME'] + '/arvados/services/api/script/crunch-dispatch.rb')
+    }
+  end
+
+  teardown do
+    if @@crunch_dispatch_pid
+      Process.kill "TERM", @@crunch_dispatch_pid
+      Process.wait
+      @@crunch_dispatch_pid = nil
+    end
+  end
+
+  test "job runs" do
+    post "/arvados/v1/jobs", {
+      format: "json",
+      job: {
+        script: "log",
+        repository: "crunch_dispatch_test",
+        script_version: "f35f99b7d32bac257f5989df02b9f12ee1a9b0d6",
+        script_parameters: "{}"
+      }
+    }, auth(:admin)
+    assert_response :success
+  end
+end
index b5f09df7c73f05cff483a9a06bacaea13450342a..20575654e6ac6c97cb9ef1e80f05024c3e2d7823 100644 (file)
@@ -35,6 +35,16 @@ class SelectTest < ActionDispatch::IntegrationTest
     end
   end
 
+  def assert_link_classes_ascend(current_class, prev_class)
+    # Databases and Ruby don't always agree about string ordering with
+    # punctuation.  If the strings aren't ascending normally, check
+    # that they're equal up to punctuation.
+    if current_class < prev_class
+      class_prefix = current_class.split(/\W/).first
+      assert prev_class.start_with?(class_prefix)
+    end
+  end
+
   test "select two columns with order" do
     get "/arvados/v1/links", {:format => :json, :select => ['link_class', 'uuid'], :order => ['link_class asc', "uuid desc"]}, auth(:active)
     assert_response :success
@@ -49,7 +59,7 @@ class SelectTest < ActionDispatch::IntegrationTest
         prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
       end
 
-      assert i['link_class'] >= prev_link_class
+      assert_link_classes_ascend(i['link_class'], prev_link_class)
       assert i['uuid'] < prev_uuid
 
       prev_link_class = i['link_class']
@@ -71,7 +81,7 @@ class SelectTest < ActionDispatch::IntegrationTest
         prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
       end
 
-      assert i['link_class'] >= prev_link_class
+      assert_link_classes_ascend(i['link_class'], prev_link_class)
       assert i['uuid'] < prev_uuid
 
       prev_link_class = i['link_class']
index ed30fdb5df24a40db878a1869e6e7b5ca26a4573..269018d807ab7de16bdb05e243a5308aadbff834 100644 (file)
@@ -13,7 +13,7 @@ class SerializedEncodingTest < ActionDispatch::IntegrationTest
 
     job: {
       repository: 'foo',
-      runtime_constraints: {docker_image: 'arvados/jobs'},
+      runtime_constraints: {docker_image: 'arvados/apitestfixture'},
       script: 'hash',
       script_version: 'master',
       script_parameters: {pattern: 'foobar'},
index fdd7db6a2bc1f3cb25c517e28d8730e27aeaf474..6845160c47215c4d021bc57a6cf0182a2c74a114 100644 (file)
Binary files a/services/api/test/test.git.tar and b/services/api/test/test.git.tar differ
index 5079316934eac12b586c564b52a8a8194257d85c..5f53b2ab9bc07be2eeefff75f06d08be46a40ab0 100644 (file)
@@ -1,7 +1,109 @@
 require 'test_helper'
 
 class JobTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
+  BAD_COLLECTION = "#{'f' * 32}+0"
+
+  setup do
+    set_user_from_auth :active
+  end
+
+  test "Job without Docker image doesn't get locator" do
+    job = Job.new
+    assert job.valid?
+    assert_nil job.docker_image_locator
+  end
+
+  { 'name' => [:links, :docker_image_collection_repository, :name],
+    'hash' => [:links, :docker_image_collection_hash, :name],
+    'locator' => [:collections, :docker_image, :uuid],
+  }.each_pair do |spec_type, (fixture_type, fixture_name, fixture_attr)|
+    test "Job initialized with Docker image #{spec_type} gets locator" do
+      image_spec = send(fixture_type, fixture_name).send(fixture_attr)
+      job = Job.new(runtime_constraints: {'docker_image' => image_spec})
+      assert(job.valid?, "Docker image #{spec_type} was invalid")
+      assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
+    end
+
+    test "Job modified with Docker image #{spec_type} gets locator" do
+      job = Job.new
+      assert job.valid?
+      assert_nil job.docker_image_locator
+      image_spec = send(fixture_type, fixture_name).send(fixture_attr)
+      job.runtime_constraints['docker_image'] = image_spec
+      assert(job.valid?, "modified Docker image #{spec_type} was invalid")
+      assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
+    end
+  end
+
+  test "removing a Docker runtime constraint removes the locator" do
+    image_locator = collections(:docker_image).uuid
+    job = Job.new(runtime_constraints: {'docker_image' => image_locator})
+    assert job.valid?
+    assert_equal(image_locator, job.docker_image_locator)
+    job.runtime_constraints = {}
+    assert(job.valid?, "clearing runtime constraints made the Job invalid")
+    assert_nil job.docker_image_locator
+  end
+
+  test "locate a Docker image with a repository + tag" do
+    image_repo, image_tag =
+      links(:docker_image_collection_tag2).name.split(':', 2)
+    job = Job.new(runtime_constraints:
+                  {'docker_image' => image_repo,
+                    'docker_image_tag' => image_tag})
+    assert(job.valid?, "Job with Docker tag search invalid")
+    assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
+  end
+
+  test "can't locate a Docker image with a nonexistent tag" do
+    image_repo = links(:docker_image_collection_repository).name
+    image_tag = '__nonexistent tag__'
+    job = Job.new(runtime_constraints:
+                  {'docker_image' => image_repo,
+                    'docker_image_tag' => image_tag})
+    assert(job.invalid?, "Job with bad Docker tag valid")
+  end
+
+  test "locate a Docker image with a partial hash" do
+    image_hash = links(:docker_image_collection_hash).name[0..24]
+    job = Job.new(runtime_constraints: {'docker_image' => image_hash})
+    assert(job.valid?, "Job with partial Docker image hash failed")
+    assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
+  end
+
+  { 'name' => 'arvados_test_nonexistent',
+    'hash' => 'f' * 64,
+    'locator' => BAD_COLLECTION,
+  }.each_pair do |spec_type, image_spec|
+    test "Job validation fails with nonexistent Docker image #{spec_type}" do
+      job = Job.new(runtime_constraints: {'docker_image' => image_spec})
+      assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid")
+    end
+  end
+
+  test "Job validation fails with non-Docker Collection constraint" do
+    job = Job.new(runtime_constraints:
+                  {'docker_image' => collections(:foo_file).uuid})
+    assert(job.invalid?, "non-Docker Collection constraint was valid")
+  end
+
+  test "can't create Job with Docker image locator" do
+    begin
+      job = Job.new(docker_image_locator: BAD_COLLECTION)
+    rescue ActiveModel::MassAssignmentSecurity::Error
+      # Test passes - expected attribute protection
+    else
+      assert_nil job.docker_image_locator
+    end
+  end
+
+  test "can't assign Docker image locator to Job" do
+    job = Job.new
+    begin
+      Job.docker_image_locator = BAD_COLLECTION
+    rescue NoMethodError
+      # Test passes - expected attribute protection
+    end
+    assert_nil job.docker_image_locator
+  end
 end
index 56de1e16d7f0399e77bf283db3d7ecc6724e618c..e3a2ce91945f511b10cfa697c0dd5f07c9ea409d 100644 (file)
@@ -297,14 +297,16 @@ func (this GetBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Reques
                blocklen, _, err = kc.AuthorizedAsk(hash, locator.Signature, locator.Timestamp)
        }
 
-       resp.Header().Set("Content-Length", fmt.Sprint(blocklen))
+       if blocklen > 0 {
+               resp.Header().Set("Content-Length", fmt.Sprint(blocklen))
+       }
 
        switch err {
        case nil:
                if reader != nil {
                        n, err2 := io.Copy(resp, reader)
                        if n != blocklen {
-                               log.Printf("%s: %s %s mismatched return %v with Content-Length %v error", GetRemoteAddress(req), req.Method, hash, n, blocklen, err.Error())
+                               log.Printf("%s: %s %s mismatched return %v with Content-Length %v error %v", GetRemoteAddress(req), req.Method, hash, n, blocklen, err2)
                        } else if err2 == nil {
                                log.Printf("%s: %s %s success returned %v bytes", GetRemoteAddress(req), req.Method, hash, n)
                        } else {