on('ready ajax:complete', function() {
$('[data-infinite-scroller]').each(function() {
var $scroller = $($(this).attr('data-infinite-scroller'));
- if (!$scroller.hasClass('smart-scroll'))
+ if (!$scroller.hasClass('smart-scroll') &&
+ 'scroll' != $scroller.css('overflow-y'))
$scroller = $(window);
$scroller.
addClass('infinite-scroller').
}
function smart_scroll_fixup(s) {
- //console.log(s);
+
if (s != null && s.type == 'shown.bs.tab') {
s = [s.target];
}
else {
s = $(".smart-scroll");
}
- //console.log(s);
- for (var i = 0; i < s.length; i++) {
- a = s[i];
- var h = window.innerHeight - a.getBoundingClientRect().top - 20;
+
+ s.each(function(i, a) {
+ var h = window.innerHeight - $(a).offset().top;
height = String(h) + "px";
- a.style['max-height'] = height;
- }
+ $(a).css('max-height', height);
+ });
}
-$(window).on('load resize scroll', smart_scroll_fixup);
+$(window).on('load ready resize scroll ajax:complete', smart_scroll_fixup);
.smart-scroll {
overflow: auto;
+ margin-bottom: -15px;
}
.inline-progress-container div.progress {
@rows = [] # each is {name: S, components: [...]}
- # Build a table: x=pipeline y=component
- @objects.each_with_index do |pi, pi_index|
- pipeline_jobs(pi).each do |component|
- # Find a cell with the same name as this component but no
- # entry for this pipeline
- target_row = nil
- @rows.each_with_index do |row, row_index|
- if row[:name] == component[:name] and !row[:components][pi_index]
- target_row = row
+ if params['tab_pane'] == "Compare" or params['tab_pane'].nil?
+ # Build a table: x=pipeline y=component
+ @objects.each_with_index do |pi, pi_index|
+ pipeline_jobs(pi).each do |component|
+ # Find a cell with the same name as this component but no
+ # entry for this pipeline
+ target_row = nil
+ @rows.each_with_index do |row, row_index|
+ if row[:name] == component[:name] and !row[:components][pi_index]
+ target_row = row
+ end
end
+ if !target_row
+ target_row = {name: component[:name], components: []}
+ @rows << target_row
+ end
+ target_row[:components][pi_index] = component
end
- if !target_row
- target_row = {name: component[:name], components: []}
- @rows << target_row
- end
- target_row[:components][pi_index] = component
end
- end
- @rows.each do |row|
- # Build a "normal" pseudo-component for this row by picking the
- # most common value for each attribute. If all values are
- # equally common, there is no "normal".
- normal = {} # attr => most common value
- highscore = {} # attr => how common "normal" is
- score = {} # attr => { value => how common }
- row[:components].each do |pj|
- next if pj.nil?
- pj.each do |k,v|
- vstr = for_comparison v
- score[k] ||= {}
- score[k][vstr] = (score[k][vstr] || 0) + 1
- highscore[k] ||= 0
- if score[k][vstr] == highscore[k]
- # tie for first place = no "normal"
- normal.delete k
- elsif score[k][vstr] == highscore[k] + 1
- # more pipelines have v than anything else
- highscore[k] = score[k][vstr]
- normal[k] = vstr
+ @rows.each do |row|
+ # Build a "normal" pseudo-component for this row by picking the
+ # most common value for each attribute. If all values are
+ # equally common, there is no "normal".
+ normal = {} # attr => most common value
+ highscore = {} # attr => how common "normal" is
+ score = {} # attr => { value => how common }
+ row[:components].each do |pj|
+ next if pj.nil?
+ pj.each do |k,v|
+ vstr = for_comparison v
+ score[k] ||= {}
+ score[k][vstr] = (score[k][vstr] || 0) + 1
+ highscore[k] ||= 0
+ if score[k][vstr] == highscore[k]
+ # tie for first place = no "normal"
+ normal.delete k
+ elsif score[k][vstr] == highscore[k] + 1
+ # more pipelines have v than anything else
+ highscore[k] = score[k][vstr]
+ normal[k] = vstr
+ end
end
end
- end
- # Add a hash in component[:is_normal]: { attr => is_the_value_normal? }
- row[:components].each do |pj|
- next if pj.nil?
- pj[:is_normal] = {}
- pj.each do |k,v|
- pj[:is_normal][k] = (normal.has_key?(k) && normal[k] == for_comparison(v))
+ # Add a hash in component[:is_normal]: { attr => is_the_value_normal? }
+ row[:components].each do |pj|
+ next if pj.nil?
+ pj[:is_normal] = {}
+ pj.each do |k,v|
+ pj[:is_normal][k] = (normal.has_key?(k) && normal[k] == for_comparison(v))
+ end
end
end
end
- provenance, pips = graph(@objects)
+ if params['tab_pane'] == "Graph"
+ provenance, pips = graph(@objects)
- @pipelines = @objects
+ @pipelines = @objects
- if provenance
- @prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
- :request => request,
- :all_script_parameters => true,
- :combine_jobs => :script_and_version,
- :script_version_nodes => true,
- :pips => pips }
+ if provenance
+ @prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
+ :request => request,
+ :all_script_parameters => true,
+ :combine_jobs => :script_and_version,
+ :script_version_nodes => true,
+ :pips => pips }
+ end
end
+
@object = @objects.first
+
+ show
end
def show_pane_list
if (!tab_pane_valid_state[pane]) {
tab_pane_valid_state[pane] = true;
$(document).trigger('ajax:send');
- $.ajax('<%=j url_for() %>?tab_pane='+pane, {dataType: 'html', type: 'GET'}).
+ $.ajax('<%=j url_for() %>?<%= raw(controller.request.query_string) %>&tab_pane='+pane, {dataType: 'html', type: 'GET'}).
done(function(data, status, jqxhr) {
$('#' + pane + ' > div > div').html(data);
$(document).trigger('ajax:complete');
<% else %>
data-object-uuid="<%= @object.uuid %>"
<% end %>
->
+ >
<% content_for :js do %>
<% if i == 0 %>
-<% percent_total_tasks = 100 / (j[:tasks_summary][:done] + j[:tasks_summary][:running] + j[:tasks_summary][:failed] + j[:tasks_summary][:todo]) rescue 0 %>
+<%
+ failed = j[:tasks_summary][:failed] || 0 rescue 0
+ done = j[:tasks_summary][:done] || 0 rescue 0
+ running = j[:tasks_summary][:running] || 0 rescue 0
+ todo = j[:tasks_summary][:todo] || 0 rescue 0
-<% if defined? scaleby %>
- <% percent_total_tasks *= scaleby %>
-<% end %>
+ if j[:success] == false and done + running + failed == 0
+ # The job failed but no tasks were ever started (i.e. crunch-dispatch
+ # was unable to start the job). Display a full 100% failed progress bar.
+ failed_percent = 100
+ success_percent = 0
+ running_percent = 0
+ elsif done + running + failed + todo == 0
+ # No tasks were ever created for this job;
+ # render an empty progress bar.
+ failed_percent = 0
+ success_percent = 0
+ running_percent = 0
+ else
+ percent_total_tasks = 100.0 / (done + running + failed + todo)
+ if defined? scaleby
+ percent_total_tasks *= scaleby
+ end
+ failed_percent = (failed * percent_total_tasks).ceil
+ success_percent = (done * percent_total_tasks).ceil
+ running_percent = (running * percent_total_tasks).ceil
+ end
+%>
<% if not defined? scaleby %>
<div class="progress">
<% end %>
-<span class="progress-bar progress-bar-success" style="width: <%= j[:tasks_summary][:done] * percent_total_tasks rescue 0 %>%;">
+<span class="progress-bar progress-bar-success" style="width: <%= success_percent %>%;">
</span>
-<span class="progress-bar progress-bar-danger" style="width: <%= j[:tasks_summary][:failed] * percent_total_tasks rescue 0 %>%;">
+<span class="progress-bar progress-bar-danger" style="width: <%= failed_percent %>%;">
</span>
-<span class="progress-bar" style="width: <%= j[:tasks_summary][:running] * percent_total_tasks rescue 0 %>%;">
+<span class="progress-bar" style="width: <%= running_percent %>%;">
</span>
<% if not defined? scaleby %>
});
<% end %>
-<div id="_<%= divId %>_container" style="padding-top: 41px; margin-top: -41px">
+<div id="_<%= divId %>_container">
<div style="text-align: right">
<a style="cursor: pointer"><span class="glyphicon glyphicon-zoom-out" onclick="graph_zoom('<%= divId %>', '<%= svgId %>', .9)"></span></a>
<a style="cursor: pointer"><span class="glyphicon glyphicon-zoom-in" onclick="graph_zoom('<%= divId %>', '<%= svgId %>', 1./.9)"></span></a>
<span id="_<%= divId %>_center" style="padding-left: 0px"></span>
<%= raw(svg) %>
</div>
- <div id="_<%= divId %>_padding" style="padding-bottom: 1em"></div>
</div>
when 'tag'
ARGV.shift
exec `which arv-tag`.strip, *ARGV
+when 'ws'
+ ARGV.shift
+ exec `which arv-ws`.strip, *ARGV
end
ENV['ARVADOS_API_VERSION'] ||= 'v1'
--- /dev/null
+../../python/bin/arv-ws
\ No newline at end of file
# If this job requires a Docker image, install that.
my $docker_bin = "/usr/bin/docker.io";
-my $docker_image = $Job->{runtime_constraints}->{docker_image} || "";
-if ($docker_image) {
+my ($docker_locator, $docker_hash);
+if ($docker_locator = $Job->{docker_image_locator}) {
+ $docker_hash = find_docker_hash($docker_locator);
+ if (!$docker_hash)
+ {
+ croak("No Docker image hash found from locator $docker_locator");
+ }
+ my $docker_install_script = qq{
+if ! $docker_bin images -q --no-trunc | grep -qxF \Q$docker_hash\E; then
+ arv-get \Q$docker_locator/$docker_hash.tar\E | $docker_bin load
+fi
+};
my $docker_pid = fork();
if ($docker_pid == 0)
{
- srun (["srun", "--nodelist=" . join(' ', @node)],
- [$docker_bin, 'pull', $docker_image]);
+ srun (["srun", "--nodelist=" . join(',', @node)],
+ ["/bin/sh", "-ec", $docker_install_script]);
exit ($?);
}
while (1)
freeze_if_want_freeze ($docker_pid);
select (undef, undef, undef, 0.1);
}
- # If the Docker image was specified as a hash, pull will fail.
- # Ignore that error. We'll see what happens when we try to run later.
- if (($? != 0) && ($docker_image !~ /^[0-9a-fA-F]{5,64}$/))
+ if ($? != 0)
{
- croak("Installing Docker image $docker_image returned exit code $?");
+ croak("Installing Docker image from $docker_locator returned exit code $?");
}
}
"&& perl -";
}
$command .= "&& exec arv-mount --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
- if ($docker_image)
+ if ($docker_hash)
{
$command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$ENV{TASK_WORK}/docker.cid -poll=10000 ";
$command .= "$docker_bin run -i -a stdin -a stdout -a stderr --cidfile=$ENV{TASK_WORK}/docker.cid ";
}
while (my ($env_key, $env_val) = each %ENV)
{
- if ($env_key =~ /^(JOB|TASK)_/) {
+ if ($env_key =~ /^(ARVADOS|JOB|TASK)_/) {
$command .= "-e \Q$env_key=$env_val\E ";
}
}
- $command .= "\Q$docker_image\E ";
+ $command .= "\Q$docker_hash\E ";
} else {
$command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 "
}
}
}
+sub find_docker_hash {
+ # Given a Keep locator, search for a matching link to find the Docker hash
+ # of the stored image.
+ my $locator = shift;
+ my $links_result = $arv->{links}->{list}->execute(
+ filters => [["head_uuid", "=", $locator],
+ ["link_class", "=", "docker_image_hash"]],
+ limit => 1);
+ my $docker_hash;
+ foreach my $link (@{$links_result->{items}}) {
+ $docker_hash = lc($link->{name});
+ }
+ return $docker_hash;
+}
+
__DATA__
#!/usr/bin/perl
--- /dev/null
+require 'minitest/autorun'
+
+class TestArvWs < Minitest::Test
+ def setup
+ end
+
+ def test_arv_ws_get_help
+ out, err = capture_subprocess_io do
+ system ('arv-ws -h')
+ end
+ assert_equal '', err
+ end
+
+ def test_arv_ws_such_option
+ out, err = capture_subprocess_io do
+ system ('arv-ws --junk')
+ end
+ refute_equal '', err
+ end
+
+end
make_link('docker_image_hash', image_hash, **link_base)
if not image_hash.startswith(args.image.lower()):
make_link('docker_image_repository', args.image, **link_base)
- make_link('docker_image_tag', args.tag, **link_base)
+ make_link('docker_image_repo+tag', '{}:{}'.format(args.image, args.tag),
+ **link_base)
# Clean up.
image_file.close()
import ssl
import re
import config
+import logging
class EventClient(WebSocketClient):
def __init__(self, url, filters, on_event):
def received_message(self, m):
self.on_event(json.loads(str(m)))
+ def close_connection(self):
+ try:
+ self.sock.shutdown(socket.SHUT_RDWR)
+ self.sock.close()
+ except:
+ pass
+
def subscribe(api, filters, on_event):
- url = "{}?api_token={}".format(api._rootDesc['websocketUrl'], config.get('ARVADOS_API_TOKEN'))
- ws = EventClient(url, filters, on_event)
- ws.connect()
- return ws
+ ws = None
+ try:
+ url = "{}?api_token={}".format(api._rootDesc['websocketUrl'], config.get('ARVADOS_API_TOKEN'))
+ ws = EventClient(url, filters, on_event)
+ ws.connect()
+ return ws
+ except:
+ logging.exception('')
+ if (ws):
+ ws.close_connection()
+ raise
--- /dev/null
+#!/usr/bin/env python
+
+import sys
+import logging
+import argparse
+import arvados
+from arvados.events import subscribe
+
+parser = argparse.ArgumentParser()
+parser.add_argument('-u', '--uuid', type=str, default="")
+args = parser.parse_args()
+
+filters = []
+if len(args.uuid)>0: filters = [ ['object_uuid', '=', args.uuid] ]
+
+api = arvados.api('v1', cache=False)
+
+def on_message(ev):
+ print "\n", ev
+
+ws = None
+try:
+ ws = subscribe(api, filters, lambda ev: on_message(ev))
+ ws.run_forever()
+except KeyboardInterrupt:
+ print '' # don't log it
+except:
+ logging.exception('')
+ if (ws):
+ ws.close_connection()
'bin/arv-ls',
'bin/arv-normalize',
'bin/arv-put',
+ 'bin/arv-ws',
],
install_requires=[
'python-gflags',
# Get rid of troublesome nils
users_list.compact!
+ # Load optional keyword arguments, if they exist.
+ if users_list.last.is_a? Hash
+ kwargs = users_list.pop
+ else
+ kwargs = {}
+ end
+
# Check if any of the users are admin. If so, we're done.
if users_list.select { |u| u.is_admin }.empty?
collect { |uuid| sanitize(uuid) }.join(', ')
sql_conds = []
sql_params = []
+ sql_table = kwargs.fetch(:table_name, table_name)
or_object_uuid = ''
# This row is owned by a member of users_list, or owned by a group
# to this row, or to the owner of this row (see join() below).
permitted_uuids = "(SELECT head_uuid FROM links WHERE link_class='permission' AND tail_uuid IN (#{sanitized_uuid_list}))"
- sql_conds += ["#{table_name}.owner_uuid in (?)",
- "#{table_name}.uuid in (?)",
- "#{table_name}.uuid IN #{permitted_uuids}"]
+ sql_conds += ["#{sql_table}.owner_uuid in (?)",
+ "#{sql_table}.uuid in (?)",
+ "#{sql_table}.uuid IN #{permitted_uuids}"]
sql_params += [uuid_list, user_uuids]
- if self == Link and users_list.any?
+ if sql_table == "links" and users_list.any?
# This row is a 'permission' or 'resources' link class
# The uuid for a member of users_list is referenced in either the head
# or tail of the link
- sql_conds += ["(#{table_name}.link_class in (#{sanitize 'permission'}, #{sanitize 'resources'}) AND (#{table_name}.head_uuid IN (?) OR #{table_name}.tail_uuid IN (?)))"]
+ sql_conds += ["(#{sql_table}.link_class in (#{sanitize 'permission'}, #{sanitize 'resources'}) AND (#{sql_table}.head_uuid IN (?) OR #{sql_table}.tail_uuid IN (?)))"]
sql_params += [user_uuids, user_uuids]
end
- if self == Log and users_list.any?
+ if sql_table == "logs" and users_list.any?
# Link head points to the object described by this row
- sql_conds += ["#{table_name}.object_uuid IN #{permitted_uuids}"]
+ sql_conds += ["#{sql_table}.object_uuid IN #{permitted_uuids}"]
# This object described by this row is owned by this user, or owned by a group readable by this user
- sql_conds += ["#{table_name}.object_owner_uuid in (?)"]
+ sql_conds += ["#{sql_table}.object_owner_uuid in (?)"]
sql_params += [uuid_list]
end
raise "uuid #{uuid} has no hash part" if !hash_part
[hash_part, size_part].compact.join '+'
end
+
+ def self.for_latest_docker_image(search_term, search_tag=nil, readers=nil)
+ readers ||= [Thread.current[:user]]
+ base_search = Link.
+ readable_by(*readers).
+ readable_by(*readers, table_name: "collections").
+ joins("JOIN collections ON links.head_uuid = collections.uuid").
+ order("links.created_at DESC")
+
+ # If the search term is a Collection locator with an associated
+ # Docker image hash link, return that Collection.
+ coll_matches = base_search.
+ where(link_class: "docker_image_hash", collections: {uuid: search_term})
+ if match = coll_matches.first
+ return find_by_uuid(match.head_uuid)
+ end
+
+ # Find Collections with matching Docker image repository+tag pairs.
+ matches = base_search.
+ where(link_class: "docker_image_repo+tag",
+ name: "#{search_term}:#{search_tag || 'latest'}")
+
+ # If that didn't work, find Collections with matching Docker image hashes.
+ if matches.empty?
+ matches = base_search.
+ where("link_class = ? and name LIKE ?",
+ "docker_image_hash", "#{search_term}%")
+ end
+
+ # Select the image that was created most recently. Note that the
+ # SQL search order and fallback timestamp values are chosen so
+ # that if image timestamps are missing, we use the image with the
+ # newest link.
+ latest_image_link = nil
+ latest_image_timestamp = "1900-01-01T00:00:00Z"
+ matches.find_each do |link|
+ link_timestamp = link.properties.fetch("image_timestamp",
+ "1900-01-01T00:00:01Z")
+ if link_timestamp > latest_image_timestamp
+ latest_image_link = link
+ latest_image_timestamp = link_timestamp
+ end
+ end
+ latest_image_link.nil? ? nil : find_by_uuid(latest_image_link.head_uuid)
+ end
end
include HasUuid
include KindAndEtag
include CommonApiTemplate
+ attr_protected :docker_image_locator
serialize :script_parameters, Hash
serialize :runtime_constraints, Hash
serialize :tasks_summary, Hash
+ before_validation :find_docker_image_locator
before_create :ensure_unique_submit_id
before_create :ensure_script_version_is_commit
before_update :ensure_script_version_is_commit
t.add :nondeterministic
t.add :repository
t.add :supplied_script_version
+ t.add :docker_image_locator
end
def assert_finished
true
end
+ def find_docker_image_locator
+ # Find the Collection that holds the Docker image specified in the
+ # runtime constraints, and store its locator in docker_image_locator.
+ image_search = runtime_constraints['docker_image']
+ image_tag = runtime_constraints['docker_image_tag']
+ if image_search.nil?
+ self.docker_image_locator = nil
+ elsif coll = Collection.for_latest_docker_image(image_search, image_tag)
+ self.docker_image_locator = coll.uuid
+ else
+ errors.add(:docker_image_locator, "Docker image not found")
+ false
+ end
+ end
+
def dependencies
deps = {}
queue = self.script_parameters.values
# crunch-job must be able to stat() it.
crunch_refresh_trigger: /tmp/crunch_refresh_trigger
+ # Maximum number of log events that may be generated by a single job.
+ crunch_limit_log_events_per_job: 65536
+
+ # Maximum number of total bytes that may be logged by a single job.
+ crunch_limit_log_event_bytes_per_job: 67108864
+
+ # These two settings control how frequently log events are flushed
+ # to the database. If a job generates two or more events within
+ # crunch_log_seconds_between_events, the log data is not flushed
+ # until crunch_log_bytes_per_event has been reached.
+ crunch_log_bytes_per_event: 4096
+ crunch_log_seconds_between_events: 1
+
# Path to /etc/dnsmasq.d, or false = do not update dnsmasq data.
dnsmasq_conf_dir: false
--- /dev/null
+class AddDockerLocatorToJobs < ActiveRecord::Migration
+ def change
+ add_column :jobs, :docker_image_locator, :string
+ end
+end
#
# It's strongly recommended to check this file into your version control system.
+ActiveRecord::Schema.define(:version => 20140611173003) do
+
-ActiveRecord::Schema.define(:version => 20140602143352) do
create_table "api_client_authorizations", :force => true do |t|
t.string "api_token", :null => false
t.string "repository"
t.boolean "output_is_persistent", :default => false, :null => false
t.string "supplied_script_version"
+ t.string "docker_image_locator"
end
add_index "jobs", ["created_at"], :name => "index_jobs_on_created_at"
require File.dirname(__FILE__) + '/../config/environment'
require 'open3'
-LOG_BUFFER_SIZE = 4096
-
class Dispatcher
include ApplicationHelper
sent_int: 0,
job_auth: job_auth,
stderr_buf_to_flush: '',
- stderr_flushed_at: 0
+ stderr_flushed_at: 0,
+ bytes_logged: 0,
+ events_logged: 0,
+ log_truncated: false
}
i.close
update_node_status
j[:stderr_buf_to_flush] << pub_msg
end
- if (LOG_BUFFER_SIZE < j[:stderr_buf_to_flush].size) || ((j[:stderr_flushed_at]+1) < Time.now.to_i)
+ if (Rails.configuration.crunch_log_bytes_per_event < j[:stderr_buf_to_flush].size or
+ (j[:stderr_flushed_at] + Rails.configuration.crunch_log_seconds_between_events < Time.now.to_i))
write_log j
end
end
protected
+ def too_many_bytes_logged_for_job(j)
+ return (j[:bytes_logged] + j[:stderr_buf_to_flush].size >
+ Rails.configuration.crunch_limit_log_event_bytes_per_job)
+ end
+
+ def too_many_events_logged_for_job(j)
+ return (j[:events_logged] >= Rails.configuration.crunch_limit_log_events_per_job)
+ end
+
def did_recently(thing, min_interval)
@did_recently ||= {}
if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
def write_log running_job
begin
if (running_job && running_job[:stderr_buf_to_flush] != '')
+ # Truncate logs if they exceed crunch_limit_log_event_bytes_per_job
+ # or crunch_limit_log_events_per_job.
+ if (too_many_bytes_logged_for_job(running_job))
+ return if running_job[:log_truncated]
+ running_job[:log_truncated] = true
+ running_job[:stderr_buf_to_flush] =
+ "Server configured limit reached (crunch_limit_log_event_bytes_per_job: #{Rails.configuration.crunch_limit_log_event_bytes_per_job}). Subsequent logs truncated"
+ elsif (too_many_events_logged_for_job(running_job))
+ return if running_job[:log_truncated]
+ running_job[:log_truncated] = true
+ running_job[:stderr_buf_to_flush] =
+ "Server configured limit reached (crunch_limit_log_events_per_job: #{Rails.configuration.crunch_limit_log_events_per_job}). Subsequent logs truncated"
+ end
log = Log.new(object_uuid: running_job[:job].uuid,
event_type: 'stderr',
owner_uuid: running_job[:job].owner_uuid,
properties: {"text" => running_job[:stderr_buf_to_flush]})
log.save!
+ running_job[:bytes_logged] += running_job[:stderr_buf_to_flush].size
+ running_job[:events_logged] += 1
running_job[:stderr_buf_to_flush] = ''
running_job[:stderr_flushed_at] = Time.now.to_i
end
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
manifest_text: "./dir1/sub1 0:0:a 0:0:b\n./dir2/sub2 0:0:c 0:0:d\n"
+
+docker_image:
+ # This Collection has links with Docker image metadata.
+ uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-06-11T17:22:54Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_at: 2014-06-11T17:22:54Z
+ updated_at: 2014-06-11T17:22:54Z
+ manifest_text: ". d21353cfe035e3e384563ee55eadbb2f+67108864 5c77a43e329b9838cbec18ff42790e57+55605760 0:122714624:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\n"
uuid: zzzzz-o0j2j-enl1wg58310loc6
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-05-28 16:24:02.314722162 Z
- modified_by_client_uuid:
+ modified_by_client_uuid:
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-05-28 16:24:02.314484982 Z
tail_uuid: ~
tail_uuid: ~
head_uuid: ~
properties: {}
+
+active_user_permission_to_docker_image_collection:
+ uuid: zzzzz-o0j2j-dp1d8395ldqw33s
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-01-24 20:42:26 -0800
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-01-24 20:42:26 -0800
+ updated_at: 2014-01-24 20:42:26 -0800
+ tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ link_class: permission
+ name: can_read
+ head_uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+ properties: {}
+
+docker_image_collection_hash:
+ uuid: zzzzz-o0j2j-dockercollhasha
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-06-11 14:30:00.184389725 Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-06-11 14:30:00.184019565 Z
+ updated_at: 2014-06-11 14:30:00.183829316 Z
+ link_class: docker_image_hash
+ name: d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678
+ tail_uuid: ~
+ head_uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+ properties:
+ image_timestamp: 2014-06-10T14:30:00.184019565Z
+
+docker_image_collection_repository:
+ uuid: zzzzz-o0j2j-dockercollrepos
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-06-11 14:30:00.184389725 Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-06-11 14:30:00.184019565 Z
+ updated_at: 2014-06-11 14:30:00.183829316 Z
+ link_class: docker_image_repository
+ name: arvados/apitestfixture
+ tail_uuid: ~
+ head_uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+ properties:
+ image_timestamp: 2014-06-10T14:30:00.184019565Z
+
+docker_image_collection_tag:
+ uuid: zzzzz-o0j2j-dockercolltagbb
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-06-11 14:30:00.184389725 Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-06-11 14:30:00.184019565 Z
+ updated_at: 2014-06-11 14:30:00.183829316 Z
+ link_class: docker_image_repo+tag
+ name: arvados/apitestfixture:latest
+ tail_uuid: ~
+ head_uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+ properties:
+ image_timestamp: 2014-06-10T14:30:00.184019565Z
+
+docker_image_collection_tag2:
+ uuid: zzzzz-o0j2j-dockercolltagbc
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-06-11 14:30:00.184389725 Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-06-11 14:30:00.184019565 Z
+ updated_at: 2014-06-11 14:30:00.183829316 Z
+ link_class: docker_image_repo+tag
+ name: arvados/apitestfixture:june10
+ tail_uuid: ~
+ head_uuid: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+ properties:
+ image_timestamp: 2014-06-10T14:30:00.184019565Z
+
+ancient_docker_image_collection_hash:
+ # This image helps test that searches for Docker images find
+ # the latest available image: the hash is the same as
+ # docker_image_collection_hash, but it points to a different
+ # Collection and has an older image timestamp.
+ uuid: zzzzz-o0j2j-dockercollhashz
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-06-12 14:30:00.184389725 Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-06-12 14:30:00.184019565 Z
+ updated_at: 2014-06-12 14:30:00.183829316 Z
+ link_class: docker_image_hash
+ name: d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678
+ tail_uuid: ~
+ head_uuid: b519d9cb706a29fc7ea24dbea2f05851+249025
+ properties:
+ image_timestamp: 2010-06-10T14:30:00.184019565Z
+crunch_dispatch_test:
+ uuid: zzzzz-s0uqq-382brsig8rp3665
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+ name: crunch_dispatch_test
+
foo:
uuid: zzzzz-s0uqq-382brsig8rp3666
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
require 'test_helper'
load 'test/functional/arvados/v1/git_setup.rb'
+# NOTE: calling Commit.find_commit_range(user, nil, nil, 'rev') will produce
+# an error message "fatal: bad object 'rev'" on stderr if 'rev' does not exist
+# in a given repository. Many of these tests report such errors; their presence
+# does not represent a fatal condition.
+#
+# TODO(twp): consider handling these error messages more gracefully, or
+# decide that suppressing them is not worth the effort.
+
class Arvados::V1::CommitsControllerTest < ActionController::TestCase
fixtures :repositories, :users
assert_equal ['31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
#test "test_branch1" do
+ # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
a = Commit.find_commit_range(users(:active), nil, nil, 'master', nil)
- assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57'], a
+ assert_equal ['f35f99b7d32bac257f5989df02b9f12ee1a9b0d6', '077ba2ad3ea24a929091a9e6ce545c93199b8e57'], a
#test "test_branch2" do
a = Commit.find_commit_range(users(:active), 'foo', nil, 'b1', nil)
assert_equal nil, a
#test "test_multi_revision" do
+ # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
a = Commit.find_commit_range(users(:active), nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', nil)
assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '4fe459abe02d9b365932b8f5dc419439ab4e2577', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
#test "test_tag" do
+ # complains "fatal: ambiguous argument 'tag1': unknown revision or path
+ # not in the working tree."
a = Commit.find_commit_range(users(:active), nil, 'tag1', 'master', nil)
assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '4fe459abe02d9b365932b8f5dc419439ab4e2577'], a
assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
#test "test_multi_revision_tagged_exclude" do
+ # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
a = Commit.find_commit_range(users(:active), nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', ['tag1'])
assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
assert_equal nil, a
# invalid input to 'excludes'
+ # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
a = Commit.find_commit_range(users(:active), nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["4fe459abe02d9b365932b8f5dc419439ab4e2577 ; touch #{touchdir}/uh_oh"])
assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
assert_equal nil, a
# invalid input to 'excludes'
+ # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
a = Commit.find_commit_range(users(:active), nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["$(uname>#{touchdir}/uh_oh)"])
assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
assert_equal nil, a
--- /dev/null
+require 'test_helper'
+load 'test/functional/arvados/v1/git_setup.rb'
+
+class CrunchDispatchTest < ActionDispatch::IntegrationTest
+ include GitSetup
+
+ fixtures :all
+
+ @@crunch_dispatch_pid = nil
+
+ def launch_crunch_dispatch
+ @@crunch_dispatch_pid = Process.fork {
+ ENV['PATH'] = ENV['HOME'] + '/arvados/services/crunch:' + ENV['PATH']
+ exec(ENV['HOME'] + '/arvados/services/api/script/crunch-dispatch.rb')
+ }
+ end
+
+ teardown do
+ if @@crunch_dispatch_pid
+ Process.kill "TERM", @@crunch_dispatch_pid
+ Process.wait
+ @@crunch_dispatch_pid = nil
+ end
+ end
+
+ test "job runs" do
+ post "/arvados/v1/jobs", {
+ format: "json",
+ job: {
+ script: "log",
+ repository: "crunch_dispatch_test",
+ script_version: "f35f99b7d32bac257f5989df02b9f12ee1a9b0d6",
+ script_parameters: "{}"
+ }
+ }, auth(:admin)
+ assert_response :success
+ end
+end
end
end
+ def assert_link_classes_ascend(current_class, prev_class)
+ # Databases and Ruby don't always agree about string ordering with
+ # punctuation. If the strings aren't ascending normally, check
+ # that they're equal up to punctuation.
+ if current_class < prev_class
+ class_prefix = current_class.split(/\W/).first
+ assert prev_class.start_with?(class_prefix)
+ end
+ end
+
test "select two columns with order" do
get "/arvados/v1/links", {:format => :json, :select => ['link_class', 'uuid'], :order => ['link_class asc', "uuid desc"]}, auth(:active)
assert_response :success
prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
end
- assert i['link_class'] >= prev_link_class
+ assert_link_classes_ascend(i['link_class'], prev_link_class)
assert i['uuid'] < prev_uuid
prev_link_class = i['link_class']
prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
end
- assert i['link_class'] >= prev_link_class
+ assert_link_classes_ascend(i['link_class'], prev_link_class)
assert i['uuid'] < prev_uuid
prev_link_class = i['link_class']
job: {
repository: 'foo',
- runtime_constraints: {docker_image: 'arvados/jobs'},
+ runtime_constraints: {docker_image: 'arvados/apitestfixture'},
script: 'hash',
script_version: 'master',
script_parameters: {pattern: 'foobar'},
require 'test_helper'
class JobTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
+ BAD_COLLECTION = "#{'f' * 32}+0"
+
+ setup do
+ set_user_from_auth :active
+ end
+
+ test "Job without Docker image doesn't get locator" do
+ job = Job.new
+ assert job.valid?
+ assert_nil job.docker_image_locator
+ end
+
+ { 'name' => [:links, :docker_image_collection_repository, :name],
+ 'hash' => [:links, :docker_image_collection_hash, :name],
+ 'locator' => [:collections, :docker_image, :uuid],
+ }.each_pair do |spec_type, (fixture_type, fixture_name, fixture_attr)|
+ test "Job initialized with Docker image #{spec_type} gets locator" do
+ image_spec = send(fixture_type, fixture_name).send(fixture_attr)
+ job = Job.new(runtime_constraints: {'docker_image' => image_spec})
+ assert(job.valid?, "Docker image #{spec_type} was invalid")
+ assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
+ end
+
+ test "Job modified with Docker image #{spec_type} gets locator" do
+ job = Job.new
+ assert job.valid?
+ assert_nil job.docker_image_locator
+ image_spec = send(fixture_type, fixture_name).send(fixture_attr)
+ job.runtime_constraints['docker_image'] = image_spec
+ assert(job.valid?, "modified Docker image #{spec_type} was invalid")
+ assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
+ end
+ end
+
+ test "removing a Docker runtime constraint removes the locator" do
+ image_locator = collections(:docker_image).uuid
+ job = Job.new(runtime_constraints: {'docker_image' => image_locator})
+ assert job.valid?
+ assert_equal(image_locator, job.docker_image_locator)
+ job.runtime_constraints = {}
+ assert(job.valid?, "clearing runtime constraints made the Job invalid")
+ assert_nil job.docker_image_locator
+ end
+
+ test "locate a Docker image with a repository + tag" do
+ image_repo, image_tag =
+ links(:docker_image_collection_tag2).name.split(':', 2)
+ job = Job.new(runtime_constraints:
+ {'docker_image' => image_repo,
+ 'docker_image_tag' => image_tag})
+ assert(job.valid?, "Job with Docker tag search invalid")
+ assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
+ end
+
+ test "can't locate a Docker image with a nonexistent tag" do
+ image_repo = links(:docker_image_collection_repository).name
+ image_tag = '__nonexistent tag__'
+ job = Job.new(runtime_constraints:
+ {'docker_image' => image_repo,
+ 'docker_image_tag' => image_tag})
+ assert(job.invalid?, "Job with bad Docker tag valid")
+ end
+
+ test "locate a Docker image with a partial hash" do
+ image_hash = links(:docker_image_collection_hash).name[0..24]
+ job = Job.new(runtime_constraints: {'docker_image' => image_hash})
+ assert(job.valid?, "Job with partial Docker image hash failed")
+ assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
+ end
+
+ { 'name' => 'arvados_test_nonexistent',
+ 'hash' => 'f' * 64,
+ 'locator' => BAD_COLLECTION,
+ }.each_pair do |spec_type, image_spec|
+ test "Job validation fails with nonexistent Docker image #{spec_type}" do
+ job = Job.new(runtime_constraints: {'docker_image' => image_spec})
+ assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid")
+ end
+ end
+
+ test "Job validation fails with non-Docker Collection constraint" do
+ job = Job.new(runtime_constraints:
+ {'docker_image' => collections(:foo_file).uuid})
+ assert(job.invalid?, "non-Docker Collection constraint was valid")
+ end
+
+ test "can't create Job with Docker image locator" do
+ begin
+ job = Job.new(docker_image_locator: BAD_COLLECTION)
+ rescue ActiveModel::MassAssignmentSecurity::Error
+ # Test passes - expected attribute protection
+ else
+ assert_nil job.docker_image_locator
+ end
+ end
+
+ test "can't assign Docker image locator to Job" do
+ job = Job.new
+ begin
+      job.docker_image_locator = BAD_COLLECTION
+ rescue NoMethodError
+ # Test passes - expected attribute protection
+ end
+ assert_nil job.docker_image_locator
+ end
end
blocklen, _, err = kc.AuthorizedAsk(hash, locator.Signature, locator.Timestamp)
}
- resp.Header().Set("Content-Length", fmt.Sprint(blocklen))
+ if blocklen > 0 {
+ resp.Header().Set("Content-Length", fmt.Sprint(blocklen))
+ }
switch err {
case nil:
if reader != nil {
n, err2 := io.Copy(resp, reader)
if n != blocklen {
- log.Printf("%s: %s %s mismatched return %v with Content-Length %v error", GetRemoteAddress(req), req.Method, hash, n, blocklen, err.Error())
+ log.Printf("%s: %s %s mismatched return %v with Content-Length %v error %v", GetRemoteAddress(req), req.Method, hash, n, blocklen, err2)
} else if err2 == nil {
log.Printf("%s: %s %s success returned %v bytes", GetRemoteAddress(req), req.Method, hash, n)
} else {