services/keep/bin
services/keep/pkg
services/keep/src/github.com
+sdk/java/target
+*.class
+++ /dev/null
-#!/usr/bin/env ruby
-
-abort 'Error: Ruby >= 1.9.3 required.' if RUBY_VERSION < '1.9.3'
-
-require 'logger'
-require 'trollop'
-log = Logger.new STDERR
-log.progname = $0.split('/').last
-
-opts = Trollop::options do
- banner ''
- banner "Usage: #{log.progname} " +
- "{user_uuid_or_email} {user_and_repo_name} {vm_uuid}"
- banner ''
- opt :debug, <<-eos
-Show debug messages.
- eos
- opt :create, <<-eos
-Create a new user with the given email address if an existing user \
-is not found.
- eos
- opt :openid_prefix, <<-eos, default: 'https://www.google.com/accounts/o8/id'
-If creating a new user record, require authentication from an OpenID \
-with this OpenID prefix *and* a matching email address in order to \
-claim the account.
- eos
- opt :force, <<-eos
-Continue even if sanity checks raise flags: the given user is already \
-active, the given repository already exists, etc.
- eos
- opt :n, 'Do not change anything, just probe'
-end
-
-log.level = (ENV['DEBUG'] || opts.debug) ? Logger::DEBUG : Logger::WARN
-
-if ARGV.count != 3
- Trollop::die "required arguments are missing"
-end
-user_arg, user_repo_name, vm_uuid = ARGV
-
-require 'arvados'
-arv = Arvados.new(api_version: 'v1')
-
-# Look up the given user by uuid or, failing that, email address.
-user = begin
- arv.user.get(uuid: user_arg)
- rescue Arvados::TransactionFailedError
- found = arv.user.list(where: {email: ARGV[0]})[:items]
- if found.count == 0 and opts.create
- if !opts.force and !user_arg.match(/\w\@\w+\.\w+/)
- abort "About to create new user, but #{user_arg.inspect} " +
- "does not look like an email address. Stop."
- end
- if opts.n
- log.info "-n flag given. Stop before creating new user record."
- exit 0
- end
- new_user = arv.user.create(user: {email: user_arg})
- log.info { "created user: " + new_user[:uuid] }
- login_perm_props = {identity_url_prefix: opts.openid_prefix }
- oid_login_perm = arv.link.create(link: {
- link_class: 'permission',
- name: 'can_login',
- tail_kind: 'email',
- tail_uuid: user_arg,
- head_kind: 'arvados#user',
- head_uuid: new_user[:uuid],
- properties: login_perm_props
- })
- log.info { "openid login permission: " + oid_login_perm[:uuid] }
- found = [new_user]
- end
- if found.count != 1
- abort "Found #{found.count} users " +
- "with uuid or email #{user_arg.inspect}. Stop."
- end
- found.first
- end
-log.info { "user uuid: " + user[:uuid] }
-
-# Look up the given virtual machine just to make sure it really exists.
-begin
- vm = arv.virtual_machine.get(uuid: vm_uuid)
-rescue
- abort "Could not look up virtual machine with uuid #{vm_uuid.inspect}. Stop."
-end
-log.info { "vm uuid: " + vm[:uuid] }
-
-# Look up the "All users" group (we expect uuid *-*-fffffffffffffff).
-group = arv.group.list(where: {name: 'All users'})[:items].select do |g|
- g[:uuid].match /-f+$/
-end.first
-if not group
- abort "Could not look up the 'All users' group with uuid '*-*-fffffffffffffff'. Stop."
-end
-log.info { "\"All users\" group uuid: " + group[:uuid] }
-
-# Look for signs the user has already been activated / set up.
-
-if user[:is_active]
- log.warn "User's is_active flag is already set."
- need_force = true
-end
-
-# Look for existing repository access (perhaps using a different
-# repository/user name).
-repo_perms = arv.link.list(where: {
- tail_uuid: user[:uuid],
- head_kind: 'arvados#repository',
- link_class: 'permission',
- name: 'can_write'})[:items]
-if [] != repo_perms
- log.warn "User already has repository access " +
- repo_perms.collect { |p| p[:uuid] }.inspect + "."
- need_force = true
-end
-
-# Check for an existing repository with the same name we're about to
-# use.
-repo = arv.repository.list(where: {name: user_repo_name})[:items].first
-if repo
- log.warn "Repository already exists with name #{user_repo_name.inspect}: " +
- "#{repo[:uuid]}"
- need_force = true
-end
-
-if opts.n
- log.info "-n flag given. Done."
- exit 0
-end
-
-if need_force and not opts.force
- abort "This does not seem to be a new user[name], and -f was not given. Stop."
-end
-
-# Everything seems to be in order. Create a repository (if needed) and
-# add permissions.
-
-repo ||= arv.repository.create(repository: {name: user_repo_name})
-log.info { "repo uuid: " + repo[:uuid] }
-
-repo_perm = arv.link.create(link: {
- tail_kind: 'arvados#user',
- tail_uuid: user[:uuid],
- head_kind: 'arvados#repository',
- head_uuid: repo[:uuid],
- link_class: 'permission',
- name: 'can_write'})
-log.info { "repo permission: " + repo_perm[:uuid] }
-
-login_perm = arv.link.create(link: {
- tail_kind: 'arvados#user',
- tail_uuid: user[:uuid],
- head_kind: 'arvados#virtualMachine',
- head_uuid: vm[:uuid],
- link_class: 'permission',
- name: 'can_login',
- properties: {username: user_repo_name}})
-log.info { "login permission: " + login_perm[:uuid] }
-
-group_perm = arv.link.create(link: {
- tail_kind: 'arvados#user',
- tail_uuid: user[:uuid],
- head_kind: 'arvados#group',
- head_uuid: group[:uuid],
- link_class: 'permission',
- name: 'can_read'})
-log.info { "group permission: " + group_perm[:uuid] }
# This can be a symlink to ../../../doc/.site in dev setups
/public/doc
+
+# SimpleCov reports
+/coverage
+
+# Dev/test SSL certificates
+/self-signed.key
+/self-signed.pem
source 'https://rubygems.org'
-gem 'rails', '~> 3.2.0'
+gem 'rails', '~> 4.1.0'
+gem 'minitest', '>= 5.0.0'
# Bundle edge Rails instead:
# gem 'rails', :git => 'git://github.com/rails/rails.git'
gem 'oj'
gem 'sass'
+# Note: keeping this out of the "group :assets" section "may" allow us
+# to use Coffescript for UJS responses. It also prevents a
+# warning/problem when running tests: "WARN: tilt autoloading
+# 'coffee_script' in a non thread-safe way; explicit require
+# 'coffee_script' suggested."
+gem 'coffee-rails'
+
# Gems used only for assets and not required
# in production environments by default.
group :assets do
- gem 'sass-rails', '~> 3.2.0'
- gem 'coffee-rails', '~> 3.2.0'
+ gem 'sass-rails'
# See https://github.com/sstephenson/execjs#readme for more supported runtimes
gem 'therubyracer', :platforms => :ruby
gem 'capybara'
gem 'poltergeist'
gem 'headless'
+ # Note: "require: false" here tells bunder not to automatically
+ # 'require' the packages during application startup. Installation is
+ # still mandatory.
+ gem 'simplecov', '~> 0.7.1', require: false
+ gem 'simplecov-rcov', require: false
end
gem 'jquery-rails'
gem 'piwik_analytics'
gem 'httpclient'
-gem 'themes_for_rails'
+
+# This fork has Rails 4 compatible routes
+gem 'themes_for_rails', git: 'https://github.com/holtkampw/themes_for_rails', ref: '1fd2d7897d75ae0d6375f4c390df87b8e91ad417'
+
gem "deep_merge", :require => 'deep_merge/rails_compat'
+GIT
+ remote: https://github.com/holtkampw/themes_for_rails
+ revision: 1fd2d7897d75ae0d6375f4c390df87b8e91ad417
+ ref: 1fd2d7897d75ae0d6375f4c390df87b8e91ad417
+ specs:
+ themes_for_rails (0.5.1)
+ rails (>= 3.0.0)
+
GEM
remote: https://rubygems.org/
specs:
RedCloth (4.2.9)
- actionmailer (3.2.15)
- actionpack (= 3.2.15)
+ actionmailer (4.1.1)
+ actionpack (= 4.1.1)
+ actionview (= 4.1.1)
mail (~> 2.5.4)
- actionpack (3.2.15)
- activemodel (= 3.2.15)
- activesupport (= 3.2.15)
- builder (~> 3.0.0)
+ actionpack (4.1.1)
+ actionview (= 4.1.1)
+ activesupport (= 4.1.1)
+ rack (~> 1.5.2)
+ rack-test (~> 0.6.2)
+ actionview (4.1.1)
+ activesupport (= 4.1.1)
+ builder (~> 3.1)
erubis (~> 2.7.0)
- journey (~> 1.0.4)
- rack (~> 1.4.5)
- rack-cache (~> 1.2)
- rack-test (~> 0.6.1)
- sprockets (~> 2.2.1)
- activemodel (3.2.15)
- activesupport (= 3.2.15)
- builder (~> 3.0.0)
- activerecord (3.2.15)
- activemodel (= 3.2.15)
- activesupport (= 3.2.15)
- arel (~> 3.0.2)
- tzinfo (~> 0.3.29)
- activeresource (3.2.15)
- activemodel (= 3.2.15)
- activesupport (= 3.2.15)
- activesupport (3.2.15)
- i18n (~> 0.6, >= 0.6.4)
- multi_json (~> 1.0)
+ activemodel (4.1.1)
+ activesupport (= 4.1.1)
+ builder (~> 3.1)
+ activerecord (4.1.1)
+ activemodel (= 4.1.1)
+ activesupport (= 4.1.1)
+ arel (~> 5.0.0)
+ activesupport (4.1.1)
+ i18n (~> 0.6, >= 0.6.9)
+ json (~> 1.7, >= 1.7.7)
+ minitest (~> 5.1)
+ thread_safe (~> 0.1)
+ tzinfo (~> 1.1)
andand (1.3.3)
- arel (3.0.2)
+ arel (5.0.1.20140414130214)
bootstrap-sass (3.1.0.1)
sass (~> 3.2)
bootstrap-x-editable-rails (1.5.1.1)
railties (>= 3.0)
- builder (3.0.4)
+ builder (3.2.2)
capistrano (2.15.5)
highline
net-scp (>= 1.0.0)
childprocess (0.5.1)
ffi (~> 1.0, >= 1.0.11)
cliver (0.3.2)
- coffee-rails (3.2.2)
+ coffee-rails (4.0.1)
coffee-script (>= 2.2.0)
- railties (~> 3.2.0)
+ railties (>= 4.0.0, < 5.0)
coffee-script (2.2.0)
coffee-script-source
execjs
- coffee-script-source (1.6.3)
+ coffee-script-source (1.7.0)
commonjs (0.2.7)
daemon_controller (1.1.7)
deep_merge (1.0.1)
highline (1.6.20)
hike (1.2.3)
httpclient (2.3.4.1)
- i18n (0.6.5)
- journey (1.0.4)
+ i18n (0.6.9)
jquery-rails (3.0.4)
railties (>= 3.0, < 5.0)
thor (>= 0.14, < 2.0)
mail (2.5.4)
mime-types (~> 1.16)
treetop (~> 1.4.8)
- mime-types (1.25)
+ mime-types (1.25.1)
mini_portile (0.5.2)
- multi_json (1.8.2)
+ minitest (5.3.3)
+ multi_json (1.10.0)
net-scp (1.1.2)
net-ssh (>= 2.6.5)
net-sftp (2.1.2)
cliver (~> 0.3.1)
multi_json (~> 1.0)
websocket-driver (>= 0.2.0)
- polyglot (0.3.3)
- rack (1.4.5)
- rack-cache (1.2)
- rack (>= 0.4)
- rack-ssl (1.3.3)
- rack
+ polyglot (0.3.4)
+ rack (1.5.2)
rack-test (0.6.2)
rack (>= 1.0)
- rails (3.2.15)
- actionmailer (= 3.2.15)
- actionpack (= 3.2.15)
- activerecord (= 3.2.15)
- activeresource (= 3.2.15)
- activesupport (= 3.2.15)
- bundler (~> 1.0)
- railties (= 3.2.15)
- railties (3.2.15)
- actionpack (= 3.2.15)
- activesupport (= 3.2.15)
- rack-ssl (~> 1.3.2)
+ rails (4.1.1)
+ actionmailer (= 4.1.1)
+ actionpack (= 4.1.1)
+ actionview (= 4.1.1)
+ activemodel (= 4.1.1)
+ activerecord (= 4.1.1)
+ activesupport (= 4.1.1)
+ bundler (>= 1.3.0, < 2.0)
+ railties (= 4.1.1)
+ sprockets-rails (~> 2.0)
+ railties (4.1.1)
+ actionpack (= 4.1.1)
+ activesupport (= 4.1.1)
rake (>= 0.8.7)
- rdoc (~> 3.4)
- thor (>= 0.14.6, < 2.0)
- rake (10.1.0)
- rdoc (3.12.2)
- json (~> 1.4)
+ thor (>= 0.18.1, < 2.0)
+ rake (10.3.1)
ref (1.0.5)
rubyzip (1.1.0)
rvm-capistrano (1.5.1)
capistrano (~> 2.15.4)
sass (3.2.12)
- sass-rails (3.2.6)
- railties (~> 3.2.0)
- sass (>= 3.1.10)
- tilt (~> 1.3)
+ sass-rails (4.0.3)
+ railties (>= 4.0.0, < 5.0)
+ sass (~> 3.2.0)
+ sprockets (~> 2.8, <= 2.11.0)
+ sprockets-rails (~> 2.0)
selenium-webdriver (2.40.0)
childprocess (>= 0.5.0)
multi_json (~> 1.0)
rubyzip (~> 1.0)
websocket (~> 1.0.4)
- sprockets (2.2.2)
+ simplecov (0.7.1)
+ multi_json (~> 1.0)
+ simplecov-html (~> 0.7.1)
+ simplecov-html (0.7.1)
+ simplecov-rcov (0.2.3)
+ simplecov (>= 0.4.1)
+ sprockets (2.11.0)
hike (~> 1.2)
multi_json (~> 1.0)
rack (~> 1.0)
tilt (~> 1.1, != 1.3.0)
+ sprockets-rails (2.1.3)
+ actionpack (>= 3.0)
+ activesupport (>= 3.0)
+ sprockets (~> 2.8)
sqlite3 (1.3.8)
- themes_for_rails (0.5.1)
- rails (>= 3.0.0)
therubyracer (0.12.0)
libv8 (~> 3.16.14.0)
ref
- thor (0.18.1)
+ thor (0.19.1)
+ thread_safe (0.3.3)
tilt (1.4.1)
treetop (1.4.15)
polyglot
polyglot (>= 0.3.1)
- tzinfo (0.3.38)
+ tzinfo (1.1.0)
+ thread_safe (~> 0.1)
uglifier (2.3.1)
execjs (>= 0.3.0)
json (>= 1.8.0)
bootstrap-sass (~> 3.1.0)
bootstrap-x-editable-rails
capybara
- coffee-rails (~> 3.2.0)
+ coffee-rails
deep_merge
headless
httpclient
jquery-rails
less
less-rails
+ minitest (>= 5.0.0)
multi_json
oj
passenger
piwik_analytics
poltergeist
- rails (~> 3.2.0)
+ rails (~> 4.1.0)
rvm-capistrano
sass
- sass-rails (~> 3.2.0)
+ sass-rails
selenium-webdriver
+ simplecov (~> 0.7.1)
+ simplecov-rcov
sqlite3
- themes_for_rails
+ themes_for_rails!
therubyracer
uglifier (>= 1.0.3)
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
'X-CSRF-Token': $('meta[name="csrf-token"]').attr('content')
}
});
- $('.editable').editable();
$('[data-toggle=tooltip]').tooltip();
$('.expand-collapse-row').on('click', function(event) {
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
var toggle_group = $(this).parents('[data-remote-href]').first();
var want_persist = !toggle_group.find('button').hasClass('active');
var want_state = want_persist ? 'persistent' : 'cache';
- console.log(want_persist);
toggle_group.find('button').
toggleClass('active', want_persist).
html(want_persist ? 'Persistent' : 'Cache');
-$.fn.editable.defaults.ajaxOptions = {type: 'put', dataType: 'json'};
+$.fn.editable.defaults.ajaxOptions = {type: 'post', dataType: 'json'};
$.fn.editable.defaults.send = 'always';
// Default for editing is popup. I experimented with inline which is a little
// too narrow, when the popup box will just move to do the right thing.
//$.fn.editable.defaults.mode = 'inline';
+$.fn.editable.defaults.success = function (response, newValue) {
+ $(document).trigger('editable:success', [this, response, newValue]);
+};
+
$.fn.editable.defaults.params = function (params) {
var a = {};
var key = params.pk.key;
- a.id = params.pk.id;
- a[key] = {};
+ a.id = $(this).attr('data-object-uuid') || params.pk.id;
+ a[key] = params.pk.defaults || {};
+ // Remove null values. Otherwise they get transmitted as empty
+ // strings in request params.
+ for (i in a[key]) {
+ if (a[key][i] == null)
+ delete a[key][i];
+ }
a[key][params.name] = params.value;
+ if (!a.id) {
+ a['_method'] = 'post';
+ } else {
+ a['_method'] = 'put';
+ }
return a;
};
}
}
+$(document).
+ on('ready ajax:complete', function() {
+ $('#editable-submit').click(function() {
+ console.log($(this));
+ });
+ $('.editable').
+ editable({
+ success: function(response, newValue) {
+ // If we just created a new object, stash its UUID
+ // so we edit it next time instead of creating
+ // another new object.
+ if (!$(this).attr('data-object-uuid') && response.uuid) {
+ $(this).attr('data-object-uuid', response.uuid);
+ }
+ if (response.href) {
+ $(this).editable('option', 'url', response.href);
+ }
+ return;
+ }
+ }).
+ on('hidden', function(e, reason) {
+ // After saving a new attribute, update the same
+ // information if it appears elsewhere on the page.
+ if (reason != 'save') return;
+ var html = $(this).html();
+ var uuid = $(this).attr('data-object-uuid');
+ var attr = $(this).attr('data-name');
+ var edited = this;
+ if (uuid && attr) {
+ $("[data-object-uuid='" + uuid + "']" +
+ "[data-name='" + attr + "']").each(function() {
+ if (this != edited)
+ $(this).html(html);
+ });
+ }
+ });
+ });
+
$.fn.editabletypes.text.defaults.tpl = '<input type="text" name="editable-text">'
$.fn.editableform.buttons = '\
--- /dev/null
+/*
+ * This js establishes a websockets connection with the API Server.
+ */
+
+/* The subscribe method takes a window element id and object id.
+ Any log events for that particular object id are sent to that window element. */
+function subscribeToEventLog (elementId) {
+ // if websockets are not supported by browser, do not subscribe for events
+ websocketsSupported = ('WebSocket' in window);
+ if (websocketsSupported == false) {
+ return;
+ }
+
+ // grab websocket connection from window, if one exists
+ event_log_disp = $(window).data("arv-websocket");
+ if (event_log_disp == null) {
+ // create the event log dispatcher
+ websocket_url = $('meta[name=arv-websocket-url]').attr("content");
+ if (websocket_url == null)
+ return;
+
+ event_log_disp = new WebSocket(websocket_url);
+
+ event_log_disp.onopen = onEventLogDispatcherOpen;
+ event_log_disp.onmessage = onEventLogDispatcherMessage;
+
+ // store websocket in window to allow reuse when multiple divs subscribe for events
+ $(window).data("arv-websocket", event_log_disp);
+ }
+}
+
+/* send subscribe message to the websockets server */
+function onEventLogDispatcherOpen(event) {
+ this.send('{"method":"subscribe"}');
+}
+
+/* trigger event for all applicable elements waiting for this event */
+function onEventLogDispatcherMessage(event) {
+ parsedData = JSON.parse(event.data);
+ object_uuid = parsedData.object_uuid;
+
+ // if there are any listeners for this object uuid or "all", trigger the event
+ matches = ".arv-log-event-listener[data-object-uuid=\"" + object_uuid + "\"],.arv-log-event-listener[data-object-uuids~=\"" + object_uuid + "\"],.arv-log-event-listener[data-object-uuid=\"all\"]";
+ $(matches).trigger('arv-log-event', event.data);
+}
--- /dev/null
+$(document).
+ on('ready ajax:complete', function() {
+ $("[data-toggle='x-editable']").click(function(e) {
+ e.stopPropagation();
+ $($(this).attr('data-toggle-selector')).editable('toggle');
+ });
+ }).on('paste keyup change', 'input.search-folder-contents', function() {
+ var q = new RegExp($(this).val(), 'i');
+ $(this).closest('div.panel').find('tbody tr').each(function() {
+ $(this).toggle(!!$(this).text().match(q));
+ });
+ });
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+cache_age_in_days = (milliseconds_age) ->
+ ONE_DAY = 1000 * 60 * 60 * 24
+ milliseconds_age / ONE_DAY
+
+cache_age_hover = (milliseconds_age) ->
+ 'Cache age ' + cache_age_in_days(milliseconds_age).toFixed(1) + ' days.'
+
+cache_age_axis_label = (milliseconds_age) ->
+ cache_age_in_days(milliseconds_age).toFixed(0) + ' days'
+
+float_as_percentage = (proportion) ->
+ (proportion.toFixed(4) * 100) + '%'
+
+$.renderHistogram = (histogram_data) ->
+ Morris.Area({
+ element: 'cache-age-vs-disk-histogram',
+ pointSize: 0,
+ lineWidth: 0,
+ data: histogram_data,
+ xkey: 'age',
+ ykeys: ['persisted', 'cache'],
+ labels: ['Persisted Storage Disk Utilization', 'Cached Storage Disk Utilization'],
+ ymax: 1,
+ ymin: 0,
+ xLabelFormat: cache_age_axis_label,
+ yLabelFormat: float_as_percentage,
+ dateFormat: cache_age_hover
+ })
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+function run_pipeline_button_state() {
+ var a = $('a.editable.required.editable-empty');
+ if (a.length > 0) {
+ $(".run-pipeline-button").addClass("disabled");
+ }
+ else {
+ $(".run-pipeline-button").removeClass("disabled");
+ }
+}
-(function() {
- var run_pipeline_button_state = function() {
- var a = $('a.editable.required.editable-empty');
- if (a.length > 0) {
- $("#run-pipeline-button").addClass("disabled");
+$(document).on('editable:success', function(event, tag, response, newValue) {
+ var $tag = $(tag);
+ if ($('.run-pipeline-button').length == 0)
+ return;
+ if ($tag.hasClass("required")) {
+ if (newValue && newValue.trim() != "") {
+ $tag.removeClass("editable-empty");
+ $tag.parent().css("background-color", "");
+ $tag.parent().prev().css("background-color", "");
}
else {
- $("#run-pipeline-button").removeClass("disabled");
+ $tag.addClass("editable-empty");
+ $tag.parent().css("background-color", "#ffdddd");
+ $tag.parent().prev().css("background-color", "#ffdddd");
}
}
-
- $.fn.editable.defaults.success = function (response, newValue) {
- var tag = $(this);
- if (tag.hasClass("required")) {
- if (newValue && newValue.trim() != "") {
- tag.removeClass("editable-empty");
- tag.parent().css("background-color", "");
- tag.parent().prev().css("background-color", "");
- }
- else {
- tag.addClass("editable-empty");
- tag.parent().css("background-color", "#ffdddd");
- tag.parent().prev().css("background-color", "#ffdddd");
- }
- }
- run_pipeline_button_state();
+ if ($tag.attr('data-name')) {
+ // Update other inputs representing the same piece of data
+ $('.editable[data-name="' + $tag.attr('data-name') + '"]').
+ editable('setValue', newValue);
}
+ run_pipeline_button_state();
+});
- $(window).on('load', function() {
- var a = $('a.editable.required');
- for (var i = 0; i < a.length; i++) {
- var tag = $(a[i]);
- if (tag.hasClass("editable-empty")) {
- tag.parent().css("background-color", "#ffdddd");
- tag.parent().prev().css("background-color", "#ffdddd");
- }
- else {
- tag.parent().css("background-color", "");
- tag.parent().prev().css("background-color", "");
- }
+$(document).on('ready ajax:complete', function() {
+ $('a.editable.required').each(function() {
+ var $tag = $(this);
+ if ($tag.hasClass("editable-empty")) {
+ $tag.parent().css("background-color", "#ffdddd");
+ $tag.parent().prev().css("background-color", "#ffdddd");
+ }
+ else {
+ $tag.parent().css("background-color", "");
+ $tag.parent().prev().css("background-color", "");
}
- run_pipeline_button_state();
- } );
+ });
+ run_pipeline_button_state();
+});
-})();
+$(document).on('ajax:complete ready', function() {
+ var a = $('.arv-log-event-listener');
+ if (a.length > 0) {
+ $('.arv-log-event-listener').each(function() {
+ subscribeToEventLog(this.id);
+ });
+ }
+});
+
+$(document).on('arv-log-event', '.arv-log-event-handler-append-logs', function(event, eventData){
+ parsedData = JSON.parse(eventData);
+
+ propertyText = undefined
+
+ properties = parsedData.properties;
+ if (properties !== null) {
+ propertyText = properties.text;
+ }
+
+ if (propertyText !== undefined) {
+ $(this).append(propertyText + "<br/>");
+ } else {
+ $(this).append(parsedData.summary + "<br/>");
+ }
+});
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
--- /dev/null
+$(document).on('click', '.selectable', function() {
+ var $this = $(this);
+ if (!$this.hasClass('multiple')) {
+ $this.closest('.selectable-container').
+ find('.selectable').
+ removeClass('active');
+ }
+ $this.toggleClass('active');
+}).on('click', '.modal button[data-action-href]', function() {
+ var selection = [];
+ var data = {};
+ var $modal = $(this).closest('.modal');
+ $modal.find('.modal-error').removeClass('hide').hide();
+ $modal.find('.selectable.active[data-object-uuid]').each(function() {
+ selection.push($(this).attr('data-object-uuid'));
+ });
+ data[$(this).data('action-data').selection_param] = selection[0];
+ $.ajax($(this).attr('data-action-href'),
+ {dataType: 'json',
+ type: $(this).attr('data-method'),
+ data: data,
+ context: {modal: $modal}}).
+ fail(function(jqxhr, status, error) {
+ if (jqxhr.readyState == 0 || jqxhr.status == 0) {
+ message = "Cancelled."
+ } else if (jqxhr.responseJSON && jqxhr.responseJSON.errors) {
+ message = jqxhr.responseJSON.errors.join("; ");
+ } else {
+ message = "Request failed.";
+ }
+ this.modal.find('.modal-error').
+ html('<div class="alert alert-danger">' + message + '</div>').
+ show();
+ }).
+ success(function() {
+ this.modal.find('.modal-error').hide();
+ window.location.reload();
+ });
+});
}
var update_count = function(e) {
+ var html;
+ var this_object_uuid = $('#selection-form-content').
+ closest('form').
+ find('input[name=uuid]').val();
var lst = get_selection_list();
$("#persistent-selection-count").text(lst.length);
if (lst.length > 0) {
- $('#selection-form-content').html(
- '<li><a href="#" id="clear_selections_button">Clear selections</a></li>'
- + '<li><input type="submit" name="combine_selected_files_into_collection" '
- + ' id="combine_selected_files_into_collection" '
- + ' value="Combine selected collections and files into a new collection" /></li>'
- + '<li class="notification"><table style="width: 100%"></table></li>');
+ html = '<li><a href="#" class="btn btn-xs btn-info" id="clear_selections_button"><i class="fa fa-fw fa-ban"></i> Clear selections</a></li>';
+ if (this_object_uuid.match('-j7d0g-'))
+ html += '<li><button class="btn btn-xs btn-info" type="submit" name="copy_selections_into_folder" id="copy_selections_into_folder"><i class="fa fa-fw fa-folder-open"></i> Copy selections into this folder</button></li>';
+ html += '<li><button class="btn btn-xs btn-info" type="submit" name="combine_selected_files_into_collection" '
+ + ' id="combine_selected_files_into_collection">'
+ + '<i class="fa fa-fw fa-archive"></i> Combine selected collections and files into a new collection</button></li>'
+ + '<li class="notification"><table style="width: 100%"></table></li>';
+ $('#selection-form-content').html(html);
for (var i = 0; i < lst.length; i++) {
$('#selection-form-content > li > table').append("<tr>"
});
add_form_selection_sources = null;
-select_form_sources = null;
+select_form_sources = null;
(function() {
var form_selection_sources = {};
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
a = s[i];
var h = window.innerHeight - a.getBoundingClientRect().top - 20;
height = String(h) + "px";
- a.style.height = height;
+ a.style['max-height'] = height;
}
}
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
table.table-justforlayout {
margin-bottom: 0;
}
+.smaller-text {
+ font-size: .8em;
+}
.deemphasize {
font-size: .8em;
color: #888;
}
+.arvados-uuid {
+ font-size: .8em;
+ font-family: monospace;
+}
table .data-size, .table .data-size {
text-align: right;
}
text-decoration: none;
text-shadow: 0 1px 0 #ffffff;
}
-/*.navbar .nav .dropdown .dropdown-menu li a {
- padding: 2px 20px;
-}*/
-
-ul.arvados-nav {
- list-style: none;
- padding-left: 0em;
- margin-left: 0em;
-}
-
-ul.arvados-nav li ul {
- list-style: none;
- padding-left: 0;
-}
-
-ul.arvados-nav li ul li {
- list-style: none;
- padding-left: 1em;
-}
.dax {
max-width: 10%;
li.notification {
padding: 10px;
}
-.arvados-nav-container {
- top: 70px;
- height: calc(100% - 70px);
- overflow: auto;
- z-index: 2;
-}
-
-.arvados-nav-active {
- background: rgb(66, 139, 202);
-}
-
-.arvados-nav-active a, .arvados-nav-active a:hover {
- color: white;
-}
// See HeaderRowFixer in application.js
table.table-fixed-header-row {
overflow-y: auto;
}
+.row-fill-height, .row-fill-height>div[class*='col-'] {
+ display: flex;
+}
+.row-fill-height>div[class*='col-']>div {
+ width: 100%;
+}
+
+/* Show editable popover above side-nav */
+.editable-popup.popover {
+ z-index:1055;
+}
+
+.navbar-nav.side-nav {
+ box-shadow: inset -1px 0 #e7e7e7;
+}
+.navbar-nav.side-nav > li:first-child {
+ margin-top: 5px; /* keep "hover" bg below top nav bottom border */
+}
+.navbar-nav.side-nav > li > a {
+ padding-top: 10px;
+ padding-bottom: 10px;
+}
+.navbar-nav.side-nav > li.dropdown > ul.dropdown-menu > li > a {
+ padding-top: 5px;
+ padding-bottom: 5px;
+}
+.navbar-nav.side-nav a.active,
+.navbar-nav.side-nav a:hover,
+.navbar-nav.side-nav a:focus {
+ border-right: 1px solid #ffffff;
+ background: #ffffff;
+}
--- /dev/null
+.card {
+ padding-top: 20px;
+ margin: 10px 0 20px 0;
+ background-color: #ffffff;
+ border: 1px solid #d8d8d8;
+ border-top-width: 0;
+ border-bottom-width: 2px;
+ -webkit-border-radius: 3px;
+ -moz-border-radius: 3px;
+ border-radius: 3px;
+ -webkit-box-shadow: none;
+ -moz-box-shadow: none;
+ box-shadow: none;
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+.card.arvados-object {
+ position: relative;
+ display: inline-block;
+ width: 170px;
+ height: 175px;
+ padding-top: 0;
+ margin-left: 20px;
+ overflow: hidden;
+ vertical-align: top;
+}
+.card.arvados-object .card-top.green {
+ background-color: #53a93f;
+}
+.card.arvados-object .card-top.blue {
+ background-color: #427fed;
+}
+.card.arvados-object .card-top {
+ position: absolute;
+ top: 0;
+ left: 0;
+ display: inline-block;
+ width: 170px;
+ height: 25px;
+ background-color: #ffffff;
+}
+.card.arvados-object .card-info {
+ position: absolute;
+ top: 25px;
+ display: inline-block;
+ width: 100%;
+ height: 101px;
+ overflow: hidden;
+ background: #ffffff;
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+.card.arvados-object .card-info .title {
+ display: block;
+ margin: 8px 14px 0 14px;
+ overflow: hidden;
+ font-size: 16px;
+ font-weight: bold;
+ line-height: 18px;
+ color: #404040;
+}
+.card.arvados-object .card-info .desc {
+ display: block;
+ margin: 8px 14px 0 14px;
+ overflow: hidden;
+ font-size: 12px;
+ line-height: 16px;
+ color: #737373;
+ text-overflow: ellipsis;
+}
+.card.arvados-object .card-bottom {
+ position: absolute;
+ bottom: 0;
+ left: 0;
+ display: inline-block;
+ width: 100%;
+ padding: 10px 20px;
+ line-height: 29px;
+ text-align: center;
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+/* Style for _show_files tree view. */
+
+ul#collection_files {
+ padding: 0 .5em;
+}
+
+ul.collection_files {
+ line-height: 2.5em;
+ list-style-type: none;
+ padding-left: 2.3em;
+}
+
+ul.collection_files li {
+ clear: both;
+}
+
+.collection_files_row {
+ padding: 1px; /* Replaced by border for :hover */
+}
+
+.collection_files_row:hover {
+ background-color: #D9EDF7;
+ padding: 0px;
+ border: 1px solid #BCE8F1;
+ border-radius: 3px;
+}
+
+.collection_files_inline {
+ clear: both;
+ width: 80%;
+ height: auto;
+ max-height: 6em;
+ margin: 0 1em;
+}
+
+.collection_files_name {
+ padding-left: .5em;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.collection_files_name i.fa-fw:first-child {
+ width: 1.6em;
+}
+
/*
"active" and "inactive" colors are too similar for a toggle switch
in the default bootstrap theme.
--- /dev/null
+.arv-folder-list > .row {
+ padding-top: 5px;
+ padding-bottom: 5px;
+ padding-right: 1em;
+}
+.arv-folder-list > .row.folder:hover {
+ background: #d9edf7;
+}
+.arv-folder-list > .row.folder.active,
+.arv-folder-list > .row.folder.active:hover {
+ background: #428bca;
+ color: #fff;
+}
// Place all the styles related to the KeepDisks controller here.
// They will automatically be included in application.css.
// You can use Sass (SCSS) here: http://sass-lang.com/
+
+/* Margin allows us some space between the table above. */
+div.graph {
+ margin-top: 20px;
+}
+div.graph h3, div.graph h4 {
+ text-align: center;
+}
--- /dev/null
+/*
+Author: Start Bootstrap - http://startbootstrap.com
+'SB Admin' HTML Template by Start Bootstrap
+
+All Start Bootstrap themes are licensed under Apache 2.0.
+For more info and more free Bootstrap 3 HTML themes, visit http://startbootstrap.com!
+*/
+
+/* ATTN: This is mobile first CSS - to update 768px and up screen width use the media query near the bottom of the document! */
+
+/* Global Styles */
+
+body {
+ margin-top: 50px;
+}
+
+#wrapper {
+ padding-left: 0;
+}
+
+#page-wrapper {
+ width: 100%;
+ padding: 5px 15px;
+}
+
+/* Nav Messages */
+
+.messages-dropdown .dropdown-menu .message-preview .avatar,
+.messages-dropdown .dropdown-menu .message-preview .name,
+.messages-dropdown .dropdown-menu .message-preview .message,
+.messages-dropdown .dropdown-menu .message-preview .time {
+ display: block;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .avatar {
+ float: left;
+ margin-right: 15px;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .name {
+ font-weight: bold;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .message {
+ font-size: 12px;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .time {
+ font-size: 12px;
+}
+
+
+/* Nav Announcements */
+
+.announcement-heading {
+ font-size: 50px;
+ margin: 0;
+}
+
+.announcement-text {
+ margin: 0;
+}
+
+/* Table Headers */
+
+table.tablesorter thead {
+ cursor: pointer;
+}
+
+table.tablesorter thead tr th:hover {
+ background-color: #f5f5f5;
+}
+
+/* Flot Chart Containers */
+
+.flot-chart {
+ display: block;
+ height: 400px;
+}
+
+.flot-chart-content {
+ width: 100%;
+ height: 100%;
+}
+
+/* Edit Below to Customize Widths > 768px */
+@media (min-width:768px) {
+
+ /* Wrappers */
+
+ #wrapper {
+ padding-left: 225px;
+ }
+
+ #page-wrapper {
+ padding: 15px 25px;
+ }
+
+ /* Side Nav */
+
+ .side-nav {
+ margin-left: -225px;
+ left: 225px;
+ width: 225px;
+ position: fixed;
+ top: 50px;
+ height: calc(100% - 50px);
+ border-radius: 0;
+ border: none;
+ background-color: #f8f8f8;
+ overflow-y: auto;
+ overflow-x: hidden; /* no left nav scroll bar */
+ }
+
+ /* Bootstrap Default Overrides - Customized Dropdowns for the Side Nav */
+
+ .side-nav>li.dropdown>ul.dropdown-menu {
+ position: relative;
+ min-width: 225px;
+ margin: 0;
+ padding: 0;
+ border: none;
+ border-radius: 0;
+ background-color: transparent;
+ box-shadow: none;
+ -webkit-box-shadow: none;
+ }
+
+ .side-nav>li.dropdown>ul.dropdown-menu>li>a {
+ color: #777777;
+ padding: 15px 15px 15px 25px;
+ }
+
+ .side-nav>li.dropdown>ul.dropdown-menu>li>a:hover,
+ .side-nav>li.dropdown>ul.dropdown-menu>li>a.active,
+ .side-nav>li.dropdown>ul.dropdown-menu>li>a:focus {
+ background-color: #ffffff;
+ }
+
+ .side-nav>li>a {
+ width: 225px;
+ }
+
+ .navbar-default .navbar-nav.side-nav>li>a:hover,
+ .navbar-default .navbar-nav.side-nav>li>a:focus {
+ background-color: #ffffff;
+ }
+
+ /* Nav Messages */
+
+ .messages-dropdown .dropdown-menu {
+ min-width: 300px;
+ }
+
+ .messages-dropdown .dropdown-menu li a {
+ white-space: normal;
+ }
+
+ .navbar-collapse {
+ padding-left: 15px !important;
+ padding-right: 15px !important;
+ }
+
+}
--- /dev/null
+.selectable.active, .selectable:hover {
+ background: #d9edf7;
+}
width: 500px;
}
-#selection-form-content > li > a, #selection-form-content > li > input {
- display: block;
- padding: 3px 20px;
- clear: both;
- font-weight: normal;
- line-height: 1.42857;
- color: rgb(51, 51, 51);
- white-space: nowrap;
- border: none;
- background: transparent;
- width: 100%;
- text-align: left;
+#selection-form-content > li > a, #selection-form-content > li > button {
+ margin: 3px 20px;
}
#selection-form-content li table tr {
border-top: 1px solid rgb(221, 221, 221);
}
-#selection-form-content a:hover, #selection-form-content a:focus, #selection-form-content input:hover, #selection-form-content input:focus, #selection-form-content tr:hover {
- text-decoration: none;
- color: rgb(38, 38, 38);
- background-color: whitesmoke;
-}
\ No newline at end of file
+#selection-form-content li table tr:last-child {
+ border-bottom: 1px solid rgb(221, 221, 221);
+}
class ActionsController < ApplicationController
- skip_before_filter :find_object_by_uuid, only: :post
+ @@exposed_actions = {}
+ def self.expose_action method, &block
+ @@exposed_actions[method] = true
+ define_method method, block
+ end
+
+ def model_class
+ ArvadosBase::resource_class_for_uuid(params[:uuid])
+ end
+
+ def post
+ params.keys.collect(&:to_sym).each do |param|
+ if @@exposed_actions[param]
+ return self.send(param)
+ end
+ end
+ redirect_to :back
+ end
+
+ expose_action :copy_selections_into_folder do
+ already_named = Link.
+ filter([['tail_uuid','=',@object.uuid],
+ ['head_uuid','in',params["selection"]]]).
+ collect(&:head_uuid)
+ (params["selection"] - already_named).each do |s|
+ Link.create(tail_uuid: @object.uuid,
+ head_uuid: s,
+ link_class: 'name',
+ name: "#{s} added #{Time.now}")
+ end
+ redirect_to @object
+ end
- def combine_selected_files_into_collection
+ expose_action :combine_selected_files_into_collection do
lst = []
files = []
params["selection"].each do |s|
env = Hash[ENV].
merge({
'ARVADOS_API_HOST' =>
- $arvados_api_client.arvados_v1_base.
+ arvados_api_client.arvados_v1_base.
sub(/\/arvados\/v1/, '').
sub(/^https?:\/\//, ''),
'ARVADOS_API_TOKEN' => Thread.current[:arvados_api_token],
redirect_to controller: 'collections', action: :show, id: newc.uuid
end
- def post
- if params["combine_selected_files_into_collection"]
- combine_selected_files_into_collection
- else
- redirect_to :back
- end
- end
end
class ApiClientAuthorizationsController < ApplicationController
- def index
- m = model_class.all
- items_available = m.items_available
- offset = m.result_offset
- limit = m.result_limit
- filtered = m.to_ary.reject do |x|
- x.api_client_id == 0 or (x.expires_at and x.expires_at < Time.now) rescue false
- end
- ArvadosApiClient::patch_paging_vars(filtered, items_available, offset, limit, nil)
- @objects = ArvadosResourceList.new(ApiClientAuthorization)
- @objects.results= filtered
- super
- end
def index_pane_list
%w(Recent Help)
class ApplicationController < ActionController::Base
+ include ArvadosApiClientHelper
+
respond_to :html, :json, :js
protect_from_forgery
ERROR_ACTIONS = [:render_error, :render_not_found]
around_filter :thread_clear
- around_filter(:thread_with_mandatory_api_token,
- except: [:index, :show] + ERROR_ACTIONS)
+ around_filter :thread_with_mandatory_api_token, except: ERROR_ACTIONS
around_filter :thread_with_optional_api_token
before_filter :check_user_agreements, except: ERROR_ACTIONS
before_filter :check_user_notifications, except: ERROR_ACTIONS
- around_filter :using_reader_tokens, only: [:index, :show]
before_filter :find_object_by_uuid, except: [:index] + ERROR_ACTIONS
theme :select_theme
end
def index
+ @limit ||= 200
if params[:limit]
- limit = params[:limit].to_i
- else
- limit = 200
+ @limit = params[:limit].to_i
end
+ @offset ||= 0
if params[:offset]
- offset = params[:offset].to_i
- else
- offset = 0
+ @offset = params[:offset].to_i
+ end
+
+ @filters ||= []
+ if params[:filters]
+ filters = params[:filters]
+ if filters.is_a? String
+ filters = Oj.load filters
+ end
+ @filters += filters
end
- @objects ||= model_class.limit(limit).offset(offset).all
+ @objects ||= model_class
+ @objects = @objects.filter(@filters).limit(@limit).offset(@offset).all
respond_to do |f|
f.json { render json: @objects }
f.html { render }
return render_not_found("object not found")
end
respond_to do |f|
- f.json { render json: @object }
+ f.json { render json: @object.attributes.merge(href: url_for(@object)) }
f.html {
if request.method == 'GET'
render
end
def update
- updates = params[@object.class.to_s.underscore.singularize.to_sym]
- updates.keys.each do |attr|
+ @updates ||= params[@object.class.to_s.underscore.singularize.to_sym]
+ @updates.keys.each do |attr|
if @object.send(attr).is_a? Hash
- if updates[attr].is_a? String
- updates[attr] = Oj.load updates[attr]
+ if @updates[attr].is_a? String
+ @updates[attr] = Oj.load @updates[attr]
end
if params[:merge] || params["merge_#{attr}".to_sym]
# Merge provided Hash with current Hash, instead of
# replacing.
- updates[attr] = @object.send(attr).with_indifferent_access.
- deep_merge(updates[attr].with_indifferent_access)
+ @updates[attr] = @object.send(attr).with_indifferent_access.
+ deep_merge(@updates[attr].with_indifferent_access)
end
end
end
- if @object.update_attributes updates
+ if @object.update_attributes @updates
show
else
self.render_error status: 422
end
def create
- @object ||= model_class.new params[model_class.to_s.underscore.singularize]
+ @new_resource_attrs ||= params[model_class.to_s.underscore.singularize]
+ @new_resource_attrs ||= {}
+ @new_resource_attrs.reject! { |k,v| k.to_s == 'uuid' }
+ @object ||= model_class.new @new_resource_attrs
@object.save!
-
- respond_to do |f|
- f.json { render json: @object }
- f.html {
- redirect_to(params[:return_to] || @object)
- }
- f.js { render }
- end
+ show
end
def destroy
respond_to do |f|
f.html {
if request.method == 'GET'
- redirect_to $arvados_api_client.arvados_login_url(return_to: request.url)
+ redirect_to arvados_api_client.arvados_login_url(return_to: request.url)
else
flash[:error] = "Either you are not logged in, or your session has timed out. I can't automatically log you in and re-attempt this request."
redirect_to :back
false # For convenience to return from callbacks
end
- def using_reader_tokens(login_optional=false)
- if params[:reader_tokens].is_a?(Array) and params[:reader_tokens].any?
- Thread.current[:reader_tokens] = params[:reader_tokens]
- end
- begin
- yield
- rescue ArvadosApiClient::NotLoggedInException
- if login_optional
- raise
- else
- return redirect_to_login
- end
- ensure
- Thread.current[:reader_tokens] = nil
- end
- end
-
def using_specific_api_token(api_token)
start_values = {}
[:arvados_api_token, :user].each do |key|
if params[:id] and params[:id].match /\D/
params[:uuid] = params.delete :id
end
- if params[:uuid].is_a? String
- @object = model_class.find(params[:uuid])
+ if not model_class
+ @object = nil
+ elsif params[:uuid].is_a? String
+ if params[:uuid].empty?
+ @object = nil
+ else
+ @object = model_class.find(params[:uuid])
+ end
else
@object = model_class.where(uuid: params[:uuid]).first
end
class CollectionsController < ApplicationController
- skip_around_filter :thread_with_mandatory_api_token, only: [:show_file]
- skip_before_filter :find_object_by_uuid, only: [:provenance, :show_file]
- skip_before_filter :check_user_agreements, only: [:show_file]
+ skip_around_filter(:thread_with_mandatory_api_token,
+ only: [:show_file, :show_file_links])
+ skip_before_filter(:find_object_by_uuid,
+ only: [:provenance, :show_file, :show_file_links])
+
+ RELATION_LIMIT = 5
def show_pane_list
%w(Files Attributes Metadata Provenance_graph Used_by JSON API)
@request_url = request.url
end
+ def show_file_links
+ Thread.current[:reader_tokens] = [params[:reader_token]]
+ find_object_by_uuid
+ render layout: false
+ end
+
def show_file
# We pipe from arv-get to send the file to the user. Before we start it,
# we ask the API server if the file actually exists. This serves two
# purposes: it lets us return a useful status code for common errors, and
# helps us figure out which token to provide to arv-get.
coll = nil
- usable_token = find_usable_token do
+ tokens = [Thread.current[:arvados_api_token], params[:reader_token]].compact
+ usable_token = find_usable_token(tokens) do
coll = Collection.find(params[:uuid])
end
if usable_token.nil?
def show
return super if !@object
- @provenance = []
- @output2job = {}
- @output2colorindex = {}
- @sourcedata = {params[:uuid] => {uuid: params[:uuid]}}
- @protected = {}
-
- colorindex = -1
- any_hope_left = true
- while any_hope_left
- any_hope_left = false
- Job.where(output: @sourcedata.keys).sort_by { |a| a.finished_at || a.created_at }.reverse.each do |job|
- if !@output2colorindex[job.output]
- any_hope_left = true
- @output2colorindex[job.output] = (colorindex += 1) % 10
- @provenance << {job: job, output: job.output}
- @sourcedata.delete job.output
- @output2job[job.output] = job
- job.dependencies.each do |new_source_data|
- unless @output2colorindex[new_source_data]
- @sourcedata[new_source_data] = {uuid: new_source_data}
- end
- end
- end
- end
- end
-
- Link.where(head_uuid: @sourcedata.keys | @output2job.keys).each do |link|
- if link.link_class == 'resources' and link.name == 'wants'
- @protected[link.head_uuid] = true
- if link.tail_uuid == current_user.uuid
- @is_persistent = true
- end
- end
- end
- Link.where(tail_uuid: @sourcedata.keys).each do |link|
- if link.link_class == 'data_origin'
- @sourcedata[link.tail_uuid][:data_origins] ||= []
- @sourcedata[link.tail_uuid][:data_origins] << [link.name, link.head_uuid]
+ if current_user
+ jobs_with = lambda do |conds|
+ Job.limit(RELATION_LIMIT).where(conds)
+ .results.sort_by { |j| j.finished_at || j.created_at }
end
- end
- Collection.where(uuid: @sourcedata.keys).each do |collection|
- if @sourcedata[collection.uuid]
- @sourcedata[collection.uuid][:collection] = collection
- end
- end
-
- Collection.where(uuid: @object.uuid).each do |u|
- puts request
- @prov_svg = ProvenanceHelper::create_provenance_graph(u.provenance, "provenance_svg",
- {:request => request,
- :direction => :bottom_up,
- :combine_jobs => :script_only}) rescue nil
- @used_by_svg = ProvenanceHelper::create_provenance_graph(u.used_by, "used_by_svg",
- {:request => request,
- :direction => :top_down,
- :combine_jobs => :script_only,
- :pdata_only => true}) rescue nil
- end
+ @output_of = jobs_with.call(output: @object.uuid)
+ @log_of = jobs_with.call(log: @object.uuid)
+ folder_links = Link.limit(RELATION_LIMIT).order("modified_at DESC")
+ .where(head_uuid: @object.uuid, link_class: 'name').results
+ folder_hash = Group.where(uuid: folder_links.map(&:tail_uuid)).to_hash
+ @folders = folder_links.map { |link| folder_hash[link.tail_uuid] }
+ @permissions = Link.limit(RELATION_LIMIT).order("modified_at DESC")
+ .where(head_uuid: @object.uuid, link_class: 'permission',
+ name: 'can_read').results
+ @logs = Log.limit(RELATION_LIMIT).order("created_at DESC")
+ .where(object_uuid: @object.uuid).results
+ @is_persistent = Link.limit(1)
+ .where(head_uuid: @object.uuid, tail_uuid: current_user.uuid,
+ link_class: 'resources', name: 'wants')
+ .results.any?
+ end
+ @prov_svg = ProvenanceHelper::create_provenance_graph(@object.provenance, "provenance_svg",
+ {:request => request,
+ :direction => :bottom_up,
+ :combine_jobs => :script_only}) rescue nil
+ @used_by_svg = ProvenanceHelper::create_provenance_graph(@object.used_by, "used_by_svg",
+ {:request => request,
+ :direction => :top_down,
+ :combine_jobs => :script_only,
+ :pdata_only => true}) rescue nil
end
protected
- def find_usable_token
- # Iterate over every token available to make it the current token and
+ def find_usable_token(token_list)
+ # Iterate over every given token to make it the current token and
# yield the given block.
# If the block succeeds, return the token it used.
# Otherwise, render an error response based on the most specific
# error we encounter, and return nil.
- read_tokens = [Thread.current[:arvados_api_token]].compact
- if params[:reader_tokens].is_a? Array
- read_tokens += params[:reader_tokens]
- end
most_specific_error = [401]
- read_tokens.each do |api_token|
+ token_list.each do |api_token|
using_specific_api_token(api_token) do
begin
yield
end
def file_in_collection?(collection, filename)
- def normalized_path(part_list)
- File.join(part_list).sub(%r{^\./}, '')
- end
- target = normalized_path([filename])
+ target = CollectionsHelper.file_path(File.split(filename))
collection.files.each do |file_spec|
- return true if (normalized_path(file_spec[0, 2]) == target)
+ return true if (CollectionsHelper.file_path(file_spec) == target)
end
false
end
end
class FileStreamer
+ include ArvadosApiClientHelper
def initialize(opts={})
@opts = opts
end
def each
return unless @opts[:uuid] && @opts[:file]
- env = Hash[ENV].
- merge({
- 'ARVADOS_API_HOST' =>
- $arvados_api_client.arvados_v1_base.
- sub(/\/arvados\/v1/, '').
- sub(/^https?:\/\//, ''),
- 'ARVADOS_API_TOKEN' =>
- @opts[:arvados_api_token],
- 'ARVADOS_API_HOST_INSECURE' =>
- Rails.configuration.arvados_insecure_https ? 'true' : 'false'
- })
+
+ env = Hash[ENV].dup
+
+ require 'uri'
+ u = URI.parse(arvados_api_client.arvados_v1_base)
+ env['ARVADOS_API_HOST'] = "#{u.host}:#{u.port}"
+ env['ARVADOS_API_TOKEN'] = @opts[:arvados_api_token]
+ env['ARVADOS_API_HOST_INSECURE'] = "true" if Rails.configuration.arvados_insecure_https
+
IO.popen([env, 'arv-get', "#{@opts[:uuid]}/#{@opts[:file]}"],
'rb') do |io|
- while buf = io.read(2**20)
+ while buf = io.read(2**16)
yield buf
end
end
--- /dev/null
+class FoldersController < ApplicationController
+ def model_class
+ Group
+ end
+
+ def index_pane_list
+ %w(Folders)
+ end
+
+ def remove_item
+ @removed_uuids = []
+ links = []
+ item = ArvadosBase.find params[:item_uuid]
+ if (item.class == Link and
+ item.link_class == 'name' and
+ item.tail_uuid = @object.uuid)
+ # Given uuid is a name link, linking an object to this
+ # folder. First follow the link to find the item we're removing,
+ # then delete the link.
+ links << item
+ item = ArvadosBase.find item.head_uuid
+ else
+ # Given uuid is an object. Delete all names.
+ links += Link.where(tail_uuid: @object.uuid,
+ head_uuid: item.uuid,
+ link_class: 'name')
+ end
+ links.each do |link|
+ @removed_uuids << link.uuid
+ link.destroy
+ end
+ if item.owner_uuid == @object.uuid
+ # Object is owned by this folder. Remove it from the folder by
+ # changing owner to the current user.
+ item.update_attributes owner_uuid: current_user
+ @removed_uuids << item.uuid
+ end
+ end
+
+ def index
+ @objects = Group.where(group_class: 'folder').order('name')
+ parent_of = {current_user.uuid => 'me'}
+ @objects.each do |ob|
+ parent_of[ob.uuid] = ob.owner_uuid
+ end
+ children_of = {false => [], 'me' => [current_user]}
+ @objects.each do |ob|
+ if ob.owner_uuid != current_user.uuid and
+ not parent_of.has_key? ob.owner_uuid
+ parent_of[ob.uuid] = false
+ end
+ children_of[parent_of[ob.uuid]] ||= []
+ children_of[parent_of[ob.uuid]] << ob
+ end
+ buildtree = lambda do |children_of, root_uuid=false|
+ tree = {}
+ children_of[root_uuid].andand.each do |ob|
+ tree[ob] = buildtree.call(children_of, ob.uuid)
+ end
+ tree
+ end
+ sorted_paths = lambda do |tree, depth=0|
+ paths = []
+ tree.keys.sort_by { |ob|
+ ob.is_a?(String) ? ob : ob.friendly_link_name
+ }.each do |ob|
+ paths << {object: ob, depth: depth}
+ paths += sorted_paths.call tree[ob], depth+1
+ end
+ paths
+ end
+ @my_folder_tree =
+ sorted_paths.call buildtree.call(children_of, 'me')
+ @shared_folder_tree =
+ sorted_paths.call({'Shared with me' =>
+ buildtree.call(children_of, false)})
+ end
+
+ def choose
+ index
+ render partial: 'choose'
+ end
+
+ def show
+ @objects = @object.contents include_linked: true
+ @share_links = Link.filter([['head_uuid', '=', @object.uuid],
+ ['link_class', '=', 'permission']])
+ @logs = Log.limit(10).filter([['object_uuid', '=', @object.uuid]])
+
+ @objects_and_names = []
+ @objects.each do |object|
+ if !(name_links = @objects.links_for(object, 'name')).empty?
+ name_links.each do |name_link|
+ @objects_and_names << [object, name_link]
+ end
+ else
+ @objects_and_names << [object,
+ Link.new(tail_uuid: @object.uuid,
+ head_uuid: object.uuid,
+ link_class: "name",
+ name: "")]
+ end
+ end
+
+ super
+ end
+
+ def create
+ @new_resource_attrs = (params['folder'] || {}).merge(group_class: 'folder')
+ @new_resource_attrs[:name] ||= 'New folder'
+ super
+ end
+
+ def update
+ @updates = params['folder']
+ super
+ end
+end
class GroupsController < ApplicationController
def index
- @groups = Group.all
+ @groups = Group.filter [['group_class', 'not in', ['folder']]]
@group_uuids = @groups.collect &:uuid
@links_from = Link.where link_class: 'permission', tail_uuid: @group_uuids
@links_to = Link.where link_class: 'permission', head_uuid: @group_uuids
end
+
+ def show
+ return redirect_to(folder_path(@object)) if @object.group_class == 'folder'
+ super
+ end
end
def index
@svg = ""
if params[:uuid]
- @jobs = Job.where(uuid: params[:uuid])
- generate_provenance(@jobs)
+ @objects = Job.where(uuid: params[:uuid])
+ generate_provenance(@objects)
else
- @jobs = Job.all
+ @limit = 20
+ super
end
end
@object = KeepDisk.new defaults.merge(params[:keep_disk] || {})
super
end
+
+ def index
+ # Retrieve cache age histogram info from logs.
+
+ # In the logs we expect to find it in an ordered list with entries
+ # of the form (mtime, disk proportion free).
+
+ # An entry of the form (1388747781, 0.52) means that if we deleted
+  # the oldest non-persisted blocks until we had 52% of the disk
+ # free, then all blocks with an mtime greater than 1388747781
+ # would be preserved.
+
+  # The chart we want to produce will tell us how much of the disk
+ # will be free if we use a cache age of x days. Therefore we will
+ # produce output specifying the age, cache and persisted. age is
+ # specified in milliseconds. cache is the size of the cache if we
+ # delete all blocks older than age. persistent is the size of the
+ # persisted blocks. It is constant regardless of age, but it lets
+ # us show a stacked graph.
+
+ # Finally each entry in cache_age_histogram is a dictionary,
+  # because that's what our charting package wants.
+
+ @cache_age_histogram = []
+ @histogram_pretty_date = nil
+ histogram_log = Log.
+ filter([[:event_type, '=', 'block-age-free-space-histogram']]).
+ order(:created_at => :desc).
+ limit(1)
+ histogram_log.each do |log_entry|
+ # We expect this block to only execute at most once since we
+ # specified limit(1)
+ @cache_age_histogram = log_entry['properties'][:histogram]
+ # Javascript wants dates in milliseconds.
+ histogram_date_ms = log_entry['event_at'].to_i * 1000
+ @histogram_pretty_date = log_entry['event_at'].strftime('%b %-d, %Y')
+
+ total_free_cache = @cache_age_histogram[-1][1]
+ persisted_storage = 1 - total_free_cache
+ @cache_age_histogram.map! { |x| {:age => histogram_date_ms - x[0]*1000,
+ :cache => total_free_cache - x[1],
+ :persisted => persisted_storage} }
+ end
+
+ # Do the regular control work needed.
+ super
+ end
end
--- /dev/null
+class KeepServicesController < ApplicationController
+end
end
def show_pane_list
- %w(Components Graph Attributes Metadata JSON API)
+ panes = %w(Components Graph Attributes Metadata JSON API)
+ if @object and @object.state.in? ['New', 'Ready']
+ panes = %w(Inputs) + panes
+ end
+ panes
end
def compare_pane_list
end
def index
- @objects ||= model_class.limit(20).all
+ @limit = 20
super
end
skip_before_filter :find_object_by_uuid, :only => [:destroy, :index]
def destroy
session.clear
- redirect_to $arvados_api_client.arvados_logout_url(return_to: root_url)
+ redirect_to arvados_api_client.arvados_logout_url(return_to: root_url)
end
def index
redirect_to root_url if session[:arvados_api_token]
end
def sudo
- resp = $arvados_api_client.api(ApiClientAuthorization, '', {
- api_client_authorization: {
- owner_uuid: @object.uuid
- }
- })
+ resp = arvados_api_client.api(ApiClientAuthorization, '', {
+ api_client_authorization: {
+ owner_uuid: @object.uuid
+ }
+ })
redirect_to root_url(api_token: resp[:api_token])
end
def human_readable_bytes_html(n)
return h(n) unless n.is_a? Fixnum
+ return "0 bytes" if (n == 0)
orders = {
1 => "bytes",
attrvalue = attrvalue.to_json if attrvalue.is_a? Hash or attrvalue.is_a? Array
- link_to attrvalue.to_s, '#', {
+ ajax_options = {
+ "data-pk" => {
+ id: object.uuid,
+ key: object.class.to_s.underscore
+ }
+ }
+ if object.uuid
+ ajax_options['data-url'] = url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore)
+ else
+ ajax_options['data-url'] = url_for(action: "create", controller: object.class.to_s.pluralize.underscore)
+ ajax_options['data-pk'][:defaults] = object.attributes
+ end
+ ajax_options['data-pk'] = ajax_options['data-pk'].to_json
+
+ content_tag 'span', attrvalue.to_s, {
"data-emptytext" => "none",
"data-placement" => "bottom",
"data-type" => input_type,
- "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore),
"data-title" => "Update #{attr.gsub '_', ' '}",
"data-name" => attr,
- "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+ "data-object-uuid" => object.uuid,
:class => "editable"
- }.merge(htmloptions)
+ }.merge(htmloptions).merge(ajax_options)
end
def render_pipeline_component_attribute(object, attr, subattr, value_info, htmloptions={})
"data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
"data-showbuttons" => "false",
"data-value" => attrvalue,
- :class => "editable #{'required' if required}",
+ :class => "editable #{'required' if required} form-control",
:id => id
}.merge(htmloptions)
lt += raw("add_form_selection_sources(#{selectables.to_json});\n")
end
- lt += raw("$('##{id}').editable({source: function() { return select_form_sources('#{dataclass}'); } });\n")
+ lt += raw("$('[data-name=\"#{dn}\"]').editable({source: function() { return select_form_sources('#{dataclass}'); } });\n")
lt += raw("</script>")
lt
end
+
+ def render_arvados_object_list_start(list, button_text, button_href,
+ params={}, *rest, &block)
+ show_max = params.delete(:show_max) || 3
+ params[:class] ||= 'btn btn-xs btn-default'
+ list[0...show_max].each { |item| yield item }
+ unless list[show_max].nil?
+ link_to(h(button_text) +
+ raw(' <i class="fa fa-fw fa-arrow-circle-right"></i>'),
+ button_href, params, *rest)
+ end
+ end
end
--- /dev/null
+module ArvadosApiClientHelper
+ def arvados_api_client
+ ArvadosApiClient.new_or_current
+ end
+end
+
+# For the benefit of themes that still expect $arvados_api_client to work:
+class ArvadosClientProxyHack
+ def method_missing *args
+ ArvadosApiClient.new_or_current.send *args
+ end
+end
+$arvados_api_client = ArvadosClientProxyHack.new
--- /dev/null
+module FoldersHelper
+end
pj
end
+ def pipeline_log_history(job_uuids)
+ results = []
+
+ log_history = Log.where(event_type: 'stderr',
+ object_uuid: job_uuids).order('id DESC')
+ if !log_history.results.empty?
+ reversed_results = log_history.results.reverse
+ reversed_results.each do |entry|
+ if entry.andand.properties
+ properties = entry.properties
+ text = properties[:text]
+ if text
+ results = results.concat text.split("\n")
+ end
+ end
+ end
+ end
+
+ return results
+ end
protected
i = -1
object.components.each do |cname, c|
- puts cname, c
i += 1
pj = {index: i, name: cname}
pj[:job] = c[:job].is_a?(Hash) ? c[:job] : {}
class InvalidApiResponseException < StandardError
end
- @@client_mtx = Mutex.new
- @@api_client = nil
@@profiling_enabled = Rails.configuration.profiling_enabled
+ @@discovery = nil
+
+ # An API client object suitable for handling API requests on behalf
+ # of the current thread.
+ def self.new_or_current
+ # If this thread doesn't have an API client yet, *or* this model
+ # has been reloaded since the existing client was created, create
+ # a new client. Otherwise, keep using the latest client created in
+ # the current thread.
+ unless Thread.current[:arvados_api_client].andand.class == self
+ Thread.current[:arvados_api_client] = new
+ end
+ Thread.current[:arvados_api_client]
+ end
+
+ def initialize *args
+ @api_client = nil
+ @client_mtx = Mutex.new
+ end
def api(resources_kind, action, data=nil)
profile_checkpoint
- @@client_mtx.synchronize do
- if not @@api_client
- @@api_client = HTTPClient.new
+ if not @api_client
+ @client_mtx.synchronize do
+ @api_client = HTTPClient.new
if Rails.configuration.arvados_insecure_https
- @@api_client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
+ @api_client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
else
# Use system CA certificates
- @@api_client.ssl_config.add_trust_ca('/etc/ssl/certs')
+ @api_client.ssl_config.add_trust_ca('/etc/ssl/certs')
end
end
end
header = {"Accept" => "application/json"}
- profile_checkpoint { "Prepare request #{url} #{query[:uuid]} #{query[:where]}" }
- msg = @@api_client.post(url,
- query,
- header: header)
+ profile_checkpoint { "Prepare request #{url} #{query[:uuid]} #{query[:where]} #{query[:filters]}" }
+ msg = @client_mtx.synchronize do
+ @api_client.post(url,
+ query,
+ header: header)
+ end
profile_checkpoint 'API transaction'
if msg.status_code == 401
end
def discovery
- @discovery ||= api '../../discovery/v1/apis/arvados/v1/rest', ''
+ @@discovery ||= api '../../discovery/v1/apis/arvados/v1/rest', ''
end
def kind_class(kind)
self.abstract_class = true
attr_accessor :attribute_sortkey
+ def self.arvados_api_client
+ ArvadosApiClient.new_or_current
+ end
+
+ def arvados_api_client
+ ArvadosApiClient.new_or_current
+ end
+
def self.uuid_infix_object_kind
@@uuid_infix_object_kind ||=
begin
infix_kind = {}
- $arvados_api_client.discovery[:schemas].each do |name, schema|
+ arvados_api_client.discovery[:schemas].each do |name, schema|
if schema[:uuidPrefix]
infix_kind[schema[:uuidPrefix]] =
'arvados#' + name.to_s.camelcase(:lower)
end
end
- def initialize(*args)
- super(*args)
+ def initialize raw_params={}
+ super self.class.permit_attribute_params(raw_params)
@attribute_sortkey ||= {
'id' => nil,
- 'uuid' => '000',
- 'owner_uuid' => '001',
- 'created_at' => '002',
- 'modified_at' => '003',
- 'modified_by_user_uuid' => '004',
- 'modified_by_client_uuid' => '005',
- 'name' => '050',
- 'tail_uuid' => '100',
- 'head_uuid' => '101',
- 'info' => 'zzz-000',
- 'updated_at' => 'zzz-999'
+ 'name' => '000',
+ 'owner_uuid' => '002',
+ 'event_type' => '100',
+ 'link_class' => '100',
+ 'group_class' => '100',
+ 'tail_uuid' => '101',
+ 'head_uuid' => '102',
+ 'object_uuid' => '102',
+ 'summary' => '104',
+ 'description' => '104',
+ 'properties' => '150',
+ 'info' => '150',
+ 'created_at' => '200',
+ 'modified_at' => '201',
+ 'modified_by_user_uuid' => '202',
+ 'modified_by_client_uuid' => '203',
+ 'uuid' => '999',
}
end
return @columns unless @columns.nil?
@columns = []
@attribute_info ||= {}
- schema = $arvados_api_client.discovery[:schemas][self.to_s.to_sym]
+ schema = arvados_api_client.discovery[:schemas][self.to_s.to_sym]
return @columns if schema.nil?
schema[:properties].each do |k, coldef|
case k
@columns << column(k, :text)
serialize k, coldef[:type].constantize
end
- attr_accessible k
@attribute_info[k] = coldef
end
end
raise 'argument to find() must be a uuid string. Acceptable formats: warehouse locator or string with format xxxxx-xxxxx-xxxxxxxxxxxxxxx'
end
+ if self == ArvadosBase
+ # Determine type from uuid and defer to the appropriate subclass.
+ return resource_class_for_uuid(uuid).find(uuid, opts)
+ end
+
# Only do one lookup on the API side per {class, uuid, workbench
# request} unless {cache: false} is given via opts.
cache_key = "request_#{Thread.current.object_id}_#{self.to_s}_#{uuid}"
if opts[:cache] == false
- Rails.cache.write cache_key, $arvados_api_client.api(self, '/' + uuid)
+ Rails.cache.write cache_key, arvados_api_client.api(self, '/' + uuid)
end
hash = Rails.cache.fetch cache_key do
- $arvados_api_client.api(self, '/' + uuid)
+ arvados_api_client.api(self, '/' + uuid)
end
new.private_reload(hash)
end
ArvadosResourceList.new(self).all(*args)
end
+ def self.permit_attribute_params raw_params
+ # strong_parameters does not provide security in Workbench: anyone
+ # who can get this far can just as well do a call directly to our
+ # database (Arvados) with the same credentials we use.
+ #
+ # The following permit! is necessary even with
+ # "ActionController::Parameters.permit_all_parameters = true",
+ # because permit_all does not permit nested attributes.
+ ActionController::Parameters.new(raw_params).permit!
+ end
+
+ def self.create raw_params={}
+ super(permit_attribute_params(raw_params))
+ end
+
+ def update_attributes raw_params={}
+ super(self.class.permit_attribute_params(raw_params))
+ end
+
def save
obdata = {}
self.class.columns.each do |col|
if etag
postdata['_method'] = 'PUT'
obdata.delete :uuid
- resp = $arvados_api_client.api(self.class, '/' + uuid, postdata)
+ resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
else
- resp = $arvados_api_client.api(self.class, '', postdata)
+ resp = arvados_api_client.api(self.class, '', postdata)
end
return false if !resp[:etag] || !resp[:uuid]
def destroy
if etag || uuid
postdata = { '_method' => 'DELETE' }
- resp = $arvados_api_client.api(self.class, '/' + uuid, postdata)
+ resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
resp[:etag] && resp[:uuid] && resp
else
true
ok
end
end
- @links = $arvados_api_client.api Link, '', { _method: 'GET', where: o, eager: true }
- @links = $arvados_api_client.unpack_api_response(@links)
+ @links = arvados_api_client.api Link, '', { _method: 'GET', where: o, eager: true }
+ @links = arvados_api_client.unpack_api_response(@links)
end
def all_links
return @all_links if @all_links
- res = $arvados_api_client.api Link, '', {
+ res = arvados_api_client.api Link, '', {
_method: 'GET',
where: {
tail_kind: self.kind,
},
eager: true
}
- @all_links = $arvados_api_client.unpack_api_response(res)
+ @all_links = arvados_api_client.unpack_api_response(res)
end
def reload
if uuid_or_hash.is_a? Hash
hash = uuid_or_hash
else
- hash = $arvados_api_client.api(self.class, '/' + uuid_or_hash)
+ hash = arvados_api_client.api(self.class, '/' + uuid_or_hash)
end
hash.each do |k,v|
if self.respond_to?(k.to_s + '=')
}
end
+ # Type name shown to users for this object; base implementation is
+ # just the class name. Subclasses may override.
+ def class_for_display
+ self.class.to_s
+ end
+
def self.creatable?
current_user
end
+ # Whether objects of this type can be placed in folders. Defaults to
+ # false; folder-compatible subclasses override this to return true.
+ def self.goes_in_folders?
+ false
+ end
+
def editable?
(current_user and current_user.is_active and
(current_user.is_admin or
- current_user.uuid == self.owner_uuid))
+ current_user.uuid == self.owner_uuid or
+ new_record? or
+ (writable_by.include? current_user.uuid rescue false)))
end
def attribute_editable?(attr)
false
elsif not (current_user.andand.is_active)
false
- elsif "uuid owner_uuid".index(attr.to_s) or current_user.is_admin
+ elsif attr == 'uuid'
current_user.is_admin
else
- current_user.uuid == self.owner_uuid or current_user.uuid == self.uuid
+ editable?
end
end
end
resource_class = nil
uuid.match /^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/ do |re|
- resource_class ||= $arvados_api_client.
+ resource_class ||= arvados_api_client.
kind_class(self.uuid_infix_object_kind[re[1]])
end
if opts[:referring_object] and
opts[:referring_attr] and
opts[:referring_attr].match /_uuid$/
- resource_class ||= $arvados_api_client.
+ resource_class ||= arvados_api_client.
kind_class(opts[:referring_object].
attributes[opts[:referring_attr].
sub(/_uuid$/, '_kind')])
(name if self.respond_to? :name) || uuid
end
+ # Short human-readable summary of this object's content; defaults to
+ # the display class name. Subclasses may prepend details.
+ def content_summary
+ self.class_for_display
+ end
+
def selection_label
friendly_link_name
end
+ # The object that owns this record, looked up by owner_uuid, or nil
+ # if the lookup raises (e.g. owner not readable by current user).
+ def owner
+ ArvadosBase.find(owner_uuid) rescue nil
+ end
+
protected
def forget_uuid!
class ArvadosResourceList
+ include ArvadosApiClientHelper
include Enumerable
def initialize resource_class=nil
end
cond.keys.select { |x| x.match /_kind$/ }.each do |kind_key|
if cond[kind_key].is_a? Class
- cond = cond.merge({ kind_key => 'arvados#' + $arvados_api_client.class_kind(cond[kind_key]) })
+ cond = cond.merge({ kind_key => 'arvados#' + arvados_api_client.class_kind(cond[kind_key]) })
end
end
api_params = {
api_params[:offset] = @offset if @offset
api_params[:order] = @orderby_spec if @orderby_spec
api_params[:filters] = @filters if @filters
- res = $arvados_api_client.api @resource_class, '', api_params
- @results = $arvados_api_client.unpack_api_response res
+ res = arvados_api_client.api @resource_class, '', api_params
+ @results = arvados_api_client.unpack_api_response res
self
end
self
end
+ # Yield each fetched result to the block and return the array of
+ # block return values, like Enumerable#collect.
+ def collect
+ results.collect do |m|
+ yield m
+ end
+ end
+
def first
results.first
end
class Collection < ArvadosBase
+ include ApplicationHelper
MD5_EMPTY = 'd41d8cd98f00b204e9800998ecf8427e'
!!locator.to_s.match("^#{MD5_EMPTY}(\\+.*)?\$")
end
+ def self.goes_in_folders?
+ true
+ end
+
+ def content_summary
+ human_readable_bytes_html(total_bytes) + " " + super
+ end
+
def total_bytes
if files
tot = 0
end
end
+ # Flatten this collection's files into a depth-first list of
+ # [dirname, filename, size] entries for rendering as a tree.
+ # Subdirectory entries carry nil in the size position; files keep
+ # their size from the manifest.
+ def files_tree
+ tree = files.group_by { |file_spec| File.split(file_spec.first) }
+ # Fill in entries for empty directories.
+ tree.keys.map { |basedir, _| File.split(basedir) }.each do |splitdir|
+ until tree.include?(splitdir)
+ tree[splitdir] = []
+ splitdir = File.split(splitdir.first)
+ end
+ end
+ dir_to_tree = lambda do |dirname|
+ # First list subdirectories, with their files inside.
+ subnodes = tree.keys.select { |bd, td| (bd == dirname) and (td != '.') }
+ .sort.flat_map do |parts|
+ # parts + [nil] marks the directory itself; nil size flags it.
+ [parts + [nil]] + dir_to_tree.call(File.join(parts))
+ end
+ # Then extend that list with files in this directory.
+ subnodes + tree[File.split(dirname)]
+ end
+ dir_to_tree.call('.')
+ end
+
def attribute_editable?(attr)
false
end
end
def provenance
- $arvados_api_client.api "collections/#{self.uuid}/", "provenance"
+ arvados_api_client.api "collections/#{self.uuid}/", "provenance"
end
def used_by
- $arvados_api_client.api "collections/#{self.uuid}/", "used_by"
+ arvados_api_client.api "collections/#{self.uuid}/", "used_by"
end
end
class Group < ArvadosBase
+ def self.goes_in_folders?
+ true
+ end
+
def contents params={}
- res = $arvados_api_client.api self.class, "/#{self.uuid}/contents", {
+ res = arvados_api_client.api self.class, "/#{self.uuid}/contents", {
_method: 'GET'
}.merge(params)
ret = ArvadosResourceList.new
- ret.results = $arvados_api_client.unpack_api_response(res)
+ ret.results = arvados_api_client.unpack_api_response(res)
ret
end
+
+ def class_for_display
+ group_class == 'folder' ? 'Folder' : super
+ end
+
+ # A group is editable only when the API reports it writable by the
+ # current user via the writable_by list.
+ # NOTE(review): assumes current_user is non-nil — verify for
+ # anonymous/logged-out rendering paths.
+ def editable?
+ respond_to?(:writable_by) and
+ writable_by and
+ writable_by.index(current_user.uuid)
+ end
end
class Human < ArvadosBase
+ def self.goes_in_folders?
+ true
+ end
end
class Job < ArvadosBase
+ def self.goes_in_folders?
+ true
+ end
+
def attribute_editable?(attr)
false
end
+
+ def self.creatable?
+ false
+ end
end
--- /dev/null
+# Workbench model for a Keep storage service record.
+class KeepService < ArvadosBase
+ # Only admin users may register new Keep services.
+ def self.creatable?
+ current_user and current_user.is_admin
+ end
+end
class Log < ArvadosBase
attr_accessor :object
+ def self.creatable?
+ # Technically yes, but not worth offering: it will be empty, and
+ # you won't be able to edit it.
+ false
+ end
end
class PipelineInstance < ArvadosBase
attr_accessor :pipeline_template
+ def self.goes_in_folders?
+ true
+ end
+
def update_job_parameters(new_params)
self.components[:steps].each_with_index do |step, i|
step[:params].each do |param|
end
def attribute_editable?(attr)
- attr.to_sym == :name || (attr.to_sym == :components and self.active == nil)
+ attr && (attr.to_sym == :name ||
+ (attr.to_sym == :components and (self.state == 'New' || self.state == 'Ready')))
end
def attributes_for_display
class PipelineTemplate < ArvadosBase
+ def self.goes_in_folders?
+ true
+ end
+
def self.creatable?
false
end
class Specimen < ArvadosBase
+ def self.goes_in_folders?
+ true
+ end
end
class Trait < ArvadosBase
+ def self.goes_in_folders?
+ true
+ end
end
end
def self.current
- res = $arvados_api_client.api self, '/current'
- $arvados_api_client.unpack_api_response(res)
+ res = arvados_api_client.api self, '/current'
+ arvados_api_client.unpack_api_response(res)
end
def self.system
- $arvados_system_user ||= begin
- res = $arvados_api_client.api self, '/system'
- $arvados_api_client.unpack_api_response(res)
- end
+ @@arvados_system_user ||= begin
+ res = arvados_api_client.api self, '/system'
+ arvados_api_client.unpack_api_response(res)
+ end
end
def full_name
end
def activate
- self.private_reload($arvados_api_client.api(self.class,
- "/#{self.uuid}/activate",
- {}))
+ self.private_reload(arvados_api_client.api(self.class,
+ "/#{self.uuid}/activate",
+ {}))
end
def attributes_for_display
end
def unsetup
- self.private_reload($arvados_api_client.api(self.class,
- "/#{self.uuid}/unsetup",
- {}))
+ self.private_reload(arvados_api_client.api(self.class,
+ "/#{self.uuid}/unsetup",
+ {}))
end
def self.setup params
- $arvados_api_client.api(self, "/setup", params)
+ arvados_api_client.api(self, "/setup", params)
end
end
class UserAgreement < ArvadosBase
def self.signatures
- res = $arvados_api_client.api self, '/signatures'
- $arvados_api_client.unpack_api_response(res)
+ res = arvados_api_client.api self, '/signatures'
+ arvados_api_client.unpack_api_response(res)
end
def self.sign(params)
- res = $arvados_api_client.api self, '/sign', params
- $arvados_api_client.unpack_api_response(res)
+ res = arvados_api_client.api self, '/sign', params
+ arvados_api_client.unpack_api_response(res)
end
end
<% pane_list ||= %w(recent) %>
<% panes = Hash[pane_list.map { |pane|
[pane, render(partial: 'show_' + pane.downcase,
- locals: { comparable: comparable })]
+ locals: { comparable: comparable, objects: @objects })]
}.compact] %>
<ul class="nav nav-tabs">
<% if object.editable? %>
- <%= link_to({action: 'destroy', id: object.uuid}, method: :delete, remote: true, data: {confirm: "You are about to delete #{object.class} #{object.uuid}.\n\nAre you sure?"}) do %>
+ <%= link_to({action: 'destroy', id: object.uuid}, method: :delete, remote: true, data: {confirm: "You are about to delete #{object.class_for_display.downcase} '#{object.friendly_link_name}' (#{object.uuid}).\n\nAre you sure?"}) do %>
<i class="glyphicon glyphicon-trash"></i>
<% end %>
<% end %>
-<% if p.success %>
+<% if p.state == 'Complete' %>
<span class="label label-success">finished</span>
-<% elsif p.success == false %>
+<% elsif p.state == 'Failed' %>
<span class="label label-danger">failed</span>
-<% elsif p.active %>
+<% elsif p.state == 'RunningOnServer' || p.state == 'RunningOnClient' %>
<span class="label label-info">running</span>
<% else %>
<% if (p.components.select do |k,v| v[:job] end).length == 0 %>
-<%if object %>
+<%if object and object.class.goes_in_folders? %>
<% fn = if defined? friendly_name
friendly_name
else
<tr>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "uuid", attrvalue: link.uuid } %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "link_class", attrvalue: link.link_class } %></td>
- <td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "name", attrvalue: link.name } %></td>
+ <td><%= render_editable_attribute link, 'name' %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "properties", attrvalue: link.properties } %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "head_uuid", attrvalue: link.head_uuid } %></td>
</tr>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "uuid", attrvalue: link.uuid } %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "tail_uuid", attrvalue: link.tail_uuid } %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "link_class", attrvalue: link.link_class } %></td>
- <td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "name", attrvalue: link.name } %></td>
+ <td><%= render_editable_attribute link, 'name' %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "properties", attrvalue: link.properties } %></td>
</tr>
<% end %>
--- /dev/null
+<% htmloptions = {class: ''}.merge(htmloptions || {})
+ htmloptions[:class] += " btn-#{size}" rescue nil %>
+<%= link_to_if_arvados_object object, { link_text: raw('Show <i class="fa fa-fw fa-arrow-circle-right"></i>') }, { class: 'btn btn-default ' + htmloptions[:class] } %>
-<% if @objects.empty? %>
+<% if objects.empty? %>
<br/>
<p style="text-align: center">
- No <%= controller.model_class.to_s.pluralize.underscore.gsub '_', ' ' %> to display.
+ No <%= controller.controller_name.humanize.downcase %> to display.
</p>
<% else %>
-<% attr_blacklist = ' created_at modified_at modified_by_user_uuid modified_by_client_uuid updated_at' %>
+<% attr_blacklist = ' created_at modified_at modified_by_user_uuid modified_by_client_uuid updated_at owner_uuid group_class' %>
-<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
<%= form_tag do |f| %>
<table class="table table-condensed arv-index">
<thead>
<tr>
+ <% if objects.first and objects.first.class.goes_in_folders? %>
+ <th></th>
+ <% end %>
<th></th>
- <% @objects.first.attributes_for_display.each do |attr, attrvalue| %>
+ <% objects.first.attributes_for_display.each do |attr, attrvalue| %>
<% next if attr_blacklist.index(" "+attr) %>
<th class="arv-attr-<%= attr %>">
<%= controller.model_class.attribute_info[attr.to_sym].andand[:column_heading] or attr.sub /_uuid/, '' %>
</thead>
<tbody>
- <% @objects.each do |object| %>
+ <% objects.each do |object| %>
<tr data-object-uuid="<%= object.uuid %>">
+ <% if objects.first.class.goes_in_folders? %>
+ <td>
+ <%= render :partial => "selection_checkbox", :locals => {:object => object} %>
+ </td>
+ <% end %>
<td>
- <%= render :partial => "selection_checkbox", :locals => {:object => object} %>
+ <%= render :partial => "show_object_button", :locals => {object: object, size: 'xs'} %>
</td>
<% object.attributes_for_display.each do |attr, attrvalue| %>
<% next if attr_blacklist.index(" "+attr) %>
<td class="arv-object-<%= object.class.to_s %> arv-attr-<%= attr %>">
<% if attr == 'uuid' %>
- <%= link_to_if_arvados_object object %>
- <%= link_to_if_arvados_object(object, { link_text: raw('<i class="icon-hand-right"></i>') }) %>
+ <span class="arvados-uuid"><%= attrvalue %></span>
<% else %>
<% if object.attribute_editable? attr %>
<%= render_editable_attribute object, attr %>
<% end %>
-<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
<% end %>
<% content_for :page_title do %>
-<%= controller.model_class.to_s.pluralize.underscore.capitalize.gsub('_', ' ') %>
+<%= controller.controller_name.humanize.capitalize %>
<% end %>
<% content_for :tab_line_buttons do %>
'data-target' => '#user-setup-modal-window', return_to: request.url} %>
<div id="user-setup-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
<% else %>
- <%= button_to "Add a new #{controller.model_class.to_s.underscore.gsub '_', ' '}",
- { action: 'create', return_to: request.url },
+ <%= button_to "Add a new #{controller.controller_name.singularize.humanize.downcase}",
+ { action: 'create' },
{ class: 'btn btn-primary pull-right' } %>
<% end %>
-<% content_for :css do %>
-.file-list-inline-image {
- width: 50%;
- height: auto;
-}
-<% end %>
-
<% content_for :tab_line_buttons do %>
<div class="row">
<div class="col-md-6"></div>
</div>
<% end %>
-<table class="table table-condensed table-fixedlayout">
- <colgroup>
- <col width="4%" />
- <col width="35%" />
- <col width="40%" />
- <col width="15%" />
- <col width="10%" />
- </colgroup>
- <thead>
- <tr>
- <th></th>
- <th>path</th>
- <th>file</th>
- <th style="text-align:right">size</th>
- <th>d/l</th>
- </tr>
- </thead><tbody>
- <% if @object then @object.files.sort_by{|f|[f[0],f[1]]}.each do |file| %>
- <% file_path = CollectionsHelper::file_path file %>
- <tr>
- <td>
- <%= check_box_tag 'uuids[]', @object.uuid+'/'+file_path, false, {
+<% file_tree = @object.andand.files_tree %>
+<% if file_tree.nil? or file_tree.empty? %>
+ <p>This collection is empty.</p>
+<% else %>
+ <ul id="collection_files" class="collection_files">
+ <% dirstack = [file_tree.first.first] %>
+ <% file_tree.each_with_index do |(dirname, filename, size), index| %>
+ <% file_path = CollectionsHelper::file_path([dirname, filename]) %>
+ <% while dirstack.any? and (dirstack.last != dirname) %>
+ <% dirstack.pop %></ul></li>
+ <% end %>
+ <li>
+ <% if size.nil? # This is a subdirectory. %>
+ <% dirstack.push(File.join(dirname, filename)) %>
+ <div class="collection_files_row">
+ <div class="collection_files_name"><i class="fa fa-fw fa-folder-open"></i> <%= filename %></div>
+ </div>
+ <ul class="collection_files">
+ <% else %>
+ <% link_params = {controller: 'collections', action: 'show_file',
+ uuid: @object.uuid, file: file_path, size: size} %>
+ <div class="collection_files_row">
+ <div class="collection_files_buttons pull-right">
+ <%= raw(human_readable_bytes_html(size)) %>
+ <%= check_box_tag 'uuids[]', "#{@object.uuid}/#{file_path}", false, {
:class => 'persistent-selection',
:friendly_type => "File",
:friendly_name => "#{@object.uuid}/#{file_path}",
- :href => "#{url_for controller: 'collections', action: 'show', id: @object.uuid }/#{file_path}",
- :title => "Click to add this item to your selection list"
+ :href => url_for(controller: 'collections', action: 'show_file',
+ uuid: @object.uuid, file: file_path),
+ :title => "Include #{file_path} in your selections",
} %>
- </td>
- <td>
- <%= file[0] %>
- </td>
-
- <td>
- <%= link_to (if CollectionsHelper::is_image file[1]
- image_tag "#{url_for @object}/#{file_path}", class: "file-list-inline-image"
- else
- file[1]
- end),
- {controller: 'collections', action: 'show_file', uuid: @object.uuid, file: file_path, size: file[2], disposition: 'inline'},
- {title: file_path} %>
- </td>
-
- <td style="text-align:right">
- <%= raw(human_readable_bytes_html(file[2])) %>
- </td>
-
- <td>
- <div style="display:inline-block">
- <%= link_to raw('<i class="glyphicon glyphicon-download-alt"></i>'), {controller: 'collections', action: 'show_file', uuid: @object.uuid, file: file_path, size: file[2], disposition: 'attachment'}, {class: 'btn btn-info btn-sm', title: 'Download'} %>
- </div>
- </td>
- </tr>
- <% end; end %>
- </tbody>
-</table>
+ <%= link_to(raw('<i class="fa fa-search"></i>'),
+ link_params.merge(disposition: 'inline'),
+ {title: "View #{file_path}", class: "btn btn-info btn-sm"}) %>
+ <%= link_to(raw('<i class="fa fa-download"></i>'),
+ link_params.merge(disposition: 'attachment'),
+ {title: "Download #{file_path}", class: "btn btn-info btn-sm"}) %>
+ </div>
+ <% if CollectionsHelper::is_image(filename) %>
+ <div class="collection_files_name"><i class="fa fa-fw fa-bar-chart-o"></i> <%= filename %></div>
+ </div>
+ <div class="collection_files_inline">
+ <%= link_to(image_tag("#{url_for @object}/#{file_path}"),
+ link_params.merge(disposition: 'inline'),
+ {title: file_path}) %>
+ </div>
+ <% else %>
+ <div class="collection_files_name"><i class="fa fa-fw fa-file"></i> <%= filename %></div>
+ </div>
+ <% end %>
+ </li>
+ <% end # if file or directory %>
+ <% end # file_tree.each %>
+ <%= raw(dirstack.map { |_| "</ul>" }.join("</li>")) %>
+<% end # if file_tree %>
+++ /dev/null
-<table class="topalign table table-bordered">
- <thead>
- <tr class="contain-align-left">
- <th>
- job
- </th><th>
- version
- </th><th>
- status
- </th><th>
- start
- </th><th>
- finish
- </th><th>
- clock time
- </th>
- </tr>
- </thead>
- <tbody>
-
- <% @provenance.reverse.each do |p| %>
- <% j = p[:job] %>
-
- <% if j %>
-
- <tr class="job">
- <td>
- <tt><%= j.uuid %></tt>
- <br />
- <tt class="deemphasize"><%= j.submit_id %></tt>
- </td><td>
- <%= j.script_version %>
- </td><td>
- <span class="label <%= if j.success then 'label-success'; elsif j.running then 'label-primary'; else 'label-warning'; end %>">
- <%= j.success || j.running ? 'ok' : 'failed' %>
- </span>
- </td><td>
- <%= j.started_at %>
- </td><td>
- <%= j.finished_at %>
- </td><td>
- <% if j.started_at and j.finished_at %>
- <%= raw(distance_of_time_in_words(j.started_at, j.finished_at).sub('about ','~').sub(' ',' ')) %>
- <% elsif j.started_at and j.running %>
- <%= raw(distance_of_time_in_words(j.started_at, Time.now).sub('about ','~').sub(' ',' ')) %> (running)
- <% end %>
- </td>
- </tr>
-
- <% else %>
- <tr>
- <td>
- <span class="label label-danger">lookup fail</span>
- <br />
- <tt class="deemphasize"><%= p[:target] %></tt>
- </td><td colspan="4">
- </td>
- </tr>
- <% end %>
-
- <% end %>
-
- </tbody>
-</table>
+++ /dev/null
-<%= content_for :css do %>
-<%# https://github.com/mbostock/d3/wiki/Ordinal-Scales %>
-<% n=-1; %w(#1f77b4 #ff7f0e #2ca02c #d62728 #9467bd #8c564b #e377c2 #7f7f7f #bcbd22 #17becf).each do |color| %>
-.colorseries-10-<%= n += 1 %>, .btn.colorseries-10-<%= n %>:hover, .label.colorseries-10-<%= n %>:hover {
- *background-color: <%= color %>;
- background-color: <%= color %>;
- background-image: none;
-}
-<% end %>
-.colorseries-nil { }
-.label a {
- color: inherit;
-}
-<% end %>
-
-<table class="topalign table table-bordered">
- <thead>
- </thead>
- <tbody>
-
- <% @provenance.reverse.each do |p| %>
- <% j = p[:job] %>
-
- <% if j %>
-
- <tr class="job">
- <td style="padding-bottom: 3em">
- <table class="table" style="margin-bottom: 0; background: #f0f0ff">
- <% j.script_parameters.each do |k,v| %>
- <tr>
- <td style="width: 20%">
- <%= k.to_s %>
- </td><td style="width: 60%">
- <% if v and @output2job.has_key? v %>
- <tt class="label colorseries-10-<%= @output2colorindex[v] %>"><%= link_to_if_arvados_object v %></tt>
- <% else %>
- <span class="deemphasize"><%= link_to_if_arvados_object v %></span>
- <% end %>
- </td><td style="text-align: center; width: 20%">
- <% if v
- if @protected[v]
- labelclass = 'success'
- labeltext = 'keep'
- else
- labelclass = @output2job.has_key?(v) ? 'warning' : 'danger'
- labeltext = 'cache'
- end %>
-
- <tt class="label label-<%= labelclass %>"><%= labeltext %></tt>
- <% end %>
- </td>
- </tr>
- <% end %>
- </table>
- <div style="text-align: center">
- ↓
- <br />
- <span class="label"><%= j.script %><br /><tt><%= link_to_if j.script_version.match(/[0-9a-f]{40}/), j.script_version, "https://arvados.org/projects/arvados/repository/revisions/#{j.script_version}/entry/crunch_scripts/#{j.script}" if j.script_version %></tt></span>
- <br />
- ↓
- <br />
- <tt class="label colorseries-10-<%= @output2colorindex[p[:output]] %>"><%= link_to_if_arvados_object p[:output] %></tt>
- </div>
- </td>
- <td>
- <tt><span class="deemphasize">job:</span><br /><%= link_to_if_arvados_object j %><br /><span class="deemphasize"><%= j.submit_id %></span></tt>
- </td>
- </tr>
-
- <% else %>
- <tr>
- <td>
- <span class="label label-danger">lookup fail</span>
- <br />
- <tt class="deemphasize"><%= p[:target] %></tt>
- </td><td colspan="5">
- </td>
- </tr>
- <% end %>
-
- <% end %>
-
- </tbody>
-</table>
+++ /dev/null
-<table class="table table-bordered table-striped">
- <thead>
- <tr class="contain-align-left">
- <th>
- collection
- </th><th class="data-size">
- data size
- </th><th>
- storage
- </th><th>
- origin
- </th>
- </tr>
- </thead>
- <tbody>
-
- <% @sourcedata.values.each do |sourcedata| %>
-
- <tr class="collection">
- <td>
- <tt class="label"><%= sourcedata[:uuid] %></tt>
- </td><td class="data-size">
- <%= raw(human_readable_bytes_html(sourcedata[:collection].data_size)) if sourcedata[:collection] and sourcedata[:collection].data_size %>
- </td><td>
- <% if @protected[sourcedata[:uuid]] %>
- <span class="label label-success">keep</span>
- <% else %>
- <span class="label label-danger">cache</span>
- <% end %>
- </td><td>
- <% if sourcedata[:data_origins] %>
- <% sourcedata[:data_origins].each do |data_origin| %>
- <span class="deemphasize"><%= data_origin[0] %></span>
- <%= data_origin[2] %>
- <br />
- <% end %>
- <% end %>
- </td>
- </tr>
-
- <% end %>
-
- </tbody>
-</table>
--- /dev/null
+<div class="row row-fill-height">
+ <div class="col-md-6">
+ <div class="panel panel-info">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ <% default_name = "Collection #{@object.uuid}" %>
+ <% name_html = render_editable_attribute @object, 'name', nil, {data: {emptytext: default_name}} %>
+ <%= (/\S/.match(name_html)) ? name_html : default_name %>
+ </h3>
+ </div>
+ <div class="panel-body">
+ <img src="/favicon.ico" class="pull-right" alt="" style="opacity: 0.3"/>
+ <% if not (@output_of.andand.any? or @log_of.andand.any?) %>
+ <p><i>No source information available.</i></p>
+ <% end %>
+
+ <% if @output_of.andand.any? %>
+ <p>Output of jobs:<br />
+ <%= render_arvados_object_list_start(@output_of, 'Show all jobs',
+ jobs_path(filter: [['output', '=', @object.uuid]].to_json)) do |job| %>
+ <%= link_to_if_arvados_object(job, friendly_name: true) %><br />
+ <% end %>
+ </p>
+ <% end %>
+
+ <% if @log_of.andand.any? %>
+ <p>Log of jobs:<br />
+ <%= render_arvados_object_list_start(@log_of, 'Show all jobs',
+ jobs_path(filter: [['log', '=', @object.uuid]].to_json)) do |job| %>
+ <%= link_to_if_arvados_object(job, friendly_name: true) %><br />
+ <% end %>
+ </p>
+ <% end %>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ Activity
+ </h3>
+ </div>
+ <div class="panel-body smaller-text">
+ <!--
+ <input type="text" class="form-control" placeholder="Search"/>
+ -->
+ <div style="height:0.5em;"></div>
+ <% if not @logs.andand.any? %>
+ <p>
+ Created: <%= @object.created_at.to_s(:long) %>
+ </p>
+ <p>
+ Last modified: <%= @object.modified_at.to_s(:long) %> by <%= link_to_if_arvados_object @object.modified_by_user_uuid, friendly_name: true %>
+ </p>
+ <% else %>
+ <%= render_arvados_object_list_start(@logs, 'Show all activity',
+ logs_path(filters: [['object_uuid','=',@object.uuid]].to_json)) do |log| %>
+ <p>
+ <%= time_ago_in_words(log.event_at) %> ago: <%= log.summary %>
+ <% if log.object_uuid %>
+ <%= link_to_if_arvados_object log.object_uuid, link_text: raw('<i class="fa fa-hand-o-right"></i>') %>
+ <% end %>
+ </p>
+ <% end %>
+ <% end %>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ Sharing and permissions
+ </h3>
+ </div>
+ <div class="panel-body">
+ <!--
+ <input type="text" class="form-control" placeholder="Search"/>
+ -->
+ <div style="height:0.5em;"></div>
+ <% if @folders.andand.any? %>
+ <p>Included in folders:<br />
+ <%= render_arvados_object_list_start(@folders, 'Show all folders',
+ links_path(filter: [['head_uuid', '=', @object.uuid],
+ ['link_class', '=', 'name']].to_json)) do |folder| %>
+ <%= link_to_if_arvados_object(folder, friendly_name: true) %><br />
+ <% end %>
+ </p>
+ <% end %>
+ <% if @permissions.andand.any? %>
+ <p>Readable by:<br />
+ <%= render_arvados_object_list_start(@permissions, 'Show all permissions',
+ links_path(filter: [['head_uuid', '=', @object.uuid],
+ ['link_class', '=', 'permission']].to_json)) do |link| %>
+ <%= link_to_if_arvados_object(link.tail_uuid, friendly_name: true) %><br />
+ <% end %>
+ </p>
+ <% end %>
+ </div>
+ </div>
+ </div>
+</div>
+
+<%= render file: 'application/show.html.erb' %>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<% coll_name = (@object.name =~ /\S/) ? @object.name : "Collection #{@object.uuid}" %>
+<% link_opts = {controller: 'collections', action: 'show_file',
+ uuid: @object.uuid, reader_token: params[:reader_token]} %>
+<head>
+ <meta charset="utf-8">
+ <title>
+ <%= coll_name %> / <%= Rails.configuration.site_name %>
+ </title>
+ <meta name="description" content="">
+ <meta name="author" content="">
+ <meta name="robots" content="NOINDEX">
+ <style type="text/css">
+body {
+ margin: 1.5em;
+}
+pre {
+ background-color: #D9EDF7;
+ border-radius: .25em;
+ padding: .75em;
+ overflow: auto;
+}
+.footer {
+ font-size: 82%;
+}
+.footer h2 {
+ font-size: 1.2em;
+}
+ </style>
+</head>
+<body>
+
+<h1><%= coll_name %></h1>
+
+<p>This collection of data files is being shared with you through
+Arvados. You can download individual files listed below. To download
+the entire collection with wget, try:</p>
+
+<pre>$ wget --mirror --no-parent --no-host --cut-dirs=3 <%=
+ url_for(link_opts.merge(action: 'show_file_links', only_path: false))
+ %></pre>
+
+<h2>File Listing</h2>
+
+<% if @object.andand.files_tree.andand.any? %>
+ <ul id="collection_files" class="collection_files">
+ <% dirstack = [@object.files_tree.first.first] %>
+ <% @object.files_tree.each_with_index do |(dirname, filename, size), index| %>
+ <% file_path = CollectionsHelper::file_path([dirname, filename]) %>
+ <% while dirstack.any? and (dirstack.last != dirname) %>
+ <% dirstack.pop %></ul></li>
+ <% end %>
+ <li>
+ <% if size.nil? # This is a subdirectory. %>
+ <% dirstack.push(File.join(dirname, filename)) %>
+ <%= filename %>
+ <ul class="collection_files">
+ <% else %>
+ <%= link_to(filename,
+ {controller: 'collections', action: 'show_file',
+ uuid: @object.uuid, file: file_path,
+ reader_token: params[:reader_token]},
+ {title: "Download #{file_path}"}) %>
+ </li>
+ <% end %>
+ <% end %>
+ <%= raw(dirstack.map { |_| "</ul>" }.join("</li>")) %>
+<% else %>
+ <p>No files in this collection.</p>
+<% end %>
+
+<div class="footer">
+<h2>About Arvados</h2>
+
+<p>Arvados is a free and open source software bioinformatics platform.
+To learn more, visit arvados.org.
+Arvados is not responsible for the files listed on this page.</p>
+</div>
+
+</body>
+</html>
--- /dev/null
+<div class="modal">
+ <div class="modal-dialog">
+ <div class="modal-content">
+
+ <div class="modal-header">
+ <button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">×</button>
+ <h4 class="modal-title"><%= params[:title] || 'Choose folder' %></h4>
+ </div>
+
+ <div class="modal-body">
+ <div class="container-fluid arv-folder-list selectable-container" style="height: 15em; overflow-y: scroll">
+ <% [@my_folder_tree, @shared_folder_tree].each do |tree| %>
+ <% tree.each do |foldernode| %>
+ <% if foldernode[:object].is_a? String %>
+ <div class="row" style="padding-left: <%= 1 + foldernode[:depth] %>em;">
+ <i class="fa fa-fw fa-folder-open-o"></i>
+ <%= foldernode[:object] %>
+ </div>
+ <% else %>
+ <div class="<%= 'selectable folder' if !params[:editable] || foldernode[:object].editable? %> row" style="padding-left: <%= 1 + foldernode[:depth] %>em;" data-object-uuid="<%= foldernode[:object].uuid %>">
+ <i class="fa fa-fw fa-folder-o"></i>
+ <% if foldernode[:object].uuid == current_user.uuid %>
+ My Folders
+ <% else %>
+ <%= foldernode[:object].friendly_link_name || 'New folder' %>
+ <% end %>
+ </div>
+ <% end %>
+ <% end %>
+ <% end %>
+ </div>
+ </div>
+
+ <div class="modal-footer">
+ <button class="btn btn-default" data-dismiss="modal" aria-hidden="true">Cancel</button>
+ <button class="btn btn-primary" aria-hidden="true"><%= params[:action_name] || 'Select' %></button>
+ <div class="modal-error hide" style="text-align: left; margin-top: 1em;">
+ </div>
+ </div>
+ </div>
+ </div>
+</div>
--- /dev/null
+$('body > .modal-container').html("<%= escape_javascript(render partial: 'choose.html') %>");
+$('body > .modal-container .modal').modal('show');
+$('body > .modal-container .modal .modal-footer .btn-primary').
+ addClass('<%= j params[:action_class] %>').
+ attr('data-action-href', '<%= j params[:action_href] %>').
+ attr('data-method', '<%= j params[:action_method] %>').
+ data('action-data', <%= raw params[:action_data] %>);
+$(document).trigger('ajax:complete');
--- /dev/null
+<div class="container-fluid arv-folder-list">
+ <% [@my_folder_tree, @shared_folder_tree].each do |tree| %>
+ <% tree.each do |foldernode| %>
+ <% rowtype = foldernode[:object].class %>
+ <div class="<%= 'folder' if rowtype == Group %> row" style="padding-left: <%= 1 + foldernode[:depth] %>em;">
+ <% if rowtype == String %>
+ <i class="fa fa-fw fa-folder-open-o"></i>
+ <%= foldernode[:object] %>
+ <% elsif rowtype == User %>
+ <% if foldernode[:object].uuid == current_user.andand.uuid %>
+ <i class="fa fa-fw fa-folder-open-o"></i>
+ My Folders
+ <% else %>
+ <i class="fa fa-fw fa-folder-o"></i>
+ <%= foldernode[:object].friendly_link_name %>
+ <% end %>
+ <% else %>
+ <i class="fa fa-fw fa-folder-o"></i>
+ <%= link_to foldernode[:object] do %>
+ <%= foldernode[:object].friendly_link_name %>
+ <% end %>
+ <div class="pull-right">
+ <%= render partial: 'delete_object_button', locals: {object: foldernode[:object]} %>
+ </div>
+ <% end %>
+ </div>
+ <% end %>
+ <% end %>
+</div>
--- /dev/null
+<% @removed_uuids.each do |uuid| %>
+$('[data-object-uuid=<%= uuid %>]').hide('slow', function() {
+ $(this).remove();
+});
+<% end %>
--- /dev/null
+<div class="row row-fill-height">
+ <div class="col-md-6">
+ <div class="panel panel-info">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ <%= render_editable_attribute @object, 'name', nil, {data: {emptytext: "New folder"}} %>
+ </h3>
+ </div>
+ <div class="panel-body">
+ <img src="/favicon.ico" class="pull-right" alt="" style="opacity: 0.3"/>
+ <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "Created: #{@object.created_at.to_s(:long)}", 'data-toggle' => 'manual', 'id' => "#{@object.uuid}-description" } %>
+ <% if @object.attribute_editable? 'description' %>
+ <div style="margin-top: 1em;">
+ <a href="#" class="btn btn-xs btn-default" data-toggle="x-editable" data-toggle-selector="#<%= @object.uuid %>-description"><i class="fa fa-fw fa-pencil"></i> Edit description</a>
+ </div>
+ <% end %>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ Activity
+ </h3>
+ </div>
+ <div class="panel-body smaller-text">
+ <!--
+ <input type="text" class="form-control" placeholder="Search"/>
+ -->
+ <div style="height:0.5em;"></div>
+ <% if @logs.any? %>
+ <%= render_arvados_object_list_start(@logs, 'Show all activity',
+ logs_path(filters: [['object_uuid','=',@object.uuid]].to_json)) do |log| %>
+ <p>
+ <%= time_ago_in_words(log.event_at) %> ago: <%= log.summary %>
+ <% if log.object_uuid %>
+ <%= link_to_if_arvados_object log.object_uuid, link_text: raw('<i class="fa fa-hand-o-right"></i>') %>
+ <% end %>
+ </p>
+ <% end %>
+ <% else %>
+ <p>
+ Created: <%= @object.created_at.to_s(:long) %>
+ </p>
+ <p>
+ Last modified: <%= @object.modified_at.to_s(:long) %> by <%= link_to_if_arvados_object @object.modified_by_user_uuid, friendly_name: true %>
+ </p>
+ <% end %>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ Sharing and permissions
+ </h3>
+ </div>
+ <div class="panel-body">
+ <!--
+ <input type="text" class="form-control" placeholder="Search"/>
+ -->
+ <div style="height:0.5em;"></div>
+ <% if @object.owner %>
+ <p>Permissions inherited from:
+ <br />
+ <% if User == resource_class_for_uuid(@object.owner_uuid) %>
+ <i class="fa fa-fw fa-user"></i>
+ <% else %>
+ <i class="fa fa-fw fa-folder"></i>
+ <% end %>
+ <%= link_to_if_arvados_object @object.owner_uuid, friendly_name: true %>
+ <%= button_to('Move to...',
+ choose_folders_path(
+ title: 'Move to...',
+ editable: true,
+ action_name: 'Move',
+ action_href: folder_path(@object.uuid),
+ action_method: 'put',
+ action_data: {selection_param: 'folder[owner_uuid]'}.to_json),
+ { class: "btn btn-default btn-xs arv-move-to-folder", remote: true, method: 'get' }) %>
+ </p>
+ <hr />
+ <% end %>
+ <p>
+ <% if not @share_links.any? %>
+ <span class="deemphasize">(No additional permissions)</span>
+ <% else %>
+ Also shared with:
+ <% @share_links.andand.each do |link| %>
+ <br /><%= link_to_if_arvados_object link.tail_uuid, friendly_name: true %>
+ <% end %>
+ <% end %>
+ </p>
+ </div>
+ </div>
+ </div>
+</div>
+
+<% if @show_cards %>
+<!-- disable cards section until we have bookmarks -->
+<div class="row">
+ <% @objects[0..3].each do |object| %>
+ <div class="card arvados-object">
+ <div class="card-top blue">
+ <a href="#">
+ <img src="/favicon.ico" alt=""/>
+ </a>
+ </div>
+ <div class="card-info">
+ <span class="title"><%= @objects.name_for(object) || object.class_for_display %></span>
+ <div class="desc"><%= object.respond_to?(:description) ? object.description : object.uuid %></div>
+ </div>
+ <div class="card-bottom">
+ <%= render :partial => "show_object_button", :locals => {object: object, htmloptions: {class: 'btn-default btn-block'}} %>
+ </div>
+ </div>
+ <% end %>
+</div>
+<!-- end disabled cards section -->
+<% end %>
+
+<div class="row">
+ <div class="col-md-12">
+ <div class="panel panel-info">
+ <div class="panel-heading">
+ <div class="row">
+ <div class="col-md-6">
+ <h3 class="panel-title" style="vertical-align:middle;">
+ Contents
+ </h3>
+ </div>
+ <div class="col-md-6">
+ <div class="input-group input-group-sm pull-right">
+ <input type="text" class="form-control search-folder-contents" placeholder="Search folder contents"/>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="panel-body">
+ <p>
+ </p>
+ <table class="table table-condensed arv-index">
+ <tbody>
+ <colgroup>
+ <col width="3%" />
+ <col width="8%" />
+ <col width="30%" />
+ <col width="15%" />
+ <col width="15%" />
+ <col width="20%" />
+ <col width="8%" />
+ </colgroup>
+ <% @objects_and_names.each do |object, name_link| %>
+ <tr data-object-uuid="<%= (name_link && name_link.uuid) || object.uuid %>">
+ <td>
+ <%= render :partial => "selection_checkbox", :locals => {object: object} %>
+ </td>
+ <td>
+ <%= render :partial => "show_object_button", :locals => {object: object, size: 'xs'} %>
+ </td>
+ <td>
+ <%= render_editable_attribute name_link, 'name', nil, {data: {emptytext: "Unnamed #{object.class_for_display}"}} %>
+ </td>
+ <td>
+ <%= object.content_summary %>
+ </td>
+ <td title="<%= object.modified_at %>">
+ <span>
+ <%= raw distance_of_time_in_words(object.modified_at, Time.now).sub('about ','~').sub(' ',' ') + ' ago' rescue object.modified_at %>
+ </span>
+ </td>
+ <td class="arvados-uuid">
+ <%= object.uuid %>
+ </td>
+ <td>
+ <% if @object.editable? %>
+ <%= link_to({action: 'remove_item', id: @object.uuid, item_uuid: ((name_link && name_link.uuid) || object.uuid)}, method: :delete, remote: true, data: {confirm: "You are about to remove #{object.class_for_display} #{object.uuid} from this folder.\n\nAre you sure?"}, class: 'btn btn-xs btn-default') do %>
+ Remove <i class="fa fa-fw fa-ban"></i>
+ <% end %>
+ <% end %>
+ </td>
+ </tr>
+ <% end %>
+ </tbody>
+ <thead>
+ <tr>
+ <th>
+ </th>
+ <th>
+ </th>
+ <th>
+ name
+ </th>
+ <th>
+ type
+ </th>
+ <th>
+ modified
+ </th>
+ <th>
+ uuid
+ </th>
+ <th>
+ </th>
+ </tr>
+ </thead>
+ </table>
+ <p></p>
+ </div>
+ </div>
+ </div>
+</div>
}
<% end %>
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
+
<table class="topalign table">
<thead>
<tr class="contain-align-left">
</thead>
<tbody>
- <% @jobs.sort_by { |j| j[:created_at] }.reverse.each do |j| %>
+ <% @objects.sort_by { |j| j[:created_at] }.reverse.each do |j| %>
<tr class="cell-noborder">
<td>
</div>
</td>
<td>
- <%= link_to_if_arvados_object j.uuid %>
+ <%= link_to_if_arvados_object j %>
</td>
<td>
<%= j.script %>
--- /dev/null
+<% unless @histogram_pretty_date.nil? %>
+ <% content_for :tab_panes do %>
+ <%# We use protocol-relative paths here to avoid browsers refusing to load javascript over http in a page that was loaded over https. %>
+ <%= javascript_include_tag '//cdnjs.cloudflare.com/ajax/libs/raphael/2.1.2/raphael-min.js' %>
+ <%= javascript_include_tag '//cdnjs.cloudflare.com/ajax/libs/morris.js/0.4.3/morris.min.js' %>
+ <script type="text/javascript">
+ $(document).ready(function(){
+ $.renderHistogram(<%= raw @cache_age_histogram.to_json %>);
+ });
+ </script>
+ <div class='graph'>
+ <h3>Cache Age vs. Disk Utilization</h3>
+ <h4>circa <%= @histogram_pretty_date %></h4>
+ <div id='cache-age-vs-disk-histogram'>
+ </div>
+ </div>
+ <% end %>
+<% end %>
+<%= content_for :content_top %>
+<%= content_for :tab_line_buttons %>
+<%= content_for :tab_panes %>
<link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
<meta name="description" content="">
<meta name="author" content="">
+ <% if current_user %>
+ <meta name="arv-websocket-url" content="<%=$arvados_api_client.discovery[:websocketUrl]%>?api_token=<%=Thread.current[:arvados_api_token]%>">
+ <% end %>
+ <meta name="robots" content="NOINDEX, NOFOLLOW">
<%= stylesheet_link_tag "application", :media => "all" %>
<%= javascript_include_tag "application" %>
<%= csrf_meta_tags %>
padding-top: 70px; /* 70px to make the container go all the way to the bottom of the navbar */
}
- body > div.container-fluid > div.col-sm-9.col-sm-offset-3 {
- overflow: auto;
- }
-
@media (max-width: 979px) { body { padding-top: 0; } }
.navbar .nav li.nav-separator > span.glyphicon.glyphicon-arrow-right {
padding-top: 1.25em;
}
- @media (min-width: 768px) {
- .left-nav {
- position: fixed;
- }
- }
@media (max-width: 767px) {
.breadcrumbs {
display: none;
}
}
</style>
+ <link href="//netdna.bootstrapcdn.com/font-awesome/4.0.3/css/font-awesome.css" rel="stylesheet">
</head>
<body>
-
- <div class="navbar navbar-default navbar-fixed-top">
- <div class="container-fluid">
+ <div id="wrapper">
+ <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
<div class="navbar-header">
- <button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#workbench-navbar.navbar-collapse">
+ <button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<a class="navbar-brand" href="/"><%= Rails.configuration.site_name rescue Rails.application.class.parent_name %></a>
</div>
- <div class="collapse navbar-collapse" id="workbench-navbar">
- <ul class="nav navbar-nav navbar-left breadcrumbs">
- <% if current_user %>
- <% if content_for?(:breadcrumbs) %>
- <%= yield(:breadcrumbs) %>
- <% else %>
- <li class="nav-separator"><span class="glyphicon glyphicon-arrow-right"></span></li>
- <li>
- <%= link_to(
- controller.model_class.to_s.pluralize.underscore.gsub('_', ' '),
- url_for({controller: params[:controller]})) %>
- </li>
- <% if params[:action] != 'index' %>
- <li class="nav-separator">
- <span class="glyphicon glyphicon-arrow-right"></span>
- </li>
- <li>
- <%= link_to_if_arvados_object @object %>
+ <div class="collapse navbar-collapse">
+ <% if current_user.andand.is_active %>
+ <ul class="nav navbar-nav side-nav">
+
+ <li class="<%= 'arvados-nav-active' if params[:action] == 'home' %>">
+ <a href="/"><i class="fa fa-lg fa-dashboard fa-fw"></i> Dashboard</a>
</li>
- <li style="padding: 14px 0 14px">
- <%= form_tag do |f| %>
- <%= render :partial => "selection_checkbox", :locals => {:object => @object} %>
- <% end %>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown"><i class="fa fa-lg fa-hand-o-up fa-fw"></i> Help <b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> Tutorials and User guide'), "#{Rails.configuration.arvados_docsite}/user", target: "_blank" %></li>
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> API Reference'), "#{Rails.configuration.arvados_docsite}/api", target: "_blank" %></li>
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> SDK Reference'), "#{Rails.configuration.arvados_docsite}/sdk", target: "_blank" %></li>
+ </ul>
</li>
- <% end %>
- <% end %>
- <% end %>
- </ul>
-
- <ul class="nav navbar-nav navbar-right">
-
- <li>
- <a><i class="rotating loading glyphicon glyphicon-refresh"></i></a>
- </li>
-
- <% if current_user %>
- <!-- XXX placeholder for this when search is implemented
- <li>
- <form class="navbar-form" role="search">
- <div class="input-group" style="width: 220px">
- <input type="text" class="form-control" placeholder="search">
- <span class="input-group-addon"><span class="glyphicon glyphicon-search"></span></span>
- </div>
- </form>
- </li>
- -->
-
- <li class="dropdown notification-menu">
- <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="collections-menu">
- <span class="glyphicon glyphicon-paperclip"></span>
- <span class="badge" id="persistent-selection-count"></span>
- <span class="caret"></span>
- </a>
- <ul class="dropdown-menu" role="menu" id="persistent-selection-list">
- <%= form_tag '/actions' do %>
- <div id="selection-form-content"></div>
- <% end %>
- </ul>
- </li>
-
- <% if current_user.is_active %>
- <li class="dropdown notification-menu">
- <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="notifications-menu">
- <span class="glyphicon glyphicon-envelope"></span>
- <span class="badge badge-alert notification-count"><%= @notification_count %></span>
- <span class="caret"></span>
- </a>
- <ul class="dropdown-menu" role="menu">
- <% if (@notifications || []).length > 0 %>
- <% @notifications.each_with_index do |n, i| %>
- <% if i > 0 %><li class="divider"></li><% end %>
- <li class="notification"><%= n.call(self) %></li>
- <% end %>
- <% else %>
- <li class="notification empty">No notifications.</li>
+
+ <li class="dropdown">
+ <a href="/folders">
+ <i class="fa fa-lg fa-folder-o fa-fw"></i> Folders
+ </a></li>
+ <li><a href="/collections">
+ <i class="fa fa-lg fa-briefcase fa-fw"></i> Collections (data files)
+ </a></li>
+ <li><a href="/jobs">
+ <i class="fa fa-lg fa-tasks fa-fw"></i> Jobs
+ </a></li>
+ <li><a href="/pipeline_instances">
+ <i class="fa fa-lg fa-tasks fa-fw"></i> Pipeline instances
+ </a></li>
+ <li><a href="/pipeline_templates">
+ <i class="fa fa-lg fa-gears fa-fw"></i> Pipeline templates
+ </a></li>
+ <li> </li>
+ <li><a href="/repositories">
+ <i class="fa fa-lg fa-code-fork fa-fw"></i> Repositories
+ </a></li>
+ <li><a href="/virtual_machines">
+ <i class="fa fa-lg fa-terminal fa-fw"></i> Virtual machines
+ </a></li>
+ <li><a href="/humans">
+ <i class="fa fa-lg fa-male fa-fw"></i> Humans
+ </a></li>
+ <li><a href="/specimens">
+ <i class="fa fa-lg fa-flask fa-fw"></i> Specimens
+ </a></li>
+ <li><a href="/traits">
+ <i class="fa fa-lg fa-clipboard fa-fw"></i> Traits
+ </a></li>
+ <li><a href="/links">
+ <i class="fa fa-lg fa-arrows-h fa-fw"></i> Links
+ </a></li>
+ <% if current_user.andand.is_admin %>
+ <li><a href="/users">
+ <i class="fa fa-lg fa-user fa-fw"></i> Users
+ </a></li>
<% end %>
+ <li><a href="/groups">
+ <i class="fa fa-lg fa-users fa-fw"></i> Groups
+ </a></li>
+ <li><a href="/nodes">
+ <i class="fa fa-lg fa-cloud fa-fw"></i> Compute nodes
+ </a></li>
+ <li><a href="/keep_services">
+ <i class="fa fa-lg fa-exchange fa-fw"></i> Keep services
+ </a></li>
+ <li><a href="/keep_disks">
+ <i class="fa fa-lg fa-hdd-o fa-fw"></i> Keep disks
+ </a></li>
</ul>
- </li>
<% end %>
- <li class="dropdown">
- <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="user-menu">
- <span class="glyphicon glyphicon-user"></span><span class="caret"></span>
- </a>
- <ul class="dropdown-menu" role="menu">
- <li role="presentation" class="dropdown-header"><%= current_user.email %></li>
- <% if current_user.is_active %>
- <li role="presentation" class="divider"></li>
- <li role="presentation"><a href="/authorized_keys" role="menuitem">Manage ssh keys</a></li>
- <li role="presentation"><a href="/api_client_authorizations" role="menuitem">Manage API tokens</a></li>
- <li role="presentation" class="divider"></li>
+ <ul class="nav navbar-nav navbar-left breadcrumbs">
+ <% if current_user %>
+ <% if content_for?(:breadcrumbs) %>
+ <%= yield(:breadcrumbs) %>
+ <% else %>
+ <li class="nav-separator"><span class="glyphicon glyphicon-arrow-right"></span></li>
+ <li>
+ <%= link_to(
+ controller.controller_name.humanize.downcase,
+ url_for({controller: params[:controller]})) %>
+ </li>
+ <% if params[:action] != 'index' %>
+ <li class="nav-separator">
+ <span class="glyphicon glyphicon-arrow-right"></span>
+ </li>
+ <li>
+ <%= link_to_if_arvados_object @object, {friendly_name: true}, {data: {object_uuid: @object.andand.uuid, name: 'name'}} %>
+ </li>
+ <li style="padding: 14px 0 14px">
+ <%= form_tag do |f| %>
+ <%= render :partial => "selection_checkbox", :locals => {:object => @object} %>
+ <% end %>
+ </li>
+ <% end %>
<% end %>
- <li role="presentation"><a href="<%= logout_path %>" role="menuitem">Log out</a></li>
- </ul>
- </li>
- <% else -%>
- <li><a href="<%= $arvados_api_client.arvados_login_url(return_to: root_url) %>">Log in</a></li>
- <% end -%>
- </ul>
- </div><!-- /.navbar-collapse -->
- </div><!-- /.container-fluid -->
- </div>
+ <% end %>
+ </ul>
- <div class="container-fluid">
- <div class="col-sm-9 col-sm-offset-3">
- <div id="content" class="body-content">
- <%= yield %>
- </div>
- </div>
- <div class="col-sm-3 left-nav">
- <div class="arvados-nav-container">
- <% if current_user.andand.is_active %>
- <div class="well">
- <ul class="arvados-nav">
- <li class="<%= 'arvados-nav-active' if params[:action] == 'home' %>">
- <a href="/">Dashboard</a>
+ <ul class="nav navbar-nav navbar-right">
+
+ <li>
+ <a><i class="rotating loading glyphicon glyphicon-refresh"></i></a>
</li>
- <% [['Data', [['collections', 'Collections (data files)'],
- ['humans'],
- ['traits'],
- ['specimens'],
- ['links']]],
- ['Activity', [['pipeline_instances', 'Recent pipeline instances'],
- ['jobs', 'Recent jobs']]],
- ['Compute', [['pipeline_templates'],
- ['repositories', 'Code repositories'],
- ['virtual_machines']]],
- ['System', [['users'],
- ['groups'],
- ['nodes', 'Compute nodes'],
- ['keep_disks']]]].each do |j| %>
- <li><%= j[0] %>
- <ul>
- <% j[1].each do |k| %>
- <% unless k[0] == 'users' and !current_user.andand.is_admin %>
- <li class="<%= 'arvados-nav-active' if (params[:controller] == k[0] && params[:action] != 'home') %>">
- <a href="/<%= k[0] %>">
- <%= if k[1] then k[1] else k[0].capitalize.gsub('_', ' ') end %>
- </a>
- </li>
+ <% if current_user %>
+ <!-- XXX placeholder for this when search is implemented
+ <li>
+ <form class="navbar-form" role="search">
+ <div class="input-group" style="width: 220px">
+ <input type="text" class="form-control" placeholder="search">
+ <span class="input-group-addon"><span class="glyphicon glyphicon-search"></span></span>
+ </div>
+ </form>
+ </li>
+ -->
+
+ <li class="dropdown notification-menu">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="collections-menu">
+ <span class="glyphicon glyphicon-paperclip"></span>
+ <span class="badge" id="persistent-selection-count"></span>
+ <span class="caret"></span>
+ </a>
+ <ul class="dropdown-menu" role="menu" id="persistent-selection-list">
+ <%= form_tag '/actions' do %>
+ <%= hidden_field_tag 'uuid', @object.andand.uuid %>
+ <div id="selection-form-content"></div>
<% end %>
+ </ul>
+ </li>
+
+ <% if current_user.is_active %>
+ <li class="dropdown notification-menu">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="notifications-menu">
+ <span class="glyphicon glyphicon-envelope"></span>
+ <span class="badge badge-alert notification-count"><%= @notification_count %></span>
+ <span class="caret"></span>
+ </a>
+ <ul class="dropdown-menu" role="menu">
+ <% if (@notifications || []).length > 0 %>
+ <% @notifications.each_with_index do |n, i| %>
+ <% if i > 0 %><li class="divider"></li><% end %>
+ <li class="notification"><%= n.call(self) %></li>
+ <% end %>
+ <% else %>
+ <li class="notification empty">No notifications.</li>
<% end %>
- </ul>
- </li>
+ </ul>
+ </li>
<% end %>
- <li>Help
- <ul>
- <li><%= link_to 'Tutorials and User guide', "#{Rails.configuration.arvados_docsite}/user", target: "_blank" %></li>
- <li><%= link_to 'API Reference', "#{Rails.configuration.arvados_docsite}/api", target: "_blank" %></li>
- <li><%= link_to 'SDK Reference', "#{Rails.configuration.arvados_docsite}/sdk", target: "_blank" %></li>
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="user-menu">
+ <span class="glyphicon glyphicon-user"></span><span class="caret"></span>
+ </a>
+ <ul class="dropdown-menu" role="menu">
+ <li role="presentation" class="dropdown-header"><%= current_user.email %></li>
+ <% if current_user.is_active %>
+ <li role="presentation" class="divider"></li>
+ <li role="presentation"><a href="/authorized_keys" role="menuitem"><i class="fa fa-key fa-fw"></i> Manage ssh keys</a></li>
+ <li role="presentation"><a href="/api_client_authorizations" role="menuitem"><i class="fa fa-ticket fa-fw"></i> Manage API tokens</a></li>
+ <li role="presentation" class="divider"></li>
+ <% end %>
+ <li role="presentation"><a href="<%= logout_path %>" role="menuitem"><i class="fa fa-sign-out fa-fw"></i> Log out</a></li>
</ul>
</li>
+ <% else %>
+ <li><a href="<%= arvados_api_client.arvados_login_url(return_to: root_url) %>">Log in</a></li>
+ <% end %>
</ul>
- </div>
- <% end %>
- </div>
- </div>
+ </div><!-- /.navbar-collapse -->
+ </nav>
+
+ <div id="page-wrapper">
+ <%= yield %>
+ </div>
</div>
+</div>
+
<%= yield :footer_html %>
<%= piwik_tracking_tag %>
<%= javascript_tag do %>
<%= yield :footer_js %>
<% end %>
+<div class="modal-container"></div>
</body>
</html>
<td>
<% if current_user and (current_user.is_admin or current_user.uuid == link.owner_uuid) %>
- <%= link_to raw('<i class="glyphicon glyphicon-trash"></i>'), { action: 'destroy', id: link.uuid }, { confirm: 'Delete this link?', method: 'delete' } %>
+ <%= link_to raw('<i class="glyphicon glyphicon-trash"></i>'), { action: 'destroy', id: link.uuid }, data: {confirm: 'Delete this link?', method: 'delete'} %>
<% end %>
</td>
+++ /dev/null
-<%= render :partial => 'application/arvados_object' %>
<%= content_for :content_top do %>
<h2>
- <%= render_editable_attribute @object, 'name', nil, { 'data-emptytext' => 'Unnamed pipeline', 'data-mode' => 'inline' } %>
+ <%= render_editable_attribute @object, 'name', nil, { 'data-emptytext' => 'Unnamed pipeline' } %>
</h2>
<% if template %>
<h4>
<% end %>
<% end %>
-<% if @object.active != nil %>
+<% pipeline_job_uuids = [] %>
+
+<% if !@object.state.in? ['New', 'Ready', 'Paused'] %>
<table class="table pipeline-components-table">
<colgroup>
<col style="width: 15%" />
</th><th>
script, version
</th><th>
- progress
- <%= link_to '(refresh)', request.fullpath, class: 'refresh hide', remote: true, method: 'get' %>
+ job
+ <%# format:'js' here helps browsers avoid using the cached js
+ content in html context (e.g., duplicate tab -> see
+ javascript) %>
+ <%= link_to '(refresh)', {format: :js}, {class: 'refresh hide', remote: true, method: 'get'} %>
</th><th>
</th><th>
output
</thead>
<tbody>
<% render_pipeline_jobs.each do |pj| %>
+ <% if pj[:job].andand[:uuid]
+ pipeline_job_uuids << pj[:job][:uuid]
+ end %>
<tr>
<td>
- <% job_status = render(partial: 'job_status_label',
- locals: { :j => pj[:job], :title => pj[:name] }) %>
- <% if pj[:job].andand[:uuid] %>
- <%= link_to(job_status, job_url(id: pj[:job][:uuid])) %>
- <% else %>
- <%= job_status %>
- <% end %>
+ <%= pj[:name] %>
</td><td>
<%= pj[:script] %>
<br /><span class="deemphasize"><%= pj[:script_version] %></span>
</td><td>
<%= pj[:progress_bar] %>
+ <% if @object.state == 'Complete' || @object.state == 'Failed' %>
+ <% if pj[:job].andand[:uuid] %>
+ <span class="deemphasize">
+ <%= link_to("..."+pj[:job][:uuid].last(15), job_url(id: pj[:job][:uuid])) %>
+ </span>
+
+ <% current_job = Job.find(pj[:job][:uuid]) rescue nil %>
+ <% if current_job.andand[:log] %>
+ <% fixup = /([a-f0-9]{32}\+\d+)(\+?.*)/.match(current_job[:log])%>
+ <% Collection.limit(1).where(uuid: fixup[1]).each do |c| %>
+ <% c.files.each do |file| %>
+ <br/><span class="deemphasize">
+ <a href="<%= collection_path(current_job[:log]) %>/<%= file[1] %>?disposition=inline&size=<%= file[2] %>">log</a>
+ </span>
+ <% end %>
+ <% end %>
+ <% end %>
+ <% end %>
+ <% end %>
</td><td>
<%= render(partial: 'job_status_label',
locals: { :j => pj[:job] }) %>
</tfoot>
</table>
-<% if @object.active %>
+<% if @object.state == 'RunningOnServer' || @object.state == 'RunningOnClient' %>
<% content_for :js do %>
setInterval(function(){$('a.refresh').click()}, 15000);
<% end %>
<% content_for :tab_line_buttons do %>
<%= form_tag @object, :method => :put do |f| %>
- <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :active, :value => false %>
+ <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :state, :value => 'Paused' %>
- <%= button_tag "Stop pipeline", {class: 'btn btn-primary pull-right', id: "run-pipeline-button"} %>
+ <%= button_tag({class: 'btn btn-primary pull-right run-pipeline-button'}) do %>
+ Stop <i class="fa fa-fw fa-stop"></i>
+ <% end %>
<% end %>
<% end %>
-<% end %>
-
-<% else %>
+ <% if !pipeline_job_uuids.empty? %>
+ <h4>Log messages from running jobs</h4>
+ <% log_history = pipeline_log_history(pipeline_job_uuids) %>
+ <div id="pipeline_event_log_history_div">
+ <% log_history.each do |entry| %>
+ <%=entry%><br/>
+ <% end %>
+ </div>
+ <div class="arv-log-event-listener arv-log-event-handler-append-logs" id="pipeline_event_log_div" data-object-uuids="<%=pipeline_job_uuids.join(" ")%>"/>
+ <% end %>
+ <% end %>
- <p>Please set the desired input parameters for the components of this pipeline. Parameters highlighted in red are required.</p>
+<% else %> <%# State new or ready or paused %>
+ <% if @object.state == 'New' %>
+ <p>Please set the desired input parameters for the components of this pipeline. Parameters highlighted in red are required.</p>
+ <% end %>
<% content_for :tab_line_buttons do %>
<%= form_tag @object, :method => :put do |f| %>
- <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :active, :value => true %>
+ <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :state, :value => 'RunningOnServer' %>
- <%= button_tag "Run pipeline", {class: 'btn btn-primary pull-right', id: "run-pipeline-button"} %>
+ <%= button_tag({class: 'btn btn-primary pull-right run-pipeline-button'}) do %>
+ Run <i class="fa fa-fw fa-play"></i>
+ <% end %>
<% end %>
<% end %>
- <%= render partial: 'show_components_editable', locals: {editable: true} %>
-
+ <% if @object.state.in? ['New', 'Ready'] %>
+ <%= render partial: 'show_components_editable', locals: {editable: true} %>
+ <% else %>
+ <%= render partial: 'show_components_editable', locals: {editable: false} %>
+ <% end %>
<% end %>
<% @object.components.each do |k, component| %>
<% next if !component %>
<tr>
- <td><span class="label label-default"><%= k %></span></td>
+ <td><%= k %></td>
- <td><%= render_pipeline_component_attribute (editable && @object), :components, [k, :script], component[:script] %></td>
+ <td><%= component[:script] %></td>
<td>script version</td>
--- /dev/null
+<% n_inputs = 0 %>
+
+<% content_for :pi_input_form do %>
+<form role="form" style="width:60%">
+ <div class="form-group">
+ <% @object.components.each do |cname, component| %>
+ <% next if !component %>
+ <% component[:script_parameters].andand.each do |pname, pvalue_spec| %>
+ <% if pvalue_spec.is_a? Hash %>
+ <% if (pvalue_spec[:description] or
+ (pvalue_spec[:required] and not pvalue_spec[:value])) %>
+ <% n_inputs += 1 %>
+ <label for="<% "#{cname}-#{pname}" %>">
+ <%= pvalue_spec[:title] ||
+ "\"#{pname.to_s}\" parameter for #{component[:script]} script in #{cname} component" %>
+ </label>
+ <div>
+ <p class="form-control-static">
+ <%= render_pipeline_component_attribute @object, :components, [cname, :script_parameters, pname.to_sym], pvalue_spec %>
+ </p>
+ </div>
+ <p class="help-block">
+ <%= pvalue_spec[:description] %>
+ </p>
+ <% end %>
+ <% end %>
+ <% end %>
+ <% end %>
+ </div>
+</form>
+<% end %>
+
+<% if n_inputs == 0 %>
+ <p>This pipeline does not need any further inputs specified. You can start it by clicking the "Run" button.</p>
+<% else %>
+ <p><i>Provide <%= n_inputs > 1 ? 'values' : 'a value' %> for the following <%= n_inputs > 1 ? 'parameters' : 'parameter' %>, then click the "Run" button to start the pipeline.</i></p>
+ <%= content_for :pi_input_form %>
+
+ <%= form_tag @object, :method => :put do |f| %>
+ <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :state, :value => 'RunningOnServer' %>
+ <%= button_tag({class: 'btn btn-primary run-pipeline-button'}) do %>
+ Run <i class="fa fa-fw fa-play"></i>
+ <% end %>
+ <% end %>
+
+<% end %>
+
+<div style="margin-top: 1em;">
+ <p>Click the "Components" tab above to see a full list of pipeline components and parameters.</p>
+</div>
<col width="25%" />
<col width="20%" />
<col width="15%" />
- <col width="20%" />
+ <col width="15%" />
+ <col width="5%" />
</colgroup>
<thead>
<tr class="contain-align-left">
Owner
</th><th>
Age
+ </th><th>
</th>
</tr>
</thead>
<%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
</td><td>
<%= distance_of_time_in_words(ob.created_at, Time.now) %>
+ </td><td>
+ <%= render partial: 'delete_object_button', locals: {object:ob} %>
</td>
</tr>
<tr>
<td style="border-top: 0;" colspan="2">
</td>
- <td style="border-top: 0; opacity: 0.5;" colspan="5">
+ <td style="border-top: 0; opacity: 0.5;" colspan="6">
<% ob.components.each do |cname, c| %>
<% if c[:job] %>
<%= render partial: "job_status_label", locals: {:j => c[:job], :title => cname.to_s } %>
<% self.formats = [:html] %>
var new_content = "<%= escape_javascript(render template: 'pipeline_instances/show') %>";
var selected_tab_hrefs = [];
-if ($('div.body-content').html() != new_content) {
+if ($('div#page-wrapper').html() != new_content) {
$('.nav-tabs li.active a').each(function() {
selected_tab_hrefs.push($(this).attr('href'));
});
- $('div.body-content').html(new_content);
+ $('div#page-wrapper').html(new_content);
- // Show the same tabs that were active before we rewrote body-content
+ // Show the same tabs that were active before we rewrote page-wrapper
$.each(selected_tab_hrefs, function(i, href) {
$('.nav-tabs li a[href="' + href + '"]').tab('show');
});
-<% content_for :css do %>
- .playbutton {
- color: white;
- background: rgb(91, 192, 222);
- border: 0px;
- border-radius: 3px;
- padding: 0px 3px;
- }
- .playbutton:hover {
- background: rgb(57, 179, 215);
- }
-<% end %>
-
<%= render partial: "paging", locals: {results: @objects, object: @object} %>
-<table class="table table-hover">
+<table class="table table-condensed arv-index">
+ <colgroup>
+ <col width="8%" />
+ <col width="10%" />
+ <col width="22%" />
+ <col width="45%" />
+ <col width="15%" />
+ </colgroup>
<thead>
<tr class="contain-align-left">
<th>
</th><th>
- id
</th><th>
- name
+ name
</th><th>
- owner
+ description/components
</th><th>
- components
+ owner
</th>
</tr>
</thead>
<td>
<%= form_tag '/pipeline_instances' do |f| %>
<%= hidden_field :pipeline_instance, :pipeline_template_uuid, :value => ob.uuid %>
- <%= button_tag nil, {class: 'playbutton', title: "Run #{ob.name}"} do %>
- <span class="glyphicon glyphicon-play"></span>
+ <%= button_tag nil, {class: "btn btn-default btn-xs", title: "Run #{ob.name}"} do %>
+ Run <i class="fa fa-fw fa-play"></i>
<% end %>
<% end %>
</td>
<td>
- <%= link_to_if_arvados_object ob %>
+ <%= render :partial => "show_object_button", :locals => {object: ob, size: 'xs'} %>
</td><td>
<%= render_editable_attribute ob, 'name' %>
</td><td>
- <%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
+ <% if ob.respond_to?(:description) and ob.description %>
+ <%= ob.description %>
+ <br />
+ <% end %>
+ <% ob.components.collect { |k,v| k.to_s }.each do |k| %>
+ <span class="label label-default"><%= k %></span>
+ <% end %>
</td><td>
- <%= ob.components.collect { |k,v| k.to_s }.join(", ") %>
+ <%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
</td>
</tr>
<% end %>
<% end %>
-<%= form_for(@required_user_agreements.first, {url: {action: 'sign', controller: 'user_agreements'}}) do |f| %>
+<%= form_for(@required_user_agreements.first, {url: {action: 'sign', controller: 'user_agreements'}, method: 'post'}) do |f| %>
<%= hidden_field_tag :return_to, request.url %>
<div id="open_user_agreement">
<div class="alert alert-info">
<p>As an admin, you can deactivate and reset this user. This will remove all repository/VM permissions for the user. If you "setup" the user again, the user will have to sign the user agreement again.</p>
<blockquote>
-<%= button_to "Deactivate #{@object.full_name}", unsetup_user_url(id: @object.uuid), class: 'btn btn-primary', confirm: "Are you sure you want to deactivate #{@object.full_name}?"%>
+<%= button_to "Deactivate #{@object.full_name}", unsetup_user_url(id: @object.uuid), class: 'btn btn-primary', data: {confirm: "Are you sure you want to deactivate #{@object.full_name}?"} %>
</blockquote>
<% content_for :footer_html do %>
<% if current_user.andand.is_active %>
<div>
<strong>Recent jobs</strong>
- <%= link_to '(refresh)', request.fullpath, class: 'refresh', remote: true, method: 'get' %>
+ <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true, method: 'get'} %>
<%= link_to raw("Show all jobs →"), jobs_path, class: 'pull-right' %>
<% if not current_user.andand.is_active or @my_jobs.empty? %>
<p>(None)</p>
<a href="<%= collection_path(j.log) %>/<%= file[1] %>?disposition=inline&size=<%= file[2] %>">Log</a>
<% end %>
<% end %>
- <% elsif j.respond_to? :log_buffer and j.log_buffer %>
+ <% elsif j.respond_to? :log_buffer and j.log_buffer.is_a? String %>
<% buf = j.log_buffer.strip.split("\n").last %>
<span title="<%= buf %>"><%= buf %></span>
<% end %>
<div>
<strong>Recent pipeline instances</strong>
- <%= link_to '(refresh)', request.fullpath, class: 'refresh', remote: true, method: 'get' %>
+ <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true, method: 'get'} %>
<%= link_to raw("Show all pipeline instances →"), pipeline_instances_path, class: 'pull-right' %>
<% if not current_user.andand.is_active or @my_pipelines.empty? %>
<p>(None)</p>
<div>
<strong>Recent collections</strong>
- <%= link_to '(refresh)', request.fullpath, class: 'refresh', remote: true, method: 'get' %>
+ <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true, method: 'get'} %>
<%= link_to raw("Show all collections →"), collections_path, class: 'pull-right' %>
<div class="pull-right" style="padding-right: 1em; width: 30%;">
<%= form_tag collections_path,
beyond that.
</p>
<p>
- <a class="pull-right btn btn-primary" href="<%= $arvados_api_client.arvados_login_url(return_to: request.url) %>">
+ <a class="pull-right btn btn-primary" href="<%= arvados_api_client.arvados_login_url(return_to: request.url) %>">
Click here to log in to <%= Rails.configuration.site_name %> with a Google account</a>
</p>
</div>
$("#PutStuffHere").append(content + "<br>");
};
-var dispatcher = new WebSocket('<%= $arvados_api_client.discovery[:websocketUrl] %>?api_token=<%= Thread.current[:arvados_api_token] %>');
+var dispatcher = new WebSocket('<%= arvados_api_client.discovery[:websocketUrl] %>?api_token=<%= Thread.current[:arvados_api_token] %>');
dispatcher.onmessage = function(event) {
//putStuffThere(JSON.parse(event.data));
putStuffThere(event.data);
development:
cache_classes: false
- whiny_nils: true
+ eager_load: true
consider_all_requests_local: true
action_controller.perform_caching: false
action_mailer.raise_delivery_errors: false
active_support.deprecation: :log
action_dispatch.best_standards_support: :builtin
- active_record.mass_assignment_sanitizer: :strict
- active_record.auto_explain_threshold_in_seconds: 0.5
- assets.compress: false
assets.debug: true
profiling_enabled: true
site_name: Arvados Workbench (dev)
production:
force_ssl: true
cache_classes: true
+ eager_load: true
consider_all_requests_local: false
action_controller.perform_caching: true
serve_static_assets: false
- assets.compress: true
assets.compile: false
assets.digest: true
i18n.fallbacks: true
test:
cache_classes: true
+ eager_load: false
serve_static_assets: true
static_cache_control: public, max-age=3600
- whiny_nils: true
consider_all_requests_local: true
action_controller.perform_caching: false
action_dispatch.show_exceptions: false
action_controller.allow_forgery_protection: false
action_mailer.delivery_method: :test
- active_record.mass_assignment_sanitizer: :strict
active_support.deprecation: :stderr
profiling_enabled: false
secret_token: <%= rand(2**256).to_s(36) %>
+ secret_key_base: <%= rand(2**256).to_s(36) %>
# When you run the Workbench's integration tests, it starts the API
# server as a dependency. These settings should match the API
site_name: Workbench:test
common:
+ assets.js_compressor: false
+ assets.css_compressor: false
data_import_dir: /tmp/arvados-workbench-upload
data_export_dir: /tmp/arvados-workbench-download
arvados_login_base: https://arvados.local/login
arvados_theme: default
show_user_agreement_inline: false
secret_token: ~
+ secret_key_base: false
default_openid_prefix: https://www.google.com/accounts/o8/id
send_user_setup_notification_email: true
require 'rails/all'
-if defined?(Bundler)
- # If you precompile assets before deploying to production, use this line
- Bundler.require(*Rails.groups(:assets => %w(development test)))
- # If you want your assets lazily compiled in production, use this line
- # Bundler.require(:default, :assets, Rails.env)
-end
+Bundler.require(:default, Rails.env)
module ArvadosWorkbench
class Application < Rails::Application
# like if you have constraints or database-specific column types
# config.active_record.schema_format = :sql
- # Enforce whitelist mode for mass assignment.
- # This will create an empty whitelist of attributes available for mass-assignment for all models
- # in your app. As such, your models will need to explicitly whitelist or blacklist accessible
- # parameters by using an attr_accessible or attr_protected declaration.
- config.active_record.whitelist_attributes = true
-
# Enable the asset pipeline
config.assets.enabled = true
config.assets.version = '1.0'
end
end
+
+require File.expand_path('../load_config', __FILE__)
# since you don't have to restart the web server when you make code changes.
config.cache_classes = false
- # Log error messages when you accidentally call methods on nil.
- config.whiny_nils = true
-
# Show full error reports and disable caching
config.consider_all_requests_local = true
config.action_controller.perform_caching = false
# Only use best-standards-support built into browsers
config.action_dispatch.best_standards_support = :builtin
- # Raise exception on mass assignment protection for Active Record models
- config.active_record.mass_assignment_sanitizer = :strict
-
- # Log the query plan for queries taking more than this (works
- # with SQLite, MySQL, and PostgreSQL)
- config.active_record.auto_explain_threshold_in_seconds = 0.5
-
# Do not compress assets
- config.assets.compress = false
+ config.assets.js_compressor = false
# Expands the lines which load the assets
config.assets.debug = true
config.serve_static_assets = false
# Compress JavaScripts and CSS
- config.assets.compress = true
+ config.assets.js_compressor = :yui
# Don't fallback to assets pipeline if a precompiled asset is missed
config.assets.compile = false
# Send deprecation notices to registered listeners
config.active_support.deprecation = :notify
- # Log the query plan for queries taking more than this (works
- # with SQLite, MySQL, and PostgreSQL)
- # config.active_record.auto_explain_threshold_in_seconds = 0.5
-
# Log timing data for API transactions
config.profiling_enabled = false
config.serve_static_assets = true
config.static_cache_control = "public, max-age=3600"
- # Log error messages when you accidentally call methods on nil
- config.whiny_nils = true
-
# Show full error reports and disable caching
config.consider_all_requests_local = true
config.action_controller.perform_caching = false
# ActionMailer::Base.deliveries array.
config.action_mailer.delivery_method = :test
- # Raise exception on mass assignment protection for Active Record models
- config.active_record.mass_assignment_sanitizer = :strict
-
# Print deprecation notices to the stderr
config.active_support.deprecation = :stderr
+++ /dev/null
-# The client object must be instantiated _after_ zza_load_config.rb
-# runs, because it relies on configuration settings.
-#
-if not $application_config
- raise "Fatal: Config must be loaded before instantiating ArvadosApiClient."
-end
-
-$arvados_api_client = ArvadosApiClient.new
themes_for_rails
resources :keep_disks
+ resources :keep_services
resources :user_agreements do
- put 'sign', on: :collection
+ post 'sign', on: :collection
get 'signatures', on: :collection
end
get '/user_agreements/signatures' => 'user_agreements#signatures'
resources :authorized_keys
resources :job_tasks
resources :jobs
- match '/logout' => 'sessions#destroy'
- match '/logged_out' => 'sessions#index'
+ match '/logout' => 'sessions#destroy', via: [:get, :post]
+ get '/logged_out' => 'sessions#index'
resources :users do
get 'home', :on => :member
get 'welcome', :on => :collection
get 'compare', on: :collection
end
resources :links
- match '/collections/graph' => 'collections#graph'
+ get '/collections/graph' => 'collections#graph'
resources :collections do
post 'set_persistent', on: :member
end
+ get('/collections/download/:uuid/:reader_token/*file' => 'collections#show_file',
+ format: false)
+ get '/collections/download/:uuid/:reader_token' => 'collections#show_file_links'
get '/collections/:uuid/*file' => 'collections#show_file', :format => false
+ resources :folders do
+ match 'remove/:item_uuid', on: :member, via: :delete, action: :remove_item
+ get 'choose', on: :collection
+ end
post 'actions' => 'actions#post'
get 'websockets' => 'websocket#index'
# Send unroutable requests to an arbitrary controller
# (ends up at ApplicationController#render_not_found)
- match '*a', :to => 'links#render_not_found'
+ match '*a', to: 'links#render_not_found', via: [:get, :post]
end
-User-Agent: *
-Disallow: /
"session token does not belong to #{client_auth}")
end
+ def show_collection(params, session={}, response=:success)
+ params = collection_params(params) if not params.is_a? Hash
+ session = session_for(session) if not session.is_a? Hash
+ get(:show, params, session)
+ assert_response response
+ end
+
# Mock the collection file reader to avoid external calls and return
# a predictable string.
CollectionsController.class_eval do
end
test "viewing a collection" do
- params = collection_params(:foo_file)
- sess = session_for(:active)
- get(:show, params, sess)
- assert_response :success
+ show_collection(:foo_file, :active)
assert_equal([['.', 'foo', 3]], assigns(:object).files)
end
- test "viewing a collection with a reader token" do
- params = collection_params(:foo_file)
- params[:reader_tokens] =
- [api_fixture('api_client_authorizations')['active']['api_token']]
- get(:show, params)
- assert_response :success
- assert_equal([['.', 'foo', 3]], assigns(:object).files)
- assert_no_session
+ test "viewing a collection fetches related folders" do
+ show_collection(:foo_file, :active)
+ assert_includes(assigns(:folders).map(&:uuid),
+ api_fixture('groups')['afolder']['uuid'],
+ "controller did not find linked folder")
+ end
+
+ test "viewing a collection fetches related permissions" do
+ show_collection(:bar_file, :active)
+ assert_includes(assigns(:permissions).map(&:uuid),
+ api_fixture('links')['bar_file_readable_by_active']['uuid'],
+ "controller did not find permission link")
+ end
+
+ test "viewing a collection fetches jobs that output it" do
+ show_collection(:bar_file, :active)
+ assert_includes(assigns(:output_of).map(&:uuid),
+ api_fixture('jobs')['foobar']['uuid'],
+ "controller did not find output job")
+ end
+
+ test "viewing a collection fetches jobs that logged it" do
+ show_collection(:baz_file, :active)
+ assert_includes(assigns(:log_of).map(&:uuid),
+ api_fixture('jobs')['foobar']['uuid'],
+ "controller did not find logger job")
end
- test "viewing the index with a reader token" do
- params = {reader_tokens:
- [api_fixture('api_client_authorizations')['spectator']['api_token']]
- }
- get(:index, params)
+ test "viewing a collection fetches logs about it" do
+ show_collection(:foo_file, :active)
+ assert_includes(assigns(:logs).map(&:uuid),
+ api_fixture('logs')['log4']['uuid'],
+ "controller did not find related log")
+ end
+
+ test "viewing collection files with a reader token" do
+ params = collection_params(:foo_file)
+ params[:reader_token] =
+ api_fixture('api_client_authorizations')['active']['api_token']
+ get(:show_file_links, params)
assert_response :success
+ assert_equal([['.', 'foo', 3]], assigns(:object).files)
assert_no_session
- listed_collections = assigns(:collections).map { |c| c.uuid }
- assert_includes(listed_collections,
- api_fixture('collections')['bar_file']['uuid'],
- "spectator reader token didn't list bar file")
- refute_includes(listed_collections,
- api_fixture('collections')['foo_file']['uuid'],
- "spectator reader token listed foo file")
end
test "getting a file from Keep" do
params = collection_params(:foo_file, 'foo')
sess = session_for(:spectator)
get(:show_file, params, sess)
- assert_includes([403, 404], @response.code.to_i)
+ assert_response 404
end
test "trying to get a nonexistent file from Keep returns a 404" do
test "getting a file from Keep with a good reader token" do
params = collection_params(:foo_file, 'foo')
read_token = api_fixture('api_client_authorizations')['active']['api_token']
- params[:reader_tokens] = [read_token]
+ params[:reader_token] = read_token
get(:show_file, params)
assert_response :success
assert_equal(expected_contents(params, read_token), @response.body,
test "trying to get from Keep with an unscoped reader token prompts login" do
params = collection_params(:foo_file, 'foo')
- read_token =
+ params[:reader_token] =
api_fixture('api_client_authorizations')['active_noscope']['api_token']
- params[:reader_tokens] = [read_token]
get(:show_file, params)
assert_response :redirect
end
params = collection_params(:foo_file, 'foo')
sess = session_for(:expired)
read_token = api_fixture('api_client_authorizations')['active']['api_token']
- params[:reader_tokens] = [read_token]
+ params[:reader_token] = read_token
get(:show_file, params, sess)
assert_response :success
assert_equal(expected_contents(params, read_token), @response.body,
--- /dev/null
+require 'test_helper'
+
+class FoldersControllerTest < ActionController::TestCase
+ # test "the truth" do
+ # assert true
+ # end
+end
require 'headless'
class CollectionsTest < ActionDispatch::IntegrationTest
-
def change_persist oldstate, newstate
find "div[data-persistent-state='#{oldstate}']"
page.assert_no_selector "div[data-persistent-state='#{newstate}']"
change_persist 'persistent', 'cache'
end
+ test "Collection page renders default name links" do
+ uuid = api_fixture('collections')['foo_file']['uuid']
+ coll_name = api_fixture('links')['foo_collection_name_in_afolder']['name']
+ visit page_with_token('active', "/collections/#{uuid}")
+ assert(page.has_text?(coll_name), "Collection page did not include name")
+ # Now check that the page is otherwise normal, and the collection name
+ # isn't only showing up in an error message.
+ assert(page.has_link?('foo'), "Collection page did not include file link")
+ end
+
+ test "can download an entire collection with a reader token" do
+ uuid = api_fixture('collections')['foo_file']['uuid']
+ token = api_fixture('api_client_authorizations')['active_all_collections']['api_token']
+ url_head = "/collections/download/#{uuid}/#{token}/"
+ visit url_head
+ # It seems that Capybara can't inspect tags outside the body, so this is
+ # a very blunt approach.
+ assert_no_match(/<\s*meta[^>]+\bnofollow\b/i, page.html,
+ "wget prohibited from recursing the collection page")
+ # TODO: When we can test against a Keep server, actually follow links
+ # and check their contents, rather than testing the href directly
+ # (this is too closely tied to implementation details).
+ hrefs = page.all('a').map do |anchor|
+ link = anchor[:href] || ''
+ if link.start_with? url_head
+ link[url_head.size .. -1]
+ elsif link.start_with? '/'
+ nil
+ else
+ link
+ end
+ end
+ assert_equal(['foo'], hrefs.compact.sort,
+ "download page did provide strictly file links")
+ end
end
--- /dev/null
+require 'integration_helper'
+require 'selenium-webdriver'
+require 'headless'
+
+class FoldersTest < ActionDispatch::IntegrationTest
+ setup do
+ Capybara.current_driver = Capybara.javascript_driver
+ end
+
+ test 'Find a folder and edit its description' do
+ visit page_with_token 'active', '/'
+ find('nav a', text: 'Folders').click
+ find('.arv-folder-list a,button', text: 'A Folder').
+ click
+ within('.panel', text: api_fixture('groups')['afolder']['name']) do
+ find('span', text: api_fixture('groups')['afolder']['name']).click
+ find('.glyphicon-ok').click
+ find('.btn', text: 'Edit description').click
+ find('.editable-input textarea').set('I just edited this.')
+ find('.editable-submit').click
+ wait_for_ajax
+ end
+ visit current_path
+ assert(find?('.panel', text: 'I just edited this.'),
+ "Description update did not survive page refresh")
+ end
+
+ test 'Add a new name, then edit it, without creating a duplicate' do
+ folder_uuid = api_fixture('groups')['afolder']['uuid']
+ specimen_uuid = api_fixture('specimens')['owned_by_afolder_with_no_name_link']['uuid']
+ visit page_with_token 'active', '/folders/' + folder_uuid
+ within('.panel tr', text: specimen_uuid) do
+ find(".editable[data-name='name']").click
+ find('.editable-input input').set('Now I have a name.')
+ find('.glyphicon-ok').click
+ find('.editable', text: 'Now I have a name.').click
+ find('.editable-input input').set('Now I have a new name.')
+ find('.glyphicon-ok').click
+ wait_for_ajax
+ find('.editable', text: 'Now I have a new name.')
+ end
+ visit current_path
+ within '.panel', text: 'Contents' do
+ find '.editable', text: 'Now I have a new name.'
+ page.assert_no_selector '.editable', text: 'Now I have a name.'
+ end
+ end
+
+ test 'Create a folder and move it into a different folder' do
+ visit page_with_token 'active', '/folders'
+ find('input[value="Add a new folder"]').click
+
+ within('.panel', text: 'New folder') do
+ find('.panel-title span', text: 'New folder').click
+ find('.editable-input input').set('Folder 1234')
+ find('.glyphicon-ok').click
+ end
+ wait_for_ajax
+
+ visit '/folders'
+ find('input[value="Add a new folder"]').click
+ within('.panel', text: 'New folder') do
+ find('.panel-title span', text: 'New folder').click
+ find('.editable-input input').set('Folder 5678')
+ find('.glyphicon-ok').click
+ end
+ wait_for_ajax
+
+ find('input[value="Move to..."]').click
+ find('.selectable', text: 'Folder 1234').click
+ find('a,button', text: 'Move').click
+ wait_for_ajax
+
+ # Wait for the page to refresh and show the new parent folder in
+ # the Permissions panel:
+ find('.panel', text: 'Folder 1234')
+
+ assert(find('.panel', text: 'Permissions inherited from').
+ all('*', text: 'Folder 1234').any?,
+ "Folder 5678 should now be inside folder 1234")
+ end
+
+end
--- /dev/null
+require 'integration_helper'
+require 'selenium-webdriver'
+require 'headless'
+
+class PipelineInstancesTest < ActionDispatch::IntegrationTest
+ setup do
+ # Selecting collections requiresLocalStorage
+ headless = Headless.new
+ headless.start
+ Capybara.current_driver = :selenium
+ end
+
+ test 'Create and run a pipeline' do
+ visit page_with_token('active_trustedclient')
+
+ click_link 'Pipeline templates'
+ within('tr', text: 'Two Part Pipeline Template') do
+ find('a,button', text: 'Run').click
+ end
+
+ instance_page = current_path
+
+ # Go over to the collections page and select something
+ click_link 'Collections (data files)'
+ within('tr', text: 'GNU_General_Public_License') do
+ find('input[type=checkbox]').click
+ end
+ find('#persistent-selection-count').click
+
+ # Go back to the pipeline instance page to use the new selection
+ visit instance_page
+
+ page.assert_selector 'a.disabled,button.disabled', text: 'Run'
+ assert find('p', text: 'Provide a value')
+
+ find('div.form-group', text: 'Foo/bar pair').
+ find('a,input').
+ click
+ find('.editable-input select').click
+ find('.editable-input').
+ first(:option, 'b519d9cb706a29fc7ea24dbea2f05851+249025').click
+ wait_for_ajax
+
+ # "Run" button is now enabled
+ page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+
+ first('a,button', text: 'Run').click
+
+ # Pipeline is running. We have a "Stop" button instead now.
+ page.assert_selector 'a,button', text: 'Stop'
+ find('a,button', text: 'Stop').click
+
+ # Pipeline is stopped. We have the option to resume it.
+ page.assert_selector 'a,button', text: 'Run'
+ end
+end
require 'uri'
class SmokeTest < ActionDispatch::IntegrationTest
+ setup do
+ Capybara.current_driver = Capybara.javascript_driver
+ end
+
def assert_visit_success(allowed=[200])
assert_includes(allowed, status_code,
"#{current_url} returned #{status_code}, not one of " +
visit page_with_token('active_trustedclient', '/')
assert_visit_success
click_link 'user-menu'
- urls = [all_links_in('.arvados-nav'),
+ urls = [all_links_in('nav'),
all_links_in('.navbar', /^Manage /)].flatten
seen_urls = ['/']
while not (url = urls.shift).nil?
assert (text.include? 'true false'), 'Expected is_active'
end
- click_link 'zzzzz-tpzed-xurymjxw79nv3jz'
+ find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+ find('a,button', text: 'Show').
+ click
assert page.has_text? 'Attributes'
assert page.has_text? 'Metadata'
assert page.has_text? 'Admin'
# go to the Attributes tab
click_link 'Attributes'
assert page.has_text? 'modified_by_user_uuid'
- page.within(:xpath, '//a[@data-name="is_active"]') do
+ page.within(:xpath, '//span[@data-name="is_active"]') do
assert_equal "true", text, "Expected user's is_active to be true"
end
- page.within(:xpath, '//a[@data-name="is_admin"]') do
+ page.within(:xpath, '//span[@data-name="is_admin"]') do
assert_equal "false", text, "Expected user's is_admin to be false"
end
fill_in "email", :with => "foo@example.com"
fill_in "repo_name", :with => "test_repo"
click_button "Submit"
+ wait_for_ajax
end
- sleep(0.1)
-
- # verify that the new user showed up in the users page
- assert page.has_text? 'foo@example.com'
-
- new_user_uuid = nil
- all("tr").each do |elem|
- if elem.text.include? 'foo@example.com'
- new_user_uuid = elem.text.split[0]
- break
- end
- end
+ visit '/users'
+ # verify that the new user showed up in the users page and find
+ # the new user's UUID
+ new_user_uuid =
+ find('tr[data-object-uuid]', text: 'foo@example.com').
+ find('td', text: '-tpzed-').
+ text
assert new_user_uuid, "Expected new user uuid not found"
# go to the new user's page
- click_link new_user_uuid
+ find('tr', text: new_user_uuid).
+ find('a,button', text: 'Show').
+ click
assert page.has_text? 'modified_by_user_uuid'
- page.within(:xpath, '//a[@data-name="is_active"]') do
+ page.within(:xpath, '//span[@data-name="is_active"]') do
assert_equal "false", text, "Expected new user's is_active to be false"
end
click_link 'Users'
- assert page.has_link? 'zzzzz-tpzed-xurymjxw79nv3jz'
-
# click on active user
- click_link 'zzzzz-tpzed-xurymjxw79nv3jz'
+ find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+ find('a,button', text: 'Show').
+ click
# Setup user
click_link 'Admin'
assert has_text? 'Virtual Machine'
fill_in "repo_name", :with => "test_repo"
click_button "Submit"
+ wait_for_ajax
end
- sleep(1)
assert page.has_text? 'modified_by_client_uuid'
click_link 'Metadata'
fill_in "repo_name", :with => "second_test_repo"
select("testvm.shell", :from => 'vm_uuid')
click_button "Submit"
+ wait_for_ajax
end
- sleep(0.1)
assert page.has_text? 'modified_by_client_uuid'
click_link 'Metadata'
click_link 'Users'
- assert page.has_link? 'zzzzz-tpzed-xurymjxw79nv3jz'
-
# click on active user
- click_link 'zzzzz-tpzed-xurymjxw79nv3jz'
+ find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+ find('a,button', text: 'Show').
+ click
# Verify that is_active is set
- click_link 'Attributes'
+ find('a,button', text: 'Attributes').click
assert page.has_text? 'modified_by_user_uuid'
- page.within(:xpath, '//a[@data-name="is_active"]') do
+ page.within(:xpath, '//span[@data-name="is_active"]') do
assert_equal "true", text, "Expected user's is_active to be true"
end
# Should now be back in the Attributes tab for the user
page.driver.browser.switch_to.alert.accept
assert page.has_text? 'modified_by_user_uuid'
- page.within(:xpath, '//a[@data-name="is_active"]') do
+ page.within(:xpath, '//span[@data-name="is_active"]') do
assert_equal "false", text, "Expected user's is_active to be false after unsetup"
end
fill_in "repo_name", :with => "second_test_repo"
select("testvm.shell", :from => 'vm_uuid')
click_button "Submit"
+ wait_for_ajax
end
- sleep(0.1)
assert page.has_text? 'modified_by_client_uuid'
click_link 'Metadata'
click_link 'Virtual machines'
assert page.has_text? 'testvm.shell'
click_on 'Add a new virtual machine'
- assert page.has_text? 'none'
- click_link 'none'
+ find('tr', text: 'hostname').
+ find('span', text: 'none').click
assert page.has_text? 'Update hostname'
fill_in 'editable-text', with: 'testname'
click_button 'editable-submit'
require 'uri'
require 'yaml'
+module WaitForAjax
+ Capybara.default_wait_time = 5
+ def wait_for_ajax
+ Timeout.timeout(Capybara.default_wait_time) do
+ loop until finished_all_ajax_requests?
+ end
+ end
+
+ def finished_all_ajax_requests?
+ page.evaluate_script('jQuery.active').zero?
+ end
+end
+
class ActionDispatch::IntegrationTest
# Make the Capybara DSL available in all integration tests
include Capybara::DSL
include ApiFixtureLoader
+ include WaitForAjax
@@API_AUTHS = self.api_fixture('api_client_authorizations')
+ def setup
+ reset_session!
+ super
+ end
+
def page_with_token(token, path='/')
# Generate a page path with an embedded API token.
# Typical usage: visit page_with_token('token_name', page)
q_string = URI.encode_www_form('api_token' => api_token)
"#{path}#{sep}#{q_string}"
end
+
+ # Find a page element, but return false instead of raising an
+ # exception if not found. Use this with assertions to explain that
+ # the error signifies a failed test rather than an unexpected error
+ # during a testing procedure.
+ def find? *args
+ begin
+ find *args
+ rescue Capybara::ElementNotFound
+ false
+ end
+ end
+
+ @@screenshot_count = 0
+ def screenshot
+ image_file = "./tmp/workbench-fail-#{@@screenshot_count += 1}.png"
+ page.save_screenshot image_file
+ puts "Saved #{image_file}"
+ end
+
+ teardown do
+ if not passed?
+ screenshot
+ end
+ if Capybara.current_driver == :selenium
+ page.execute_script("window.localStorage.clear()")
+ end
+ end
end
ENV["RAILS_ENV"] = "test"
+unless ENV["NO_COVERAGE_TEST"]
+ begin
+ require 'simplecov'
+ require 'simplecov-rcov'
+ class SimpleCov::Formatter::MergedFormatter
+ def format(result)
+ SimpleCov::Formatter::HTMLFormatter.new.format(result)
+ SimpleCov::Formatter::RcovFormatter.new.format(result)
+ end
+ end
+ SimpleCov.formatter = SimpleCov::Formatter::MergedFormatter
+ SimpleCov.start do
+ add_filter '/test/'
+ add_filter 'initializers/secret_token'
+ end
+ rescue Exception => e
+ $stderr.puts "SimpleCov unavailable (#{e}). Proceeding without."
+ end
+end
+
require File.expand_path('../../config/environment', __FILE__)
require 'rails/test_help'
-$ARV_API_SERVER_DIR = File.expand_path('../../../../services/api', __FILE__)
-SERVER_PID_PATH = 'tmp/pids/server.pid'
-
class ActiveSupport::TestCase
# Setup all fixtures in test/fixtures/*.(yml|csv) for all tests in
# alphabetical order.
def teardown
Thread.current[:arvados_api_token] = nil
+ Thread.current[:reader_tokens] = nil
super
end
end
# Returns the data structure from the named API server test fixture.
@@api_fixtures[name] ||= \
begin
- path = File.join($ARV_API_SERVER_DIR, 'test', 'fixtures', "#{name}.yml")
+ path = File.join(ApiServerForTests::ARV_API_SERVER_DIR,
+ 'test', 'fixtures', "#{name}.yml")
YAML.load(IO.read(path))
end
end
end
end
-class ApiServerBackedTestRunner < MiniTest::Unit
- # Make a hash that unsets Bundle's environment variables.
- # We'll use this environment when we launch Bundle commands in the API
- # server. Otherwise, those commands will try to use Workbench's gems, etc.
- @@APIENV = Hash[ENV.map { |key, val|
- (key =~ /^BUNDLE_/) ? [key, nil] : nil
- }.compact]
+class ApiServerForTests
+ ARV_API_SERVER_DIR = File.expand_path('../../../../services/api', __FILE__)
+ SERVER_PID_PATH = File.expand_path('tmp/pids/wbtest-server.pid', ARV_API_SERVER_DIR)
+ @main_process_pid = $$
- def _system(*cmd)
- if not system(@@APIENV, *cmd)
- raise RuntimeError, "#{cmd[0]} returned exit code #{$?.exitstatus}"
+ def self._system(*cmd)
+ $stderr.puts "_system #{cmd.inspect}"
+ Bundler.with_clean_env do
+ if not system({'RAILS_ENV' => 'test'}, *cmd)
+ raise RuntimeError, "#{cmd[0]} returned exit code #{$?.exitstatus}"
+ end
+ end
+ end
+
+ def self.make_ssl_cert
+ unless File.exists? './self-signed.key'
+ _system('openssl', 'req', '-new', '-x509', '-nodes',
+ '-out', './self-signed.pem',
+ '-keyout', './self-signed.key',
+ '-days', '3650',
+ '-subj', '/CN=localhost')
end
end
- def _run(args=[])
+ def self.kill_server
+ if (pid = find_server_pid)
+ $stderr.puts "Sending TERM to API server, pid #{pid}"
+ Process.kill 'TERM', pid
+ end
+ end
+
+ def self.find_server_pid
+ pid = nil
+ begin
+ pid = IO.read(SERVER_PID_PATH).to_i
+ $stderr.puts "API server is running, pid #{pid.inspect}"
+ rescue Errno::ENOENT
+ end
+ return pid
+ end
+
+ def self.run(args=[])
+ ::MiniTest.after_run do
+ self.kill_server
+ end
+
+ # Kill server left over from previous test run
+ self.kill_server
+
Capybara.javascript_driver = :poltergeist
- server_pid = Dir.chdir($ARV_API_SERVER_DIR) do |apidir|
+ Dir.chdir(ARV_API_SERVER_DIR) do |apidir|
+ ENV["NO_COVERAGE_TEST"] = "1"
+ make_ssl_cert
_system('bundle', 'exec', 'rake', 'db:test:load')
_system('bundle', 'exec', 'rake', 'db:fixtures:load')
- _system('bundle', 'exec', 'rails', 'server', '-d')
+ _system('bundle', 'exec', 'passenger', 'start', '-d', '-p3001',
+ '--pid-file', SERVER_PID_PATH,
+ '--ssl',
+ '--ssl-certificate', 'self-signed.pem',
+ '--ssl-certificate-key', 'self-signed.key')
timeout = Time.now.tv_sec + 10
- begin
+ good_pid = false
+ while (not good_pid) and (Time.now.tv_sec < timeout)
sleep 0.2
- begin
- server_pid = IO.read(SERVER_PID_PATH).to_i
- good_pid = (server_pid > 0) and (Process.kill(0, pid) rescue false)
- rescue Errno::ENOENT
- good_pid = false
- end
- end while (not good_pid) and (Time.now.tv_sec < timeout)
+ server_pid = find_server_pid
+ good_pid = (server_pid and
+ (server_pid > 0) and
+ (Process.kill(0, server_pid) rescue false))
+ end
if not good_pid
raise RuntimeError, "could not find API server Rails pid"
end
- server_pid
- end
- begin
- super(args)
- ensure
- Process.kill('TERM', server_pid)
end
end
end
-MiniTest::Unit.runner = ApiServerBackedTestRunner.new
+ApiServerForTests.run
assert_equal false, Collection.is_empty_blob_locator?(x)
end
end
+
+ def get_files_tree(coll_name)
+ use_token :admin
+ Collection.find(api_fixture('collections')[coll_name]['uuid']).files_tree
+ end
+
+ test "easy files_tree" do
+ files_in = lambda do |dirname|
+ (1..3).map { |n| [dirname, "file#{n}", 0] }
+ end
+ assert_equal([['.', 'dir1', nil], ['./dir1', 'subdir', nil]] +
+ files_in['./dir1/subdir'] + files_in['./dir1'] +
+ [['.', 'dir2', nil]] + files_in['./dir2'] + files_in['.'],
+ get_files_tree('multilevel_collection_1'),
+ "Collection file tree was malformed")
+ end
+
+ test "files_tree with files deep in subdirectories" do
+ # This test makes sure files_tree generates synthetic directory entries.
+ # The manifest doesn't list directories with no files.
+ assert_equal([['.', 'dir1', nil], ['./dir1', 'sub1', nil],
+ ['./dir1/sub1', 'a', 0], ['./dir1/sub1', 'b', 0],
+ ['.', 'dir2', nil], ['./dir2', 'sub2', nil],
+ ['./dir2/sub2', 'c', 0], ['./dir2/sub2', 'd', 0]],
+ get_files_tree('multilevel_collection_2'),
+ "Collection file tree was malformed")
+ end
end
require 'test_helper'
class CollectionsHelperTest < ActionView::TestCase
+ test "file_path generates short names" do
+ assert_equal('foo', CollectionsHelper.file_path(['.', 'foo', 0]),
+ "wrong result for filename in collection root")
+ assert_equal('foo/bar', CollectionsHelper.file_path(['foo', 'bar', 0]),
+ "wrong result for filename in directory without leading .")
+ assert_equal('foo/bar', CollectionsHelper.file_path(['./foo', 'bar', 0]),
+ "wrong result for filename in directory with leading .")
+ end
end
--- /dev/null
+require 'test_helper'
+
+class FoldersHelperTest < ActionView::TestCase
+end
- sdk/perl/index.html.textile.liquid
- Ruby:
- sdk/ruby/index.html.textile.liquid
+ - Java:
+ - sdk/java/index.html.textile.liquid
- CLI:
- sdk/cli/index.html.textile.liquid
api:
- api/methods/jobs.html.textile.liquid
- api/methods/job_tasks.html.textile.liquid
- api/methods/keep_disks.html.textile.liquid
+ - api/methods/keep_services.html.textile.liquid
- api/methods/links.html.textile.liquid
- api/methods/logs.html.textile.liquid
- api/methods/nodes.html.textile.liquid
- api/schema/Job.html.textile.liquid
- api/schema/JobTask.html.textile.liquid
- api/schema/KeepDisk.html.textile.liquid
+ - api/schema/KeepService.html.textile.liquid
- api/schema/Link.html.textile.liquid
- api/schema/Log.html.textile.liquid
- api/schema/Node.html.textile.liquid
- admin/cheat_sheet.html.textile.liquid
installguide:
- Install:
- - install/index.html.md.liquid
+ - install/index.html.textile.liquid
- install/install-sso.html.textile.liquid
- install/install-api-server.html.textile.liquid
- install/install-workbench-app.html.textile.liquid
- - install/client.html.textile.liquid
- install/create-standard-objects.html.textile.liquid
- install/install-crunch-dispatch.html.textile.liquid
These resources govern the Arvados infrastructure itself: Git repositories, Keep disks, active nodes, etc.
-* "CommitAncestor":schema/CommitAncestor.html
-* "Commit":schema/Commit.html
* "KeepDisk":schema/KeepDisk.html
* "Node":schema/Node.html
* "Repository":schema/Repository.html
table(table table-bordered table-condensed).
|*Parameter name*|*Value*|*Description*|
-|limit |integer|Maximum number of resources to return|
-|offset |integer|Skip the first 'offset' objects|
-|filters |array |Conditions for selecting resources to return|
-|order |array |List of fields to use to determine sorting order for returned objects|
-|select |array |Specify which fields to return|
-|distinct|boolean|true: (default) do not return duplicate objects<br> false: permitted to return duplicates|
+|limit |integer|Maximum number of resources to return.|
+|offset |integer|Skip the first 'offset' resources that match the given filter conditions.|
+|filters |array |Conditions for selecting resources to return (see below).|
+|order |array |Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order.
+Example: @["head_uuid asc","modified_at desc"]@
+Default: @["created_at desc"]@|
+|select |array |Set of attributes to include in the response.
+Example: @["head_uuid","tail_uuid"]@
+Default: all available attributes, minus "manifest_text" in the case of collections.|
+|distinct|boolean|@true@: (default) do not return duplicate objects
+@false@: permitted to return duplicates|
+
+h3. Filters
+
+The value of the @filters@ parameter is an array of conditions. The @list@ method returns only the resources that satisfy all of the given conditions. In other words, the conjunction @AND@ is implicit.
+
+Each condition is expressed as an array with three elements: @[attribute, operator, operand]@.
+
+table(table table-bordered table-condensed).
+|_. Index|_. Element|_. Type|_. Description|_. Examples|
+|0|attribute|string|Name of the attribute to compare|@script_version@, @head_uuid@|
+|1|operator|string|Comparison operator|@>@, @>=@, @like@, @not in@|
+|2|operand|string, array, or null|Value to compare with the resource attribute|@"d00220fb%"@, @"1234"@, @["foo","bar"]@, @nil@|
+
+The following operators are available.
+
+table(table table-bordered table-condensed).
+|_. Operator|_. Operand type|_. Example|
+|@<@, @<=@, @>=@, @>@, @like@|string|@["script_version","like","d00220fb%"]@|
+|@=@, @!=@|string or null|@["tail_uuid","=","xyzzy-j7d0g-fffffffffffffff"]@
+@["tail_uuid","!=",null]@|
+|@in@, @not in@|array of strings|@["script_version","in",["master","d00220fb38d4b85ca8fc28a8151702a2b9d1dec5"]]@|
+|@is_a@|string|@["head_uuid","is_a","arvados#pipelineInstance"]@|
h2. Create
--- /dev/null
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "keep_services"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_services@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h2. accessible
+
+Get a list of keep services that are accessible to the requesting client. This
+is context-sensitive, for example providing the list of actual Keep servers
+when inside the cluster, but providing a proxy service if the client contacts
+Arvados from outside the cluster.
+
+Takes no arguments.
+
+h2. create
+
+Create a new KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|keep_disk|object||query||
+
+h2. delete
+
+Delete an existing KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+
+h2. get
+
+Gets a KeepService's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+
+h2. list
+
+List keep_services.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of keep_services to return.|query||
+|order|string|Order in which to return matching keep_services.|query||
+|filters|array|Conditions for filtering keep_services.|query||
+
+h2. update
+
+Update attributes of an existing KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+|keep_service|object||query||
h2. create
-Create a new Log.
+Create a new log entry.
Arguments:
h2. delete
-Delete an existing Log.
+Delete an existing log entry. This method can only be used by privileged (system administrator) users.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Log in question.|path||
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
h2. get
-Gets a Log's metadata by UUID.
+Retrieve a log entry.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Log in question.|path||
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
h2. list
-List logs.
+List log entries.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|limit|integer (default 100)|Maximum number of logs to return.|query||
-|order|string|Order in which to return matching logs.|query||
-|filters|array|Conditions for filtering logs.|query||
+|limit|integer (default 100)|Maximum number of log entries to return.|query||
+|order|string|Order in which to return matching log entries.|query||
+|filters|array|Conditions for filtering log entries.|query||
h2. update
-Update attributes of an existing Log.
+Update attributes of an existing log entry. This method can only be used by privileged (system administrator) users.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Log in question.|path||
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
|log|object||query||
h3. Side effects of creating a Collection
-Referenced data can be protected from garbage collection. See the section about "resources" links on the "Links":Links.html page.
+Referenced data can be protected from garbage collection. See the section about "resources" links on the "Links":Link.html page.
Data can be shared with other users via the Arvados permission model.
table(table table-bordered table-condensed).
|_. Key|_. Type|_. Description|_. Implemented|
+|docker_image|string|The name of a Docker image that this Job needs to run. If specified, Crunch will create a Docker container from this image, and run the Job's script inside that. The Keep mount and work directories will be available as volumes inside this container. You may specify the image in any format that Docker accepts, such as "arvados/jobs" or a hash identifier. If you specify a name, Crunch will try to install the latest version using @docker.io pull@.|✓|
|min_nodes|integer||✓|
|max_nodes|integer|||
|max_tasks_per_node|integer|Maximum simultaneous tasks on a single node|✓|
|last_read_at|datetime|||
|last_write_at|datetime|||
|last_ping_at|datetime|||
-|service_host|string|||
-|service_port|integer|||
-|service_ssl_flag|boolean|||
+|keep_service_uuid|string|||
--- /dev/null
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: KeepService
+
+...
+
+A **KeepService** is a service endpoint that supports the Keep protocol.
+
+h2. Methods
+
+See "keep_services":{{site.baseurl}}/api/methods/keep_services.html
+
+h2. Resource
+
+Each KeepService has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|service_host|string|||
+|service_port|integer|||
+|service_ssl_flag|boolean|||
+|service_type|string|||
\ No newline at end of file
+++ /dev/null
----
-layout: default
-navsection: installguide
-title: Overview
-...
-
-{% include 'alert_stub' %}
-
-# Installation Overview
-
-1. Set up a cluster, or use Amazon
-1. Create and mount Keep volumes
-1. [Install the Single Sign On (SSO) server](install-sso.html)
-1. [Install the Arvados REST API server](install-api-server.html)
-1. [Install the Arvados workbench application](install-workbench-app.html)
-1. [Install the Crunch dispatcher](install-crunch-dispatch.html)
-1. [Create standard objects](create-standard-objects.html)
-1. [Install client libraries](client.html)
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Overview
+...
+
+{% include 'alert_stub' %}
+
+h2. Installation Overview
+
+# Set up a cluster, or use Amazon
+# Create and mount Keep volumes
+# "Install the Single Sign On (SSO) server":install-sso.html
+# "Install the Arvados REST API server":install-api-server.html
+# "Install the Arvados workbench application":install-workbench-app.html
+# "Install the Crunch dispatcher":install-crunch-dispatch.html
+# "Create standard objects":create-standard-objects.html
+# Install client libraries (see "SDK Reference":{{site.baseurl}}/sdk/index.html).
h2. Download the source tree
<notextile>
-<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
</code></pre></notextile>
See also: "Downloading the source code":https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
</code></pre>
</notextile>
-h2. Add an admin user
+h2(#admin-user). Add an admin user
Point your browser to the API server's login endpoint:
h4. Perl SDK dependencies
-* @apt-get install libjson-perl libwww-perl libio-socket-ssl-perl libipc-system-simple-perl@
+Install the Perl SDK on the controller.
-Add this to @/etc/apt/sources.list@
-
-@deb http://git.oxf.freelogy.org/apt wheezy main contrib@
-
-Then
-
-@apt-get install libwarehouse-perl@
+* See "Perl SDK":{{site.baseurl}}/sdk/perl/index.html page for details.
h4. Python SDK dependencies
-On controller and all compute nodes:
+Install the Python SDK and CLI tools on controller and all compute nodes.
-* @apt-get install python-pip@
-* @pip install --upgrade virtualenv arvados-python-client@
+* See "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html page for details.
h4. Likely crunch job dependencies
h4. Repositories
-Crunch scripts must be in Git repositories in @/var/cache/git/*/.git@ (or whatever is configured in @services/api/config/environments/production.rb@).
-
-h4. Importing commits
-
-@services/api/script/import_commits.rb production@ must run periodically. Example @/var/service/arvados_import_commits/run@ script for daemontools or runit:
-
-<pre>
-#!/bin/sh
-set -e
-while sleep 60
-do
- cd /path/to/arvados/services/api
- setuidgid www-data env RAILS_ENV=production /usr/local/rvm/bin/rvm-exec 2.0.0 bundle exec ./script/import_commits.rb 2>&1
-done
-</pre>
+Crunch scripts must be in Git repositories in @/var/lib/arvados/git/*.git@ (or whatever is configured in @services/api/config/environments/production.rb@).
-Once you have imported some commits, you should be able to create a new job:
+Once you have a repository with commits -- and you have read access to the repository -- you should be able to create a new job:
<pre>
read -rd $'\000' newjob <<EOF; arv job create --job "$newjob"
{"script_parameters":{"input":"f815ec01d5d2f11cb12874ab2ed50daa"},
"script_version":"master",
- "script":"hash"}
+ "script":"hash",
+ "repository":"arvados"}
EOF
</pre>
<pre>
#!/bin/sh
set -e
+
+rvmexec=""
+## uncomment this line if you use rvm:
+#rvmexec="/usr/local/rvm/bin/rvm-exec 2.1.1"
+
export PATH="$PATH":/path/to/arvados/services/crunch
-export PERLLIB=/path/to/arvados/sdk/perl/lib:/path/to/warehouse-apps/libwarehouse-perl/lib
export ARVADOS_API_HOST={{ site.arvados_api_host }}
export CRUNCH_DISPATCH_LOCKFILE=/var/lock/crunch-dispatch
cd /path/to/arvados/services/api
export RAILS_ENV=production
-exec /usr/local/rvm/bin/rvm-exec 2.0.0 bundle exec ./script/crunch-dispatch.rb 2>&1
+exec $rvmexec bundle exec ./script/crunch-dispatch.rb 2>&1
</pre>
...
<notextile>
-<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/sso-devise-omniauth-provider.git</span>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/sso-devise-omniauth-provider.git</span>
~$ <span class="userinput">cd sso-devise-omniauth-provider</span>
~/sso-devise-omniauth-provider$ <span class="userinput">bundle install</span>
~/sso-devise-omniauth-provider$ <span class="userinput">rake db:create</span>
h2. Download the source tree
-Please follow the instructions on the "Download page":https://arvados.org/projects/arvados/wiki/Download in the wiki.
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+</code></pre></notextile>
+
+See also: "Downloading the source code":https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
The Workbench application is in @apps/workbench@ in the source tree.
~/arvados/apps/workbench$ <span class="userinput">bundle install --path=vendor/bundle</span>
</code></pre></notextile>
+The @bundle install@ command might produce a warning about the themes_for_rails gem. This is OK:
+
+<notextile>
+<pre><code>themes_for_rails at /home/<b>you</b>/.rvm/gems/ruby-2.1.1/bundler/gems/themes_for_rails-1fd2d7897d75 did not have a valid gemspec.
+This prevents bundler from installing bins or native extensions, but that may not affect its functionality.
+The validation message from Rubygems was:
+ duplicate dependency on rails (= 3.0.11, development), (>= 3.0.0) use:
+ add_runtime_dependency 'rails', '= 3.0.11', '>= 3.0.0'
+Using themes_for_rails (0.5.1) from https://github.com/holtkampw/themes_for_rails (at 1fd2d78)
+</code></pre></notextile>
+
h2. Configure the Workbench application
This application needs a secret token. Generate a new secret:
* Set @secret_token@ to the string you generated with @rake secret@.
* Point @arvados_login_base@ and @arvados_v1_base@ at your "API server":install-api-server.html
* @site_name@ can be any string to identify this Workbench.
-* Assuming that the SSL certificate you use for development isn't signed by a CA, make sure @arvados_insecure_https@ is @true@.
+* If the SSL certificate you use for development isn't signed by a CA, make sure @arvados_insecure_https@ is @true@.
Copy @config/piwik.yml.example@ to @config/piwik.yml@ and edit to suit.
-h3. Apache/Passenger (optional)
+h2. Start a standalone server
-Set up Apache and Passenger. Point them to the apps/workbench directory in the source tree.
+For testing and development, the easiest way to get started is to run the web server that comes with Rails.
+
+<notextile>
+<pre><code>~/arvados/apps/workbench$ <span class="userinput">bundle exec rails server --port=3031</span>
+</code></pre>
+</notextile>
+
+Point your browser to <notextile><code>http://<b>your.host</b>:3031/</code></notextile>.
h2. Trusted client setting
-Log in to Workbench once (this ensures that the Arvados API server has a record of the Workbench client).
+Log in to Workbench once to ensure that the Arvados API server has a record of the Workbench client. (It's OK if Workbench says your account hasn't been activated yet. We'll deal with that next.)
In the API server project root, start the rails console. Locate the ApiClient record for your Workbench installation (typically, while you're setting this up, the @last@ one in the database is the one you want), then set the @is_trusted@ flag for the appropriate client record:
-<notextile><pre><code>~/arvados/services/api$ <span class="userinput">RAILS_ENV=development bundle exec rails console</span>
+<notextile><pre><code>~/arvados/services/api$ <span class="userinput">bundle exec rails console</span>
irb(main):001:0> <span class="userinput">wb = ApiClient.all.last; [wb.url_prefix, wb.created_at]</span>
=> ["https://workbench.example.com/", Sat, 19 Apr 2014 03:35:12 UTC +00:00]
irb(main):002:0> <span class="userinput">include CurrentApiClient</span>
=> true
</code></pre>
</notextile>
+
+h2. Activate your own account
+
+Unless you already activated your account when installing the API server, the first time you log in to Workbench you will see a message that your account is awaiting activation.
+
+Activate your own account and give yourself administrator privileges by following the instructions in the "'Add an admin user' section of the API server install page":install-api-server.html#admin-user.
* "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
* "Perl SDK":{{site.baseurl}}/sdk/perl/index.html
* "Ruby SDK":{{site.baseurl}}/sdk/ruby/index.html
+* "Java SDK":{{site.baseurl}}/sdk/java/index.html
* "Command line SDK":{{site.baseurl}}/sdk/cli/index.html ("arv")
SDKs not yet implemented:
* Rails SDK: Workbench uses an ActiveRecord-like interface to Arvados. This hasn't yet been extracted from Workbench and packaged as a gem.
-* R and Java: We plan to support these, but they have not been implemented yet.
+* R: We plan to support this, but it has not been implemented yet.
--- /dev/null
+---
+layout: default
+navsection: sdk
+navmenu: Java
+title: "Java SDK"
+
+...
+
+The Java SDK provides a generic set of wrappers so you can make API calls in Java.
+
+h3. Introduction
+
+* The Java SDK requires Java 6 or later
+
+* The Java SDK is implemented as a maven project. Hence, you would need a working
+maven environment to be able to build the source code. If you do not have maven setup,
+you may find the "Maven in 5 Minutes":http://maven.apache.org/guides/getting-started/maven-in-five-minutes.html link useful.
+
+* In this document $ARVADOS_HOME is used to refer to the directory where
+arvados code is cloned in your system. For example: $ARVADOS_HOME = $HOME/arvados
+
+
+h3. Setting up the environment
+
+* The SDK requires a running Arvados API server. The following information
+ about the API server needs to be passed to the SDK using environment
+ variables or during the construction of the Arvados instance.
+
+<notextile>
+<pre>
+ARVADOS_API_TOKEN: API client token to be used to authorize with API server.
+
+ARVADOS_API_HOST: Host name of the API server.
+
+ARVADOS_API_HOST_INSECURE: Set this to true if you are using self-signed
+ certificates and would like to bypass certificate validations.
+</pre>
+</notextile>
+
+* Please see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for full details.
+
+
+h3. Building the Arvados SDK
+
+<notextile>
+<pre>
+$ <code class="userinput">cd $ARVADOS_HOME/sdk/java</code>
+
+$ <code class="userinput">mvn -Dmaven.test.skip=true clean package</code>
+ This will generate arvados sdk jar file in the target directory
+</pre>
+</notextile>
+
+
+h3. Implementing your code to use SDK
+
+* The following two sample programs serve as sample implementations using the SDK.
+<code class="userinput">$ARVADOS_HOME/sdk/java/ArvadosSDKJavaExample.java</code> is a simple program
+ that makes a few calls to API server.
+<code class="userinput">$ARVADOS_HOME/sdk/java/ArvadosSDKJavaExampleWithPrompt.java</code> can be
+ used to make calls to API server interactively.
+
+Please use these implementations to see how you would want to use the SDK from your Java program.
+
+Also, refer to <code class="userinput">$ARVADOS_HOME/arvados/sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java</code>
+for more sample API invocation examples.
+
+Below are the steps to compile and run these Java programs.
+
+* These programs create an instance of Arvados SDK class and use it to
+make various <code class="userinput">call</code> requests.
+
+* To compile the examples
+<notextile>
+<pre>
+$ <code class="userinput">javac -cp $ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+ArvadosSDKJavaExample*.java</code>
+This results in the generation of the ArvadosSDKJavaExample*.class files
+in the same directory as the Java files
+</pre>
+</notextile>
+
+* To run the samples
+<notextile>
+<pre>
+$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+ArvadosSDKJavaExample</code>
+$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+ArvadosSDKJavaExampleWithPrompt</code>
+</pre>
+</notextile>
+
+
+h3. Viewing and Managing SDK logging
+
+* SDK uses log4j logging
+
+* The default location of the log file is
+ <code class="userinput">$ARVADOS_HOME/sdk/java/log/arvados_sdk_java.log</code>
+
+* Update <code class="userinput">log4j.properties</code> file to change name and location of the log file.
+
+<notextile>
+<pre>
+$ <code class="userinput">nano $ARVADOS_HOME/sdk/java/src/main/resources/log4j.properties</code>
+and modify the <code class="userinput">log4j.appender.fileAppender.File</code> property as needed.
+
+Rebuild the SDK:
+$ <code class="userinput">mvn -Dmaven.test.skip=true clean package</code>
+</pre>
+</notextile>
+
+
+h3. Using the SDK in eclipse
+
+* To develop in eclipse, you can use the provided <code class="userinput">eclipse project</code>
+
+* Install "m2eclipse":https://www.eclipse.org/m2e/ plugin in your eclipse
+
+* Set <code class="userinput">M2_REPO</code> classpath variable in eclipse to point to your local repository.
+The local repository is usually located in your home directory at <code class="userinput">$HOME/.m2/repository</code>.
+
+<notextile>
+<pre>
+In Eclipse IDE:
+Window -> Preferences -> Java -> Build Path -> Classpath Variables
+ Click on the "New..." button and add a new
+ M2_REPO variable and set it to your local Maven repository
+</pre>
+</notextile>
+
+
+* Open the SDK project in eclipse
+<notextile>
+<pre>
+In Eclipse IDE:
+File -> Import -> Existing Projects into Workspace -> Next -> Browse
+ and select $ARVADOS_HOME/sdk/java
+</pre>
+</notextile>
<notextile>
<pre>
-$ <code class="userinput">sudo apt-get install libjson-perl libio-socket-ssl-perl libwww-perl</code>
+$ <code class="userinput">sudo apt-get install libjson-perl libio-socket-ssl-perl libwww-perl libipc-system-simple-perl</code>
$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
$ <code class="userinput">cd arvados/sdk/perl</code>
$ <code class="userinput">perl Makefile.PL</code>
<notextile>
<pre>
-$ <code class="userinput">sudo apt-get install python-pip python-dev libattr1-dev libfuse-dev pkg-config</code>
+$ <code class="userinput">sudo apt-get install python-pip python-dev libattr1-dev libfuse-dev pkg-config python-yaml</code>
$ <code class="userinput">sudo pip install arvados-python-client</code>
</pre>
</notextile>
<notextile>
<pre>
-$ <code class="userinput">sudo apt-get install python-dev libattr1-dev libfuse-dev pkg-config</code>
-$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
-$ <code class="userinput">cd arvados/sdk/python</code>
-$ <code class="userinput">./build.sh</code>
-$ <code class="userinput">sudo python setup.py install</code>
+~$ <code class="userinput">sudo apt-get install python-dev libattr1-dev libfuse-dev pkg-config</code>
+~$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
+~$ <code class="userinput">cd arvados/sdk/python</code>
+~/arvados/sdk/python$ <code class="userinput">sudo python setup.py install</code>
</pre>
</notextile>
<notextile>
<pre>
$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
-$ <code class="userinput">cd arvados/sdk/cli</code>
+$ <code class="userinput">cd arvados/sdk/ruby</code>
$ <code class="userinput">gem build arvados.gemspec</code>
$ <code class="userinput">sudo gem install arvados-*.gem</code>
</pre>
Next, on the Arvados virtual machine, clone your Git repository:
<notextile>
-<pre><code>~$ <span class="userinput">git clone git@git.{{ site.arvados_api_host }}:<b>you</b>.git</span>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone git@git.{{ site.arvados_api_host }}:<b>you</b>.git</span>
Cloning into '<b>you</b>'...</code></pre>
</notextile>
-This will create a Git repository in the directory called *@you@*.
+This will create a Git repository in the directory called *@you@* in your home directory. Say yes when prompted to continue with connection.
+Ignore any warning that you are cloning an empty repository.
{% include 'notebox_begin' %}
For more information about using Git, try
<notextile>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">git commit -m"my first script"</span>
[master (root-commit) 27fd88b] my first script
- 1 file changed, 33 insertions(+)
+ 1 file changed, 45 insertions(+)
create mode 100755 crunch_scripts/hash.py</code></pre>
</notextile>
Compressing objects: 100% (2/2), done.
Writing objects: 100% (4/4), 682 bytes, done.
Total 4 (delta 0), reused 0 (delta 0)
-To git@git.qr1hi.arvadosapi.com:you.git
+To git@git.qr1hi.arvadosapi.com:<b>you</b>.git
* [new branch] master -> master</code></pre>
</notextile>
BASE_DEPS = base/Dockerfile $(BASE_GENERATED)
+JOBS_DEPS = jobs/Dockerfile
+
API_DEPS = api/Dockerfile $(API_GENERATED)
DOC_DEPS = doc/Dockerfile doc/apache2_vhost
mkdir -p build
rsync -rlp --exclude=docker/ --exclude='**/log/*' --exclude='**/tmp/*' \
--chmod=Da+rx,Fa+rX ../ build/
+ find build/ -name \*.gem -delete
+ cd build/sdk/python/ && ./build.sh
+ cd build/sdk/cli && gem build arvados-cli.gemspec
+ cd build/sdk/ruby && gem build arvados.gemspec
touch build/.buildstamp
$(BASE_GENERATED): config.yml $(BUILD)
$(DOCKER_BUILD) -t arvados/doc doc
date >doc-image
+jobs-image: base-image $(BUILD) $(JOBS_DEPS)
+ $(DOCKER_BUILD) -t arvados/jobs jobs
+ date >jobs-image
+
workbench-image: passenger-image $(BUILD) $(WORKBENCH_DEPS)
mkdir -p workbench/generated
tar -czf workbench/generated/workbench.tar.gz -C build/apps workbench
--- /dev/null
+FROM arvados/base
+MAINTAINER Brett Smith <brett@curoverse.com>
+
+# Install dependencies and set up system.
+# The FUSE packages help ensure that we can install the Python SDK (arv-mount).
+RUN /usr/bin/apt-get install -q -y python-dev python-llfuse python-pip \
+ libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl \
+ fuse libattr1-dev libfuse-dev && \
+ /usr/sbin/adduser --disabled-password \
+ --gecos 'Crunch execution user' crunch && \
+ /usr/bin/install -d -o crunch -g crunch -m 0700 /tmp/crunch-job && \
+ /bin/ln -s /usr/src/arvados /usr/local/src/arvados
+
+# Install Arvados packages.
+RUN find /usr/src/arvados/sdk -name '*.gem' -print0 | \
+ xargs -0rn 1 gem install && \
+ cd /usr/src/arvados/sdk/python && \
+ python setup.py install
+
+USER crunch
s.executables << "arv-run-pipeline-instance"
s.executables << "arv-crunch-job"
s.executables << "arv-tag"
+ s.required_ruby_version = '>= 2.1.0'
s.add_runtime_dependency 'arvados', '~> 0.1.0'
s.add_runtime_dependency 'google-api-client', '~> 0.6.3'
s.add_runtime_dependency 'activesupport', '~> 3.2', '>= 3.2.13'
exit
end
-request_parameters = {}.merge(method_opts)
+request_parameters = {_profile:true}.merge(method_opts)
resource_body = request_parameters.delete(resource_schema.to_sym)
if resource_body
request_body = {
resource_schema => resource_body
}
else
- request_body = {}
+ request_body = nil
end
case api_method
end
exit 0
else
- request_body[:api_token] = ENV['ARVADOS_API_TOKEN']
- request_body[:_profile] = true
result = client.execute(:api_method => eval(api_method),
:parameters => request_parameters,
:body => request_body,
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
end
begin
:parameters => {
:uuid => uuid
},
- :body => {
- :api_token => ENV['ARVADOS_API_TOKEN']
- },
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
unless j.is_a? Hash and j[:uuid]
debuglog "Failed to get pipeline_instance: #{j[:errors] rescue nil}", 0
def self.create(attributes)
result = $client.execute(:api_method => $arvados.pipeline_instances.create,
:body => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:pipeline_instance => attributes
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
unless j.is_a? Hash and j[:uuid]
abort "Failed to create pipeline_instance: #{j[:errors] rescue nil} #{j.inspect}"
:uuid => @pi[:uuid]
},
:body => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:pipeline_instance => @attributes_to_update.to_json
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
unless j.is_a? Hash and j[:uuid]
debuglog "Failed to save pipeline_instance: #{j[:errors] rescue nil}", 0
@cache ||= {}
result = $client.execute(:api_method => $arvados.jobs.get,
:parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:uuid => uuid
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
@cache[uuid] = JSON.parse result.body, :symbolize_names => true
end
def self.where(conditions)
result = $client.execute(:api_method => $arvados.jobs.list,
:parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:limit => 10000,
:where => conditions.to_json
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
list = JSON.parse result.body, :symbolize_names => true
if list and list[:items].is_a? Array
list[:items]
def self.create(job, create_params)
@cache ||= {}
result = $client.execute(:api_method => $arvados.jobs.create,
- :parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
+ :body => {
:job => job.to_json
}.merge(create_params),
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
if j.is_a? Hash and j[:uuid]
@cache[j[:uuid]] = j
else
result = $client.execute(:api_method => $arvados.pipeline_templates.get,
:parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:uuid => template
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
@template = JSON.parse result.body, :symbolize_names => true
if !@template[:uuid]
abort "#{$0}: fatal: failed to retrieve pipeline template #{template} #{@template[:errors].inspect rescue nil}"
end
def setup_instance
- @instance ||= PipelineInstance.
- create(:components => @components,
+ if $options[:submit]
+ @instance ||= PipelineInstance.
+ create(:components => @components,
+ :pipeline_template_uuid => @template[:uuid],
+ :state => 'New')
+ else
+ @instance ||= PipelineInstance.
+ create(:components => @components,
:pipeline_template_uuid => @template[:uuid],
- :active => true)
+ :state => 'RunningOnClient')
+ end
self
end
def run
moretodo = true
+ interrupted = false
+
while moretodo
moretodo = false
@components.each do |cname, c|
job = nil
+ owner_uuid = @instance[:owner_uuid]
# Is the job satisfying this component already known to be
# finished? (Already meaning "before we query API server about
# the job's current state")
:repository => c[:repository],
:nondeterministic => c[:nondeterministic],
:output_is_persistent => c[:output_is_persistent] || false,
+ :owner_uuid => owner_uuid,
# TODO: Delete the following three attributes when
# supporting pre-20140418 API servers is no longer
# important. New API servers take these as flags that
tail_kind: 'arvados#user',
tail_uuid: @my_user_uuid,
head_kind: 'arvados#collection',
- head_uuid: wanted
+ head_uuid: wanted,
+ owner_uuid: owner_uuid
}
debuglog "added link, uuid #{newlink[:uuid]}"
end
end
end
@instance[:components] = @components
- @instance[:active] = moretodo
report_status
if @options[:no_wait]
sleep 10
rescue Interrupt
debuglog "interrupt", 0
- abort
+ interrupted = true
+ break
end
end
end
end
end
- if ended == @components.length or failed > 0
- @instance[:active] = false
- @instance[:success] = (succeeded == @components.length)
+ success = (succeeded == @components.length)
+
+ if interrupted
+ if success
+ @instance[:state] = 'Complete'
+ else
+ @instance[:state] = 'Paused'
+ end
+ else
+ if ended == @components.length or failed > 0
+ @instance[:state] = success ? 'Complete' : 'Failed'
+ end
end
+ # set components_summary
+ components_summary = {"todo" => @components.length - ended, "done" => succeeded, "failed" => failed}
+ @instance[:components_summary] = components_summary
+
@instance.save
end
def cleanup
- if @instance
- @instance[:active] = false
+ if @instance and @instance[:state] == 'RunningOnClient'
+ @instance[:state] = 'Paused'
@instance.save
end
end
my $arv = Arvados->new('apiVersion' => 'v1');
-my $metastream;
+my $local_logfile;
my $User = $arv->{'users'}->{'current'}->execute;
$job_id = $Job->{'uuid'};
my $keep_logfile = $job_id . '.log.txt';
-my $local_logfile = File::Temp->new();
+$local_logfile = File::Temp->new();
$Job->{'runtime_constraints'} ||= {};
$Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
}
-
+# If this job requires a Docker image, install that.
+my $docker_bin = "/usr/bin/docker.io";
+my $docker_image = $Job->{runtime_constraints}->{docker_image} || "";
+if ($docker_image) {
+ my $docker_pid = fork();
+ if ($docker_pid == 0)
+ {
+ srun (["srun", "--nodelist=" . join(' ', @node)],
+ [$docker_bin, 'pull', $docker_image]);
+ exit ($?);
+ }
+ while (1)
+ {
+ last if $docker_pid == waitpid (-1, WNOHANG);
+ freeze_if_want_freeze ($docker_pid);
+ select (undef, undef, undef, 0.1);
+ }
+ # If the Docker image was specified as a hash, pull will fail.
+ # Ignore that error. We'll see what happens when we try to run later.
+ if (($? != 0) && ($docker_image !~ /^[0-9a-fA-F]{5,64}$/))
+ {
+ croak("Installing Docker image $docker_image returned exit code $?");
+ }
+}
foreach (qw (script script_version script_parameters runtime_constraints))
{
qw(-n1 -c1 -N1 -D), $ENV{'TMPDIR'},
"--job-name=$job_id.$id.$$",
);
- my @execargs = qw(sh);
my $build_script_to_send = "";
my $command =
"if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
$command .=
"&& perl -";
}
- $command .=
- "&& exec arv-mount $ENV{TASK_KEEPMOUNT} --exec $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+ $command .= "&& exec arv-mount --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
+ if ($docker_image)
+ {
+ $command .= "$docker_bin run -i -a stdin -a stdout -a stderr ";
+ # Dynamically configure the container to use the host system as its
+ # DNS server. Get the host's global addresses from the ip command,
+ # and turn them into docker --dns options using gawk.
+ $command .=
+ q{$(ip -o address show scope global |
+ gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
+ foreach my $env_key (qw(CRUNCH_SRC CRUNCH_TMP TASK_KEEPMOUNT))
+ {
+ $command .= "-v \Q$ENV{$env_key}:$ENV{$env_key}:rw\E ";
+ }
+ while (my ($env_key, $env_val) = each %ENV)
+ {
+ $command .= "-e \Q$env_key=$env_val\E ";
+ }
+ $command .= "\Q$docker_image\E ";
+ }
+ $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
my @execargs = ('bash', '-c', $command);
srun (\@srunargs, \@execargs, undef, $build_script_to_send);
exit (111);
delete $proc{$pid};
# Load new tasks
- my $newtask_list = $arv->{'job_tasks'}->{'list'}->execute(
- 'where' => {
- 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
- },
- 'order' => 'qsequence'
- );
- foreach my $arvados_task (@{$newtask_list->{'items'}}) {
+ my $newtask_list = [];
+ my $newtask_results;
+ do {
+ $newtask_results = $arv->{'job_tasks'}->{'list'}->execute(
+ 'where' => {
+ 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
+ },
+ 'order' => 'qsequence',
+ 'offset' => scalar(@$newtask_list),
+ );
+ push(@$newtask_list, @{$newtask_results->{items}});
+ } while (@{$newtask_results->{items}});
+ foreach my $arvados_task (@$newtask_list) {
my $jobstep = {
'level' => $arvados_task->{'sequence'},
'failures' => 0,
$message =~ s{([^ -\176])}{"\\" . sprintf ("%03o", ord($1))}ge;
$message .= "\n";
my $datetime;
- if ($metastream || -t STDERR) {
+ if ($local_logfile || -t STDERR) {
my @gmtime = gmtime;
$datetime = sprintf ("%04d-%02d-%02d_%02d:%02d:%02d",
$gmtime[5]+1900, $gmtime[4]+1, @gmtime[3,2,1,0]);
}
print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);
- if ($metastream) {
- print $metastream $datetime . " " . $message;
+ if ($local_logfile) {
+ print $local_logfile $datetime . " " . $message;
}
}
freeze() if @jobstep_todo;
collate_output() if @jobstep_todo;
cleanup();
- save_meta() if $metastream;
+ save_meta() if $local_logfile;
die;
}
. quotemeta($local_logfile->filename);
my $loglocator = `$cmd`;
die "system $cmd failed: $?" if $?;
+ chomp($loglocator);
$local_logfile = undef; # the temp file is automatically deleted
Log (undef, "log manifest is $loglocator");
--- /dev/null
+require 'minitest/autorun'
+
+# Integration smoke tests for the arv-run-pipeline-instance CLI.
+# Each test shells out to the installed executable and inspects the
+# subprocess's stderr; no API server is contacted by the enabled cases.
+class TestRunPipelineInstance < Minitest::Test
+ def setup
+ end
+
+ # -h must print usage without writing anything to stderr.
+ # NOTE(review): `out` is captured but never asserted on; `system (...)`
+ # with a space before the paren is unidiomatic Ruby.
+ def test_run_pipeline_instance_get_help
+ out, err = capture_subprocess_io do
+ system ('arv-run-pipeline-instance -h')
+ end
+ assert_equal '', err
+ end
+
+ # An unknown option must produce a (non-empty) error message on stderr.
+ def test_run_pipeline_instance_with_no_such_option
+ out, err = capture_subprocess_io do
+ system ('arv-run-pipeline-instance --junk')
+ end
+ refute_equal '', err
+ end
+
+ # Placeholder: the real invocations are commented out because the test
+ # environment lacks HOST_INSECURE / the arvados gem (see notes below),
+ # so this currently only asserts that running nothing emits no stderr.
+ def test_run_pipeline_instance_for_bogus_template_uuid
+ out, err = capture_subprocess_io do
+ # fails with error SSL_connect error because HOST_INSECURE is not being used
+ # system ('arv-run-pipeline-instance --template bogus-abcde-fghijklmnopqrs input=c1bad4b39ca5a924e481008009d94e32+210')
+
+ # fails with error: fatal: cannot load such file -- arvados
+ # system ('./bin/arv-run-pipeline-instance --template bogus-abcde-fghijklmnopqrs input=c1bad4b39ca5a924e481008009d94e32+210')
+ end
+ #refute_equal '', err
+ assert_equal '', err
+ end
+
+end
--- /dev/null
+#! /bin/sh
+
+# Wraps the 'go' executable with some environment setup. Sets GOPATH, creates
+# 'pkg' and 'bin' directories, automatically installs dependencies, then runs
+# the underlying 'go' executable with any command line parameters provided to
+# the script.
+
+rootdir=$(readlink -f $(dirname $0))
+GOPATH=$rootdir:$GOPATH
+export GOPATH
+
+mkdir -p $rootdir/pkg
+mkdir -p $rootdir/bin
+
+go get gopkg.in/check.v1
+
+go $*
--- /dev/null
+// Lightweight implementation of io.ReadCloser that checks the contents read
+// from the underlying io.Reader against a checksum hash. To avoid reading the
+// entire contents into a buffer up front, the hash is updated with each read,
+// and the actual checksum is not checked until the underlying reader returns
+// EOF.
+package keepclient
+
+import (
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+)
+
+// Returned (in place of io.EOF) when the data read does not match Check.
+var BadChecksum = errors.New("Reader failed checksum")
+
+type HashCheckingReader struct {
+ // The underlying data source
+ io.Reader
+
+ // The hashing function to use
+ hash.Hash
+
+ // The hash value to check against. Must be a hex-encoded lowercase string.
+ Check string
+}
+
+// Read from the underlying reader, update the hashing function, and pass the
+// results through. Will return BadChecksum on the last read instead of EOF if
+// the checksum doesn't match.
+//
+// NOTE(review): io.Reader permits returning n > 0 together with err == io.EOF.
+// In that case the final p[:n] bytes are never written to the hash (only the
+// err == nil branch hashes), which would yield a spurious BadChecksum for
+// readers that report EOF with the last chunk — verify against expected
+// underlying readers.
+func (this HashCheckingReader) Read(p []byte) (n int, err error) {
+ n, err = this.Reader.Read(p)
+ if err == nil {
+ this.Hash.Write(p[:n])
+ } else if err == io.EOF {
+ sum := this.Hash.Sum(make([]byte, 0, this.Hash.Size()))
+ if fmt.Sprintf("%x", sum) != this.Check {
+ err = BadChecksum
+ }
+ }
+ return n, err
+}
+
+// Write entire contents of this.Reader to 'dest'. Returns BadChecksum if the
+// data written to 'dest' doesn't match the hash code of this.Check.
+func (this HashCheckingReader) WriteTo(dest io.Writer) (written int64, err error) {
+ // Prefer the source's own WriteTo if available; either way the bytes are
+ // teed into the hash via MultiWriter so the checksum covers everything.
+ if writeto, ok := this.Reader.(io.WriterTo); ok {
+ written, err = writeto.WriteTo(io.MultiWriter(dest, this.Hash))
+ } else {
+ written, err = io.Copy(io.MultiWriter(dest, this.Hash), this.Reader)
+ }
+
+ sum := this.Hash.Sum(make([]byte, 0, this.Hash.Size()))
+
+ // NOTE(review): a copy error is overwritten by BadChecksum here when the
+ // (necessarily incomplete) data also fails the checksum — the root-cause
+ // error is lost.
+ if fmt.Sprintf("%x", sum) != this.Check {
+ err = BadChecksum
+ }
+
+ return written, err
+}
+
+// Close() the underlying Reader if it is castable to io.ReadCloser. This will
+// drain the underlying reader of any remaining data and check the checksum.
+//
+// NOTE(review): the error from the drain (io.Copy) is discarded when the
+// reader is also a Closer, and both may be masked by BadChecksum below.
+func (this HashCheckingReader) Close() (err error) {
+ _, err = io.Copy(this.Hash, this.Reader)
+
+ if closer, ok := this.Reader.(io.ReadCloser); ok {
+ err = closer.Close()
+ }
+
+ sum := this.Hash.Sum(make([]byte, 0, this.Hash.Size()))
+ if fmt.Sprintf("%x", sum) != this.Check {
+ err = BadChecksum
+ }
+
+ return err
+}
--- /dev/null
+package keepclient
+
+import (
+ "bytes"
+ "crypto/md5"
+ "fmt"
+ . "gopkg.in/check.v1"
+ "io"
+ "io/ioutil"
+)
+
+type HashcheckSuiteSuite struct{}
+
+// Gocheck boilerplate
+var _ = Suite(&HashcheckSuiteSuite{})
+
+// Read path: matching content yields a clean EOF; mismatched content of the
+// same length yields BadChecksum from the final Read.
+func (h *HashcheckSuiteSuite) TestRead(c *C) {
+ hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+ // Content matches the expected md5 — ReadAll succeeds.
+ {
+ r, w := io.Pipe()
+ hcr := HashCheckingReader{r, md5.New(), hash}
+ go func() {
+ w.Write([]byte("foo"))
+ w.Close()
+ }()
+ p, err := ioutil.ReadAll(hcr)
+ c.Check(len(p), Equals, 3)
+ c.Check(err, Equals, nil)
+ }
+
+ // Content does not match — ReadAll surfaces BadChecksum instead of EOF.
+ {
+ r, w := io.Pipe()
+ hcr := HashCheckingReader{r, md5.New(), hash}
+ go func() {
+ w.Write([]byte("bar"))
+ w.Close()
+ }()
+ p, err := ioutil.ReadAll(hcr)
+ c.Check(len(p), Equals, 3)
+ c.Check(err, Equals, BadChecksum)
+ }
+}
+
+// WriteTo path: same two cases, with the data pumped through an io.Pipe and
+// drained by a goroutine so WriteTo can complete.
+func (h *HashcheckSuiteSuite) TestWriteTo(c *C) {
+ hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+ // Matching content — WriteTo reports 3 bytes and no error.
+ {
+ bb := bytes.NewBufferString("foo")
+ hcr := HashCheckingReader{bb, md5.New(), hash}
+ r, w := io.Pipe()
+ done := make(chan bool)
+ go func() {
+ p, err := ioutil.ReadAll(r)
+ c.Check(len(p), Equals, 3)
+ c.Check(err, Equals, nil)
+ done <- true
+ }()
+
+ n, err := hcr.WriteTo(w)
+ w.Close()
+ c.Check(n, Equals, int64(3))
+ c.Check(err, Equals, nil)
+ <-done
+ }
+
+ // Mismatched content — bytes still flow to the destination, but WriteTo
+ // returns BadChecksum.
+ {
+ bb := bytes.NewBufferString("bar")
+ hcr := HashCheckingReader{bb, md5.New(), hash}
+ r, w := io.Pipe()
+ done := make(chan bool)
+ go func() {
+ p, err := ioutil.ReadAll(r)
+ c.Check(len(p), Equals, 3)
+ c.Check(err, Equals, nil)
+ done <- true
+ }()
+
+ n, err := hcr.WriteTo(w)
+ w.Close()
+ c.Check(n, Equals, int64(3))
+ c.Check(err, Equals, BadChecksum)
+ <-done
+ }
+}
--- /dev/null
+/* Provides low-level Get/Put primitives for accessing Arvados Keep blocks. */
+package keepclient
+
+import (
+ "arvados.org/streamer"
+ "crypto/md5"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unsafe"
+)
+
+// A Keep "block" is 64MB.
+const BLOCKSIZE = 64 * 1024 * 1024
+
+// Sentinel errors returned by the Get/Put entry points below.
+var BlockNotFound = errors.New("Block not found")
+var InsufficientReplicasError = errors.New("Could not write sufficient replicas")
+var OversizeBlockError = errors.New("Block too big")
+var MissingArvadosApiHost = errors.New("Missing required environment variable ARVADOS_API_HOST")
+var MissingArvadosApiToken = errors.New("Missing required environment variable ARVADOS_API_TOKEN")
+
+// HTTP headers used to request / report the replica count on PUT.
+const X_Keep_Desired_Replicas = "X-Keep-Desired-Replicas"
+const X_Keep_Replicas_Stored = "X-Keep-Replicas-Stored"
+
+// Information about Arvados and Keep servers.
+type KeepClient struct {
+ ApiServer string
+ ApiToken string
+ ApiInsecure bool
+ Want_replicas int
+ Client *http.Client
+ Using_proxy bool
+ External bool
+ // Keep server base URLs; accessed atomically via ServiceRoots /
+ // SetServiceRoots (stored as a pointer so it can be swapped in place).
+ service_roots *[]string
+ lock sync.Mutex
+}
+
+// Create a new KeepClient, initialized with standard Arvados environment
+// variables ARVADOS_API_HOST, ARVADOS_API_TOKEN, and (optionally)
+// ARVADOS_API_HOST_INSECURE. This will contact the API server to discover
+// Keep servers.
+//
+// Returns the partially-initialized client together with
+// MissingArvadosApiHost / MissingArvadosApiToken when the corresponding
+// environment variable is unset.
+func MakeKeepClient() (kc KeepClient, err error) {
+ insecure := (os.Getenv("ARVADOS_API_HOST_INSECURE") == "true")
+ external := (os.Getenv("ARVADOS_EXTERNAL_CLIENT") == "true")
+
+ kc = KeepClient{
+ ApiServer: os.Getenv("ARVADOS_API_HOST"),
+ ApiToken: os.Getenv("ARVADOS_API_TOKEN"),
+ ApiInsecure: insecure,
+ Want_replicas: 2,
+ Client: &http.Client{Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure}}},
+ Using_proxy: false,
+ External: external}
+
+ // Validate required environment after construction so the caller still
+ // receives a usable struct alongside the error.
+ if os.Getenv("ARVADOS_API_HOST") == "" {
+ return kc, MissingArvadosApiHost
+ }
+ if os.Getenv("ARVADOS_API_TOKEN") == "" {
+ return kc, MissingArvadosApiToken
+ }
+
+ // DiscoverKeepServers (defined elsewhere in this package) populates
+ // service_roots from the API server.
+ err = (&kc).DiscoverKeepServers()
+
+ return kc, err
+}
+
+// Put a block given the block hash, a reader with the block data, and the
+// expected length of that data. The desired number of replicas is given in
+// KeepClient.Want_replicas. Returns the number of replicas that were written
+// and if there was an error. Note this will return InsufficientReplicasError
+// whenever 0 <= replicas < this.Want_replicas.
+func (this KeepClient) PutHR(hash string, r io.Reader, expectedLength int64) (locator string, replicas int, err error) {
+
+ // Buffer for reads from 'r'
+ var bufsize int
+ if expectedLength > 0 {
+ if expectedLength > BLOCKSIZE {
+ return "", 0, OversizeBlockError
+ }
+ bufsize = int(expectedLength)
+ } else {
+ // Unknown length: reserve the maximum block size.
+ bufsize = BLOCKSIZE
+ }
+
+ // Wrap r so the md5 of what we actually read is verified against 'hash'
+ // as the data streams through.
+ t := streamer.AsyncStreamFromReader(bufsize, HashCheckingReader{r, md5.New(), hash})
+ defer t.Close()
+
+ return this.putReplicas(hash, t, expectedLength)
+}
+
+// Put a block given the block hash and a byte buffer. The desired number of
+// replicas is given in KeepClient.Want_replicas. Returns the number of
+// replicas that were written and if there was an error. Note this will return
+// InsufficientReplicasError whenever 0 <= replicas < this.Want_replicas.
+func (this KeepClient) PutHB(hash string, buf []byte) (locator string, replicas int, err error) {
+ t := streamer.AsyncStreamFromSlice(buf)
+ defer t.Close()
+
+ return this.putReplicas(hash, t, int64(len(buf)))
+}
+
+// Put a block given a buffer. The hash will be computed. The desired number
+// of replicas is given in KeepClient.Want_replicas. Returns the number of
+// replicas that were written and if there was an error. Note this will return
+// InsufficientReplicasError whenever 0 <= replicas < this.Want_replicas.
+func (this KeepClient) PutB(buffer []byte) (locator string, replicas int, err error) {
+ hash := fmt.Sprintf("%x", md5.Sum(buffer))
+ return this.PutHB(hash, buffer)
+}
+
+// Put a block, given a Reader. This will read the entire reader into a buffer
+// to compute the hash. The desired number of replicas is given in
+// KeepClient.Want_replicas. Returns the number of replicas that were written
+// and if there was an error. Note this will return InsufficientReplicasError
+// whenever 0 <= replicas < this.Want_replicas. Also note that if the block
+// hash and data size are available, PutHR() is more efficient.
+func (this KeepClient) PutR(r io.Reader) (locator string, replicas int, err error) {
+ if buffer, err := ioutil.ReadAll(r); err != nil {
+ return "", 0, err
+ } else {
+ return this.PutB(buffer)
+ }
+}
+
+// Get a block given a hash. Return a reader, the expected data length, the
+// URL the block was fetched from, and if there was an error. If the block
+// checksum does not match, the final Read() on the reader returned by this
+// method will return a BadChecksum error instead of EOF.
+func (this KeepClient) Get(hash string) (reader io.ReadCloser,
+ contentLength int64, url string, err error) {
+ return this.AuthorizedGet(hash, "", "")
+}
+
+// Get a block given a hash, with additional authorization provided by
+// signature and timestamp. Return a reader, the expected data length, the URL
+// the block was fetched from, and if there was an error. If the block
+// checksum does not match, the final Read() on the reader returned by this
+// method will return a BadChecksum error instead of EOF.
+//
+// Servers are tried in the hash-dependent shuffled order; any request or
+// connection error simply advances to the next server.
+// NOTE(review): non-200 responses fall through without resp.Body.Close(),
+// which leaks the connection; the locals 'err'/'url' also shadow the named
+// return values inside the loop.
+func (this KeepClient) AuthorizedGet(hash string,
+ signature string,
+ timestamp string) (reader io.ReadCloser,
+ contentLength int64, url string, err error) {
+
+ // Calculate the ordering for asking servers
+ sv := this.shuffledServiceRoots(hash)
+
+ for _, host := range sv {
+ var req *http.Request
+ var err error
+ var url string
+ if signature != "" {
+ // Signed locator form: hash+A<signature>@<timestamp>
+ url = fmt.Sprintf("%s/%s+A%s@%s", host, hash,
+ signature, timestamp)
+ } else {
+ url = fmt.Sprintf("%s/%s", host, hash)
+ }
+ if req, err = http.NewRequest("GET", url, nil); err != nil {
+ continue
+ }
+
+ req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
+
+ var resp *http.Response
+ if resp, err = this.Client.Do(req); err != nil {
+ continue
+ }
+
+ if resp.StatusCode == http.StatusOK {
+ // Body is checksum-verified as the caller reads it.
+ return HashCheckingReader{resp.Body, md5.New(), hash}, resp.ContentLength, url, nil
+ }
+ }
+
+ return nil, 0, "", BlockNotFound
+}
+
+// Determine if a block with the given hash is available and readable, but does
+// not return the block contents.
+func (this KeepClient) Ask(hash string) (contentLength int64, url string, err error) {
+ return this.AuthorizedAsk(hash, "", "")
+}
+
+// Determine if a block with the given hash is available and readable with the
+// given signature and timestamp, but does not return the block contents.
+// Issues HEAD requests in shuffled server order, same as AuthorizedGet.
+// NOTE(review): as above, non-200 response bodies are not closed.
+func (this KeepClient) AuthorizedAsk(hash string, signature string,
+ timestamp string) (contentLength int64, url string, err error) {
+ // Calculate the ordering for asking servers
+ sv := this.shuffledServiceRoots(hash)
+
+ for _, host := range sv {
+ var req *http.Request
+ var err error
+ if signature != "" {
+ url = fmt.Sprintf("%s/%s+A%s@%s", host, hash,
+ signature, timestamp)
+ } else {
+ url = fmt.Sprintf("%s/%s", host, hash)
+ }
+
+ if req, err = http.NewRequest("HEAD", url, nil); err != nil {
+ continue
+ }
+
+ req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
+
+ var resp *http.Response
+ if resp, err = this.Client.Do(req); err != nil {
+ continue
+ }
+
+ if resp.StatusCode == http.StatusOK {
+ return resp.ContentLength, url, nil
+ }
+ }
+
+ return 0, "", BlockNotFound
+
+}
+
+// Atomically read the service_roots field.
+// NOTE(review): relies on atomic.LoadPointer over an unsafe.Pointer cast of a
+// *[]string; pre-dates sync/atomic.Value, which would express this more
+// safely.
+func (this *KeepClient) ServiceRoots() []string {
+ r := (*[]string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&this.service_roots))))
+ return *r
+}
+
+// Atomically update the service_roots field. Enables you to update
+// service_roots without disrupting any GET or PUT operations that might
+// already be in progress.
+func (this *KeepClient) SetServiceRoots(svc []string) {
+ // Must be sorted for ShuffledServiceRoots() to produce consistent
+ // results.
+ roots := make([]string, len(svc))
+ copy(roots, svc)
+ sort.Strings(roots)
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&this.service_roots)),
+ unsafe.Pointer(&roots))
+}
+
+// Parsed form of a Keep locator: hash, optional size, and optional
+// permission signature hint (+A<signature>@<timestamp>).
+type Locator struct {
+ Hash string
+ Size int
+ Signature string
+ Timestamp string
+}
+
+// Parse the '+'-separated hint section of a locator ("size", "A..@..",
+// or forward-compatible uppercase hints) into a Locator. An unrecognized
+// lowercase/other hint invalidates the whole locator (zero value returned).
+func MakeLocator2(hash string, hints string) (locator Locator) {
+ locator.Hash = hash
+ if hints != "" {
+ signature_pat, _ := regexp.Compile("^A([[:xdigit:]]+)@([[:xdigit:]]{8})$")
+ for _, hint := range strings.Split(hints, "+") {
+ if hint != "" {
+ if match, _ := regexp.MatchString("^[[:digit:]]+$", hint); match {
+ fmt.Sscanf(hint, "%d", &locator.Size)
+ } else if m := signature_pat.FindStringSubmatch(hint); m != nil {
+ locator.Signature = m[1]
+ locator.Timestamp = m[2]
+ // NOTE(review): "^[:upper:]" is NOT the POSIX class — inside a
+ // pattern it must be written "^[[:upper:]]". As written it
+ // matches any one of the characters ':', 'u', 'p', 'e', 'r'.
+ } else if match, _ := regexp.MatchString("^[:upper:]", hint); match {
+ // Any unknown hint that starts with an uppercase letter is
+ // presumed to be valid and ignored, to permit forward compatibility.
+ } else {
+ // Unknown format; not a valid locator.
+ return Locator{"", 0, "", ""}
+ }
+ }
+ }
+ }
+ return locator
+}
+
+// Parse a full locator path "<32-hex-md5>[+hints...]" into a Locator.
+// Returns the zero-valued Locator when the path does not match.
+func MakeLocator(path string) Locator {
+ pathpattern, err := regexp.Compile("^([0-9a-f]{32})([+].*)?$")
+ if err != nil {
+ log.Print("Don't like regexp", err)
+ }
+
+ sm := pathpattern.FindStringSubmatch(path)
+ if sm == nil {
+ log.Print("Failed match ", path)
+ return Locator{"", 0, "", ""}
+ }
+
+ return MakeLocator2(sm[1], sm[2])
+}
--- /dev/null
+package keepclient
+
+import (
+ "arvados.org/streamer"
+ "crypto/md5"
+ "flag"
+ "fmt"
+ . "gopkg.in/check.v1"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+ TestingT(t)
+}
+
+// Gocheck boilerplate
+var _ = Suite(&ServerRequiredSuite{})
+var _ = Suite(&StandaloneSuite{})
+
+var no_server = flag.Bool("no-server", false, "Skip 'ServerRequireSuite'")
+
+// Tests that require the Keep server running
+type ServerRequiredSuite struct{}
+
+// Standalone tests
+type StandaloneSuite struct{}
+
+func pythonDir() string {
+ gopath := os.Getenv("GOPATH")
+ return fmt.Sprintf("%s/../python", strings.Split(gopath, ":")[0])
+}
+
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+ if *no_server {
+ c.Skip("Skipping tests that require server")
+ } else {
+ os.Chdir(pythonDir())
+ if err := exec.Command("python", "run_test_server.py", "start").Run(); err != nil {
+ panic("'python run_test_server.py start' returned error")
+ }
+ if err := exec.Command("python", "run_test_server.py", "start_keep").Run(); err != nil {
+ panic("'python run_test_server.py start_keep' returned error")
+ }
+ }
+}
+
+func (s *ServerRequiredSuite) TearDownSuite(c *C) {
+ os.Chdir(pythonDir())
+ exec.Command("python", "run_test_server.py", "stop_keep").Run()
+ exec.Command("python", "run_test_server.py", "stop").Run()
+}
+
+// End-to-end check that MakeKeepClient picks up the standard environment
+// variables and discovers the two test Keep servers.
+// NOTE(review): the err from the first MakeKeepClient call is never checked
+// (only the second call's err is asserted).
+func (s *ServerRequiredSuite) TestMakeKeepClient(c *C) {
+ os.Setenv("ARVADOS_API_HOST", "localhost:3001")
+ os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "")
+
+ kc, err := MakeKeepClient()
+ c.Check(kc.ApiServer, Equals, "localhost:3001")
+ c.Check(kc.ApiToken, Equals, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+ c.Check(kc.ApiInsecure, Equals, false)
+
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+
+ kc, err = MakeKeepClient()
+ c.Check(kc.ApiServer, Equals, "localhost:3001")
+ c.Check(kc.ApiToken, Equals, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+ c.Check(kc.ApiInsecure, Equals, true)
+ c.Check(kc.Client.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, Equals, true)
+
+ c.Assert(err, Equals, nil)
+ c.Check(len(kc.ServiceRoots()), Equals, 2)
+ c.Check(kc.ServiceRoots()[0], Equals, "http://localhost:25107")
+ c.Check(kc.ServiceRoots()[1], Equals, "http://localhost:25108")
+}
+
+// Golden-value test: the shuffled order for known hashes must be stable,
+// since it determines which servers receive each block.
+func (s *StandaloneSuite) TestShuffleServiceRoots(c *C) {
+ kc := KeepClient{}
+ kc.SetServiceRoots([]string{"http://localhost:25107", "http://localhost:25108", "http://localhost:25109", "http://localhost:25110", "http://localhost:25111", "http://localhost:25112", "http://localhost:25113", "http://localhost:25114", "http://localhost:25115", "http://localhost:25116", "http://localhost:25117", "http://localhost:25118", "http://localhost:25119", "http://localhost:25120", "http://localhost:25121", "http://localhost:25122", "http://localhost:25123"})
+
+ // "foo" acbd18db4cc2f85cedef654fccc4a4d8
+ foo_shuffle := []string{"http://localhost:25116", "http://localhost:25120", "http://localhost:25119", "http://localhost:25122", "http://localhost:25108", "http://localhost:25114", "http://localhost:25112", "http://localhost:25107", "http://localhost:25118", "http://localhost:25111", "http://localhost:25113", "http://localhost:25121", "http://localhost:25110", "http://localhost:25117", "http://localhost:25109", "http://localhost:25115", "http://localhost:25123"}
+ c.Check(kc.shuffledServiceRoots("acbd18db4cc2f85cedef654fccc4a4d8"), DeepEquals, foo_shuffle)
+
+ // "bar" 37b51d194a7513e45b56f6524f2d51f2
+ bar_shuffle := []string{"http://localhost:25108", "http://localhost:25112", "http://localhost:25119", "http://localhost:25107", "http://localhost:25110", "http://localhost:25116", "http://localhost:25122", "http://localhost:25120", "http://localhost:25121", "http://localhost:25117", "http://localhost:25111", "http://localhost:25123", "http://localhost:25118", "http://localhost:25113", "http://localhost:25114", "http://localhost:25115", "http://localhost:25109"}
+ c.Check(kc.shuffledServiceRoots("37b51d194a7513e45b56f6524f2d51f2"), DeepEquals, bar_shuffle)
+}
+
+// Stub PUT handler: asserts path, token, and body against the expected
+// values, replies 200, and reports which server handled the request on
+// 'handled'.
+type StubPutHandler struct {
+ c *C
+ expectPath string
+ expectApiToken string
+ expectBody string
+ handled chan string
+}
+
+func (this StubPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ this.c.Check(req.URL.Path, Equals, "/"+this.expectPath)
+ this.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", this.expectApiToken))
+ body, err := ioutil.ReadAll(req.Body)
+ this.c.Check(err, Equals, nil)
+ this.c.Check(body, DeepEquals, []byte(this.expectBody))
+ resp.WriteHeader(200)
+ this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+// Serve 'st' on a fixed localhost port and return the listener plus its URL.
+// NOTE(review): fixed port numbers can collide with other processes or with
+// listeners from earlier tests that are still draining.
+func RunBogusKeepServer(st http.Handler, port int) (listener net.Listener, url string) {
+ var err error
+ listener, err = net.ListenTCP("tcp", &net.TCPAddr{Port: port})
+ if err != nil {
+ panic(fmt.Sprintf("Could not listen on tcp port %v", port))
+ }
+
+ url = fmt.Sprintf("http://localhost:%d", port)
+
+ go http.Serve(listener, st)
+ return listener, url
+}
+
+// Shared scaffolding: start one stub server, build a client with a fixed
+// token, and hand the pieces to the test body 'f'.
+// NOTE(review): the MakeKeepClient error is discarded.
+func UploadToStubHelper(c *C, st http.Handler, f func(KeepClient, string,
+ io.ReadCloser, io.WriteCloser, chan uploadStatus)) {
+
+ listener, url := RunBogusKeepServer(st, 2990)
+ defer listener.Close()
+
+ kc, _ := MakeKeepClient()
+ kc.ApiToken = "abc123"
+
+ reader, writer := io.Pipe()
+ upload_status := make(chan uploadStatus)
+
+ f(kc, url, reader, writer, upload_status)
+}
+
+// Happy-path upload: write "foo" through a pipe and expect a 200 status with
+// one replica recorded.
+func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
+ log.Printf("TestUploadToStubKeepServer")
+
+ st := StubPutHandler{
+ c,
+ "acbd18db4cc2f85cedef654fccc4a4d8",
+ "abc123",
+ "foo",
+ make(chan string)}
+
+ UploadToStubHelper(c, st,
+ func(kc KeepClient, url string, reader io.ReadCloser,
+ writer io.WriteCloser, upload_status chan uploadStatus) {
+
+ go kc.uploadToKeepServer(url, st.expectPath, reader, upload_status, int64(len("foo")))
+
+ writer.Write([]byte("foo"))
+ writer.Close()
+
+ <-st.handled
+ status := <-upload_status
+ c.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200, 1, ""})
+ })
+
+ log.Printf("TestUploadToStubKeepServer done")
+}
+
+// Same as TestUploadToStubKeepServer, but the data is routed through a
+// streamer.AsyncStream buffer reader instead of the raw pipe.
+func (s *StandaloneSuite) TestUploadToStubKeepServerBufferReader(c *C) {
+ log.Printf("TestUploadToStubKeepServerBufferReader")
+
+ st := StubPutHandler{
+ c,
+ "acbd18db4cc2f85cedef654fccc4a4d8",
+ "abc123",
+ "foo",
+ make(chan string)}
+
+ UploadToStubHelper(c, st,
+ func(kc KeepClient, url string, reader io.ReadCloser,
+ writer io.WriteCloser, upload_status chan uploadStatus) {
+
+ tr := streamer.AsyncStreamFromReader(512, reader)
+ defer tr.Close()
+
+ br1 := tr.MakeStreamReader()
+
+ go kc.uploadToKeepServer(url, st.expectPath, br1, upload_status, 3)
+
+ writer.Write([]byte("foo"))
+ writer.Close()
+
+ <-st.handled
+
+ status := <-upload_status
+ c.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200, 1, ""})
+ })
+
+ log.Printf("TestUploadToStubKeepServerBufferReader done")
+}
+
+// Handler that always responds 500, reporting which server was hit.
+type FailHandler struct {
+ handled chan string
+}
+
+func (this FailHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ resp.WriteHeader(500)
+ this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+// A server-side 500 must surface through upload_status with statusCode 500.
+func (s *StandaloneSuite) TestFailedUploadToStubKeepServer(c *C) {
+ log.Printf("TestFailedUploadToStubKeepServer")
+
+ st := FailHandler{
+ make(chan string)}
+
+ hash := "acbd18db4cc2f85cedef654fccc4a4d8"
+
+ UploadToStubHelper(c, st,
+ func(kc KeepClient, url string, reader io.ReadCloser,
+ writer io.WriteCloser, upload_status chan uploadStatus) {
+
+ go kc.uploadToKeepServer(url, hash, reader, upload_status, 3)
+
+ writer.Write([]byte("foo"))
+ writer.Close()
+
+ <-st.handled
+
+ status := <-upload_status
+ c.Check(status.url, Equals, fmt.Sprintf("%s/%s", url, hash))
+ c.Check(status.statusCode, Equals, 500)
+ })
+ log.Printf("TestFailedUploadToStubKeepServer done")
+}
+
+// A running stub server: its listener (for shutdown) and base URL.
+type KeepServer struct {
+ listener net.Listener
+ url string
+}
+
+// Start n stub servers sharing handler 'st' on consecutive ports starting at
+// 'port'.
+func RunSomeFakeKeepServers(st http.Handler, n int, port int) (ks []KeepServer) {
+ ks = make([]KeepServer, n)
+
+ for i := 0; i < n; i += 1 {
+ boguslistener, bogusurl := RunBogusKeepServer(st, port+i)
+ ks[i] = KeepServer{boguslistener, bogusurl}
+ }
+
+ return ks
+}
+
+// PutB with Want_replicas=2 against 5 healthy stubs: exactly the first two
+// servers in the hash's shuffled order must receive the block (in either
+// order, since uploads run concurrently).
+func (s *StandaloneSuite) TestPutB(c *C) {
+ log.Printf("TestPutB")
+
+ hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+ st := StubPutHandler{
+ c,
+ hash,
+ "abc123",
+ "foo",
+ make(chan string, 2)}
+
+ kc, _ := MakeKeepClient()
+
+ kc.Want_replicas = 2
+ kc.ApiToken = "abc123"
+ service_roots := make([]string, 5)
+
+ ks := RunSomeFakeKeepServers(st, 5, 2990)
+
+ for i := 0; i < len(ks); i += 1 {
+ service_roots[i] = ks[i].url
+ defer ks[i].listener.Close()
+ }
+
+ kc.SetServiceRoots(service_roots)
+
+ kc.PutB([]byte("foo"))
+
+ shuff := kc.shuffledServiceRoots(fmt.Sprintf("%x", md5.Sum([]byte("foo"))))
+
+ s1 := <-st.handled
+ s2 := <-st.handled
+ c.Check((s1 == shuff[0] && s2 == shuff[1]) ||
+ (s1 == shuff[1] && s2 == shuff[0]),
+ Equals,
+ true)
+
+ log.Printf("TestPutB done")
+}
+
+// Same scenario driven through PutHR's streaming interface.
+func (s *StandaloneSuite) TestPutHR(c *C) {
+ log.Printf("TestPutHR")
+
+ hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+ st := StubPutHandler{
+ c,
+ hash,
+ "abc123",
+ "foo",
+ make(chan string, 2)}
+
+ kc, _ := MakeKeepClient()
+
+ kc.Want_replicas = 2
+ kc.ApiToken = "abc123"
+ service_roots := make([]string, 5)
+
+ ks := RunSomeFakeKeepServers(st, 5, 2990)
+
+ for i := 0; i < len(ks); i += 1 {
+ service_roots[i] = ks[i].url
+ defer ks[i].listener.Close()
+ }
+
+ kc.SetServiceRoots(service_roots)
+
+ reader, writer := io.Pipe()
+
+ go func() {
+ writer.Write([]byte("foo"))
+ writer.Close()
+ }()
+
+ kc.PutHR(hash, reader, 3)
+
+ shuff := kc.shuffledServiceRoots(hash)
+ log.Print(shuff)
+
+ s1 := <-st.handled
+ s2 := <-st.handled
+
+ c.Check((s1 == shuff[0] && s2 == shuff[1]) ||
+ (s1 == shuff[1] && s2 == shuff[0]),
+ Equals,
+ true)
+
+ log.Printf("TestPutHR done")
+}
+
+// TestPutWithFail mixes four working put servers with one failing server and
+// checks that PutB still achieves 2 replicas without error, skipping past the
+// failed server in the probe order.
+func (s *StandaloneSuite) TestPutWithFail(c *C) {
+	log.Printf("TestPutWithFail")
+
+	hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+	st := StubPutHandler{
+		c,
+		hash,
+		"abc123",
+		"foo",
+		make(chan string, 2)}
+
+	fh := FailHandler{
+		make(chan string, 1)}
+
+	kc, _ := MakeKeepClient()
+
+	kc.Want_replicas = 2
+	kc.ApiToken = "abc123"
+	service_roots := make([]string, 5)
+
+	// Four good servers plus one failing server on a separate port range.
+	ks1 := RunSomeFakeKeepServers(st, 4, 2990)
+	ks2 := RunSomeFakeKeepServers(fh, 1, 2995)
+
+	for i, k := range ks1 {
+		service_roots[i] = k.url
+		defer k.listener.Close()
+	}
+	for i, k := range ks2 {
+		service_roots[len(ks1)+i] = k.url
+		defer k.listener.Close()
+	}
+
+	kc.SetServiceRoots(service_roots)
+
+	shuff := kc.shuffledServiceRoots(fmt.Sprintf("%x", md5.Sum([]byte("foo"))))
+
+	phash, replicas, err := kc.PutB([]byte("foo"))
+
+	<-fh.handled
+
+	c.Check(err, Equals, nil)
+	// NOTE(review): expects an empty locator even though the put succeeded
+	// with 2 replicas — presumably the stub servers return no response
+	// body; confirm against StubPutHandler.
+	c.Check(phash, Equals, "")
+	c.Check(replicas, Equals, 2)
+	// The failing server occupies shuff[0] here, so the successful writes
+	// land on the next two servers in the probe sequence.
+	c.Check(<-st.handled, Equals, shuff[1])
+	c.Check(<-st.handled, Equals, shuff[2])
+}
+
+// TestPutWithTooManyFail uses one good server and four failing servers with
+// Want_replicas=2, and expects PutB to give up with InsufficientReplicasError
+// after storing only one replica.
+func (s *StandaloneSuite) TestPutWithTooManyFail(c *C) {
+	log.Printf("TestPutWithTooManyFail")
+
+	hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+	st := StubPutHandler{
+		c,
+		hash,
+		"abc123",
+		"foo",
+		make(chan string, 1)}
+
+	fh := FailHandler{
+		make(chan string, 4)}
+
+	kc, _ := MakeKeepClient()
+
+	kc.Want_replicas = 2
+	kc.ApiToken = "abc123"
+	service_roots := make([]string, 5)
+
+	ks1 := RunSomeFakeKeepServers(st, 1, 2990)
+	ks2 := RunSomeFakeKeepServers(fh, 4, 2991)
+
+	for i, k := range ks1 {
+		service_roots[i] = k.url
+		defer k.listener.Close()
+	}
+	for i, k := range ks2 {
+		service_roots[len(ks1)+i] = k.url
+		defer k.listener.Close()
+	}
+
+	kc.SetServiceRoots(service_roots)
+
+	shuff := kc.shuffledServiceRoots(fmt.Sprintf("%x", md5.Sum([]byte("foo"))))
+
+	_, replicas, err := kc.PutB([]byte("foo"))
+
+	c.Check(err, Equals, InsufficientReplicasError)
+	c.Check(replicas, Equals, 1)
+	// The single good server sits at shuff[1] for this hash/port layout.
+	c.Check(<-st.handled, Equals, shuff[1])
+
+	log.Printf("TestPutWithTooManyFail done")
+}
+
+// StubGetHandler serves a fixed body for GET requests while asserting that
+// the request path is "/<expectPath>" and the Authorization header carries
+// the expected OAuth2 token.
+type StubGetHandler struct {
+	c              *C
+	expectPath     string
+	expectApiToken string
+	returnBody     []byte
+}
+
+func (this StubGetHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+	this.c.Check(req.URL.Path, Equals, "/"+this.expectPath)
+	this.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", this.expectApiToken))
+	// Advertise the body length so clients can report an exact size.
+	resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(this.returnBody)))
+	resp.Write(this.returnBody)
+}
+
+// TestGet fetches a block from a single stub server and verifies the
+// returned reader, size, and URL.
+func (s *StandaloneSuite) TestGet(c *C) {
+	log.Printf("TestGet")
+
+	hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+	st := StubGetHandler{
+		c,
+		hash,
+		"abc123",
+		[]byte("foo")}
+
+	listener, url := RunBogusKeepServer(st, 2990)
+	defer listener.Close()
+
+	kc, _ := MakeKeepClient()
+	kc.ApiToken = "abc123"
+	kc.SetServiceRoots([]string{url})
+
+	r, n, url2, err := kc.Get(hash)
+	// Assert err before touching r: the original deferred r.Close() first,
+	// which panics on a nil reader if Get fails instead of reporting the
+	// failure through gocheck.
+	c.Assert(err, Equals, nil)
+	defer r.Close()
+	c.Check(n, Equals, int64(3))
+	c.Check(url2, Equals, fmt.Sprintf("%s/%s", url, hash))
+
+	content, err2 := ioutil.ReadAll(r)
+	c.Check(err2, Equals, nil)
+	c.Check(content, DeepEquals, []byte("foo"))
+
+	log.Printf("TestGet done")
+}
+
+// TestGetFail checks that Get against a server that always returns 500
+// yields BlockNotFound with zero size, empty URL, and a nil reader.
+func (s *StandaloneSuite) TestGetFail(c *C) {
+	hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+	st := FailHandler{make(chan string, 1)}
+
+	listener, url := RunBogusKeepServer(st, 2990)
+	defer listener.Close()
+
+	kc, _ := MakeKeepClient()
+	kc.ApiToken = "abc123"
+	kc.SetServiceRoots([]string{url})
+
+	r, n, url2, err := kc.Get(hash)
+	c.Check(err, Equals, BlockNotFound)
+	c.Check(n, Equals, int64(0))
+	c.Check(url2, Equals, "")
+	c.Check(r, Equals, nil)
+}
+
+// BarHandler unconditionally serves the body "bar" (regardless of the
+// requested hash) and reports each served request's host URL on 'handled'.
+// Used to provoke checksum mismatches in TestChecksum.
+type BarHandler struct {
+	handled chan string
+}
+
+func (this BarHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+	resp.Write([]byte("bar"))
+	this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+// TestChecksum verifies that content checksum validation happens during the
+// read: fetching barhash from a server that serves "bar" succeeds, while
+// fetching foohash from the same server fails with BadChecksum.
+func (s *StandaloneSuite) TestChecksum(c *C) {
+	foohash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+	barhash := fmt.Sprintf("%x", md5.Sum([]byte("bar")))
+
+	st := BarHandler{make(chan string, 1)}
+
+	listener, url := RunBogusKeepServer(st, 2990)
+	defer listener.Close()
+
+	kc, _ := MakeKeepClient()
+	kc.ApiToken = "abc123"
+	kc.SetServiceRoots([]string{url})
+
+	r, n, _, err := kc.Get(barhash)
+	// Check the Get error itself: the original overwrote it with the
+	// ReadAll error, so a failed Get was never detected here.
+	c.Check(err, Equals, nil)
+	_, err = ioutil.ReadAll(r)
+	c.Check(n, Equals, int64(3))
+	c.Check(err, Equals, nil)
+
+	<-st.handled
+
+	r, n, _, err = kc.Get(foohash)
+	c.Check(err, Equals, nil)
+	// The served bytes are "bar", so the checksum check performed while
+	// reading must report BadChecksum.
+	_, err = ioutil.ReadAll(r)
+	c.Check(n, Equals, int64(3))
+	c.Check(err, Equals, BadChecksum)
+
+	<-st.handled
+}
+
+// TestGetWithFailures mixes one working get server with four failing ones
+// and checks that Get retries past the failures and returns the block from
+// the single good server.
+func (s *StandaloneSuite) TestGetWithFailures(c *C) {
+
+	hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+	fh := FailHandler{
+		make(chan string, 1)}
+
+	st := StubGetHandler{
+		c,
+		hash,
+		"abc123",
+		[]byte("foo")}
+
+	kc, _ := MakeKeepClient()
+	kc.ApiToken = "abc123"
+	service_roots := make([]string, 5)
+
+	ks1 := RunSomeFakeKeepServers(st, 1, 2990)
+	ks2 := RunSomeFakeKeepServers(fh, 4, 2991)
+
+	for i, k := range ks1 {
+		service_roots[i] = k.url
+		defer k.listener.Close()
+	}
+	for i, k := range ks2 {
+		service_roots[len(ks1)+i] = k.url
+		defer k.listener.Close()
+	}
+
+	kc.SetServiceRoots(service_roots)
+
+	r, n, url2, err := kc.Get(hash)
+	// Ensure at least one failing server was actually contacted.
+	<-fh.handled
+	c.Check(err, Equals, nil)
+	c.Check(n, Equals, int64(3))
+	c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks1[0].url, hash))
+
+	content, err2 := ioutil.ReadAll(r)
+	c.Check(err2, Equals, nil)
+	c.Check(content, DeepEquals, []byte("foo"))
+}
+
+// TestPutGetHead is an integration test against a locally running API server
+// (localhost:3001) and Keep server (localhost:25108): it checks Ask on a
+// missing block, then Put, Get, and Ask round-trips for "foo".
+func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
+	// Point the client at the test API server; these env vars are read by
+	// MakeKeepClient.
+	os.Setenv("ARVADOS_API_HOST", "localhost:3001")
+	os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+	os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+
+	kc, err := MakeKeepClient()
+	c.Assert(err, Equals, nil)
+
+	hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+	{
+		// Block must not exist before the put.
+		n, _, err := kc.Ask(hash)
+		c.Check(err, Equals, BlockNotFound)
+		c.Check(n, Equals, int64(0))
+	}
+	{
+		hash2, replicas, err := kc.PutB([]byte("foo"))
+		// Expect the locator with a "+size" hint appended.
+		c.Check(hash2, Equals, fmt.Sprintf("%s+%v", hash, 3))
+		c.Check(replicas, Equals, 2)
+		c.Check(err, Equals, nil)
+	}
+	{
+		r, n, url2, err := kc.Get(hash)
+		c.Check(err, Equals, nil)
+		c.Check(n, Equals, int64(3))
+		c.Check(url2, Equals, fmt.Sprintf("http://localhost:25108/%s", hash))
+
+		content, err2 := ioutil.ReadAll(r)
+		c.Check(err2, Equals, nil)
+		c.Check(content, DeepEquals, []byte("foo"))
+	}
+	{
+		n, url2, err := kc.Ask(hash)
+		c.Check(err, Equals, nil)
+		c.Check(n, Equals, int64(3))
+		c.Check(url2, Equals, fmt.Sprintf("http://localhost:25108/%s", hash))
+	}
+}
+
+// StubProxyHandler imitates a Keep proxy: it answers every request with an
+// X-Keep-Replicas-Stored header of "2" and reports the request's host URL
+// on 'handled'.
+type StubProxyHandler struct {
+	handled chan string
+}
+
+func (this StubProxyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+	resp.Header().Set("X-Keep-Replicas-Stored", "2")
+	this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+// TestPutProxy checks that with Using_proxy set, a single proxy reporting
+// 2 stored replicas satisfies Want_replicas=2 in one request.
+func (s *StandaloneSuite) TestPutProxy(c *C) {
+	log.Printf("TestPutProxy")
+
+	st := StubProxyHandler{make(chan string, 1)}
+
+	kc, _ := MakeKeepClient()
+
+	kc.Want_replicas = 2
+	kc.Using_proxy = true
+	kc.ApiToken = "abc123"
+	service_roots := make([]string, 1)
+
+	ks1 := RunSomeFakeKeepServers(st, 1, 2990)
+
+	for i, k := range ks1 {
+		service_roots[i] = k.url
+		defer k.listener.Close()
+	}
+
+	kc.SetServiceRoots(service_roots)
+
+	_, replicas, err := kc.PutB([]byte("foo"))
+	<-st.handled
+
+	c.Check(err, Equals, nil)
+	c.Check(replicas, Equals, 2)
+
+	log.Printf("TestPutProxy done")
+}
+
+// TestPutProxyInsufficientReplicas checks that when the proxy reports only
+// 2 stored replicas but Want_replicas is 3, PutB returns
+// InsufficientReplicasError along with the achieved count.
+func (s *StandaloneSuite) TestPutProxyInsufficientReplicas(c *C) {
+	// Fixed: the log messages previously said "TestPutProxy", which made
+	// this test indistinguishable from TestPutProxy in the output.
+	log.Printf("TestPutProxyInsufficientReplicas")
+
+	st := StubProxyHandler{make(chan string, 1)}
+
+	kc, _ := MakeKeepClient()
+
+	kc.Want_replicas = 3
+	kc.Using_proxy = true
+	kc.ApiToken = "abc123"
+	service_roots := make([]string, 1)
+
+	ks1 := RunSomeFakeKeepServers(st, 1, 2990)
+
+	for i, k := range ks1 {
+		service_roots[i] = k.url
+		defer k.listener.Close()
+	}
+	kc.SetServiceRoots(service_roots)
+
+	_, replicas, err := kc.PutB([]byte("foo"))
+	<-st.handled
+
+	c.Check(err, Equals, InsufficientReplicasError)
+	c.Check(replicas, Equals, 2)
+
+	log.Printf("TestPutProxyInsufficientReplicas done")
+}
+
+// TestMakeLocator parses a full locator string "<hash>+<size>+A<sig>@<ts>"
+// and checks each extracted field.
+func (s *StandaloneSuite) TestMakeLocator(c *C) {
+	l := MakeLocator("91f372a266fe2bf2823cb8ec7fda31ce+3+Aabcde@12345678")
+
+	c.Check(l.Hash, Equals, "91f372a266fe2bf2823cb8ec7fda31ce")
+	c.Check(l.Size, Equals, 3)
+	c.Check(l.Signature, Equals, "abcde")
+	c.Check(l.Timestamp, Equals, "12345678")
+}
--- /dev/null
+/* Internal methods to support keepclient.go */
+package keepclient
+
+import (
+ "arvados.org/streamer"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// keepDisk mirrors one entry of the API server's keep services/disks JSON
+// listing; only the fields needed to build a service URL are decoded.
+type keepDisk struct {
+	Hostname string `json:"service_host"`
+	Port     int    `json:"service_port"`
+	SSL      bool   `json:"service_ssl_flag"`
+	SvcType  string `json:"service_type"`
+}
+
+// DiscoverKeepServers populates this client's service roots.  If
+// ARVADOS_KEEP_PROXY is set it is used directly; otherwise the API server's
+// keep_services/accessible endpoint is queried, falling back to the legacy
+// keep_disks endpoint on a non-200 response.  Sets Using_proxy when a proxy
+// is configured or listed.  Returns any request/decode error.
+func (this *KeepClient) DiscoverKeepServers() error {
+	if prx := os.Getenv("ARVADOS_KEEP_PROXY"); prx != "" {
+		this.SetServiceRoots([]string{prx})
+		this.Using_proxy = true
+		return nil
+	}
+
+	// Construct request of keep disk list
+	var req *http.Request
+	var err error
+
+	if req, err = http.NewRequest("GET", fmt.Sprintf("https://%s/arvados/v1/keep_services/accessible?format=json", this.ApiServer), nil); err != nil {
+		return err
+	}
+
+	// Add api token header
+	req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
+	if this.External {
+		req.Header.Add("X-External-Client", "1")
+	}
+
+	// Make the request
+	var resp *http.Response
+	if resp, err = this.Client.Do(req); err != nil {
+		return err
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		// Close the failed response's body before falling back so the
+		// underlying connection is released (it previously leaked).
+		resp.Body.Close()
+
+		// fall back on keep disks
+		if req, err = http.NewRequest("GET", fmt.Sprintf("https://%s/arvados/v1/keep_disks", this.ApiServer), nil); err != nil {
+			return err
+		}
+		req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
+		if resp, err = this.Client.Do(req); err != nil {
+			return err
+		}
+		if resp.StatusCode != http.StatusOK {
+			resp.Body.Close()
+			return errors.New(resp.Status)
+		}
+	}
+	// Release the successful response body once decoding is done.
+	defer resp.Body.Close()
+
+	type svcList struct {
+		Items []keepDisk `json:"items"`
+	}
+
+	// Decode json reply
+	dec := json.NewDecoder(resp.Body)
+	var m svcList
+	if err := dec.Decode(&m); err != nil {
+		return err
+	}
+
+	listed := make(map[string]bool)
+	service_roots := make([]string, 0, len(m.Items))
+
+	for _, element := range m.Items {
+		n := ""
+
+		if element.SSL {
+			n = "s"
+		}
+
+		// Construct server URL
+		url := fmt.Sprintf("http%s://%s:%d", n, element.Hostname, element.Port)
+
+		// Skip duplicates
+		if !listed[url] {
+			listed[url] = true
+			service_roots = append(service_roots, url)
+		}
+		if element.SvcType == "proxy" {
+			this.Using_proxy = true
+		}
+	}
+
+	this.SetServiceRoots(service_roots)
+
+	return nil
+}
+
+// shuffledServiceRoots returns a deterministic, hash-dependent ordering of
+// the configured service roots (the "probe sequence").  Every client with
+// the same server list and hash computes the same ordering, which is how
+// readers locate blocks written by other clients without coordination.
+func (this KeepClient) shuffledServiceRoots(hash string) (pseq []string) {
+	// Build an ordering with which to query the Keep servers based on the
+	// contents of the hash. "hash" is a hex-encoded number at least 8
+	// digits (32 bits) long
+
+	// seed used to calculate the next keep server from 'pool' to be added
+	// to 'pseq'
+	seed := hash
+
+	// Keep servers still to be added to the ordering
+	service_roots := this.ServiceRoots()
+	pool := make([]string, len(service_roots))
+	copy(pool, service_roots)
+
+	// output probe sequence
+	pseq = make([]string, 0, len(service_roots))
+
+	// iterate while there are servers left to be assigned
+	for len(pool) > 0 {
+
+		if len(seed) < 8 {
+			// ran out of digits in the seed
+			if len(pseq) < (len(hash) / 4) {
+				// the number of servers added to the probe
+				// sequence is less than the number of 4-digit
+				// slices in 'hash' so refill the seed with the
+				// last 4 digits.
+				seed = hash[len(hash)-4:]
+			}
+			seed += hash
+		}
+
+		// Take the next 8 digits (32 bits) and interpret as an integer,
+		// then modulus with the size of the remaining pool to get the next
+		// selected server.
+		probe, _ := strconv.ParseUint(seed[0:8], 16, 32)
+		probe %= uint64(len(pool))
+
+		// Append the selected server to the probe sequence and remove it
+		// from the pool.
+		pseq = append(pseq, pool[probe])
+		pool = append(pool[:probe], pool[probe+1:]...)
+
+		// Remove the digits just used from the seed
+		seed = seed[8:]
+	}
+	return pseq
+}
+
+// uploadStatus is the per-server result reported by uploadToKeepServer:
+// the request error (if any), the PUT URL, the HTTP status code, the
+// replica count the server claims to have stored, and the response body
+// (a locator on success, error text otherwise).
+type uploadStatus struct {
+	err             error
+	url             string
+	statusCode      int
+	replicas_stored int
+	response        string
+}
+
+// uploadToKeepServer PUTs 'body' to "<host>/<hash>" and reports exactly one
+// uploadStatus on upload_status.  expectedLength > 0 sets the request's
+// Content-Length.  When Using_proxy is set, the desired replica count is
+// passed via the X-Keep-Desired-Replicas header and the achieved count is
+// read back from X-Keep-Replicas-Stored (default 1).  Intended to run as a
+// goroutine; 'body' is consumed (and closed) by the HTTP transport.
+func (this KeepClient) uploadToKeepServer(host string, hash string, body io.ReadCloser,
+	upload_status chan<- uploadStatus, expectedLength int64) {
+
+	log.Printf("Uploading %s to %s", hash, host)
+
+	var req *http.Request
+	var err error
+	var url = fmt.Sprintf("%s/%s", host, hash)
+	if req, err = http.NewRequest("PUT", url, nil); err != nil {
+		upload_status <- uploadStatus{err, url, 0, 0, ""}
+		body.Close()
+		return
+	}
+
+	if expectedLength > 0 {
+		req.ContentLength = expectedLength
+	}
+
+	req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
+	req.Header.Add("Content-Type", "application/octet-stream")
+
+	if this.Using_proxy {
+		req.Header.Add(X_Keep_Desired_Replicas, fmt.Sprint(this.Want_replicas))
+	}
+
+	req.Body = body
+
+	var resp *http.Response
+	if resp, err = this.Client.Do(req); err != nil {
+		upload_status <- uploadStatus{err, url, 0, 0, ""}
+		body.Close()
+		return
+	}
+	// Release the response body on every remaining exit path; the
+	// original never closed it, leaking a connection per upload.
+	defer resp.Body.Close()
+
+	rep := 1
+	if xr := resp.Header.Get(X_Keep_Replicas_Stored); xr != "" {
+		fmt.Sscanf(xr, "%d", &rep)
+	}
+
+	// Read at most 4 KiB of the response (locator or error text).  Keyed
+	// fields replace the unkeyed literal, which `go vet` flags and which
+	// would silently break if io.LimitedReader's field order changed.
+	respbody, err2 := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
+	if err2 != nil && err2 != io.EOF {
+		upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, string(respbody)}
+		return
+	}
+
+	locator := strings.TrimSpace(string(respbody))
+
+	if resp.StatusCode == http.StatusOK {
+		upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, locator}
+	} else {
+		upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, locator}
+	}
+}
+
+// putReplicas uploads the block identified by 'hash' to servers in probe
+// order until Want_replicas replicas are stored, keeping up to
+// 'remaining_replicas' uploads in flight at once.  Each upload reads an
+// independent StreamReader from 'tr', so a slow server does not stall the
+// others.  Returns the locator from the last successful response, the
+// replica count, and InsufficientReplicasError if the server list is
+// exhausted first.
+func (this KeepClient) putReplicas(
+	hash string,
+	tr *streamer.AsyncStream,
+	expectedLength int64) (locator string, replicas int, err error) {
+
+	// Calculate the ordering for uploading to servers
+	sv := this.shuffledServiceRoots(hash)
+
+	// The next server to try contacting
+	next_server := 0
+
+	// The number of active writers
+	active := 0
+
+	// Used to communicate status from the upload goroutines
+	upload_status := make(chan uploadStatus)
+	defer close(upload_status)
+
+	// Desired number of replicas
+
+	remaining_replicas := this.Want_replicas
+
+	for remaining_replicas > 0 {
+		for active < remaining_replicas {
+			// Start some upload requests
+			if next_server < len(sv) {
+				go this.uploadToKeepServer(sv[next_server], hash, tr.MakeStreamReader(), upload_status, expectedLength)
+				next_server += 1
+				active += 1
+			} else {
+				if active == 0 {
+					// No servers left and nothing in flight:
+					// report how many replicas were achieved.
+					return locator, (this.Want_replicas - remaining_replicas), InsufficientReplicasError
+				} else {
+					break
+				}
+			}
+		}
+
+		// Now wait for something to happen.
+		status := <-upload_status
+		if status.statusCode == 200 {
+			// good news!
+			remaining_replicas -= status.replicas_stored
+			locator = status.response
+		} else {
+			// writing to keep server failed for some reason
+			log.Printf("Keep server put to %v failed with '%v'",
+				status.url, status.err)
+		}
+		active -= 1
+		log.Printf("Upload to %v status code: %v remaining replicas: %v active: %v", status.url, status.statusCode, remaining_replicas, active)
+	}
+
+	// NOTE(review): on success this reports Want_replicas even if servers
+	// stored more (remaining_replicas can go negative) — confirm callers
+	// expect the requested rather than the actual replica count.
+	return locator, this.Want_replicas, nil
+}
--- /dev/null
+/* AsyncStream pulls data in from a io.Reader source (such as a file or network
+socket) and fans out to any number of StreamReader sinks.
+
+Unlike io.TeeReader() or io.MultiWriter(), new StreamReaders can be created at
+any point in the lifetime of the AsyncStream, and each StreamReader will read
+the contents of the buffer up to the "frontier" of the buffer, at which point
+the StreamReader blocks until new data is read from the source.
+
+This is useful for minimizing readthrough latency as sinks can read and act on
+data from the source without waiting for the source to be completely buffered.
+It is also useful as a cache in situations where re-reading the original source
+potentially is costly, since the buffer retains a copy of the source data.
+
+Usage:
+
+Begin reading into a buffer with maximum size 'buffersize' from 'source':
+ stream := AsyncStreamFromReader(buffersize, source)
+
+To create a new reader (this can be called multiple times, each reader starts
+at the beginning of the buffer):
+    reader := stream.MakeStreamReader()
+
+Make sure to close the reader when you're done with it.
+ reader.Close()
+
+When you're done with the stream:
+ stream.Close()
+
+Alternately, if you already have a filled buffer and just want to read out from it:
+ stream := AsyncStreamFromSlice(buf)
+
+    r := stream.MakeStreamReader()
+
+*/
+
+package streamer
+
+import (
+ "io"
+)
+
+// AsyncStream owns the shared buffer and the channels used to coordinate
+// the transfer() and readersMonitor() goroutines: 'requests' carries read
+// requests from StreamReaders; the three bool channels track reader
+// registration, deregistration, and shutdown.
+type AsyncStream struct {
+	buffer            []byte
+	requests          chan sliceRequest
+	add_reader        chan bool
+	subtract_reader   chan bool
+	wait_zero_readers chan bool
+}
+
+// Reads from the buffer managed by the Transfer()
+// StreamReader is one independent cursor over the stream's buffer; each
+// reader tracks its own offset and receives results on its own channel.
+type StreamReader struct {
+	offset    int
+	stream    *AsyncStream
+	responses chan sliceResult
+}
+
+// AsyncStreamFromReader creates an AsyncStream backed by a fresh buffer of
+// 'buffersize' bytes and starts filling it from 'source' in the background.
+func AsyncStreamFromReader(buffersize int, source io.Reader) *AsyncStream {
+	stream := &AsyncStream{
+		buffer:            make([]byte, buffersize),
+		requests:          make(chan sliceRequest),
+		add_reader:        make(chan bool),
+		subtract_reader:   make(chan bool),
+		wait_zero_readers: make(chan bool),
+	}
+
+	go stream.transfer(source)
+	go stream.readersMonitor()
+
+	return stream
+}
+
+// AsyncStreamFromSlice creates an AsyncStream over an already-filled buffer;
+// no source reader is started (transfer runs with a nil source).
+func AsyncStreamFromSlice(buf []byte) *AsyncStream {
+	stream := &AsyncStream{
+		buffer:            buf,
+		requests:          make(chan sliceRequest),
+		add_reader:        make(chan bool),
+		subtract_reader:   make(chan bool),
+		wait_zero_readers: make(chan bool),
+	}
+
+	go stream.transfer(nil)
+	go stream.readersMonitor()
+
+	return stream
+}
+
+// MakeStreamReader registers a new reader with the stream (blocking if the
+// reader limit is reached) and returns it positioned at the buffer start.
+func (this *AsyncStream) MakeStreamReader() *StreamReader {
+	this.add_reader <- true
+	return &StreamReader{offset: 0, stream: this, responses: make(chan sliceResult)}
+}
+
+// Read implements io.Reader: it asks the transfer goroutine for up to
+// len(p) bytes starting at this reader's offset, blocking until data is
+// available.  A closed responses channel yields io.ErrUnexpectedEOF.
+func (this *StreamReader) Read(p []byte) (n int, err error) {
+	this.stream.requests <- sliceRequest{this.offset, len(p), this.responses}
+	result, ok := <-this.responses
+	if !ok {
+		return 0, io.ErrUnexpectedEOF
+	}
+	this.offset += len(result.slice)
+	return copy(p, result.slice), result.err
+}
+
+// WriteTo implements io.WriterTo: it streams the remainder of the buffer
+// (from this reader's current offset) into 'dest' in 32 KiB requests,
+// returning the number of bytes written.  EOF from the stream is success.
+func (this *StreamReader) WriteTo(dest io.Writer) (written int64, err error) {
+	// Record starting offset in order to correctly report the number of bytes sent
+	starting_offset := this.offset
+	for {
+		this.stream.requests <- sliceRequest{this.offset, 32 * 1024, this.responses}
+		rr, valid := <-this.responses
+		if !valid {
+			// Fixed: previously returned this.offset here, over-reporting
+			// the byte count whenever starting_offset was nonzero.
+			return int64(this.offset - starting_offset), io.ErrUnexpectedEOF
+		}
+		this.offset += len(rr.slice)
+		if rr.err != nil {
+			if rr.err == io.EOF {
+				// EOF is not an error.
+				return int64(this.offset - starting_offset), nil
+			}
+			return int64(this.offset - starting_offset), rr.err
+		}
+		// Fixed: destination write errors were silently ignored, so a
+		// failing sink looked like a successful copy.
+		if _, werr := dest.Write(rr.slice); werr != nil {
+			return int64(this.offset - starting_offset), werr
+		}
+	}
+}
+
+// Close the responses channel
+// Close deregisters this reader from the stream and tears down its
+// responses channel.  Always returns nil.
+// NOTE(review): closing this.responses while a read request is still queued
+// in transfer() looks like it could cause a send on a closed channel —
+// confirm the ordering guarantee with subtract_reader.
+func (this *StreamReader) Close() error {
+	this.stream.subtract_reader <- true
+	close(this.responses)
+	this.stream = nil
+	return nil
+}
+
+// Close shuts down the stream: it blocks until no readers remain (via
+// wait_zero_readers), then closes all coordination channels, terminating
+// the transfer() and readersMonitor() goroutines.
+func (this *AsyncStream) Close() {
+	this.wait_zero_readers <- true
+	close(this.requests)
+	close(this.add_reader)
+	close(this.subtract_reader)
+	close(this.wait_zero_readers)
+}
--- /dev/null
+package streamer
+
+import (
+ . "gopkg.in/check.v1"
+ "io"
+ "testing"
+ "time"
+)
+
+// Gocheck boilerplate
+// Test hooks the gocheck suite runner into the standard "go test" runner.
+func Test(t *testing.T) { TestingT(t) }
+
+var _ = Suite(&StandaloneSuite{})
+
+// Standalone tests
+type StandaloneSuite struct{}
+
+// TestReadIntoBuffer runs the read-into-buffer scenario with buffer sizes
+// just above and exactly at the total bytes written (225 and 224).
+func (s *StandaloneSuite) TestReadIntoBuffer(c *C) {
+	ReadIntoBufferHelper(c, 225)
+	ReadIntoBufferHelper(c, 224)
+}
+
+// HelperWrite128andCheck writes bytes 0..127 through 'writer', receives the
+// resulting slice from readIntoBuffer via 'slices', and verifies both the
+// slice contents and that only the first 128 bytes of 'buffer' were filled.
+func HelperWrite128andCheck(c *C, buffer []byte, writer io.Writer, slices chan nextSlice) {
+	out := make([]byte, 128)
+	for i := 0; i < 128; i += 1 {
+		out[i] = byte(i)
+	}
+	writer.Write(out)
+	s1 := <-slices
+	c.Check(len(s1.slice), Equals, 128)
+	c.Check(s1.reader_error, Equals, nil)
+	for i := 0; i < 128; i += 1 {
+		c.Check(s1.slice[i], Equals, byte(i))
+	}
+	// Bytes beyond the write must remain zero.
+	for i := 0; i < len(buffer); i += 1 {
+		if i < 128 {
+			c.Check(buffer[i], Equals, byte(i))
+		} else {
+			c.Check(buffer[i], Equals, byte(0))
+		}
+	}
+}
+
+// HelperWrite96andCheck writes 96 more bytes (values i/2) after a prior
+// 128-byte write, then verifies the new slice and the cumulative buffer
+// layout: [0,128) from the first write, [128,224) from this one, zeros after.
+func HelperWrite96andCheck(c *C, buffer []byte, writer io.Writer, slices chan nextSlice) {
+	out := make([]byte, 96)
+	for i := 0; i < 96; i += 1 {
+		out[i] = byte(i / 2)
+	}
+	writer.Write(out)
+	s1 := <-slices
+	c.Check(len(s1.slice), Equals, 96)
+	c.Check(s1.reader_error, Equals, nil)
+	for i := 0; i < 96; i += 1 {
+		c.Check(s1.slice[i], Equals, byte(i/2))
+	}
+	for i := 0; i < len(buffer); i += 1 {
+		if i < 128 {
+			c.Check(buffer[i], Equals, byte(i))
+		} else if i < (128 + 96) {
+			c.Check(buffer[i], Equals, byte((i-128)/2))
+		} else {
+			c.Check(buffer[i], Equals, byte(0))
+		}
+	}
+}
+
+// ReadIntoBufferHelper drives readIntoBuffer through a 128-byte write, a
+// 96-byte write, and pipe closure, expecting a final empty slice carrying
+// io.EOF.  'bufsize' must be >= 224 for both writes to fit.
+func ReadIntoBufferHelper(c *C, bufsize int) {
+	buffer := make([]byte, bufsize)
+
+	reader, writer := io.Pipe()
+	slices := make(chan nextSlice)
+
+	go readIntoBuffer(buffer, reader, slices)
+
+	HelperWrite128andCheck(c, buffer, writer, slices)
+	HelperWrite96andCheck(c, buffer, writer, slices)
+
+	writer.Close()
+	s1 := <-slices
+	c.Check(len(s1.slice), Equals, 0)
+	c.Check(s1.reader_error, Equals, io.EOF)
+}
+
+// TestReadIntoShortBuffer uses a 223-byte buffer, one byte too small for
+// 128+96 bytes of input, and expects readIntoBuffer to deliver the 95 bytes
+// that fit and then report io.ErrShortBuffer after the pipe is closed.
+func (s *StandaloneSuite) TestReadIntoShortBuffer(c *C) {
+	buffer := make([]byte, 223)
+	reader, writer := io.Pipe()
+	slices := make(chan nextSlice)
+
+	go readIntoBuffer(buffer, reader, slices)
+
+	HelperWrite128andCheck(c, buffer, writer, slices)
+
+	out := make([]byte, 96)
+	for i := 0; i < 96; i += 1 {
+		out[i] = byte(i / 2)
+	}
+
+	// Write will deadlock because it can't write all the data, so
+	// spin it off to a goroutine
+	go writer.Write(out)
+	s1 := <-slices
+
+	// Only 95 of the 96 bytes fit in the remaining buffer space.
+	c.Check(len(s1.slice), Equals, 95)
+	c.Check(s1.reader_error, Equals, nil)
+	for i := 0; i < 95; i += 1 {
+		c.Check(s1.slice[i], Equals, byte(i/2))
+	}
+	for i := 0; i < len(buffer); i += 1 {
+		if i < 128 {
+			c.Check(buffer[i], Equals, byte(i))
+		} else if i < (128 + 95) {
+			c.Check(buffer[i], Equals, byte((i-128)/2))
+		} else {
+			c.Check(buffer[i], Equals, byte(0))
+		}
+	}
+
+	writer.Close()
+	s1 = <-slices
+	c.Check(len(s1.slice), Equals, 0)
+	c.Check(s1.reader_error, Equals, io.ErrShortBuffer)
+}
+
+// TestTransfer exercises the full AsyncStream read path against a pipe
+// source: short reads, reads longer than the available data, a read that
+// blocks until more data arrives, a late "catch up" reader, EOF on writer
+// close, and a reader created after the stream is complete.
+func (s *StandaloneSuite) TestTransfer(c *C) {
+	reader, writer := io.Pipe()
+
+	tr := AsyncStreamFromReader(512, reader)
+
+	br1 := tr.MakeStreamReader()
+	out := make([]byte, 128)
+
+	{
+		// Write some data, and read into a buffer shorter than
+		// available data
+		for i := 0; i < 128; i += 1 {
+			out[i] = byte(i)
+		}
+
+		writer.Write(out[:100])
+
+		in := make([]byte, 64)
+		n, err := br1.Read(in)
+
+		c.Check(n, Equals, 64)
+		c.Check(err, Equals, nil)
+
+		for i := 0; i < 64; i += 1 {
+			c.Check(in[i], Equals, out[i])
+		}
+	}
+
+	{
+		// Write some more data, and read into buffer longer than
+		// available data
+		in := make([]byte, 64)
+		n, err := br1.Read(in)
+		c.Check(n, Equals, 36)
+		c.Check(err, Equals, nil)
+
+		for i := 0; i < 36; i += 1 {
+			c.Check(in[i], Equals, out[64+i])
+		}
+
+	}
+
+	{
+		// Test read before write
+		type Rd struct {
+			n   int
+			err error
+		}
+		rd := make(chan Rd)
+		in := make([]byte, 64)
+
+		go func() {
+			n, err := br1.Read(in)
+			rd <- Rd{n, err}
+		}()
+
+		// Give the reader goroutine time to block on the request
+		// before supplying the data it is waiting for.
+		time.Sleep(100 * time.Millisecond)
+		writer.Write(out[100:])
+
+		got := <-rd
+
+		c.Check(got.n, Equals, 28)
+		c.Check(got.err, Equals, nil)
+
+		for i := 0; i < 28; i += 1 {
+			c.Check(in[i], Equals, out[100+i])
+		}
+	}
+
+	br2 := tr.MakeStreamReader()
+	{
+		// Test 'catch up' reader
+		in := make([]byte, 256)
+		n, err := br2.Read(in)
+
+		c.Check(n, Equals, 128)
+		c.Check(err, Equals, nil)
+
+		for i := 0; i < 128; i += 1 {
+			c.Check(in[i], Equals, out[i])
+		}
+	}
+
+	{
+		// Test closing the reader
+		writer.Close()
+
+		in := make([]byte, 256)
+		n1, err1 := br1.Read(in)
+		n2, err2 := br2.Read(in)
+		c.Check(n1, Equals, 0)
+		c.Check(err1, Equals, io.EOF)
+		c.Check(n2, Equals, 0)
+		c.Check(err2, Equals, io.EOF)
+	}
+
+	{
+		// Test 'catch up' reader after closing
+		br3 := tr.MakeStreamReader()
+		in := make([]byte, 256)
+		n, err := br3.Read(in)
+
+		c.Check(n, Equals, 128)
+		c.Check(err, Equals, nil)
+
+		for i := 0; i < 128; i += 1 {
+			c.Check(in[i], Equals, out[i])
+		}
+
+		n, err = br3.Read(in)
+
+		c.Check(n, Equals, 0)
+		c.Check(err, Equals, io.EOF)
+	}
+}
+
+// TestTransferShortBuffer writes 101 bytes into a 100-byte stream buffer and
+// expects the first read to return the 100 buffered bytes and the second to
+// report io.ErrShortBuffer.
+func (s *StandaloneSuite) TestTransferShortBuffer(c *C) {
+	reader, writer := io.Pipe()
+
+	tr := AsyncStreamFromReader(100, reader)
+	defer tr.Close()
+
+	sr := tr.MakeStreamReader()
+	defer sr.Close()
+
+	out := make([]byte, 101)
+	// The write can't complete (buffer too small), so run it async.
+	go writer.Write(out)
+
+	n, err := sr.Read(out)
+	c.Check(n, Equals, 100)
+
+	n, err = sr.Read(out)
+	c.Check(n, Equals, 0)
+	c.Check(err, Equals, io.ErrShortBuffer)
+}
+
+// TestTransferFromBuffer reads a pre-filled 100-byte AsyncStream in two
+// 64-byte chunks (yielding 64 then 36 bytes) followed by EOF.
+func (s *StandaloneSuite) TestTransferFromBuffer(c *C) {
+	// Buffer for reads from 'r'
+	buffer := make([]byte, 100)
+	for i := 0; i < 100; i += 1 {
+		buffer[i] = byte(i)
+	}
+
+	tr := AsyncStreamFromSlice(buffer)
+
+	br1 := tr.MakeStreamReader()
+
+	in := make([]byte, 64)
+	{
+		n, err := br1.Read(in)
+
+		c.Check(n, Equals, 64)
+		c.Check(err, Equals, nil)
+
+		for i := 0; i < 64; i += 1 {
+			c.Check(in[i], Equals, buffer[i])
+		}
+	}
+	{
+		n, err := br1.Read(in)
+
+		c.Check(n, Equals, 36)
+		c.Check(err, Equals, nil)
+
+		for i := 0; i < 36; i += 1 {
+			c.Check(in[i], Equals, buffer[64+i])
+		}
+	}
+	{
+		n, err := br1.Read(in)
+
+		c.Check(n, Equals, 0)
+		c.Check(err, Equals, io.EOF)
+	}
+}
+
+// TestTransferIoCopy copies a StreamReader into an io.Pipe via io.Copy
+// (exercising the WriteTo fast path) and checks the receiving goroutine
+// sees the full buffer.
+// NOTE(review): the assertions run in a goroutine that is not joined, so
+// they could in principle execute after the test returns — confirm this
+// race is acceptable.
+func (s *StandaloneSuite) TestTransferIoCopy(c *C) {
+	// Buffer for reads from 'r'
+	buffer := make([]byte, 100)
+	for i := 0; i < 100; i += 1 {
+		buffer[i] = byte(i)
+	}
+
+	tr := AsyncStreamFromSlice(buffer)
+	defer tr.Close()
+
+	br1 := tr.MakeStreamReader()
+	defer br1.Close()
+
+	reader, writer := io.Pipe()
+
+	go func() {
+		p := make([]byte, 100)
+		n, err := reader.Read(p)
+		c.Check(n, Equals, 100)
+		c.Check(err, Equals, nil)
+		c.Check(p, DeepEquals, buffer)
+	}()
+
+	io.Copy(writer, br1)
+}
+
+// TestManyReaders spawns 200 concurrent readers (exceeding MAX_READERS) on
+// one stream while one slot is freed after 100ms, checking every reader
+// eventually sees "foo", "bar", "baz" and then EOF.
+func (s *StandaloneSuite) TestManyReaders(c *C) {
+	reader, writer := io.Pipe()
+
+	tr := AsyncStreamFromReader(512, reader)
+	defer tr.Close()
+
+	// Hold one reader slot, then release it so queued readers can start.
+	sr := tr.MakeStreamReader()
+	go func() {
+		time.Sleep(100 * time.Millisecond)
+		sr.Close()
+	}()
+
+	for i := 0; i < 200; i += 1 {
+		go func() {
+			br1 := tr.MakeStreamReader()
+			defer br1.Close()
+
+			p := make([]byte, 3)
+			n, err := br1.Read(p)
+			c.Check(n, Equals, 3)
+			c.Check(p[0:3], DeepEquals, []byte("foo"))
+
+			n, err = br1.Read(p)
+			c.Check(n, Equals, 3)
+			c.Check(p[0:3], DeepEquals, []byte("bar"))
+
+			n, err = br1.Read(p)
+			c.Check(n, Equals, 3)
+			c.Check(p[0:3], DeepEquals, []byte("baz"))
+
+			n, err = br1.Read(p)
+			c.Check(n, Equals, 0)
+			c.Check(err, Equals, io.EOF)
+		}()
+	}
+
+	writer.Write([]byte("foo"))
+	writer.Write([]byte("bar"))
+	writer.Write([]byte("baz"))
+	writer.Close()
+}
--- /dev/null
+/* Internal implementation of AsyncStream.
+Outline of operation:
+
+The kernel is the transfer() goroutine. It manages concurrent reads and
+appends to the "body" slice. "body" is a slice of "source_buffer" that
+represents the segment of the buffer that is already filled in and available
+for reading.
+
+To fill in the buffer, transfer() starts the readIntoBuffer() goroutine to read
+from the io.Reader source directly into source_buffer. Each read goes into a
+slice of buffer which spans the section immediately following the end of the
+current "body". Each time a Read completes, a slice representing the
+section just filled in (or any read errors/EOF) is sent over the "slices"
+channel back to the transfer() function.
+
+Meanwhile, the transfer() function selects() on two channels, the "requests"
+channel and the "slices" channel.
+
+When a message is received on the "slices" channel, this means a new
+section of the buffer has data, or an error is signaled. Since the data has
+been read directly into the source_buffer, it is able to simply increase the
+size of the body slice to encompass the newly filled in section. Then any
+pending reads are serviced with handleReadRequest (described below).
+
+When a message is received on the "requests" channel, it means a StreamReader
+wants access to a slice of the buffer. This is passed to handleReadRequest().
+
+The handleReadRequest() function takes a sliceRequest consisting of a buffer
+offset, maximum size, and channel to send the response. If there was an error
+reported from the source reader, it is returned. If the offset is less than
+the size of the body, the request can proceed, and it sends a body slice
+spanning the segment from offset to min(offset+maxsize, end of the body). If
+source reader status is EOF (done filling the buffer) and the read request
+offset is beyond end of the body, it responds with EOF. Otherwise, the read
+request is for a slice beyond the current size of "body" but we expect the body
+to expand as more data is added, so the request gets added to a wait list.
+
+The transfer() runs until the requests channel is closed by AsyncStream.Close()
+
+To track readers, streamer uses the readersMonitor() goroutine. This goroutine
+chooses which channels to receive from based on the number of outstanding
+readers. When a new reader is created, it sends a message on the add_reader
+channel. If the number of readers is already at MAX_READERS, this blocks the
+sender until an existing reader is closed. When a reader is closed, it sends a
+message on the subtract_reader channel. Finally, when AsyncStream.Close() is
+called, it sends a message on the wait_zero_readers channel, which will block
+the sender unless there are zero readers and it is safe to shut down the
+AsyncStream.
+*/
+
+package streamer
+
+import (
+ "io"
+)
+
+const MAX_READERS = 100
+
+// A slice passed from readIntoBuffer() to transfer()
+// 'slice' is the newly filled section of the shared buffer; 'reader_error'
+// carries the source reader's error (including io.EOF) when reading ends.
+type nextSlice struct {
+	slice        []byte
+	reader_error error
+}
+
+// A read request to the Transfer() function
+// Asks for up to 'maxsize' bytes starting at 'offset'; the answer is sent
+// on 'result'.
+type sliceRequest struct {
+	offset  int
+	maxsize int
+	result  chan<- sliceResult
+}
+
+// A read result from the Transfer() function
+type sliceResult struct {
+	slice []byte
+	err   error
+}
+
+// Supports writing into a buffer
+// bufferWriter adapts a fixed []byte to io.Writer for the WriterTo fast
+// path of readIntoBuffer; 'ptr' is the next write position.
+type bufferWriter struct {
+	buf []byte
+	ptr int
+}
+
+// Copy p into this.buf, increment pointer and return number of bytes written.
+// NOTE(review): when the buffer is full this returns n < len(p) with a nil
+// error, which violates the io.Writer contract — confirm callers tolerate
+// the resulting short-write behavior.
+func (this *bufferWriter) Write(p []byte) (n int, err error) {
+	n = copy(this.buf[this.ptr:], p)
+	this.ptr += n
+	return n, nil
+}
+
+// Read repeatedly from the reader and write sequentially into the specified
+// buffer, and report each read to channel 'c'.  Completes when Reader 'r'
+// reports on the error channel and closes channel 'c'.
+// When 'r' implements io.WriterTo, the whole transfer is delegated to
+// WriteTo in a single step; otherwise data is read in 64 KiB chunks.  If
+// the buffer fills while the source still has data, io.ErrShortBuffer is
+// reported.
+func readIntoBuffer(buffer []byte, r io.Reader, slices chan<- nextSlice) {
+	defer close(slices)
+
+	if writeto, ok := r.(io.WriterTo); ok {
+		n, err := writeto.WriteTo(&bufferWriter{buffer, 0})
+		if err != nil {
+			slices <- nextSlice{nil, err}
+		} else {
+			slices <- nextSlice{buffer[:n], nil}
+			slices <- nextSlice{nil, io.EOF}
+		}
+		return
+	} else {
+		// Initially entire buffer is available
+		ptr := buffer[:]
+		for {
+			var n int
+			var err error
+			if len(ptr) > 0 {
+				const readblock = 64 * 1024
+				// Read 64KiB into the next part of the buffer
+				if len(ptr) > readblock {
+					n, err = r.Read(ptr[:readblock])
+				} else {
+					n, err = r.Read(ptr)
+				}
+			} else {
+				// Ran out of buffer space, try reading one more byte
+				var b [1]byte
+				n, err = r.Read(b[:])
+
+				if n > 0 {
+					// Reader has more data but we have nowhere to
+					// put it, so we're stuffed
+					slices <- nextSlice{nil, io.ErrShortBuffer}
+				} else {
+					// Return some other error (hopefully EOF)
+					slices <- nextSlice{nil, err}
+				}
+				return
+			}
+
+			// End on error (includes EOF)
+			if err != nil {
+				slices <- nextSlice{nil, err}
+				return
+			}
+
+			if n > 0 {
+				// Make a slice with the contents of the read
+				slices <- nextSlice{ptr[:n], nil}
+
+				// Adjust the scratch space slice
+				ptr = ptr[n:]
+			}
+		}
+	}
+}
+
+// Handle a read request. Returns true if a response was sent, and false if
+// the request should be queued.
+func handleReadRequest(req sliceRequest, body []byte, reader_status error) bool {
+ if (reader_status != nil) && (reader_status != io.EOF) {
+ req.result <- sliceResult{nil, reader_status}
+ return true
+ } else if req.offset < len(body) {
+ var end int
+ if req.offset+req.maxsize < len(body) {
+ end = req.offset + req.maxsize
+ } else {
+ end = len(body)
+ }
+ req.result <- sliceResult{body[req.offset:end], nil}
+ return true
+ } else if (reader_status == io.EOF) && (req.offset >= len(body)) {
+ req.result <- sliceResult{nil, io.EOF}
+ return true
+ } else {
+ return false
+ }
+}
+
// transfer mediates between reads and appends.
// If 'source_reader' is not nil, reads data from 'source_reader' and stores it
// in the provided buffer. Otherwise, use the contents of 'buffer' as is.
// Accepts read requests on the buffer on the 'requests' channel. Completes
// when 'requests' channel is closed.
func (this *AsyncStream) transfer(source_reader io.Reader) {
	source_buffer := this.buffer
	requests := this.requests

	// currently buffered data
	var body []byte

	// for receiving slices from readIntoBuffer; stays nil when there is no
	// source reader (receive on a nil channel blocks forever, so the select
	// below then only ever fires on 'requests')
	var slices chan nextSlice = nil

	// indicates the status of the underlying reader
	var reader_status error = nil

	if source_reader != nil {
		// 'body' is the buffer slice representing the body content read so far
		body = source_buffer[:0]

		// used to communicate slices of the buffer as they are
		// readIntoBuffer will close 'slices' when it is done with it
		slices = make(chan nextSlice)

		// Spin it off
		go readIntoBuffer(source_buffer, source_reader, slices)
	} else {
		// use the whole buffer
		body = source_buffer[:]

		// buffer is complete
		reader_status = io.EOF
	}

	pending_requests := make([]sliceRequest, 0)

	for {
		select {
		case req, valid := <-requests:
			// Handle a buffer read request
			if valid {
				if !handleReadRequest(req, body, reader_status) {
					// Not satisfiable yet; retry when the next slice arrives.
					pending_requests = append(pending_requests, req)
				}
			} else {
				// closed 'requests' channel indicates we're done
				return
			}

		case bk, valid := <-slices:
			// Got a new slice from the reader
			if valid {
				reader_status = bk.reader_error

				if bk.slice != nil {
					// adjust body bounds now that another slice has been read
					body = source_buffer[0 : len(body)+len(bk.slice)]
				}

				// handle pending reads; requests satisfied by the new data
				// are removed, the rest stay queued
				n := 0
				for n < len(pending_requests) {
					if handleReadRequest(pending_requests[n], body, reader_status) {
						// move the element from the back of the slice to
						// position 'n', then shorten the slice by one element
						pending_requests[n] = pending_requests[len(pending_requests)-1]
						pending_requests = pending_requests[0 : len(pending_requests)-1]
					} else {

						// Request wasn't handled, so keep it in the request slice
						n += 1
					}
				}
			} else {
				if reader_status == io.EOF {
					// no more reads expected, so this is ok
				} else {
					// slices channel closed without signaling EOF
					reader_status = io.ErrUnexpectedEOF
				}
				// Setting to nil stops selecting on the closed channel.
				slices = nil
			}
		}
	}
}
+
// readersMonitor tracks the number of active readers on this stream.
// Readers are added via add_reader and removed via subtract_reader; a send
// on wait_zero_readers succeeds only while the count is zero. At most
// MAX_READERS readers are admitted: at the cap only subtract_reader is
// serviced, so further add_reader sends block until a reader leaves.
// Returns when any channel it is currently selecting on is closed.
func (this *AsyncStream) readersMonitor() {
	var readers int = 0

	for {
		if readers == 0 {
			select {
			case _, ok := <-this.wait_zero_readers:
				if ok {
					// nothing, just implicitly unblock the sender
				} else {
					return
				}
			case _, ok := <-this.add_reader:
				if ok {
					readers += 1
				} else {
					return
				}
			}
		} else if readers > 0 && readers < MAX_READERS {
			select {
			case _, ok := <-this.add_reader:
				if ok {
					readers += 1
				} else {
					return
				}

			case _, ok := <-this.subtract_reader:
				if ok {
					readers -= 1
				} else {
					return
				}
			}
		} else if readers == MAX_READERS {
			// At capacity: refuse new readers until one leaves.
			_, ok := <-this.subtract_reader
			if ok {
				readers -= 1
			} else {
				return
			}
		}
	}
}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+ <classpathentry including="**/*.java" kind="src" output="target/test-classes" path="src/test/java"/>
+ <classpathentry including="**/*.java" kind="src" path="src/main/java"/>
+ <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/apis/google-api-services-discovery/v1-rev42-1.18.0-rc/google-api-services-discovery-v1-rev42-1.18.0-rc.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/api-client/google-api-client/1.18.0-rc/google-api-client-1.18.0-rc.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/http-client/google-http-client/1.18.0-rc/google-http-client-1.18.0-rc.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar"/>
+ <classpathentry kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.0.1/httpclient-4.0.1.jar"/>
+ <classpathentry kind="var" path="M2_REPO/org/apache/httpcomponents/httpcore/4.0.1/httpcore-4.0.1.jar"/>
+ <classpathentry kind="var" path="M2_REPO/commons-logging/commons-logging/1.1.1/commons-logging-1.1.1.jar"/>
+ <classpathentry kind="var" path="M2_REPO/commons-codec/commons-codec/1.3/commons-codec-1.3.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/http-client/google-http-client-jackson2/1.18.0-rc/google-http-client-jackson2-1.18.0-rc.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/fasterxml/jackson/core/jackson-core/2.1.3/jackson-core-2.1.3.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/guava/guava/r05/guava-r05.jar"/>
+ <classpathentry kind="var" path="M2_REPO/log4j/log4j/1.2.16/log4j-1.2.16.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/googlecode/json-simple/json-simple/1.1.1/json-simple-1.1.1.jar"/>
+ <classpathentry kind="var" path="M2_REPO/junit/junit/4.8.1/junit-4.8.1.jar"/>
+ <classpathentry kind="output" path="target/classes"/>
+</classpath>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>java</name>
+ <comment>NO_M2ECLIPSE_SUPPORT: Project files created with the maven-eclipse-plugin are not supported in M2Eclipse.</comment>
+ <projects/>
+ <buildSpec>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ </natures>
+</projectDescription>
\ No newline at end of file
--- /dev/null
+#Mon Apr 28 10:33:40 EDT 2014
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.source=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
--- /dev/null
+/**
+ * This Sample test program is useful in getting started with working with Arvados Java SDK.
+ * @author radhika
+ *
+ */
+
+import org.arvados.sdk.java.Arvados;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+public class ArvadosSDKJavaExample {
+ /** Make sure the following environment variables are set before using Arvados:
+ * ARVADOS_API_TOKEN, ARVADOS_API_HOST and ARVADOS_API_HOST_INSECURE
+ * Set ARVADOS_API_HOST_INSECURE to true if you are using self-singed
+ * certificates in development and want to bypass certificate validations.
+ *
+ * If you are not using env variables, you can pass them to Arvados constructor.
+ *
+ * Please refer to http://doc.arvados.org/api/index.html for a complete list
+ * of the available API methods.
+ */
+ public static void main(String[] args) throws Exception {
+ String apiName = "arvados";
+ String apiVersion = "v1";
+
+ Arvados arv = new Arvados(apiName, apiVersion);
+
+ // Make a users list call. Here list on users is the method being invoked.
+ // Expect a Map containing the list of users as the response.
+ System.out.println("Making an arvados users.list api call");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("users", "list", params);
+ System.out.println("Arvados users.list:\n");
+ printResponse(response);
+
+ // get uuid of the first user from the response
+ List items = (List)response.get("items");
+
+ Map firstUser = (Map)items.get(0);
+ String userUuid = (String)firstUser.get("uuid");
+
+ // Make a users get call on the uuid obtained above
+ System.out.println("\n\n\nMaking a users.get call for " + userUuid);
+ params = new HashMap<String, Object>();
+ params.put("uuid", userUuid);
+ response = arv.call("users", "get", params);
+ System.out.println("Arvados users.get:\n");
+ printResponse(response);
+
+ // Make a pipeline_templates list call
+ System.out.println("\n\n\nMaking a pipeline_templates.list call.");
+
+ params = new HashMap<String, Object>();
+ response = arv.call("pipeline_templates", "list", params);
+
+ System.out.println("Arvados pipelinetempates.list:\n");
+ printResponse(response);
+ }
+
+ private static void printResponse(Map response){
+ Set<Entry<String,Object>> entrySet = (Set<Entry<String,Object>>)response.entrySet();
+ for (Map.Entry<String, Object> entry : entrySet) {
+ if ("items".equals(entry.getKey())) {
+ List items = (List)entry.getValue();
+ for (Object item : items) {
+ System.out.println(" " + item);
+ }
+ } else {
+ System.out.println(entry.getKey() + " = " + entry.getValue());
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+/**
+ * This Sample test program is useful in getting started with using Arvados Java SDK.
+ * This program creates an Arvados instance using the configured environment variables.
+ * It then provides a prompt to input method name and input parameters.
 * The program then invokes the API server to execute the specified method.
+ *
+ * @author radhika
+ */
+
+import org.arvados.sdk.java.Arvados;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+
public class ArvadosSDKJavaExampleWithPrompt {
  /**
   * Make sure the following environment variables are set before using Arvados:
   * ARVADOS_API_TOKEN, ARVADOS_API_HOST and ARVADOS_API_HOST_INSECURE Set
   * ARVADOS_API_HOST_INSECURE to true if you are using self-signed certificates
   * in development and want to bypass certificate validations.
   *
   * Please refer to http://doc.arvados.org/api/index.html for a complete list
   * of the available API methods.
   */
  public static void main(String[] args) throws Exception {
    String apiName = "arvados";
    String apiVersion = "v1";

    System.out.print("Welcome to Arvados Java SDK.");
    System.out.println("\nYou can use this example to call API methods interactively.");
    System.out.println("\nPlease refer to http://doc.arvados.org/api/index.html for api documentation");
    System.out.println("\nTo make the calls, enter input data at the prompt.");
    System.out.println("When entering parameters, you may enter a simple string or a well-formed json.");
    System.out.println("For example to get a user you may enter: user, zzzzz-12345-67890");
    System.out.println("Or to filter links, you may enter: filters, [[ \"name\", \"=\", \"can_manage\"]]");

    System.out.println("\nEnter ^C when you want to quit");

    // use configured env variables for API TOKEN, HOST and HOST_INSECURE
    Arvados arv = new Arvados(apiName, apiVersion);

    // Outer loop: one API call per iteration; any failure restarts the
    // prompt sequence (see the catch blocks below).
    while (true) {
      try {
        // prompt for resource
        System.out.println("\n\nEnter Resource name (for example users)");
        System.out.println("\nAvailable resources are: " + arv.getAvailableResourses());
        System.out.print("\n>>> ");

        // read resource name
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        String resourceName = in.readLine().trim();
        if ("".equals(resourceName)) {
          throw (new Exception("No resource name entered"));
        }
        // read method name
        System.out.println("\nEnter method name (for example get)");
        System.out.println("\nAvailable methods are: " + arv.getAvailableMethodsForResourse(resourceName));
        System.out.print("\n>>> ");
        String methodName = in.readLine().trim();
        if ("".equals(methodName)) {
          throw (new Exception("No method name entered"));
        }

        // read method parameters
        System.out.println("\nEnter parameter name, value (for example uuid, uuid-value)");
        System.out.println("\nAvailable parameters are: " +
              arv.getAvailableParametersForMethod(resourceName, methodName));

        System.out.print("\n>>> ");
        Map paramsMap = new HashMap();
        String param = "";
        // Collect "name, value" pairs until an empty line is entered.
        try {
          do {
            param = in.readLine();
            if (param.isEmpty())
              break;
            int index = param.indexOf(","); // first comma
            // NOTE(review): if no comma is present, index is -1 and
            // substring(0, -1) throws StringIndexOutOfBoundsException,
            // which is caught by the catch block below and restarts the
            // call setup.
            String paramName = param.substring(0, index);
            String paramValue = param.substring(index+1);
            paramsMap.put(paramName.trim(), paramValue.trim());

            System.out.println("\nEnter parameter name, value (for example uuid, uuid-value)");
            System.out.print("\n>>> ");
          } while (!param.isEmpty());
        } catch (Exception e) {
          System.out.println (e.getMessage());
          System.out.println ("\nSet up a new call");
          continue;
        }

        // Make a "call" for the given resource name and method name
        try {
          System.out.println ("Making a call for " + resourceName + " " + methodName);
          Map response = arv.call(resourceName, methodName, paramsMap);

          // Print the response; "items" lists are expanded one per line.
          Set<Entry<String,Object>> entrySet = (Set<Entry<String,Object>>)response.entrySet();
          for (Map.Entry<String, Object> entry : entrySet) {
            if ("items".equals(entry.getKey())) {
              List items = (List)entry.getValue();
              for (Object item : items) {
                System.out.println(" " + item);
              }
            } else {
              System.out.println(entry.getKey() + " = " + entry.getValue());
            }
          }
        } catch (Exception e){
          System.out.println (e.getMessage());
          System.out.println ("\nSet up a new call");
        }
      } catch (Exception e) {
        System.out.println (e.getMessage());
        System.out.println ("\nSet up a new call");
      }
    }
  }
}
--- /dev/null
+Welcome to Arvados Java SDK.
+
+Please refer to http://doc.arvados.org/sdk/java/index.html to get started
+ with Arvados Java SDK.
--- /dev/null
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.arvados.sdk.java</groupId>
+ <artifactId>java</artifactId>
+ <packaging>jar</packaging>
+ <version>1.0-SNAPSHOT</version>
+ <name>java</name>
+ <url>http://maven.apache.org</url>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.apis</groupId>
+ <artifactId>google-api-services-discovery</artifactId>
+ <version>v1-rev42-1.18.0-rc</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.api-client</groupId>
+ <artifactId>google-api-client</artifactId>
+ <version>1.18.0-rc</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client-jackson2</artifactId>
+ <version>1.18.0-rc</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ <version>r05</version>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <version>1.2.16</version>
+ </dependency>
+ <dependency>
+ <groupId>com.googlecode.json-simple</groupId>
+ <artifactId>json-simple</artifactId>
+ <version>1.1.1</version>
+ </dependency>
+
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.8.1</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <finalName>arvados-sdk-1.0</finalName>
+
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.1</version>
+ <configuration>
+ <source>1.6</source>
+ <target>1.6</target>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>attached</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <descriptorRefs>
+ <descriptorRef>jar-with-dependencies</descriptorRef>
+ </descriptorRefs>
+ <archive>
+ <manifest>
                  <mainClass>org.arvados.sdk.java.Arvados</mainClass>
+ </manifest>
+ <manifestEntries>
+ <!--<Premain-Class>Your.agent.class</Premain-Class> <Agent-Class>Your.agent.class</Agent-Class> -->
+ <Can-Redefine-Classes>true</Can-Redefine-Classes>
+ <Can-Retransform-Classes>true</Can-Retransform-Classes>
+ </manifestEntries>
+ </archive>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ <resources>
+ <resource>
+ <directory>src/main/resources</directory>
+ <targetPath>${basedir}/target/classes</targetPath>
+ <includes>
+ <include>log4j.properties</include>
+ </includes>
+ <filtering>true</filtering>
+ </resource>
+ <resource>
+ <directory>src/test/resources</directory>
+ <filtering>true</filtering>
+ </resource>
+ </resources>
+ </build>
+</project>
--- /dev/null
+package org.arvados.sdk.java;
+
+import com.google.api.client.http.javanet.*;
+import com.google.api.client.http.ByteArrayContent;
+import com.google.api.client.http.GenericUrl;
+import com.google.api.client.http.HttpContent;
+import com.google.api.client.http.HttpRequest;
+import com.google.api.client.http.HttpRequestFactory;
+import com.google.api.client.http.HttpTransport;
+import com.google.api.client.http.UriTemplate;
+import com.google.api.client.json.JsonFactory;
+import com.google.api.client.json.jackson2.JacksonFactory;
+import com.google.api.client.util.Maps;
+import com.google.api.services.discovery.Discovery;
+import com.google.api.services.discovery.model.JsonSchema;
+import com.google.api.services.discovery.model.RestDescription;
+import com.google.api.services.discovery.model.RestMethod;
+import com.google.api.services.discovery.model.RestMethod.Request;
+import com.google.api.services.discovery.model.RestResource;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.log4j.Logger;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+
+/**
+ * This class provides a java SDK interface to Arvados API server.
+ *
+ * Please refer to http://doc.arvados.org/api/ to learn about the
+ * various resources and methods exposed by the API server.
+ *
+ * @author radhika
+ */
+public class Arvados {
+ // HttpTransport and JsonFactory are thread-safe. So, use global instances.
+ private HttpTransport httpTransport;
+ private final JsonFactory jsonFactory = JacksonFactory.getDefaultInstance();
+
+ private String arvadosApiToken;
+ private String arvadosApiHost;
+ private boolean arvadosApiHostInsecure;
+
+ private String arvadosRootUrl;
+
+ private static final Logger logger = Logger.getLogger(Arvados.class);
+
+ // Get it once and reuse on the call requests
+ RestDescription restDescription = null;
+ String apiName = null;
+ String apiVersion = null;
+
+ public Arvados (String apiName, String apiVersion) throws Exception {
+ this (apiName, apiVersion, null, null, null);
+ }
+
+ public Arvados (String apiName, String apiVersion, String token,
+ String host, String hostInsecure) throws Exception {
+ this.apiName = apiName;
+ this.apiVersion = apiVersion;
+
+ // Read needed environmental variables if they are not passed
+ if (token != null) {
+ arvadosApiToken = token;
+ } else {
+ arvadosApiToken = System.getenv().get("ARVADOS_API_TOKEN");
+ if (arvadosApiToken == null) {
+ throw new Exception("Missing environment variable: ARVADOS_API_TOKEN");
+ }
+ }
+
+ if (host != null) {
+ arvadosApiHost = host;
+ } else {
+ arvadosApiHost = System.getenv().get("ARVADOS_API_HOST");
+ if (arvadosApiHost == null) {
+ throw new Exception("Missing environment variable: ARVADOS_API_HOST");
+ }
+ }
+ arvadosRootUrl = "https://" + arvadosApiHost;
+ arvadosRootUrl += (arvadosApiHost.endsWith("/")) ? "" : "/";
+
+ if (hostInsecure != null) {
+ arvadosApiHostInsecure = Boolean.valueOf(hostInsecure);
+ } else {
+ arvadosApiHostInsecure =
+ "true".equals(System.getenv().get("ARVADOS_API_HOST_INSECURE")) ? true : false;
+ }
+
+ // Create HTTP_TRANSPORT object
+ NetHttpTransport.Builder builder = new NetHttpTransport.Builder();
+ if (arvadosApiHostInsecure) {
+ builder.doNotValidateCertificate();
+ }
+ httpTransport = builder.build();
+
+ // initialize rest description
+ restDescription = loadArvadosApi();
+ }
+
+ /**
+ * Make a call to API server with the provide call information.
+ * @param resourceName
+ * @param methodName
+ * @param paramsMap
+ * @return Map
+ * @throws Exception
+ */
+ public Map call(String resourceName, String methodName,
+ Map<String, Object> paramsMap) throws Exception {
+ RestMethod method = getMatchingMethod(resourceName, methodName);
+
+ HashMap<String, Object> parameters = loadParameters(paramsMap, method);
+
+ GenericUrl url = new GenericUrl(UriTemplate.expand(
+ arvadosRootUrl + restDescription.getBasePath() + method.getPath(),
+ parameters, true));
+
+ try {
+ // construct the request
+ HttpRequestFactory requestFactory;
+ requestFactory = httpTransport.createRequestFactory();
+
+ // possibly required content
+ HttpContent content = null;
+
+ if (!method.getHttpMethod().equals("GET") &&
+ !method.getHttpMethod().equals("DELETE")) {
+ String objectName = resourceName.substring(0, resourceName.length()-1);
+ Object requestBody = paramsMap.get(objectName);
+ if (requestBody == null) {
+ error("POST method requires content object " + objectName);
+ }
+
+ content = new ByteArrayContent("application/json",((String)requestBody).getBytes());
+ }
+
+ HttpRequest request =
+ requestFactory.buildRequest(method.getHttpMethod(), url, content);
+
+ // make the request
+ List<String> authHeader = new ArrayList<String>();
+ authHeader.add("OAuth2 " + arvadosApiToken);
+ request.getHeaders().put("Authorization", authHeader);
+ String response = request.execute().parseAsString();
+
+ Map responseMap = jsonFactory.createJsonParser(response).parse(HashMap.class);
+
+ logger.debug(responseMap);
+
+ return responseMap;
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw e;
+ }
+ }
+
+ /**
+ * Get all supported resources by the API
+ * @return Set
+ */
+ public Set<String> getAvailableResourses() {
+ return (restDescription.getResources().keySet());
+ }
+
+ /**
+ * Get all supported method names for the given resource
+ * @param resourceName
+ * @return Set
+ * @throws Exception
+ */
+ public Set<String> getAvailableMethodsForResourse(String resourceName)
+ throws Exception {
+ Map<String, RestMethod> methodMap = getMatchingMethodMap (resourceName);
+ return (methodMap.keySet());
+ }
+
+ /**
+ * Get the parameters for the method in the resource sought.
+ * @param resourceName
+ * @param methodName
+ * @return Set
+ * @throws Exception
+ */
+ public Map<String,List<String>> getAvailableParametersForMethod(String resourceName, String methodName)
+ throws Exception {
+ RestMethod method = getMatchingMethod(resourceName, methodName);
+ Map<String, List<String>> parameters = new HashMap<String, List<String>>();
+ List<String> requiredParameters = new ArrayList<String>();
+ List<String> optionalParameters = new ArrayList<String>();
+ parameters.put ("required", requiredParameters);
+ parameters.put("optional", optionalParameters);
+
+ try {
+ // get any request parameters
+ Request request = method.getRequest();
+ if (request != null) {
+ Object required = request.get("required");
+ Object requestProperties = request.get("properties");
+ if (requestProperties != null) {
+ if (requestProperties instanceof Map) {
+ Map properties = (Map)requestProperties;
+ Set<String> propertyKeys = properties.keySet();
+ for (String property : propertyKeys) {
+ if (Boolean.TRUE.equals(required)) {
+ requiredParameters.add(property);
+ } else {
+ optionalParameters.add(property);
+ }
+ }
+ }
+ }
+ }
+
+ // get other listed parameters
+ Map<String,JsonSchema> methodParameters = method.getParameters();
+ for (Map.Entry<String, JsonSchema> entry : methodParameters.entrySet()) {
+ if (Boolean.TRUE.equals(entry.getValue().getRequired())) {
+ requiredParameters.add(entry.getKey());
+ } else {
+ optionalParameters.add(entry.getKey());
+ }
+ }
+ } catch (Exception e){
+ logger.error(e);
+ }
+
+ return parameters;
+ }
+
+ private HashMap<String, Object> loadParameters(Map<String, Object> paramsMap,
+ RestMethod method) throws Exception {
+ HashMap<String, Object> parameters = Maps.newHashMap();
+
+ // required parameters
+ if (method.getParameterOrder() != null) {
+ for (String parameterName : method.getParameterOrder()) {
+ JsonSchema parameter = method.getParameters().get(parameterName);
+ if (Boolean.TRUE.equals(parameter.getRequired())) {
+ Object parameterValue = paramsMap.get(parameterName);
+ if (parameterValue == null) {
+ error("missing required parameter: " + parameter);
+ } else {
+ putParameter(null, parameters, parameterName, parameter, parameterValue);
+ }
+ }
+ }
+ }
+
+ for (Map.Entry<String, Object> entry : paramsMap.entrySet()) {
+ String parameterName = entry.getKey();
+ Object parameterValue = entry.getValue();
+
+ if (parameterName.equals("contentType")) {
+ if (method.getHttpMethod().equals("GET") || method.getHttpMethod().equals("DELETE")) {
+ error("HTTP content type cannot be specified for this method: " + parameterName);
+ }
+ } else {
+ JsonSchema parameter = null;
+ if (restDescription.getParameters() != null) {
+ parameter = restDescription.getParameters().get(parameterName);
+ }
+ if (parameter == null && method.getParameters() != null) {
+ parameter = method.getParameters().get(parameterName);
+ }
+ putParameter(parameterName, parameters, parameterName, parameter, parameterValue);
+ }
+ }
+
+ return parameters;
+ }
+
+ private RestMethod getMatchingMethod(String resourceName, String methodName)
+ throws Exception {
+ Map<String, RestMethod> methodMap = getMatchingMethodMap(resourceName);
+
+ if (methodName == null) {
+ error("missing method name");
+ }
+
+ RestMethod method =
+ methodMap == null ? null : methodMap.get(methodName);
+ if (method == null) {
+ error("method not found: ");
+ }
+
+ return method;
+ }
+
+ private Map<String, RestMethod> getMatchingMethodMap(String resourceName)
+ throws Exception {
+ if (resourceName == null) {
+ error("missing resource name");
+ }
+
+ Map<String, RestMethod> methodMap = null;
+ Map<String, RestResource> resources = restDescription.getResources();
+ RestResource resource = resources.get(resourceName);
+ if (resource == null) {
+ error("resource not found");
+ }
+ methodMap = resource.getMethods();
+ return methodMap;
+ }
+
+ /**
+ * Not thread-safe. So, create for each request.
+ * @param apiName
+ * @param apiVersion
+ * @return
+ * @throws Exception
+ */
+ private RestDescription loadArvadosApi()
+ throws Exception {
+ try {
+ Discovery discovery;
+
+ Discovery.Builder discoveryBuilder =
+ new Discovery.Builder(httpTransport, jsonFactory, null);
+
+ discoveryBuilder.setRootUrl(arvadosRootUrl);
+ discoveryBuilder.setApplicationName(apiName);
+
+ discovery = discoveryBuilder.build();
+
+ return discovery.apis().getRest(apiName, apiVersion).execute();
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw e;
+ }
+ }
+
+ /**
+ * Convert the input parameter into its equivalent json string.
+ * Add this json string value to the parameters map to be sent to server.
+ * @param argName
+ * @param parameters
+ * @param parameterName
+ * @param parameter
+ * @param parameterValue
+ * @throws Exception
+ */
+ private void putParameter(String argName, Map<String, Object> parameters,
+ String parameterName, JsonSchema parameter, Object parameterValue)
+ throws Exception {
+ Object value = parameterValue;
+ if (parameter != null) {
+ if ("boolean".equals(parameter.getType())) {
+ value = Boolean.valueOf(parameterValue.toString());
+ } else if ("number".equals(parameter.getType())) {
+ value = new BigDecimal(parameterValue.toString());
+ } else if ("integer".equals(parameter.getType())) {
+ value = new BigInteger(parameterValue.toString());
+ } else if ("float".equals(parameter.getType())) {
+ value = new BigDecimal(parameterValue.toString());
+ } else if ("Java.util.Calendar".equals(parameter.getType())) {
+ value = new BigDecimal(parameterValue.toString());
+ } else if (("array".equals(parameter.getType())) ||
+ ("Array".equals(parameter.getType()))) {
+ if (parameterValue.getClass().isArray()){
+ value = getJsonValueFromArrayType(parameterValue);
+ } else if (List.class.isAssignableFrom(parameterValue.getClass())) {
+ value = getJsonValueFromListType(parameterValue);
+ }
+ } else if (("Hash".equals(parameter.getType())) ||
+ ("hash".equals(parameter.getType()))) {
+ value = getJsonValueFromMapType(parameterValue);
+ } else {
+ if (parameterValue.getClass().isArray()){
+ value = getJsonValueFromArrayType(parameterValue);
+ } else if (List.class.isAssignableFrom(parameterValue.getClass())) {
+ value = getJsonValueFromListType(parameterValue);
+ } else if (Map.class.isAssignableFrom(parameterValue.getClass())) {
+ value = getJsonValueFromMapType(parameterValue);
+ }
+ }
+ }
+
+ parameters.put(parameterName, value);
+ }
+
+ /**
+ * Convert the given input array into json string before sending to server.
+ * @param parameterValue
+ * @return
+ */
+ private String getJsonValueFromArrayType (Object parameterValue) {
+ String arrayStr = Arrays.deepToString((Object[])parameterValue);
+
+ // we can expect either an array of array objects or an array of objects
+ if (arrayStr.startsWith("[[") && arrayStr.endsWith("]]")) {
+ Object[][] array = new Object[1][];
+ arrayStr = arrayStr.substring(2, arrayStr.length()-2);
+ String jsonStr = getJsonStringForArrayStr(arrayStr);
+ String value = "[" + jsonStr + "]";
+ return value;
+ } else {
+ arrayStr = arrayStr.substring(1, arrayStr.length()-1);
+ return (getJsonStringForArrayStr(arrayStr));
+ }
+ }
+
+ private String getJsonStringForArrayStr(String arrayStr) {
+ Object[] array = arrayStr.split(",");
+ Object[] trimmedArray = new Object[array.length];
+ for (int i=0; i<array.length; i++){
+ trimmedArray[i] = array[i].toString().trim();
+ }
+ String value = JSONArray.toJSONString(Arrays.asList(trimmedArray));
+ return value;
+ }
+
+ /**
+ * Convert the given input List into json string before sending to server.
+ * @param parameterValue
+ * @return
+ */
+ private String getJsonValueFromListType (Object parameterValue) {
+ List paramList = (List)parameterValue;
+ Object[] array = new Object[paramList.size()];
+ Arrays.deepToString(paramList.toArray(array));
+ return (getJsonValueFromArrayType(array));
+ }
+
+ /**
+ * Convert the given input map into json string before sending to server.
+ * @param parameterValue
+ * @return
+ */
+ private String getJsonValueFromMapType (Object parameterValue) {
+ JSONObject json = new JSONObject((Map)parameterValue);
+ return json.toString();
+ }
+
+ private static void error(String detail) throws Exception {
+ String errorDetail = "ERROR: " + detail;
+
+ logger.debug(errorDetail);
+ throw new Exception(errorDetail);
+ }
+
+ public static void main(String[] args){
+ System.out.println("Welcome to Arvados Java SDK.");
+ System.out.println("Please refer to http://doc.arvados.org/sdk/java/index.html to get started with the the SDK.");
+ }
+
+}
--- /dev/null
+package org.arvados.sdk.java;
+
+import com.google.api.client.util.Lists;
+import com.google.api.client.util.Sets;
+
+import java.util.ArrayList;
+import java.util.SortedSet;
+
public class MethodDetails implements Comparable<MethodDetails> {
  // Method name; also the sort key for compareTo.
  String name;
  // Required parameter names, in declaration order.
  ArrayList<String> requiredParameters = Lists.newArrayList();
  // Optional parameter names, kept sorted.
  SortedSet<String> optionalParameters = Sets.newTreeSet();
  // True when the method takes a request body.
  boolean hasContent;

  /**
   * Order MethodDetails alphabetically by name.
   * NOTE(review): equals/hashCode are not overridden, so this ordering is
   * not consistent with equals — fine for sorting, do not rely on it for
   * set/map membership.
   */
  @Override
  public int compareTo(MethodDetails o) {
    if (o == this) {
      return 0;
    }
    return name.compareTo(o.name);
  }
}
\ No newline at end of file
--- /dev/null
+# To change log location, change log4j.appender.fileAppender.File
+
+log4j.rootLogger=DEBUG, fileAppender
+
+log4j.appender.fileAppender=org.apache.log4j.RollingFileAppender
+log4j.appender.fileAppender.File=${basedir}/log/arvados_sdk_java.log
+log4j.appender.fileAppender.Append=true
+log4j.appender.fileAppender.MaxFileSize=10MB
+log4j.appender.fileAppender.MaxBackupIndex=10
+log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.fileAppender.layout.ConversionPattern=[%d] %-5p %c %L %x - %m%n
--- /dev/null
+package org.arvados.sdk.java;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.GregorianCalendar;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/**
+ * Unit test for Arvados.
+ */
+public class ArvadosTest {
+
+ /**
+ * Test users.list api
+ * @throws Exception
+ */
+ @Test
+ public void testCallUsersList() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("users", "list", params);
+ assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+
+ List items = (List)response.get("items");
+ assertNotNull("expected users list items", items);
+ assertTrue("expected at least one item in users list", items.size()>0);
+
+ Map firstUser = (Map)items.get(0);
+ assertNotNull ("Expcted at least one user", firstUser);
+
+ assertEquals("Expected kind to be user", "arvados#user", firstUser.get("kind"));
+ assertNotNull("Expected uuid for first user", firstUser.get("uuid"));
+ }
+
+ /**
+ * Test users.get <uuid> api
+ * @throws Exception
+ */
+ @Test
+ public void testCallUsersGet() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ // call user.system and get uuid of this user
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("users", "list", params);
+
+ assertNotNull("expected users list", response);
+ List items = (List)response.get("items");
+ assertNotNull("expected users list items", items);
+
+ Map firstUser = (Map)items.get(0);
+ String userUuid = (String)firstUser.get("uuid");
+
+ // invoke users.get with the system user uuid
+ params = new HashMap<String, Object>();
+ params.put("uuid", userUuid);
+
+ response = arv.call("users", "get", params);
+
+ assertNotNull("Expected uuid for first user", response.get("uuid"));
+ assertEquals("Expected system user uuid", userUuid, response.get("uuid"));
+ }
+
+ /**
+ * Test users.create api
+ * @throws Exception
+ */
+ @Test
+ public void testCreateUser() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+ params.put("user", "{}");
+ Map response = arv.call("users", "create", params);
+
+ assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+ Object uuid = response.get("uuid");
+ assertNotNull("Expected uuid for first user", uuid);
+
+ // delete the object
+ params = new HashMap<String, Object>();
+ params.put("uuid", uuid);
+ response = arv.call("users", "delete", params);
+
+ // invoke users.get with the system user uuid
+ params = new HashMap<String, Object>();
+ params.put("uuid", uuid);
+
+ Exception caught = null;
+ try {
+ arv.call("users", "get", params);
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected 404", caught.getMessage().contains("Path not found"));
+ }
+
+ @Test
+ public void testCreateUserWithMissingRequiredParam() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Exception caught = null;
+ try {
+ arv.call("users", "create", params);
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected POST method requires content object user",
+ caught.getMessage().contains("ERROR: POST method requires content object user"));
+ }
+
+ /**
+ * Test users.create api
+ * @throws Exception
+ */
+ @Test
+ public void testCreateAndUpdateUser() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+ params.put("user", "{}");
+ Map response = arv.call("users", "create", params);
+
+ assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+ Object uuid = response.get("uuid");
+ assertNotNull("Expected uuid for first user", uuid);
+
+ // update this user
+ params = new HashMap<String, Object>();
+ params.put("user", "{}");
+ params.put("uuid", uuid);
+ response = arv.call("users", "update", params);
+
+ assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+ uuid = response.get("uuid");
+ assertNotNull("Expected uuid for first user", uuid);
+
+ // delete the object
+ params = new HashMap<String, Object>();
+ params.put("uuid", uuid);
+ response = arv.call("users", "delete", params);
+ }
+
+ /**
+ * Test constructing Arvados with an unsupported api name
+ * @throws Exception
+ */
+ @Test
+ public void testUnsupportedApiName() throws Exception {
+ Exception caught = null;
+ try {
+ Arvados arv = new Arvados("not_arvados", "v1");
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected 404 when unsupported api is used", caught.getMessage().contains("404 Not Found"));
+ }
+
+ /**
+ * Test unsupported api version api
+ * @throws Exception
+ */
+ @Test
+ public void testUnsupportedVersion() throws Exception {
+ Exception caught = null;
+ try {
+ Arvados arv = new Arvados("arvados", "v2");
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected 404 when unsupported version is used", caught.getMessage().contains("404 Not Found"));
+ }
+
+ /**
+ * Test calling a nonexistent resource
+ * @throws Exception
+ */
+ @Test
+ public void testCallForNoSuchResrouce() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Exception caught = null;
+ try {
+ arv.call("abcd", "list", null);
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected ERROR: 404 not found", caught.getMessage().contains("ERROR: resource not found"));
+ }
+
+ /**
+ * Test calling a nonexistent method on a resource
+ * @throws Exception
+ */
+ @Test
+ public void testCallForNoSuchResrouceMethod() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Exception caught = null;
+ try {
+ arv.call("users", "abcd", null);
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected ERROR: 404 not found", caught.getMessage().contains("ERROR: method not found"));
+ }
+
+ /**
+ * Test pipeline_templates.create api
+ * @throws Exception
+ */
+ @Test
+ public void testCreateAndGetPipelineTemplate() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ File file = new File(getClass().getResource( "/first_pipeline.json" ).toURI());
+ byte[] data = new byte[(int)file.length()];
+ try {
+ FileInputStream is = new FileInputStream(file);
+ is.read(data);
+ is.close();
+ }catch(Exception e) {
+ e.printStackTrace();
+ }
+
+ Map<String, Object> params = new HashMap<String, Object>();
+ params.put("pipeline_template", new String(data));
+ Map response = arv.call("pipeline_templates", "create", params);
+
+ assertEquals("Expected kind to be user", "arvados#pipelineTemplate", response.get("kind"));
+ String uuid = (String)response.get("uuid");
+ assertNotNull("Expected uuid for pipeline template", uuid);
+
+ // get the pipeline
+ params = new HashMap<String, Object>();
+ params.put("uuid", uuid);
+ response = arv.call("pipeline_templates", "get", params);
+
+ assertEquals("Expected kind to be user", "arvados#pipelineTemplate", response.get("kind"));
+ assertEquals("Expected uuid for pipeline template", uuid, response.get("uuid"));
+
+ // delete the object
+ params = new HashMap<String, Object>();
+ params.put("uuid", uuid);
+ response = arv.call("pipeline_templates", "delete", params);
+ }
+
+ /**
+ * Test users.list api with explicitly passed token and host
+ * @throws Exception
+ */
+ @Test
+ public void testArvadosWithTokenPassed() throws Exception {
+ String token = System.getenv().get("ARVADOS_API_TOKEN");
+ String host = System.getenv().get("ARVADOS_API_HOST");
+ String hostInsecure = System.getenv().get("ARVADOS_API_HOST_INSECURE");
+
+ Arvados arv = new Arvados("arvados", "v1", token, host, hostInsecure);
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("users", "list", params);
+ assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+ }
+
+ /**
+ * Test users.list api with the limit parameter
+ * @throws Exception
+ */
+ @Test
+ public void testCallUsersListWithLimit() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("users", "list", params);
+ assertEquals("Expected users.list in response", "arvados#userList", response.get("kind"));
+
+ List items = (List)response.get("items");
+ assertNotNull("expected users list items", items);
+ assertTrue("expected at least one item in users list", items.size()>0);
+
+ int numUsersListItems = items.size();
+
+ // make the request again with limit
+ params = new HashMap<String, Object>();
+ params.put("limit", numUsersListItems-1);
+
+ response = arv.call("users", "list", params);
+
+ assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+
+ items = (List)response.get("items");
+ assertNotNull("expected users list items", items);
+ assertTrue("expected at least one item in users list", items.size()>0);
+
+ int numUsersListItems2 = items.size();
+ assertEquals ("Got more users than requested", numUsersListItems-1, numUsersListItems2);
+ }
+
+ @Test
+ public void testGetLinksWithFilters() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("links", "list", params);
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+ String[][] filters = new String[1][];
+ String[] condition = new String[3];
+ condition[0] = "name";
+ condition[1] = "=";
+ condition[2] = "can_manage";
+ filters[0] = condition;
+ params.put("filters", filters);
+
+ response = arv.call("links", "list", params);
+
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+ assertFalse("Expected no can_manage in response", response.toString().contains("\"name\":\"can_manage\""));
+ }
+
+ @Test
+ public void testGetLinksWithFiltersAsList() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("links", "list", params);
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+ List<List> filters = new ArrayList<List>();
+ List<String> condition = new ArrayList<String>();
+ condition.add("name");
+ condition.add("is_a");
+ condition.add("can_manage");
+ filters.add(condition);
+ params.put("filters", filters);
+
+ response = arv.call("links", "list", params);
+
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+ assertFalse("Expected no can_manage in response", response.toString().contains("\"name\":\"can_manage\""));
+ }
+
+ @Test
+ public void testGetLinksWithTimestampFilters() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("links", "list", params);
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+ // get links created "tomorrow". Expect none in response
+ Calendar calendar = new GregorianCalendar();
+ calendar.setTime(new Date());
+ calendar.add(Calendar.DAY_OF_MONTH, 1);
+
+ Object[][] filters = new Object[1][];
+ Object[] condition = new Object[3];
+ condition[0] = "created_at";
+ condition[1] = ">";
+ condition[2] = calendar.get(Calendar.YEAR) + "-" + (calendar.get(Calendar.MONTH)+1) + "-" + calendar.get(Calendar.DAY_OF_MONTH);
+ filters[0] = condition;
+ params.put("filters", filters);
+
+ response = arv.call("links", "list", params);
+
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+ int items_avail = ((BigDecimal)response.get("items_available")).intValue();
+ assertEquals("Expected zero links", items_avail, 0);
+ }
+
+ @Test
+ public void testGetLinksWithWhereClause() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map<String, String> where = new HashMap<String, String>();
+ where.put("where", "updated_at > '2014-05-01'");
+
+ params.put("where", where);
+
+ Map response = arv.call("links", "list", params);
+
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+ }
+
+ @Test
+ public void testGetAvailableResources() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+ Set<String> resources = arv.getAvailableResourses();
+ assertNotNull("Expected resources", resources);
+ assertTrue("Excected users in resrouces", resources.contains("users"));
+ }
+
+ @Test
+ public void testGetAvailableMethodsResources() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+ Set<String> methods = arv.getAvailableMethodsForResourse("users");
+ assertNotNull("Expected resources", methods);
+ assertTrue("Excected create method for users", methods.contains("create"));
+ }
+
+ @Test
+ public void testGetAvailableParametersForUsersGetMethod() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+ Map<String,List<String>> parameters = arv.getAvailableParametersForMethod("users", "get");
+ assertNotNull("Expected parameters", parameters);
+ assertTrue("Excected uuid parameter for get method for users", parameters.get("required").contains("uuid"));
+ }
+
+ @Test
+ public void testGetAvailableParametersForUsersCreateMethod() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+ Map<String,List<String>> parameters = arv.getAvailableParametersForMethod("users", "create");
+ assertNotNull("Expected parameters", parameters);
+ assertTrue("Excected user parameter for get method for users", parameters.get("required").contains("user"));
+ }
+
+ @Test
+ public void testGetAvailableParametersForUsersListMethod() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+ Map<String,List<String>> parameters = arv.getAvailableParametersForMethod("users", "list");
+ assertNotNull("Expected parameters", parameters);
+ assertTrue("Excected no required parameter for list method for users", parameters.get("required").size() == 0);
+ assertTrue("Excected some optional parameters for list method for users", parameters.get("optional").contains("filters"));
+ }
+
+}
\ No newline at end of file
--- /dev/null
+{
+ "name":"first pipeline",
+ "components":{
+ "do_hash":{
+ "script":"hash.py",
+ "script_parameters":{
+ "input":{
+ "required": true,
+ "dataclass": "Collection"
+ }
+ },
+ "script_version":"master",
+ "output_is_persistent":true
+ }
+ }
+}
Protocol scheme. Default: C<ARVADOS_API_PROTOCOL_SCHEME> environment
variable, or C<https>
-=item apiToken
+=item authToken
Authorization token. Default: C<ARVADOS_API_TOKEN> environment variable
{
my $self = shift;
my %req;
- $req{$self->{'method'}} = $self->{'uri'};
+ my %content;
+ my $method = $self->{'method'};
+ if ($method eq 'GET' || $method eq 'HEAD') {
+ $content{'_method'} = $method;
+ $method = 'POST';
+ }
+ $req{$method} = $self->{'uri'};
$self->{'req'} = new HTTP::Request (%req);
$self->{'req'}->header('Authorization' => ('OAuth2 ' . $self->{'authToken'})) if $self->{'authToken'};
$self->{'req'}->header('Accept' => 'application/json');
- my %content;
my ($p, $v);
while (($p, $v) = each %{$self->{'queryParams'}}) {
$content{$p} = (ref($v) eq "") ? $v : JSON::encode_json($v);
/dist/
/*.egg-info
/tmp
-setup.py
from httplib import BadStatusLine
if 'headers' not in kwargs:
kwargs['headers'] = {}
+
+ if config.get("ARVADOS_EXTERNAL_CLIENT", "") == "true":
+ kwargs['headers']['X-External-Client'] = '1'
+
kwargs['headers']['Authorization'] = 'OAuth2 %s' % config.get('ARVADOS_API_TOKEN', 'ARVADOS_API_TOKEN_not_set')
try:
return self.orig_http_request(uri, **kwargs)
path = None
return path
-def api(version=None):
+def api(version=None, cache=True):
global services
if 'ARVADOS_DEBUG' in config.settings():
logging.basicConfig(level=logging.DEBUG)
- if not services.get(version):
+ if not cache or not services.get(version):
apiVersion = version
if not version:
apiVersion = 'v1'
ca_certs = None # use httplib2 default
http = httplib2.Http(ca_certs=ca_certs,
- cache=http_cache('discovery'))
+ cache=(http_cache('discovery') if cache else None))
http = credentials.authorize(http)
if re.match(r'(?i)^(true|1|yes)$',
config.get('ARVADOS_API_HOST_INSECURE', 'no')):
http.disable_ssl_certificate_validation=True
services[version] = apiclient.discovery.build(
'arvados', apiVersion, http=http, discoveryServiceUrl=url)
+ http.cache = None
return services[version]
-
pass
class NotImplementedError(Exception):
pass
+class NoKeepServersError(Exception):
+ pass
--- /dev/null
+from ws4py.client.threadedclient import WebSocketClient
+import thread
+import json
+import os
+import time
+import ssl
+import re
+import config
+
+class EventClient(WebSocketClient):
+ def __init__(self, url, filters, on_event):
+ ssl_options = None
+ if re.match(r'(?i)^(true|1|yes)$',
+ config.get('ARVADOS_API_HOST_INSECURE', 'no')):
+ ssl_options={'cert_reqs': ssl.CERT_NONE}
+ else:
+ ssl_options={'cert_reqs': ssl.CERT_REQUIRED}
+
+ super(EventClient, self).__init__(url, ssl_options)
+ self.filters = filters
+ self.on_event = on_event
+
+ def opened(self):
+ self.send(json.dumps({"method": "subscribe", "filters": self.filters}))
+
+ def received_message(self, m):
+ self.on_event(json.loads(str(m)))
+
+def subscribe(api, filters, on_event):
+ url = "{}?api_token={}".format(api._rootDesc['websocketUrl'], config.get('ARVADOS_API_TOKEN'))
+ ws = EventClient(url, filters, on_event)
+ ws.connect()
+ return ws
+++ /dev/null
-#
-# FUSE driver for Arvados Keep
-#
-
-import os
-import sys
-
-import llfuse
-import errno
-import stat
-import threading
-import arvados
-import pprint
-
-from time import time
-from llfuse import FUSEError
-
-class Directory(object):
- '''Generic directory object, backed by a dict.
- Consists of a set of entries with the key representing the filename
- and the value referencing a File or Directory object.
- '''
-
- def __init__(self, parent_inode):
- self.inode = None
- self.parent_inode = parent_inode
- self._entries = {}
-
- def __getitem__(self, item):
- return self._entries[item]
-
- def __setitem__(self, key, item):
- self._entries[key] = item
-
- def __iter__(self):
- return self._entries.iterkeys()
-
- def items(self):
- return self._entries.items()
-
- def __contains__(self, k):
- return k in self._entries
-
- def size(self):
- return 0
-
-class MagicDirectory(Directory):
- '''A special directory that logically contains the set of all extant
- keep locators. When a file is referenced by lookup(), it is tested
- to see if it is a valid keep locator to a manifest, and if so, loads the manifest
- contents as a subdirectory of this directory with the locator as the directory name.
- Since querying a list of all extant keep locators is impractical, only loaded collections
- are visible to readdir().'''
-
- def __init__(self, parent_inode, inodes):
- super(MagicDirectory, self).__init__(parent_inode)
- self.inodes = inodes
-
- def __contains__(self, k):
- if k in self._entries:
- return True
- try:
- if arvados.Keep.get(k):
- return True
- else:
- return False
- except Exception as e:
- #print 'exception keep', e
- return False
-
- def __getitem__(self, item):
- if item not in self._entries:
- collection = arvados.CollectionReader(arvados.Keep.get(item))
- self._entries[item] = self.inodes.add_entry(Directory(self.inode))
- self.inodes.load_collection(self._entries[item], collection)
- return self._entries[item]
-
-class File(object):
- '''Wraps a StreamFileReader for use by Directory.'''
-
- def __init__(self, parent_inode, reader):
- self.inode = None
- self.parent_inode = parent_inode
- self.reader = reader
-
- def size(self):
- return self.reader.size()
-
-class FileHandle(object):
- '''Connects a numeric file handle to a File or Directory object that has
- been opened by the client.'''
-
- def __init__(self, fh, entry):
- self.fh = fh
- self.entry = entry
-
-class Inodes(object):
- '''Manage the set of inodes. This is the mapping from a numeric id
- to a concrete File or Directory object'''
-
- def __init__(self):
- self._entries = {}
- self._counter = llfuse.ROOT_INODE
-
- def __getitem__(self, item):
- return self._entries[item]
-
- def __setitem__(self, key, item):
- self._entries[key] = item
-
- def __iter__(self):
- return self._entries.iterkeys()
-
- def items(self):
- return self._entries.items()
-
- def __contains__(self, k):
- return k in self._entries
-
- def load_collection(self, parent_dir, collection):
- '''parent_dir is the Directory object that will be populated by the collection.
- collection is the arvados.CollectionReader to use as the source'''
- for s in collection.all_streams():
- cwd = parent_dir
- for part in s.name().split('/'):
- if part != '' and part != '.':
- if part not in cwd:
- cwd[part] = self.add_entry(Directory(cwd.inode))
- cwd = cwd[part]
- for k, v in s.files().items():
- cwd[k] = self.add_entry(File(cwd.inode, v))
-
- def add_entry(self, entry):
- entry.inode = self._counter
- self._entries[entry.inode] = entry
- self._counter += 1
- return entry
-
-class Operations(llfuse.Operations):
- '''This is the main interface with llfuse. The methods on this object are
- called by llfuse threads to service FUSE events to query and read from
- the file system.
-
- llfuse has its own global lock which is acquired before calling a request handler,
- so request handlers do not run concurrently unless the lock is explicitly released
- with llfuse.lock_released.'''
-
- def __init__(self, uid, gid):
- super(Operations, self).__init__()
-
- self.inodes = Inodes()
- self.uid = uid
- self.gid = gid
-
- # dict of inode to filehandle
- self._filehandles = {}
- self._filehandles_counter = 1
-
- # Other threads that need to wait until the fuse driver
- # is fully initialized should wait() on this event object.
- self.initlock = threading.Event()
-
- def init(self):
- # Allow threads that are waiting for the driver to be finished
- # initializing to continue
- self.initlock.set()
-
- def access(self, inode, mode, ctx):
- return True
-
- def getattr(self, inode):
- e = self.inodes[inode]
-
- entry = llfuse.EntryAttributes()
- entry.st_ino = inode
- entry.generation = 0
- entry.entry_timeout = 300
- entry.attr_timeout = 300
-
- entry.st_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
- if isinstance(e, Directory):
- entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IFDIR
- else:
- entry.st_mode |= stat.S_IFREG
-
- entry.st_nlink = 1
- entry.st_uid = self.uid
- entry.st_gid = self.gid
- entry.st_rdev = 0
-
- entry.st_size = e.size()
-
- entry.st_blksize = 1024
- entry.st_blocks = e.size()/1024
- if e.size()/1024 != 0:
- entry.st_blocks += 1
- entry.st_atime = 0
- entry.st_mtime = 0
- entry.st_ctime = 0
-
- return entry
-
- def lookup(self, parent_inode, name):
- #print "lookup: parent_inode", parent_inode, "name", name
- inode = None
-
- if name == '.':
- inode = parent_inode
- else:
- if parent_inode in self.inodes:
- p = self.inodes[parent_inode]
- if name == '..':
- inode = p.parent_inode
- elif name in p:
- inode = p[name].inode
-
- if inode != None:
- return self.getattr(inode)
- else:
- raise llfuse.FUSEError(errno.ENOENT)
-
- def open(self, inode, flags):
- if inode in self.inodes:
- p = self.inodes[inode]
- else:
- raise llfuse.FUSEError(errno.ENOENT)
-
- if (flags & os.O_WRONLY) or (flags & os.O_RDWR):
- raise llfuse.FUSEError(errno.EROFS)
-
- if isinstance(p, Directory):
- raise llfuse.FUSEError(errno.EISDIR)
-
- fh = self._filehandles_counter
- self._filehandles_counter += 1
- self._filehandles[fh] = FileHandle(fh, p)
- return fh
-
- def read(self, fh, off, size):
- #print "read", fh, off, size
- if fh in self._filehandles:
- handle = self._filehandles[fh]
- else:
- raise llfuse.FUSEError(errno.EBADF)
-
- try:
- with llfuse.lock_released:
- return handle.entry.reader.readfrom(off, size)
- except:
- raise llfuse.FUSEError(errno.EIO)
-
- def release(self, fh):
- if fh in self._filehandles:
- del self._filehandles[fh]
-
- def opendir(self, inode):
- #print "opendir: inode", inode
-
- if inode in self.inodes:
- p = self.inodes[inode]
- else:
- raise llfuse.FUSEError(errno.ENOENT)
-
- if not isinstance(p, Directory):
- raise llfuse.FUSEError(errno.ENOTDIR)
-
- fh = self._filehandles_counter
- self._filehandles_counter += 1
- if p.parent_inode in self.inodes:
- parent = self.inodes[p.parent_inode]
- else:
- parent = None
- self._filehandles[fh] = FileHandle(fh, [('.', p), ('..', parent)] + list(p.items()))
- return fh
-
- def readdir(self, fh, off):
- #print "readdir: fh", fh, "off", off
-
- if fh in self._filehandles:
- handle = self._filehandles[fh]
- else:
- raise llfuse.FUSEError(errno.EBADF)
-
- #print "handle.entry", handle.entry
-
- e = off
- while e < len(handle.entry):
- yield (handle.entry[e][0], self.getattr(handle.entry[e][1].inode), e+1)
- e += 1
-
- def releasedir(self, fh):
- del self._filehandles[fh]
-
- def statfs(self):
- st = llfuse.StatvfsData()
- st.f_bsize = 1024 * 1024
- st.f_blocks = 0
- st.f_files = 0
-
- st.f_bfree = 0
- st.f_bavail = 0
-
- st.f_ffree = 0
- st.f_favail = 0
-
- st.f_frsize = 0
- return st
-
- # The llfuse documentation recommends only overloading functions that
- # are actually implemented, as the default implementation will raise ENOSYS.
- # However, there is a bug in the llfuse default implementation of create()
- # "create() takes exactly 5 positional arguments (6 given)" which will crash
- # arv-mount.
- # The workaround is to implement it with the proper number of parameters,
- # and then everything works out.
- def create(self, p1, p2, p3, p4, p5):
- raise llfuse.FUSEError(errno.EROFS)
def __init__(self, todo):
self._todo = todo
self._done = 0
+ self._response = None
self._todo_lock = threading.Semaphore(todo)
self._done_lock = threading.Lock()
with self._done_lock:
return (self._done < self._todo)
- def increment_done(self):
+ def save_response(self, response_body, replicas_stored):
"""
- Report that the current thread was successful.
+ Records a response body (a locator, possibly signed) returned by
+ the Keep server. It is not necessary to save more than
+ one response, since we presume that any locator returned
+ in response to a successful request is valid.
"""
with self._done_lock:
- self._done += 1
+ self._done += replicas_stored
+ self._response = response_body
+
+ def response(self):
+ """
+ Returns the body from the response to a PUT request.
+ """
+ with self._done_lock:
+ return self._response
def done(self):
"""
class KeepWriterThread(threading.Thread):
"""
- Write a blob of data to the given Keep server. Call
- increment_done() of the given ThreadLimiter if the write
- succeeds.
+ Write a blob of data to the given Keep server. On success, call
+ save_response() of the given ThreadLimiter to save the returned
+ locator.
"""
def __init__(self, **kwargs):
super(KeepClient.KeepWriterThread, self).__init__()
url = self.args['service_root'] + self.args['data_hash']
api_token = config.get('ARVADOS_API_TOKEN')
headers = {'Authorization': "OAuth2 %s" % api_token}
+
+ if self.args['using_proxy']:
+ # We're using a proxy, so tell the proxy how many copies we
+ # want it to store
+ headers['X-Keep-Desired-Replication'] = str(self.args['want_copies'])
+
try:
+ logging.debug("Uploading to {}".format(url))
resp, content = h.request(url.encode('utf-8'), 'PUT',
headers=headers,
body=self.args['data'])
(str(threading.current_thread()),
self.args['data_hash'],
self.args['service_root']))
- return limiter.increment_done()
+ replicas_stored = 1
+ if 'x-keep-replicas-stored' in resp:
+ # Tick the 'done' counter for the number of replica
+ # reported stored by the server, for the case that
+ # we're talking to a proxy or other backend that
+ # stores to multiple copies for us.
+ try:
+ replicas_stored = int(resp['x-keep-replicas-stored'])
+ except ValueError:
+ pass
+ return limiter.save_response(content.strip(), replicas_stored)
+
logging.warning("Request fail: PUT %s => %s %s" %
(url, resp['status'], content))
except (httplib2.HttpLib2Error, httplib.HTTPException) as e:
self._cache = []
# default 256 megabyte cache
self.cache_max = 256 * 1024 * 1024
+ self.using_proxy = False
def shuffled_service_roots(self, hash):
if self.service_roots == None:
self.lock.acquire()
- try:
- keep_disks = arvados.api().keep_disks().list().execute()['items']
- roots = (("http%s://%s:%d/" %
- ('s' if f['service_ssl_flag'] else '',
- f['service_host'],
- f['service_port']))
- for f in keep_disks)
- self.service_roots = sorted(set(roots))
- logging.debug(str(self.service_roots))
- finally:
- self.lock.release()
+        # Override normal keep disk lookup with an explicit proxy
+ # configuration.
+ keep_proxy_env = config.get("ARVADOS_KEEP_PROXY")
+ if keep_proxy_env != None and len(keep_proxy_env) > 0:
+
+ if keep_proxy_env[-1:] != '/':
+ keep_proxy_env += "/"
+ self.service_roots = [keep_proxy_env]
+ self.using_proxy = True
+ else:
+ try:
+ try:
+ keep_services = arvados.api().keep_services().accessible().execute()['items']
+ except Exception:
+ keep_services = arvados.api().keep_disks().list().execute()['items']
+
+ if len(keep_services) == 0:
+ raise arvados.errors.NoKeepServersError()
+
+ if 'service_type' in keep_services[0] and keep_services[0]['service_type'] == 'proxy':
+ self.using_proxy = True
+
+ roots = (("http%s://%s:%d/" %
+ ('s' if f['service_ssl_flag'] else '',
+ f['service_host'],
+ f['service_port']))
+ for f in keep_services)
+ self.service_roots = sorted(set(roots))
+ logging.debug(str(self.service_roots))
+ finally:
+ self.lock.release()
+
+ # Build an ordering with which to query the Keep servers based on the
+ # contents of the hash.
+ # "hash" is a hex-encoded number at least 8 digits
+ # (32 bits) long
+
+ # seed used to calculate the next keep server from 'pool'
+ # to be added to 'pseq'
seed = hash
+
+ # Keep servers still to be added to the ordering
pool = self.service_roots[:]
+
+ # output probe sequence
pseq = []
+
+ # iterate while there are servers left to be assigned
while len(pool) > 0:
if len(seed) < 8:
- if len(pseq) < len(hash) / 4: # first time around
+ # ran out of digits in the seed
+ if len(pseq) < len(hash) / 4:
+ # the number of servers added to the probe sequence is less
+ # than the number of 4-digit slices in 'hash' so refill the
+ # seed with the last 4 digits and then append the contents
+ # of 'hash'.
seed = hash[-4:] + hash
else:
+ # refill the seed with the contents of 'hash'
seed += hash
+
+ # Take the next 8 digits (32 bytes) and interpret as an integer,
+ # then modulus with the size of the remaining pool to get the next
+ # selected server.
probe = int(seed[0:8], 16) % len(pool)
+
+ # Append the selected server to the probe sequence and remove it
+ # from the pool.
pseq += [pool[probe]]
pool = pool[:probe] + pool[probe+1:]
+
+ # Remove the digits just used from the seed
seed = seed[8:]
logging.debug(str(pseq))
return pseq
self._cache_lock.release()
def reserve_cache(self, locator):
- '''Reserve a cache slot for the specified locator,
+ '''Reserve a cache slot for the specified locator,
or return the existing slot.'''
self._cache_lock.acquire()
try:
try:
for service_root in self.shuffled_service_roots(expect_hash):
- url = service_root + expect_hash
+ url = service_root + locator
api_token = config.get('ARVADOS_API_TOKEN')
headers = {'Authorization': "OAuth2 %s" % api_token,
'Accept': 'application/octet-stream'}
for location_hint in re.finditer(r'\+K@([a-z0-9]+)', locator):
instance = location_hint.group(1)
- url = 'http://keep.' + instance + '.arvadosapi.com/' + expect_hash
+ url = 'http://keep.' + instance + '.arvadosapi.com/' + locator
blob = self.get_url(url, {}, expect_hash)
if blob:
slot.set(blob)
with timer.Timer() as t:
resp, content = h.request(url.encode('utf-8'), 'GET',
headers=headers)
- logging.info("Received %s bytes in %s msec (%s MiB/sec)" % (len(content),
- t.msecs,
+ logging.info("Received %s bytes in %s msec (%s MiB/sec)" % (len(content),
+ t.msecs,
(len(content)/(1024*1024))/t.secs))
if re.match(r'^2\d\d$', resp['status']):
m = hashlib.new('md5')
t = KeepClient.KeepWriterThread(data=data,
data_hash=data_hash,
service_root=service_root,
- thread_limiter=thread_limiter)
+ thread_limiter=thread_limiter,
+ using_proxy=self.using_proxy,
+ want_copies=(want_copies if self.using_proxy else 1))
t.start()
threads += [t]
for t in threads:
t.join()
have_copies = thread_limiter.done()
- if have_copies == want_copies:
- return (data_hash + '+' + str(len(data)))
+ # If we're done, return the response from Keep
+ if have_copies >= want_copies:
+ return thread_limiter.response()
raise arvados.errors.KeepWriteError(
"Write fail for %s: wanted %d but wrote %d" %
(data_hash, want_copies, have_copies))
+++ /dev/null
-#!/usr/bin/env python
-
-from arvados.fuse import *
-import arvados
-import subprocess
-import argparse
-
-if __name__ == '__main__':
- # Handle command line parameters
- parser = argparse.ArgumentParser(
- description='Mount Keep data under the local filesystem.',
- epilog="""
-Note: When using the --exec feature, you must either specify the
-mountpoint before --exec, or mark the end of your --exec arguments
-with "--".
-""")
- parser.add_argument('mountpoint', type=str, help="""Mount point.""")
- parser.add_argument('--collection', type=str, help="""Collection locator""")
- parser.add_argument('--debug', action='store_true', help="""Debug mode""")
- parser.add_argument('--exec', type=str, nargs=argparse.REMAINDER,
- dest="exec_args", metavar=('command', 'args', '...', '--'),
- help="""Mount, run a command, then unmount and exit""")
-
- args = parser.parse_args()
-
- # Create the request handler
- operations = Operations(os.getuid(), os.getgid())
-
- if args.collection != None:
- # Set up the request handler with the collection at the root
- e = operations.inodes.add_entry(Directory(llfuse.ROOT_INODE))
- operations.inodes.load_collection(e, arvados.CollectionReader(arvados.Keep.get(args.collection)))
- else:
- # Set up the request handler with the 'magic directory' at the root
- operations.inodes.add_entry(MagicDirectory(llfuse.ROOT_INODE, operations.inodes))
-
- # FUSE options, see mount.fuse(8)
- opts = []
-
- # Enable FUSE debugging (logs each FUSE request)
- if args.debug:
- opts += ['debug']
-
- # Initialize the fuse connection
- llfuse.init(operations, args.mountpoint, opts)
-
- if args.exec_args:
- t = threading.Thread(None, lambda: llfuse.main())
- t.start()
-
- # wait until the driver is finished initializing
- operations.initlock.wait()
-
- rc = 255
- try:
- rc = subprocess.call(args.exec_args, shell=False)
- except OSError as e:
- sys.stderr.write('arv-mount: %s -- exec %s\n' % (str(e), args.exec_args))
- rc = e.errno
- except Exception as e:
- sys.stderr.write('arv-mount: %s\n' % str(e))
- finally:
- subprocess.call(["fusermount", "-u", "-z", args.mountpoint])
-
- exit(rc)
- else:
- llfuse.main()
+++ /dev/null
-#!/bin/sh
-#
-# Apparently the only reliable way to distribute Python packages with pypi and
-# install them via pip is as source packages (sdist).
-#
-# That means that setup.py is run on the system the package is being installed on,
-# outside of the Arvados git tree.
-#
-# In turn, this means that we can not build the minor_version on the fly when
-# setup.py is being executed. Instead, we use this script to generate a 'static'
-# version of setup.py which will can be distributed via pypi.
-
-minor_version=`git log --format=format:%ct.%h -n1 .`
-
-sed "s|%%MINOR_VERSION%%|$minor_version|" < setup.py.src > setup.py
-
-google-api-python-client==1.2
-httplib2==0.8
-python-gflags==2.0
-urllib3==1.7.1
-llfuse==0.40
+google-api-python-client>=1.2
+httplib2>=0.7
+python-gflags>=1.5
+urllib3>=1.3
+ws4py>=0.3
+PyYAML>=3.0
--- /dev/null
+import subprocess
+import time
+import os
+import signal
+import yaml
+import sys
+import argparse
+import arvados.config
+import arvados.api
+import shutil
+import tempfile
+
+ARV_API_SERVER_DIR = '../../services/api'
+KEEP_SERVER_DIR = '../../services/keep'
+SERVER_PID_PATH = 'tmp/pids/webrick-test.pid'
+WEBSOCKETS_SERVER_PID_PATH = 'tmp/pids/passenger-test.pid'
+
+def find_server_pid(PID_PATH, wait=10):
+    # Poll PID_PATH for up to `wait` seconds until it names a live process.
+    # Returns the pid as an int, or None if no live process was found in time.
+    now = time.time()
+    timeout = now + wait
+    good_pid = False
+    while (not good_pid) and (now <= timeout):
+        time.sleep(0.2)
+        try:
+            with open(PID_PATH, 'r') as f:
+                server_pid = int(f.read())
+            # Signal 0 probes for existence; os.kill raises OSError if the
+            # pid is not running, and returns None if it is.
+            good_pid = (os.kill(server_pid, 0) == None)
+        except IOError:
+            # pid file missing or unreadable (server not started yet)
+            good_pid = False
+        except OSError:
+            # stale pid file: the process is gone
+            good_pid = False
+        now = time.time()
+
+    if not good_pid:
+        return None
+
+    return server_pid
+
+def kill_server_pid(PID_PATH, wait=10):
+    # Read a pid from PID_PATH and send SIGTERM every 0.1s for up to `wait`
+    # seconds, until os.getpgid() raises OSError (i.e. the process is gone).
+    # Missing pid files and already-dead processes are silently ignored.
+    try:
+        now = time.time()
+        timeout = now + wait
+        with open(PID_PATH, 'r') as f:
+            server_pid = int(f.read())
+        while now <= timeout:
+            # NOTE(review): the '== None' comparison result is discarded;
+            # os.kill's return value is never used here.
+            os.kill(server_pid, signal.SIGTERM) == None
+            os.getpgid(server_pid) # throw OSError if no such pid
+            now = time.time()
+            time.sleep(0.1)
+    except IOError:
+        # NOTE(review): good_pid is assigned but never read in this function.
+        good_pid = False
+    except OSError:
+        good_pid = False
+
+def run(websockets=False, reuse_server=False):
+    # Start the test API server (webrick on :3001, or passenger with SSL and
+    # websockets on :3333) from ARV_API_SERVER_DIR, resetting the test
+    # database first.  Sets ARVADOS_API_HOST* environment variables for
+    # clients.  If reuse_server is true and a matching server is already
+    # running, it is left alone.
+    cwd = os.getcwd()
+    os.chdir(os.path.join(os.path.dirname(__file__), ARV_API_SERVER_DIR))
+
+    if websockets:
+        pid_file = WEBSOCKETS_SERVER_PID_PATH
+    else:
+        pid_file = SERVER_PID_PATH
+
+    test_pid = find_server_pid(pid_file, 0)
+
+    if test_pid == None or not reuse_server:
+        # do not try to run both server variants at once
+        stop()
+
+        # delete cached discovery document
+        shutil.rmtree(arvados.http_cache('discovery'))
+
+        # Setup database
+        os.environ["RAILS_ENV"] = "test"
+        subprocess.call(['bundle', 'exec', 'rake', 'tmp:cache:clear'])
+        subprocess.call(['bundle', 'exec', 'rake', 'db:test:load'])
+        subprocess.call(['bundle', 'exec', 'rake', 'db:fixtures:load'])
+
+        if websockets:
+            os.environ["ARVADOS_WEBSOCKETS"] = "true"
+            subprocess.call(['openssl', 'req', '-new', '-x509', '-nodes',
+                             '-out', './self-signed.pem',
+                             '-keyout', './self-signed.key',
+                             '-days', '3650',
+                             '-subj', '/CN=localhost'])
+            subprocess.call(['bundle', 'exec',
+                             'passenger', 'start', '-d', '-p3333',
+                             '--pid-file',
+                             os.path.join(os.getcwd(), WEBSOCKETS_SERVER_PID_PATH),
+                             '--ssl',
+                             '--ssl-certificate', 'self-signed.pem',
+                             '--ssl-certificate-key', 'self-signed.key'])
+            os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3333"
+        else:
+            subprocess.call(['bundle', 'exec', 'rails', 'server', '-d',
+                             '--pid',
+                             os.path.join(os.getcwd(), SERVER_PID_PATH),
+                             '-p3001'])
+            os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3001"
+
+    # NOTE(review): this always waits on SERVER_PID_PATH, even when the
+    # websockets (passenger) variant was started, whose pid file is
+    # WEBSOCKETS_SERVER_PID_PATH — confirm intent; `pid` is also unused.
+    pid = find_server_pid(SERVER_PID_PATH)
+
+    os.environ["ARVADOS_API_HOST_INSECURE"] = "true"
+    os.environ["ARVADOS_API_TOKEN"] = ""
+    os.chdir(cwd)
+
+def stop():
+    # Stop both API server variants (best effort) and remove the self-signed
+    # SSL certificate files created by run(websockets=True).
+    cwd = os.getcwd()
+    os.chdir(os.path.join(os.path.dirname(__file__), ARV_API_SERVER_DIR))
+
+    kill_server_pid(WEBSOCKETS_SERVER_PID_PATH, 0)
+    kill_server_pid(SERVER_PID_PATH, 0)
+
+    try:
+        os.unlink('self-signed.pem')
+    except:
+        pass
+
+    try:
+        os.unlink('self-signed.key')
+    except:
+        pass
+
+    os.chdir(cwd)
+
+def _start_keep(n, keep_args):
+    # Start Keep server number `n` on port 25107+n, storing blocks in a fresh
+    # temporary directory.  keep_args maps extra command-line flag names to
+    # values (appended as "flag=value").  The child pid and the volume path
+    # are recorded in tmp/keep{n}.pid and tmp/keep{n}.volume for _stop_keep().
+    keep0 = tempfile.mkdtemp()
+    keep_cmd = ["bin/keep",
+                "-volumes={}".format(keep0),
+                "-listen=:{}".format(25107+n),
+                "-pid={}".format("tmp/keep{}.pid".format(n))]
+
+    # iteritems() is Python 2 only — presumably this tree targets Python 2.
+    for arg, val in keep_args.iteritems():
+        keep_cmd.append("{}={}".format(arg, val))
+
+    kp0 = subprocess.Popen(keep_cmd)
+    with open("tmp/keep{}.pid".format(n), 'w') as f:
+        f.write(str(kp0.pid))
+
+    with open("tmp/keep{}.volume".format(n), 'w') as f:
+        f.write(keep0)
+
+def run_keep(blob_signing_key=None, enforce_permissions=False):
+    # Build and start two local Keep servers (ports 25107/25108), then
+    # register them with the test API server as "disk" keep_services,
+    # replacing any existing keep_services/keep_disks records.
+    # blob_signing_key: if given, written to tmp/keep.blob_signing_key and
+    # passed via --permission-key-file so PUT responses are signed.
+    # enforce_permissions: if true, GET requests also require signatures.
+    stop_keep()
+
+    cwd = os.getcwd()
+    os.chdir(os.path.join(os.path.dirname(__file__), KEEP_SERVER_DIR))
+    if os.environ.get('GOPATH') == None:
+        os.environ["GOPATH"] = os.getcwd()
+    else:
+        os.environ["GOPATH"] = os.getcwd() + ":" + os.environ["GOPATH"]
+
+    subprocess.call(["./go.sh", "install", "keep"])
+
+    if not os.path.exists("tmp"):
+        os.mkdir("tmp")
+
+    keep_args = {}
+    if blob_signing_key:
+        with open("tmp/keep.blob_signing_key", "w") as f:
+            f.write(blob_signing_key)
+        keep_args['--permission-key-file'] = 'tmp/keep.blob_signing_key'
+    if enforce_permissions:
+        keep_args['--enforce-permissions'] = 'true'
+
+    _start_keep(0, keep_args)
+    _start_keep(1, keep_args)
+
+    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3001"
+    os.environ["ARVADOS_API_HOST_INSECURE"] = "true"
+
+    # Re-register the two servers with the API so clients discover them.
+    authorize_with("admin")
+    api = arvados.api('v1', cache=False)
+    for d in api.keep_services().list().execute()['items']:
+        api.keep_services().delete(uuid=d['uuid']).execute()
+    for d in api.keep_disks().list().execute()['items']:
+        api.keep_disks().delete(uuid=d['uuid']).execute()
+
+    s1 = api.keep_services().create(body={"keep_service": {"service_host": "localhost", "service_port": 25107, "service_type": "disk"} }).execute()
+    s2 = api.keep_services().create(body={"keep_service": {"service_host": "localhost", "service_port": 25108, "service_type": "disk"} }).execute()
+    api.keep_disks().create(body={"keep_disk": {"keep_service_uuid": s1["uuid"] } }).execute()
+    api.keep_disks().create(body={"keep_disk": {"keep_service_uuid": s2["uuid"] } }).execute()
+
+    os.chdir(cwd)
+
+def _stop_keep(n):
+    # Stop Keep server number `n` and delete its temporary volume directory
+    # (recorded in tmp/keep{n}.volume) plus the shared blob signing key file.
+    kill_server_pid("tmp/keep{}.pid".format(n), 0)
+    if os.path.exists("tmp/keep{}.volume".format(n)):
+        with open("tmp/keep{}.volume".format(n), 'r') as r:
+            shutil.rmtree(r.read(), True)
+        os.unlink("tmp/keep{}.volume".format(n))
+    if os.path.exists("tmp/keep.blob_signing_key"):
+        os.remove("tmp/keep.blob_signing_key")
+
+def stop_keep():
+    # Stop both Keep servers started by run_keep().
+    cwd = os.getcwd()
+    os.chdir(os.path.join(os.path.dirname(__file__), KEEP_SERVER_DIR))
+
+    _stop_keep(0)
+    _stop_keep(1)
+
+    os.chdir(cwd)
+
+def run_keep_proxy(auth):
+    # Build and start keepproxy on port 25101, authenticated with the token
+    # of the named api_client_authorizations fixture (`auth`), register it
+    # with the API server as a "proxy" keep_service, and point
+    # ARVADOS_KEEP_PROXY at it.
+    stop_keep_proxy()
+
+    cwd = os.getcwd()
+    os.chdir(os.path.join(os.path.dirname(__file__), KEEP_SERVER_DIR))
+    if os.environ.get('GOPATH') == None:
+        os.environ["GOPATH"] = os.getcwd()
+    else:
+        os.environ["GOPATH"] = os.getcwd() + ":" + os.environ["GOPATH"]
+
+    subprocess.call(["./go.sh", "install", "arvados.org/keepproxy"])
+
+    if not os.path.exists("tmp"):
+        os.mkdir("tmp")
+
+    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3001"
+    os.environ["ARVADOS_API_HOST_INSECURE"] = "true"
+    os.environ["ARVADOS_API_TOKEN"] = fixture("api_client_authorizations")[auth]["api_token"]
+
+    # NOTE(review): kp0 is never waited on or recorded here; the proxy writes
+    # its own pid to tmp/keepproxy.pid (used by stop_keep_proxy()).
+    kp0 = subprocess.Popen(["bin/keepproxy", "-pid=tmp/keepproxy.pid", "-listen=:{}".format(25101)])
+
+    authorize_with("admin")
+    api = arvados.api('v1', cache=False)
+    api.keep_services().create(body={"keep_service": {"service_host": "localhost", "service_port": 25101, "service_type": "proxy"} }).execute()
+
+    arvados.config.settings()["ARVADOS_KEEP_PROXY"] = "http://localhost:25101"
+
+    os.chdir(cwd)
+
+def stop_keep_proxy():
+    # Stop the keepproxy started by run_keep_proxy(), if any.
+    cwd = os.getcwd()
+    os.chdir(os.path.join(os.path.dirname(__file__), KEEP_SERVER_DIR))
+    kill_server_pid("tmp/keepproxy.pid", 0)
+    os.chdir(cwd)
+
+def fixture(fix):
+    '''load a fixture yaml file'''
+    # Returns the parsed contents of <api server>/test/fixtures/<fix>.yml.
+    # NOTE(review): yaml.load without an explicit Loader can execute
+    # arbitrary constructors; fixtures are trusted local files, but
+    # yaml.safe_load would be the safer choice.
+    with open(os.path.join(os.path.dirname(__file__), ARV_API_SERVER_DIR, "test", "fixtures",
+                           fix + ".yml")) as f:
+        return yaml.load(f.read())
+
+def authorize_with(token):
+    '''token is the symbolic name of the token from the api_client_authorizations fixture'''
+    # Copies the fixture token and the current ARVADOS_API_HOST into the
+    # arvados.config settings used by the SDK client.
+    arvados.config.settings()["ARVADOS_API_TOKEN"] = fixture("api_client_authorizations")[token]["api_token"]
+    arvados.config.settings()["ARVADOS_API_HOST"] = os.environ.get("ARVADOS_API_HOST")
+    arvados.config.settings()["ARVADOS_API_HOST_INSECURE"] = "true"
+
+if __name__ == "__main__":
+    # Command-line entry point: start/stop the test API server, Keep
+    # servers, or keepproxy.
+    # NOTE(review): the help text and the error message below omit the
+    # supported "start_keep_proxy" / "stop_keep_proxy" actions.
+    parser = argparse.ArgumentParser()
+    parser.add_argument('action', type=str, help='''one of "start", "stop", "start_keep", "stop_keep"''')
+    parser.add_argument('--websockets', action='store_true', default=False)
+    parser.add_argument('--reuse', action='store_true', default=False)
+    parser.add_argument('--auth', type=str, help='Print authorization info for given api_client_authorizations fixture')
+    args = parser.parse_args()
+
+    if args.action == 'start':
+        run(websockets=args.websockets, reuse_server=args.reuse)
+        if args.auth != None:
+            # Print shell-exportable credentials for the chosen fixture.
+            authorize_with(args.auth)
+            print("export ARVADOS_API_HOST={}".format(arvados.config.settings()["ARVADOS_API_HOST"]))
+            print("export ARVADOS_API_TOKEN={}".format(arvados.config.settings()["ARVADOS_API_TOKEN"]))
+            print("export ARVADOS_API_HOST_INSECURE={}".format(arvados.config.settings()["ARVADOS_API_HOST_INSECURE"]))
+    elif args.action == 'stop':
+        stop()
+    elif args.action == 'start_keep':
+        run_keep()
+    elif args.action == 'stop_keep':
+        stop_keep()
+    elif args.action == 'start_keep_proxy':
+        run_keep_proxy("admin")
+    elif args.action == 'stop_keep_proxy':
+        stop_keep_proxy()
+    else:
+        print('Unrecognized action "{}", actions are "start", "stop", "start_keep", "stop_keep"'.format(args.action))
from setuptools import setup
-import subprocess
-
-minor_version = '%%MINOR_VERSION%%'
setup(name='arvados-python-client',
- version='0.1.' + minor_version,
+ version='0.1',
description='Arvados client library',
author='Arvados',
author_email='info@arvados.org',
scripts=[
'bin/arv-get',
'bin/arv-put',
- 'bin/arv-mount',
'bin/arv-ls',
'bin/arv-normalize',
],
'google-api-python-client',
'httplib2',
'urllib3',
- 'llfuse'
+ 'ws4py'
],
zip_safe=False)
import unittest
import arvados
import os
+import run_test_server
class KeepTestCase(unittest.TestCase):
- def setUp(self):
+ @classmethod
+ def setUpClass(cls):
+ super(KeepTestCase, cls).setUpClass()
try:
del os.environ['KEEP_LOCAL_STORE']
except KeyError:
pass
-class KeepBasicRWTest(KeepTestCase):
- def runTest(self):
+ # Make sure these are clear, we want to talk to the Keep servers
+ # directly.
+ os.environ["ARVADOS_KEEP_PROXY"] = ""
+ os.environ["ARVADOS_EXTERNAL_CLIENT"] = ""
+
+ run_test_server.run()
+ run_test_server.run_keep()
+ arvados.keep.global_client_object = None
+ arvados.config._settings = None
+ run_test_server.authorize_with("admin")
+
+ @classmethod
+ def tearDownClass(cls):
+ super(KeepTestCase, cls).tearDownClass()
+ run_test_server.stop()
+ run_test_server.stop_keep()
+
+ def test_KeepBasicRWTest(self):
foo_locator = arvados.Keep.put('foo')
self.assertEqual(foo_locator,
'acbd18db4cc2f85cedef654fccc4a4d8+3',
'foo',
'wrong content from Keep.get(md5("foo"))')
-class KeepBinaryRWTest(KeepTestCase):
- def runTest(self):
+ def test_KeepBinaryRWTest(self):
blob_str = '\xff\xfe\xf7\x00\x01\x02'
blob_locator = arvados.Keep.put(blob_str)
self.assertEqual(blob_locator,
blob_str,
'wrong content from Keep.get(md5(<binarydata>))')
-class KeepLongBinaryRWTest(KeepTestCase):
- def runTest(self):
+ def test_KeepLongBinaryRWTest(self):
blob_str = '\xff\xfe\xfd\xfc\x00\x01\x02\x03'
for i in range(0,23):
blob_str = blob_str + blob_str
blob_str,
'wrong content from Keep.get(md5(<binarydata>))')
-class KeepSingleCopyRWTest(KeepTestCase):
- def runTest(self):
+ def test_KeepSingleCopyRWTest(self):
blob_str = '\xff\xfe\xfd\xfc\x00\x01\x02\x03'
blob_locator = arvados.Keep.put(blob_str, copies=1)
self.assertEqual(blob_locator,
self.assertEqual(arvados.Keep.get(blob_locator),
blob_str,
'wrong content from Keep.get(md5(<binarydata>))')
+
+class KeepPermissionTestCase(unittest.TestCase):
+    # Exercises Keep with --enforce-permissions: PUT responses must carry
+    # +A signature hints, and GETs without a valid signature or API token
+    # must fail with NotFoundError.
+    @classmethod
+    def setUpClass(cls):
+        try:
+            del os.environ['KEEP_LOCAL_STORE']
+        except KeyError:
+            pass
+
+        run_test_server.run()
+        run_test_server.run_keep(blob_signing_key='abcdefghijk0123456789',
+                                 enforce_permissions=True)
+
+    @classmethod
+    def tearDownClass(cls):
+        run_test_server.stop()
+        run_test_server.stop_keep()
+
+    def test_KeepBasicRWTest(self):
+        run_test_server.authorize_with('active')
+        foo_locator = arvados.Keep.put('foo')
+        self.assertRegexpMatches(
+            foo_locator,
+            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3\+A[a-f0-9]+@[a-f0-9]+$',
+            'invalid locator from Keep.put("foo"): ' + foo_locator)
+        self.assertEqual(arvados.Keep.get(foo_locator),
+                         'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+
+        # With Keep permissions enabled, a GET request without a signature will fail.
+        bar_locator = arvados.Keep.put('bar')
+        self.assertRegexpMatches(
+            bar_locator,
+            r'^37b51d194a7513e45b56f6524f2d51f2\+3\+A[a-f0-9]+@[a-f0-9]+$',
+            'invalid locator from Keep.put("bar"): ' + bar_locator)
+        self.assertRaises(arvados.errors.NotFoundError,
+                          arvados.Keep.get,
+                          "37b51d194a7513e45b56f6524f2d51f2")
+
+        # A request without an API token will also fail.
+        del arvados.config.settings()["ARVADOS_API_TOKEN"]
+        self.assertRaises(arvados.errors.NotFoundError,
+                          arvados.Keep.get,
+                          bar_locator)
+
+# KeepOptionalPermission: starts Keep with --permission-key-file
+# but not --enforce-permissions (i.e. generate signatures on PUT
+# requests, but do not require them for GET requests)
+#
+# All of these requests should succeed when permissions are optional:
+# * authenticated request, signed locator
+# * authenticated request, unsigned locator
+# * unauthenticated request, signed locator
+# * unauthenticated request, unsigned locator
+
+class KeepOptionalPermission(unittest.TestCase):
+    # See the module comment above: signatures are generated on PUT but not
+    # required on GET, so all four auth/signature combinations must succeed.
+    @classmethod
+    def setUpClass(cls):
+        try:
+            del os.environ['KEEP_LOCAL_STORE']
+        except KeyError:
+            pass
+        run_test_server.run()
+        run_test_server.run_keep(blob_signing_key='abcdefghijk0123456789',
+                                 enforce_permissions=False)
+
+    @classmethod
+    def tearDownClass(cls):
+        run_test_server.stop()
+        run_test_server.stop_keep()
+
+    def test_KeepAuthenticatedSignedTest(self):
+        run_test_server.authorize_with('active')
+        signed_locator = arvados.Keep.put('foo')
+        self.assertRegexpMatches(
+            signed_locator,
+            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3\+A[a-f0-9]+@[a-f0-9]+$',
+            'invalid locator from Keep.put("foo"): ' + signed_locator)
+        self.assertEqual(arvados.Keep.get(signed_locator),
+                         'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+
+    def test_KeepAuthenticatedUnsignedTest(self):
+        run_test_server.authorize_with('active')
+        signed_locator = arvados.Keep.put('foo')
+        self.assertRegexpMatches(
+            signed_locator,
+            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3\+A[a-f0-9]+@[a-f0-9]+$',
+            'invalid locator from Keep.put("foo"): ' + signed_locator)
+        # GET by bare hash (no +A signature) must still succeed.
+        self.assertEqual(arvados.Keep.get("acbd18db4cc2f85cedef654fccc4a4d8"),
+                         'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+
+    def test_KeepUnauthenticatedSignedTest(self):
+        # Since --enforce-permissions is not in effect, GET requests
+        # need not be authenticated.
+        run_test_server.authorize_with('active')
+        signed_locator = arvados.Keep.put('foo')
+        self.assertRegexpMatches(
+            signed_locator,
+            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3\+A[a-f0-9]+@[a-f0-9]+$',
+            'invalid locator from Keep.put("foo"): ' + signed_locator)
+
+        del arvados.config.settings()["ARVADOS_API_TOKEN"]
+        self.assertEqual(arvados.Keep.get(signed_locator),
+                         'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+
+    def test_KeepUnauthenticatedUnsignedTest(self):
+        # Since --enforce-permissions is not in effect, GET requests
+        # need not be authenticated.
+        run_test_server.authorize_with('active')
+        signed_locator = arvados.Keep.put('foo')
+        self.assertRegexpMatches(
+            signed_locator,
+            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3\+A[a-f0-9]+@[a-f0-9]+$',
+            'invalid locator from Keep.put("foo"): ' + signed_locator)
+
+        del arvados.config.settings()["ARVADOS_API_TOKEN"]
+        self.assertEqual(arvados.Keep.get("acbd18db4cc2f85cedef654fccc4a4d8"),
+                         'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+
+
+class KeepProxyTestCase(unittest.TestCase):
+    # Exercises the Keep client through keepproxy, both via an explicit
+    # ARVADOS_KEEP_PROXY setting and via ARVADOS_EXTERNAL_CLIENT discovery.
+    @classmethod
+    def setUpClass(cls):
+        super(KeepProxyTestCase, cls).setUpClass()
+
+        try:
+            del os.environ['KEEP_LOCAL_STORE']
+        except KeyError:
+            pass
+
+        os.environ["ARVADOS_KEEP_PROXY"] = ""
+        os.environ["ARVADOS_EXTERNAL_CLIENT"] = ""
+
+        run_test_server.run()
+        run_test_server.run_keep()
+        # Reset cached SDK state so the new server config takes effect.
+        arvados.keep.global_client_object = None
+        arvados.config._settings = None
+        run_test_server.run_keep_proxy("admin")
+        KeepProxyTestCase.arvados_keep_proxy = arvados.config.get("ARVADOS_KEEP_PROXY")
+
+    @classmethod
+    def tearDownClass(cls):
+        super(KeepProxyTestCase, cls).tearDownClass()
+        run_test_server.stop()
+        run_test_server.stop_keep()
+        run_test_server.stop_keep_proxy()
+
+    def test_KeepProxyTest1(self):
+        # Will use ARVADOS_KEEP_PROXY environment variable that is set by
+        # run_keep_proxy() in setUpClass()
+
+        os.environ["ARVADOS_KEEP_PROXY"] = KeepProxyTestCase.arvados_keep_proxy
+        os.environ["ARVADOS_EXTERNAL_CLIENT"] = ""
+        arvados.keep.global_client_object = None
+        arvados.config._settings = None
+
+        baz_locator = arvados.Keep.put('baz')
+        self.assertEqual(baz_locator,
+                         '73feffa4b7f6bb68e44cf984c85f6e88+3',
+                         'wrong md5 hash from Keep.put("baz"): ' + baz_locator)
+        self.assertEqual(arvados.Keep.get(baz_locator),
+                         'baz',
+                         'wrong content from Keep.get(md5("baz"))')
+
+        self.assertEqual(True, arvados.Keep.global_client_object().using_proxy)
+
+    def test_KeepProxyTest2(self):
+        # We don't want to use ARVADOS_KEEP_PROXY from run_keep_proxy() in
+        # setUpClass(), so clear it and set ARVADOS_EXTERNAL_CLIENT which will
+        # contact the API server.
+        os.environ["ARVADOS_KEEP_PROXY"] = ""
+        os.environ["ARVADOS_EXTERNAL_CLIENT"] = "true"
+        arvados.keep.global_client_object = None
+        arvados.config._settings = None
+
+        # Will send X-External-Client to server and get back the proxy from
+        # keep_services/accessible
+
+        baz_locator = arvados.Keep.put('baz2')
+        self.assertEqual(baz_locator,
+                         '91f372a266fe2bf2823cb8ec7fda31ce+4',
+                         'wrong md5 hash from Keep.put("baz2"): ' + baz_locator)
+        self.assertEqual(arvados.Keep.get(baz_locator),
+                         'baz2',
+                         'wrong content from Keep.get(md5("baz2"))')
+
+        self.assertEqual(True, arvados.Keep.global_client_object().using_proxy)
+++ /dev/null
-import unittest
-import arvados
-import arvados.fuse as fuse
-import threading
-import time
-import os
-import llfuse
-import tempfile
-import shutil
-import subprocess
-import glob
-
-class FuseMountTest(unittest.TestCase):
- def setUp(self):
- self.keeptmp = tempfile.mkdtemp()
- os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
-
- cw = arvados.CollectionWriter()
-
- cw.start_new_file('thing1.txt')
- cw.write("data 1")
- cw.start_new_file('thing2.txt')
- cw.write("data 2")
- cw.start_new_stream('dir1')
-
- cw.start_new_file('thing3.txt')
- cw.write("data 3")
- cw.start_new_file('thing4.txt')
- cw.write("data 4")
-
- cw.start_new_stream('dir2')
- cw.start_new_file('thing5.txt')
- cw.write("data 5")
- cw.start_new_file('thing6.txt')
- cw.write("data 6")
-
- cw.start_new_stream('dir2/dir3')
- cw.start_new_file('thing7.txt')
- cw.write("data 7")
-
- cw.start_new_file('thing8.txt')
- cw.write("data 8")
-
- self.testcollection = cw.finish()
-
- def runTest(self):
- # Create the request handler
- operations = fuse.Operations(os.getuid(), os.getgid())
- e = operations.inodes.add_entry(fuse.Directory(llfuse.ROOT_INODE))
- operations.inodes.load_collection(e, arvados.CollectionReader(arvados.Keep.get(self.testcollection)))
-
- self.mounttmp = tempfile.mkdtemp()
-
- llfuse.init(operations, self.mounttmp, [])
- t = threading.Thread(None, lambda: llfuse.main())
- t.start()
-
- # wait until the driver is finished initializing
- operations.initlock.wait()
-
- # now check some stuff
- d1 = os.listdir(self.mounttmp)
- d1.sort()
- self.assertEqual(d1, ['dir1', 'dir2', 'thing1.txt', 'thing2.txt'])
-
- d2 = os.listdir(os.path.join(self.mounttmp, 'dir1'))
- d2.sort()
- self.assertEqual(d2, ['thing3.txt', 'thing4.txt'])
-
- d3 = os.listdir(os.path.join(self.mounttmp, 'dir2'))
- d3.sort()
- self.assertEqual(d3, ['dir3', 'thing5.txt', 'thing6.txt'])
-
- d4 = os.listdir(os.path.join(self.mounttmp, 'dir2/dir3'))
- d4.sort()
- self.assertEqual(d4, ['thing7.txt', 'thing8.txt'])
-
- files = {'thing1.txt': 'data 1',
- 'thing2.txt': 'data 2',
- 'dir1/thing3.txt': 'data 3',
- 'dir1/thing4.txt': 'data 4',
- 'dir2/thing5.txt': 'data 5',
- 'dir2/thing6.txt': 'data 6',
- 'dir2/dir3/thing7.txt': 'data 7',
- 'dir2/dir3/thing8.txt': 'data 8'}
-
- for k, v in files.items():
- with open(os.path.join(self.mounttmp, k)) as f:
- self.assertEqual(f.read(), v)
-
-
- def tearDown(self):
- # llfuse.close is buggy, so use fusermount instead.
- #llfuse.close(unmount=True)
- subprocess.call(["fusermount", "-u", self.mounttmp])
-
- os.rmdir(self.mounttmp)
- shutil.rmtree(self.keeptmp)
-
-class FuseMagicTest(unittest.TestCase):
- def setUp(self):
- self.keeptmp = tempfile.mkdtemp()
- os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
-
- cw = arvados.CollectionWriter()
-
- cw.start_new_file('thing1.txt')
- cw.write("data 1")
-
- self.testcollection = cw.finish()
-
- def runTest(self):
- # Create the request handler
- operations = fuse.Operations(os.getuid(), os.getgid())
- e = operations.inodes.add_entry(fuse.MagicDirectory(llfuse.ROOT_INODE, operations.inodes))
-
- self.mounttmp = tempfile.mkdtemp()
-
- llfuse.init(operations, self.mounttmp, [])
- t = threading.Thread(None, lambda: llfuse.main())
- t.start()
-
- # wait until the driver is finished initializing
- operations.initlock.wait()
-
- # now check some stuff
- d1 = os.listdir(self.mounttmp)
- d1.sort()
- self.assertEqual(d1, [])
-
- d2 = os.listdir(os.path.join(self.mounttmp, self.testcollection))
- d2.sort()
- self.assertEqual(d2, ['thing1.txt'])
-
- d3 = os.listdir(self.mounttmp)
- d3.sort()
- self.assertEqual(d3, [self.testcollection])
-
- files = {}
- files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
-
- for k, v in files.items():
- with open(os.path.join(self.mounttmp, k)) as f:
- self.assertEqual(f.read(), v)
-
-
- def tearDown(self):
- # llfuse.close is buggy, so use fusermount instead.
- #llfuse.close(unmount=True)
- subprocess.call(["fusermount", "-u", self.mounttmp])
-
- os.rmdir(self.mounttmp)
- shutil.rmtree(self.keeptmp)
import unittest
import arvados
import apiclient
+import run_test_server
class PipelineTemplateTest(unittest.TestCase):
+ def setUp(self):
+ run_test_server.run()
+
def runTest(self):
- pt_uuid = arvados.api('v1').pipeline_templates().create(
+ run_test_server.authorize_with("admin")
+ pt_uuid = arvados.api('v1', cache=False).pipeline_templates().create(
body={'name':__file__}
).execute()['uuid']
self.assertEqual(len(pt_uuid), 27,
'spass_box': False,
'spass-box': [True, 'Maybe', False]
}
- update_response = arvados.api('v1').pipeline_templates().update(
+ update_response = arvados.api('v1', cache=False).pipeline_templates().update(
uuid=pt_uuid,
body={'components':components}
).execute()
self.assertEqual(update_response['name'], __file__,
'update() response has a different name (%s, not %s)'
% (update_response['name'], __file__))
- get_response = arvados.api('v1').pipeline_templates().get(
+ get_response = arvados.api('v1', cache=False).pipeline_templates().get(
uuid=pt_uuid
).execute()
self.assertEqual(get_response['components'], components,
'components got munged by server (%s -> %s)'
% (components, update_response['components']))
- delete_response = arvados.api('v1').pipeline_templates().delete(
+ delete_response = arvados.api('v1', cache=False).pipeline_templates().delete(
uuid=pt_uuid
).execute()
self.assertEqual(delete_response['uuid'], pt_uuid,
'delete() response has wrong uuid (%s, not %s)'
% (delete_response['uuid'], pt_uuid))
with self.assertRaises(apiclient.errors.HttpError):
- geterror_response = arvados.api('v1').pipeline_templates().get(
+ geterror_response = arvados.api('v1', cache=False).pipeline_templates().get(
uuid=pt_uuid
).execute()
+
+ def tearDown(self):
+ run_test_server.stop()
--- /dev/null
+import run_test_server
+import unittest
+import arvados
+import arvados.events
+import time
+
+class WebsocketTest(unittest.TestCase):
+    # Subscribes to the websocket event stream, creates a human object, and
+    # checks the expected event sequence via the self.state machine:
+    # 1 = expecting the 200 subscription status, 2 = expecting the create
+    # event for self.h, 3 = done (any further event is a failure).
+    def setUp(self):
+        run_test_server.run(websockets=True)
+
+    def on_event(self, ev):
+        if self.state == 1:
+            self.assertEqual(200, ev['status'])
+            self.state = 2
+        elif self.state == 2:
+            self.assertEqual(self.h[u'uuid'], ev[u'object_uuid'])
+            self.state = 3
+        elif self.state == 3:
+            self.fail()
+
+    def runTest(self):
+        self.state = 1
+
+        run_test_server.authorize_with("admin")
+        api = arvados.api('v1', cache=False)
+        arvados.events.subscribe(api, [['object_uuid', 'is_a', 'arvados#human']], lambda ev: self.on_event(ev))
+        # Crude synchronization: give the subscription and event delivery
+        # a second each to complete.
+        time.sleep(1)
+        self.h = api.humans().create(body={}).execute()
+        time.sleep(1)
+
+    def tearDown(self):
+        run_test_server.stop()
+Gemfile.lock
arvados*gem
+++ /dev/null
-PATH
- remote: .
- specs:
- arvados (0.1.20140228213600)
- activesupport (>= 3.2.13)
- andand
- google-api-client (~> 0.6.3)
- json (>= 1.7.7)
-
-GEM
- remote: https://rubygems.org/
- specs:
- activesupport (3.2.17)
- i18n (~> 0.6, >= 0.6.4)
- multi_json (~> 1.0)
- addressable (2.3.5)
- andand (1.3.3)
- autoparse (0.3.3)
- addressable (>= 2.3.1)
- extlib (>= 0.9.15)
- multi_json (>= 1.0.0)
- extlib (0.9.16)
- faraday (0.8.9)
- multipart-post (~> 1.2.0)
- google-api-client (0.6.4)
- addressable (>= 2.3.2)
- autoparse (>= 0.3.3)
- extlib (>= 0.9.15)
- faraday (~> 0.8.4)
- jwt (>= 0.1.5)
- launchy (>= 2.1.1)
- multi_json (>= 1.0.0)
- signet (~> 0.4.5)
- uuidtools (>= 2.1.0)
- i18n (0.6.9)
- json (1.8.1)
- jwt (0.1.11)
- multi_json (>= 1.5)
- launchy (2.4.2)
- addressable (~> 2.3)
- minitest (5.2.2)
- multi_json (1.8.4)
- multipart-post (1.2.0)
- rake (10.1.1)
- signet (0.4.5)
- addressable (>= 2.2.3)
- faraday (~> 0.8.1)
- jwt (>= 0.1.5)
- multi_json (>= 1.0.0)
- uuidtools (2.1.4)
-
-PLATFORMS
- ruby
-
-DEPENDENCIES
- arvados!
- minitest (>= 5.0.0)
- rake
s.email = 'gem-dev@curoverse.com'
s.licenses = ['Apache License, Version 2.0']
s.files = ["lib/arvados.rb"]
+ s.required_ruby_version = '>= 2.1.0'
s.add_dependency('google-api-client', '~> 0.6.3')
s.add_dependency('activesupport', '>= 3.2.13')
s.add_dependency('json', '>= 1.7.7')
end
def self.api_exec(method, parameters={})
api_method = arvados_api.send(api_models_sym).send(method.name.to_sym)
- parameters = parameters.
- merge(:api_token => arvados.config['ARVADOS_API_TOKEN'])
parameters.each do |k,v|
parameters[k] = v.to_json if v.is_a? Array or v.is_a? Hash
end
execute(:api_method => api_method,
:authenticated => false,
:parameters => parameters,
- :body => body)
+ :body => body,
+ :headers => {
+ authorization: 'OAuth2 '+arvados.config['ARVADOS_API_TOKEN']
+ })
resp = JSON.parse result.body, :symbolize_names => true
if resp[:errors]
raise Arvados::TransactionFailedError.new(resp[:errors])
/Capfile*
/config/deploy*
+# SimpleCov reports
+/coverage
+
+# Dev/test SSL certificates
+/self-signed.key
+/self-signed.pem
# gem 'rails', :git => 'git://github.com/rails/rails.git'
group :test, :development do
- gem 'sqlite3'
+ # Note: "require: false" here tells bundler not to automatically
+ # 'require' the packages during application startup. Installation is
+ # still mandatory.
+ gem 'simplecov', '~> 0.7.1', require: false
+ gem 'simplecov-rcov', require: false
end
# This might not be needed in :test and :development, but we load it
addressable (2.3.6)
andand (1.3.3)
arel (3.0.3)
- arvados (0.1.20140414145041)
+ arvados (0.1.20140513131358)
activesupport (>= 3.2.13)
andand
google-api-client (~> 0.6.3)
json (>= 1.7.7)
- arvados-cli (0.1.20140414145041)
+ arvados-cli (0.1.20140513131358)
activesupport (~> 3.2, >= 3.2.13)
andand (~> 1.3, >= 1.3.3)
arvados (~> 0.1.0)
railties (>= 3.0, < 5.0)
thor (>= 0.14, < 2.0)
json (1.8.1)
- jwt (0.1.11)
+ jwt (0.1.13)
multi_json (>= 1.5)
launchy (2.4.2)
addressable (~> 2.3)
mime-types (~> 1.16)
treetop (~> 1.4.8)
mime-types (1.25.1)
- multi_json (1.9.2)
+ multi_json (1.10.0)
multipart-post (1.2.0)
net-scp (1.2.0)
net-ssh (>= 2.6.5)
jwt (~> 0.1.4)
multi_json (~> 1.0)
rack (~> 1.2)
- oj (2.7.3)
+ oj (2.9.0)
omniauth (1.1.1)
hashie (~> 1.2)
rack
faraday (~> 0.8.1)
jwt (>= 0.1.5)
multi_json (>= 1.0.0)
+ simplecov (0.7.1)
+ multi_json (~> 1.0)
+ simplecov-html (~> 0.7.1)
+ simplecov-html (0.7.1)
+ simplecov-rcov (0.2.3)
+ simplecov (>= 0.4.1)
sprockets (2.2.2)
hike (~> 1.2)
multi_json (~> 1.0)
rack (~> 1.0)
tilt (~> 1.1, != 1.3.0)
- sqlite3 (1.3.9)
test_after_commit (0.2.3)
themes_for_rails (0.5.1)
rails (>= 3.0.0)
redis
rvm-capistrano
sass-rails (>= 3.2.0)
- sqlite3
+ simplecov (~> 0.7.1)
+ simplecov-rcov
test_after_commit
themes_for_rails
therubyracer
require File.expand_path('../config/application', __FILE__)
+begin
+ ok = PgPower
+rescue
+ abort "Hm, pg_power is missing. Make sure you use 'bundle exec rake ...'"
+end
+
Server::Application.load_tasks
show
end
- def self._contents_requires_parameters
- _index_requires_parameters.
- merge({
- include_linked: {
- type: 'boolean', required: false, default: false
- },
- })
- end
-
- def contents
- all_objects = []
- all_available = 0
-
- # Trick apply_where_limit_order_params into applying suitable
- # per-table values. *_all are the real ones we'll apply to the
- # aggregate set.
- limit_all = @limit
- offset_all = @offset
- @orders = []
-
- ArvadosModel.descendants.reject(&:abstract_class?).sort_by(&:to_s).
- each do |klass|
- case klass.to_s
- # We might expect klass==Link etc. here, but we would be
- # disappointed: when Rails reloads model classes, we get two
- # distinct classes called Link which do not equal each
- # other. But we can still rely on klass.to_s to be "Link".
- when 'ApiClientAuthorization', 'UserAgreement'
- # Do not want.
- else
- @objects = klass.readable_by(*@read_users)
- cond_sql = "#{klass.table_name}.owner_uuid = ?"
- cond_params = [@object.uuid]
- if params[:include_linked]
- cond_sql += " OR #{klass.table_name}.uuid IN (SELECT head_uuid FROM links WHERE link_class=#{klass.sanitize 'name'} AND links.tail_uuid=#{klass.sanitize @object.uuid})"
- end
- @objects = @objects.where(cond_sql, *cond_params).order("#{klass.table_name}.uuid")
- @limit = limit_all - all_objects.count
- apply_where_limit_order_params
- items_available = @objects.
- except(:limit).except(:offset).
- count(:id, distinct: true)
- all_available += items_available
- @offset = [@offset - items_available, 0].max
-
- all_objects += @objects.to_a
- end
- end
- @objects = all_objects || []
- @links = Link.where('link_class=? and tail_uuid=?'\
- ' and head_uuid in (?)',
- 'name',
- @object.uuid,
- @objects.collect(&:uuid))
- @object_list = {
- :kind => "arvados#objectList",
- :etag => "",
- :self_link => "",
- :links => @links.as_api_response(nil),
- :offset => offset_all,
- :limit => limit_all,
- :items_available => all_available,
- :items => @objects.as_api_response(nil)
- }
- render json: @object_list
- end
-
def catch_redirect_hint
if !current_user
if params.has_key?('redirect_to') then
logger.warn "User #{current_user.andand.uuid} tried to set collection owner_uuid to #{owner_uuid}"
raise ArvadosModel::PermissionDeniedError
end
+
+ # Check permissions on the collection manifest.
+ # If any signature cannot be verified, return 403 Permission denied.
+ perms_ok = true
+ api_token = current_api_client_authorization.andand.api_token
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token,
+ ttl: Rails.configuration.blob_signing_ttl,
+ }
+ resource_attrs[:manifest_text].lines.each do |entry|
+ entry.split[1..-1].each do |tok|
+ # TODO(twp): in Phase 4, fail the request if the locator
+ # lacks a permission signature. (see #2755)
+ loc = Locator.parse(tok)
+ if loc and loc.signature
+ if !api_token
+ logger.warn "No API token present; cannot verify signature on #{loc}"
+ perms_ok = false
+ elsif !Blob.verify_signature tok, signing_opts
+ logger.warn "Invalid signature on locator #{loc}"
+ perms_ok = false
+ end
+ end
+ end
+ end
+ unless perms_ok
+ raise ArvadosModel::PermissionDeniedError
+ end
+
+ # Remove any permission signatures from the manifest.
+ resource_attrs[:manifest_text]
+ .gsub!(/ [[:xdigit:]]{32}(\+[[:digit:]]+)?(\+\S+)/) { |word|
+ word.strip!
+ loc = Locator.parse(word)
+ if loc
+ " " + loc.without_signature.to_s
+ else
+ " " + word
+ end
+ }
+
+ # Save the collection with the stripped manifest.
act_as_system_user do
@object = model_class.new resource_attrs.reject { |k,v| k == :owner_uuid }
begin
@object = @existing_object || @object
end
end
-
if @object
link_attrs = {
owner_uuid: owner_uuid,
end
def show
+ if current_api_client_authorization
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: current_api_client_authorization.api_token,
+ ttl: Rails.configuration.blob_signing_ttl,
+ }
+ @object[:manifest_text]
+ .gsub!(/ [[:xdigit:]]{32}(\+[[:digit:]]+)?(\+\S+)/) { |word|
+ word.strip!
+ loc = Locator.parse(word)
+ if loc
+ " " + Blob.sign_locator(word, signing_opts)
+ else
+ " " + word
+ end
+ }
+ end
render json: @object.as_api_response(:with_data)
end
end
end
end
-
end
class Arvados::V1::GroupsController < ApplicationController
+
+ def self._contents_requires_parameters
+ _index_requires_parameters.
+ merge({
+ include_linked: {
+ type: 'boolean', required: false, default: false
+ },
+ })
+ end
+
+ def contents
+ all_objects = []
+ all_available = 0
+
+ # Trick apply_where_limit_order_params into applying suitable
+ # per-table values. *_all are the real ones we'll apply to the
+ # aggregate set.
+ limit_all = @limit
+ offset_all = @offset
+ @orders = []
+
+ [Group, Job, PipelineInstance, PipelineTemplate,
+ Human, Specimen, Trait,
+ Collection].each do |klass|
+ @objects = klass.readable_by(*@read_users)
+ cond_sql = "#{klass.table_name}.owner_uuid = ?"
+ cond_params = [@object.uuid]
+ if params[:include_linked]
+ cond_sql += " OR #{klass.table_name}.uuid IN (SELECT head_uuid FROM links WHERE link_class=#{klass.sanitize 'name'} AND links.tail_uuid=#{klass.sanitize @object.uuid})"
+ end
+ @objects = @objects.where(cond_sql, *cond_params).order("#{klass.table_name}.uuid")
+ @limit = limit_all - all_objects.count
+ apply_where_limit_order_params
+ items_available = @objects.
+ except(:limit).except(:offset).
+ count(:id, distinct: true)
+ all_available += items_available
+ @offset = [@offset - items_available, 0].max
+
+ all_objects += @objects.to_a
+ end
+ @objects = all_objects || []
+ @links = Link.where('link_class=? and tail_uuid=?'\
+ ' and head_uuid in (?)',
+ 'name',
+ @object.uuid,
+ @objects.collect(&:uuid))
+ @object_list = {
+ :kind => "arvados#objectList",
+ :etag => "",
+ :self_link => "",
+ :links => @links.as_api_response(nil),
+ :offset => offset_all,
+ :limit => limit_all,
+ :items_available => all_available,
+ :items => @objects.as_api_response(nil)
+ }
+ render json: @object_list
+ end
+
end
end
def queue
+ params[:order] ||= ['priority desc', 'created_at']
+ load_limit_offset_order_params
load_where_param
@where.merge!({
started_at: nil,
cancelled_at: nil,
success: nil
})
- params[:order] ||= ['priority desc', 'created_at']
+ load_filters_param
find_objects_for_index
index
end
--- /dev/null
+class Arvados::V1::KeepServicesController < ApplicationController
+
+ skip_before_filter :find_object_by_uuid, only: :accessible
+ skip_before_filter :render_404_if_no_object, only: :accessible
+
+ def find_objects_for_index
+ # all users can list all keep services
+ @objects = model_class.where('1=1')
+ super
+ end
+
+ def accessible
+ if request.headers['X-External-Client'] == '1'
+ @objects = model_class.where('service_type=?', 'proxy')
+ else
+ @objects = model_class.where('service_type=?', 'disk')
+ end
+ render_list
+ end
+
+end
description: "The API to interact with Arvados.",
documentationLink: "http://doc.arvados.org/api/index.html",
protocol: "rest",
- baseUrl: root_url + "/arvados/v1/",
+ baseUrl: root_url + "arvados/v1/",
basePath: "/arvados/v1/",
rootUrl: root_url,
servicePath: "arvados/v1/",
if Rails.application.config.websocket_address
discovery[:websocketUrl] = Rails.application.config.websocket_address
elsif ENV['ARVADOS_WEBSOCKETS']
- discovery[:websocketUrl] = (root_url.sub /^http/, 'ws') + "/websocket"
+ discovery[:websocketUrl] = (root_url.sub /^http/, 'ws') + "websocket"
end
ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|
limit: {
type: "integer",
description: "Maximum number of #{k.to_s.underscore.pluralize} to return.",
- default: 100,
+ default: "100",
format: "int32",
- minimum: 0,
+ minimum: "0",
location: "query",
},
offset: {
type: "integer",
description: "Number of #{k.to_s.underscore.pluralize} to skip before first returned record.",
- default: 0,
+ default: "0",
format: "int32",
- minimum: 0,
+ minimum: "0",
location: "query",
},
filters: {
else
method[:parameters][k] = {}
end
+ if !method[:parameters][k][:default].nil?
+ method[:parameters][k][:default] = 'string'
+ end
method[:parameters][k][:type] ||= 'string'
method[:parameters][k][:description] ||= ''
method[:parameters][k][:location] = (route.segment_keys.include?(k) ? 'path' : 'query')
# omniauth callback method
def create
omniauth = env['omniauth.auth']
- #logger.debug "+++ #{omniauth}"
identity_url_ok = (omniauth['info']['identity_url'].length > 0) rescue false
unless identity_url_ok
# "unauthorized":
Thread.current[:user] = user
- user.save!
+ user.save or raise Exception.new(user.errors.messages)
omniauth.delete('extra')
class ApiClient < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
has_many :api_client_authorizations
end
def scopes_allow_request?(request)
- scopes_allow? [request.method, request.path].join(' ')
+ scopes_allow? [request.request_method, request.path].join(' ')
end
def logged_attributes
-require 'assign_uuid'
+require 'has_uuid'
+
class ArvadosModel < ActiveRecord::Base
self.abstract_class = true
before_save :ensure_ownership_path_leads_to_user
before_destroy :ensure_owner_uuid_is_permitted
before_destroy :ensure_permission_to_destroy
-
before_create :update_modified_by_fields
before_update :maybe_update_modified_by_fields
after_create :log_create
# Note: This only returns permission links. It does not account for
# permissions obtained via user.is_admin or
# user.uuid==object.owner_uuid.
- has_many :permissions, :foreign_key => :head_uuid, :class_name => 'Link', :primary_key => :uuid, :conditions => "link_class = 'permission'"
+ has_many :permissions, :foreign_key => :head_uuid, :class_name => 'Link', :primary_key => :uuid, :conditions => "link_class = 'permission'", dependent: :destroy
class PermissionDeniedError < StandardError
def http_status
self.columns.select { |col| col.name == attr.to_s }.first
end
+ # Return nil if current user is not allowed to see the list of
+ # writers. Otherwise, return a list of user_ and group_uuids with
+ # write permission. (If not returning nil, current_user is always in
+ # the list because can_manage permission is needed to see the list
+ # of writers.)
+ def writable_by
+ unless (owner_uuid == current_user.uuid or
+ current_user.is_admin or
+ current_user.groups_i_can(:manage).index(owner_uuid))
+ return nil
+ end
+ [owner_uuid, current_user.uuid] + permissions.collect do |p|
+ if ['can_write', 'can_manage'].index p.name
+ p.tail_uuid
+ end
+ end.compact.uniq
+ end
+
# Return a query with read permissions restricted to the union of of the
# permissions of the members of users_list, i.e. if something is readable by
# any user in users_list, it will be readable in the query returned by this
def ensure_owner_uuid_is_permitted
raise PermissionDeniedError if !current_user
- self.owner_uuid ||= current_user.uuid
+ if respond_to? :owner_uuid=
+ self.owner_uuid ||= current_user.uuid
+ end
if self.owner_uuid_changed?
if current_user.uuid == self.owner_uuid or
current_user.can? write: self.owner_uuid
def maybe_update_modified_by_fields
update_modified_by_fields if self.changed? or self.new_record?
+ true
end
def update_modified_by_fields
self.modified_at = Time.now
self.modified_by_user_uuid = current_user ? current_user.uuid : nil
self.modified_by_client_uuid = current_api_client ? current_api_client.uuid : nil
+ true
end
def ensure_serialized_attribute_type
class AuthorizedKey < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
before_create :permission_to_set_authorized_user_uuid
class Collection < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
+require 'can_be_an_owner'
+
class Group < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
+ include CanBeAnOwner
+ after_create :invalidate_permissions_cache
+ after_update :maybe_invalidate_permissions_cache
api_accessible :user, extend: :common do |t|
t.add :name
t.add :group_class
t.add :description
+ t.add :writable_by
+ end
+
+ def maybe_invalidate_permissions_cache
+ if uuid_changed? or owner_uuid_changed?
+ # This can change users' permissions on other groups as well as
+ # this one.
+ invalidate_permissions_cache
+ end
+ end
+
+ def invalidate_permissions_cache
+ # Ensure a new group can be accessed by the appropriate users
+ # immediately after being created.
+ User.invalidate_permissions_cache
end
end
class Human < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :properties, Hash
class Job < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :script_parameters, Hash
class JobTask < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :parameters, Hash
class KeepDisk < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
before_validation :ensure_ping_secret
t.add :service_host
t.add :service_port
t.add :service_ssl_flag
+ t.add :keep_service_uuid
end
api_accessible :superuser, :extend => :user do |t|
t.add :ping_secret
@bypass_arvados_authorization = true
self.update_attributes!(o.select { |k,v|
- [:service_host,
- :service_port,
- :service_ssl_flag,
- :bytes_total,
+ [:bytes_total,
:bytes_free,
:is_readable,
:is_writable,
}.merge(last_ping_at: Time.now))
end
+ def service_host
+ KeepService.find_by_uuid(self.keep_service_uuid).andand.service_host
+ end
+
+ def service_port
+ KeepService.find_by_uuid(self.keep_service_uuid).andand.service_port
+ end
+
+ def service_ssl_flag
+ KeepService.find_by_uuid(self.keep_service_uuid).andand.service_ssl_flag
+ end
+
protected
def ensure_ping_secret
--- /dev/null
+class KeepService < ArvadosModel
+ include HasUuid
+ include KindAndEtag
+ include CommonApiTemplate
+
+ api_accessible :user, extend: :common do |t|
+ t.add :service_host
+ t.add :service_port
+ t.add :service_ssl_flag
+ t.add :service_type
+ end
+ api_accessible :superuser, :extend => :user do |t|
+ end
+
+end
class Link < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :properties, Hash
--- /dev/null
+# A Locator is used to parse and manipulate Keep locator strings.
+#
+# Locators obey the following syntax:
+#
+# locator ::= address hint*
+# address ::= digest size-hint
+# digest ::= <32 hexadecimal digits>
+# size-hint ::= "+" [0-9]+
+# hint ::= "+" hint-type hint-content
+# hint-type ::= [A-Z]
+# hint-content ::= [A-Za-z0-9@_-]+
+#
+# Individual hints may have their own required format:
+#
+# sign-hint ::= "+A" <40 lowercase hex digits> "@" sign-timestamp
+# sign-timestamp ::= <8 lowercase hex digits>
+
+class Locator
+ def initialize(hasharg, sizearg, hintarg)
+ @hash = hasharg
+ @size = sizearg
+ @hints = hintarg
+ end
+
+ # Locator.parse returns a Locator object parsed from the string tok.
+ # Returns nil if tok could not be parsed as a valid locator.
+ def self.parse(tok)
+ begin
+ Locator.parse!(tok)
+ rescue ArgumentError => e
+ nil
+ end
+ end
+
+ # Locator.parse! returns a Locator object parsed from the string tok,
+ # raising an ArgumentError if tok cannot be parsed.
+ def self.parse!(tok)
+ m = /^([[:xdigit:]]{32})(\+([[:digit:]]+))?(\+([[:upper:]][[:alnum:]+@_-]*))?$/.match(tok.strip)
+ unless m
+ raise ArgumentError.new "could not parse #{tok}"
+ end
+
+ tokhash, _, toksize, _, trailer = m[1..5]
+ tokhints = []
+ if trailer
+ trailer.split('+').each do |hint|
+ if hint =~ /^[[:upper:]][[:alnum:]@_-]+$/
+ tokhints.push(hint)
+ else
+ raise ArgumentError.new "unknown hint #{hint}"
+ end
+ end
+ end
+
+ Locator.new(tokhash, toksize, tokhints)
+ end
+
+ # Returns the signature hint supplied with this locator,
+ # or nil if the locator was not signed.
+ def signature
+ @hints.grep(/^A/).first
+ end
+
+ # Returns an unsigned Locator.
+ def without_signature
+ Locator.new(@hash, @size, @hints.reject { |o| o.start_with?("A") })
+ end
+
+ def hash
+ @hash
+ end
+
+ def size
+ @size
+ end
+
+ def hints
+ @hints
+ end
+
+ def to_s
+ [ @hash, @size, *@hints ].join('+')
+ end
+end
class Log < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :properties, Hash
class Node < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :info, Hash
class PipelineInstance < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :components, Hash
end
# Supported states for a pipeline instance
- New = 'New'
- Ready = 'Ready'
- RunningOnServer = 'RunningOnServer'
- RunningOnClient = 'RunningOnClient'
- Paused = 'Paused'
- Failed = 'Failed'
- Complete = 'Complete'
+ States =
+ [
+ (New = 'New'),
+ (Ready = 'Ready'),
+ (RunningOnServer = 'RunningOnServer'),
+ (RunningOnClient = 'RunningOnClient'),
+ (Paused = 'Paused'),
+ (Failed = 'Failed'),
+ (Complete = 'Complete'),
+ ]
def dependencies
dependency_search(self.components).keys
end
def self.queue
- self.where('active = true')
+ self.where("state = 'RunningOnServer'")
end
protected
end
def verify_status
- if active_changed?
- if self.active
- self.state = RunningOnServer
- else
- if self.components_look_ready?
- self.state = Ready
- else
- self.state = New
- end
- end
- elsif success_changed?
- if self.success
- self.active = false
- self.state = Complete
- else
- self.active = false
- self.state = Failed
- end
- elsif state_changed?
+ changed_attributes = self.changed
+
+ if 'state'.in? changed_attributes
case self.state
when New, Ready, Paused
- self.active = false
+ self.active = nil
self.success = nil
when RunningOnServer
self.active = true
self.success = nil
when RunningOnClient
- self.active = false
+ self.active = nil
self.success = nil
when Failed
self.active = false
else
return false
end
- elsif components_changed?
- if !self.state || self.state == New || !self.active
- if self.components_look_ready?
+ elsif 'success'.in? changed_attributes
+ logger.info "pipeline_instance changed_attributes has success for #{self.uuid}"
+ if self.success
+ self.active = false
+ self.state = Complete
+ else
+ self.active = false
+ self.state = Failed
+ end
+ elsif 'active'.in? changed_attributes
+ logger.info "pipeline_instance changed_attributes has active for #{self.uuid}"
+ if self.active
+ if self.state.in? [New, Ready, Paused]
+ self.state = RunningOnServer
+ end
+ else
+ if self.state == RunningOnServer # state was RunningOnServer
+ self.active = nil
+ self.state = Paused
+ elsif self.components_look_ready?
self.state = Ready
else
self.state = New
end
end
+ elsif new_record? and self.state.nil?
+ # No state, active, or success given
+ self.state = New
+ end
+
+ if new_record? or 'components'.in? changed_attributes
+ self.state ||= New
+ if self.state == New and self.components_look_ready?
+ self.state = Ready
+ end
+ end
+
+ if self.state.in?(States)
+ true
+ else
+ errors.add :state, "#{state.inspect} must be one of: [#{States.join ', '}]"
+ false
end
end
def set_state_before_save
- if !self.state || self.state == New
+ if !self.state || self.state == New || self.state == Ready || self.state == Paused
if self.active
self.state = RunningOnServer
- elsif self.components_look_ready?
+ elsif self.components_look_ready? && (!self.state || self.state == New)
self.state = Ready
- else
- self.state = New
end
end
end
class PipelineTemplate < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :components, Hash
api_accessible :user, extend: :common do |t|
t.add :name
t.add :components
+ t.add :description
end
end
class Repository < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
class Specimen < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :properties, Hash
class Trait < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :properties, Hash
+require 'can_be_an_owner'
+
class User < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
+ include CanBeAnOwner
+
serialize :prefs, Hash
has_many :api_client_authorizations
before_update :prevent_privilege_escalation
protected
+ def ensure_ownership_path_leads_to_user
+ true
+ end
+
def permission_to_update
# users must be able to update themselves (even if they are
# inactive) in order to create sessions
perm_exists = false
login_perms.each do |perm|
- if perm.properties[:username] == repo_name
+ if perm.properties['username'] == repo_name
perm_exists = true
break
end
head_uuid: vm[:uuid],
link_class: 'permission',
name: 'can_login',
- properties: {username: repo_name})
+ properties: {'username' => repo_name})
logger.info { "login permission: " + login_perm[:uuid] }
else
login_perm = login_perms.first
class VirtualMachine < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
common:
secret_token: ~
+ blob_signing_key: ~
uuid_prefix: <%= Digest::MD5.hexdigest(`hostname`).to_i(16).to_s(36)[0..4] %>
# Git repositories must be readable by api server, or you won't be
assets.version: "1.0"
arvados_theme: default
+
+ # Default: do not advertise a websocket server.
+ websocket_address: false
+
+ # You can run the websocket server separately from the regular HTTP service
+ # by setting "ARVADOS_WEBSOCKETS=ws-only" in the environment before running
+ # the websocket server. When you do this, you need to set the following
+ # configuration variable so that the primary server can give out the correct
+ # address of the dedicated websocket server:
+ #websocket_address: wss://127.0.0.1:3333/websocket
+
+ # Amount of time (in seconds) for which a blob permission signature
+ # remains valid. Default: 2 weeks (1209600 seconds)
+ blob_signing_ttl: 1209600
# 5. Section in application.default.yml called "common"
development:
+ # The blob_signing_key is a string of alphanumeric characters used
+ # to sign permission hints for Keep locators. It must be identical
+ # to the permission key given to Keep. If you run both apiserver
+ # and Keep in development, change this to a hardcoded string and
+ # make sure both systems use the same value.
+ blob_signing_key: ~
production:
# At minimum, you need a nice long randomly generated secret_token here.
+ # Use a long string of alphanumeric characters (at least 36).
secret_token: ~
+ # blob_signing_key is required and must be identical to the
+ # permission secret provisioned to Keep.
+ # Use a long string of alphanumeric characters (at least 36).
+ blob_signing_key: ~
+
uuid_prefix: bogus
# compute_node_domain: example.org
#git_repositories_dir: /var/cache/git
#git_internal_dir: /var/cache/arvados/internal.git
- # You can run the websocket server separately from the regular HTTP service
- # by setting "ARVADOS_WEBSOCKETS=ws-only" in the environment before running
- # the websocket server. When you do this, you need to set the following
- # configuration variable so that the primary server can give out the correct
- # address of the dedicated websocket server:
- #websocket_address: wss://websocket.local/websocket
+++ /dev/null
-require 'assign_uuid'
require 'eventbus'
+# See application.yml for details about configuring the websocket service.
+
Server::Application.configure do
# Enables websockets if ARVADOS_WEBSOCKETS is defined with any value. If
# ARVADOS_WEBSOCKETS=ws-only, server will only accept websocket connections
:websocket_only => (ENV['ARVADOS_WEBSOCKETS'] == "ws-only")
}
end
-
- # Define websocket_address configuration option, can be overridden in config files.
- # See application.yml.example for details.
- config.websocket_address = nil
end
+++ /dev/null
-# Be sure to restart your server when you modify this file.
-
-# Your secret key for verifying the integrity of signed cookies.
-# If you change this key, all old signed cookies will become invalid!
-# Make sure the secret is at least 30 characters and all random,
-# no regular words or you'll be exposed to dictionary attacks.
-Server::Application.config.secret_token = 'a107d661bc696fd1263e92c76e7e88d8fa44b6a9793e8f56ccfb23f17cfc95ea8894e28ed7dd132a3a6069673961fb1bf32edd7f8a94c8e88d8a7047bfacdde2'
resources :keep_disks do
post 'ping', on: :collection
end
+ resources :keep_services do
+ get 'accessible', on: :collection
+ end
resources :links
resources :logs
resources :nodes do
add_column :pipeline_instances, :components_summary, :text
end
+ PipelineInstance.reset_column_information
+
act_as_system_user do
PipelineInstance.all.each do |pi|
pi.state = PipelineInstance::New
--- /dev/null
+class CreateKeepServices < ActiveRecord::Migration
+ include CurrentApiClient
+
+ def change
+ act_as_system_user do
+ create_table :keep_services do |t|
+ t.string :uuid, :null => false
+ t.string :owner_uuid, :null => false
+ t.string :modified_by_client_uuid
+ t.string :modified_by_user_uuid
+ t.datetime :modified_at
+ t.string :service_host
+ t.integer :service_port
+ t.boolean :service_ssl_flag
+ t.string :service_type
+
+ t.timestamps
+ end
+ add_index :keep_services, :uuid, :unique => true
+
+ add_column :keep_disks, :keep_service_uuid, :string
+
+ KeepDisk.reset_column_information
+
+ services = {}
+
+ KeepDisk.find_each do |k|
+ services["#{k[:service_host]}_#{k[:service_port]}_#{k[:service_ssl_flag]}"] = {
+ service_host: k[:service_host],
+ service_port: k[:service_port],
+ service_ssl_flag: k[:service_ssl_flag],
+ service_type: 'disk',
+ owner_uuid: k[:owner_uuid]
+ }
+ end
+
+ services.each do |k, v|
+ v['uuid'] = KeepService.create(v).uuid
+ end
+
+ KeepDisk.find_each do |k|
+ k.keep_service_uuid = services["#{k[:service_host]}_#{k[:service_port]}_#{k[:service_ssl_flag]}"]['uuid']
+ k.save
+ end
+
+ remove_column :keep_disks, :service_host
+ remove_column :keep_disks, :service_port
+ remove_column :keep_disks, :service_ssl_flag
+ end
+ end
+end
--- /dev/null
+class AddDescriptionToPipelineTemplates < ActiveRecord::Migration
+ def change
+ add_column :pipeline_templates, :description, :text
+ end
+end
#
# It's strongly recommended to check this file into your version control system.
-ActiveRecord::Schema.define(:version => 20140501165548) do
+ActiveRecord::Schema.define(:version => 20140527152921) do
t.datetime "last_ping_at"
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
- t.string "service_host"
- t.integer "service_port"
- t.boolean "service_ssl_flag"
+ t.string "keep_service_uuid"
end
add_index "keep_disks", ["filesystem_uuid"], :name => "index_keep_disks_on_filesystem_uuid"
add_index "keep_disks", ["last_ping_at"], :name => "index_keep_disks_on_last_ping_at"
add_index "keep_disks", ["node_uuid"], :name => "index_keep_disks_on_node_uuid"
- add_index "keep_disks", ["service_host", "service_port", "last_ping_at"], :name => "keep_disks_service_host_port_ping_at_index"
add_index "keep_disks", ["uuid"], :name => "index_keep_disks_on_uuid", :unique => true
+ create_table "keep_services", :force => true do |t|
+ t.string "uuid", :null => false
+ t.string "owner_uuid", :null => false
+ t.string "modified_by_client_uuid"
+ t.string "modified_by_user_uuid"
+ t.datetime "modified_at"
+ t.string "service_host"
+ t.integer "service_port"
+ t.boolean "service_ssl_flag"
+ t.string "service_type"
+ t.datetime "created_at", :null => false
+ t.datetime "updated_at", :null => false
+ end
+
+ add_index "keep_services", ["uuid"], :name => "index_keep_services_on_uuid", :unique => true
+
create_table "links", :force => true do |t|
t.string "uuid"
t.string "owner_uuid"
t.string "name"
t.text "components"
t.datetime "updated_at", :null => false
+ t.text "description"
end
add_index "pipeline_templates", ["created_at"], :name => "index_pipeline_templates_on_created_at"
+++ /dev/null
-module AssignUuid
-
- def self.included(base)
- base.extend(ClassMethods)
- base.before_create :assign_uuid
- end
-
- module ClassMethods
- def uuid_prefix
- Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]
- end
- def generate_uuid
- [Server::Application.config.uuid_prefix,
- self.uuid_prefix,
- rand(2**256).to_s(36)[-15..-1]].
- join '-'
- end
- end
-
- protected
-
- def respond_to_uuid?
- self.respond_to? :uuid
- end
-
- def assign_uuid
- return true if !self.respond_to_uuid?
- return true if uuid and current_user and current_user.is_admin
- self.uuid = self.class.generate_uuid
- end
-end
--- /dev/null
+# Protect referential integrity of owner_uuid columns in other tables
+# that can refer to the uuid column in this table.
+
+module CanBeAnOwner
+
+ def self.included(base)
+ # Rails' "has_many" can prevent us from destroying the owner
+ # record when other objects refer to it.
+ ActiveRecord::Base.connection.tables.each do |t|
+ next if t == base.table_name
+ next if t == 'schema_migrations'
+ klass = t.classify.constantize
+ next unless klass and 'owner_uuid'.in?(klass.columns.collect(&:name))
+ base.has_many(t.to_sym,
+ foreign_key: :owner_uuid,
+ primary_key: :uuid,
+ dependent: :restrict)
+ end
+ # We need custom protection for changing an owner's primary
+ # key. (Apart from this restriction, admins are allowed to change
+ # UUIDs.)
+ base.validate :restrict_uuid_change_breaking_associations
+ end
+
+ protected
+
+ def restrict_uuid_change_breaking_associations
+ return true if new_record? or not uuid_changed?
+
+ # Check for objects that have my old uuid listed as their owner.
+ self.class.reflect_on_all_associations(:has_many).each do |assoc|
+ next unless assoc.foreign_key == :owner_uuid
+ if assoc.klass.where(owner_uuid: uuid_was).any?
+ errors.add(:uuid,
+ "cannot be changed on a #{self.class} that owns objects")
+ return false
+ end
+ end
+
+ # if I owned myself before, I'll just continue to own myself with
+ # my new uuid.
+ if owner_uuid == uuid_was
+ self.owner_uuid = uuid
+ end
+ end
+
+end
--- /dev/null
+module HasUuid
+
+ def self.included(base)
+ base.extend(ClassMethods)
+ base.before_create :assign_uuid
+ base.before_destroy :destroy_permission_links
+ base.has_many :links_via_head, class_name: 'Link', foreign_key: :head_uuid, primary_key: :uuid, conditions: "not (link_class = 'permission')", dependent: :restrict
+ base.has_many :links_via_tail, class_name: 'Link', foreign_key: :tail_uuid, primary_key: :uuid, conditions: "not (link_class = 'permission')", dependent: :restrict
+ end
+
+ module ClassMethods
+ def uuid_prefix
+ Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]
+ end
+ def generate_uuid
+ [Server::Application.config.uuid_prefix,
+ self.uuid_prefix,
+ rand(2**256).to_s(36)[-15..-1]].
+ join '-'
+ end
+ end
+
+ protected
+
+ def respond_to_uuid?
+ self.respond_to? :uuid
+ end
+
+ def assign_uuid
+ return true if !self.respond_to_uuid?
+ if (uuid.is_a?(String) and uuid.length>0 and
+ current_user and current_user.is_admin)
+ return true
+ end
+ self.uuid = self.class.generate_uuid
+ end
+
+ def destroy_permission_links
+ Link.destroy_all(['link_class=? and (head_uuid=? or tail_uuid=?)',
+ 'permission', uuid, uuid])
+ end
+end
module RecordFilters
# Input:
- # +filters+ Arvados filters as list of lists.
+ # +filters+ array of conditions, each being [column, operator, operand]
# +ar_table_name+ name of SQL table
#
# Output:
raise ArgumentError.new("Invalid attribute '#{attr}' in filter")
end
case operator.downcase
- when '=', '<', '<=', '>', '>=', 'like'
+ when '=', '<', '<=', '>', '>=', '!=', 'like'
if operand.is_a? String
+ if operator == '!='
+ operator = '<>'
+ end
cond_out << "#{ar_table_name}.#{attr} #{operator} ?"
if (# any operator that operates on value rather than
# representation:
param_out << operand
elsif operand.nil? and operator == '='
cond_out << "#{ar_table_name}.#{attr} is null"
+ elsif operand.nil? and operator == '!='
+ cond_out << "#{ar_table_name}.#{attr} is not null"
else
raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
"for '#{operator}' operator in filters")
end
- when 'in'
+ when 'in', 'not in'
if operand.is_a? Array
- cond_out << "#{ar_table_name}.#{attr} IN (?)"
+ cond_out << "#{ar_table_name}.#{attr} #{operator} (?)"
param_out << operand
+ if operator == 'not in' and not operand.include?(nil)
+ # explicitly allow NULL
+ cond_out[-1] = "(#{cond_out[-1]} OR #{ar_table_name}.#{attr} IS NULL)"
+ end
else
raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
"for '#{operator}' operator in filters")
require File.dirname(__FILE__) + '/../config/environment'
require 'open3'
-$redis ||= Redis.new
-LOG_BUFFER_SIZE = 2**20
+LOG_BUFFER_SIZE = 4096
class Dispatcher
include ApplicationHelper
$stderr.puts "dispatch: job #{job.uuid}"
start_banner = "dispatch: child #{t.pid} start #{Time.now.ctime.to_s}"
$stderr.puts start_banner
- $redis.set job.uuid, start_banner + "\n"
- $redis.publish job.uuid, start_banner
- $redis.publish job.owner_uuid, start_banner
@running[job.uuid] = {
stdin: i,
stderr_buf: '',
started: false,
sent_int: 0,
- job_auth: job_auth
+ job_auth: job_auth,
+ stderr_buf_to_flush: '',
+ stderr_flushed_at: 0
}
i.close
end
lines.each do |line|
$stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
$stderr.puts line
- pub_msg = "#{Time.now.ctime.to_s} #{line.strip}"
- $redis.publish job.owner_uuid, pub_msg
- $redis.publish job_uuid, pub_msg
- $redis.append job_uuid, pub_msg + "\n"
- if LOG_BUFFER_SIZE < $redis.strlen(job_uuid)
- $redis.set(job_uuid,
- $redis
- .getrange(job_uuid, (LOG_BUFFER_SIZE >> 1), -1)
- .sub(/^.*?\n/, ''))
- end
+ pub_msg = "#{Time.now.ctime.to_s} #{line.strip} \n"
+ j[:stderr_buf_to_flush] << pub_msg
+ end
+
+ if (LOG_BUFFER_SIZE < j[:stderr_buf_to_flush].size) || ((j[:stderr_flushed_at]+1) < Time.now.to_i)
+ write_log j
end
end
end
# Ensure every last drop of stdout and stderr is consumed
read_pipes
+ write_log j_done # write any remaining logs
+
if j_done[:stderr_buf] and j_done[:stderr_buf] != ''
$stderr.puts j_done[:stderr_buf] + "\n"
end
j_done[:wait_thr].value
jobrecord = Job.find_by_uuid(job_done.uuid)
- jobrecord.running = false
- jobrecord.finished_at ||= Time.now
- # Don't set 'jobrecord.success = false' because if the job failed to run due to an
- # issue with crunch-job or slurm, we want the job to stay in the queue.
- jobrecord.save!
+ if jobrecord.started_at
+ # Clean up state fields in case crunch-job exited without
+ # putting the job in a suitable "finished" state.
+ jobrecord.running = false
+ jobrecord.finished_at ||= Time.now
+ if jobrecord.success.nil?
+ jobrecord.success = false
+ end
+ jobrecord.save!
+ else
+ # Don't fail the job if crunch-job didn't even get as far as
+ # starting it. If the job failed to run due to an infrastructure
+ # issue with crunch-job or slurm, we want the job to stay in the
+ # queue.
+ end
# Invalidate the per-job auth token
j_done[:job_auth].update_attributes expires_at: Time.now
- $redis.publish job_done.uuid, "end"
-
@running.delete job_done.uuid
end
true
end
end
+
+  # Send buffered stderr to the log table. We want these records to be transient.
+ def write_log running_job
+ begin
+ if (running_job && running_job[:stderr_buf_to_flush] != '')
+ log = Log.new(object_uuid: running_job[:job].uuid,
+ event_type: 'stderr',
+ owner_uuid: running_job[:job].owner_uuid,
+ properties: {"text" => running_job[:stderr_buf_to_flush]})
+ log.save!
+ running_job[:stderr_buf_to_flush] = ''
+ running_job[:stderr_flushed_at] = Time.now.to_i
+ end
+ rescue
+ running_job[:stderr_buf] = "Failed to write logs \n"
+ running_job[:stderr_buf_to_flush] = ''
+ running_job[:stderr_flushed_at] = Time.now.to_i
+ end
+ end
+
end
# This is how crunch-job child procs know where the "refresh" trigger file is
+++ /dev/null
-#!/usr/bin/env ruby
-
-ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
-
-require File.dirname(__FILE__) + '/../config/boot'
-require File.dirname(__FILE__) + '/../config/environment'
-require 'shellwords'
-
-Commit.import_all
expires_at: 2038-01-01 00:00:00
scopes: []
+active_all_collections:
+ api_client: untrusted
+ user: active
+ api_token: activecollectionsabcdefghijklmnopqrstuvwxyz1234567
+ expires_at: 2038-01-01 00:00:00
+ scopes: ["GET /arvados/v1/collections/", "GET /arvados/v1/keep_disks"]
+
active_userlist:
api_client: untrusted
user: active
user_agreement:
uuid: b519d9cb706a29fc7ea24dbea2f05851+249025
- owner_uuid: qr1hi-tpzed-tpj2ff66551eyym
+ owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2013-12-26T19:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
foo_file:
uuid: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
- owner_uuid: qr1hi-tpzed-000000000000000
+ owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
bar_file:
uuid: fa7aeb5140e2848d39b416daeef4ffc5+45
- owner_uuid: qr1hi-tpzed-000000000000000
+ owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
baz_file:
uuid: ea10d51bcf88862dbcc36eb292017dfd+45
- owner_uuid: qr1hi-tpzed-000000000000000
+ owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+
+multilevel_collection_1:
+ uuid: 1fd08fc162a5c6413070a8bd0bffc818+150
+  owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-02-03T17:22:54Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_at: 2014-02-03T17:22:54Z
+ updated_at: 2014-02-03T17:22:54Z
+ manifest_text: ". 0:0:file1 0:0:file2 0:0:file3\n./dir1 0:0:file1 0:0:file2 0:0:file3\n./dir1/subdir 0:0:file1 0:0:file2 0:0:file3\n./dir2 0:0:file1 0:0:file2 0:0:file3\n"
+
+multilevel_collection_2:
+ # All of this collection's files are deep in subdirectories.
+ uuid: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+  owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-02-03T17:22:54Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_at: 2014-02-03T17:22:54Z
+ updated_at: 2014-02-03T17:22:54Z
+ manifest_text: "./dir1/sub1 0:0:a 0:0:b\n./dir2/sub2 0:0:c 0:0:d\n"
success: true
output: fa7aeb5140e2848d39b416daeef4ffc5+45
priority: ~
- log: d41d8cd98f00b204e9800998ecf8427e+0
+ log: ea10d51bcf88862dbcc36eb292017dfd+45
is_locked_by_uuid: ~
tasks_summary:
failed: 0
running: 1
done: 0
runtime_constraints: {}
+
+queued:
+ uuid: zzzzz-8i9sb-grx15v5mjnsyxk7
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ cancelled_at: ~
+ cancelled_by_user_uuid: ~
+ cancelled_by_client_uuid: ~
+ started_at: ~
+ finished_at: ~
+ script: foo
+ script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+ script_parameters: {}
+ running: ~
+ success: ~
+ output: ~
+ priority: ~
+ log: ~
+ is_locked_by_uuid: ~
+ tasks_summary: {}
+ runtime_constraints: {}
uuid: zzzzz-penuu-5w2o2t1q5wy7fhn
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
- service_host: keep0.qr1hi.arvadosapi.com
- service_port: 25107
- service_ssl_flag: false
+ keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
last_read_at: <%= 1.minute.ago.to_s(:db) %>
last_write_at: <%= 2.minute.ago.to_s(:db) %>
last_ping_at: <%= 3.minute.ago.to_s(:db) %>
uuid: zzzzz-penuu-4kmq58ui07xuftx
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
- service_host: keep0.qr1hi.arvadosapi.com
- service_port: 25107
- service_ssl_flag: false
+ keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
last_read_at: <%= 1.minute.ago.to_s(:db) %>
last_write_at: <%= 2.day.ago.to_s(:db) %>
last_ping_at: <%= 3.minute.ago.to_s(:db) %>
uuid: zzzzz-penuu-1ydrih9k2er5j11
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
node_uuid: zzzzz-7ekkf-2z3mc76g2q73aio
- service_host: keep1.qr1hi.arvadosapi.com
- service_port: 25107
- service_ssl_flag: false
+ keep_service_uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0
last_read_at: <%= 1.minute.ago.to_s(:db) %>
last_write_at: <%= 2.minute.ago.to_s(:db) %>
last_ping_at: <%= 3.minute.ago.to_s(:db) %>
--- /dev/null
+keep0:
+ uuid: zzzzz-bi6l4-6zhilxar6r8ey90
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ service_host: keep0.qr1hi.arvadosapi.com
+ service_port: 25107
+ service_ssl_flag: false
+ service_type: disk
+
+keep1:
+ uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ service_host: keep1.qr1hi.arvadosapi.com
+ service_port: 25107
+ service_ssl_flag: false
+ service_type: disk
+
+proxy:
+ uuid: zzzzz-bi6l4-h0a0xwut9qa6g3a
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ service_host: keep.qr1hi.arvadosapi.com
+ service_port: 25333
+ service_ssl_flag: true
+ service_type: proxy
name: "I'm a job in a folder"
properties: {}
+foo_collection_name_in_afolder:
+ uuid: zzzzz-o0j2j-foofoldername12
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_at: 2014-04-21 15:37:48 -0400
+ updated_at: 2014-04-21 15:37:48 -0400
+ tail_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ head_uuid: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+ link_class: name
+ # This should resemble the default name assigned when a
+ # Collection is added to a Folder.
+ name: "1f4b0bc7583c2a7f9102c395f4ffc5e3+45 added sometime"
+ properties: {}
+
foo_collection_tag:
uuid: zzzzz-o0j2j-eedahfaho8aphiv
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
name: can_manage
head_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
properties: {}
+
+multilevel_collection_1_readable_by_active:
+ uuid: zzzzz-o0j2j-dp1d8395ldqw22j
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-01-24 20:42:26 -0800
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-01-24 20:42:26 -0800
+ updated_at: 2014-01-24 20:42:26 -0800
+ tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ link_class: permission
+ name: can_read
+ head_uuid: 1fd08fc162a5c6413070a8bd0bffc818+150
+ properties: {}
id: 1
uuid: zzzzz-xxxxx-pshmckwoma9plh7
object_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+ event_at: <%= 1.minute.ago.to_s(:db) %>
log2: # admin changes repository2, which is owned by active user
id: 2
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+ event_at: <%= 2.minute.ago.to_s(:db) %>
log3: # admin changes specimen owned_by_spectator
id: 3
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
object_uuid: zzzzz-2x53u-3b0xxwzlbzxq5yr # specimen owned_by_spectator
object_owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r # spectator user
+ event_at: <%= 3.minute.ago.to_s(:db) %>
log4: # foo collection added, readable by active through link
id: 4
owner_uuid: zzzzz-tpzed-000000000000000 # system user
object_uuid: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45 # foo file
object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+ event_at: <%= 4.minute.ago.to_s(:db) %>
log5: # baz collection added, readable by active and spectator through group 'all users' group membership
id: 5
owner_uuid: zzzzz-tpzed-000000000000000 # system user
object_uuid: ea10d51bcf88862dbcc36eb292017dfd+45 # baz file
object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+ event_at: <%= 5.minute.ago.to_s(:db) %>
new_pipeline:
+ state: New
uuid: zzzzz-d1hrv-f4gneyn6br1xize
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
has_component_with_no_script_parameters:
+ state: Ready
uuid: zzzzz-d1hrv-1xfj6xkicf2muk2
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
components:
script_parameters: {}
has_component_with_empty_script_parameters:
+ state: Ready
uuid: zzzzz-d1hrv-jq16l10gcsnyumo
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
components:
script_parameters:
input:
required: true
- dataclass: collection
+ dataclass: Collection
+ title: "Foo/bar pair"
+ description: "Provide a collection containing at least two files."
part-two:
script: bar
script_version: master
owned_by_active_user:
uuid: zzzzz-j58dm-3zx463qyo0k4xrn
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
owned_by_private_group:
uuid: zzzzz-j58dm-5m3qwg45g3nlpu6
owner_uuid: zzzzz-j7d0g-rew6elm53kancon
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
owned_by_spectator:
uuid: zzzzz-j58dm-3b0xxwzlbzxq5yr
owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
in_afolder:
uuid: zzzzz-j58dm-7r18rnd5nzhg5yk
owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
in_asubfolder:
uuid: zzzzz-j58dm-c40lddwcqqr1ffs
owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
in_afolder_linked_from_asubfolder:
uuid: zzzzz-j58dm-5gid26432uujf79
owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
+
+owned_by_afolder_with_no_name_link:
+ uuid: zzzzz-j58dm-ypsjlol9dofwijz
+ owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ created_at: 2014-05-05 04:11:52 -0400
+ modified_at: 2014-05-05 04:11:52 -0400
assert_equal true, !!found.index('1f4b0bc7583c2a7f9102c395f4ffc5e3+45')
end
+ test "create collection with signed manifest" do
+ authorize_with :active
+ locators = %w(
+ d41d8cd98f00b204e9800998ecf8427e+0
+ acbd18db4cc2f85cedef654fccc4a4d8+3
+ ea10d51bcf88862dbcc36eb292017dfd+45)
+
+ unsigned_manifest = locators.map { |loc|
+ ". " + loc + " 0:0:foo.txt\n"
+ }.join()
+ manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+ '+' +
+ unsigned_manifest.length.to_s
+
+ # build a manifest with both signed and unsigned locators.
+ # TODO(twp): in phase 4, all locators will need to be signed, so
+ # this test should break and will need to be rewritten. Issue #2755.
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token(:active),
+ }
+ signed_manifest =
+ ". " + locators[0] + " 0:0:foo.txt\n" +
+ ". " + Blob.sign_locator(locators[1], signing_opts) + " 0:0:foo.txt\n" +
+ ". " + Blob.sign_locator(locators[2], signing_opts) + " 0:0:foo.txt\n"
+
+ post :create, {
+ collection: {
+ manifest_text: signed_manifest,
+ uuid: manifest_uuid,
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ resp = JSON.parse(@response.body)
+ assert_equal manifest_uuid, resp['uuid']
+ assert_equal 48, resp['data_size']
+ # All of the locators in the output must be signed.
+ resp['manifest_text'].lines.each do |entry|
+ m = /([[:xdigit:]]{32}\+\S+)/.match(entry)
+ if m
+ assert Blob.verify_signature m[0], signing_opts
+ end
+ end
+ end
+
+ test "create collection with signed manifest and explicit TTL" do
+ authorize_with :active
+ locators = %w(
+ d41d8cd98f00b204e9800998ecf8427e+0
+ acbd18db4cc2f85cedef654fccc4a4d8+3
+ ea10d51bcf88862dbcc36eb292017dfd+45)
+
+ unsigned_manifest = locators.map { |loc|
+ ". " + loc + " 0:0:foo.txt\n"
+ }.join()
+ manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+ '+' +
+ unsigned_manifest.length.to_s
+
+ # build a manifest with both signed and unsigned locators.
+ # TODO(twp): in phase 4, all locators will need to be signed, so
+ # this test should break and will need to be rewritten. Issue #2755.
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token(:active),
+ ttl: 3600 # 1 hour
+ }
+ signed_manifest =
+ ". " + locators[0] + " 0:0:foo.txt\n" +
+ ". " + Blob.sign_locator(locators[1], signing_opts) + " 0:0:foo.txt\n" +
+ ". " + Blob.sign_locator(locators[2], signing_opts) + " 0:0:foo.txt\n"
+
+ post :create, {
+ collection: {
+ manifest_text: signed_manifest,
+ uuid: manifest_uuid,
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ resp = JSON.parse(@response.body)
+ assert_equal manifest_uuid, resp['uuid']
+ assert_equal 48, resp['data_size']
+ # All of the locators in the output must be signed.
+ resp['manifest_text'].lines.each do |entry|
+ m = /([[:xdigit:]]{32}\+\S+)/.match(entry)
+ if m
+ assert Blob.verify_signature m[0], signing_opts
+ end
+ end
+ end
+
+ test "create fails with invalid signature" do
+ authorize_with :active
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token(:active),
+ }
+
+ # Generate a locator with a bad signature.
+ unsigned_locator = "d41d8cd98f00b204e9800998ecf8427e+0"
+ bad_locator = unsigned_locator + "+Affffffff@ffffffff"
+ assert !Blob.verify_signature(bad_locator, signing_opts)
+
+ # Creating a collection with this locator should
+ # produce 403 Permission denied.
+ unsigned_manifest = ". #{unsigned_locator} 0:0:foo.txt\n"
+ manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+ '+' +
+ unsigned_manifest.length.to_s
+
+ bad_manifest = ". #{bad_locator} 0:0:foo.txt\n"
+ post :create, {
+ collection: {
+ manifest_text: bad_manifest,
+ uuid: manifest_uuid
+ }
+ }
+
+ assert_response 403
+ end
+
+ test "create fails with uuid of signed manifest" do
+ authorize_with :active
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token(:active),
+ }
+
+ unsigned_locator = "d41d8cd98f00b204e9800998ecf8427e+0"
+ signed_locator = Blob.sign_locator(unsigned_locator, signing_opts)
+ signed_manifest = ". #{signed_locator} 0:0:foo.txt\n"
+ manifest_uuid = Digest::MD5.hexdigest(signed_manifest) +
+ '+' +
+ signed_manifest.length.to_s
+
+ post :create, {
+ collection: {
+ manifest_text: signed_manifest,
+ uuid: manifest_uuid
+ }
+ }
+
+ assert_response 422
+ end
+
+ test "multiple locators per line" do
+ authorize_with :active
+ locators = %w(
+ d41d8cd98f00b204e9800998ecf8427e+0
+ acbd18db4cc2f85cedef654fccc4a4d8+3
+ ea10d51bcf88862dbcc36eb292017dfd+45)
+
+ manifest_text = [".", *locators, "0:0:foo.txt\n"].join(" ")
+ manifest_uuid = Digest::MD5.hexdigest(manifest_text) +
+ '+' +
+ manifest_text.length.to_s
+
+ post :create, {
+ collection: {
+ manifest_text: manifest_text,
+ uuid: manifest_uuid,
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ resp = JSON.parse(@response.body)
+ assert_equal manifest_uuid, resp['uuid']
+ assert_equal 48, resp['data_size']
+ assert_equal resp['manifest_text'], manifest_text
+ end
+
+ test "multiple signed locators per line" do
+ authorize_with :active
+ locators = %w(
+ d41d8cd98f00b204e9800998ecf8427e+0
+ acbd18db4cc2f85cedef654fccc4a4d8+3
+ ea10d51bcf88862dbcc36eb292017dfd+45)
+
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token(:active),
+ }
+
+ unsigned_manifest = [".", *locators, "0:0:foo.txt\n"].join(" ")
+ manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+ '+' +
+ unsigned_manifest.length.to_s
+
+ signed_locators = locators.map { |loc| Blob.sign_locator loc, signing_opts }
+ signed_manifest = [".", *signed_locators, "0:0:foo.txt\n"].join(" ")
+
+ post :create, {
+ collection: {
+ manifest_text: signed_manifest,
+ uuid: manifest_uuid,
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ resp = JSON.parse(@response.body)
+ assert_equal manifest_uuid, resp['uuid']
+ assert_equal 48, resp['data_size']
+ # All of the locators in the output must be signed.
+ # Each line is of the form "path locator locator ... 0:0:file.txt"
+ # entry.split[1..-2] will yield just the tokens in the middle of the line
+ returned_locator_count = 0
+ resp['manifest_text'].lines.each do |entry|
+ entry.split[1..-2].each do |tok|
+ returned_locator_count += 1
+ assert Blob.verify_signature tok, signing_opts
+ end
+ end
+ assert_equal locators.count, returned_locator_count
+ end
end
--- /dev/null
+require 'test_helper'
+
+class Arvados::V1::FiltersTest < ActionController::TestCase
+ test '"not in" filter passes null values' do
+ @controller = Arvados::V1::GroupsController.new
+ authorize_with :admin
+ get :index, {
+ filters: [ ['group_class', 'not in', ['folder']] ],
+ controller: 'groups',
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_includes(found.collect(&:group_class), nil,
+ "'group_class not in ['folder']' filter should pass null")
+ end
+end
end
end
+ test 'get writable_by list for owned group' do
+ authorize_with :active
+ get :show, {
+ id: groups(:afolder).uuid,
+ format: :json
+ }
+ assert_response :success
+ assert_not_nil(json_response['writable_by'],
+ "Should receive uuid list in 'writable_by' field")
+ assert_includes(json_response['writable_by'], users(:active).uuid,
+ "owner should be included in writable_by list")
+ end
+
+ test 'no writable_by list for group with read-only access' do
+ authorize_with :rominiadmin
+ get :show, {
+ id: groups(:testusergroup_admins).uuid,
+ format: :json
+ }
+ assert_response :success
+ assert_nil(json_response['writable_by'],
+ "Should not receive uuid list in 'writable_by' field")
+ end
+
+ test 'get writable_by list by admin user' do
+ authorize_with :admin
+ get :show, {
+ id: groups(:testusergroup_admins).uuid,
+ format: :json
+ }
+ assert_response :success
+ assert_not_nil(json_response['writable_by'],
+ "Should receive uuid list in 'writable_by' field")
+ assert_includes(json_response['writable_by'],
+ users(:admin).uuid,
+ "Current user should be included in 'writable_by' field")
+ end
end
'zzzzz-8i9sb-pshmckwoma9plh7']
end
+ test "search jobs by uuid with 'not in' query" do
+ exclude_uuids = [jobs(:running).uuid,
+ jobs(:running_cancelled).uuid]
+ authorize_with :active
+ get :index, {
+ filters: [['uuid', 'not in', exclude_uuids]]
+ }
+ assert_response :success
+ found = assigns(:objects).collect(&:uuid)
+ assert_not_empty found, "'not in' query returned nothing"
+ assert_empty(found & exclude_uuids,
+ "'not in' query returned uuids I asked not to get")
+ end
+
+ ['=', '!='].each do |operator|
+ [['uuid', 'zzzzz-8i9sb-pshmckwoma9plh7'],
+ ['output', nil]].each do |attr, operand|
+ test "search jobs with #{attr} #{operator} #{operand.inspect} query" do
+ authorize_with :active
+ get :index, {
+ filters: [[attr, operator, operand]]
+ }
+ assert_response :success
+ values = assigns(:objects).collect { |x| x.send(attr) }
+ assert_not_empty values, "query should return non-empty result"
+ if operator == '='
+ assert_empty values - [operand], "query results do not satisfy query"
+ else
+ assert_empty values & [operand], "query results do not satisfy query"
+ end
+ end
+ end
+ end
+
test "search jobs by started_at with < query" do
authorize_with :active
get :index, {
assert_response :success
end
+ [:active, :admin].each do |which_token|
+ test "get job queue as #{which_token} user" do
+ authorize_with which_token
+ get :queue
+ assert_response :success
+ assert_operator 1, :<=, assigns(:objects).count
+ end
+ test "get job queue as #{which_token} user, with a filter" do
+ authorize_with which_token
+ get :queue, { filters: [['script','=','foo']] }
+ assert_response :success
+ assert_equal ['foo'], assigns(:objects).collect(&:script).uniq
+ end
+ end
+
end
authorize_with :admin
post :ping, {
ping_secret: '', # required by discovery doc, but ignored
- service_host: '::1',
- service_port: 55555,
- service_ssl_flag: false,
filesystem_uuid: 'eb1e77a1-db84-4193-b6e6-ca2894f67d5f'
}
assert_response :success
authorize_with :admin
opts = {
ping_secret: '',
- service_host: '::1',
- service_port: 55555,
- service_ssl_flag: false
}
post :ping, opts
assert_response :success
test "refuse to add keep disk without admin token" do
post :ping, {
ping_secret: '',
- service_host: '::1',
- service_port: 55555,
- service_ssl_flag: false
}
assert_response 404
end
assert_response :success
items = JSON.parse(@response.body)['items']
assert_not_equal 0, items.size
+
+ # Check these are still included
+ assert items[0]['service_host']
+ assert items[0]['service_port']
end
# active user sees non-secret attributes of keep disks
end
end
- test "search keep_disks by service_port with >= query" do
- authorize_with :active
- get :index, {
- filters: [['service_port', '>=', 25107]]
- }
- assert_response :success
- assert_equal true, assigns(:objects).any?
- end
-
- test "search keep_disks by service_port with < query" do
- authorize_with :active
- get :index, {
- filters: [['service_port', '<', 25107]]
- }
- assert_response :success
- assert_equal false, assigns(:objects).any?
- end
-
- test "search keep_disks with 'any' operator" do
+ test "search keep_services with 'any' operator" do
authorize_with :active
get :index, {
where: { any: ['contains', 'o2t1q5w'] }
assert_equal true, !!found.index('zzzzz-penuu-5w2o2t1q5wy7fhn')
end
+
end
--- /dev/null
+require 'test_helper'
+
+class Arvados::V1::KeepServicesControllerTest < ActionController::TestCase
+
+ test "search keep_services by service_port with < query" do
+ authorize_with :active
+ get :index, {
+ filters: [['service_port', '<', 25107]]
+ }
+ assert_response :success
+ assert_equal false, assigns(:objects).any?
+ end
+
+ test "search keep_disks by service_port with >= query" do
+ authorize_with :active
+ get :index, {
+ filters: [['service_port', '>=', 25107]]
+ }
+ assert_response :success
+ assert_equal true, assigns(:objects).any?
+ end
+
+end
--- /dev/null
+require 'test_helper'
+
+class KeepProxyTest < ActionDispatch::IntegrationTest
+ test "request keep disks" do
+ get "/arvados/v1/keep_services/accessible", {:format => :json}, auth(:active)
+ assert_response :success
+ services = json_response['items']
+
+ assert_equal 2, services.length
+ assert_equal 'disk', services[0]['service_type']
+ assert_equal 'disk', services[1]['service_type']
+
+ get "/arvados/v1/keep_services/accessible", {:format => :json}, auth(:active).merge({'HTTP_X_EXTERNAL_CLIENT' => '1'})
+ assert_response :success
+ services = json_response['items']
+
+ assert_equal 1, services.length
+
+ assert_equal "zzzzz-bi6l4-h0a0xwut9qa6g3a", services[0]['uuid']
+ assert_equal "keep.qr1hi.arvadosapi.com", services[0]['service_host']
+ assert_equal 25333, services[0]['service_port']
+ assert_equal true, services[0]['service_ssl_flag']
+ assert_equal 'proxy', services[0]['service_type']
+ end
+end
--- /dev/null
+require 'test_helper'
+
+class UserSessionsApiTest < ActionDispatch::IntegrationTest
+ test 'create new user during omniauth callback' do
+ mock = {
+ 'provider' => 'josh_id',
+ 'uid' => 'https://edward.example.com',
+ 'info' => {
+ 'identity_url' => 'https://edward.example.com',
+ 'name' => 'Edward Example',
+ 'first_name' => 'Edward',
+ 'last_name' => 'Example',
+ 'email' => 'edward@example.com',
+ },
+ }
+ client_url = 'https://wb.example.com'
+ post('/auth/josh_id/callback',
+ {return_to: client_url},
+ {'omniauth.auth' => mock})
+ assert_response :redirect, 'Did not redirect to client with token'
+ assert_equal(0, @response.redirect_url.index(client_url),
+                 'Redirected to wrong address after successful login: was ' +
+ @response.redirect_url + ', expected ' + client_url + '[...]')
+ assert_not_nil(@response.redirect_url.index('api_token='),
+ 'Expected api_token in query string of redirect url ' +
+ @response.redirect_url)
+ end
+end
ws.on :open do |event|
opened = true
if timeout
- EM::Timer.new 3 do
+ EM::Timer.new 4 do
too_long = true
EM.stop_event_loop
end
ENV["RAILS_ENV"] = "test"
+unless ENV["NO_COVERAGE_TEST"]
+ begin
+ require 'simplecov'
+ require 'simplecov-rcov'
+ class SimpleCov::Formatter::MergedFormatter
+ def format(result)
+ SimpleCov::Formatter::HTMLFormatter.new.format(result)
+ SimpleCov::Formatter::RcovFormatter.new.format(result)
+ end
+ end
+ SimpleCov.formatter = SimpleCov::Formatter::MergedFormatter
+ SimpleCov.start do
+ add_filter '/test/'
+ add_filter 'initializers/secret_token'
+ add_filter 'initializers/omniauth'
+ end
+ rescue Exception => e
+ $stderr.puts "SimpleCov unavailable (#{e}). Proceeding without."
+ end
+end
+
require File.expand_path('../../config/environment', __FILE__)
require 'rails/test_help'
--- /dev/null
+require 'test_helper'
+
+class ArvadosModelTest < ActiveSupport::TestCase
+ fixtures :all
+
+ def create_with_attrs attrs
+ a = Specimen.create({material: 'caloric'}.merge(attrs))
+ a if a.valid?
+ end
+
+ test 'non-admin cannot assign uuid' do
+ set_user_from_auth :active_trustedclient
+ want_uuid = Specimen.generate_uuid
+ a = create_with_attrs(uuid: want_uuid)
+ assert_not_equal want_uuid, a.uuid, "Non-admin should not assign uuid."
+ assert a.uuid.length==27, "Auto assigned uuid length is wrong."
+ end
+
+ test 'admin can assign valid uuid' do
+ set_user_from_auth :admin_trustedclient
+ want_uuid = Specimen.generate_uuid
+ a = create_with_attrs(uuid: want_uuid)
+ assert_equal want_uuid, a.uuid, "Admin should assign valid uuid."
+ assert a.uuid.length==27, "Auto assigned uuid length is wrong."
+ end
+
+ test 'admin cannot assign empty uuid' do
+ set_user_from_auth :admin_trustedclient
+ a = create_with_attrs(uuid: "")
+ assert_not_equal "", a.uuid, "Admin should not assign empty uuid."
+ assert a.uuid.length==27, "Auto assigned uuid length is wrong."
+ end
+
+end
--- /dev/null
+require 'test_helper'
+
+class KeepServiceTest < ActiveSupport::TestCase
+ # test "the truth" do
+ # assert true
+ # end
+end
fixtures :all
setup do
- Thread.current[:user] = users(:active)
+ set_user_from_auth :admin_trustedclient
end
test 'name links with the same tail_uuid must be unique' do
assert a.invalid?, "invalid name was accepted as valid?"
end
end
+
+ test "cannot delete an object referenced by links" do
+ ob = Specimen.create
+ link = Link.create(tail_uuid: users(:active).uuid,
+ head_uuid: ob.uuid,
+ link_class: 'test',
+ name: 'test')
+ assert_raises(ActiveRecord::DeleteRestrictionError,
+ "should not delete #{ob.uuid} with link #{link.uuid}") do
+ ob.destroy
+ end
+ end
end
--- /dev/null
+require 'test_helper'
+
+# Test referential integrity: ensure we cannot leave any object
+# without owners by deleting a user or group.
+#
+# "o" is an owner.
+# "i" is an item.
+
+class OwnerTest < ActiveSupport::TestCase
+  fixtures :users, :groups, :specimens
+
+  setup do
+    # Everything below (creating owners, rewriting uuids) needs a
+    # trusted admin identity.
+    set_user_from_auth :admin_trustedclient
+  end
+
+  # NOTE(review): presumably these force the model classes to load
+  # before the metaprogrammed test definitions below -- confirm.
+  User.all
+  Group.all
+  # Generate the same battery of ownership tests for both owner classes.
+  [User, Group].each do |o_class|
+    test "create object with legit #{o_class} owner" do
+      o = o_class.create
+      i = Specimen.create(owner_uuid: o.uuid)
+      assert i.valid?, "new item should pass validation"
+      assert i.uuid, "new item should have an ID"
+      assert Specimen.where(uuid: i.uuid).any?, "new item should really be in DB"
+    end
+
+    test "create object with non-existent #{o_class} owner" do
+      assert_raises(ActiveRecord::RecordInvalid,
+                    "create should fail with random owner_uuid") do
+        i = Specimen.create!(owner_uuid: o_class.generate_uuid)
+      end
+
+      i = Specimen.create(owner_uuid: o_class.generate_uuid)
+      assert !i.valid?, "object with random owner_uuid should not be valid?"
+
+      i = Specimen.new(owner_uuid: o_class.generate_uuid)
+      assert !i.valid?, "new item should not pass validation"
+      assert !i.uuid, "new item should not have an ID"
+    end
+
+    # Ownership may be transferred between any combination of User and
+    # Group owners.
+    [User, Group].each do |new_o_class|
+      test "change owner from legit #{o_class} to legit #{new_o_class} owner" do
+        o = o_class.create
+        i = Specimen.create(owner_uuid: o.uuid)
+        new_o = new_o_class.create
+        assert(Specimen.where(uuid: i.uuid).any?,
+               "new item should really be in DB")
+        assert(i.update_attributes(owner_uuid: new_o.uuid),
+               "should change owner_uuid from #{o.uuid} to #{new_o.uuid}")
+      end
+    end
+
+    test "delete #{o_class} that owns nothing" do
+      o = o_class.create
+      assert(o_class.where(uuid: o.uuid).any?,
+             "new #{o_class} should really be in DB")
+      assert(o.destroy, "should delete #{o_class} that owns nothing")
+      assert_equal(false, o_class.where(uuid: o.uuid).any?,
+                   "#{o.uuid} should not be in DB after deleting")
+    end
+
+    test "change uuid of #{o_class} that owns nothing" do
+      # (we're relying on our admin credentials here)
+      o = o_class.create
+      assert(o_class.where(uuid: o.uuid).any?,
+             "new #{o_class} should really be in DB")
+      old_uuid = o.uuid
+      # Replace the last 10 characters with random base-36 digits.
+      new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+      assert(o.update_attributes(uuid: new_uuid),
+             "should change #{o_class} uuid from #{old_uuid} to #{new_uuid}")
+      assert_equal(false, o_class.where(uuid: old_uuid).any?,
+                   "#{old_uuid} should disappear when renamed to #{new_uuid}")
+    end
+  end
+
+  # Fixture owners that already own objects must be protected from
+  # deletion and renaming.
+  ['users(:active)', 'groups(:afolder)'].each do |ofixt|
+    test "delete #{ofixt} that owns other objects" do
+      o = eval ofixt
+      assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
+                   "need something to be owned by #{o.uuid} for this test")
+
+      assert_raises(ActiveRecord::DeleteRestrictionError,
+                    "should not delete #{ofixt} that owns objects") do
+        o.destroy
+      end
+    end
+
+    test "change uuid of #{ofixt} that owns other objects" do
+      o = eval ofixt
+      assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
+                   "need something to be owned by #{o.uuid} for this test")
+      old_uuid = o.uuid
+      new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+      assert(!o.update_attributes(uuid: new_uuid),
+             "should not change uuid of #{ofixt} that owns objects")
+    end
+  end
+
+  # Self-ownership is a special case: a User owning itself must still
+  # be deletable and renamable.
+  test "delete User that owns self" do
+    o = User.create
+    assert User.where(uuid: o.uuid).any?, "new User should really be in DB"
+    assert_equal(true, o.update_attributes(owner_uuid: o.uuid),
+                 "setting owner to self should work")
+    assert(o.destroy, "should delete User that owns self")
+    assert_equal(false, User.where(uuid: o.uuid).any?,
+                 "#{o.uuid} should not be in DB after deleting")
+  end
+
+  test "change uuid of User that owns self" do
+    o = User.create
+    assert User.where(uuid: o.uuid).any?, "new User should really be in DB"
+    assert_equal(true, o.update_attributes(owner_uuid: o.uuid),
+                 "setting owner to self should work")
+    old_uuid = o.uuid
+    new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+    assert(o.update_attributes(uuid: new_uuid),
+           "should change uuid of User that owns self")
+    assert_equal(false, User.where(uuid: old_uuid).any?,
+                 "#{old_uuid} should not be in DB after deleting")
+    assert_equal(true, User.where(uuid: new_uuid).any?,
+                 "#{new_uuid} should be in DB after renaming")
+    assert_equal(new_uuid, User.where(uuid: new_uuid).first.owner_uuid,
+                 "#{new_uuid} should be its own owner in DB after renaming")
+  end
+
+end
test "check active and success for a pipeline in new state" do
pi = pipeline_instances :new_pipeline
- assert !pi.active, 'expected active to be false for a new pipeline'
- assert !pi.success, 'expected success to be false for a new pipeline'
- assert !pi.state, 'expected state to be nil because the fixture had no state specified'
+ assert !pi.active, 'expected active to be false for :new_pipeline'
+ assert !pi.success, 'expected success to be false for :new_pipeline'
+ assert_equal 'New', pi.state, 'expected state to be New for :new_pipeline'
# save the pipeline and expect state to be New
Thread.current[:user] = users(:admin)
assert !pi.success, 'expected success to be false for a new pipeline'
end
+ test "check active and success for a newly created pipeline" do
+ set_user_from_auth :active
+
+ pi = PipelineInstance.create(state: 'Ready')
+ pi.save
+
+ assert pi.valid?, 'expected newly created empty pipeline to be valid ' + pi.errors.messages.to_s
+ assert !pi.active, 'expected active to be false for a new pipeline'
+ assert !pi.success, 'expected success to be false for a new pipeline'
+ assert_equal 'Ready', pi.state, 'expected state to be Ready for a new empty pipeline'
+ end
+
test "update attributes for pipeline" do
Thread.current[:user] = users(:admin)
assert !pi.success, 'expected success to be false for a new pipeline'
pi.active = true
- pi.save
+ assert_equal true, pi.save, 'expected pipeline instance to save, but ' + pi.errors.messages.to_s
pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
assert_equal PipelineInstance::RunningOnServer, pi.state, 'expected state to be RunningOnServer after updating active to true'
assert pi.active, 'expected active to be true after update'
Thread.current[:user] = users(:active)
# Make sure we go through the "active_changed? and active" code:
- pi.update_attributes active: true
- pi.update_attributes active: false
- assert_equal PipelineInstance::Ready, pi.state
+ assert_equal true, pi.update_attributes(active: true), pi.errors.messages
+ assert_equal true, pi.update_attributes(active: false), pi.errors.messages
+ assert_equal PipelineInstance::Paused, pi.state
end
end
end
--- /dev/null
+#! /usr/bin/env python
+
+import arvados
+
+import argparse
+import cgi
+import csv
+import json
+import logging
+import math
+import pprint
+import re
+import threading
+import urllib2
+
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+from collections import defaultdict, Counter
+from functools import partial
+from operator import itemgetter
+from SocketServer import ThreadingMixIn
+
+arv = arvados.api('v1')
+
+# Adapted from http://stackoverflow.com/questions/4180980/formatting-data-quantity-capacity-as-string
+byteunits = ('B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')
+def fileSizeFormat(value):
+  """Format a byte count as a human-readable string, e.g. '  1.50 KiB'."""
+  exponent = 0 if value == 0 else int(math.log(value, 1024))
+  return "%7.2f %-3s" % (float(value) / pow(1024, exponent),
+                         byteunits[exponent])
+
+def percentageFloor(x):
+  """ Returns a float which is the input rounded down to the nearest 0.01.
+
+e.g. percentageFloor(0.941354) = 0.94
+"""
+  return math.floor(x*100) / 100.0
+
+
+def byteSizeFromValidUuid(valid_uuid):
+  """Extract the block size in bytes from a canonical 'hash+size' locator."""
+  return int(valid_uuid.split('+')[1])
+
+class maxdict(dict):
+  """A dictionary that holds the largest value entered for each key."""
+  def addValue(self, key, value):
+    # dict.get returns None for a missing key; in Python 2,
+    # max(None, value) == value, so a first insert just stores value.
+    dict.__setitem__(self, key, max(dict.get(self, key), value))
+  def addValues(self, kv_pairs):
+    # Merge an iterable of (key, value) pairs, keeping per-key maxima.
+    for key,value in kv_pairs:
+      self.addValue(key, value)
+  def addDict(self, d):
+    # Merge another mapping, keeping per-key maxima.
+    self.addValues(d.items())
+
+class CollectionInfo:
+  """Accumulates everything learned about one collection.
+
+  Instances are interned in all_by_uuid; use CollectionInfo.get() to
+  fetch-or-create by uuid rather than calling the constructor twice.
+  """
+  # Replication level assumed when a persister link has no explicit
+  # 'replication' property (see readLinks).
+  DEFAULT_PERSISTER_REPLICATION_LEVEL=2
+  all_by_uuid = {}
+
+  def __init__(self, uuid):
+    if CollectionInfo.all_by_uuid.has_key(uuid):
+      raise ValueError('Collection for uuid "%s" already exists.' % uuid)
+    self.uuid = uuid
+    self.block_uuids = set()  # uuids of keep blocks in this collection
+    self.reader_uuids = set()  # uuids of users who can read this collection
+    self.persister_uuids = set()  # uuids of users who want this collection saved
+    # map from user uuid to replication level they desire
+    self.persister_replication = maxdict()
+
+    # The whole api response in case we need anything else later.
+    self.api_response = []
+    CollectionInfo.all_by_uuid[uuid] = self
+
+  def byteSize(self):
+    # Unreplicated total: the sum of all member block sizes.
+    return sum(map(byteSizeFromValidUuid, self.block_uuids))
+
+  def __str__(self):
+    return ('CollectionInfo uuid: %s\n'
+            ' %d block(s) containing %s\n'
+            ' reader_uuids: %s\n'
+            ' persister_replication: %s' %
+            (self.uuid,
+             len(self.block_uuids),
+             fileSizeFormat(self.byteSize()),
+             pprint.pformat(self.reader_uuids, indent = 15),
+             pprint.pformat(self.persister_replication, indent = 15)))
+
+  @staticmethod
+  def get(uuid):
+    # Fetch-or-create accessor; the constructor registers the new
+    # instance in all_by_uuid as a side effect.
+    if not CollectionInfo.all_by_uuid.has_key(uuid):
+      CollectionInfo(uuid)
+    return CollectionInfo.all_by_uuid[uuid]
+
+
+def extractUuid(candidate):
+  """ Returns a canonical (hash+size) uuid from a valid uuid, or None if candidate is not a valid uuid."""
+  match = re.match('([0-9a-fA-F]{32}\+[0-9]+)(\+[^+]+)*$', candidate)
+  # re.match returns None on failure, so this is None or the hash+size part.
+  return match and match.group(1)
+
+def checkUserIsAdmin():
+  """Warn if the current user is not an admin; exit unless
+  --no-require-admin-user was given."""
+  current_user = arv.users().current().execute()
+
+  if not current_user['is_admin']:
+    log.warning('Current user %s (%s - %s) does not have '
+                'admin access and will not see much of the data.',
+                current_user['full_name'],
+                current_user['email'],
+                current_user['uuid'])
+    if args.require_admin_user:
+      log.critical('Exiting, rerun with --no-require-admin-user '
+                   'if you wish to continue.')
+      exit(1)
+
+def buildCollectionsList():
+  """Return the list of collection uuids to examine.
+
+  Honors --uuid (examine a single collection) and --max-api-results.
+  """
+  if args.uuid:
+    return [args.uuid,]
+  else:
+    collections_list_response = arv.collections().list(limit=args.max_api_results).execute()
+
+    print ('Returned %d of %d collections.' %
+           (len(collections_list_response['items']),
+            collections_list_response['items_available']))
+
+    return [item['uuid'] for item in collections_list_response['items']]
+
+
+def readCollections(collection_uuids):
+  """Fetch each collection and record its keep block uuids.
+
+  Populates CollectionInfo.all_by_uuid entries and stores each raw API
+  response on the corresponding CollectionInfo.
+  """
+  for collection_uuid in collection_uuids:
+    collection_block_uuids = set()
+    collection_response = arv.collections().get(uuid=collection_uuid).execute()
+    collection_info = CollectionInfo.get(collection_uuid)
+    collection_info.api_response = collection_response
+    manifest_lines = collection_response['manifest_text'].split('\n')
+
+    if args.verbose:
+      print 'Manifest text for %s:' % collection_uuid
+      pprint.pprint(manifest_lines)
+
+    for manifest_line in manifest_lines:
+      if manifest_line:
+        manifest_tokens = manifest_line.split(' ')
+        if args.verbose:
+          print 'manifest tokens: ' + pprint.pformat(manifest_tokens)
+        # First token is the stream name; the rest are block locators
+        # and file tokens (non-locators are filtered out below).
+        stream_name = manifest_tokens[0]
+
+        line_block_uuids = set(filter(None,
+                                      [extractUuid(candidate)
+                                       for candidate in manifest_tokens[1:]]))
+        collection_info.block_uuids.update(line_block_uuids)
+
+        # file_tokens = [token
+        #                for token in manifest_tokens[1:]
+        #                if extractUuid(token) is None]
+
+        # # Sort file tokens by start position in case they aren't already
+        # file_tokens.sort(key=lambda file_token: int(file_token.split(':')[0]))
+
+        # if args.verbose:
+        #   print 'line_block_uuids: ' + pprint.pformat(line_block_uuids)
+        #   print 'file_tokens: ' + pprint.pformat(file_tokens)
+
+
+def readLinks():
+  """Read links for every known collection.
+
+  'permission' links feed reader_uuids; 'resources' links feed
+  persister_uuids and persister_replication.
+  """
+  link_classes = set()
+
+  for collection_uuid,collection_info in CollectionInfo.all_by_uuid.items():
+    # TODO(misha): We may not be seeing all the links, but since items
+    # available does not return an accurate number, I don't know how
+    # to confirm that we saw all of them.
+    collection_links_response = arv.links().list(where={'head_uuid':collection_uuid}).execute()
+    link_classes.update([link['link_class'] for link in collection_links_response['items']])
+    for link in collection_links_response['items']:
+      if link['link_class'] == 'permission':
+        collection_info.reader_uuids.add(link['tail_uuid'])
+      elif link['link_class'] == 'resources':
+        # Fall back to the default level when the link doesn't carry an
+        # explicit 'replication' property.
+        replication_level = link['properties'].get(
+          'replication',
+          CollectionInfo.DEFAULT_PERSISTER_REPLICATION_LEVEL)
+        collection_info.persister_replication.addValue(
+          link['tail_uuid'],
+          replication_level)
+        collection_info.persister_uuids.add(link['tail_uuid'])
+
+  print 'Found the following link classes:'
+  pprint.pprint(link_classes)
+
+def reportMostPopularCollections():
+  """Print the ten collections with the most reader/persister interest.
+
+  A persister counts 10x as much as a reader in the popularity score.
+  """
+  most_popular_collections = sorted(
+    CollectionInfo.all_by_uuid.values(),
+    key=lambda info: len(info.reader_uuids) + 10 * len(info.persister_replication),
+    reverse=True)[:10]
+
+  print 'Most popular Collections:'
+  for collection_info in most_popular_collections:
+    print collection_info
+
+
+def buildMaps():
+  """Populate the module-level block/reader/persister cross-reference maps."""
+  for collection_uuid,collection_info in CollectionInfo.all_by_uuid.items():
+    # Add the block holding the manifest itself for all calculations
+    block_uuids = collection_info.block_uuids.union([collection_uuid,])
+    for block_uuid in block_uuids:
+      block_to_collections[block_uuid].add(collection_uuid)
+      block_to_readers[block_uuid].update(collection_info.reader_uuids)
+      block_to_persisters[block_uuid].update(collection_info.persister_uuids)
+      block_to_persister_replication[block_uuid].addDict(
+        collection_info.persister_replication)
+    for reader_uuid in collection_info.reader_uuids:
+      reader_to_collections[reader_uuid].add(collection_uuid)
+      reader_to_blocks[reader_uuid].update(block_uuids)
+    for persister_uuid in collection_info.persister_uuids:
+      persister_to_collections[persister_uuid].add(collection_uuid)
+      persister_to_blocks[persister_uuid].update(block_uuids)
+
+
+def itemsByValueLength(original):
+  """Return the dict's items sorted by len(value), largest first."""
+  return sorted(original.items(),
+                key=lambda item:len(item[1]),
+                reverse=True)
+
+
+def reportBusiestUsers():
+ busiest_readers = itemsByValueLength(reader_to_collections)
+ print 'The busiest readers are:'
+ for reader,collections in busiest_readers:
+ print '%s reading %d collections.' % (reader, len(collections))
+ busiest_persisters = itemsByValueLength(persister_to_collections)
+ print 'The busiest persisters are:'
+ for persister,collections in busiest_persisters:
+ print '%s reading %d collections.' % (persister, len(collections))
+
+
+def blockDiskUsage(block_uuid):
+  """Returns the disk usage of a block given its uuid.
+
+  Will return 0 before reading the contents of the keep servers.
+  """
+  return byteSizeFromValidUuid(block_uuid) * block_to_replication[block_uuid]
+
+def blockPersistedUsage(user_uuid, block_uuid):
+  # Bytes this user is responsible for on this block: block size times
+  # the replication level that user requested (0 if none).
+  return (byteSizeFromValidUuid(block_uuid) *
+          block_to_persister_replication[block_uuid].get(user_uuid, 0))
+
+# Memoization cache for computeWeightedReplicationCosts, keyed on the
+# stringified sorted (level, count) histogram of the input.
+memo_computeWeightedReplicationCosts = {}
+def computeWeightedReplicationCosts(replication_levels):
+  """Computes the relative cost of varied replication levels.
+
+  replication_levels: a tuple of integers representing the desired
+  replication level. If n users want a replication level of x then x
+  should appear n times in replication_levels.
+
+  Returns a dictionary from replication level to cost.
+
+  The basic thinking is that the cost of replicating at level x should
+  be shared by everyone who wants replication of level x or higher.
+
+  For example, if we have two users who want 1 copy, one user who
+  wants 3 copies and two users who want 6 copies:
+  the input would be [1, 1, 3, 6, 6] (or any permutation)
+
+  The cost of the first copy is shared by all 5 users, so they each
+  pay 1 copy / 5 users = 0.2.
+  The cost of the second and third copies shared by 3 users, so they
+  each pay 2 copies / 3 users = 0.67 (plus the above costs)
+  The cost of the fourth, fifth and sixth copies is shared by two
+  users, so they each pay 3 copies / 2 users = 1.5 (plus the above costs)
+
+  Here are some other examples:
+  computeWeightedReplicationCosts([1,]) -> {1:1.0}
+  computeWeightedReplicationCosts([2,]) -> {2:2.0}
+  computeWeightedReplicationCosts([1,1]) -> {1:0.5}
+  computeWeightedReplicationCosts([2,2]) -> {2:1.0}
+  computeWeightedReplicationCosts([1,2]) -> {1:0.5,2:1.5}
+  computeWeightedReplicationCosts([1,3]) -> {1:0.5,3:2.5}
+  computeWeightedReplicationCosts([1,3,6,6,10]) -> {1:0.2,3:0.7,6:1.7,10:5.7}
+  """
+  replication_level_counts = sorted(Counter(replication_levels).items())
+
+  memo_key = str(replication_level_counts)
+
+  if not memo_key in memo_computeWeightedReplicationCosts:
+    last_level = 0
+    current_cost = 0
+    total_interested = float(sum(map(itemgetter(1), replication_level_counts)))
+    cost_for_level = {}
+    for replication_level, count in replication_level_counts:
+      copies_added = replication_level - last_level
+      # compute marginal cost from last level and add it to the last cost
+      current_cost += copies_added / total_interested
+      cost_for_level[replication_level] = current_cost
+      # update invariants
+      last_level = replication_level
+      total_interested -= count
+    memo_computeWeightedReplicationCosts[memo_key] = cost_for_level
+
+  return memo_computeWeightedReplicationCosts[memo_key]
+
+def blockPersistedWeightedUsage(user_uuid, block_uuid):
+  # Like blockPersistedUsage, but each copy's cost is shared among all
+  # persisters of the block via computeWeightedReplicationCosts.
+  persister_replication_for_block = block_to_persister_replication[block_uuid]
+  user_replication = persister_replication_for_block[user_uuid]
+  return (
+    byteSizeFromValidUuid(block_uuid) *
+    computeWeightedReplicationCosts(
+      persister_replication_for_block.values())[user_replication])
+
+
+def computeUserStorageUsage():
+  """Fill user_to_usage with the four per-user byte totals.
+
+  Columns: unweighted/weighted readable size and unweighted/weighted
+  persisted size (see the *_COL constants).
+  """
+  for user, blocks in reader_to_blocks.items():
+    user_to_usage[user][UNWEIGHTED_READ_SIZE_COL] = sum(map(
+      byteSizeFromValidUuid,
+      blocks))
+    # Weighted read size shares each block's size among all its readers.
+    user_to_usage[user][WEIGHTED_READ_SIZE_COL] = sum(map(
+      lambda block_uuid:(float(byteSizeFromValidUuid(block_uuid))/
+                         len(block_to_readers[block_uuid])),
+      blocks))
+  for user, blocks in persister_to_blocks.items():
+    user_to_usage[user][UNWEIGHTED_PERSIST_SIZE_COL] = sum(map(
+      partial(blockPersistedUsage, user),
+      blocks))
+    user_to_usage[user][WEIGHTED_PERSIST_SIZE_COL] = sum(map(
+      partial(blockPersistedWeightedUsage, user),
+      blocks))
+
+def printUserStorageUsage():
+  """Dump the per-user usage table to stdout."""
+  print ('user: unweighted readable block size, weighted readable block size, '
+         'unweighted persisted block size, weighted persisted block size:')
+  for user, usage in user_to_usage.items():
+    print ('%s: %s %s %s %s' %
+           (user,
+            fileSizeFormat(usage[UNWEIGHTED_READ_SIZE_COL]),
+            fileSizeFormat(usage[WEIGHTED_READ_SIZE_COL]),
+            fileSizeFormat(usage[UNWEIGHTED_PERSIST_SIZE_COL]),
+            fileSizeFormat(usage[WEIGHTED_PERSIST_SIZE_COL])))
+
+def logUserStorageUsage():
+  """Write one Arvados log entry per user with their usage numbers."""
+  for user, usage in user_to_usage.items():
+    body = {}
+    # user could actually represent a user or a group. We don't set
+    # the object_type field since we don't know which we have.
+    body['object_uuid'] = user
+    body['event_type'] = args.user_storage_log_event_type
+    properties = {}
+    properties['read_collections_total_bytes'] = usage[UNWEIGHTED_READ_SIZE_COL]
+    properties['read_collections_weighted_bytes'] = (
+      usage[WEIGHTED_READ_SIZE_COL])
+    properties['persisted_collections_total_bytes'] = (
+      usage[UNWEIGHTED_PERSIST_SIZE_COL])
+    properties['persisted_collections_weighted_bytes'] = (
+      usage[WEIGHTED_PERSIST_SIZE_COL])
+    body['properties'] = properties
+    # TODO(misha): Confirm that this will throw an exception if it
+    # fails to create the log entry.
+    arv.logs().create(body=body).execute()
+
+def getKeepServers():
+  """Return [host, port] for each keep disk known to the API server."""
+  response = arv.keep_disks().list().execute()
+  return [[keep_server['service_host'], keep_server['service_port']]
+          for keep_server in response['items']]
+
+
+def getKeepBlocks(keep_servers):
+  """Fetch each server's /index; return per-server lists of
+  (block_id, mtime) tuples."""
+  blocks = []
+  for host,port in keep_servers:
+    response = urllib2.urlopen('http://%s:%d/index' % (host, port))
+    server_blocks = [line.split(' ')
+                     for line in response.read().split('\n')
+                     if line]
+    server_blocks = [(block_id, int(mtime))
+                     for block_id, mtime in server_blocks]
+    blocks.append(server_blocks)
+  return blocks
+
+def getKeepStats(keep_servers):
+  """Return [total_bytes, free_bytes] for each keep server.
+
+  Parses the df(1) text embedded in each server's /status.json,
+  summing only volumes whose mount point contains 'keep'.
+  """
+  # Column positions within a df output line, and df's block size.
+  MOUNT_COLUMN = 5
+  TOTAL_COLUMN = 1
+  FREE_COLUMN = 3
+  DISK_BLOCK_SIZE = 1024
+  stats = []
+  for host,port in keep_servers:
+    response = urllib2.urlopen('http://%s:%d/status.json' % (host, port))
+
+    parsed_json = json.load(response)
+    df_entries = [line.split()
+                  for line in parsed_json['df'].split('\n')
+                  if line]
+    keep_volumes = [columns
+                    for columns in df_entries
+                    if 'keep' in columns[MOUNT_COLUMN]]
+    total_space = DISK_BLOCK_SIZE*sum(map(int,map(itemgetter(TOTAL_COLUMN),
+                                                  keep_volumes)))
+    free_space = DISK_BLOCK_SIZE*sum(map(int,map(itemgetter(FREE_COLUMN),
+                                                 keep_volumes)))
+    stats.append([total_space, free_space])
+  return stats
+
+
+def computeReplication(keep_blocks):
+  """Count how many keep servers hold each block; fills block_to_replication."""
+  for server_blocks in keep_blocks:
+    for block_uuid, _ in server_blocks:
+      block_to_replication[block_uuid] += 1
+  log.debug('Seeing the following replication levels among blocks: %s',
+            str(set(block_to_replication.values())))
+
+
+def computeGarbageCollectionCandidates():
+  """Build garbage_collection_report: non-persisted blocks, oldest first.
+
+  Each row is (block, latest mtime, disk size, cumulative size,
+  proportion of disk that would be free after deleting this row and
+  every row above it).
+  """
+  for server_blocks in keep_blocks:
+    block_to_latest_mtime.addValues(server_blocks)
+  empty_set = set()
+  # Candidates are blocks with no persisters, ordered oldest-first.
+  garbage_collection_priority = sorted(
+    [(block,mtime)
+     for block,mtime in block_to_latest_mtime.items()
+     if len(block_to_persisters.get(block,empty_set)) == 0],
+    key = itemgetter(1))
+  global garbage_collection_report
+  garbage_collection_report = []
+  cumulative_disk_size = 0
+  for block,mtime in garbage_collection_priority:
+    disk_size = blockDiskUsage(block)
+    cumulative_disk_size += disk_size
+    garbage_collection_report.append(
+      (block,
+       mtime,
+       disk_size,
+       cumulative_disk_size,
+       float(free_keep_space + cumulative_disk_size)/total_keep_space))
+
+  print 'The oldest Garbage Collection Candidates: '
+  pprint.pprint(garbage_collection_report[:20])
+
+
+def outputGarbageCollectionReport(filename):
+  """Write garbage_collection_report to filename as CSV with a header row."""
+  with open(filename, 'wb') as csvfile:
+    gcwriter = csv.writer(csvfile)
+    gcwriter.writerow(['block uuid', 'latest mtime', 'disk size',
+                       'cumulative size', 'disk free'])
+    for line in garbage_collection_report:
+      gcwriter.writerow(line)
+
+def computeGarbageCollectionHistogram():
+  """Summarize garbage_collection_report as [(mtime, disk-free), ...].
+
+  One entry is kept per 0.01 step of achievable free-space proportion.
+  """
+  # TODO(misha): Modify this to allow users to specify the number of
+  # histogram buckets through a flag.
+  histogram = []
+  last_percentage = -1
+  for _,mtime,_,_,disk_free in garbage_collection_report:
+    curr_percentage = percentageFloor(disk_free)
+    if curr_percentage > last_percentage:
+      histogram.append( (mtime, curr_percentage) )
+      last_percentage = curr_percentage
+
+  log.info('Garbage collection histogram is: %s', histogram)
+
+  return histogram
+
+
+def logGarbageCollectionHistogram():
+  """Write garbage_collection_histogram as one Arvados log entry."""
+  body = {}
+  # TODO(misha): Decide whether we should specify an object_uuid in
+  # the body and if so, which uuid to use.
+  body['event_type'] = args.block_age_free_space_histogram_log_event_type
+  properties = {}
+  properties['histogram'] = garbage_collection_histogram
+  body['properties'] = properties
+  # TODO(misha): Confirm that this will throw an exception if it
+  # fails to create the log entry.
+  arv.logs().create(body=body).execute()
+
+
+def detectReplicationProblems():
+  """Populate the three module-level sets of problematic blocks.
+
+  - blocks_not_in_any_collections: on keep servers but in no manifest.
+  - underreplicated_persisted_blocks: actual replication below the
+    highest level any persister requested.
+  - overreplicated_persisted_blocks: actual replication above it.
+  """
+  blocks_not_in_any_collections.update(
+    set(block_to_replication.keys()).difference(block_to_collections.keys()))
+  underreplicated_persisted_blocks.update(
+    [uuid
+     for uuid, persister_replication in block_to_persister_replication.items()
+     if len(persister_replication) > 0 and
+     block_to_replication[uuid] < max(persister_replication.values())])
+  overreplicated_persisted_blocks.update(
+    [uuid
+     for uuid, persister_replication in block_to_persister_replication.items()
+     if len(persister_replication) > 0 and
+     block_to_replication[uuid] > max(persister_replication.values())])
+
+  log.info('Found %d blocks not in any collections, e.g. %s...',
+           len(blocks_not_in_any_collections),
+           ','.join(list(blocks_not_in_any_collections)[:5]))
+  log.info('Found %d underreplicated blocks, e.g. %s...',
+           len(underreplicated_persisted_blocks),
+           ','.join(list(underreplicated_persisted_blocks)[:5]))
+  log.info('Found %d overreplicated blocks, e.g. %s...',
+           len(overreplicated_persisted_blocks),
+           ','.join(list(overreplicated_persisted_blocks)[:5]))
+
+  # TODO:
+  # Read blocks sorted by mtime
+  # Cache window vs % free space
+  # Collections which candidates will appear in
+  # Youngest underreplicated read blocks that appear in collections.
+  # Report Collections that have blocks which are missing from (or
+  # underreplicated in) keep.
+
+
+# This is the main flow here
+
+parser = argparse.ArgumentParser(description='Report on keep disks.')
+"""The command line argument parser we use.
+
+We only use it in the __main__ block, but leave it outside the block
+in case another package wants to use it or customize it by specifying
+it as a parent to their commandline parser.
+"""
+parser.add_argument('-m',
+ '--max-api-results',
+ type=int,
+ default=5000,
+ help=('The max results to get at once.'))
+parser.add_argument('-p',
+ '--port',
+ type=int,
+ default=9090,
+ help=('The port number to serve on. 0 means no server.'))
+parser.add_argument('-v',
+ '--verbose',
+ help='increase output verbosity',
+ action='store_true')
+parser.add_argument('-u',
+ '--uuid',
+ help='uuid of specific collection to process')
+parser.add_argument('--require-admin-user',
+ action='store_true',
+ default=True,
+ help='Fail if the user is not an admin [default]')
+parser.add_argument('--no-require-admin-user',
+ dest='require_admin_user',
+ action='store_false',
+ help=('Allow users without admin permissions with '
+ 'only a warning.'))
+parser.add_argument('--log-to-workbench',
+ action='store_true',
+ default=False,
+ help='Log findings to workbench')
+parser.add_argument('--no-log-to-workbench',
+ dest='log_to_workbench',
+ action='store_false',
+ help='Don\'t log findings to workbench [default]')
+parser.add_argument('--user-storage-log-event-type',
+ default='user-storage-report',
+ help=('The event type to set when logging user '
+ 'storage usage to workbench.'))
+parser.add_argument('--block-age-free-space-histogram-log-event-type',
+ default='block-age-free-space-histogram',
+ help=('The event type to set when logging user '
+ 'storage usage to workbench.'))
+parser.add_argument('--garbage-collection-file',
+ default='',
+ help=('The file to write a garbage collection report, or '
+ 'leave empty for no report.'))
+
+# Parsed command-line arguments (argparse Namespace); set in __main__.
+args = None
+
+# TODO(misha): Think about moving some of this to the __main__ block.
+log = logging.getLogger('arvados.services.datamanager')
+stderr_handler = logging.StreamHandler()
+log.setLevel(logging.INFO)
+stderr_handler.setFormatter(
+  logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s'))
+log.addHandler(stderr_handler)
+
+# Global Data - don't try this at home
+collection_uuids = []
+
+# These maps all map from uuids to a set of uuids
+block_to_collections = defaultdict(set)  # keep blocks
+reader_to_collections = defaultdict(set)  # collection(s) for which the user has read access
+persister_to_collections = defaultdict(set)  # collection(s) which the user has persisted
+block_to_readers = defaultdict(set)
+block_to_persisters = defaultdict(set)
+block_to_persister_replication = defaultdict(maxdict)
+reader_to_blocks = defaultdict(set)
+persister_to_blocks = defaultdict(set)
+
+# Column indices into the per-user rows stored in user_to_usage.
+UNWEIGHTED_READ_SIZE_COL = 0
+WEIGHTED_READ_SIZE_COL = 1
+UNWEIGHTED_PERSIST_SIZE_COL = 2
+WEIGHTED_PERSIST_SIZE_COL = 3
+NUM_COLS = 4
+user_to_usage = defaultdict(lambda : [0,]*NUM_COLS)
+
+# Filled in by loadAllData().
+keep_servers = []
+keep_blocks = []
+keep_stats = []
+total_keep_space = 0
+free_keep_space = 0
+
+block_to_replication = defaultdict(lambda: 0)
+block_to_latest_mtime = maxdict()
+
+garbage_collection_report = []
+"""A list of non-persisted blocks, sorted by increasing mtime
+
+Each entry is of the form (block uuid, latest mtime, disk size,
+cumulative size)
+
+* block uuid: The id of the block we want to delete
+* latest mtime: The latest mtime of the block across all keep servers.
+* disk size: The total disk space used by this block (block size
+multiplied by current replication level)
+* cumulative disk size: The sum of this block's disk size and all the
+blocks listed above it
+* disk free: The proportion of our disk space that would be free if we
+deleted this block and all the above. So this is (free disk space +
+cumulative disk size) / total disk capacity
+"""
+
+garbage_collection_histogram = []
+""" Shows the tradeoff of keep block age vs keep disk free space.
+
+Each entry is of the form (mtime, Disk Proportion).
+
+An entry of the form (1388747781, 0.52) means that if we deleted the
+oldest non-persisted blocks until we had 52% of the disk free, then
+all blocks with an mtime greater than 1388747781 would be preserved.
+"""
+
+# Stuff to report on
+blocks_not_in_any_collections = set()
+underreplicated_persisted_blocks = set()
+overreplicated_persisted_blocks = set()
+
+# True once loadAllData() has completed successfully.
+all_data_loaded = False
+
+def loadAllData():
+  """Top-level driver: fetch everything and compute all reports.
+
+  Reads collections, links and keep server state, then computes
+  replication, garbage collection and per-user usage reports.  Sets
+  all_data_loaded when finished.
+  """
+  checkUserIsAdmin()
+
+  log.info('Building Collection List')
+  global collection_uuids
+  collection_uuids = filter(None, [extractUuid(candidate)
+                                   for candidate in buildCollectionsList()])
+
+  log.info('Reading Collections')
+  readCollections(collection_uuids)
+
+  if args.verbose:
+    pprint.pprint(CollectionInfo.all_by_uuid)
+
+  log.info('Reading Links')
+  readLinks()
+
+  reportMostPopularCollections()
+
+  log.info('Building Maps')
+  buildMaps()
+
+  reportBusiestUsers()
+
+  log.info('Getting Keep Servers')
+  global keep_servers
+  keep_servers = getKeepServers()
+
+  print keep_servers
+
+  log.info('Getting Blocks from each Keep Server.')
+  global keep_blocks
+  keep_blocks = getKeepBlocks(keep_servers)
+
+  log.info('Getting Stats from each Keep Server.')
+  global keep_stats, total_keep_space, free_keep_space
+  keep_stats = getKeepStats(keep_servers)
+
+  total_keep_space = sum(map(itemgetter(0), keep_stats))
+  free_keep_space = sum(map(itemgetter(1), keep_stats))
+
+  # TODO(misha): Delete this hack when the keep servers are fixed!
+  # This hack deals with the fact that keep servers report each other's disks.
+  total_keep_space /= len(keep_stats)
+  free_keep_space /= len(keep_stats)
+
+  log.info('Total disk space: %s, Free disk space: %s (%d%%).' %
+           (fileSizeFormat(total_keep_space),
+            fileSizeFormat(free_keep_space),
+            100*free_keep_space/total_keep_space))
+
+  computeReplication(keep_blocks)
+
+  log.info('average replication level is %f',
+           (float(sum(block_to_replication.values())) /
+            len(block_to_replication)))
+
+  computeGarbageCollectionCandidates()
+
+  if args.garbage_collection_file:
+    log.info('Writing garbage Collection report to %s',
+             args.garbage_collection_file)
+    outputGarbageCollectionReport(args.garbage_collection_file)
+
+  global garbage_collection_histogram
+  garbage_collection_histogram = computeGarbageCollectionHistogram()
+
+  if args.log_to_workbench:
+    logGarbageCollectionHistogram()
+
+  detectReplicationProblems()
+
+  computeUserStorageUsage()
+  printUserStorageUsage()
+  if args.log_to_workbench:
+    logUserStorageUsage()
+
+  global all_data_loaded
+  all_data_loaded = True
+
+
+class DataManagerHandler(BaseHTTPRequestHandler):
+ USER_PATH = 'user'
+ COLLECTION_PATH = 'collection'
+ BLOCK_PATH = 'block'
+
+ def userLink(self, uuid):
+ return ('<A HREF="/%(path)s/%(uuid)s">%(uuid)s</A>' %
+ {'uuid': uuid,
+ 'path': DataManagerHandler.USER_PATH})
+
+ def collectionLink(self, uuid):
+ return ('<A HREF="/%(path)s/%(uuid)s">%(uuid)s</A>' %
+ {'uuid': uuid,
+ 'path': DataManagerHandler.COLLECTION_PATH})
+
+ def blockLink(self, uuid):
+ return ('<A HREF="/%(path)s/%(uuid)s">%(uuid)s</A>' %
+ {'uuid': uuid,
+ 'path': DataManagerHandler.BLOCK_PATH})
+
+ def writeTop(self, title):
+ self.wfile.write('<HTML><HEAD><TITLE>%s</TITLE></HEAD>\n<BODY>' % title)
+
+ def writeBottom(self):
+ self.wfile.write('</BODY></HTML>\n')
+
+ def writeHomePage(self):
+ self.send_response(200)
+ self.end_headers()
+ self.writeTop('Home')
+ self.wfile.write('<TABLE>')
+ self.wfile.write('<TR><TH>user'
+ '<TH>unweighted readable block size'
+ '<TH>weighted readable block size'
+ '<TH>unweighted persisted block size'
+ '<TH>weighted persisted block size</TR>\n')
+ for user, usage in user_to_usage.items():
+ self.wfile.write('<TR><TD>%s<TD>%s<TD>%s<TD>%s<TD>%s</TR>\n' %
+ (self.userLink(user),
+ fileSizeFormat(usage[UNWEIGHTED_READ_SIZE_COL]),
+ fileSizeFormat(usage[WEIGHTED_READ_SIZE_COL]),
+ fileSizeFormat(usage[UNWEIGHTED_PERSIST_SIZE_COL]),
+ fileSizeFormat(usage[WEIGHTED_PERSIST_SIZE_COL])))
+ self.wfile.write('</TABLE>\n')
+ self.writeBottom()
+
+ def userExists(self, uuid):
+ # Currently this will return false for a user who exists but
+ # doesn't appear on any manifests.
+ # TODO(misha): Figure out if we need to fix this.
+ return user_to_usage.has_key(uuid)
+
  def writeUserPage(self, uuid):
    '''Send the detail page for one user, or a 404 if unknown.'''
    if not self.userExists(uuid):
      self.send_error(404,
                      'User (%s) Not Found.' % cgi.escape(uuid, quote=False))
    else:
      # Here we assume that since a user exists, they don't need to be
      # html escaped.
      self.send_response(200)
      self.end_headers()
      self.writeTop('User %s' % uuid)
      self.wfile.write('<TABLE>')
      self.wfile.write('<TR><TH>user'
                       '<TH>unweighted readable block size'
                       '<TH>weighted readable block size'
                       '<TH>unweighted persisted block size'
                       '<TH>weighted persisted block size</TR>\n')
      usage = user_to_usage[uuid]
      self.wfile.write('<TR><TD>%s<TD>%s<TD>%s<TD>%s<TD>%s</TR>\n' %
                       (self.userLink(uuid),
                        fileSizeFormat(usage[UNWEIGHTED_READ_SIZE_COL]),
                        fileSizeFormat(usage[WEIGHTED_READ_SIZE_COL]),
                        fileSizeFormat(usage[UNWEIGHTED_PERSIST_SIZE_COL]),
                        fileSizeFormat(usage[WEIGHTED_PERSIST_SIZE_COL])))
      self.wfile.write('</TABLE>\n')
      # Collection lists come from the module-level maps built at load time.
      self.wfile.write('<P>Persisting Collections: %s\n' %
                       ', '.join(map(self.collectionLink,
                                     persister_to_collections[uuid])))
      self.wfile.write('<P>Reading Collections: %s\n' %
                       ', '.join(map(self.collectionLink,
                                     reader_to_collections[uuid])))
      self.writeBottom()
+
+ def collectionExists(self, uuid):
+ return CollectionInfo.all_by_uuid.has_key(uuid)
+
  def writeCollectionPage(self, uuid):
    '''Send the detail page for one collection, or a 404 if unknown.'''
    if not self.collectionExists(uuid):
      self.send_error(404,
                      'Collection (%s) Not Found.' % cgi.escape(uuid, quote=False))
    else:
      collection = CollectionInfo.get(uuid)
      # Here we assume that since a collection exists, its id doesn't
      # need to be html escaped.
      self.send_response(200)
      self.end_headers()
      self.writeTop('Collection %s' % uuid)
      self.wfile.write('<H1>Collection %s</H1>\n' % uuid)
      self.wfile.write('<P>Total size %s (not factoring in replication).\n' %
                       fileSizeFormat(collection.byteSize()))
      self.wfile.write('<P>Readers: %s\n' %
                       ', '.join(map(self.userLink, collection.reader_uuids)))

      if len(collection.persister_replication) == 0:
        self.wfile.write('<P>No persisters\n')
      else:
        # Group persisting users by the replication level they requested.
        replication_to_users = defaultdict(set)
        for user,replication in collection.persister_replication.items():
          replication_to_users[replication].add(user)
        replication_levels = sorted(replication_to_users.keys())

        self.wfile.write('<P>%d persisters in %d replication level(s) maxing '
                         'out at %dx replication:\n' %
                         (len(collection.persister_replication),
                          len(replication_levels),
                          replication_levels[-1]))

        # TODO(misha): This code is used twice, let's move it to a method.
        self.wfile.write('<TABLE><TR><TH>%s</TR>\n' %
                         '<TH>'.join(['Replication Level ' + str(x)
                                      for x in replication_levels]))
        self.wfile.write('<TR>\n')
        for replication_level in replication_levels:
          users = replication_to_users[replication_level]
          self.wfile.write('<TD valign="top">%s\n' % '<BR>\n'.join(
              map(self.userLink, users)))
        self.wfile.write('</TR></TABLE>\n')

      # Same table shape again, but for the collection's blocks grouped
      # by their observed replication in Keep.
      replication_to_blocks = defaultdict(set)
      for block in collection.block_uuids:
        replication_to_blocks[block_to_replication[block]].add(block)
      replication_levels = sorted(replication_to_blocks.keys())
      self.wfile.write('<P>%d blocks in %d replication level(s):\n' %
                       (len(collection.block_uuids), len(replication_levels)))
      self.wfile.write('<TABLE><TR><TH>%s</TR>\n' %
                       '<TH>'.join(['Replication Level ' + str(x)
                                    for x in replication_levels]))
      self.wfile.write('<TR>\n')
      for replication_level in replication_levels:
        blocks = replication_to_blocks[replication_level]
        self.wfile.write('<TD valign="top">%s\n' % '<BR>\n'.join(blocks))
      self.wfile.write('</TR></TABLE>\n')
+
  def do_GET(self):
    '''Dispatch GET requests to the home, user, or collection page.

    Returns 503 until the background loader has finished populating the
    module-level data structures.
    '''
    if not all_data_loaded:
      self.send_error(503,
                      'Sorry, but I am still loading all the data I need.')
    else:
      # Removing leading '/' and process request path
      split_path = self.path[1:].split('/')
      request_type = split_path[0]
      log.debug('path (%s) split as %s with request_type %s' % (self.path,
                                                                split_path,
                                                                request_type))
      if request_type == '':
        self.writeHomePage()
      elif request_type == DataManagerHandler.USER_PATH:
        # NOTE(review): a path like '/user' with no id segment would raise
        # IndexError here -- confirm generated links always include an id.
        self.writeUserPage(split_path[1])
      elif request_type == DataManagerHandler.COLLECTION_PATH:
        self.writeCollectionPage(split_path[1])
      else:
        self.send_error(404, 'Unrecognized request path.')
    return
+
# Mixing in ThreadingMixIn makes each request run in its own thread, so a
# slow page render does not block other clients.
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
  """Handle requests in a separate thread."""
+
+
if __name__ == '__main__':
  args = parser.parse_args()

  # Port 0 means "no web server": load the data synchronously and exit.
  if args.port == 0:
    loadAllData()
  else:
    # Load in the background so the server can start immediately and
    # answer 503 until all_data_loaded becomes true.
    loader = threading.Thread(target = loadAllData, name = 'loader')
    loader.start()

    server = ThreadedHTTPServer(('localhost', args.port), DataManagerHandler)
    server.serve_forever()
--- /dev/null
+#! /usr/bin/env python
+
+import datamanager
+import unittest
+
class TestComputeWeightedReplicationCosts(unittest.TestCase):
  # NOTE(review): these tests compare dicts of floats with assertEqual;
  # they pass only because the expected values are built from the same
  # binary-float arithmetic as the implementation.  Per-key
  # assertAlmostEqual would be sturdier -- confirm before extending.
  def test_obvious(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,]),
                     {1:1.0})

  def test_simple(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([2,]),
                     {2:2.0})

  def test_even_split(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,1]),
                     {1:0.5})

  def test_even_split_bigger(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([2,2]),
                     {2:1.0})

  def test_uneven_split(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,2]),
                     {1:0.5, 2:1.5})

  def test_uneven_split_bigger(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,3]),
                     {1:0.5, 3:2.5})

  def test_uneven_split_jumble(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,3,6,6,10]),
                     {1:0.2, 3:0.7, 6:1.7, 10:5.7})

  def test_documentation_example(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,1,3,6,6]),
                     {1:0.2, 3: 0.2 + 2.0 / 3, 6: 0.2 + 2.0 / 3 + 1.5})


if __name__ == '__main__':
  unittest.main()
--- /dev/null
+../../sdk/python/.gitignore
\ No newline at end of file
--- /dev/null
+#
+# FUSE driver for Arvados Keep
+#
+
+import os
+import sys
+
+import llfuse
+import errno
+import stat
+import threading
+import arvados
+import pprint
+import arvados.events
+import re
+import apiclient
+import json
+
+from time import time
+from llfuse import FUSEError
+
class FreshBase(object):
    '''Tracks fresh/stale state so subclasses know when to refresh their
    cached contents.'''

    def __init__(self):
        self._stale = True
        self._poll = False
        self._last_update = time()
        self._poll_time = 60

    def invalidate(self):
        '''Force the next stale() check to report True.'''
        self._stale = True

    def stale(self):
        '''Return True when the cached contents should be refreshed.'''
        if self._stale:
            return True
        # In polling mode, contents expire _poll_time seconds after the
        # last successful refresh.
        return self._poll and (self._last_update + self._poll_time) < time()

    def fresh(self):
        '''Mark the contents as up to date as of right now.'''
        self._stale = False
        self._last_update = time()
+
+
class File(FreshBase):
    '''Abstract base for read-only file entries in the mount.'''

    def __init__(self, parent_inode):
        super(File, self).__init__()
        # The inode number is assigned later, by Inodes.add_entry().
        self.inode = None
        self.parent_inode = parent_inode

    def size(self):
        '''File length in bytes; subclasses override.'''
        return 0

    def readfrom(self, off, size):
        '''Return up to `size` bytes starting at `off`; subclasses override.'''
        return ''
+
+
class StreamReaderFile(File):
    '''File entry backed by an arvados StreamFileReader.'''

    def __init__(self, parent_inode, reader):
        super(StreamReaderFile, self).__init__(parent_inode)
        self.reader = reader

    def size(self):
        # Delegate straight to the underlying stream reader.
        return self.reader.size()

    def readfrom(self, off, size):
        # Delegate straight to the underlying stream reader.
        return self.reader.readfrom(off, size)

    def stale(self):
        # Collection contents are immutable, so this never goes stale.
        return False
+
+
class ObjectFile(File):
    '''File entry whose contents are an API record serialized as JSON.'''

    def __init__(self, parent_inode, contents):
        super(ObjectFile, self).__init__(parent_inode)
        self.contentsdict = contents
        self.uuid = self.contentsdict['uuid']
        # Render the JSON text once; reads are then served from memory.
        self.contents = json.dumps(self.contentsdict, indent=4, sort_keys=True)

    def size(self):
        return len(self.contents)

    def readfrom(self, off, size):
        end = off + size
        return self.contents[off:end]
+
+
class Directory(FreshBase):
    '''Generic directory object, backed by a dict.
    Consists of a set of entries with the key representing the filename
    and the value referencing a File or Directory object.
    '''

    def __init__(self, parent_inode):
        super(Directory, self).__init__()

        '''parent_inode is the integer inode number'''
        # Assigned later by Inodes.add_entry().
        self.inode = None
        if not isinstance(parent_inode, int):
            raise Exception("parent_inode should be an int")
        self.parent_inode = parent_inode
        self._entries = {}

    # Overriden by subclasses to implement logic to update the entries dict
    # when the directory is stale
    def update(self):
        pass

    # Only used when computing the size of the disk footprint of the directory
    # (stub)
    def size(self):
        return 0

    def checkupdate(self):
        # Refresh if stale; an API error is printed and swallowed so the
        # directory simply stays stale and a later access retries.
        if self.stale():
            try:
                self.update()
            except apiclient.errors.HttpError as e:
                print e

    def __getitem__(self, item):
        self.checkupdate()
        return self._entries[item]

    def items(self):
        self.checkupdate()
        return self._entries.items()

    def __iter__(self):
        self.checkupdate()
        return self._entries.iterkeys()

    def __contains__(self, k):
        self.checkupdate()
        return k in self._entries

    def merge(self, items, fn, same, new_entry):
        '''Helper method for updating the contents of the directory.

        items: array with new directory contents

        fn: function to take an entry in 'items' and return the desired file or
        directory name

        same: function to compare an existing entry with an entry in the items
        list to determine whether to keep the existing entry.

        new_entry: function to create a new directory entry from array entry.
        '''

        # Entries that survive `same` keep their inode; everything else is
        # created fresh, and any leftovers are invalidated in the kernel
        # cache and dropped from the inode table.
        # NOTE(review): relies on self.inodes, which only subclasses set --
        # Directory itself has no `inodes` attribute; confirm merge() is
        # never called on a bare Directory.
        oldentries = self._entries
        self._entries = {}
        for i in items:
            n = fn(i)
            if n in oldentries and same(oldentries[n], i):
                self._entries[n] = oldentries[n]
                del oldentries[n]
            else:
                self._entries[n] = self.inodes.add_entry(new_entry(i))
        for n in oldentries:
            llfuse.invalidate_entry(self.inode, str(n))
            self.inodes.del_entry(oldentries[n])
        self.fresh()
+
+
class CollectionDirectory(Directory):
    '''Represents the root of a directory tree holding a collection.'''

    def __init__(self, parent_inode, inodes, collection_locator):
        super(CollectionDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.collection_locator = collection_locator

    def same(self, i):
        '''True if the API record `i` refers to this collection.'''
        return i['uuid'] == self.collection_locator

    def update(self):
        '''Populate the directory tree from the collection's manifest.

        On any error the exception is reported and the directory is left
        stale, so a later access retries the fetch.
        '''
        try:
            collection = arvados.CollectionReader(self.collection_locator)
            for s in collection.all_streams():
                # Walk (creating as needed) the subdirectory chain named
                # by the stream, then attach the stream's files to its tail.
                cwd = self
                for part in s.name().split('/'):
                    if part != '' and part != '.':
                        if part not in cwd._entries:
                            cwd._entries[part] = self.inodes.add_entry(Directory(cwd.inode))
                        cwd = cwd._entries[part]
                for k, v in s.files().items():
                    cwd._entries[k] = self.inodes.add_entry(StreamReaderFile(cwd.inode, v))
            # (Removed a leftover debug statement that printed "found" on
            # every refresh -- it was also Python 3-incompatible syntax.)
            self.fresh()
        except Exception as detail:
            print("%s: error: %s" % (self.collection_locator, detail))
+
class MagicDirectory(Directory):
    '''A special directory that logically contains the set of all extant keep
    locators. When a file is referenced by lookup(), it is tested to see if it
    is a valid keep locator to a manifest, and if so, loads the manifest
    contents as a subdirectory of this directory with the locator as the
    directory name. Since querying a list of all extant keep locators is
    impractical, only collections that have already been accessed are visible
    to readdir().
    '''

    def __init__(self, parent_inode, inodes):
        super(MagicDirectory, self).__init__(parent_inode)
        self.inodes = inodes

    def __contains__(self, k):
        '''True if `k` is already loaded or is a retrievable Keep locator.'''
        if k in self._entries:
            return True
        try:
            # Probe Keep for the locator; any failure (malformed locator,
            # network error, missing block) is deliberately treated as
            # "not present" so lookups stay best-effort.
            return bool(arvados.Keep.get(k))
        except Exception:
            return False

    def __getitem__(self, item):
        # Lazily create a CollectionDirectory the first time a locator is
        # accessed; callers are expected to test `item in self` first.
        if item not in self._entries:
            self._entries[item] = self.inodes.add_entry(CollectionDirectory(self.inode, self.inodes, item))
        return self._entries[item]
+
+
class TagsDirectory(Directory):
    '''A special directory that contains as subdirectories all tags visible to the user.'''

    def __init__(self, parent_inode, inodes, api, poll_time=60):
        super(TagsDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        try:
            # Prefer push invalidation via the websocket event stream.
            arvados.events.subscribe(self.api, [['object_uuid', 'is_a', 'arvados#link']], lambda ev: self.invalidate())
        except Exception:
            # A bare `except:` here would also trap KeyboardInterrupt and
            # SystemExit; only ordinary failures should fall back to polling.
            self._poll = True
            self._poll_time = poll_time

    def invalidate(self):
        # May be called from the event-stream thread, so take the fuse lock
        # before touching shared state.
        with llfuse.lock:
            super(TagsDirectory, self).invalidate()
            for a in self._entries:
                self._entries[a].invalidate()

    def update(self):
        '''Fetch the distinct tag names and merge them into the entries.'''
        tags = self.api.links().list(filters=[['link_class', '=', 'tag']], select=['name'], distinct = True).execute()
        # NOTE(review): the `same` lambda compares a.tag to the whole item
        # dict `i`, not i['name'], so existing TagDirectory entries look
        # like they are never reused -- confirm and fix separately.
        self.merge(tags['items'],
                   lambda i: i['name'],
                   lambda a, i: a.tag == i,
                   lambda i: TagDirectory(self.inode, self.inodes, self.api, i['name'], poll=self._poll, poll_time=self._poll_time))
+
class TagDirectory(Directory):
    '''A special directory that contains as subdirectories all collections visible
    to the user that are tagged with a particular tag.
    '''

    def __init__(self, parent_inode, inodes, api, tag, poll=False, poll_time=60):
        super(TagDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        self.tag = tag
        self._poll = poll
        self._poll_time = poll_time

    def update(self):
        '''Query the tag links for this tag and rebuild the entry set.'''
        response = self.api.links().list(
            filters=[['link_class', '=', 'tag'],
                     ['name', '=', self.tag],
                     ['head_uuid', 'is_a', 'arvados#collection']],
            select=['head_uuid']).execute()
        self.merge(response['items'],
                   lambda i: i['head_uuid'],
                   lambda a, i: a.collection_locator == i['head_uuid'],
                   lambda i: CollectionDirectory(self.inode, self.inodes, i['head_uuid']))
+
+
class GroupsDirectory(Directory):
    '''A special directory that contains as subdirectories all groups visible to the user.'''

    def __init__(self, parent_inode, inodes, api, poll_time=60):
        super(GroupsDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        try:
            # Prefer push invalidation via the websocket event stream.
            arvados.events.subscribe(self.api, [], lambda ev: self.invalidate())
        except Exception:
            # A bare `except:` here would also trap KeyboardInterrupt and
            # SystemExit; only ordinary failures should fall back to polling.
            self._poll = True
            self._poll_time = poll_time

    def invalidate(self):
        # May be called from the event-stream thread, so take the fuse lock
        # before touching shared state.
        with llfuse.lock:
            super(GroupsDirectory, self).invalidate()
            for a in self._entries:
                self._entries[a].invalidate()

    def update(self):
        '''List the visible groups and merge them into the entries.'''
        groups = self.api.groups().list().execute()
        self.merge(groups['items'],
                   lambda i: i['uuid'],
                   lambda a, i: a.uuid == i['uuid'],
                   lambda i: GroupDirectory(self.inode, self.inodes, self.api, i, poll=self._poll, poll_time=self._poll_time))
+
+
class GroupDirectory(Directory):
    '''A special directory that contains the contents of a group.'''

    def __init__(self, parent_inode, inodes, api, uuid, poll=False, poll_time=60):
        super(GroupDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        # `uuid` is the full group record dict; keep just its uuid string.
        self.uuid = uuid['uuid']
        self._poll = poll
        self._poll_time = poll_time

    def invalidate(self):
        # May be called from the event-stream thread, so take the fuse lock.
        with llfuse.lock:
            super(GroupDirectory, self).invalidate()
            for a in self._entries:
                self._entries[a].invalidate()

    def createDirectory(self, i):
        # Pick an entry type from the uuid's shape: a content address is a
        # collection, a j7d0g uuid is a subgroup, anything else uuid-shaped
        # becomes a JSON object file; unrecognized items are dropped.
        if re.match(r'[0-9a-f]{32}\+\d+', i['uuid']):
            return CollectionDirectory(self.inode, self.inodes, i['uuid'])
        elif re.match(r'[a-z0-9]{5}-j7d0g-[a-z0-9]{15}', i['uuid']):
            # NOTE(review): these two branches pass self.parent_inode as the
            # child's parent, while the collection branch above passes
            # self.inode -- the children live inside *this* directory, so
            # self.inode looks correct in all three; confirm and fix.
            return GroupDirectory(self.parent_inode, self.inodes, self.api, i, self._poll, self._poll_time)
        elif re.match(r'[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}', i['uuid']):
            return ObjectFile(self.parent_inode, i)
        return None

    def update(self):
        '''Fetch the group's contents and merge them into the entries.'''
        contents = self.api.groups().contents(uuid=self.uuid, include_linked=True).execute()
        # Map head_uuid -> link name so named links override raw uuids.
        links = {}
        for a in contents['links']:
            links[a['head_uuid']] = a['name']

        def choose_name(i):
            if i['uuid'] in links:
                return links[i['uuid']]
            else:
                return i['uuid']

        def same(a, i):
            # Keep an existing entry only if it refers to the same object
            # (and, for object files, is still fresh).
            if isinstance(a, CollectionDirectory):
                return a.collection_locator == i['uuid']
            elif isinstance(a, GroupDirectory):
                return a.uuid == i['uuid']
            elif isinstance(a, ObjectFile):
                return a.uuid == i['uuid'] and not a.stale()
            return False

        self.merge(contents['items'],
                   choose_name,
                   same,
                   self.createDirectory)
+
+
class FileHandle(object):
    '''Associates a numeric file handle with the File or Directory object
    (or readdir snapshot) that the client opened.'''

    def __init__(self, fh, entry):
        self.fh = fh
        self.entry = entry
+
+
class Inodes(object):
    '''Owns the inode table: the mapping from integer inode numbers to the
    concrete File or Directory objects they denote.'''

    def __init__(self):
        self._entries = {}
        # Inode numbers are handed out sequentially starting at the
        # kernel's root inode number.
        self._counter = llfuse.ROOT_INODE

    def add_entry(self, entry):
        '''Assign the next inode number to `entry` and register it.'''
        entry.inode = self._counter
        self._entries[entry.inode] = entry
        self._counter += 1
        return entry

    def del_entry(self, entry):
        '''Drop `entry` from the table and tell the kernel to forget it.'''
        llfuse.invalidate_inode(entry.inode)
        del self._entries[entry.inode]

    def __getitem__(self, item):
        return self._entries[item]

    def __setitem__(self, key, item):
        self._entries[key] = item

    def __iter__(self):
        return self._entries.iterkeys()

    def items(self):
        return self._entries.items()

    def __contains__(self, k):
        return k in self._entries
+
class Operations(llfuse.Operations):
    '''This is the main interface with llfuse. The methods on this object are
    called by llfuse threads to service FUSE events to query and read from
    the file system.

    llfuse has its own global lock which is acquired before calling a request handler,
    so request handlers do not run concurrently unless the lock is explicitly released
    with llfuse.lock_released.'''

    def __init__(self, uid, gid):
        super(Operations, self).__init__()

        self.inodes = Inodes()
        # uid/gid reported as the owner of every file in the mount.
        self.uid = uid
        self.gid = gid

        # dict of numeric file handle to FileHandle
        self._filehandles = {}
        self._filehandles_counter = 1

        # Other threads that need to wait until the fuse driver
        # is fully initialized should wait() on this event object.
        self.initlock = threading.Event()

    def init(self):
        # Allow threads that are waiting for the driver to be finished
        # initializing to continue
        self.initlock.set()

    def access(self, inode, mode, ctx):
        # Read-only filesystem: all access is allowed; writes are rejected
        # at open() time instead.
        return True

    def getattr(self, inode):
        '''Build an llfuse.EntryAttributes record for `inode`.

        Raises ENOENT if the inode is unknown.
        '''
        if inode not in self.inodes:
            raise llfuse.FUSEError(errno.ENOENT)

        e = self.inodes[inode]

        entry = llfuse.EntryAttributes()
        entry.st_ino = inode
        entry.generation = 0
        entry.entry_timeout = 300
        entry.attr_timeout = 300

        # World-readable; directories are also world-searchable.
        entry.st_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
        if isinstance(e, Directory):
            entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IFDIR
        else:
            entry.st_mode |= stat.S_IFREG

        entry.st_nlink = 1
        entry.st_uid = self.uid
        entry.st_gid = self.gid
        entry.st_rdev = 0

        entry.st_size = e.size()

        entry.st_blksize = 1024
        # Round the block count up: a partial trailing block still occupies
        # a block.  (Fixed from `if e.size()/1024 != 0`, which added an
        # extra block for every file >= 1024 bytes, even ones ending
        # exactly on a block boundary.)
        entry.st_blocks = e.size() / 1024
        if e.size() % 1024 != 0:
            entry.st_blocks += 1
        entry.st_atime = 0
        entry.st_mtime = 0
        entry.st_ctime = 0

        return entry

    def lookup(self, parent_inode, name):
        '''Resolve `name` within the directory `parent_inode`.

        Raises ENOENT when either the parent or the name is unknown.
        '''
        inode = None

        if name == '.':
            inode = parent_inode
        else:
            if parent_inode in self.inodes:
                p = self.inodes[parent_inode]
                if name == '..':
                    inode = p.parent_inode
                elif name in p:
                    inode = p[name].inode

        # Identity comparison is the idiomatic (and PEP 8) None test.
        if inode is not None:
            return self.getattr(inode)
        else:
            raise llfuse.FUSEError(errno.ENOENT)

    def open(self, inode, flags):
        '''Open a file inode read-only and return a new file handle.

        Raises ENOENT for unknown inodes, EROFS for write access, and
        EISDIR when the inode is a directory.
        '''
        if inode in self.inodes:
            p = self.inodes[inode]
        else:
            raise llfuse.FUSEError(errno.ENOENT)

        if (flags & os.O_WRONLY) or (flags & os.O_RDWR):
            raise llfuse.FUSEError(errno.EROFS)

        if isinstance(p, Directory):
            raise llfuse.FUSEError(errno.EISDIR)

        fh = self._filehandles_counter
        self._filehandles_counter += 1
        self._filehandles[fh] = FileHandle(fh, p)
        return fh

    def read(self, fh, off, size):
        '''Read up to `size` bytes at offset `off` from open handle `fh`.'''
        if fh in self._filehandles:
            handle = self._filehandles[fh]
        else:
            raise llfuse.FUSEError(errno.EBADF)

        try:
            # Release the global fuse lock while blocking on Keep I/O so
            # other requests can proceed.
            with llfuse.lock_released:
                return handle.entry.readfrom(off, size)
        except Exception:
            raise llfuse.FUSEError(errno.EIO)

    def release(self, fh):
        if fh in self._filehandles:
            del self._filehandles[fh]

    def opendir(self, inode):
        '''Open directory `inode`, snapshotting its entries into a handle.

        Raises ENOENT for unknown inodes, ENOTDIR for files, and EIO when
        the parent inode has disappeared from the table.
        '''
        if inode in self.inodes:
            p = self.inodes[inode]
        else:
            raise llfuse.FUSEError(errno.ENOENT)

        if not isinstance(p, Directory):
            raise llfuse.FUSEError(errno.ENOTDIR)

        fh = self._filehandles_counter
        self._filehandles_counter += 1
        if p.parent_inode in self.inodes:
            parent = self.inodes[p.parent_inode]
        else:
            raise llfuse.FUSEError(errno.EIO)

        # The snapshot includes '.' and '..' so readdir() can serve them.
        self._filehandles[fh] = FileHandle(fh, [('.', p), ('..', parent)] + list(p.items()))
        return fh

    def readdir(self, fh, off):
        '''Yield (name, attributes, next_offset) triples starting at `off`.'''
        if fh in self._filehandles:
            handle = self._filehandles[fh]
        else:
            raise llfuse.FUSEError(errno.EBADF)

        e = off
        while e < len(handle.entry):
            # Skip entries whose inodes were deleted since opendir().
            if handle.entry[e][1].inode in self.inodes:
                yield (handle.entry[e][0], self.getattr(handle.entry[e][1].inode), e+1)
            e += 1

    def releasedir(self, fh):
        del self._filehandles[fh]

    def statfs(self):
        # Report an empty, zero-capacity filesystem; sizes are per-file only.
        st = llfuse.StatvfsData()
        st.f_bsize = 1024 * 1024
        st.f_blocks = 0
        st.f_files = 0

        st.f_bfree = 0
        st.f_bavail = 0

        st.f_ffree = 0
        st.f_favail = 0

        st.f_frsize = 0
        return st

    # The llfuse documentation recommends only overloading functions that
    # are actually implemented, as the default implementation will raise ENOSYS.
    # However, there is a bug in the llfuse default implementation of create()
    # "create() takes exactly 5 positional arguments (6 given)" which will crash
    # arv-mount.
    # The workaround is to implement it with the proper number of parameters,
    # and then everything works out.
    def create(self, p1, p2, p3, p4, p5):
        raise llfuse.FUSEError(errno.EROFS)
--- /dev/null
+#!/usr/bin/env python
+
+from arvados_fuse import *
+import arvados
+import subprocess
+import argparse
+import daemon
+
if __name__ == '__main__':
    # Handle command line parameters
    parser = argparse.ArgumentParser(
        description='''Mount Keep data under the local filesystem. By default, if neither
    --collection or --tags is specified, this mounts as a virtual directory
    under which all Keep collections are available as subdirectories named
    with the Keep locator; however directories will not be visible to 'ls'
    until a program tries to access them.''',
        epilog="""
Note: When using the --exec feature, you must either specify the
mountpoint before --exec, or mark the end of your --exec arguments
with "--".
""")
    parser.add_argument('mountpoint', type=str, help="""Mount point.""")
    parser.add_argument('--allow-other', action='store_true',
                        help="""Let other users read the mount""")
    parser.add_argument('--collection', type=str, help="""Mount only the specified collection at the mount point.""")
    parser.add_argument('--tags', action='store_true', help="""Mount as a virtual directory consisting of subdirectories representing tagged
collections on the server.""")
    parser.add_argument('--groups', action='store_true', help="""Mount as a virtual directory consisting of subdirectories representing groups on the server.""")
    parser.add_argument('--debug', action='store_true', help="""Debug mode""")
    parser.add_argument('--foreground', action='store_true', help="""Run in foreground (default is to daemonize unless --exec specified)""", default=False)
    parser.add_argument('--exec', type=str, nargs=argparse.REMAINDER,
                        dest="exec_args", metavar=('command', 'args', '...', '--'),
                        help="""Mount, run a command, then unmount and exit""")

    args = parser.parse_args()

    # Create the request handler
    operations = Operations(os.getuid(), os.getgid())

    # Pick the root directory type from the mutually-exclusive mode flags;
    # the 'magic directory' of all collections is the default.
    if args.groups:
        api = arvados.api('v1')
        e = operations.inodes.add_entry(GroupsDirectory(llfuse.ROOT_INODE, operations.inodes, api))
    elif args.tags:
        api = arvados.api('v1')
        e = operations.inodes.add_entry(TagsDirectory(llfuse.ROOT_INODE, operations.inodes, api))
    elif args.collection != None:
        # Set up the request handler with the collection at the root
        e = operations.inodes.add_entry(CollectionDirectory(llfuse.ROOT_INODE, operations.inodes, args.collection))
    else:
        # Set up the request handler with the 'magic directory' at the root
        operations.inodes.add_entry(MagicDirectory(llfuse.ROOT_INODE, operations.inodes))

    # FUSE options, see mount.fuse(8)
    opts = [optname for optname in ['allow_other', 'debug']
            if getattr(args, optname)]

    if args.exec_args:
        # Initialize the fuse connection
        llfuse.init(operations, args.mountpoint, opts)

        # Run the fuse event loop in a background thread so the wrapped
        # command can run in this one.
        t = threading.Thread(None, lambda: llfuse.main())
        t.start()

        # wait until the driver is finished initializing
        operations.initlock.wait()

        rc = 255
        try:
            rc = subprocess.call(args.exec_args, shell=False)
        except OSError as e:
            sys.stderr.write('arv-mount: %s -- exec %s\n' % (str(e), args.exec_args))
            rc = e.errno
        except Exception as e:
            sys.stderr.write('arv-mount: %s\n' % str(e))
        finally:
            # Lazy-unmount so the exit status reflects the command, not fuse.
            subprocess.call(["fusermount", "-u", "-z", args.mountpoint])

        # NOTE(review): this is the site.exit() builtin; sys.exit(rc) would
        # be the conventional spelling -- confirm no -S/embedded use.
        exit(rc)
    else:
        if args.foreground:
            # Initialize the fuse connection
            llfuse.init(operations, args.mountpoint, opts)
            llfuse.main()
        else:
            # Initialize the fuse connection
            llfuse.init(operations, args.mountpoint, opts)
            # Detach from the terminal before entering the fuse loop.
            with daemon.DaemonContext():
                llfuse.main()
--- /dev/null
+arvados-python-client>=0.1
+llfuse>=0.37
+python-daemon>=1.5
--- /dev/null
+../../sdk/python/run_test_server.py
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+
+from setuptools import setup
+
# Package metadata for the Arvados FUSE driver; installs the arv-mount script.
setup(name='arvados_fuse',
      version='0.1',
      description='Arvados FUSE driver',
      author='Arvados',
      author_email='info@arvados.org',
      url="https://arvados.org",
      download_url="https://github.com/curoverse/arvados.git",
      license='GNU Affero General Public License, version 3.0',
      packages=['arvados_fuse'],
      scripts=[
        'bin/arv-mount'
        ],
      install_requires=[
        'arvados-python-client',
        'llfuse',
        'python-daemon'
        ],
      zip_safe=False)
--- /dev/null
+import unittest
+import arvados
+import arvados_fuse as fuse
+import threading
+import time
+import os
+import llfuse
+import tempfile
+import shutil
+import subprocess
+import glob
+import run_test_server
+import json
+
class MountTestBase(unittest.TestCase):
    # Common fixture: a local on-disk Keep store and a scratch mount point.
    def setUp(self):
        self.keeptmp = tempfile.mkdtemp()
        os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
        self.mounttmp = tempfile.mkdtemp()

    def tearDown(self):
        # llfuse.close is buggy, so use fusermount instead.
        #llfuse.close(unmount=True)
        subprocess.call(["fusermount", "-u", self.mounttmp])

        os.rmdir(self.mounttmp)
        shutil.rmtree(self.keeptmp)
+
+
class FuseMountTest(MountTestBase):
    # Mounts a single collection with nested streams and verifies the
    # resulting directory tree and file contents.
    def setUp(self):
        super(FuseMountTest, self).setUp()

        cw = arvados.CollectionWriter()

        cw.start_new_file('thing1.txt')
        cw.write("data 1")
        cw.start_new_file('thing2.txt')
        cw.write("data 2")
        cw.start_new_stream('dir1')

        cw.start_new_file('thing3.txt')
        cw.write("data 3")
        cw.start_new_file('thing4.txt')
        cw.write("data 4")

        cw.start_new_stream('dir2')
        cw.start_new_file('thing5.txt')
        cw.write("data 5")
        cw.start_new_file('thing6.txt')
        cw.write("data 6")

        cw.start_new_stream('dir2/dir3')
        cw.start_new_file('thing7.txt')
        cw.write("data 7")

        cw.start_new_file('thing8.txt')
        cw.write("data 8")

        self.testcollection = cw.finish()

    def runTest(self):
        # Create the request handler
        operations = fuse.Operations(os.getuid(), os.getgid())
        e = operations.inodes.add_entry(fuse.CollectionDirectory(llfuse.ROOT_INODE, operations.inodes, self.testcollection))

        llfuse.init(operations, self.mounttmp, [])
        t = threading.Thread(None, lambda: llfuse.main())
        t.start()

        # wait until the driver is finished initializing
        operations.initlock.wait()

        # now check some stuff
        d1 = os.listdir(self.mounttmp)
        d1.sort()
        self.assertEqual(['dir1', 'dir2', 'thing1.txt', 'thing2.txt'], d1)

        d2 = os.listdir(os.path.join(self.mounttmp, 'dir1'))
        d2.sort()
        self.assertEqual(['thing3.txt', 'thing4.txt'], d2)

        d3 = os.listdir(os.path.join(self.mounttmp, 'dir2'))
        d3.sort()
        self.assertEqual(['dir3', 'thing5.txt', 'thing6.txt'], d3)

        d4 = os.listdir(os.path.join(self.mounttmp, 'dir2/dir3'))
        d4.sort()
        self.assertEqual(['thing7.txt', 'thing8.txt'], d4)

        files = {'thing1.txt': 'data 1',
                 'thing2.txt': 'data 2',
                 'dir1/thing3.txt': 'data 3',
                 'dir1/thing4.txt': 'data 4',
                 'dir2/thing5.txt': 'data 5',
                 'dir2/thing6.txt': 'data 6',
                 'dir2/dir3/thing7.txt': 'data 7',
                 'dir2/dir3/thing8.txt': 'data 8'}

        # Verify every file's contents through the mount.
        for k, v in files.items():
            with open(os.path.join(self.mounttmp, k)) as f:
                self.assertEqual(v, f.read())
+
+
class FuseMagicTest(MountTestBase):
    '''Exercises the MagicDirectory: a collection becomes visible only
    after its locator is accessed.'''

    def setUp(self):
        super(FuseMagicTest, self).setUp()

        cw = arvados.CollectionWriter()

        cw.start_new_file('thing1.txt')
        cw.write("data 1")

        self.testcollection = cw.finish()

    def runTest(self):
        # Create the request handler
        operations = fuse.Operations(os.getuid(), os.getgid())
        e = operations.inodes.add_entry(fuse.MagicDirectory(llfuse.ROOT_INODE, operations.inodes))

        # (Removed a redundant `self.mounttmp = tempfile.mkdtemp()` here:
        # setUp already created the mount point, and reassigning it leaked
        # that first temporary directory.)
        llfuse.init(operations, self.mounttmp, [])
        t = threading.Thread(None, lambda: llfuse.main())
        t.start()

        # wait until the driver is finished initializing
        operations.initlock.wait()

        # The magic directory is empty until a locator is looked up.
        d1 = os.listdir(self.mounttmp)
        d1.sort()
        self.assertEqual([], d1)

        # Accessing the locator loads the collection...
        d2 = os.listdir(os.path.join(self.mounttmp, self.testcollection))
        d2.sort()
        self.assertEqual(['thing1.txt'], d2)

        # ...and it is then visible to readdir().
        d3 = os.listdir(self.mounttmp)
        d3.sort()
        self.assertEqual([self.testcollection], d3)

        files = {}
        files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'

        for k, v in files.items():
            with open(os.path.join(self.mounttmp, k)) as f:
                self.assertEqual(v, f.read())
+
+
class FuseTagsTest(MountTestBase):
    # Mounts the tags view against a running test API server and checks
    # the tag -> collection -> file hierarchy.
    def setUp(self):
        super(FuseTagsTest, self).setUp()

        cw = arvados.CollectionWriter()

        cw.start_new_file('foo')
        cw.write("foo")

        self.testcollection = cw.finish()

        run_test_server.run()

    def runTest(self):
        run_test_server.authorize_with("admin")
        api = arvados.api('v1', cache=False)

        operations = fuse.Operations(os.getuid(), os.getgid())
        e = operations.inodes.add_entry(fuse.TagsDirectory(llfuse.ROOT_INODE, operations.inodes, api))

        llfuse.init(operations, self.mounttmp, [])
        t = threading.Thread(None, lambda: llfuse.main())
        t.start()

        # wait until the driver is finished initializing
        operations.initlock.wait()

        # These expectations come from the test server's fixture data.
        d1 = os.listdir(self.mounttmp)
        d1.sort()
        self.assertEqual(['foo_tag'], d1)

        d2 = os.listdir(os.path.join(self.mounttmp, 'foo_tag'))
        d2.sort()
        self.assertEqual(['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'], d2)

        d3 = os.listdir(os.path.join(self.mounttmp, 'foo_tag', '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'))
        d3.sort()
        self.assertEqual(['foo'], d3)

        files = {}
        files[os.path.join(self.mounttmp, 'foo_tag', '1f4b0bc7583c2a7f9102c395f4ffc5e3+45', 'foo')] = 'foo'

        for k, v in files.items():
            with open(os.path.join(self.mounttmp, k)) as f:
                self.assertEqual(v, f.read())


    def tearDown(self):
        # Stop the API server before the base class unmounts.
        run_test_server.stop()

        super(FuseTagsTest, self).tearDown()
+
class FuseTagsUpdateTestBase(MountTestBase):
    # Shared body for the websocket- and poll-driven update tests below:
    # creates/deletes tag links via the API and expects the mounted tags
    # view to track the changes within the 1-second poll interval.
    def runRealTest(self):
        run_test_server.authorize_with("admin")
        api = arvados.api('v1', cache=False)

        operations = fuse.Operations(os.getuid(), os.getgid())
        e = operations.inodes.add_entry(fuse.TagsDirectory(llfuse.ROOT_INODE, operations.inodes, api, poll_time=1))

        llfuse.init(operations, self.mounttmp, [])
        t = threading.Thread(None, lambda: llfuse.main())
        t.start()

        # wait until the driver is finished initializing
        operations.initlock.wait()

        d1 = os.listdir(self.mounttmp)
        d1.sort()
        self.assertEqual(['foo_tag'], d1)

        # Adding a tag link should make a new tag directory appear.
        api.links().create(body={'link': {
            'head_uuid': 'fa7aeb5140e2848d39b416daeef4ffc5+45',
            'link_class': 'tag',
            'name': 'bar_tag'
        }}).execute()

        time.sleep(1)

        d2 = os.listdir(self.mounttmp)
        d2.sort()
        self.assertEqual(['bar_tag', 'foo_tag'], d2)

        d3 = os.listdir(os.path.join(self.mounttmp, 'bar_tag'))
        d3.sort()
        self.assertEqual(['fa7aeb5140e2848d39b416daeef4ffc5+45'], d3)

        # Tagging a second collection adds it to the tag directory.
        l = api.links().create(body={'link': {
            'head_uuid': 'ea10d51bcf88862dbcc36eb292017dfd+45',
            'link_class': 'tag',
            'name': 'bar_tag'
        }}).execute()

        time.sleep(1)

        d4 = os.listdir(os.path.join(self.mounttmp, 'bar_tag'))
        d4.sort()
        self.assertEqual(['ea10d51bcf88862dbcc36eb292017dfd+45', 'fa7aeb5140e2848d39b416daeef4ffc5+45'], d4)

        # Deleting the link removes the collection from the view again.
        api.links().delete(uuid=l['uuid']).execute()

        time.sleep(1)

        d5 = os.listdir(os.path.join(self.mounttmp, 'bar_tag'))
        d5.sort()
        self.assertEqual(['fa7aeb5140e2848d39b416daeef4ffc5+45'], d5)
+
+
+class FuseTagsUpdateTestWebsockets(FuseTagsUpdateTestBase):
+    # Runs the shared tag-update test against a test server started with
+    # the websockets argument set to True.
+    def setUp(self):
+        super(FuseTagsUpdateTestWebsockets, self).setUp()
+        run_test_server.run(True)
+
+    def runTest(self):
+        self.runRealTest()
+
+    def tearDown(self):
+        run_test_server.stop()
+        super(FuseTagsUpdateTestWebsockets, self).tearDown()
+
+
+class FuseTagsUpdateTestPoll(FuseTagsUpdateTestBase):
+    # Runs the shared tag-update test against a test server started with
+    # the websockets argument set to False, so the mount must rely on
+    # polling to pick up changes.
+    def setUp(self):
+        super(FuseTagsUpdateTestPoll, self).setUp()
+        run_test_server.run(False)
+
+    def runTest(self):
+        self.runRealTest()
+
+    def tearDown(self):
+        run_test_server.stop()
+        super(FuseTagsUpdateTestPoll, self).tearDown()
+
+
+class FuseGroupsTest(MountTestBase):
+    # Mounts a fuse.GroupsDirectory and checks that group (folder)
+    # contents from the test fixtures are visible as directory entries.
+    def setUp(self):
+        super(FuseGroupsTest, self).setUp()
+        run_test_server.run()
+
+    def runTest(self):
+        run_test_server.authorize_with("admin")
+        api = arvados.api('v1', cache=False)
+
+        operations = fuse.Operations(os.getuid(), os.getgid())
+        e = operations.inodes.add_entry(fuse.GroupsDirectory(llfuse.ROOT_INODE, operations.inodes, api))
+
+        llfuse.init(operations, self.mounttmp, [])
+        t = threading.Thread(None, lambda: llfuse.main())
+        t.start()
+
+        # wait until the driver is finished initializing
+        operations.initlock.wait()
+
+        d1 = os.listdir(self.mounttmp)
+        d1.sort()
+        self.assertIn('zzzzz-j7d0g-v955i6s2oi1cbso', d1)
+
+        # The group directory should list collections, jobs, templates
+        # and sub-groups from the fixtures.
+        d2 = os.listdir(os.path.join(self.mounttmp, 'zzzzz-j7d0g-v955i6s2oi1cbso'))
+        d2.sort()
+        self.assertEqual(['1f4b0bc7583c2a7f9102c395f4ffc5e3+45 added sometime',
+                          "I'm a job in a folder",
+                          "I'm a template in a folder",
+                          "zzzzz-j58dm-5gid26432uujf79",
+                          "zzzzz-j58dm-7r18rnd5nzhg5yk",
+                          "zzzzz-j58dm-ypsjlol9dofwijz",
+                          "zzzzz-j7d0g-axqo7eu9pwvna1x"
+                      ], d2)
+
+        # Sub-groups are nested directories.
+        d3 = os.listdir(os.path.join(self.mounttmp, 'zzzzz-j7d0g-v955i6s2oi1cbso', 'zzzzz-j7d0g-axqo7eu9pwvna1x'))
+        d3.sort()
+        self.assertEqual(["I'm in a subfolder, too",
+                          "zzzzz-j58dm-c40lddwcqqr1ffs"
+                      ], d3)
+
+        # Template entries are readable as JSON documents.
+        with open(os.path.join(self.mounttmp, 'zzzzz-j7d0g-v955i6s2oi1cbso', "I'm a template in a folder")) as f:
+            j = json.load(f)
+            self.assertEqual("Two Part Pipeline Template", j['name'])
+
+    def tearDown(self):
+        run_test_server.stop()
+        super(FuseGroupsTest, self).tearDown()
+++ /dev/null
-#! /bin/sh
-
-# This script builds a Keep executable and installs it in
-# ./bin/keep.
-#
-# In idiomatic Go style, a user would install Keep with something
-# like:
-#
-# go get arvados.org/keep
-# go install arvados.org/keep
-#
-# which would download both the Keep source and any third-party
-# packages it depends on.
-#
-# Since the Keep source is bundled within the overall Arvados source,
-# "go get" is not the primary tool for delivering Keep source and this
-# process doesn't work. Instead, this script sets the environment
-# properly and fetches any necessary dependencies by hand.
-
-if [ -z "$GOPATH" ]
-then
- GOPATH=$(pwd)
-else
- GOPATH=$(pwd):${GOPATH}
-fi
-
-export GOPATH
-
-set -o errexit # fail if any command returns an error
-
-mkdir -p pkg
-mkdir -p bin
-go get github.com/gorilla/mux
-go install keep
-ls -l bin/keep
-echo "success!"
--- /dev/null
+#! /bin/sh
+
+# Wraps the 'go' executable with some environment setup. Sets GOPATH, creates
+# 'pkg' and 'bin' directories, automatically installs dependencies, then runs
+# the underlying 'go' executable with any command line parameters provided to
+# the script.
+
+rootdir=$(readlink -f $(dirname $0))
+GOPATH=$rootdir:$rootdir/../../sdk/go:$GOPATH
+export GOPATH
+
+mkdir -p $rootdir/pkg
+mkdir -p $rootdir/bin
+
+go get github.com/gorilla/mux
+
+go $*
--- /dev/null
+package main
+
+import (
+ "arvados.org/keepclient"
+ "flag"
+ "fmt"
+ "github.com/gorilla/mux"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+)
+
+// Default TCP address on which to listen for requests.
+// Initialized by the -listen flag.
+const DEFAULT_ADDR = ":25107"
+
+var listener net.Listener
+
+// main parses command-line flags, configures the keep client, writes an
+// optional pid file, and serves proxied Keep requests until SIGTERM (or
+// until the listener fails).
+func main() {
+	var (
+		listen           string
+		no_get           bool
+		no_put           bool
+		default_replicas int
+		pidfile          string
+	)
+
+	flagset := flag.NewFlagSet("default", flag.ExitOnError)
+
+	flagset.StringVar(
+		&listen,
+		"listen",
+		DEFAULT_ADDR,
+		"Interface on which to listen for requests, in the format "+
+			"ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port "+
+			"to listen on all network interfaces.")
+
+	flagset.BoolVar(
+		&no_get,
+		"no-get",
+		false,
+		"If set, disable GET operations")
+
+	flagset.BoolVar(
+		&no_put,
+		"no-put",
+		false,
+		"If set, disable PUT operations")
+
+	flagset.IntVar(
+		&default_replicas,
+		"default-replicas",
+		2,
+		"Default number of replicas to write if not specified by the client.")
+
+	flagset.StringVar(
+		&pidfile,
+		"pid",
+		"",
+		"Path to write pid file")
+
+	flagset.Parse(os.Args[1:])
+
+	kc, err := keepclient.MakeKeepClient()
+	if err != nil {
+		log.Fatalf("Error setting up keep client %s", err.Error())
+	}
+
+	// Write the pid file, if requested. A failure to write it is logged
+	// but not fatal. (The original code performed this identical stanza
+	// twice; once is sufficient.)
+	if pidfile != "" {
+		f, err := os.Create(pidfile)
+		if err == nil {
+			fmt.Fprint(f, os.Getpid())
+			f.Close()
+		} else {
+			log.Printf("Error writing pid file (%s): %s", pidfile, err.Error())
+		}
+	}
+
+	kc.Want_replicas = default_replicas
+
+	listener, err = net.Listen("tcp", listen)
+	if err != nil {
+		log.Fatalf("Could not listen on %v", listen)
+	}
+
+	// Periodically refresh the list of keep services in the background.
+	go RefreshServicesList(&kc)
+
+	// Shut down the server gracefully (by closing the listener) if
+	// SIGTERM is received. Register with signal.Notify before starting
+	// the receiving goroutine so no early signal can be missed.
+	term := make(chan os.Signal, 1)
+	signal.Notify(term, syscall.SIGTERM)
+	go func(sig <-chan os.Signal) {
+		s := <-sig
+		log.Println("caught signal:", s)
+		listener.Close()
+	}(term)
+
+	log.Printf("Arvados Keep proxy started listening on %v with server list %v", listener.Addr(), kc.ServiceRoots())
+
+	// Start listening for requests; blocks until the listener is closed.
+	http.Serve(listener, MakeRESTRouter(!no_get, !no_put, &kc))
+
+	log.Println("shutting down")
+
+	if pidfile != "" {
+		os.Remove(pidfile)
+	}
+}
+
+// ApiTokenCache remembers API tokens that were recently validated against
+// the API server, so repeated requests bearing the same token can skip
+// the validation round trip.
+type ApiTokenCache struct {
+	tokens     map[string]int64 // token -> Unix expiry time; 0/absent means unknown
+	lock       sync.Mutex       // guards tokens
+	expireTime int64            // seconds of validity granted when a token is cached
+}
+
+// Refresh the keep service list every five minutes. Runs forever; meant
+// to be started as a goroutine from main().
+func RefreshServicesList(kc *keepclient.KeepClient) {
+	for {
+		time.Sleep(300 * time.Second)
+		oldservices := kc.ServiceRoots()
+		kc.DiscoverKeepServers()
+		newservices := kc.ServiceRoots()
+		// Compare formatted representations so we only log when the
+		// list actually changed.
+		s1 := fmt.Sprint(oldservices)
+		s2 := fmt.Sprint(newservices)
+		if s1 != s2 {
+			log.Printf("Updated server list to %v", s2)
+		}
+	}
+}
+
+// Cache the token and set an expire time. If we already have an expire time
+// on the token, it is not updated.
+func (this *ApiTokenCache) RememberToken(token string) {
+	this.lock.Lock()
+	defer this.lock.Unlock()
+
+	now := time.Now().Unix()
+	// A zero value means the token is not currently cached.
+	if this.tokens[token] == 0 {
+		this.tokens[token] = now + this.expireTime
+	}
+}
+
+// Check if the cached token is known and still believed to be valid.
+// An expired entry is reset to 0 so a later RememberToken can re-cache it.
+func (this *ApiTokenCache) RecallToken(token string) bool {
+	this.lock.Lock()
+	defer this.lock.Unlock()
+
+	now := time.Now().Unix()
+	if this.tokens[token] == 0 {
+		// Unknown token
+		return false
+	} else if now < this.tokens[token] {
+		// Token is known and still valid
+		return true
+	} else {
+		// Token is expired
+		this.tokens[token] = 0
+		return false
+	}
+}
+
+// GetRemoteAddress returns a string describing the client address for
+// logging: the X-Real-IP header when present (annotated with
+// X-Forwarded-For when the two differ), otherwise the socket's remote
+// address.
+func GetRemoteAddress(req *http.Request) string {
+	if realip := req.Header.Get("X-Real-IP"); realip != "" {
+		if forwarded := req.Header.Get("X-Forwarded-For"); forwarded != realip {
+			return fmt.Sprintf("%s (X-Forwarded-For %s)", realip, forwarded)
+		} else {
+			return realip
+		}
+	}
+	return req.RemoteAddr
+}
+
+// CheckAuthorizationHeader returns true when the request carries an
+// "Authorization: OAuth2 <token>" header whose token is either already in
+// the cache or is accepted by the API server (probed with a HEAD request
+// to /arvados/v1/users/current). Freshly validated tokens are cached.
+func CheckAuthorizationHeader(kc keepclient.KeepClient, cache *ApiTokenCache, req *http.Request) bool {
+	var auth string
+	if auth = req.Header.Get("Authorization"); auth == "" {
+		return false
+	}
+
+	var tok string
+	_, err := fmt.Sscanf(auth, "OAuth2 %s", &tok)
+	if err != nil {
+		// Scanning error
+		return false
+	}
+
+	if cache.RecallToken(tok) {
+		// Valid in the cache, short circuit
+		return true
+	}
+
+	var usersreq *http.Request
+
+	if usersreq, err = http.NewRequest("HEAD", fmt.Sprintf("https://%s/arvados/v1/users/current", kc.ApiServer), nil); err != nil {
+		// Can't construct the request
+		log.Printf("%s: CheckAuthorizationHeader error: %v", GetRemoteAddress(req), err)
+		return false
+	}
+
+	// Add api token header
+	usersreq.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", tok))
+
+	// Actually make the request
+	var resp *http.Response
+	if resp, err = kc.Client.Do(usersreq); err != nil {
+		// Something else failed
+		log.Printf("%s: CheckAuthorizationHeader error connecting to API server: %v", GetRemoteAddress(req), err.Error())
+		return false
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		// Bad status
+		log.Printf("%s: CheckAuthorizationHeader API server responded: %v", GetRemoteAddress(req), resp.Status)
+		return false
+	}
+
+	// Success! Update cache
+	cache.RememberToken(tok)
+
+	return true
+}
+
+// GetBlockHandler serves GET and HEAD block requests. It embeds the keep
+// client used to fetch blocks and the token cache used for auth checks.
+type GetBlockHandler struct {
+	*keepclient.KeepClient
+	*ApiTokenCache
+}
+
+// PutBlockHandler serves PUT block requests, with the same embedded
+// collaborators as GetBlockHandler.
+type PutBlockHandler struct {
+	*keepclient.KeepClient
+	*ApiTokenCache
+}
+
+// InvalidPathHandler answers any unroutable request with 400 Bad Request.
+type InvalidPathHandler struct{}
+
+// MakeRESTRouter
+// Returns a mux.Router that passes GET and PUT requests to the
+// appropriate handlers. Routes are registered for locators both with and
+// without hint suffixes; GET/HEAD routes are added only when enable_get
+// is true and PUT routes only when enable_put is true. Everything else
+// falls through to InvalidPathHandler. All handlers share one
+// ApiTokenCache with a 300-second expiry.
+//
+func MakeRESTRouter(
+	enable_get bool,
+	enable_put bool,
+	kc *keepclient.KeepClient) *mux.Router {
+
+	t := &ApiTokenCache{tokens: make(map[string]int64), expireTime: 300}
+
+	rest := mux.NewRouter()
+
+	if enable_get {
+		rest.Handle(`/{hash:[0-9a-f]{32}}+{hints}`,
+			GetBlockHandler{kc, t}).Methods("GET", "HEAD")
+		rest.Handle(`/{hash:[0-9a-f]{32}}`, GetBlockHandler{kc, t}).Methods("GET", "HEAD")
+	}
+
+	if enable_put {
+		rest.Handle(`/{hash:[0-9a-f]{32}}+{hints}`, PutBlockHandler{kc, t}).Methods("PUT")
+		rest.Handle(`/{hash:[0-9a-f]{32}}`, PutBlockHandler{kc, t}).Methods("PUT")
+	}
+
+	rest.NotFoundHandler = InvalidPathHandler{}
+
+	return rest
+}
+
+// ServeHTTP logs the unroutable request and responds 400 Bad Request.
+func (this InvalidPathHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+	log.Printf("%s: %s %s unroutable", GetRemoteAddress(req), req.Method, req.URL.Path)
+	http.Error(resp, "Bad request", http.StatusBadRequest)
+}
+
+// ServeHTTP proxies a GET or HEAD block request to the keep backend
+// after checking the Authorization header. On success the block content
+// (for GET) and Content-Length are returned; BlockNotFound maps to 404,
+// anything else to 502.
+func (this GetBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+
+	kc := *this.KeepClient
+
+	hash := mux.Vars(req)["hash"]
+	hints := mux.Vars(req)["hints"]
+
+	locator := keepclient.MakeLocator2(hash, hints)
+
+	log.Printf("%s: %s %s", GetRemoteAddress(req), req.Method, hash)
+
+	if !CheckAuthorizationHeader(kc, this.ApiTokenCache, req) {
+		http.Error(resp, "Missing or invalid Authorization header", http.StatusForbidden)
+		return
+	}
+
+	var reader io.ReadCloser
+	var err error
+	var blocklen int64
+
+	if req.Method == "GET" {
+		reader, blocklen, _, err = kc.AuthorizedGet(hash, locator.Signature, locator.Timestamp)
+		// Only close the reader when we actually got one: deferring
+		// reader.Close() unconditionally panics on a nil reader when
+		// AuthorizedGet fails.
+		if reader != nil {
+			defer reader.Close()
+		}
+	} else if req.Method == "HEAD" {
+		blocklen, _, err = kc.AuthorizedAsk(hash, locator.Signature, locator.Timestamp)
+	}
+
+	resp.Header().Set("Content-Length", fmt.Sprint(blocklen))
+
+	switch err {
+	case nil:
+		if reader != nil {
+			n, err2 := io.Copy(resp, reader)
+			if n != blocklen {
+				// err is nil in this branch, so it must not be
+				// formatted here (the old code passed err.Error(),
+				// which would panic, and had a spare argument).
+				log.Printf("%s: %s %s mismatched return %v with Content-Length %v", GetRemoteAddress(req), req.Method, hash, n, blocklen)
+			} else if err2 == nil {
+				log.Printf("%s: %s %s success returned %v bytes", GetRemoteAddress(req), req.Method, hash, n)
+			} else {
+				// The copy error is err2, not err (which is nil here).
+				log.Printf("%s: %s %s returned %v bytes error %v", GetRemoteAddress(req), req.Method, hash, n, err2.Error())
+			}
+		} else {
+			log.Printf("%s: %s %s success", GetRemoteAddress(req), req.Method, hash)
+		}
+	case keepclient.BlockNotFound:
+		http.Error(resp, "Not found", http.StatusNotFound)
+	default:
+		http.Error(resp, err.Error(), http.StatusBadGateway)
+	}
+
+	if err != nil {
+		log.Printf("%s: %s %s error %s", GetRemoteAddress(req), req.Method, hash, err.Error())
+	}
+}
+
+// ServeHTTP proxies a PUT block request to the keep backend after
+// validating Content-Length, the locator size hint, and the
+// Authorization header. The number of replicas actually stored is
+// reported via the X-Keep-Replicas-Stored response header.
+func (this PutBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+
+	kc := *this.KeepClient
+
+	hash := mux.Vars(req)["hash"]
+	hints := mux.Vars(req)["hints"]
+
+	locator := keepclient.MakeLocator2(hash, hints)
+
+	// Parse Content-Length. A missing or unparseable header leaves
+	// contentLength at -1, which is rejected below with 411.
+	var contentLength int64 = -1
+	if req.Header.Get("Content-Length") != "" {
+		_, err := fmt.Sscanf(req.Header.Get("Content-Length"), "%d", &contentLength)
+		if err != nil {
+			// The old code set a response header here instead of
+			// handling the parse failure. Reset contentLength (Sscanf
+			// may have partially written it) so the check below rejects
+			// the request.
+			contentLength = -1
+		}
+	}
+
+	log.Printf("%s: %s %s Content-Length %v", GetRemoteAddress(req), req.Method, hash, contentLength)
+
+	if contentLength < 1 {
+		http.Error(resp, "Must include Content-Length header", http.StatusLengthRequired)
+		return
+	}
+
+	if locator.Size > 0 && int64(locator.Size) != contentLength {
+		http.Error(resp, "Locator size hint does not match Content-Length header", http.StatusBadRequest)
+		return
+	}
+
+	if !CheckAuthorizationHeader(kc, this.ApiTokenCache, req) {
+		http.Error(resp, "Missing or invalid Authorization header", http.StatusForbidden)
+		return
+	}
+
+	// Check if the client specified the number of replicas. Use the
+	// keepclient constant for both the presence check and the scan (the
+	// old code mixed a literal and the constant), and apply the value
+	// only when parsing SUCCEEDED (the old condition was inverted).
+	// kc is a per-request copy, so this does not affect other requests.
+	if req.Header.Get(keepclient.X_Keep_Desired_Replicas) != "" {
+		var r int
+		_, err := fmt.Sscanf(req.Header.Get(keepclient.X_Keep_Desired_Replicas), "%d", &r)
+		if err == nil {
+			kc.Want_replicas = r
+		}
+	}
+
+	// Now try to put the block through
+	hash, replicas, err := kc.PutHR(hash, req.Body, contentLength)
+
+	// Tell the client how many successful PUTs we accomplished
+	resp.Header().Set(keepclient.X_Keep_Replicas_Stored, fmt.Sprintf("%d", replicas))
+
+	switch err {
+	case nil:
+		// Default will return http.StatusOK
+		log.Printf("%s: %s %s finished, stored %v replicas (desired %v)", GetRemoteAddress(req), req.Method, hash, replicas, kc.Want_replicas)
+		n, err2 := io.WriteString(resp, hash)
+		if err2 != nil {
+			// The old format string had three verbs but only two
+			// arguments; supply the remote address.
+			log.Printf("%s: wrote %v bytes to response body and got error %v", GetRemoteAddress(req), n, err2.Error())
+		}
+
+	case keepclient.OversizeBlockError:
+		// Too much data
+		http.Error(resp, fmt.Sprintf("Exceeded maximum blocksize %d", keepclient.BLOCKSIZE), http.StatusRequestEntityTooLarge)
+
+	case keepclient.InsufficientReplicasError:
+		if replicas > 0 {
+			// At least one write is considered success. The
+			// client can decide if getting less than the number of
+			// replications it asked for is a fatal error.
+			// Default will return http.StatusOK
+			n, err2 := io.WriteString(resp, hash)
+			if err2 != nil {
+				log.Printf("%s: wrote %v bytes to response body and got error %v", GetRemoteAddress(req), n, err2.Error())
+			}
+		} else {
+			http.Error(resp, "", http.StatusServiceUnavailable)
+		}
+
+	default:
+		http.Error(resp, err.Error(), http.StatusBadGateway)
+	}
+
+	if err != nil {
+		log.Printf("%s: %s %s stored %v replicas (desired %v) got error %v", GetRemoteAddress(req), req.Method, hash, replicas, kc.Want_replicas, err.Error())
+	}
+
+}
--- /dev/null
+package main
+
+import (
+ "arvados.org/keepclient"
+ "crypto/md5"
+ "crypto/tls"
+ "fmt"
+ . "gopkg.in/check.v1"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+ "time"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+ TestingT(t)
+}
+
+// Gocheck boilerplate
+var _ = Suite(&ServerRequiredSuite{})
+
+// Tests that require the Keep server running
+type ServerRequiredSuite struct{}
+
+// pythonDir returns the path of the Python SDK directory (which contains
+// run_test_server.py), computed relative to the first GOPATH entry.
+func pythonDir() string {
+	gopath := os.Getenv("GOPATH")
+	return fmt.Sprintf("%s/../../sdk/python", strings.Split(gopath, ":")[0])
+}
+
+// SetUpSuite starts the test API server and test Keep servers via the
+// Python helper, then points the client environment variables at them.
+// Panics (failing the suite) if either server fails to start.
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+	cwd, _ := os.Getwd()
+	defer os.Chdir(cwd)
+
+	os.Chdir(pythonDir())
+
+	if err := exec.Command("python", "run_test_server.py", "start").Run(); err != nil {
+		panic("'python run_test_server.py start' returned error")
+	}
+	if err := exec.Command("python", "run_test_server.py", "start_keep").Run(); err != nil {
+		panic("'python run_test_server.py start_keep' returned error")
+	}
+
+	os.Setenv("ARVADOS_API_HOST", "localhost:3001")
+	os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+	os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+}
+
+// TearDownSuite stops the test Keep servers and the test API server.
+// Errors are deliberately ignored; this is best-effort cleanup.
+func (s *ServerRequiredSuite) TearDownSuite(c *C) {
+	cwd, _ := os.Getwd()
+	defer os.Chdir(cwd)
+
+	os.Chdir(pythonDir())
+	exec.Command("python", "run_test_server.py", "stop_keep").Run()
+	exec.Command("python", "run_test_server.py", "stop")..Run()
+}
+
+// setupProxyService registers a keep_service record of type "proxy"
+// (localhost:29950) with the test API server, so clients discover the
+// proxy under test. Panics on any HTTP or non-200 failure.
+func setupProxyService() {
+
+	// TLS verification is disabled because the test API server uses a
+	// self-signed certificate.
+	client := &http.Client{Transport: &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
+
+	var req *http.Request
+	var err error
+	if req, err = http.NewRequest("POST", fmt.Sprintf("https://%s/arvados/v1/keep_services", os.Getenv("ARVADOS_API_HOST")), nil); err != nil {
+		panic(err.Error())
+	}
+	req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", os.Getenv("ARVADOS_API_TOKEN")))
+
+	// Stream the form-encoded body through a pipe from a goroutine.
+	reader, writer := io.Pipe()
+
+	req.Body = reader
+
+	go func() {
+		data := url.Values{}
+		data.Set("keep_service", `{
+"service_host": "localhost",
+"service_port": 29950,
+"service_ssl_flag": false,
+"service_type": "proxy"
+}`)
+
+		writer.Write([]byte(data.Encode()))
+		writer.Close()
+	}()
+
+	var resp *http.Response
+	if resp, err = client.Do(req); err != nil {
+		panic(err.Error())
+	}
+	if resp.StatusCode != 200 {
+		panic(resp.Status)
+	}
+}
+
+// runProxy starts the proxy under test by invoking main() in a goroutine
+// with the given argv and listen port, waits briefly for it to come up,
+// then builds and returns a keep client configured (via
+// ARVADOS_KEEP_PROXY) to talk to it using the given API token.
+// NOTE(review): the error from MakeKeepClient is checked after kc is
+// used; this relies on gocheck continuing past failed checks — confirm.
+func runProxy(c *C, args []string, token string, port int) keepclient.KeepClient {
+	os.Args = append(args, fmt.Sprintf("-listen=:%v", port))
+	os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+
+	go main()
+	time.Sleep(100 * time.Millisecond)
+
+	os.Setenv("ARVADOS_KEEP_PROXY", fmt.Sprintf("http://localhost:%v", port))
+	os.Setenv("ARVADOS_API_TOKEN", token)
+	kc, err := keepclient.MakeKeepClient()
+	c.Check(kc.Using_proxy, Equals, true)
+	c.Check(len(kc.ServiceRoots()), Equals, 1)
+	c.Check(kc.ServiceRoots()[0], Equals, fmt.Sprintf("http://localhost:%v", port))
+	c.Check(err, Equals, nil)
+	os.Setenv("ARVADOS_KEEP_PROXY", "")
+	log.Print("keepclient created")
+	return kc
+}
+
+func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
+ log.Print("TestPutAndGet start")
+
+ os.Args = []string{"keepproxy", "-listen=:29950"}
+ os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+ go main()
+ time.Sleep(100 * time.Millisecond)
+
+ setupProxyService()
+
+ os.Setenv("ARVADOS_EXTERNAL_CLIENT", "true")
+ kc, err := keepclient.MakeKeepClient()
+ c.Check(kc.External, Equals, true)
+ c.Check(kc.Using_proxy, Equals, true)
+ c.Check(len(kc.ServiceRoots()), Equals, 1)
+ c.Check(kc.ServiceRoots()[0], Equals, "http://localhost:29950")
+ c.Check(err, Equals, nil)
+ os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
+ log.Print("keepclient created")
+
+ defer listener.Close()
+
+ hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ var hash2 string
+
+ {
+ _, _, err := kc.Ask(hash)
+ c.Check(err, Equals, keepclient.BlockNotFound)
+ log.Print("Ask 1")
+ }
+
+ {
+ var rep int
+ var err error
+ hash2, rep, err = kc.PutB([]byte("foo"))
+ c.Check(hash2, Equals, fmt.Sprintf("%s+3", hash))
+ c.Check(rep, Equals, 2)
+ c.Check(err, Equals, nil)
+ log.Print("PutB")
+ }
+
+ {
+ blocklen, _, err := kc.Ask(hash2)
+ c.Assert(err, Equals, nil)
+ c.Check(blocklen, Equals, int64(3))
+ log.Print("Ask 2")
+ }
+
+ {
+ reader, blocklen, _, err := kc.Get(hash2)
+ c.Assert(err, Equals, nil)
+ all, err := ioutil.ReadAll(reader)
+ c.Check(all, DeepEquals, []byte("foo"))
+ c.Check(blocklen, Equals, int64(3))
+ log.Print("Get")
+ }
+
+ log.Print("TestPutAndGet done")
+}
+
+func (s *ServerRequiredSuite) TestPutAskGetForbidden(c *C) {
+ log.Print("TestPutAndGet start")
+
+ kc := runProxy(c, []string{"keepproxy"}, "123abc", 29951)
+ defer listener.Close()
+
+ log.Print("keepclient created")
+
+ hash := fmt.Sprintf("%x", md5.Sum([]byte("bar")))
+
+ {
+ _, _, err := kc.Ask(hash)
+ c.Check(err, Equals, keepclient.BlockNotFound)
+ log.Print("Ask 1")
+ }
+
+ {
+ hash2, rep, err := kc.PutB([]byte("bar"))
+ c.Check(hash2, Equals, "")
+ c.Check(rep, Equals, 0)
+ c.Check(err, Equals, keepclient.InsufficientReplicasError)
+ log.Print("PutB")
+ }
+
+ {
+ blocklen, _, err := kc.Ask(hash)
+ c.Assert(err, Equals, keepclient.BlockNotFound)
+ c.Check(blocklen, Equals, int64(0))
+ log.Print("Ask 2")
+ }
+
+ {
+ _, blocklen, _, err := kc.Get(hash)
+ c.Assert(err, Equals, keepclient.BlockNotFound)
+ c.Check(blocklen, Equals, int64(0))
+ log.Print("Get")
+ }
+
+ log.Print("TestPutAndGetForbidden done")
+}
+
+func (s *ServerRequiredSuite) TestGetDisabled(c *C) {
+ log.Print("TestGetDisabled start")
+
+ kc := runProxy(c, []string{"keepproxy", "-no-get"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29952)
+ defer listener.Close()
+
+ hash := fmt.Sprintf("%x", md5.Sum([]byte("baz")))
+
+ {
+ _, _, err := kc.Ask(hash)
+ c.Check(err, Equals, keepclient.BlockNotFound)
+ log.Print("Ask 1")
+ }
+
+ {
+ hash2, rep, err := kc.PutB([]byte("baz"))
+ c.Check(hash2, Equals, fmt.Sprintf("%s+3", hash))
+ c.Check(rep, Equals, 2)
+ c.Check(err, Equals, nil)
+ log.Print("PutB")
+ }
+
+ {
+ blocklen, _, err := kc.Ask(hash)
+ c.Assert(err, Equals, keepclient.BlockNotFound)
+ c.Check(blocklen, Equals, int64(0))
+ log.Print("Ask 2")
+ }
+
+ {
+ _, blocklen, _, err := kc.Get(hash)
+ c.Assert(err, Equals, keepclient.BlockNotFound)
+ c.Check(blocklen, Equals, int64(0))
+ log.Print("Get")
+ }
+
+ log.Print("TestGetDisabled done")
+}
+
+func (s *ServerRequiredSuite) TestPutDisabled(c *C) {
+ log.Print("TestPutDisabled start")
+
+ kc := runProxy(c, []string{"keepproxy", "-no-put"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29953)
+ defer listener.Close()
+
+ {
+ hash2, rep, err := kc.PutB([]byte("quux"))
+ c.Check(hash2, Equals, "")
+ c.Check(rep, Equals, 0)
+ c.Check(err, Equals, keepclient.InsufficientReplicasError)
+ log.Print("PutB")
+ }
+
+ log.Print("TestPutDisabled done")
+}
--- /dev/null
+// Tests for Keep HTTP handlers:
+//
+// GetBlockHandler
+// PutBlockHandler
+// IndexHandler
+//
+// The HTTP handlers are responsible for enforcing permission policy,
+// so these tests must exercise all possible permission permutations.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/gorilla/mux"
+ "net/http"
+ "net/http/httptest"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+)
+
+// A RequestTester represents the parameters for an HTTP request to
+// be issued on behalf of a unit test.
+type RequestTester struct {
+ uri string
+ api_token string
+ method string
+ request_body []byte
+}
+
+// Test GetBlockHandler on the following situations:
+// - permissions off, unauthenticated request, unsigned locator
+// - permissions on, authenticated request, signed locator
+// - permissions on, authenticated request, unsigned locator
+// - permissions on, unauthenticated request, signed locator
+// - permissions on, authenticated request, expired locator
+//
+func TestGetHandler(t *testing.T) {
+ defer teardown()
+
+ // Prepare two test Keep volumes. Our block is stored on the second volume.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ vols := KeepVM.Volumes()
+ if err := vols[0].Put(TEST_HASH, TEST_BLOCK); err != nil {
+ t.Error(err)
+ }
+
+ // Set up a REST router for testing the handlers.
+ rest := MakeRESTRouter()
+
+ // Create locators for testing.
+ // Turn on permission settings so we can generate signed locators.
+ enforce_permissions = true
+ PermissionSecret = []byte(known_key)
+ permission_ttl = time.Duration(300) * time.Second
+
+ var (
+ unsigned_locator = "http://localhost:25107/" + TEST_HASH
+ valid_timestamp = time.Now().Add(permission_ttl)
+ expired_timestamp = time.Now().Add(-time.Hour)
+ signed_locator = "http://localhost:25107/" + SignLocator(TEST_HASH, known_token, valid_timestamp)
+ expired_locator = "http://localhost:25107/" + SignLocator(TEST_HASH, known_token, expired_timestamp)
+ )
+
+ // -----------------
+ // Test unauthenticated request with permissions off.
+ enforce_permissions = false
+
+ // Unauthenticated request, unsigned locator
+ // => OK
+ response := IssueRequest(rest,
+ &RequestTester{
+ method: "GET",
+ uri: unsigned_locator,
+ })
+ ExpectStatusCode(t,
+ "Unauthenticated request, unsigned locator", http.StatusOK, response)
+ ExpectBody(t,
+ "Unauthenticated request, unsigned locator",
+ string(TEST_BLOCK),
+ response)
+ received_xbs := response.Header().Get("X-Block-Size")
+ expected_xbs := fmt.Sprintf("%d", len(TEST_BLOCK))
+ if received_xbs != expected_xbs {
+ t.Errorf("expected X-Block-Size %s, got %s", expected_xbs, received_xbs)
+ }
+
+ // ----------------
+ // Permissions: on.
+ enforce_permissions = true
+
+ // Authenticated request, signed locator
+ // => OK
+ response = IssueRequest(rest, &RequestTester{
+ method: "GET",
+ uri: signed_locator,
+ api_token: known_token,
+ })
+ ExpectStatusCode(t,
+ "Authenticated request, signed locator", http.StatusOK, response)
+ ExpectBody(t,
+ "Authenticated request, signed locator", string(TEST_BLOCK), response)
+ received_xbs = response.Header().Get("X-Block-Size")
+ expected_xbs = fmt.Sprintf("%d", len(TEST_BLOCK))
+ if received_xbs != expected_xbs {
+ t.Errorf("expected X-Block-Size %s, got %s", expected_xbs, received_xbs)
+ }
+
+ // Authenticated request, unsigned locator
+ // => PermissionError
+ response = IssueRequest(rest, &RequestTester{
+ method: "GET",
+ uri: unsigned_locator,
+ api_token: known_token,
+ })
+ ExpectStatusCode(t, "unsigned locator", PermissionError.HTTPCode, response)
+
+ // Unauthenticated request, signed locator
+ // => PermissionError
+ response = IssueRequest(rest, &RequestTester{
+ method: "GET",
+ uri: signed_locator,
+ })
+ ExpectStatusCode(t,
+ "Unauthenticated request, signed locator",
+ PermissionError.HTTPCode, response)
+
+ // Authenticated request, expired locator
+ // => ExpiredError
+ response = IssueRequest(rest, &RequestTester{
+ method: "GET",
+ uri: expired_locator,
+ api_token: known_token,
+ })
+ ExpectStatusCode(t,
+ "Authenticated request, expired locator",
+ ExpiredError.HTTPCode, response)
+}
+
+// Test PutBlockHandler on the following situations:
+// - no server key
+// - with server key, authenticated request, unsigned locator
+// - with server key, unauthenticated request, unsigned locator
+//
+func TestPutHandler(t *testing.T) {
+ defer teardown()
+
+ // Prepare two test Keep volumes.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ // Set up a REST router for testing the handlers.
+ rest := MakeRESTRouter()
+
+ // --------------
+ // No server key.
+
+ // Unauthenticated request, no server key
+ // => OK (unsigned response)
+ unsigned_locator := "http://localhost:25107/" + TEST_HASH
+ response := IssueRequest(rest,
+ &RequestTester{
+ method: "PUT",
+ uri: unsigned_locator,
+ request_body: TEST_BLOCK,
+ })
+
+ ExpectStatusCode(t,
+ "Unauthenticated request, no server key", http.StatusOK, response)
+ ExpectBody(t,
+ "Unauthenticated request, no server key",
+ TEST_HASH_PUT_RESPONSE, response)
+
+ // ------------------
+ // With a server key.
+
+ PermissionSecret = []byte(known_key)
+ permission_ttl = time.Duration(300) * time.Second
+
+ // When a permission key is available, the locator returned
+ // from an authenticated PUT request will be signed.
+
+ // Authenticated PUT, signed locator
+ // => OK (signed response)
+ response = IssueRequest(rest,
+ &RequestTester{
+ method: "PUT",
+ uri: unsigned_locator,
+ request_body: TEST_BLOCK,
+ api_token: known_token,
+ })
+
+ ExpectStatusCode(t,
+ "Authenticated PUT, signed locator, with server key",
+ http.StatusOK, response)
+ response_locator := strings.TrimSpace(response.Body.String())
+ if !VerifySignature(response_locator, known_token) {
+ t.Errorf("Authenticated PUT, signed locator, with server key:\n"+
+ "response '%s' does not contain a valid signature",
+ response_locator)
+ }
+
+ // Unauthenticated PUT, unsigned locator
+ // => OK
+ response = IssueRequest(rest,
+ &RequestTester{
+ method: "PUT",
+ uri: unsigned_locator,
+ request_body: TEST_BLOCK,
+ })
+
+ ExpectStatusCode(t,
+ "Unauthenticated PUT, unsigned locator, with server key",
+ http.StatusOK, response)
+ ExpectBody(t,
+ "Unauthenticated PUT, unsigned locator, with server key",
+ TEST_HASH_PUT_RESPONSE, response)
+}
+
+// Test /index requests:
+// - enforce_permissions off | unauthenticated /index request
+// - enforce_permissions off | unauthenticated /index/prefix request
+// - enforce_permissions off | authenticated /index request | non-superuser
+// - enforce_permissions off | authenticated /index/prefix request | non-superuser
+// - enforce_permissions off | authenticated /index request | superuser
+// - enforce_permissions off | authenticated /index/prefix request | superuser
+// - enforce_permissions on | unauthenticated /index request
+// - enforce_permissions on | unauthenticated /index/prefix request
+// - enforce_permissions on | authenticated /index request | non-superuser
+// - enforce_permissions on | authenticated /index/prefix request | non-superuser
+// - enforce_permissions on | authenticated /index request | superuser
+// - enforce_permissions on | authenticated /index/prefix request | superuser
+//
+// The only /index requests that should succeed are those issued by the
+// superuser when enforce_permissions = true.
+//
+func TestIndexHandler(t *testing.T) {
+ defer teardown()
+
+ // Set up Keep volumes and populate them.
+ // Include multiple blocks on different volumes, and
+ // some metadata files (which should be omitted from index listings)
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ vols := KeepVM.Volumes()
+ vols[0].Put(TEST_HASH, TEST_BLOCK)
+ vols[1].Put(TEST_HASH_2, TEST_BLOCK_2)
+ vols[0].Put(TEST_HASH+".meta", []byte("metadata"))
+ vols[1].Put(TEST_HASH_2+".meta", []byte("metadata"))
+
+ // Set up a REST router for testing the handlers.
+ rest := MakeRESTRouter()
+
+ data_manager_token = "DATA MANAGER TOKEN"
+
+ unauthenticated_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index",
+ }
+ authenticated_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index",
+ api_token: known_token,
+ }
+ superuser_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index",
+ api_token: data_manager_token,
+ }
+ unauth_prefix_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index/" + TEST_HASH[0:3],
+ }
+ auth_prefix_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index/" + TEST_HASH[0:3],
+ api_token: known_token,
+ }
+ superuser_prefix_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index/" + TEST_HASH[0:3],
+ api_token: data_manager_token,
+ }
+
+ // ----------------------------
+ // enforce_permissions disabled
+ // All /index requests should fail.
+ enforce_permissions = false
+
+ // unauthenticated /index request
+ // => PermissionError
+ response := IssueRequest(rest, unauthenticated_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, unauthenticated request",
+ PermissionError.HTTPCode,
+ response)
+
+ // unauthenticated /index/prefix request
+ // => PermissionError
+ response = IssueRequest(rest, unauth_prefix_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, unauthenticated /index/prefix request",
+ PermissionError.HTTPCode,
+ response)
+
+ // authenticated /index request, non-superuser
+ // => PermissionError
+ response = IssueRequest(rest, authenticated_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, authenticated request, non-superuser",
+ PermissionError.HTTPCode,
+ response)
+
+ // authenticated /index/prefix request, non-superuser
+ // => PermissionError
+ response = IssueRequest(rest, auth_prefix_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, authenticated /index/prefix request, non-superuser",
+ PermissionError.HTTPCode,
+ response)
+
+ // authenticated /index request, superuser
+ // => PermissionError
+ response = IssueRequest(rest, superuser_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, superuser request",
+ PermissionError.HTTPCode,
+ response)
+
+ // superuser /index/prefix request
+ // => PermissionError
+ response = IssueRequest(rest, superuser_prefix_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, superuser /index/prefix request",
+ PermissionError.HTTPCode,
+ response)
+
+ // ---------------------------
+ // enforce_permissions enabled
+ // Only the superuser should be allowed to issue /index requests.
+ enforce_permissions = true
+
+ // unauthenticated /index request
+ // => PermissionError
+ response = IssueRequest(rest, unauthenticated_req)
+ ExpectStatusCode(t,
+ "enforce_permissions on, unauthenticated request",
+ PermissionError.HTTPCode,
+ response)
+
+ // unauthenticated /index/prefix request
+ // => PermissionError
+ response = IssueRequest(rest, unauth_prefix_req)
+ ExpectStatusCode(t,
+ "permissions on, unauthenticated /index/prefix request",
+ PermissionError.HTTPCode,
+ response)
+
+ // authenticated /index request, non-superuser
+ // => PermissionError
+ response = IssueRequest(rest, authenticated_req)
+ ExpectStatusCode(t,
+ "permissions on, authenticated request, non-superuser",
+ PermissionError.HTTPCode,
+ response)
+
+ // authenticated /index/prefix request, non-superuser
+ // => PermissionError
+ response = IssueRequest(rest, auth_prefix_req)
+ ExpectStatusCode(t,
+ "permissions on, authenticated /index/prefix request, non-superuser",
+ PermissionError.HTTPCode,
+ response)
+
+ // superuser /index request
+ // => OK
+ response = IssueRequest(rest, superuser_req)
+ ExpectStatusCode(t,
+ "permissions on, superuser request",
+ http.StatusOK,
+ response)
+
+ expected := `^` + TEST_HASH + `\+\d+ \d+\n` +
+ TEST_HASH_2 + `\+\d+ \d+\n$`
+ match, _ := regexp.MatchString(expected, response.Body.String())
+ if !match {
+ t.Errorf(
+ "permissions on, superuser request: expected %s, got:\n%s",
+ expected, response.Body.String())
+ }
+
+ // superuser /index/prefix request
+ // => OK
+ response = IssueRequest(rest, superuser_prefix_req)
+ ExpectStatusCode(t,
+ "permissions on, superuser request",
+ http.StatusOK,
+ response)
+
+ expected = `^` + TEST_HASH + `\+\d+ \d+\n$`
+ match, _ = regexp.MatchString(expected, response.Body.String())
+ if !match {
+ t.Errorf(
+ "permissions on, superuser /index/prefix request: expected %s, got:\n%s",
+ expected, response.Body.String())
+ }
+}
+
+// ====================
+// Helper functions
+// ====================
+
+// IssueRequest executes an HTTP request described by rt, to a
+// specified REST router. It returns the HTTP response to the request.
+// (The request's Authorization header is set only when rt.api_token is
+// non-empty.)
+func IssueRequest(router *mux.Router, rt *RequestTester) *httptest.ResponseRecorder {
+	response := httptest.NewRecorder()
+	body := bytes.NewReader(rt.request_body)
+	req, _ := http.NewRequest(rt.method, rt.uri, body)
+	if rt.api_token != "" {
+		req.Header.Set("Authorization", "OAuth2 "+rt.api_token)
+	}
+	router.ServeHTTP(response, req)
+	return response
+}
+
+// ExpectStatusCode checks whether a response has the specified status code,
+// and reports a test failure if not.
+func ExpectStatusCode(
+	t *testing.T,
+	testname string,
+	expected_status int,
+	response *httptest.ResponseRecorder) {
+	if response.Code != expected_status {
+		// Use %d for the int status code; the old %s verb would print
+		// "%!s(int=200)" instead of the number.
+		t.Errorf("%s: expected status %d, got %+v",
+			testname, expected_status, response)
+	}
+}
+
+// ExpectBody checks whether a response has the specified body string,
+// and reports a test failure if not.
+func ExpectBody(
+	t *testing.T,
+	testname string,
+	expected_body string,
+	response *httptest.ResponseRecorder) {
+	if response.Body.String() != expected_body {
+		t.Errorf("%s: expected response body '%s', got %+v",
+			testname, expected_body, response)
+	}
+}
"io"
"io/ioutil"
"log"
+ "net"
"net/http"
"os"
+ "os/signal"
"regexp"
+ "runtime"
+ "strconv"
"strings"
"syscall"
+ "time"
)
// ======================
// and/or configuration file settings.
// Default TCP address on which to listen for requests.
+// Initialized by the --listen flag.
const DEFAULT_ADDR = ":25107"
// A Keep "block" is 64MB.
var PROC_MOUNTS = "/proc/mounts"
// The Keep VolumeManager maintains a list of available volumes.
+// Initialized by the --volumes flag (or by FindKeepVolumes).
var KeepVM VolumeManager
+// enforce_permissions controls whether permission signatures
+// should be enforced (affecting GET and DELETE requests).
+// Initialized by the --enforce-permissions flag.
+var enforce_permissions bool
+
+// permission_ttl is the time duration for which new permission
+// signatures (returned by PUT requests) will be valid.
+// Initialized by the --permission-ttl flag.
+var permission_ttl time.Duration
+
+// data_manager_token represents the API token used by the
+// Data Manager, and is required on certain privileged operations.
+// Initialized by the --data-manager-token-file flag.
+var data_manager_token string
+
// ==========
// Error types.
//
}
var (
- CollisionError = &KeepError{400, "Collision"}
- MD5Error = &KeepError{401, "MD5 Failure"}
- CorruptError = &KeepError{402, "Corruption"}
- NotFoundError = &KeepError{404, "Not Found"}
- GenericError = &KeepError{500, "Fail"}
- FullError = &KeepError{503, "Full"}
- TooLongError = &KeepError{504, "Too Long"}
+ BadRequestError = &KeepError{400, "Bad Request"}
+ CollisionError = &KeepError{400, "Collision"}
+ MD5Error = &KeepError{401, "MD5 Failure"}
+ PermissionError = &KeepError{401, "Permission denied"}
+ CorruptError = &KeepError{402, "Corruption"}
+ ExpiredError = &KeepError{403, "Expired permission signature"}
+ NotFoundError = &KeepError{404, "Not Found"}
+ GenericError = &KeepError{500, "Fail"}
+ FullError = &KeepError{503, "Full"}
+ TooLongError = &KeepError{504, "Too Long"}
)
func (e *KeepError) Error() string {
// data exceeds BLOCKSIZE bytes.
var ReadErrorTooLong = errors.New("Too long")
+// TODO(twp): continue moving as much code as possible out of main
+// so it can be effectively tested. Esp. handling and postprocessing
+// of command line flags (identifying Keep volumes and initializing
+// permission arguments).
+
func main() {
+ log.Println("Keep started: pid", os.Getpid())
+
// Parse command-line flags:
//
// -listen=ipaddr:port
// by looking at currently mounted filesystems for /keep top-level
// directories.
- var listen, volumearg string
- var serialize_io bool
- flag.StringVar(&listen, "listen", DEFAULT_ADDR,
- "interface on which to listen for requests, in the format ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port to listen on all network interfaces.")
- flag.StringVar(&volumearg, "volumes", "",
- "Comma-separated list of directories to use for Keep volumes, e.g. -volumes=/var/keep1,/var/keep2. If empty or not supplied, Keep will scan mounted filesystems for volumes with a /keep top-level directory.")
- flag.BoolVar(&serialize_io, "serialize", false,
- "If set, all read and write operations on local Keep volumes will be serialized.")
+ var (
+ data_manager_token_file string
+ listen string
+ permission_key_file string
+ permission_ttl_sec int
+ serialize_io bool
+ volumearg string
+ pidfile string
+ )
+ flag.StringVar(
+ &data_manager_token_file,
+ "data-manager-token-file",
+ "",
+ "File with the API token used by the Data Manager. All DELETE "+
+ "requests or GET /index requests must carry this token.")
+ flag.BoolVar(
+ &enforce_permissions,
+ "enforce-permissions",
+ false,
+ "Enforce permission signatures on requests.")
+ flag.StringVar(
+ &listen,
+ "listen",
+ DEFAULT_ADDR,
+ "Interface on which to listen for requests, in the format "+
+ "ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port "+
+ "to listen on all network interfaces.")
+ flag.StringVar(
+ &permission_key_file,
+ "permission-key-file",
+ "",
+ "File containing the secret key for generating and verifying "+
+ "permission signatures.")
+ flag.IntVar(
+ &permission_ttl_sec,
+ "permission-ttl",
+ 300,
+ "Expiration time (in seconds) for newly generated permission "+
+ "signatures.")
+ flag.BoolVar(
+ &serialize_io,
+ "serialize",
+ false,
+ "If set, all read and write operations on local Keep volumes will "+
+ "be serialized.")
+ flag.StringVar(
+ &volumearg,
+ "volumes",
+ "",
+ "Comma-separated list of directories to use for Keep volumes, "+
+ "e.g. -volumes=/var/keep1,/var/keep2. If empty or not "+
+ "supplied, Keep will scan mounted filesystems for volumes "+
+ "with a /keep top-level directory.")
+
+ flag.StringVar(
+ &pidfile,
+ "pid",
+ "",
+ "Path to write pid file")
+
flag.Parse()
// Look for local keep volumes.
log.Fatal("could not find any keep volumes")
}
+ // Initialize data manager token and permission key.
+ // If these tokens are specified but cannot be read,
+ // raise a fatal error.
+ if data_manager_token_file != "" {
+ if buf, err := ioutil.ReadFile(data_manager_token_file); err == nil {
+ data_manager_token = strings.TrimSpace(string(buf))
+ } else {
+ log.Fatalf("reading data manager token: %s\n", err)
+ }
+ }
+ if permission_key_file != "" {
+ if buf, err := ioutil.ReadFile(permission_key_file); err == nil {
+ PermissionSecret = bytes.TrimSpace(buf)
+ } else {
+ log.Fatalf("reading permission key: %s\n", err)
+ }
+ }
+
+ // Initialize permission TTL
+ permission_ttl = time.Duration(permission_ttl_sec) * time.Second
+
+ // If --enforce-permissions is true, we must have a permission key
+ // to continue.
+ if PermissionSecret == nil {
+ if enforce_permissions {
+ log.Fatal("--enforce-permissions requires a permission key")
+ } else {
+ log.Println("Running without a PermissionSecret. Block locators " +
+ "returned by this server will not be signed, and will be rejected " +
+ "by a server that enforces permissions.")
+ log.Println("To fix this, run Keep with --permission-key-file=<path> " +
+ "to define the location of a file containing the permission key.")
+ }
+ }
+
// Start a round-robin VolumeManager with the volumes we have found.
KeepVM = MakeRRVolumeManager(goodvols)
- // Set up REST handlers.
- //
- // Start with a router that will route each URL path to an
- // appropriate handler.
- //
+ // Tell the built-in HTTP server to direct all requests to the REST
+ // router.
+ http.Handle("/", MakeRESTRouter())
+
+ // Set up a TCP listener.
+ listener, err := net.Listen("tcp", listen)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Shut down the server gracefully (by closing the listener)
+ // if SIGTERM is received.
+ term := make(chan os.Signal, 1)
+ go func(sig <-chan os.Signal) {
+ s := <-sig
+ log.Println("caught signal:", s)
+ listener.Close()
+ }(term)
+ signal.Notify(term, syscall.SIGTERM)
+
+ if pidfile != "" {
+ f, err := os.Create(pidfile)
+ if err == nil {
+ fmt.Fprint(f, os.Getpid())
+ f.Close()
+ } else {
+ log.Printf("Error writing pid file (%s): %s", pidfile, err.Error())
+ }
+ }
+
+ // Start listening for requests.
+ srv := &http.Server{Addr: listen}
+ srv.Serve(listener)
+
+ log.Println("shutting down")
+
+ if pidfile != "" {
+ os.Remove(pidfile)
+ }
+}
+
+// MakeRESTRouter
+// Returns a mux.Router that passes GET and PUT requests to the
+// appropriate handlers.
+//
+func MakeRESTRouter() *mux.Router {
rest := mux.NewRouter()
- rest.HandleFunc(`/{hash:[0-9a-f]{32}}`, GetBlockHandler).Methods("GET", "HEAD")
+
+ rest.HandleFunc(
+ `/{hash:[0-9a-f]{32}}`, GetBlockHandler).Methods("GET", "HEAD")
+ rest.HandleFunc(
+ `/{hash:[0-9a-f]{32}}+{hints}`,
+ GetBlockHandler).Methods("GET", "HEAD")
+
rest.HandleFunc(`/{hash:[0-9a-f]{32}}`, PutBlockHandler).Methods("PUT")
+
+ // For IndexHandler we support:
+ // /index - returns all locators
+ // /index/{prefix} - returns all locators that begin with {prefix}
+ // {prefix} is a string of hexadecimal digits between 0 and 32 digits.
+ // If {prefix} is the empty string, return an index of all locators
+ // (so /index and /index/ behave identically)
+ // A client may supply a full 32-digit locator string, in which
+ // case the server will return an index with either zero or one
+ // entries. This usage allows a client to check whether a block is
+ // present, and its size and upload time, without retrieving the
+ // entire block.
+ //
rest.HandleFunc(`/index`, IndexHandler).Methods("GET", "HEAD")
- rest.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, IndexHandler).Methods("GET", "HEAD")
+ rest.HandleFunc(
+ `/index/{prefix:[0-9a-f]{0,32}}`, IndexHandler).Methods("GET", "HEAD")
rest.HandleFunc(`/status.json`, StatusHandler).Methods("GET", "HEAD")
- // Tell the built-in HTTP server to direct all requests to the REST
- // router.
- http.Handle("/", rest)
+ // Any request which does not match any of these routes gets
+ // 400 Bad Request.
+ rest.NotFoundHandler = http.HandlerFunc(BadRequestHandler)
- // Start listening for requests.
- http.ListenAndServe(listen, nil)
+ return rest
+}
+
+// BadRequestHandler answers any request that matched no registered
+// route with 400 Bad Request (installed as the router's
+// NotFoundHandler in MakeRESTRouter).
+func BadRequestHandler(w http.ResponseWriter, r *http.Request) {
+	http.Error(w, BadRequestError.Error(), BadRequestError.HTTPCode)
}
// FindKeepVolumes
for scanner.Scan() {
args := strings.Fields(scanner.Text())
dev, mount := args[0], args[1]
- if (dev == "tmpfs" || strings.HasPrefix(dev, "/dev/")) && mount != "/" {
+ if mount != "/" &&
+ (dev == "tmpfs" || strings.HasPrefix(dev, "/dev/")) {
keep := mount + "/keep"
if st, err := os.Stat(keep); err == nil && st.IsDir() {
vols = append(vols, keep)
return vols
}
-func GetBlockHandler(w http.ResponseWriter, req *http.Request) {
+func GetBlockHandler(resp http.ResponseWriter, req *http.Request) {
hash := mux.Vars(req)["hash"]
+ log.Printf("%s %s", req.Method, hash)
+
+ hints := mux.Vars(req)["hints"]
+
+ // Parse the locator string and hints from the request.
+ // TODO(twp): implement a Locator type.
+ var signature, timestamp string
+ if hints != "" {
+ signature_pat, _ := regexp.Compile("^A([[:xdigit:]]+)@([[:xdigit:]]{8})$")
+ for _, hint := range strings.Split(hints, "+") {
+ if match, _ := regexp.MatchString("^[[:digit:]]+$", hint); match {
+ // Server ignores size hints
+ } else if m := signature_pat.FindStringSubmatch(hint); m != nil {
+ signature = m[1]
+ timestamp = m[2]
+ } else if match, _ := regexp.MatchString("^[[:upper:]]", hint); match {
+ // Any unknown hint that starts with an uppercase letter is
+ // presumed to be valid and ignored, to permit forward compatibility.
+ } else {
+ // Unknown format; not a valid locator.
+ http.Error(resp, BadRequestError.Error(), BadRequestError.HTTPCode)
+ return
+ }
+ }
+ }
+
+ // If permission checking is in effect, verify this
+ // request's permission signature.
+ if enforce_permissions {
+ if signature == "" || timestamp == "" {
+ http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
+ return
+ } else if IsExpired(timestamp) {
+ http.Error(resp, ExpiredError.Error(), ExpiredError.HTTPCode)
+ return
+ } else {
+ req_locator := req.URL.Path[1:] // strip leading slash
+ if !VerifySignature(req_locator, GetApiToken(req)) {
+ http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
+ return
+ }
+ }
+ }
+
block, err := GetBlock(hash)
+
+ // Garbage collect after each GET. Fixes #2865.
+ // TODO(twp): review Keep memory usage and see if there's
+ // a better way to do this than blindly garbage collecting
+ // after every block.
+ defer runtime.GC()
+
if err != nil {
- http.Error(w, err.Error(), 404)
+ // This type assertion is safe because the only errors
+ // GetBlock can return are CorruptError or NotFoundError.
+ http.Error(resp, err.Error(), err.(*KeepError).HTTPCode)
return
}
- _, err = w.Write(block)
+ resp.Header().Set("X-Block-Size", fmt.Sprintf("%d", len(block)))
+
+ _, err = resp.Write(block)
if err != nil {
log.Printf("GetBlockHandler: writing response: %s", err)
}
return
}
-func PutBlockHandler(w http.ResponseWriter, req *http.Request) {
+func PutBlockHandler(resp http.ResponseWriter, req *http.Request) {
+ // Garbage collect after each PUT. Fixes #2865.
+ // See also GetBlockHandler.
+ defer runtime.GC()
+
hash := mux.Vars(req)["hash"]
+ log.Printf("%s %s", req.Method, hash)
+
// Read the block data to be stored.
// If the request exceeds BLOCKSIZE bytes, issue a HTTP 500 error.
//
//
if buf, err := ReadAtMost(req.Body, BLOCKSIZE); err == nil {
if err := PutBlock(buf, hash); err == nil {
- w.WriteHeader(http.StatusOK)
+ // Success; add a size hint, sign the locator if
+ // possible, and return it to the client.
+ return_hash := fmt.Sprintf("%s+%d", hash, len(buf))
+ api_token := GetApiToken(req)
+ if PermissionSecret != nil && api_token != "" {
+ expiry := time.Now().Add(permission_ttl)
+ return_hash = SignLocator(return_hash, api_token, expiry)
+ }
+ resp.Write([]byte(return_hash + "\n"))
} else {
ke := err.(*KeepError)
- http.Error(w, ke.Error(), ke.HTTPCode)
+ http.Error(resp, ke.Error(), ke.HTTPCode)
}
} else {
log.Println("error reading request: ", err)
// the maximum request size.
errmsg = fmt.Sprintf("Max request size %d bytes", BLOCKSIZE)
}
- http.Error(w, errmsg, 500)
+ http.Error(resp, errmsg, 500)
}
}
// IndexHandler
// A HandleFunc to address /index and /index/{prefix} requests.
//
-func IndexHandler(w http.ResponseWriter, req *http.Request) {
+func IndexHandler(resp http.ResponseWriter, req *http.Request) {
prefix := mux.Vars(req)["prefix"]
+ // Only the data manager may issue /index requests,
+ // and only if enforce_permissions is enabled.
+ // All other requests return 403 Permission denied.
+ api_token := GetApiToken(req)
+ if !enforce_permissions ||
+ api_token == "" ||
+ data_manager_token != api_token {
+ http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
+ return
+ }
var index string
for _, vol := range KeepVM.Volumes() {
index = index + vol.Index(prefix)
}
- w.Write([]byte(index))
+ resp.Write([]byte(index))
}
// StatusHandler
Volumes []*VolumeStatus `json:"volumes"`
}
-func StatusHandler(w http.ResponseWriter, req *http.Request) {
+func StatusHandler(resp http.ResponseWriter, req *http.Request) {
st := GetNodeStatus()
if jstat, err := json.Marshal(st); err == nil {
- w.Write(jstat)
+ resp.Write(jstat)
} else {
log.Printf("json.Marshal: %s\n", err)
log.Printf("NodeStatus = %v\n", st)
- http.Error(w, err.Error(), 500)
+ http.Error(resp, err.Error(), 500)
}
}
// they should be sent directly to an event manager at high
// priority or logged as urgent problems.
//
- log.Printf("%s: checksum mismatch for request %s (actual hash %s)\n",
+ log.Printf("%s: checksum mismatch for request %s (actual %s)\n",
vol, hash, filehash)
return buf, CorruptError
}
// If we already have a block on disk under this identifier, return
// success (but check for MD5 collisions).
// The only errors that GetBlock can return are ErrCorrupt and ErrNotFound.
- // In either case, we want to write our new (good) block to disk, so there is
- // nothing special to do if err != nil.
+ // In either case, we want to write our new (good) block to disk,
+ // so there is nothing special to do if err != nil.
if oldblock, err := GetBlock(hash); err == nil {
if bytes.Compare(block, oldblock) == 0 {
return nil
log.Printf("IsValidLocator: %s\n", err)
return false
}
+
+// GetApiToken returns the OAuth2 token from the Authorization
+// header of a HTTP request, or an empty string if no matching
+// token is found.
+func GetApiToken(req *http.Request) string {
+	auth, present := req.Header["Authorization"]
+	if !present {
+		return ""
+	}
+	oauth_pat, err := regexp.Compile(`^OAuth2\s+(.*)`)
+	if err != nil {
+		log.Println(err)
+		return ""
+	}
+	if groups := oauth_pat.FindStringSubmatch(auth[0]); groups != nil {
+		return groups[1]
+	}
+	return ""
+}
+
+// IsExpired returns true if the given Unix timestamp (expressed as a
+// hexadecimal string) is in the past, or if timestamp_hex cannot be
+// parsed as a hexadecimal string.
+func IsExpired(timestamp_hex string) bool {
+	// Parse into 64 bits explicitly: bitSize 0 means platform int,
+	// which on 32-bit builds overflows for timestamps past 2038 and
+	// would wrongly report valid signatures as expired.
+	ts, err := strconv.ParseInt(timestamp_hex, 16, 64)
+	if err != nil {
+		log.Printf("IsExpired: %s\n", err)
+		return true
+	}
+	return time.Unix(ts, 0).Before(time.Now())
+}
var TEST_BLOCK = []byte("The quick brown fox jumps over the lazy dog.")
var TEST_HASH = "e4d909c290d0fb1ca068ffaddf22cbd0"
+var TEST_HASH_PUT_RESPONSE = "e4d909c290d0fb1ca068ffaddf22cbd0+44\n"
var TEST_BLOCK_2 = []byte("Pack my box with five dozen liquor jugs.")
var TEST_HASH_2 = "f15ac516f788aec4f30932ffb6395c39"
match, err := regexp.MatchString(expected, index)
if err == nil {
if !match {
- t.Errorf("IndexLocators returned:\n-----\n%s-----\n", index)
+ t.Errorf("IndexLocators returned:\n%s", index)
}
} else {
t.Errorf("regexp.MatchString: %s", err)
// Cleanup to perform after each test.
//
func teardown() {
+ data_manager_token = ""
+ enforce_permissions = false
+ PermissionSecret = nil
KeepVM = nil
}
// key.
var PermissionSecret []byte
-// makePermSignature returns a string representing the signed permission
+// MakePermSignature returns a string representing the signed permission
// hint for the blob identified by blob_hash, api_token and expiration timestamp.
-func makePermSignature(blob_hash string, api_token string, expiry string) string {
+func MakePermSignature(blob_hash string, api_token string, expiry string) string {
hmac := hmac.New(sha1.New, PermissionSecret)
hmac.Write([]byte(blob_hash))
hmac.Write([]byte("@"))
// SignLocator takes a blob_locator, an api_token and an expiry time, and
// returns a signed locator string.
func SignLocator(blob_locator string, api_token string, expiry time.Time) string {
+ // If no permission secret or API token is available,
+ // return an unsigned locator.
+ if PermissionSecret == nil || api_token == "" {
+ return blob_locator
+ }
// Extract the hash from the blob locator, omitting any size hint that may be present.
blob_hash := strings.Split(blob_locator, "+")[0]
// Return the signed locator string.
timestamp_hex := fmt.Sprintf("%08x", expiry.Unix())
return blob_locator +
- "+A" + makePermSignature(blob_hash, api_token, timestamp_hex) +
+ "+A" + MakePermSignature(blob_hash, api_token, timestamp_hex) +
"@" + timestamp_hex
}
// VerifySignature returns true if the signature on the signed_locator
// can be verified using the given api_token.
func VerifySignature(signed_locator string, api_token string) bool {
- if re, err := regexp.Compile(`^(.*)\+A(.*)@(.*)$`); err == nil {
+ if re, err := regexp.Compile(`^([a-f0-9]{32}(\+[0-9]+)?).*\+A[[:xdigit:]]+@([[:xdigit:]]{8})`); err == nil {
if matches := re.FindStringSubmatch(signed_locator); matches != nil {
blob_locator := matches[1]
timestamp_hex := matches[3]