# SimpleCov reports
/coverage
+
+# Dev/test SSL certificates
+/self-signed.key
+/self-signed.pem
source 'https://rubygems.org'
-gem 'rails', '~> 3.2.0'
+gem 'rails', '~> 4.1.0'
+gem 'minitest', '>= 5.0.0'
# Bundle edge Rails instead:
# gem 'rails', :git => 'git://github.com/rails/rails.git'
gem 'oj'
gem 'sass'
+# Note: keeping this out of the "group :assets" section "may" allow us
+# to use CoffeeScript for UJS responses. It also prevents a
+# warning/problem when running tests: "WARN: tilt autoloading
+# 'coffee_script' in a non thread-safe way; explicit require
+# 'coffee_script' suggested."
+gem 'coffee-rails'
+
# Gems used only for assets and not required
# in production environments by default.
group :assets do
- gem 'sass-rails', '~> 3.2.0'
- gem 'coffee-rails', '~> 3.2.0'
+ gem 'sass-rails'
# See https://github.com/sstephenson/execjs#readme for more supported runtimes
gem 'therubyracer', :platforms => :ruby
gem 'piwik_analytics'
gem 'httpclient'
-gem 'themes_for_rails'
+
+# This fork has Rails 4 compatible routes
+gem 'themes_for_rails', git: 'https://github.com/holtkampw/themes_for_rails', ref: '1fd2d7897d75ae0d6375f4c390df87b8e91ad417'
+
gem "deep_merge", :require => 'deep_merge/rails_compat'
+GIT
+ remote: https://github.com/holtkampw/themes_for_rails
+ revision: 1fd2d7897d75ae0d6375f4c390df87b8e91ad417
+ ref: 1fd2d7897d75ae0d6375f4c390df87b8e91ad417
+ specs:
+ themes_for_rails (0.5.1)
+ rails (>= 3.0.0)
+
GEM
remote: https://rubygems.org/
specs:
RedCloth (4.2.9)
- actionmailer (3.2.15)
- actionpack (= 3.2.15)
+ actionmailer (4.1.1)
+ actionpack (= 4.1.1)
+ actionview (= 4.1.1)
mail (~> 2.5.4)
- actionpack (3.2.15)
- activemodel (= 3.2.15)
- activesupport (= 3.2.15)
- builder (~> 3.0.0)
+ actionpack (4.1.1)
+ actionview (= 4.1.1)
+ activesupport (= 4.1.1)
+ rack (~> 1.5.2)
+ rack-test (~> 0.6.2)
+ actionview (4.1.1)
+ activesupport (= 4.1.1)
+ builder (~> 3.1)
erubis (~> 2.7.0)
- journey (~> 1.0.4)
- rack (~> 1.4.5)
- rack-cache (~> 1.2)
- rack-test (~> 0.6.1)
- sprockets (~> 2.2.1)
- activemodel (3.2.15)
- activesupport (= 3.2.15)
- builder (~> 3.0.0)
- activerecord (3.2.15)
- activemodel (= 3.2.15)
- activesupport (= 3.2.15)
- arel (~> 3.0.2)
- tzinfo (~> 0.3.29)
- activeresource (3.2.15)
- activemodel (= 3.2.15)
- activesupport (= 3.2.15)
- activesupport (3.2.15)
- i18n (~> 0.6, >= 0.6.4)
- multi_json (~> 1.0)
+ activemodel (4.1.1)
+ activesupport (= 4.1.1)
+ builder (~> 3.1)
+ activerecord (4.1.1)
+ activemodel (= 4.1.1)
+ activesupport (= 4.1.1)
+ arel (~> 5.0.0)
+ activesupport (4.1.1)
+ i18n (~> 0.6, >= 0.6.9)
+ json (~> 1.7, >= 1.7.7)
+ minitest (~> 5.1)
+ thread_safe (~> 0.1)
+ tzinfo (~> 1.1)
andand (1.3.3)
- arel (3.0.2)
+ arel (5.0.1.20140414130214)
bootstrap-sass (3.1.0.1)
sass (~> 3.2)
bootstrap-x-editable-rails (1.5.1.1)
railties (>= 3.0)
- builder (3.0.4)
+ builder (3.2.2)
capistrano (2.15.5)
highline
net-scp (>= 1.0.0)
childprocess (0.5.1)
ffi (~> 1.0, >= 1.0.11)
cliver (0.3.2)
- coffee-rails (3.2.2)
+ coffee-rails (4.0.1)
coffee-script (>= 2.2.0)
- railties (~> 3.2.0)
+ railties (>= 4.0.0, < 5.0)
coffee-script (2.2.0)
coffee-script-source
execjs
- coffee-script-source (1.6.3)
+ coffee-script-source (1.7.0)
commonjs (0.2.7)
daemon_controller (1.1.7)
deep_merge (1.0.1)
highline (1.6.20)
hike (1.2.3)
httpclient (2.3.4.1)
- i18n (0.6.5)
- journey (1.0.4)
+ i18n (0.6.9)
jquery-rails (3.0.4)
railties (>= 3.0, < 5.0)
thor (>= 0.14, < 2.0)
mail (2.5.4)
mime-types (~> 1.16)
treetop (~> 1.4.8)
- mime-types (1.25)
+ mime-types (1.25.1)
mini_portile (0.5.2)
- multi_json (1.8.2)
+ minitest (5.3.3)
+ multi_json (1.10.0)
net-scp (1.1.2)
net-ssh (>= 2.6.5)
net-sftp (2.1.2)
cliver (~> 0.3.1)
multi_json (~> 1.0)
websocket-driver (>= 0.2.0)
- polyglot (0.3.3)
- rack (1.4.5)
- rack-cache (1.2)
- rack (>= 0.4)
- rack-ssl (1.3.3)
- rack
+ polyglot (0.3.4)
+ rack (1.5.2)
rack-test (0.6.2)
rack (>= 1.0)
- rails (3.2.15)
- actionmailer (= 3.2.15)
- actionpack (= 3.2.15)
- activerecord (= 3.2.15)
- activeresource (= 3.2.15)
- activesupport (= 3.2.15)
- bundler (~> 1.0)
- railties (= 3.2.15)
- railties (3.2.15)
- actionpack (= 3.2.15)
- activesupport (= 3.2.15)
- rack-ssl (~> 1.3.2)
+ rails (4.1.1)
+ actionmailer (= 4.1.1)
+ actionpack (= 4.1.1)
+ actionview (= 4.1.1)
+ activemodel (= 4.1.1)
+ activerecord (= 4.1.1)
+ activesupport (= 4.1.1)
+ bundler (>= 1.3.0, < 2.0)
+ railties (= 4.1.1)
+ sprockets-rails (~> 2.0)
+ railties (4.1.1)
+ actionpack (= 4.1.1)
+ activesupport (= 4.1.1)
rake (>= 0.8.7)
- rdoc (~> 3.4)
- thor (>= 0.14.6, < 2.0)
- rake (10.1.0)
- rdoc (3.12.2)
- json (~> 1.4)
+ thor (>= 0.18.1, < 2.0)
+ rake (10.3.1)
ref (1.0.5)
rubyzip (1.1.0)
rvm-capistrano (1.5.1)
capistrano (~> 2.15.4)
sass (3.2.12)
- sass-rails (3.2.6)
- railties (~> 3.2.0)
- sass (>= 3.1.10)
- tilt (~> 1.3)
+ sass-rails (4.0.3)
+ railties (>= 4.0.0, < 5.0)
+ sass (~> 3.2.0)
+ sprockets (~> 2.8, <= 2.11.0)
+ sprockets-rails (~> 2.0)
selenium-webdriver (2.40.0)
childprocess (>= 0.5.0)
multi_json (~> 1.0)
simplecov-html (0.7.1)
simplecov-rcov (0.2.3)
simplecov (>= 0.4.1)
- sprockets (2.2.2)
+ sprockets (2.11.0)
hike (~> 1.2)
multi_json (~> 1.0)
rack (~> 1.0)
tilt (~> 1.1, != 1.3.0)
+ sprockets-rails (2.1.3)
+ actionpack (>= 3.0)
+ activesupport (>= 3.0)
+ sprockets (~> 2.8)
sqlite3 (1.3.8)
- themes_for_rails (0.5.1)
- rails (>= 3.0.0)
therubyracer (0.12.0)
libv8 (~> 3.16.14.0)
ref
- thor (0.18.1)
+ thor (0.19.1)
+ thread_safe (0.3.3)
tilt (1.4.1)
treetop (1.4.15)
polyglot
polyglot (>= 0.3.1)
- tzinfo (0.3.38)
+ tzinfo (1.1.0)
+ thread_safe (~> 0.1)
uglifier (2.3.1)
execjs (>= 0.3.0)
json (>= 1.8.0)
bootstrap-sass (~> 3.1.0)
bootstrap-x-editable-rails
capybara
- coffee-rails (~> 3.2.0)
+ coffee-rails
deep_merge
headless
httpclient
jquery-rails
less
less-rails
+ minitest (>= 5.0.0)
multi_json
oj
passenger
piwik_analytics
poltergeist
- rails (~> 3.2.0)
+ rails (~> 4.1.0)
rvm-capistrano
sass
- sass-rails (~> 3.2.0)
+ sass-rails
selenium-webdriver
simplecov (~> 0.7.1)
simplecov-rcov
sqlite3
- themes_for_rails
+ themes_for_rails!
therubyracer
uglifier (>= 1.0.3)
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
var toggle_group = $(this).parents('[data-remote-href]').first();
var want_persist = !toggle_group.find('button').hasClass('active');
var want_state = want_persist ? 'persistent' : 'cache';
- console.log(want_persist);
toggle_group.find('button').
toggleClass('active', want_persist).
html(want_persist ? 'Persistent' : 'Cache');
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
-
cache_age_in_days = (milliseconds_age) ->
ONE_DAY = 1000 * 60 * 60 * 24
milliseconds_age / ONE_DAY
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
-
(function() {
var run_pipeline_button_state = function() {
var a = $('a.editable.required.editable-empty');
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+++ /dev/null
-# Place all the behaviors and hooks related to the matching controller here.
-# All this logic will automatically be available in application.js.
-# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+/* Style for _show_files tree view. */
+
+ul#collection_files {
+ padding: 0 .5em;
+}
+
+ul.collection_files {
+ line-height: 2.5em;
+ list-style-type: none;
+ padding-left: 2.3em;
+}
+
+ul.collection_files li {
+ clear: both;
+}
+
+.collection_files_row {
+ padding: 1px; /* Replaced by border for :hover */
+}
+
+.collection_files_row:hover {
+ background-color: #D9EDF7;
+ padding: 0px;
+ border: 1px solid #BCE8F1;
+ border-radius: 3px;
+}
+
+.collection_files_inline {
+ clear: both;
+ width: 80%;
+ height: auto;
+ max-height: 6em;
+ margin: 0 1em;
+}
+
+.collection_files_name {
+ padding-left: .5em;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.collection_files_name i.fa-fw:first-child {
+ width: 1.6em;
+}
+
/*
"active" and "inactive" colors are too similar for a toggle switch
in the default bootstrap theme.
div.graph {
margin-top: 20px;
}
-div.graph h3,h4 {
+div.graph h3, div.graph h4 {
text-align: center;
}
env = Hash[ENV].
merge({
'ARVADOS_API_HOST' =>
- $arvados_api_client.arvados_v1_base.
+ arvados_api_client.arvados_v1_base.
sub(/\/arvados\/v1/, '').
sub(/^https?:\/\//, ''),
'ARVADOS_API_TOKEN' => Thread.current[:arvados_api_token],
class ApiClientAuthorizationsController < ApplicationController
- def index
- m = model_class.all
- items_available = m.items_available
- offset = m.result_offset
- limit = m.result_limit
- filtered = m.to_ary.reject do |x|
- x.api_client_id == 0 or (x.expires_at and x.expires_at < Time.now) rescue false
- end
- ArvadosApiClient::patch_paging_vars(filtered, items_available, offset, limit, nil)
- @objects = ArvadosResourceList.new(ApiClientAuthorization)
- @objects.results= filtered
- super
- end
def index_pane_list
%w(Recent Help)
class ApplicationController < ActionController::Base
+ include ArvadosApiClientHelper
+
respond_to :html, :json, :js
protect_from_forgery
ERROR_ACTIONS = [:render_error, :render_not_found]
around_filter :thread_clear
- around_filter(:thread_with_mandatory_api_token,
- except: [:index, :show] + ERROR_ACTIONS)
+ around_filter :thread_with_mandatory_api_token, except: ERROR_ACTIONS
around_filter :thread_with_optional_api_token
before_filter :check_user_agreements, except: ERROR_ACTIONS
before_filter :check_user_notifications, except: ERROR_ACTIONS
- around_filter :using_reader_tokens, only: [:index, :show]
before_filter :find_object_by_uuid, except: [:index] + ERROR_ACTIONS
before_filter :check_my_folders, :except => ERROR_ACTIONS
theme :select_theme
end
def index
+ @limit ||= 200
if params[:limit]
- limit = params[:limit].to_i
- else
- limit = 200
+ @limit = params[:limit].to_i
end
+ @offset ||= 0
if params[:offset]
- offset = params[:offset].to_i
- else
- offset = 0
+ @offset = params[:offset].to_i
end
+ @filters ||= []
if params[:filters]
filters = params[:filters]
if filters.is_a? String
filters = Oj.load filters
end
- else
- filters = []
+ @filters += filters
end
- @objects ||= model_class.filter(filters).limit(limit).offset(offset).all
+ @objects ||= model_class
+ @objects = @objects.filter(@filters).limit(@limit).offset(@offset).all
respond_to do |f|
f.json { render json: @objects }
f.html { render }
respond_to do |f|
f.html {
if request.method == 'GET'
- redirect_to $arvados_api_client.arvados_login_url(return_to: request.url)
+ redirect_to arvados_api_client.arvados_login_url(return_to: request.url)
else
flash[:error] = "Either you are not logged in, or your session has timed out. I can't automatically log you in and re-attempt this request."
redirect_to :back
false # For convenience to return from callbacks
end
- def using_reader_tokens(login_optional=false)
- if params[:reader_tokens].is_a?(Array) and params[:reader_tokens].any?
- Thread.current[:reader_tokens] = params[:reader_tokens]
- end
- begin
- yield
- rescue ArvadosApiClient::NotLoggedInException
- if login_optional
- raise
- else
- return redirect_to_login
- end
- ensure
- Thread.current[:reader_tokens] = nil
- end
- end
-
def using_specific_api_token(api_token)
start_values = {}
[:arvados_api_token, :user].each do |key|
class CollectionsController < ApplicationController
- skip_around_filter :thread_with_mandatory_api_token, only: [:show_file]
- skip_before_filter :find_object_by_uuid, only: [:provenance, :show_file]
- skip_before_filter :check_user_agreements, only: [:show_file]
+ skip_around_filter(:thread_with_mandatory_api_token,
+ only: [:show_file, :show_file_links])
+ skip_before_filter(:find_object_by_uuid,
+ only: [:provenance, :show_file, :show_file_links])
+
+ RELATION_LIMIT = 5
def show_pane_list
%w(Files Attributes Metadata Provenance_graph Used_by JSON API)
@request_url = request.url
end
+ def show_file_links
+ Thread.current[:reader_tokens] = [params[:reader_token]]
+ find_object_by_uuid
+ render layout: false
+ end
+
def show_file
# We pipe from arv-get to send the file to the user. Before we start it,
# we ask the API server if the file actually exists. This serves two
# purposes: it lets us return a useful status code for common errors, and
# helps us figure out which token to provide to arv-get.
coll = nil
- usable_token = find_usable_token do
+ tokens = [Thread.current[:arvados_api_token], params[:reader_token]].compact
+ usable_token = find_usable_token(tokens) do
coll = Collection.find(params[:uuid])
end
if usable_token.nil?
def show
return super if !@object
- @provenance = []
- @output2job = {}
- @output2colorindex = {}
- @sourcedata = {params[:uuid] => {uuid: params[:uuid]}}
- @protected = {}
-
- colorindex = -1
- any_hope_left = true
- while any_hope_left
- any_hope_left = false
- Job.where(output: @sourcedata.keys).sort_by { |a| a.finished_at || a.created_at }.reverse.each do |job|
- if !@output2colorindex[job.output]
- any_hope_left = true
- @output2colorindex[job.output] = (colorindex += 1) % 10
- @provenance << {job: job, output: job.output}
- @sourcedata.delete job.output
- @output2job[job.output] = job
- job.dependencies.each do |new_source_data|
- unless @output2colorindex[new_source_data]
- @sourcedata[new_source_data] = {uuid: new_source_data}
- end
- end
- end
- end
- end
-
- Link.where(head_uuid: @sourcedata.keys | @output2job.keys).each do |link|
- if link.link_class == 'resources' and link.name == 'wants'
- @protected[link.head_uuid] = true
- if link.tail_uuid == current_user.uuid
- @is_persistent = true
- end
- end
- end
- Link.where(tail_uuid: @sourcedata.keys).each do |link|
- if link.link_class == 'data_origin'
- @sourcedata[link.tail_uuid][:data_origins] ||= []
- @sourcedata[link.tail_uuid][:data_origins] << [link.name, link.head_uuid]
+ if current_user
+ jobs_with = lambda do |conds|
+ Job.limit(RELATION_LIMIT).where(conds)
+ .results.sort_by { |j| j.finished_at || j.created_at }
end
- end
- Collection.where(uuid: @sourcedata.keys).each do |collection|
- if @sourcedata[collection.uuid]
- @sourcedata[collection.uuid][:collection] = collection
- end
- end
-
- Collection.where(uuid: @object.uuid).each do |u|
- @prov_svg = ProvenanceHelper::create_provenance_graph(u.provenance, "provenance_svg",
- {:request => request,
- :direction => :bottom_up,
- :combine_jobs => :script_only}) rescue nil
- @used_by_svg = ProvenanceHelper::create_provenance_graph(u.used_by, "used_by_svg",
- {:request => request,
- :direction => :top_down,
- :combine_jobs => :script_only,
- :pdata_only => true}) rescue nil
- end
+ @output_of = jobs_with.call(output: @object.uuid)
+ @log_of = jobs_with.call(log: @object.uuid)
+ folder_links = Link.limit(RELATION_LIMIT).order("modified_at DESC")
+ .where(head_uuid: @object.uuid, link_class: 'name').results
+ folder_hash = Group.where(uuid: folder_links.map(&:tail_uuid)).to_hash
+ @folders = folder_links.map { |link| folder_hash[link.tail_uuid] }
+ @permissions = Link.limit(RELATION_LIMIT).order("modified_at DESC")
+ .where(head_uuid: @object.uuid, link_class: 'permission',
+ name: 'can_read').results
+ @logs = Log.limit(RELATION_LIMIT).order("created_at DESC")
+ .where(object_uuid: @object.uuid).results
+ @is_persistent = Link.limit(1)
+ .where(head_uuid: @object.uuid, tail_uuid: current_user.uuid,
+ link_class: 'resources', name: 'wants')
+ .results.any?
+ end
+ @prov_svg = ProvenanceHelper::create_provenance_graph(@object.provenance, "provenance_svg",
+ {:request => request,
+ :direction => :bottom_up,
+ :combine_jobs => :script_only}) rescue nil
+ @used_by_svg = ProvenanceHelper::create_provenance_graph(@object.used_by, "used_by_svg",
+ {:request => request,
+ :direction => :top_down,
+ :combine_jobs => :script_only,
+ :pdata_only => true}) rescue nil
end
protected
- def find_usable_token
- # Iterate over every token available to make it the current token and
+ def find_usable_token(token_list)
+ # Iterate over every given token to make it the current token and
# yield the given block.
# If the block succeeds, return the token it used.
# Otherwise, render an error response based on the most specific
# error we encounter, and return nil.
- read_tokens = [Thread.current[:arvados_api_token]].compact
- if params[:reader_tokens].is_a? Array
- read_tokens += params[:reader_tokens]
- end
most_specific_error = [401]
- read_tokens.each do |api_token|
+ token_list.each do |api_token|
using_specific_api_token(api_token) do
begin
yield
end
def file_in_collection?(collection, filename)
- def normalized_path(part_list)
- File.join(part_list).sub(%r{^\./}, '')
- end
- target = normalized_path([filename])
+ target = CollectionsHelper.file_path(File.split(filename))
collection.files.each do |file_spec|
- return true if (normalized_path(file_spec[0, 2]) == target)
+ return true if (CollectionsHelper.file_path(file_spec) == target)
end
false
end
end
class FileStreamer
+ include ArvadosApiClientHelper
def initialize(opts={})
@opts = opts
end
def each
return unless @opts[:uuid] && @opts[:file]
- env = Hash[ENV].
- merge({
- 'ARVADOS_API_HOST' =>
- $arvados_api_client.arvados_v1_base.
- sub(/\/arvados\/v1/, '').
- sub(/^https?:\/\//, ''),
- 'ARVADOS_API_TOKEN' =>
- @opts[:arvados_api_token],
- 'ARVADOS_API_HOST_INSECURE' =>
- Rails.configuration.arvados_insecure_https ? 'true' : 'false'
- })
+
+ env = Hash[ENV].dup
+
+ require 'uri'
+ u = URI.parse(arvados_api_client.arvados_v1_base)
+ env['ARVADOS_API_HOST'] = "#{u.host}:#{u.port}"
+ env['ARVADOS_API_TOKEN'] = @opts[:arvados_api_token]
+ env['ARVADOS_API_HOST_INSECURE'] = "true" if Rails.configuration.arvados_insecure_https
+
IO.popen([env, 'arv-get', "#{@opts[:uuid]}/#{@opts[:file]}"],
'rb') do |io|
- while buf = io.read(2**20)
+ while buf = io.read(2**16)
yield buf
end
end
def index
@svg = ""
if params[:uuid]
- @jobs = Job.where(uuid: params[:uuid])
- generate_provenance(@jobs)
+ @objects = Job.where(uuid: params[:uuid])
+ generate_provenance(@objects)
else
- @jobs = Job.all
+ @limit = 20
+ super
end
end
--- /dev/null
+class KeepServicesController < ApplicationController
+end
end
def index
- @objects ||= model_class.limit(20).all
+ @limit = 20
super
end
skip_before_filter :find_object_by_uuid, :only => [:destroy, :index]
def destroy
session.clear
- redirect_to $arvados_api_client.arvados_logout_url(return_to: root_url)
+ redirect_to arvados_api_client.arvados_logout_url(return_to: root_url)
end
def index
redirect_to root_url if session[:arvados_api_token]
end
def sudo
- resp = $arvados_api_client.api(ApiClientAuthorization, '', {
- api_client_authorization: {
- owner_uuid: @object.uuid
- }
- })
+ resp = arvados_api_client.api(ApiClientAuthorization, '', {
+ api_client_authorization: {
+ owner_uuid: @object.uuid
+ }
+ })
redirect_to root_url(api_token: resp[:api_token])
end
def human_readable_bytes_html(n)
return h(n) unless n.is_a? Fixnum
+ return "0 bytes" if (n == 0)
orders = {
1 => "bytes",
lt
end
+
+ def render_arvados_object_list_start(list, button_text, button_href,
+ params={}, *rest, &block)
+ show_max = params.delete(:show_max) || 3
+ params[:class] ||= 'btn btn-xs btn-default'
+ list[0...show_max].each { |item| yield item }
+ unless list[show_max].nil?
+ link_to(h(button_text) +
+ raw(' <i class="fa fa-fw fa-arrow-circle-right"></i>'),
+ button_href, params, *rest)
+ end
+ end
end
--- /dev/null
+module ArvadosApiClientHelper
+ def arvados_api_client
+ ArvadosApiClient.new_or_current
+ end
+end
+
+# For the benefit of themes that still expect $arvados_api_client to work:
+class ArvadosClientProxyHack
+ def method_missing *args
+ ArvadosApiClient.new_or_current.send *args
+ end
+end
+$arvados_api_client = ArvadosClientProxyHack.new
class InvalidApiResponseException < StandardError
end
- @@client_mtx = Mutex.new
- @@api_client = nil
@@profiling_enabled = Rails.configuration.profiling_enabled
+ @@discovery = nil
+
+ # An API client object suitable for handling API requests on behalf
+ # of the current thread.
+ def self.new_or_current
+ # If this thread doesn't have an API client yet, *or* this model
+ # has been reloaded since the existing client was created, create
+ # a new client. Otherwise, keep using the latest client created in
+ # the current thread.
+ unless Thread.current[:arvados_api_client].andand.class == self
+ Thread.current[:arvados_api_client] = new
+ end
+ Thread.current[:arvados_api_client]
+ end
+
+ def initialize *args
+ @api_client = nil
+ @client_mtx = Mutex.new
+ end
def api(resources_kind, action, data=nil)
profile_checkpoint
- @@client_mtx.synchronize do
- if not @@api_client
- @@api_client = HTTPClient.new
+ if not @api_client
+ @client_mtx.synchronize do
+ @api_client = HTTPClient.new
if Rails.configuration.arvados_insecure_https
- @@api_client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
+ @api_client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
else
# Use system CA certificates
- @@api_client.ssl_config.add_trust_ca('/etc/ssl/certs')
+ @api_client.ssl_config.add_trust_ca('/etc/ssl/certs')
end
end
end
header = {"Accept" => "application/json"}
- profile_checkpoint { "Prepare request #{url} #{query[:uuid]} #{query[:where]}" }
- msg = @@api_client.post(url,
- query,
- header: header)
+ profile_checkpoint { "Prepare request #{url} #{query[:uuid]} #{query[:where]} #{query[:filters]}" }
+ msg = @client_mtx.synchronize do
+ @api_client.post(url,
+ query,
+ header: header)
+ end
profile_checkpoint 'API transaction'
if msg.status_code == 401
end
def discovery
- @discovery ||= api '../../discovery/v1/apis/arvados/v1/rest', ''
+ @@discovery ||= api '../../discovery/v1/apis/arvados/v1/rest', ''
end
def kind_class(kind)
self.abstract_class = true
attr_accessor :attribute_sortkey
+ def self.arvados_api_client
+ ArvadosApiClient.new_or_current
+ end
+
+ def arvados_api_client
+ ArvadosApiClient.new_or_current
+ end
+
def self.uuid_infix_object_kind
@@uuid_infix_object_kind ||=
begin
infix_kind = {}
- $arvados_api_client.discovery[:schemas].each do |name, schema|
+ arvados_api_client.discovery[:schemas].each do |name, schema|
if schema[:uuidPrefix]
infix_kind[schema[:uuidPrefix]] =
'arvados#' + name.to_s.camelcase(:lower)
end
end
- def initialize(*args)
- super(*args)
+ def initialize raw_params={}
+ super self.class.permit_attribute_params(raw_params)
@attribute_sortkey ||= {
'id' => nil,
'name' => '000',
return @columns unless @columns.nil?
@columns = []
@attribute_info ||= {}
- schema = $arvados_api_client.discovery[:schemas][self.to_s.to_sym]
+ schema = arvados_api_client.discovery[:schemas][self.to_s.to_sym]
return @columns if schema.nil?
schema[:properties].each do |k, coldef|
case k
@columns << column(k, :text)
serialize k, coldef[:type].constantize
end
- attr_accessible k
@attribute_info[k] = coldef
end
end
# request} unless {cache: false} is given via opts.
cache_key = "request_#{Thread.current.object_id}_#{self.to_s}_#{uuid}"
if opts[:cache] == false
- Rails.cache.write cache_key, $arvados_api_client.api(self, '/' + uuid)
+ Rails.cache.write cache_key, arvados_api_client.api(self, '/' + uuid)
end
hash = Rails.cache.fetch cache_key do
- $arvados_api_client.api(self, '/' + uuid)
+ arvados_api_client.api(self, '/' + uuid)
end
new.private_reload(hash)
end
ArvadosResourceList.new(self).all(*args)
end
+ def self.permit_attribute_params raw_params
+ # strong_parameters does not provide security in Workbench: anyone
+ # who can get this far can just as well do a call directly to our
+ # database (Arvados) with the same credentials we use.
+ #
+ # The following permit! is necessary even with
+ # "ActionController::Parameters.permit_all_parameters = true",
+ # because permit_all does not permit nested attributes.
+ ActionController::Parameters.new(raw_params).permit!
+ end
+
+ def self.create raw_params={}
+ super(permit_attribute_params(raw_params))
+ end
+
+ def update_attributes raw_params={}
+ super(self.class.permit_attribute_params(raw_params))
+ end
+
def save
obdata = {}
self.class.columns.each do |col|
if etag
postdata['_method'] = 'PUT'
obdata.delete :uuid
- resp = $arvados_api_client.api(self.class, '/' + uuid, postdata)
+ resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
else
- resp = $arvados_api_client.api(self.class, '', postdata)
+ resp = arvados_api_client.api(self.class, '', postdata)
end
return false if !resp[:etag] || !resp[:uuid]
def destroy
if etag || uuid
postdata = { '_method' => 'DELETE' }
- resp = $arvados_api_client.api(self.class, '/' + uuid, postdata)
+ resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
resp[:etag] && resp[:uuid] && resp
else
true
ok
end
end
- @links = $arvados_api_client.api Link, '', { _method: 'GET', where: o, eager: true }
- @links = $arvados_api_client.unpack_api_response(@links)
+ @links = arvados_api_client.api Link, '', { _method: 'GET', where: o, eager: true }
+ @links = arvados_api_client.unpack_api_response(@links)
end
def all_links
return @all_links if @all_links
- res = $arvados_api_client.api Link, '', {
+ res = arvados_api_client.api Link, '', {
_method: 'GET',
where: {
tail_kind: self.kind,
},
eager: true
}
- @all_links = $arvados_api_client.unpack_api_response(res)
+ @all_links = arvados_api_client.unpack_api_response(res)
end
def reload
if uuid_or_hash.is_a? Hash
hash = uuid_or_hash
else
- hash = $arvados_api_client.api(self.class, '/' + uuid_or_hash)
+ hash = arvados_api_client.api(self.class, '/' + uuid_or_hash)
end
hash.each do |k,v|
if self.respond_to?(k.to_s + '=')
end
resource_class = nil
uuid.match /^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/ do |re|
- resource_class ||= $arvados_api_client.
+ resource_class ||= arvados_api_client.
kind_class(self.uuid_infix_object_kind[re[1]])
end
if opts[:referring_object] and
opts[:referring_attr] and
opts[:referring_attr].match /_uuid$/
- resource_class ||= $arvados_api_client.
+ resource_class ||= arvados_api_client.
kind_class(opts[:referring_object].
attributes[opts[:referring_attr].
sub(/_uuid$/, '_kind')])
class ArvadosResourceList
+ include ArvadosApiClientHelper
include Enumerable
def initialize resource_class=nil
end
cond.keys.select { |x| x.match /_kind$/ }.each do |kind_key|
if cond[kind_key].is_a? Class
- cond = cond.merge({ kind_key => 'arvados#' + $arvados_api_client.class_kind(cond[kind_key]) })
+ cond = cond.merge({ kind_key => 'arvados#' + arvados_api_client.class_kind(cond[kind_key]) })
end
end
api_params = {
api_params[:offset] = @offset if @offset
api_params[:order] = @orderby_spec if @orderby_spec
api_params[:filters] = @filters if @filters
- res = $arvados_api_client.api @resource_class, '', api_params
- @results = $arvados_api_client.unpack_api_response res
+ res = arvados_api_client.api @resource_class, '', api_params
+ @results = arvados_api_client.unpack_api_response res
self
end
end
end
+ def files_tree
+ tree = files.group_by { |file_spec| File.split(file_spec.first) }
+ # Fill in entries for empty directories.
+ tree.keys.map { |basedir, _| File.split(basedir) }.each do |splitdir|
+ until tree.include?(splitdir)
+ tree[splitdir] = []
+ splitdir = File.split(splitdir.first)
+ end
+ end
+ dir_to_tree = lambda do |dirname|
+ # First list subdirectories, with their files inside.
+ subnodes = tree.keys.select { |bd, td| (bd == dirname) and (td != '.') }
+ .sort.flat_map do |parts|
+ [parts + [nil]] + dir_to_tree.call(File.join(parts))
+ end
+ # Then extend that list with files in this directory.
+ subnodes + tree[File.split(dirname)]
+ end
+ dir_to_tree.call('.')
+ end
+
def attribute_editable?(attr)
false
end
end
def provenance
- $arvados_api_client.api "collections/#{self.uuid}/", "provenance"
+ arvados_api_client.api "collections/#{self.uuid}/", "provenance"
end
def used_by
- $arvados_api_client.api "collections/#{self.uuid}/", "used_by"
+ arvados_api_client.api "collections/#{self.uuid}/", "used_by"
end
end
class Group < ArvadosBase
def contents params={}
- res = $arvados_api_client.api self.class, "/#{self.uuid}/contents", {
+ res = arvados_api_client.api self.class, "/#{self.uuid}/contents", {
_method: 'GET'
}.merge(params)
ret = ArvadosResourceList.new
- ret.results = $arvados_api_client.unpack_api_response(res)
+ ret.results = arvados_api_client.unpack_api_response(res)
ret
end
def attribute_editable?(attr)
false
end
+
+ def self.creatable?
+ false
+ end
end
--- /dev/null
+class KeepService < ArvadosBase
+ def self.creatable?
+ current_user and current_user.is_admin
+ end
+end
end
def attribute_editable?(attr)
- attr && (attr.to_sym == :name || (attr.to_sym == :components and self.active == nil))
+ attr && (attr.to_sym == :name ||
+ (attr.to_sym == :components and (self.state == 'New' || self.state == 'Ready')))
end
def attributes_for_display
end
def self.current
- res = $arvados_api_client.api self, '/current'
- $arvados_api_client.unpack_api_response(res)
+ res = arvados_api_client.api self, '/current'
+ arvados_api_client.unpack_api_response(res)
end
def self.system
- $arvados_system_user ||= begin
- res = $arvados_api_client.api self, '/system'
- $arvados_api_client.unpack_api_response(res)
- end
+ @@arvados_system_user ||= begin
+ res = arvados_api_client.api self, '/system'
+ arvados_api_client.unpack_api_response(res)
+ end
end
def full_name
end
def activate
- self.private_reload($arvados_api_client.api(self.class,
- "/#{self.uuid}/activate",
- {}))
+ self.private_reload(arvados_api_client.api(self.class,
+ "/#{self.uuid}/activate",
+ {}))
end
def attributes_for_display
end
def unsetup
- self.private_reload($arvados_api_client.api(self.class,
- "/#{self.uuid}/unsetup",
- {}))
+ self.private_reload(arvados_api_client.api(self.class,
+ "/#{self.uuid}/unsetup",
+ {}))
end
def self.setup params
- $arvados_api_client.api(self, "/setup", params)
+ arvados_api_client.api(self, "/setup", params)
end
end
class UserAgreement < ArvadosBase
def self.signatures
- res = $arvados_api_client.api self, '/signatures'
- $arvados_api_client.unpack_api_response(res)
+ res = arvados_api_client.api self, '/signatures'
+ arvados_api_client.unpack_api_response(res)
end
def self.sign(params)
- res = $arvados_api_client.api self, '/sign', params
- $arvados_api_client.unpack_api_response(res)
+ res = arvados_api_client.api self, '/sign', params
+ arvados_api_client.unpack_api_response(res)
end
end
-<% if p.success %>
+<% if p.state == 'Complete' %>
<span class="label label-success">finished</span>
-<% elsif p.success == false %>
+<% elsif p.state == 'Failed' %>
<span class="label label-danger">failed</span>
-<% elsif p.active %>
+<% elsif p.state == 'RunningOnServer' || p.state == 'RunningOnClient' %>
<span class="label label-info">running</span>
<% else %>
<% if (p.components.select do |k,v| v[:job] end).length == 0 %>
-<% content_for :css do %>
-.file-list-inline-image {
- width: 50%;
- height: auto;
-}
-<% end %>
-
<% content_for :tab_line_buttons do %>
<div class="row">
<div class="col-md-6"></div>
</div>
<% end %>
-<table class="table table-condensed table-fixedlayout">
- <colgroup>
- <col width="4%" />
- <col width="35%" />
- <col width="40%" />
- <col width="15%" />
- <col width="10%" />
- </colgroup>
- <thead>
- <tr>
- <th></th>
- <th>path</th>
- <th>file</th>
- <th style="text-align:right">size</th>
- <th>d/l</th>
- </tr>
- </thead><tbody>
- <% if @object then @object.files.sort_by{|f|[f[0],f[1]]}.each do |file| %>
- <% file_path = CollectionsHelper::file_path file %>
- <tr>
- <td>
- <%= check_box_tag 'uuids[]', @object.uuid+'/'+file_path, false, {
+<% file_tree = @object.andand.files_tree %>
+<% if file_tree.nil? or file_tree.empty? %>
+ <p>This collection is empty.</p>
+<% else %>
+ <ul id="collection_files" class="collection_files">
+ <% dirstack = [file_tree.first.first] %>
+ <% file_tree.each_with_index do |(dirname, filename, size), index| %>
+ <% file_path = CollectionsHelper::file_path([dirname, filename]) %>
+ <% while dirstack.any? and (dirstack.last != dirname) %>
+ <% dirstack.pop %></ul></li>
+ <% end %>
+ <li>
+ <% if size.nil? # This is a subdirectory. %>
+ <% dirstack.push(File.join(dirname, filename)) %>
+ <div class="collection_files_row">
+ <div class="collection_files_name"><i class="fa fa-fw fa-folder-open"></i> <%= filename %></div>
+ </div>
+ <ul class="collection_files">
+ <% else %>
+ <% link_params = {controller: 'collections', action: 'show_file',
+ uuid: @object.uuid, file: file_path, size: size} %>
+ <div class="collection_files_row">
+ <div class="collection_files_buttons pull-right">
+ <%= raw(human_readable_bytes_html(size)) %>
+ <%= check_box_tag 'uuids[]', "#{@object.uuid}/#{file_path}", false, {
:class => 'persistent-selection',
:friendly_type => "File",
:friendly_name => "#{@object.uuid}/#{file_path}",
- :href => "#{url_for controller: 'collections', action: 'show', id: @object.uuid }/#{file_path}",
- :title => "Click to add this item to your selection list"
+ :href => url_for(controller: 'collections', action: 'show_file',
+ uuid: @object.uuid, file: file_path),
+ :title => "Include #{file_path} in your selections",
} %>
- </td>
- <td>
- <%= file[0] %>
- </td>
-
- <td>
- <%= link_to (if CollectionsHelper::is_image file[1]
- image_tag "#{url_for @object}/#{file_path}", class: "file-list-inline-image"
- else
- file[1]
- end),
- {controller: 'collections', action: 'show_file', uuid: @object.uuid, file: file_path, size: file[2], disposition: 'inline'},
- {title: file_path} %>
- </td>
-
- <td style="text-align:right">
- <%= raw(human_readable_bytes_html(file[2])) %>
- </td>
-
- <td>
- <div style="display:inline-block">
- <%= link_to raw('<i class="glyphicon glyphicon-download-alt"></i>'), {controller: 'collections', action: 'show_file', uuid: @object.uuid, file: file_path, size: file[2], disposition: 'attachment'}, {class: 'btn btn-info btn-sm', title: 'Download'} %>
- </div>
- </td>
- </tr>
- <% end; end %>
- </tbody>
-</table>
+ <%= link_to(raw('<i class="fa fa-search"></i>'),
+ link_params.merge(disposition: 'inline'),
+ {title: "View #{file_path}", class: "btn btn-info btn-sm"}) %>
+ <%= link_to(raw('<i class="fa fa-download"></i>'),
+ link_params.merge(disposition: 'attachment'),
+ {title: "Download #{file_path}", class: "btn btn-info btn-sm"}) %>
+ </div>
+ <% if CollectionsHelper::is_image(filename) %>
+ <div class="collection_files_name"><i class="fa fa-fw fa-bar-chart-o"></i> <%= filename %></div>
+ </div>
+ <div class="collection_files_inline">
+ <%= link_to(image_tag("#{url_for @object}/#{file_path}"),
+ link_params.merge(disposition: 'inline'),
+ {title: file_path}) %>
+ </div>
+ <% else %>
+ <div class="collection_files_name"><i class="fa fa-fw fa-file"></i> <%= filename %></div>
+ </div>
+ <% end %>
+ </li>
+ <% end # if file or directory %>
+ <% end # file_tree.each %>
+ <%= raw(dirstack.map { |_| "</ul>" }.join("</li>")) %>
+<% end # if file_tree %>
+++ /dev/null
-<table class="topalign table table-bordered">
- <thead>
- <tr class="contain-align-left">
- <th>
- job
- </th><th>
- version
- </th><th>
- status
- </th><th>
- start
- </th><th>
- finish
- </th><th>
- clock time
- </th>
- </tr>
- </thead>
- <tbody>
-
- <% @provenance.reverse.each do |p| %>
- <% j = p[:job] %>
-
- <% if j %>
-
- <tr class="job">
- <td>
- <tt><%= j.uuid %></tt>
- <br />
- <tt class="deemphasize"><%= j.submit_id %></tt>
- </td><td>
- <%= j.script_version %>
- </td><td>
- <span class="label <%= if j.success then 'label-success'; elsif j.running then 'label-primary'; else 'label-warning'; end %>">
- <%= j.success || j.running ? 'ok' : 'failed' %>
- </span>
- </td><td>
- <%= j.started_at %>
- </td><td>
- <%= j.finished_at %>
- </td><td>
- <% if j.started_at and j.finished_at %>
- <%= raw(distance_of_time_in_words(j.started_at, j.finished_at).sub('about ','~').sub(' ',' ')) %>
- <% elsif j.started_at and j.running %>
- <%= raw(distance_of_time_in_words(j.started_at, Time.now).sub('about ','~').sub(' ',' ')) %> (running)
- <% end %>
- </td>
- </tr>
-
- <% else %>
- <tr>
- <td>
- <span class="label label-danger">lookup fail</span>
- <br />
- <tt class="deemphasize"><%= p[:target] %></tt>
- </td><td colspan="4">
- </td>
- </tr>
- <% end %>
-
- <% end %>
-
- </tbody>
-</table>
+++ /dev/null
-<%= content_for :css do %>
-<%# https://github.com/mbostock/d3/wiki/Ordinal-Scales %>
-<% n=-1; %w(#1f77b4 #ff7f0e #2ca02c #d62728 #9467bd #8c564b #e377c2 #7f7f7f #bcbd22 #17becf).each do |color| %>
-.colorseries-10-<%= n += 1 %>, .btn.colorseries-10-<%= n %>:hover, .label.colorseries-10-<%= n %>:hover {
- *background-color: <%= color %>;
- background-color: <%= color %>;
- background-image: none;
-}
-<% end %>
-.colorseries-nil { }
-.label a {
- color: inherit;
-}
-<% end %>
-
-<table class="topalign table table-bordered">
- <thead>
- </thead>
- <tbody>
-
- <% @provenance.reverse.each do |p| %>
- <% j = p[:job] %>
-
- <% if j %>
-
- <tr class="job">
- <td style="padding-bottom: 3em">
- <table class="table" style="margin-bottom: 0; background: #f0f0ff">
- <% j.script_parameters.each do |k,v| %>
- <tr>
- <td style="width: 20%">
- <%= k.to_s %>
- </td><td style="width: 60%">
- <% if v and @output2job.has_key? v %>
- <tt class="label colorseries-10-<%= @output2colorindex[v] %>"><%= link_to_if_arvados_object v %></tt>
- <% else %>
- <span class="deemphasize"><%= link_to_if_arvados_object v %></span>
- <% end %>
- </td><td style="text-align: center; width: 20%">
- <% if v
- if @protected[v]
- labelclass = 'success'
- labeltext = 'keep'
- else
- labelclass = @output2job.has_key?(v) ? 'warning' : 'danger'
- labeltext = 'cache'
- end %>
-
- <tt class="label label-<%= labelclass %>"><%= labeltext %></tt>
- <% end %>
- </td>
- </tr>
- <% end %>
- </table>
- <div style="text-align: center">
- ↓
- <br />
- <span class="label"><%= j.script %><br /><tt><%= link_to_if j.script_version.match(/[0-9a-f]{40}/), j.script_version, "https://arvados.org/projects/arvados/repository/revisions/#{j.script_version}/entry/crunch_scripts/#{j.script}" if j.script_version %></tt></span>
- <br />
- ↓
- <br />
- <tt class="label colorseries-10-<%= @output2colorindex[p[:output]] %>"><%= link_to_if_arvados_object p[:output] %></tt>
- </div>
- </td>
- <td>
- <tt><span class="deemphasize">job:</span><br /><%= link_to_if_arvados_object j %><br /><span class="deemphasize"><%= j.submit_id %></span></tt>
- </td>
- </tr>
-
- <% else %>
- <tr>
- <td>
- <span class="label label-danger">lookup fail</span>
- <br />
- <tt class="deemphasize"><%= p[:target] %></tt>
- </td><td colspan="5">
- </td>
- </tr>
- <% end %>
-
- <% end %>
-
- </tbody>
-</table>
+++ /dev/null
-<table class="table table-bordered table-striped">
- <thead>
- <tr class="contain-align-left">
- <th>
- collection
- </th><th class="data-size">
- data size
- </th><th>
- storage
- </th><th>
- origin
- </th>
- </tr>
- </thead>
- <tbody>
-
- <% @sourcedata.values.each do |sourcedata| %>
-
- <tr class="collection">
- <td>
- <tt class="label"><%= sourcedata[:uuid] %></tt>
- </td><td class="data-size">
- <%= raw(human_readable_bytes_html(sourcedata[:collection].data_size)) if sourcedata[:collection] and sourcedata[:collection].data_size %>
- </td><td>
- <% if @protected[sourcedata[:uuid]] %>
- <span class="label label-success">keep</span>
- <% else %>
- <span class="label label-danger">cache</span>
- <% end %>
- </td><td>
- <% if sourcedata[:data_origins] %>
- <% sourcedata[:data_origins].each do |data_origin| %>
- <span class="deemphasize"><%= data_origin[0] %></span>
- <%= data_origin[2] %>
- <br />
- <% end %>
- <% end %>
- </td>
- </tr>
-
- <% end %>
-
- </tbody>
-</table>
--- /dev/null
+<div class="row row-fill-height">
+ <div class="col-md-6">
+ <div class="panel panel-info">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ <% default_name = "Collection #{@object.uuid}" %>
+ <% name_html = render_editable_attribute @object, 'name', nil, {data: {emptytext: default_name}} %>
+ <%= (/\S/.match(name_html)) ? name_html : default_name %>
+ </h3>
+ </div>
+ <div class="panel-body">
+ <img src="/favicon.ico" class="pull-right" alt="" style="opacity: 0.3"/>
+ <% if not (@output_of.andand.any? or @log_of.andand.any?) %>
+ <p><i>No source information available.</i></p>
+ <% end %>
+
+ <% if @output_of.andand.any? %>
+ <p>Output of jobs:<br />
+ <%= render_arvados_object_list_start(@output_of, 'Show all jobs',
+ jobs_path(filter: [['output', '=', @object.uuid]].to_json)) do |job| %>
+ <%= link_to_if_arvados_object(job, friendly_name: true) %><br />
+ <% end %>
+ </p>
+ <% end %>
+
+ <% if @log_of.andand.any? %>
+ <p>Log of jobs:<br />
+ <%= render_arvados_object_list_start(@log_of, 'Show all jobs',
+ jobs_path(filter: [['log', '=', @object.uuid]].to_json)) do |job| %>
+ <%= link_to_if_arvados_object(job, friendly_name: true) %><br />
+ <% end %>
+ </p>
+ <% end %>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ Activity
+ </h3>
+ </div>
+ <div class="panel-body smaller-text">
+ <!--
+ <input type="text" class="form-control" placeholder="Search"/>
+ -->
+ <div style="height:0.5em;"></div>
+ <% if not @logs.andand.any? %>
+ <p>
+ Created: <%= @object.created_at.to_s(:long) %>
+ </p>
+ <p>
+ Last modified: <%= @object.modified_at.to_s(:long) %> by <%= link_to_if_arvados_object @object.modified_by_user_uuid, friendly_name: true %>
+ </p>
+ <% else %>
+ <%= render_arvados_object_list_start(@logs, 'Show all activity',
+ logs_path(filters: [['object_uuid','=',@object.uuid]].to_json)) do |log| %>
+ <p>
+ <%= time_ago_in_words(log.event_at) %> ago: <%= log.summary %>
+ <% if log.object_uuid %>
+ <%= link_to_if_arvados_object log.object_uuid, link_text: raw('<i class="fa fa-hand-o-right"></i>') %>
+ <% end %>
+ </p>
+ <% end %>
+ <% end %>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ Sharing and permissions
+ </h3>
+ </div>
+ <div class="panel-body">
+ <!--
+ <input type="text" class="form-control" placeholder="Search"/>
+ -->
+ <div style="height:0.5em;"></div>
+ <% if @folders.andand.any? %>
+ <p>Included in folders:<br />
+ <%= render_arvados_object_list_start(@folders, 'Show all folders',
+ links_path(filter: [['head_uuid', '=', @object.uuid],
+ ['link_class', '=', 'name']].to_json)) do |folder| %>
+ <%= link_to_if_arvados_object(folder, friendly_name: true) %><br />
+ <% end %>
+ </p>
+ <% end %>
+ <% if @permissions.andand.any? %>
+ <p>Readable by:<br />
+ <%= render_arvados_object_list_start(@permissions, 'Show all permissions',
+ links_path(filter: [['head_uuid', '=', @object.uuid],
+ ['link_class', '=', 'permission']].to_json)) do |link| %>
+ <%= link_to_if_arvados_object(link.tail_uuid, friendly_name: true) %><br />
+ <% end %>
+ </p>
+ <% end %>
+ </div>
+ </div>
+ </div>
+</div>
+
+<%= render file: 'application/show.html.erb' %>
--- /dev/null
+<!DOCTYPE html>
+<html>
+<% coll_name = (@object.name =~ /\S/) ? @object.name : "Collection #{@object.uuid}" %>
+<% link_opts = {controller: 'collections', action: 'show_file',
+ uuid: @object.uuid, reader_token: params[:reader_token]} %>
+<head>
+ <meta charset="utf-8">
+ <title>
+ <%= coll_name %> / <%= Rails.configuration.site_name %>
+ </title>
+ <meta name="description" content="">
+ <meta name="author" content="">
+ <meta name="robots" content="NOINDEX">
+ <style type="text/css">
+body {
+ margin: 1.5em;
+}
+pre {
+ background-color: #D9EDF7;
+ border-radius: .25em;
+ padding: .75em;
+ overflow: auto;
+}
+.footer {
+ font-size: 82%;
+}
+.footer h2 {
+ font-size: 1.2em;
+}
+ </style>
+</head>
+<body>
+
+<h1><%= coll_name %></h1>
+
+<p>This collection of data files is being shared with you through
+Arvados. You can download individual files listed below. To download
+the entire collection with wget, try:</p>
+
+<pre>$ wget --mirror --no-parent --no-host --cut-dirs=3 <%=
+ url_for(link_opts.merge(action: 'show_file_links', only_path: false))
+ %></pre>
+
+<h2>File Listing</h2>
+
+<% if @object.andand.files_tree.andand.any? %>
+ <ul id="collection_files" class="collection_files">
+ <% dirstack = [@object.files_tree.first.first] %>
+ <% @object.files_tree.each_with_index do |(dirname, filename, size), index| %>
+ <% file_path = CollectionsHelper::file_path([dirname, filename]) %>
+ <% while dirstack.any? and (dirstack.last != dirname) %>
+ <% dirstack.pop %></ul></li>
+ <% end %>
+ <li>
+ <% if size.nil? # This is a subdirectory. %>
+ <% dirstack.push(File.join(dirname, filename)) %>
+ <%= filename %>
+ <ul class="collection_files">
+ <% else %>
+ <%= link_to(filename,
+ {controller: 'collections', action: 'show_file',
+ uuid: @object.uuid, file: file_path,
+ reader_token: params[:reader_token]},
+ {title: "Download #{file_path}"}) %>
+ </li>
+ <% end %>
+ <% end %>
+ <%= raw(dirstack.map { |_| "</ul>" }.join("</li>")) %>
+<% else %>
+ <p>No files in this collection.</p>
+<% end %>
+
+<div class="footer">
+<h2>About Arvados</h2>
+
+<p>Arvados is a free and open source software bioinformatics platform.
+To learn more, visit arvados.org.
+Arvados is not responsible for the files listed on this page.</p>
+</div>
+
+</body>
+</html>
<div class="col-md-6">
<div class="panel panel-info">
<div class="panel-heading">
- <h3 class="panel-title">
- <%= render_editable_attribute @object, 'name', nil, {data: {emptytext: "New folder"}} %>
- </h3>
+ <h3 class="panel-title">
+ <%= render_editable_attribute @object, 'name', nil, {data: {emptytext: "New folder"}} %>
+ </h3>
</div>
<div class="panel-body">
<img src="/favicon.ico" class="pull-right" alt="" style="opacity: 0.3"/>
- <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "Created: #{@object.created_at.to_s(:long)}", 'data-toggle' => 'manual', 'id' => "#{@object.uuid}-description" } %>
+ <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "Created: #{@object.created_at.to_s(:long)}", 'data-toggle' => 'manual', 'id' => "#{@object.uuid}-description" } %>
<% if @object.attribute_editable? 'description' %>
<div style="margin-top: 1em;">
<a href="#" class="btn btn-xs btn-default" data-toggle="x-editable" data-toggle-selector="#<%= @object.uuid %>-description"><i class="fa fa-fw fa-pencil"></i> Edit description</a>
<div class="col-md-3">
<div class="panel panel-default">
<div class="panel-heading">
- <h3 class="panel-title">
- Activity
- </h3>
+ <h3 class="panel-title">
+ Activity
+ </h3>
</div>
<div class="panel-body smaller-text">
<!--
- <input type="text" class="form-control" placeholder="Search"/>
+ <input type="text" class="form-control" placeholder="Search"/>
-->
- <div style="height:0.5em;"></div>
- <% @logs[0..2].each do |log| %>
- <p>
- <%= time_ago_in_words(log.event_at) %> ago: <%= log.summary %>
- <% if log.object_uuid %>
- <%= link_to_if_arvados_object log.object_uuid, link_text: raw('<i class="fa fa-hand-o-right"></i>') %>
- <% end %>
- </p>
- <% end %>
+ <div style="height:0.5em;"></div>
<% if @logs.any? %>
- <%= link_to raw('Show all activity <i class="fa fa-fw fa-arrow-circle-right"></i>'),
- logs_path(filters: [['object_uuid','=',@object.uuid]].to_json),
- class: 'btn btn-xs btn-default' %>
+ <%= render_arvados_object_list_start(@logs, 'Show all activity',
+ logs_path(filters: [['object_uuid','=',@object.uuid]].to_json)) do |log| %>
+ <p>
+ <%= time_ago_in_words(log.event_at) %> ago: <%= log.summary %>
+ <% if log.object_uuid %>
+ <%= link_to_if_arvados_object log.object_uuid, link_text: raw('<i class="fa fa-hand-o-right"></i>') %>
+ <% end %>
+ </p>
+ <% end %>
<% else %>
- <p>
- Created: <%= @object.created_at.to_s(:long) %>
- </p>
- <p>
- Last modified: <%= @object.modified_at.to_s(:long) %> by <%= link_to_if_arvados_object @object.modified_by_user_uuid, friendly_name: true %>
- </p>
+ <p>
+ Created: <%= @object.created_at.to_s(:long) %>
+ </p>
+ <p>
+ Last modified: <%= @object.modified_at.to_s(:long) %> by <%= link_to_if_arvados_object @object.modified_by_user_uuid, friendly_name: true %>
+ </p>
<% end %>
</div>
</div>
<div class="col-md-3">
<div class="panel panel-default">
<div class="panel-heading">
- <h3 class="panel-title">
- Sharing and permissions
- </h3>
+ <h3 class="panel-title">
+ Sharing and permissions
+ </h3>
</div>
<div class="panel-body">
<!--
- <input type="text" class="form-control" placeholder="Search"/>
+ <input type="text" class="form-control" placeholder="Search"/>
-->
- <div style="height:0.5em;"></div>
+ <div style="height:0.5em;"></div>
<p>Owner: <%= link_to_if_arvados_object @object.owner_uuid, friendly_name: true %></p>
<% if @share_links.any? %>
<p>Shared with:
}
<% end %>
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
+
<table class="topalign table">
<thead>
<tr class="contain-align-left">
</thead>
<tbody>
- <% @jobs.sort_by { |j| j[:created_at] }.reverse.each do |j| %>
+ <% @objects.sort_by { |j| j[:created_at] }.reverse.each do |j| %>
<tr class="cell-noborder">
<td>
</div>
</td>
<td>
- <%= link_to_if_arvados_object j.uuid %>
+ <%= link_to_if_arvados_object j %>
</td>
<td>
<%= j.script %>
<link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
<meta name="description" content="">
<meta name="author" content="">
+ <meta name="robots" content="NOINDEX, NOFOLLOW">
<%= stylesheet_link_tag "application", :media => "all" %>
<%= javascript_include_tag "application" %>
<%= csrf_meta_tags %>
</li>
<li class="dropdown">
- <a href="#" class="dropdown-toggle" data-toggle="dropdown"><i class="fa fa-lg fa-folder-o fa-fw"></i> Folders <b class="caret"></b></a>
+ <a href="/folders" class="dropdown-toggle" data-toggle="dropdown"><i class="fa fa-lg fa-folder-o fa-fw"></i> Folders <b class="caret"></b></a>
<ul class="dropdown-menu">
<li><%= link_to raw('<i class="fa fa-plus fa-fw"></i> Create new folder'), folders_path, method: :post %></li>
<% @my_top_level_folders.call[0..7].each do |folder| %>
<li><a href="/collections">
<i class="fa fa-lg fa-briefcase fa-fw"></i> Collections (data files)
</a></li>
+ <li><a href="/jobs">
+ <i class="fa fa-lg fa-tasks fa-fw"></i> Jobs
+ </a></li>
<li><a href="/pipeline_instances">
<i class="fa fa-lg fa-tasks fa-fw"></i> Pipeline instances
</a></li>
<i class="fa fa-lg fa-users fa-fw"></i> Groups
</a></li>
<li><a href="/nodes">
- <i class="fa fa-lg fa-cogs fa-fw"></i> Compute nodes
+ <i class="fa fa-lg fa-cloud fa-fw"></i> Compute nodes
+ </a></li>
+ <li><a href="/keep_services">
+ <i class="fa fa-lg fa-exchange fa-fw"></i> Keep services
</a></li>
<li><a href="/keep_disks">
<i class="fa fa-lg fa-hdd-o fa-fw"></i> Keep disks
</ul>
</li>
<% else %>
- <li><a href="<%= $arvados_api_client.arvados_login_url(return_to: root_url) %>">Log in</a></li>
+ <li><a href="<%= arvados_api_client.arvados_login_url(return_to: root_url) %>">Log in</a></li>
<% end %>
</ul>
</div><!-- /.navbar-collapse -->
<td>
<% if current_user and (current_user.is_admin or current_user.uuid == link.owner_uuid) %>
- <%= link_to raw('<i class="glyphicon glyphicon-trash"></i>'), { action: 'destroy', id: link.uuid }, { confirm: 'Delete this link?', method: 'delete' } %>
+ <%= link_to raw('<i class="glyphicon glyphicon-trash"></i>'), { action: 'destroy', id: link.uuid }, data: {confirm: 'Delete this link?', method: 'delete'} %>
<% end %>
</td>
<% end %>
<% end %>
-<% if @object.active != nil %>
+<% if !@object.state.in? ['New', 'Ready', 'Paused'] %>
<table class="table pipeline-components-table">
<colgroup>
<col style="width: 15%" />
script, version
</th><th>
progress
- <%= link_to '(refresh)', request.fullpath, class: 'refresh hide', remote: true, method: 'get' %>
+ <%# format:'js' here helps browsers avoid using the cached js
+ content in html context (e.g., duplicate tab -> see
+ javascript) %>
+ <%= link_to '(refresh)', {format:'js'}, class: 'refresh hide', remote: true, method: 'get' %>
</th><th>
</th><th>
output
</tfoot>
</table>
-<% if @object.active %>
+<% if @object.state == 'RunningOnServer' || @object.state == 'RunningOnClient' %>
<% content_for :js do %>
setInterval(function(){$('a.refresh').click()}, 15000);
<% end %>
<% content_for :tab_line_buttons do %>
<%= form_tag @object, :method => :put do |f| %>
- <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :active, :value => false %>
+ <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :state, :value => 'Paused' %>
<%= button_tag "Stop pipeline", {class: 'btn btn-primary pull-right', id: "run-pipeline-button"} %>
<% end %>
<% end %>
<% else %>
-
- <p>Please set the desired input parameters for the components of this pipeline. Parameters highlighted in red are required.</p>
+ <% if @object.state == 'New' %>
+ <p>Please set the desired input parameters for the components of this pipeline. Parameters highlighted in red are required.</p>
+ <% end %>
<% content_for :tab_line_buttons do %>
<%= form_tag @object, :method => :put do |f| %>
- <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :active, :value => true %>
+ <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :state, :value => 'RunningOnServer' %>
<%= button_tag "Run pipeline", {class: 'btn btn-primary pull-right', id: "run-pipeline-button"} %>
<% end %>
<% end %>
- <%= render partial: 'show_components_editable', locals: {editable: true} %>
-
+ <% if @object.state.in? ['New', 'Ready'] %>
+ <%= render partial: 'show_components_editable', locals: {editable: true} %>
+ <% else %>
+ <%= render partial: 'show_components_editable', locals: {editable: false} %>
+ <% end %>
<% end %>
<col width="25%" />
<col width="20%" />
<col width="15%" />
- <col width="20%" />
+ <col width="15%" />
+ <col width="5%" />
</colgroup>
<thead>
<tr class="contain-align-left">
Owner
</th><th>
Age
+ </th><th>
</th>
</tr>
</thead>
<%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
</td><td>
<%= distance_of_time_in_words(ob.created_at, Time.now) %>
+ </td><td>
+ <%= render partial: 'delete_object_button', locals: {object:ob} %>
</td>
</tr>
<tr>
<td style="border-top: 0;" colspan="2">
</td>
- <td style="border-top: 0; opacity: 0.5;" colspan="5">
+ <td style="border-top: 0; opacity: 0.5;" colspan="6">
<% ob.components.each do |cname, c| %>
<% if c[:job] %>
<%= render partial: "job_status_label", locals: {:j => c[:job], :title => cname.to_s } %>
<% self.formats = [:html] %>
var new_content = "<%= escape_javascript(render template: 'pipeline_instances/show') %>";
var selected_tab_hrefs = [];
-if ($('div.body-content').html() != new_content) {
+if ($('div#page-wrapper').html() != new_content) {
$('.nav-tabs li.active a').each(function() {
selected_tab_hrefs.push($(this).attr('href'));
});
- $('div.body-content').html(new_content);
+ $('div#page-wrapper').html(new_content);
- // Show the same tabs that were active before we rewrote body-content
+ // Show the same tabs that were active before we rewrote page-wrapper
$.each(selected_tab_hrefs, function(i, href) {
$('.nav-tabs li a[href="' + href + '"]').tab('show');
});
<p>As an admin, you can deactivate and reset this user. This will remove all repository/VM permissions for the user. If you "setup" the user again, the user will have to sign the user agreement again.</p>
<blockquote>
-<%= button_to "Deactivate #{@object.full_name}", unsetup_user_url(id: @object.uuid), class: 'btn btn-primary', confirm: "Are you sure you want to deactivate #{@object.full_name}?"%>
+<%= button_to "Deactivate #{@object.full_name}", unsetup_user_url(id: @object.uuid), class: 'btn btn-primary', data: {confirm: "Are you sure you want to deactivate #{@object.full_name}?"} %>
</blockquote>
<% content_for :footer_html do %>
beyond that.
</p>
<p>
- <a class="pull-right btn btn-primary" href="<%= $arvados_api_client.arvados_login_url(return_to: request.url) %>">
+ <a class="pull-right btn btn-primary" href="<%= arvados_api_client.arvados_login_url(return_to: request.url) %>">
Click here to log in to <%= Rails.configuration.site_name %> with a Google account</a>
</p>
</div>
$("#PutStuffHere").append(content + "<br>");
};
-var dispatcher = new WebSocket('<%= $arvados_api_client.discovery[:websocketUrl] %>?api_token=<%= Thread.current[:arvados_api_token] %>');
+var dispatcher = new WebSocket('<%= arvados_api_client.discovery[:websocketUrl] %>?api_token=<%= Thread.current[:arvados_api_token] %>');
dispatcher.onmessage = function(event) {
//putStuffThere(JSON.parse(event.data));
putStuffThere(event.data);
development:
cache_classes: false
- whiny_nils: true
+ eager_load: true
consider_all_requests_local: true
action_controller.perform_caching: false
action_mailer.raise_delivery_errors: false
active_support.deprecation: :log
action_dispatch.best_standards_support: :builtin
- active_record.mass_assignment_sanitizer: :strict
- active_record.auto_explain_threshold_in_seconds: 0.5
- assets.compress: false
assets.debug: true
profiling_enabled: true
site_name: Arvados Workbench (dev)
production:
force_ssl: true
cache_classes: true
+ eager_load: true
consider_all_requests_local: false
action_controller.perform_caching: true
serve_static_assets: false
- assets.compress: true
assets.compile: false
assets.digest: true
i18n.fallbacks: true
test:
cache_classes: true
+ eager_load: false
serve_static_assets: true
static_cache_control: public, max-age=3600
- whiny_nils: true
consider_all_requests_local: true
action_controller.perform_caching: false
action_dispatch.show_exceptions: false
action_controller.allow_forgery_protection: false
action_mailer.delivery_method: :test
- active_record.mass_assignment_sanitizer: :strict
active_support.deprecation: :stderr
profiling_enabled: false
secret_token: <%= rand(2**256).to_s(36) %>
+ secret_key_base: <%= rand(2**256).to_s(36) %>
# When you run the Workbench's integration tests, it starts the API
# server as a dependency. These settings should match the API
site_name: Workbench:test
common:
+ assets.js_compressor: false
+ assets.css_compressor: false
data_import_dir: /tmp/arvados-workbench-upload
data_export_dir: /tmp/arvados-workbench-download
arvados_login_base: https://arvados.local/login
arvados_theme: default
show_user_agreement_inline: false
secret_token: ~
+ secret_key_base: false
default_openid_prefix: https://www.google.com/accounts/o8/id
send_user_setup_notification_email: true
require 'rails/all'
-if defined?(Bundler)
- # If you precompile assets before deploying to production, use this line
- Bundler.require(*Rails.groups(:assets => %w(development test)))
- # If you want your assets lazily compiled in production, use this line
- # Bundler.require(:default, :assets, Rails.env)
-end
+Bundler.require(:default, Rails.env)
module ArvadosWorkbench
class Application < Rails::Application
# like if you have constraints or database-specific column types
# config.active_record.schema_format = :sql
- # Enforce whitelist mode for mass assignment.
- # This will create an empty whitelist of attributes available for mass-assignment for all models
- # in your app. As such, your models will need to explicitly whitelist or blacklist accessible
- # parameters by using an attr_accessible or attr_protected declaration.
- config.active_record.whitelist_attributes = true
-
# Enable the asset pipeline
config.assets.enabled = true
config.assets.version = '1.0'
end
end
+
+require File.expand_path('../load_config', __FILE__)
# since you don't have to restart the web server when you make code changes.
config.cache_classes = false
- # Log error messages when you accidentally call methods on nil.
- config.whiny_nils = true
-
# Show full error reports and disable caching
config.consider_all_requests_local = true
config.action_controller.perform_caching = false
# Only use best-standards-support built into browsers
config.action_dispatch.best_standards_support = :builtin
- # Raise exception on mass assignment protection for Active Record models
- config.active_record.mass_assignment_sanitizer = :strict
-
- # Log the query plan for queries taking more than this (works
- # with SQLite, MySQL, and PostgreSQL)
- config.active_record.auto_explain_threshold_in_seconds = 0.5
-
# Do not compress assets
- config.assets.compress = false
+ config.assets.js_compressor = false
# Expands the lines which load the assets
config.assets.debug = true
config.serve_static_assets = false
# Compress JavaScripts and CSS
- config.assets.compress = true
+ config.assets.js_compressor = :yui
# Don't fallback to assets pipeline if a precompiled asset is missed
config.assets.compile = false
# Send deprecation notices to registered listeners
config.active_support.deprecation = :notify
- # Log the query plan for queries taking more than this (works
- # with SQLite, MySQL, and PostgreSQL)
- # config.active_record.auto_explain_threshold_in_seconds = 0.5
-
# Log timing data for API transactions
config.profiling_enabled = false
config.serve_static_assets = true
config.static_cache_control = "public, max-age=3600"
- # Log error messages when you accidentally call methods on nil
- config.whiny_nils = true
-
# Show full error reports and disable caching
config.consider_all_requests_local = true
config.action_controller.perform_caching = false
# ActionMailer::Base.deliveries array.
config.action_mailer.delivery_method = :test
- # Raise exception on mass assignment protection for Active Record models
- config.active_record.mass_assignment_sanitizer = :strict
-
# Print deprecation notices to the stderr
config.active_support.deprecation = :stderr
+++ /dev/null
-# The client object must be instantiated _after_ zza_load_config.rb
-# runs, because it relies on configuration settings.
-#
-if not $application_config
- raise "Fatal: Config must be loaded before instantiating ArvadosApiClient."
-end
-
-$arvados_api_client = ArvadosApiClient.new
themes_for_rails
resources :keep_disks
+ resources :keep_services
resources :user_agreements do
put 'sign', on: :collection
get 'signatures', on: :collection
resources :authorized_keys
resources :job_tasks
resources :jobs
- match '/logout' => 'sessions#destroy'
- match '/logged_out' => 'sessions#index'
+ match '/logout' => 'sessions#destroy', via: [:get, :post]
+ get '/logged_out' => 'sessions#index'
resources :users do
get 'home', :on => :member
get 'welcome', :on => :collection
get 'compare', on: :collection
end
resources :links
- match '/collections/graph' => 'collections#graph'
+ get '/collections/graph' => 'collections#graph'
resources :collections do
post 'set_persistent', on: :member
end
+ get('/collections/download/:uuid/:reader_token/*file' => 'collections#show_file',
+ format: false)
+ get '/collections/download/:uuid/:reader_token' => 'collections#show_file_links'
get '/collections/:uuid/*file' => 'collections#show_file', :format => false
resources :folders do
match 'remove/:item_uuid', on: :member, via: :delete, action: :remove_item
# Send unroutable requests to an arbitrary controller
# (ends up at ApplicationController#render_not_found)
- match '*a', :to => 'links#render_not_found'
+ match '*a', to: 'links#render_not_found', via: [:get, :post]
end
-User-Agent: *
-Disallow: /
"session token does not belong to #{client_auth}")
end
+ def show_collection(params, session={}, response=:success)
+ params = collection_params(params) if not params.is_a? Hash
+ session = session_for(session) if not session.is_a? Hash
+ get(:show, params, session)
+ assert_response response
+ end
+
# Mock the collection file reader to avoid external calls and return
# a predictable string.
CollectionsController.class_eval do
end
test "viewing a collection" do
- params = collection_params(:foo_file)
- sess = session_for(:active)
- get(:show, params, sess)
- assert_response :success
+ show_collection(:foo_file, :active)
assert_equal([['.', 'foo', 3]], assigns(:object).files)
end
- test "viewing a collection with a reader token" do
- params = collection_params(:foo_file)
- params[:reader_tokens] =
- [api_fixture('api_client_authorizations')['active']['api_token']]
- get(:show, params)
- assert_response :success
- assert_equal([['.', 'foo', 3]], assigns(:object).files)
- assert_no_session
+ test "viewing a collection fetches related folders" do
+ show_collection(:foo_file, :active)
+ assert_includes(assigns(:folders).map(&:uuid),
+ api_fixture('groups')['afolder']['uuid'],
+ "controller did not find linked folder")
+ end
+
+ test "viewing a collection fetches related permissions" do
+ show_collection(:bar_file, :active)
+ assert_includes(assigns(:permissions).map(&:uuid),
+ api_fixture('links')['bar_file_readable_by_active']['uuid'],
+ "controller did not find permission link")
+ end
+
+ test "viewing a collection fetches jobs that output it" do
+ show_collection(:bar_file, :active)
+ assert_includes(assigns(:output_of).map(&:uuid),
+ api_fixture('jobs')['foobar']['uuid'],
+ "controller did not find output job")
+ end
+
+ test "viewing a collection fetches jobs that logged it" do
+ show_collection(:baz_file, :active)
+ assert_includes(assigns(:log_of).map(&:uuid),
+ api_fixture('jobs')['foobar']['uuid'],
+ "controller did not find logger job")
end
- test "viewing the index with a reader token" do
- params = {reader_tokens:
- [api_fixture('api_client_authorizations')['spectator']['api_token']]
- }
- get(:index, params)
+ test "viewing a collection fetches logs about it" do
+ show_collection(:foo_file, :active)
+ assert_includes(assigns(:logs).map(&:uuid),
+ api_fixture('logs')['log4']['uuid'],
+ "controller did not find related log")
+ end
+
+ test "viewing collection files with a reader token" do
+ params = collection_params(:foo_file)
+ params[:reader_token] =
+ api_fixture('api_client_authorizations')['active']['api_token']
+ get(:show_file_links, params)
assert_response :success
+ assert_equal([['.', 'foo', 3]], assigns(:object).files)
assert_no_session
- listed_collections = assigns(:collections).map { |c| c.uuid }
- assert_includes(listed_collections,
- api_fixture('collections')['bar_file']['uuid'],
- "spectator reader token didn't list bar file")
- refute_includes(listed_collections,
- api_fixture('collections')['foo_file']['uuid'],
- "spectator reader token listed foo file")
end
test "getting a file from Keep" do
params = collection_params(:foo_file, 'foo')
sess = session_for(:spectator)
get(:show_file, params, sess)
- assert_includes([403, 404], @response.code.to_i)
+ assert_response 404
end
test "trying to get a nonexistent file from Keep returns a 404" do
test "getting a file from Keep with a good reader token" do
params = collection_params(:foo_file, 'foo')
read_token = api_fixture('api_client_authorizations')['active']['api_token']
- params[:reader_tokens] = [read_token]
+ params[:reader_token] = read_token
get(:show_file, params)
assert_response :success
assert_equal(expected_contents(params, read_token), @response.body,
test "trying to get from Keep with an unscoped reader token prompts login" do
params = collection_params(:foo_file, 'foo')
- read_token =
+ params[:reader_token] =
api_fixture('api_client_authorizations')['active_noscope']['api_token']
- params[:reader_tokens] = [read_token]
get(:show_file, params)
assert_response :redirect
end
params = collection_params(:foo_file, 'foo')
sess = session_for(:expired)
read_token = api_fixture('api_client_authorizations')['active']['api_token']
- params[:reader_tokens] = [read_token]
+ params[:reader_token] = read_token
get(:show_file, params, sess)
assert_response :success
assert_equal(expected_contents(params, read_token), @response.body,
require 'headless'
class CollectionsTest < ActionDispatch::IntegrationTest
-
def change_persist oldstate, newstate
find "div[data-persistent-state='#{oldstate}']"
page.assert_no_selector "div[data-persistent-state='#{newstate}']"
# isn't only showing up in an error message.
assert(page.has_link?('foo'), "Collection page did not include file link")
end
+
+ test "can download an entire collection with a reader token" do
+ uuid = api_fixture('collections')['foo_file']['uuid']
+ token = api_fixture('api_client_authorizations')['active_all_collections']['api_token']
+ url_head = "/collections/download/#{uuid}/#{token}/"
+ visit url_head
+ # It seems that Capybara can't inspect tags outside the body, so this is
+ # a very blunt approach.
+ assert_no_match(/<\s*meta[^>]+\bnofollow\b/i, page.html,
+ "wget prohibited from recursing the collection page")
+ # TODO: When we can test against a Keep server, actually follow links
+ # and check their contents, rather than testing the href directly
+ # (this is too closely tied to implementation details).
+ hrefs = page.all('a').map do |anchor|
+ link = anchor[:href] || ''
+ if link.start_with? url_head
+ link[url_head.size .. -1]
+ elsif link.start_with? '/'
+ nil
+ else
+ link
+ end
+ end
+ assert_equal(['foo'], hrefs.compact.sort,
+ "download page did provide strictly file links")
+ end
end
fill_in "email", :with => "foo@example.com"
fill_in "repo_name", :with => "test_repo"
click_button "Submit"
+ wait_for_ajax
end
visit '/users'
assert has_text? 'Virtual Machine'
fill_in "repo_name", :with => "test_repo"
click_button "Submit"
+ wait_for_ajax
end
- sleep(1)
assert page.has_text? 'modified_by_client_uuid'
click_link 'Metadata'
fill_in "repo_name", :with => "second_test_repo"
select("testvm.shell", :from => 'vm_uuid')
click_button "Submit"
+ wait_for_ajax
end
- sleep(0.1)
assert page.has_text? 'modified_by_client_uuid'
click_link 'Metadata'
fill_in "repo_name", :with => "second_test_repo"
select("testvm.shell", :from => 'vm_uuid')
click_button "Submit"
+ wait_for_ajax
end
- sleep(0.1)
assert page.has_text? 'modified_by_client_uuid'
click_link 'Metadata'
@@API_AUTHS = self.api_fixture('api_client_authorizations')
+ def setup
+ reset_session!
+ super
+ end
+
def page_with_token(token, path='/')
# Generate a page path with an embedded API token.
# Typical usage: visit page_with_token('token_name', page)
require File.expand_path('../../config/environment', __FILE__)
require 'rails/test_help'
-$ARV_API_SERVER_DIR = File.expand_path('../../../../services/api', __FILE__)
-SERVER_PID_PATH = 'tmp/pids/server.pid'
-
class ActiveSupport::TestCase
# Setup all fixtures in test/fixtures/*.(yml|csv) for all tests in
# alphabetical order.
def teardown
Thread.current[:arvados_api_token] = nil
+ Thread.current[:reader_tokens] = nil
super
end
end
# Returns the data structure from the named API server test fixture.
@@api_fixtures[name] ||= \
begin
- path = File.join($ARV_API_SERVER_DIR, 'test', 'fixtures', "#{name}.yml")
+ path = File.join(ApiServerForTests::ARV_API_SERVER_DIR,
+ 'test', 'fixtures', "#{name}.yml")
YAML.load(IO.read(path))
end
end
end
end
-class ApiServerBackedTestRunner < MiniTest::Unit
- def _system(*cmd)
+class ApiServerForTests
+ ARV_API_SERVER_DIR = File.expand_path('../../../../services/api', __FILE__)
+ SERVER_PID_PATH = File.expand_path('tmp/pids/wbtest-server.pid', ARV_API_SERVER_DIR)
+ @main_process_pid = $$
+
+ def self._system(*cmd)
+ $stderr.puts "_system #{cmd.inspect}"
Bundler.with_clean_env do
if not system({'RAILS_ENV' => 'test'}, *cmd)
raise RuntimeError, "#{cmd[0]} returned exit code #{$?.exitstatus}"
end
end
- def _run(args=[])
+ def self.make_ssl_cert
+ unless File.exists? './self-signed.key'
+ _system('openssl', 'req', '-new', '-x509', '-nodes',
+ '-out', './self-signed.pem',
+ '-keyout', './self-signed.key',
+ '-days', '3650',
+ '-subj', '/CN=localhost')
+ end
+ end
+
+ def self.kill_server
+ if (pid = find_server_pid)
+ $stderr.puts "Sending TERM to API server, pid #{pid}"
+ Process.kill 'TERM', pid
+ end
+ end
+
+ def self.find_server_pid
+ pid = nil
+ begin
+ pid = IO.read(SERVER_PID_PATH).to_i
+ $stderr.puts "API server is running, pid #{pid.inspect}"
+ rescue Errno::ENOENT
+ end
+ return pid
+ end
+
+ def self.run(args=[])
+ ::MiniTest.after_run do
+ self.kill_server
+ end
+
+ # Kill server left over from previous test run
+ self.kill_server
+
Capybara.javascript_driver = :poltergeist
- server_pid = Dir.chdir($ARV_API_SERVER_DIR) do |apidir|
+ Dir.chdir(ARV_API_SERVER_DIR) do |apidir|
ENV["NO_COVERAGE_TEST"] = "1"
+ make_ssl_cert
_system('bundle', 'exec', 'rake', 'db:test:load')
_system('bundle', 'exec', 'rake', 'db:fixtures:load')
- _system('bundle', 'exec', 'rails', 'server', '-d')
+ _system('bundle', 'exec', 'passenger', 'start', '-d', '-p3001',
+ '--pid-file', SERVER_PID_PATH,
+ '--ssl',
+ '--ssl-certificate', 'self-signed.pem',
+ '--ssl-certificate-key', 'self-signed.key')
timeout = Time.now.tv_sec + 10
good_pid = false
while (not good_pid) and (Time.now.tv_sec < timeout)
sleep 0.2
- begin
- server_pid = IO.read(SERVER_PID_PATH).to_i
- good_pid = (server_pid > 0) and (Process.kill(0, server_pid) rescue false)
- rescue Errno::ENOENT
- good_pid = false
- end
+ server_pid = find_server_pid
+ good_pid = (server_pid and
+ (server_pid > 0) and
+ (Process.kill(0, server_pid) rescue false))
end
if not good_pid
raise RuntimeError, "could not find API server Rails pid"
end
- server_pid
- end
- begin
- super(args)
- ensure
- Process.kill('TERM', server_pid)
end
end
end
-MiniTest::Unit.runner = ApiServerBackedTestRunner.new
+ApiServerForTests.run
assert_equal false, Collection.is_empty_blob_locator?(x)
end
end
+
+ def get_files_tree(coll_name)
+ use_token :admin
+ Collection.find(api_fixture('collections')[coll_name]['uuid']).files_tree
+ end
+
+ test "easy files_tree" do
+ files_in = lambda do |dirname|
+ (1..3).map { |n| [dirname, "file#{n}", 0] }
+ end
+ assert_equal([['.', 'dir1', nil], ['./dir1', 'subdir', nil]] +
+ files_in['./dir1/subdir'] + files_in['./dir1'] +
+ [['.', 'dir2', nil]] + files_in['./dir2'] + files_in['.'],
+ get_files_tree('multilevel_collection_1'),
+ "Collection file tree was malformed")
+ end
+
+ test "files_tree with files deep in subdirectories" do
+ # This test makes sure files_tree generates synthetic directory entries.
+ # The manifest doesn't list directories with no files.
+ assert_equal([['.', 'dir1', nil], ['./dir1', 'sub1', nil],
+ ['./dir1/sub1', 'a', 0], ['./dir1/sub1', 'b', 0],
+ ['.', 'dir2', nil], ['./dir2', 'sub2', nil],
+ ['./dir2/sub2', 'c', 0], ['./dir2/sub2', 'd', 0]],
+ get_files_tree('multilevel_collection_2'),
+ "Collection file tree was malformed")
+ end
end
require 'test_helper'
class CollectionsHelperTest < ActionView::TestCase
+ test "file_path generates short names" do
+ assert_equal('foo', CollectionsHelper.file_path(['.', 'foo', 0]),
+ "wrong result for filename in collection root")
+ assert_equal('foo/bar', CollectionsHelper.file_path(['foo', 'bar', 0]),
+ "wrong result for filename in directory without leading .")
+ assert_equal('foo/bar', CollectionsHelper.file_path(['./foo', 'bar', 0]),
+ "wrong result for filename in directory with leading .")
+ end
end
- api/methods/jobs.html.textile.liquid
- api/methods/job_tasks.html.textile.liquid
- api/methods/keep_disks.html.textile.liquid
+ - api/methods/keep_services.html.textile.liquid
- api/methods/links.html.textile.liquid
- api/methods/logs.html.textile.liquid
- api/methods/nodes.html.textile.liquid
- api/schema/Job.html.textile.liquid
- api/schema/JobTask.html.textile.liquid
- api/schema/KeepDisk.html.textile.liquid
+ - api/schema/KeepService.html.textile.liquid
- api/schema/Link.html.textile.liquid
- api/schema/Log.html.textile.liquid
- api/schema/Node.html.textile.liquid
--- /dev/null
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "keep_services"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_services@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h2. accessible
+
+Get a list of keep services that are accessible to the requesting client. This
+is context-sensitive, for example providing the list of actual Keep servers
+when inside the cluster, but providing a proxy service if the client
+contacts Arvados from outside the cluster.
+
+Takes no arguments.
+
+h2. create
+
+Create a new KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|keep_service|object||query||
+
+h2. delete
+
+Delete an existing KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+
+h2. get
+
+Gets a KeepService's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+
+h2. list
+
+List keep_services.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of keep_services to return.|query||
+|order|string|Order in which to return matching keep_services.|query||
+|filters|array|Conditions for filtering keep_services.|query||
+
+h2. update
+
+Update attributes of an existing KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+|keep_service|object||query||
|last_read_at|datetime|||
|last_write_at|datetime|||
|last_ping_at|datetime|||
-|service_host|string|||
-|service_port|integer|||
-|service_ssl_flag|boolean|||
+|keep_service_uuid|string|||
--- /dev/null
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: KeepService
+
+...
+
+A **KeepService** is a service endpoint that supports the Keep protocol.
+
+h2. Methods
+
+See "keep_services":{{site.baseurl}}/api/methods/keep_services.html
+
+h2. Resource
+
+Each KeepService has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|service_host|string|||
+|service_port|integer|||
+|service_ssl_flag|boolean|||
+|service_type|string|||
\ No newline at end of file
~/arvados/apps/workbench$ <span class="userinput">bundle install --path=vendor/bundle</span>
</code></pre></notextile>
+The @bundle install@ command might produce a warning about the themes_for_rails gem. This is OK:
+
+<notextile>
+<pre><code>themes_for_rails at /home/<b>you</b>/.rvm/gems/ruby-2.1.1/bundler/gems/themes_for_rails-1fd2d7897d75 did not have a valid gemspec.
+This prevents bundler from installing bins or native extensions, but that may not affect its functionality.
+The validation message from Rubygems was:
+ duplicate dependency on rails (= 3.0.11, development), (>= 3.0.0) use:
+ add_runtime_dependency 'rails', '= 3.0.11', '>= 3.0.0'
+Using themes_for_rails (0.5.1) from https://github.com/holtkampw/themes_for_rails (at 1fd2d78)
+</code></pre></notextile>
+
h2. Configure the Workbench application
This application needs a secret token. Generate a new secret:
s.executables << "arv-run-pipeline-instance"
s.executables << "arv-crunch-job"
s.executables << "arv-tag"
+ s.required_ruby_version = '>= 2.1.0'
s.add_runtime_dependency 'arvados', '~> 0.1.0'
s.add_runtime_dependency 'google-api-client', '~> 0.6.3'
s.add_runtime_dependency 'activesupport', '~> 3.2', '>= 3.2.13'
exit
end
-request_parameters = {}.merge(method_opts)
+request_parameters = {_profile:true}.merge(method_opts)
resource_body = request_parameters.delete(resource_schema.to_sym)
if resource_body
request_body = {
resource_schema => resource_body
}
else
- request_body = {}
+ request_body = nil
end
case api_method
end
exit 0
else
- request_body[:api_token] = ENV['ARVADOS_API_TOKEN']
- request_body[:_profile] = true
result = client.execute(:api_method => eval(api_method),
:parameters => request_parameters,
:body => request_body,
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
end
begin
:parameters => {
:uuid => uuid
},
- :body => {
- :api_token => ENV['ARVADOS_API_TOKEN']
- },
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
unless j.is_a? Hash and j[:uuid]
debuglog "Failed to get pipeline_instance: #{j[:errors] rescue nil}", 0
def self.create(attributes)
result = $client.execute(:api_method => $arvados.pipeline_instances.create,
:body => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:pipeline_instance => attributes
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
unless j.is_a? Hash and j[:uuid]
abort "Failed to create pipeline_instance: #{j[:errors] rescue nil} #{j.inspect}"
:uuid => @pi[:uuid]
},
:body => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:pipeline_instance => @attributes_to_update.to_json
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
unless j.is_a? Hash and j[:uuid]
debuglog "Failed to save pipeline_instance: #{j[:errors] rescue nil}", 0
@cache ||= {}
result = $client.execute(:api_method => $arvados.jobs.get,
:parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:uuid => uuid
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
@cache[uuid] = JSON.parse result.body, :symbolize_names => true
end
def self.where(conditions)
result = $client.execute(:api_method => $arvados.jobs.list,
:parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:limit => 10000,
:where => conditions.to_json
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
list = JSON.parse result.body, :symbolize_names => true
if list and list[:items].is_a? Array
list[:items]
def self.create(job, create_params)
@cache ||= {}
result = $client.execute(:api_method => $arvados.jobs.create,
- :parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
+ :body => {
:job => job.to_json
}.merge(create_params),
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
if j.is_a? Hash and j[:uuid]
@cache[j[:uuid]] = j
else
result = $client.execute(:api_method => $arvados.pipeline_templates.get,
:parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:uuid => template
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
@template = JSON.parse result.body, :symbolize_names => true
if !@template[:uuid]
abort "#{$0}: fatal: failed to retrieve pipeline template #{template} #{@template[:errors].inspect rescue nil}"
end
end
@instance[:components] = @components
- @instance[:active] = moretodo
report_status
if @options[:no_wait]
debuglog "interrupt", 0
interrupted = true
break
- #abort
end
end
end
if interrupted
if success
- @instance[:active] = false
- @instance[:success] = success
- @instance[:state] = "Complete"
+ @instance[:state] = 'Complete'
else
- @instance[:active] = nil
- @instance[:success] = nil
@instance[:state] = 'Paused'
end
else
if ended == @components.length or failed > 0
- @instance[:active] = false
- @instance[:success] = success
- @instance[:state] = success ? "Complete" : "Failed"
+ @instance[:state] = success ? 'Complete' : 'Failed'
end
end
end
def cleanup
- if @instance
- @instance[:active] = false
+ if @instance and @instance[:state] == 'RunningOnClient'
+ @instance[:state] = 'Paused'
@instance.save
end
end
my $arv = Arvados->new('apiVersion' => 'v1');
-my $metastream;
+my $local_logfile;
my $User = $arv->{'users'}->{'current'}->execute;
$job_id = $Job->{'uuid'};
my $keep_logfile = $job_id . '.log.txt';
-my $local_logfile = File::Temp->new();
+$local_logfile = File::Temp->new();
$Job->{'runtime_constraints'} ||= {};
$Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
$message =~ s{([^ -\176])}{"\\" . sprintf ("%03o", ord($1))}ge;
$message .= "\n";
my $datetime;
- if ($metastream || -t STDERR) {
+ if ($local_logfile || -t STDERR) {
my @gmtime = gmtime;
$datetime = sprintf ("%04d-%02d-%02d_%02d:%02d:%02d",
$gmtime[5]+1900, $gmtime[4]+1, @gmtime[3,2,1,0]);
}
print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);
- if ($metastream) {
- print $metastream $datetime . " " . $message;
+ if ($local_logfile) {
+ print $local_logfile $datetime . " " . $message;
}
}
freeze() if @jobstep_todo;
collate_output() if @jobstep_todo;
cleanup();
- save_meta() if $metastream;
+ save_meta() if $local_logfile;
die;
}
Protocol scheme. Default: C<ARVADOS_API_PROTOCOL_SCHEME> environment
variable, or C<https>
-=item apiToken
+=item authToken
Authorization token. Default: C<ARVADOS_API_TOKEN> environment variable
{
my $self = shift;
my %req;
- $req{$self->{'method'}} = $self->{'uri'};
+ my %content;
+ my $method = $self->{'method'};
+ if ($method eq 'GET' || $method eq 'HEAD') {
+ $content{'_method'} = $method;
+ $method = 'POST';
+ }
+ $req{$method} = $self->{'uri'};
$self->{'req'} = new HTTP::Request (%req);
$self->{'req'}->header('Authorization' => ('OAuth2 ' . $self->{'authToken'})) if $self->{'authToken'};
$self->{'req'}->header('Accept' => 'application/json');
- my %content;
my ($p, $v);
while (($p, $v) = each %{$self->{'queryParams'}}) {
$content{$p} = (ref($v) eq "") ? $v : JSON::encode_json($v);
authorize_with("admin")
api = arvados.api('v1', cache=False)
- a = api.keep_disks().list().execute()
+ for d in api.keep_services().list().execute()['items']:
+ api.keep_services().delete(uuid=d['uuid']).execute()
for d in api.keep_disks().list().execute()['items']:
api.keep_disks().delete(uuid=d['uuid']).execute()
- api.keep_disks().create(body={"keep_disk": {"service_host": "localhost", "service_port": 25107} }).execute()
- api.keep_disks().create(body={"keep_disk": {"service_host": "localhost", "service_port": 25108} }).execute()
+ s1 = api.keep_services().create(body={"keep_service": {"service_host": "localhost", "service_port": 25107, "service_type": "disk"} }).execute()
+ s2 = api.keep_services().create(body={"keep_service": {"service_host": "localhost", "service_port": 25108, "service_type": "disk"} }).execute()
+ api.keep_disks().create(body={"keep_disk": {"keep_service_uuid": s1["uuid"] } }).execute()
+ api.keep_disks().create(body={"keep_disk": {"keep_service_uuid": s2["uuid"] } }).execute()
os.chdir(cwd)
s.email = 'gem-dev@curoverse.com'
s.licenses = ['Apache License, Version 2.0']
s.files = ["lib/arvados.rb"]
+ s.required_ruby_version = '>= 2.1.0'
s.add_dependency('google-api-client', '~> 0.6.3')
s.add_dependency('activesupport', '>= 3.2.13')
s.add_dependency('json', '>= 1.7.7')
end
def self.api_exec(method, parameters={})
api_method = arvados_api.send(api_models_sym).send(method.name.to_sym)
- parameters = parameters.
- merge(:api_token => arvados.config['ARVADOS_API_TOKEN'])
parameters.each do |k,v|
parameters[k] = v.to_json if v.is_a? Array or v.is_a? Hash
end
execute(:api_method => api_method,
:authenticated => false,
:parameters => parameters,
- :body => body)
+ :body => body,
+ :headers => {
+ authorization: 'OAuth2 '+arvados.config['ARVADOS_API_TOKEN']
+ })
resp = JSON.parse result.body, :symbolize_names => true
if resp[:errors]
raise Arvados::TransactionFailedError.new(resp[:errors])
# SimpleCov reports
/coverage
+
+# Dev/test SSL certificates
+/self-signed.key
+/self-signed.pem
addressable (2.3.6)
andand (1.3.3)
arel (3.0.3)
- arvados (0.1.20140414145041)
+ arvados (0.1.20140513131358)
activesupport (>= 3.2.13)
andand
google-api-client (~> 0.6.3)
json (>= 1.7.7)
- arvados-cli (0.1.20140414145041)
+ arvados-cli (0.1.20140513131358)
activesupport (~> 3.2, >= 3.2.13)
andand (~> 1.3, >= 1.3.3)
arvados (~> 0.1.0)
railties (>= 3.0, < 5.0)
thor (>= 0.14, < 2.0)
json (1.8.1)
- jwt (0.1.11)
+ jwt (0.1.13)
multi_json (>= 1.5)
launchy (2.4.2)
addressable (~> 2.3)
mime-types (~> 1.16)
treetop (~> 1.4.8)
mime-types (1.25.1)
- multi_json (1.9.2)
+ multi_json (1.10.0)
multipart-post (1.2.0)
net-scp (1.2.0)
net-ssh (>= 2.6.5)
jwt (~> 0.1.4)
multi_json (~> 1.0)
rack (~> 1.2)
- oj (2.7.3)
+ oj (2.9.0)
omniauth (1.1.1)
hashie (~> 1.2)
rack
logger.warn "User #{current_user.andand.uuid} tried to set collection owner_uuid to #{owner_uuid}"
raise ArvadosModel::PermissionDeniedError
end
+
+ # Check permissions on the collection manifest.
+ # If any signature cannot be verified, return 403 Permission denied.
+ perms_ok = true
+ api_token = current_api_client_authorization.andand.api_token
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token,
+ ttl: Rails.configuration.blob_signing_ttl,
+ }
+ resource_attrs[:manifest_text].lines.each do |entry|
+ entry.split[1..-1].each do |tok|
+ # TODO(twp): in Phase 4, fail the request if the locator
+ # lacks a permission signature. (see #2755)
+ loc = Locator.parse(tok)
+ if loc and loc.signature
+ if !api_token
+ logger.warn "No API token present; cannot verify signature on #{loc}"
+ perms_ok = false
+ elsif !Blob.verify_signature tok, signing_opts
+ logger.warn "Invalid signature on locator #{loc}"
+ perms_ok = false
+ end
+ end
+ end
+ end
+ unless perms_ok
+ raise ArvadosModel::PermissionDeniedError
+ end
+
+ # Remove any permission signatures from the manifest.
+ resource_attrs[:manifest_text]
+ .gsub!(/ [[:xdigit:]]{32}(\+[[:digit:]]+)?(\+\S+)/) { |word|
+ word.strip!
+ loc = Locator.parse(word)
+ if loc
+ " " + loc.without_signature.to_s
+ else
+ " " + word
+ end
+ }
+
+ # Save the collection with the stripped manifest.
act_as_system_user do
@object = model_class.new resource_attrs.reject { |k,v| k == :owner_uuid }
begin
@object = @existing_object || @object
end
end
-
if @object
link_attrs = {
owner_uuid: owner_uuid,
end
def show
+ if current_api_client_authorization
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: current_api_client_authorization.api_token,
+ ttl: Rails.configuration.blob_signing_ttl,
+ }
+ @object[:manifest_text]
+ .gsub!(/ [[:xdigit:]]{32}(\+[[:digit:]]+)?(\+\S+)/) { |word|
+ word.strip!
+ loc = Locator.parse(word)
+ if loc
+ " " + Blob.sign_locator(word, signing_opts)
+ else
+ " " + word
+ end
+ }
+ end
render json: @object.as_api_response(:with_data)
end
end
end
end
-
end
--- /dev/null
+class Arvados::V1::KeepServicesController < ApplicationController
+
+ skip_before_filter :find_object_by_uuid, only: :accessible
+ skip_before_filter :render_404_if_no_object, only: :accessible
+
+ def find_objects_for_index
+ # all users can list all keep services
+ @objects = model_class.where('1=1')
+ super
+ end
+
+ def accessible
+ if request.headers['X-External-Client'] == '1'
+ @objects = model_class.where('service_type=?', 'proxy')
+ else
+ @objects = model_class.where('service_type=?', 'disk')
+ end
+ render_list
+ end
+
+end
class ApiClient < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
has_many :api_client_authorizations
end
def scopes_allow_request?(request)
- scopes_allow? [request.method, request.path].join(' ')
+ scopes_allow? [request.request_method, request.path].join(' ')
end
def logged_attributes
-require 'assign_uuid'
+require 'has_uuid'
+
class ArvadosModel < ActiveRecord::Base
self.abstract_class = true
before_save :ensure_ownership_path_leads_to_user
before_destroy :ensure_owner_uuid_is_permitted
before_destroy :ensure_permission_to_destroy
-
before_create :update_modified_by_fields
before_update :maybe_update_modified_by_fields
after_create :log_create
# Note: This only returns permission links. It does not account for
# permissions obtained via user.is_admin or
# user.uuid==object.owner_uuid.
- has_many :permissions, :foreign_key => :head_uuid, :class_name => 'Link', :primary_key => :uuid, :conditions => "link_class = 'permission'"
+ has_many :permissions, :foreign_key => :head_uuid, :class_name => 'Link', :primary_key => :uuid, :conditions => "link_class = 'permission'", dependent: :destroy
class PermissionDeniedError < StandardError
def http_status
def ensure_owner_uuid_is_permitted
raise PermissionDeniedError if !current_user
- self.owner_uuid ||= current_user.uuid
+ if respond_to? :owner_uuid=
+ self.owner_uuid ||= current_user.uuid
+ end
if self.owner_uuid_changed?
if current_user.uuid == self.owner_uuid or
current_user.can? write: self.owner_uuid
class AuthorizedKey < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
before_create :permission_to_set_authorized_user_uuid
class Collection < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
+require 'can_be_an_owner'
+
class Group < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
+ include CanBeAnOwner
api_accessible :user, extend: :common do |t|
t.add :name
class Human < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :properties, Hash
class Job < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :script_parameters, Hash
class JobTask < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :parameters, Hash
class KeepDisk < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
before_validation :ensure_ping_secret
t.add :service_host
t.add :service_port
t.add :service_ssl_flag
+ t.add :keep_service_uuid
end
api_accessible :superuser, :extend => :user do |t|
t.add :ping_secret
@bypass_arvados_authorization = true
self.update_attributes!(o.select { |k,v|
- [:service_host,
- :service_port,
- :service_ssl_flag,
- :bytes_total,
+ [:bytes_total,
:bytes_free,
:is_readable,
:is_writable,
}.merge(last_ping_at: Time.now))
end
+  # A KeepDisk's connection details now live on its associated
+  # KeepService record (see the CreateKeepServices migration, which
+  # dropped these columns from keep_disks).  These readers preserve
+  # the old API attributes by delegating to the linked KeepService;
+  # each returns nil (via andand) when keep_service_uuid does not
+  # match any KeepService row.
+  def service_host
+    KeepService.find_by_uuid(self.keep_service_uuid).andand.service_host
+  end
+
+  def service_port
+    KeepService.find_by_uuid(self.keep_service_uuid).andand.service_port
+  end
+
+  def service_ssl_flag
+    KeepService.find_by_uuid(self.keep_service_uuid).andand.service_ssl_flag
+  end
+
protected
def ensure_ping_secret
--- /dev/null
+class KeepService < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+
+  # Attributes visible to every user: where the service listens and
+  # its type ('disk' or 'proxy' per the fixtures and
+  # KeepServicesController#accessible).
+  api_accessible :user, extend: :common do |t|
+    t.add :service_host
+    t.add :service_port
+    t.add :service_ssl_flag
+    t.add :service_type
+  end
+  # Superusers see nothing beyond the :user template.
+  api_accessible :superuser, :extend => :user do |t|
+  end
+
+end
class Link < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :properties, Hash
--- /dev/null
+# A Locator is used to parse and manipulate Keep locator strings.
+#
+# Locators obey the following syntax:
+#
+# locator ::= address hint*
+# address ::= digest size-hint
+# digest ::= <32 hexadecimal digits>
+# size-hint ::= "+" [0-9]+
+# hint ::= "+" hint-type hint-content
+# hint-type ::= [A-Z]
+# hint-content ::= [A-Za-z0-9@_-]+
+#
+# Individual hints may have their own required format:
+#
+# sign-hint ::= "+A" <40 lowercase hex digits> "@" sign-timestamp
+# sign-timestamp ::= <8 lowercase hex digits>
+
+class Locator
+  # hasharg: 32-hex-digit content digest; sizearg: size hint string
+  # (or nil); hintarg: array of remaining hint strings (without the
+  # leading "+").
+  def initialize(hasharg, sizearg, hintarg)
+    @hash = hasharg
+    @size = sizearg
+    @hints = hintarg
+  end
+
+  # Locator.parse returns a Locator object parsed from the string tok.
+  # Returns nil if tok could not be parsed as a valid locator.
+  def self.parse(tok)
+    begin
+      Locator.parse!(tok)
+    rescue ArgumentError => e
+      nil
+    end
+  end
+
+  # Locator.parse! returns a Locator object parsed from the string tok,
+  # raising an ArgumentError if tok cannot be parsed.
+  def self.parse!(tok)
+    # Capture groups: 1 = digest, 3 = optional size hint, 5 = the
+    # remaining "+"-joined trailer of hints (see grammar above).
+    m = /^([[:xdigit:]]{32})(\+([[:digit:]]+))?(\+([[:upper:]][[:alnum:]+@_-]*))?$/.match(tok.strip)
+    unless m
+      raise ArgumentError.new "could not parse #{tok}"
+    end
+
+    tokhash, _, toksize, _, trailer = m[1..5]
+    tokhints = []
+    if trailer
+      # Each hint must be one uppercase type letter followed by
+      # non-empty hint content; anything else is rejected.
+      trailer.split('+').each do |hint|
+        if hint =~ /^[[:upper:]][[:alnum:]@_-]+$/
+          tokhints.push(hint)
+        else
+          raise ArgumentError.new "unknown hint #{hint}"
+        end
+      end
+    end
+
+    Locator.new(tokhash, toksize, tokhints)
+  end
+
+  # Returns the signature hint supplied with this locator,
+  # or nil if the locator was not signed.
+  def signature
+    @hints.grep(/^A/).first
+  end
+
+  # Returns an unsigned Locator.
+  def without_signature
+    Locator.new(@hash, @size, @hints.reject { |o| o.start_with?("A") })
+  end
+
+  # NOTE(review): this reader shadows Object#hash (normally an
+  # Integer), so Locator objects may misbehave as Hash keys or in
+  # Sets -- confirm no caller relies on that.
+  def hash
+    @hash
+  end
+
+  def size
+    @size
+  end
+
+  def hints
+    @hints
+  end
+
+  # Canonical string form: digest, size, and hints joined with "+".
+  # NOTE(review): a nil @size produces an empty segment
+  # ("hash++hint") -- confirm callers only stringify sized locators.
+  def to_s
+    [ @hash, @size, *@hints ].join('+')
+  end
+end
class Log < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :properties, Hash
class Node < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :info, Hash
class PipelineInstance < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :components, Hash
return false
end
elsif 'success'.in? changed_attributes
+ logger.info "pipeline_instance changed_attributes has success for #{self.uuid}"
if self.success
self.active = false
self.state = Complete
self.state = Failed
end
elsif 'active'.in? changed_attributes
+ logger.info "pipeline_instance changed_attributes has active for #{self.uuid}"
if self.active
if self.state.in? [New, Ready, Paused]
self.state = RunningOnServer
end
if new_record? or 'components'.in? changed_attributes
- state ||= New
- if state == New and self.components_look_ready?
+ self.state ||= New
+ if self.state == New and self.components_look_ready?
self.state = Ready
end
end
class PipelineTemplate < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :components, Hash
class Repository < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
class Specimen < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :properties, Hash
class Trait < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
serialize :properties, Hash
+require 'can_be_an_owner'
+
class User < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
+ include CanBeAnOwner
+
serialize :prefs, Hash
has_many :api_client_authorizations
before_update :prevent_privilege_escalation
class VirtualMachine < ArvadosModel
- include AssignUuid
+ include HasUuid
include KindAndEtag
include CommonApiTemplate
common:
secret_token: ~
+ blob_signing_key: ~
uuid_prefix: <%= Digest::MD5.hexdigest(`hostname`).to_i(16).to_s(36)[0..4] %>
# Git repositories must be readable by api server, or you won't be
# configuration variable so that the primary server can give out the correct
# address of the dedicated websocket server:
#websocket_address: wss://127.0.0.1:3333/websocket
+
+ # Amount of time (in seconds) for which a blob permission signature
+ # remains valid. Default: 2 weeks (1209600 seconds)
+ blob_signing_ttl: 1209600
# 5. Section in application.default.yml called "common"
development:
+ # The blob_signing_key is a string of alphanumeric characters used
+ # to sign permission hints for Keep locators. It must be identical
+ # to the permission key given to Keep. If you run both apiserver
+ # and Keep in development, change this to a hardcoded string and
+ # make sure both systems use the same value.
+ blob_signing_key: ~
production:
# At minimum, you need a nice long randomly generated secret_token here.
+ # Use a long string of alphanumeric characters (at least 36).
secret_token: ~
+ # blob_signing_key is required and must be identical to the
+ # permission secret provisioned to Keep.
+ # Use a long string of alphanumeric characters (at least 36).
+ blob_signing_key: ~
+
uuid_prefix: bogus
# compute_node_domain: example.org
common:
#git_repositories_dir: /var/cache/git
#git_internal_dir: /var/cache/arvados/internal.git
+
+++ /dev/null
-require 'assign_uuid'
+++ /dev/null
-# Be sure to restart your server when you modify this file.
-
-# Your secret key for verifying the integrity of signed cookies.
-# If you change this key, all old signed cookies will become invalid!
-# Make sure the secret is at least 30 characters and all random,
-# no regular words or you'll be exposed to dictionary attacks.
-Server::Application.config.secret_token = 'a107d661bc696fd1263e92c76e7e88d8fa44b6a9793e8f56ccfb23f17cfc95ea8894e28ed7dd132a3a6069673961fb1bf32edd7f8a94c8e88d8a7047bfacdde2'
resources :keep_disks do
post 'ping', on: :collection
end
+ resources :keep_services do
+ get 'accessible', on: :collection
+ end
resources :links
resources :logs
resources :nodes do
add_column :pipeline_instances, :components_summary, :text
end
+ PipelineInstance.reset_column_information
+
act_as_system_user do
PipelineInstance.all.each do |pi|
pi.state = PipelineInstance::New
--- /dev/null
+class CreateKeepServices < ActiveRecord::Migration
+  include CurrentApiClient
+
+  # Creates the keep_services table and moves each keep_disk's
+  # service_host/service_port/service_ssl_flag columns onto shared
+  # KeepService rows: disks that report identical connection details
+  # end up pointing at the same KeepService record.
+  def change
+    act_as_system_user do
+      create_table :keep_services do |t|
+        t.string :uuid, :null => false
+        t.string :owner_uuid, :null => false
+        t.string :modified_by_client_uuid
+        t.string :modified_by_user_uuid
+        t.datetime :modified_at
+        t.string :service_host
+        t.integer :service_port
+        t.boolean :service_ssl_flag
+        t.string :service_type
+
+        t.timestamps
+      end
+      add_index :keep_services, :uuid, :unique => true
+
+      add_column :keep_disks, :keep_service_uuid, :string
+
+      # Pick up the column added above before using the model below.
+      KeepDisk.reset_column_information
+
+      # Deduplicate disks by a "host_port_ssl" composite key; later
+      # disks with the same key overwrite earlier ones (same values).
+      services = {}
+
+      KeepDisk.find_each do |k|
+        services["#{k[:service_host]}_#{k[:service_port]}_#{k[:service_ssl_flag]}"] = {
+          service_host: k[:service_host],
+          service_port: k[:service_port],
+          service_ssl_flag: k[:service_ssl_flag],
+          service_type: 'disk',
+          owner_uuid: k[:owner_uuid]
+        }
+      end
+
+      # Create one KeepService per unique combination.  (The string
+      # key 'uuid' coexists with the symbol keys above; it is only
+      # read back below with the same string key, so this is safe.)
+      services.each do |k, v|
+        v['uuid'] = KeepService.create(v).uuid
+      end
+
+      # Point each disk at its service, then drop the old columns.
+      # NOTE(review): this data migration inside `change` is not
+      # automatically reversible -- confirm rollback is not required.
+      KeepDisk.find_each do |k|
+        k.keep_service_uuid = services["#{k[:service_host]}_#{k[:service_port]}_#{k[:service_ssl_flag]}"]['uuid']
+        k.save
+      end
+
+      remove_column :keep_disks, :service_host
+      remove_column :keep_disks, :service_port
+      remove_column :keep_disks, :service_ssl_flag
+    end
+  end
+end
#
# It's strongly recommended to check this file into your version control system.
-ActiveRecord::Schema.define(:version => 20140501165548) do
+ActiveRecord::Schema.define(:version => 20140519205916) do
t.datetime "last_ping_at"
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
- t.string "service_host"
- t.integer "service_port"
- t.boolean "service_ssl_flag"
+ t.string "keep_service_uuid"
end
add_index "keep_disks", ["filesystem_uuid"], :name => "index_keep_disks_on_filesystem_uuid"
add_index "keep_disks", ["last_ping_at"], :name => "index_keep_disks_on_last_ping_at"
add_index "keep_disks", ["node_uuid"], :name => "index_keep_disks_on_node_uuid"
- add_index "keep_disks", ["service_host", "service_port", "last_ping_at"], :name => "keep_disks_service_host_port_ping_at_index"
add_index "keep_disks", ["uuid"], :name => "index_keep_disks_on_uuid", :unique => true
+ create_table "keep_services", :force => true do |t|
+ t.string "uuid", :null => false
+ t.string "owner_uuid", :null => false
+ t.string "modified_by_client_uuid"
+ t.string "modified_by_user_uuid"
+ t.datetime "modified_at"
+ t.string "service_host"
+ t.integer "service_port"
+ t.boolean "service_ssl_flag"
+ t.string "service_type"
+ t.datetime "created_at", :null => false
+ t.datetime "updated_at", :null => false
+ end
+
+ add_index "keep_services", ["uuid"], :name => "index_keep_services_on_uuid", :unique => true
+
create_table "links", :force => true do |t|
t.string "uuid"
t.string "owner_uuid"
+++ /dev/null
-module AssignUuid
-
- def self.included(base)
- base.extend(ClassMethods)
- base.before_create :assign_uuid
- end
-
- module ClassMethods
- def uuid_prefix
- Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]
- end
- def generate_uuid
- [Server::Application.config.uuid_prefix,
- self.uuid_prefix,
- rand(2**256).to_s(36)[-15..-1]].
- join '-'
- end
- end
-
- protected
-
- def respond_to_uuid?
- self.respond_to? :uuid
- end
-
- def assign_uuid
- return true if !self.respond_to_uuid?
- return true if uuid and current_user and current_user.is_admin
- self.uuid = self.class.generate_uuid
- end
-end
--- /dev/null
+# Protect referential integrity of owner_uuid columns in other tables
+# that can refer to the uuid column in this table.
+
+module CanBeAnOwner
+
+  def self.included(base)
+    # Rails' "has_many" can prevent us from destroying the owner
+    # record when other objects refer to it.  Declare a restricting
+    # has_many for every table with an owner_uuid column (except this
+    # model's own table and schema_migrations).
+    ActiveRecord::Base.connection.tables.each do |t|
+      next if t == base.table_name
+      next if t == 'schema_migrations'
+      klass = t.classify.constantize
+      next unless klass and 'owner_uuid'.in?(klass.columns.collect(&:name))
+      base.has_many(t.to_sym,
+                    foreign_key: :owner_uuid,
+                    primary_key: :uuid,
+                    dependent: :restrict)
+    end
+    # We need custom protection for changing an owner's primary
+    # key. (Apart from this restriction, admins are allowed to change
+    # UUIDs.)
+    base.validate :restrict_uuid_change_breaking_associations
+  end
+
+  protected
+
+  # Validation: forbid changing this record's uuid while other
+  # records still name the old uuid as their owner_uuid.
+  def restrict_uuid_change_breaking_associations
+    return true if new_record? or not uuid_changed?
+
+    # Check for objects that have my old uuid listed as their owner.
+    # NOTE(review): assumes reflection#foreign_key returns the symbol
+    # exactly as configured above -- confirm for this Rails version,
+    # since a string return value would make this comparison always
+    # false.
+    self.class.reflect_on_all_associations(:has_many).each do |assoc|
+      next unless assoc.foreign_key == :owner_uuid
+      if assoc.klass.where(owner_uuid: uuid_was).any?
+        errors.add(:uuid,
+                   "cannot be changed on a #{self.class} that owns objects")
+        return false
+      end
+    end
+
+    # if I owned myself before, I'll just continue to own myself with
+    # my new uuid.
+    if owner_uuid == uuid_was
+      self.owner_uuid = uuid
+    end
+  end
+
+end
--- /dev/null
+module HasUuid
+
+  def self.included(base)
+    base.extend(ClassMethods)
+    base.before_create :assign_uuid
+    base.before_destroy :destroy_permission_links
+    # Non-permission links pointing at this object block destruction
+    # (dependent: :restrict); permission links are instead cleaned up
+    # by destroy_permission_links below.
+    base.has_many :links_via_head, class_name: 'Link', foreign_key: :head_uuid, primary_key: :uuid, conditions: "not (link_class = 'permission')", dependent: :restrict
+    base.has_many :links_via_tail, class_name: 'Link', foreign_key: :tail_uuid, primary_key: :uuid, conditions: "not (link_class = 'permission')", dependent: :restrict
+  end
+
+  module ClassMethods
+    # Five-character model-specific uuid segment derived from the
+    # class name.
+    def uuid_prefix
+      Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]
+    end
+    # "<site prefix>-<model prefix>-<15 random base36 characters>"
+    def generate_uuid
+      [Server::Application.config.uuid_prefix,
+       self.uuid_prefix,
+       rand(2**256).to_s(36)[-15..-1]].
+        join '-'
+    end
+  end
+
+  protected
+
+  def respond_to_uuid?
+    self.respond_to? :uuid
+  end
+
+  # before_create hook: admins may supply their own non-empty uuid;
+  # any other supplied value (including "") is replaced with a
+  # generated one.
+  def assign_uuid
+    return true if !self.respond_to_uuid?
+    if (uuid.is_a?(String) and uuid.length>0 and
+        current_user and current_user.is_admin)
+      return true
+    end
+    self.uuid = self.class.generate_uuid
+  end
+
+  # before_destroy hook: remove permission links in either direction
+  # so they are not orphaned when this object goes away.
+  def destroy_permission_links
+    Link.destroy_all(['link_class=? and (head_uuid=? or tail_uuid=?)',
+                      'permission', uuid, uuid])
+  end
+end
if operand.is_a? Array
cond_out << "#{ar_table_name}.#{attr} #{operator} (?)"
param_out << operand
+ if operator == 'not in' and not operand.include?(nil)
+ # explicitly allow NULL
+ cond_out[-1] = "(#{cond_out[-1]} OR #{ar_table_name}.#{attr} IS NULL)"
+ end
else
raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
"for '#{operator}' operator in filters")
j_done[:wait_thr].value
jobrecord = Job.find_by_uuid(job_done.uuid)
- jobrecord.running = false
- jobrecord.finished_at ||= Time.now
- # Don't set 'jobrecord.success = false' because if the job failed to run due to an
- # issue with crunch-job or slurm, we want the job to stay in the queue.
- jobrecord.save!
+ if jobrecord.started_at
+ # Clean up state fields in case crunch-job exited without
+ # putting the job in a suitable "finished" state.
+ jobrecord.running = false
+ jobrecord.finished_at ||= Time.now
+ if jobrecord.success.nil?
+ jobrecord.success = false
+ end
+ jobrecord.save!
+ else
+ # Don't fail the job if crunch-job didn't even get as far as
+ # starting it. If the job failed to run due to an infrastructure
+ # issue with crunch-job or slurm, we want the job to stay in the
+ # queue.
+ end
# Invalidate the per-job auth token
j_done[:job_auth].update_attributes expires_at: Time.now
expires_at: 2038-01-01 00:00:00
scopes: []
+active_all_collections:
+ api_client: untrusted
+ user: active
+ api_token: activecollectionsabcdefghijklmnopqrstuvwxyz1234567
+ expires_at: 2038-01-01 00:00:00
+ scopes: ["GET /arvados/v1/collections/", "GET /arvados/v1/keep_disks"]
+
active_userlist:
api_client: untrusted
user: active
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+
+multilevel_collection_1:
+ uuid: 1fd08fc162a5c6413070a8bd0bffc818+150
+  owner_uuid: qr1hi-tpzed-000000000000000 # NOTE(review): prefix "qr1hi" differs from the "zzzzz" prefix used by the other fixtures -- confirm intentional
+ created_at: 2014-02-03T17:22:54Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_at: 2014-02-03T17:22:54Z
+ updated_at: 2014-02-03T17:22:54Z
+ manifest_text: ". 0:0:file1 0:0:file2 0:0:file3\n./dir1 0:0:file1 0:0:file2 0:0:file3\n./dir1/subdir 0:0:file1 0:0:file2 0:0:file3\n./dir2 0:0:file1 0:0:file2 0:0:file3\n"
+
+multilevel_collection_2:
+ # All of this collection's files are deep in subdirectories.
+ uuid: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+ owner_uuid: qr1hi-tpzed-000000000000000
+ created_at: 2014-02-03T17:22:54Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_at: 2014-02-03T17:22:54Z
+ updated_at: 2014-02-03T17:22:54Z
+ manifest_text: "./dir1/sub1 0:0:a 0:0:b\n./dir2/sub2 0:0:c 0:0:d\n"
success: true
output: fa7aeb5140e2848d39b416daeef4ffc5+45
priority: ~
- log: d41d8cd98f00b204e9800998ecf8427e+0
+ log: ea10d51bcf88862dbcc36eb292017dfd+45
is_locked_by_uuid: ~
tasks_summary:
failed: 0
uuid: zzzzz-penuu-5w2o2t1q5wy7fhn
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
- service_host: keep0.qr1hi.arvadosapi.com
- service_port: 25107
- service_ssl_flag: false
+ keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
last_read_at: <%= 1.minute.ago.to_s(:db) %>
last_write_at: <%= 2.minute.ago.to_s(:db) %>
last_ping_at: <%= 3.minute.ago.to_s(:db) %>
uuid: zzzzz-penuu-4kmq58ui07xuftx
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
- service_host: keep0.qr1hi.arvadosapi.com
- service_port: 25107
- service_ssl_flag: false
+ keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
last_read_at: <%= 1.minute.ago.to_s(:db) %>
last_write_at: <%= 2.day.ago.to_s(:db) %>
last_ping_at: <%= 3.minute.ago.to_s(:db) %>
uuid: zzzzz-penuu-1ydrih9k2er5j11
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
node_uuid: zzzzz-7ekkf-2z3mc76g2q73aio
- service_host: keep1.qr1hi.arvadosapi.com
- service_port: 25107
- service_ssl_flag: false
+ keep_service_uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0
last_read_at: <%= 1.minute.ago.to_s(:db) %>
last_write_at: <%= 2.minute.ago.to_s(:db) %>
last_ping_at: <%= 3.minute.ago.to_s(:db) %>
--- /dev/null
+keep0:
+ uuid: zzzzz-bi6l4-6zhilxar6r8ey90
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ service_host: keep0.qr1hi.arvadosapi.com
+ service_port: 25107
+ service_ssl_flag: false
+ service_type: disk
+
+keep1:
+ uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ service_host: keep1.qr1hi.arvadosapi.com
+ service_port: 25107
+ service_ssl_flag: false
+ service_type: disk
+
+proxy:
+ uuid: zzzzz-bi6l4-h0a0xwut9qa6g3a
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ service_host: keep.qr1hi.arvadosapi.com
+ service_port: 25333
+ service_ssl_flag: true
+ service_type: proxy
name: can_manage
head_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
properties: {}
+
+multilevel_collection_1_readable_by_active:
+ uuid: zzzzz-o0j2j-dp1d8395ldqw22j
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-01-24 20:42:26 -0800
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-01-24 20:42:26 -0800
+ updated_at: 2014-01-24 20:42:26 -0800
+ tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ link_class: permission
+ name: can_read
+ head_uuid: 1fd08fc162a5c6413070a8bd0bffc818+150
+ properties: {}
id: 1
uuid: zzzzz-xxxxx-pshmckwoma9plh7
object_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+ event_at: <%= 1.minute.ago.to_s(:db) %>
log2: # admin changes repository2, which is owned by active user
id: 2
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+ event_at: <%= 2.minute.ago.to_s(:db) %>
log3: # admin changes specimen owned_by_spectator
id: 3
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
object_uuid: zzzzz-2x53u-3b0xxwzlbzxq5yr # specimen owned_by_spectator
object_owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r # spectator user
+ event_at: <%= 3.minute.ago.to_s(:db) %>
log4: # foo collection added, readable by active through link
id: 4
owner_uuid: zzzzz-tpzed-000000000000000 # system user
object_uuid: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45 # foo file
object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+ event_at: <%= 4.minute.ago.to_s(:db) %>
log5: # baz collection added, readable by active and spectator through group 'all users' group membership
id: 5
owner_uuid: zzzzz-tpzed-000000000000000 # system user
object_uuid: ea10d51bcf88862dbcc36eb292017dfd+45 # baz file
object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+ event_at: <%= 5.minute.ago.to_s(:db) %>
assert_equal true, !!found.index('1f4b0bc7583c2a7f9102c395f4ffc5e3+45')
end
+ test "create collection with signed manifest" do
+ authorize_with :active
+ locators = %w(
+ d41d8cd98f00b204e9800998ecf8427e+0
+ acbd18db4cc2f85cedef654fccc4a4d8+3
+ ea10d51bcf88862dbcc36eb292017dfd+45)
+
+ unsigned_manifest = locators.map { |loc|
+ ". " + loc + " 0:0:foo.txt\n"
+ }.join()
+ manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+ '+' +
+ unsigned_manifest.length.to_s
+
+ # build a manifest with both signed and unsigned locators.
+ # TODO(twp): in phase 4, all locators will need to be signed, so
+ # this test should break and will need to be rewritten. Issue #2755.
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token(:active),
+ }
+ signed_manifest =
+ ". " + locators[0] + " 0:0:foo.txt\n" +
+ ". " + Blob.sign_locator(locators[1], signing_opts) + " 0:0:foo.txt\n" +
+ ". " + Blob.sign_locator(locators[2], signing_opts) + " 0:0:foo.txt\n"
+
+ post :create, {
+ collection: {
+ manifest_text: signed_manifest,
+ uuid: manifest_uuid,
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ resp = JSON.parse(@response.body)
+ assert_equal manifest_uuid, resp['uuid']
+ assert_equal 48, resp['data_size']
+ # All of the locators in the output must be signed.
+ resp['manifest_text'].lines.each do |entry|
+ m = /([[:xdigit:]]{32}\+\S+)/.match(entry)
+ if m
+ assert Blob.verify_signature m[0], signing_opts
+ end
+ end
+ end
+
+ test "create collection with signed manifest and explicit TTL" do
+ authorize_with :active
+ locators = %w(
+ d41d8cd98f00b204e9800998ecf8427e+0
+ acbd18db4cc2f85cedef654fccc4a4d8+3
+ ea10d51bcf88862dbcc36eb292017dfd+45)
+
+ unsigned_manifest = locators.map { |loc|
+ ". " + loc + " 0:0:foo.txt\n"
+ }.join()
+ manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+ '+' +
+ unsigned_manifest.length.to_s
+
+ # build a manifest with both signed and unsigned locators.
+ # TODO(twp): in phase 4, all locators will need to be signed, so
+ # this test should break and will need to be rewritten. Issue #2755.
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token(:active),
+ ttl: 3600 # 1 hour
+ }
+ signed_manifest =
+ ". " + locators[0] + " 0:0:foo.txt\n" +
+ ". " + Blob.sign_locator(locators[1], signing_opts) + " 0:0:foo.txt\n" +
+ ". " + Blob.sign_locator(locators[2], signing_opts) + " 0:0:foo.txt\n"
+
+ post :create, {
+ collection: {
+ manifest_text: signed_manifest,
+ uuid: manifest_uuid,
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ resp = JSON.parse(@response.body)
+ assert_equal manifest_uuid, resp['uuid']
+ assert_equal 48, resp['data_size']
+ # All of the locators in the output must be signed.
+ resp['manifest_text'].lines.each do |entry|
+ m = /([[:xdigit:]]{32}\+\S+)/.match(entry)
+ if m
+ assert Blob.verify_signature m[0], signing_opts
+ end
+ end
+ end
+
+ test "create fails with invalid signature" do
+ authorize_with :active
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token(:active),
+ }
+
+ # Generate a locator with a bad signature.
+ unsigned_locator = "d41d8cd98f00b204e9800998ecf8427e+0"
+ bad_locator = unsigned_locator + "+Affffffff@ffffffff"
+ assert !Blob.verify_signature(bad_locator, signing_opts)
+
+ # Creating a collection with this locator should
+ # produce 403 Permission denied.
+ unsigned_manifest = ". #{unsigned_locator} 0:0:foo.txt\n"
+ manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+ '+' +
+ unsigned_manifest.length.to_s
+
+ bad_manifest = ". #{bad_locator} 0:0:foo.txt\n"
+ post :create, {
+ collection: {
+ manifest_text: bad_manifest,
+ uuid: manifest_uuid
+ }
+ }
+
+ assert_response 403
+ end
+
+ test "create fails with uuid of signed manifest" do
+ authorize_with :active
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token(:active),
+ }
+
+ unsigned_locator = "d41d8cd98f00b204e9800998ecf8427e+0"
+ signed_locator = Blob.sign_locator(unsigned_locator, signing_opts)
+ signed_manifest = ". #{signed_locator} 0:0:foo.txt\n"
+ manifest_uuid = Digest::MD5.hexdigest(signed_manifest) +
+ '+' +
+ signed_manifest.length.to_s
+
+ post :create, {
+ collection: {
+ manifest_text: signed_manifest,
+ uuid: manifest_uuid
+ }
+ }
+
+ assert_response 422
+ end
+
+ test "multiple locators per line" do
+ authorize_with :active
+ locators = %w(
+ d41d8cd98f00b204e9800998ecf8427e+0
+ acbd18db4cc2f85cedef654fccc4a4d8+3
+ ea10d51bcf88862dbcc36eb292017dfd+45)
+
+ manifest_text = [".", *locators, "0:0:foo.txt\n"].join(" ")
+ manifest_uuid = Digest::MD5.hexdigest(manifest_text) +
+ '+' +
+ manifest_text.length.to_s
+
+ post :create, {
+ collection: {
+ manifest_text: manifest_text,
+ uuid: manifest_uuid,
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ resp = JSON.parse(@response.body)
+ assert_equal manifest_uuid, resp['uuid']
+ assert_equal 48, resp['data_size']
+ assert_equal resp['manifest_text'], manifest_text
+ end
+
+ test "multiple signed locators per line" do
+ authorize_with :active
+ locators = %w(
+ d41d8cd98f00b204e9800998ecf8427e+0
+ acbd18db4cc2f85cedef654fccc4a4d8+3
+ ea10d51bcf88862dbcc36eb292017dfd+45)
+
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: api_token(:active),
+ }
+
+ unsigned_manifest = [".", *locators, "0:0:foo.txt\n"].join(" ")
+ manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+ '+' +
+ unsigned_manifest.length.to_s
+
+ signed_locators = locators.map { |loc| Blob.sign_locator loc, signing_opts }
+ signed_manifest = [".", *signed_locators, "0:0:foo.txt\n"].join(" ")
+
+ post :create, {
+ collection: {
+ manifest_text: signed_manifest,
+ uuid: manifest_uuid,
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ resp = JSON.parse(@response.body)
+ assert_equal manifest_uuid, resp['uuid']
+ assert_equal 48, resp['data_size']
+ # All of the locators in the output must be signed.
+ # Each line is of the form "path locator locator ... 0:0:file.txt"
+ # entry.split[1..-2] will yield just the tokens in the middle of the line
+ returned_locator_count = 0
+ resp['manifest_text'].lines.each do |entry|
+ entry.split[1..-2].each do |tok|
+ returned_locator_count += 1
+ assert Blob.verify_signature tok, signing_opts
+ end
+ end
+ assert_equal locators.count, returned_locator_count
+ end
end
--- /dev/null
+require 'test_helper'
+
+class Arvados::V1::FiltersTest < ActionController::TestCase
+  # Regression test for the filter code: a "not in" filter whose
+  # operand list does not itself include nil must still match rows
+  # where the filtered column is NULL (SQL "not in" alone excludes
+  # NULL rows).
+  test '"not in" filter passes null values' do
+    @controller = Arvados::V1::GroupsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['group_class', 'not in', ['folder']] ],
+      controller: 'groups',
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_includes(found.collect(&:group_class), nil,
+                    "'group_class not in ['folder']' filter should pass null")
+  end
+end
authorize_with :admin
post :ping, {
ping_secret: '', # required by discovery doc, but ignored
- service_host: '::1',
- service_port: 55555,
- service_ssl_flag: false,
filesystem_uuid: 'eb1e77a1-db84-4193-b6e6-ca2894f67d5f'
}
assert_response :success
authorize_with :admin
opts = {
ping_secret: '',
- service_host: '::1',
- service_port: 55555,
- service_ssl_flag: false
}
post :ping, opts
assert_response :success
test "refuse to add keep disk without admin token" do
post :ping, {
ping_secret: '',
- service_host: '::1',
- service_port: 55555,
- service_ssl_flag: false
}
assert_response 404
end
assert_response :success
items = JSON.parse(@response.body)['items']
assert_not_equal 0, items.size
+
+ # Check these are still included
+ assert items[0]['service_host']
+ assert items[0]['service_port']
end
# active user sees non-secret attributes of keep disks
end
end
- test "search keep_disks by service_port with >= query" do
- authorize_with :active
- get :index, {
- filters: [['service_port', '>=', 25107]]
- }
- assert_response :success
- assert_equal true, assigns(:objects).any?
- end
-
- test "search keep_disks by service_port with < query" do
- authorize_with :active
- get :index, {
- filters: [['service_port', '<', 25107]]
- }
- assert_response :success
- assert_equal false, assigns(:objects).any?
- end
-
- test "search keep_disks with 'any' operator" do
+ test "search keep_services with 'any' operator" do
authorize_with :active
get :index, {
where: { any: ['contains', 'o2t1q5w'] }
assert_equal true, !!found.index('zzzzz-penuu-5w2o2t1q5wy7fhn')
end
+
end
--- /dev/null
+require 'test_helper'
+
+class Arvados::V1::KeepServicesControllerTest < ActionController::TestCase
+
+  # Fixture keep_services listen on port 25107 or higher, so a
+  # "< 25107" filter should match nothing.
+  test "search keep_services by service_port with < query" do
+    authorize_with :active
+    get :index, {
+      filters: [['service_port', '<', 25107]]
+    }
+    assert_response :success
+    assert_equal false, assigns(:objects).any?
+  end
+
+  # ...and a ">= 25107" filter should match at least one service.
+  # NOTE(review): test name still says "keep_disks" although this
+  # controller serves keep_services -- consider renaming.
+  test "search keep_disks by service_port with >= query" do
+    authorize_with :active
+    get :index, {
+      filters: [['service_port', '>=', 25107]]
+    }
+    assert_response :success
+    assert_equal true, assigns(:objects).any?
+  end
+
+end
--- /dev/null
+require 'test_helper'
+
+class KeepProxyTest < ActionDispatch::IntegrationTest
+  # The "accessible" endpoint returns disk services by default, and
+  # only proxy services when the X-External-Client header is "1"
+  # (values match the keep_services fixtures).
+  test "request keep disks" do
+    get "/arvados/v1/keep_services/accessible", {:format => :json}, auth(:active)
+    assert_response :success
+    services = json_response['items']
+
+    assert_equal 2, services.length
+    assert_equal 'disk', services[0]['service_type']
+    assert_equal 'disk', services[1]['service_type']
+
+    get "/arvados/v1/keep_services/accessible", {:format => :json}, auth(:active).merge({'HTTP_X_EXTERNAL_CLIENT' => '1'})
+    assert_response :success
+    services = json_response['items']
+
+    assert_equal 1, services.length
+
+    assert_equal "zzzzz-bi6l4-h0a0xwut9qa6g3a", services[0]['uuid']
+    assert_equal "keep.qr1hi.arvadosapi.com", services[0]['service_host']
+    assert_equal 25333, services[0]['service_port']
+    assert_equal true, services[0]['service_ssl_flag']
+    assert_equal 'proxy', services[0]['service_type']
+  end
+end
--- /dev/null
+require 'test_helper'
+
+# Tests uuid-assignment rules common to ArvadosModel subclasses
+# (exercised here via Specimen): only admins may choose an object's
+# uuid, and an empty uuid is never accepted.
+class ArvadosModelTest < ActiveSupport::TestCase
+  fixtures :all
+
+  # Creates a Specimen with the given extra attributes merged in.
+  # Returns the record only if it passed validation, nil otherwise.
+  def create_with_attrs attrs
+    a = Specimen.create({material: 'caloric'}.merge(attrs))
+    a if a.valid?
+  end
+
+  test 'non-admin cannot assign uuid' do
+    set_user_from_auth :active_trustedclient
+    want_uuid = Specimen.generate_uuid
+    a = create_with_attrs(uuid: want_uuid)
+    # The requested uuid must be ignored and a fresh one auto-assigned.
+    assert_not_equal want_uuid, a.uuid, "Non-admin should not assign uuid."
+    assert a.uuid.length==27, "Auto assigned uuid length is wrong."
+  end
+
+  test 'admin can assign valid uuid' do
+    set_user_from_auth :admin_trustedclient
+    want_uuid = Specimen.generate_uuid
+    a = create_with_attrs(uuid: want_uuid)
+    assert_equal want_uuid, a.uuid, "Admin should assign valid uuid."
+    assert a.uuid.length==27, "Auto assigned uuid length is wrong."
+  end
+
+  test 'admin cannot assign empty uuid' do
+    set_user_from_auth :admin_trustedclient
+    a = create_with_attrs(uuid: "")
+    # Even an admin's explicit empty uuid is replaced by a generated one.
+    assert_not_equal "", a.uuid, "Admin should not assign empty uuid."
+    assert a.uuid.length==27, "Auto assigned uuid length is wrong."
+  end
+
+end
--- /dev/null
+require 'test_helper'
+
+# Placeholder unit test for the KeepService model (generator stub;
+# no model-level behavior is tested yet).
+class KeepServiceTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
fixtures :all
setup do
- Thread.current[:user] = users(:active)
+ set_user_from_auth :admin_trustedclient
end
test 'name links with the same tail_uuid must be unique' do
assert a.invalid?, "invalid name was accepted as valid?"
end
end
+
+  # Deleting an object that is the head of any Link must be refused:
+  # the association-level restriction raises DeleteRestrictionError
+  # instead of orphaning the link.
+  test "cannot delete an object referenced by links" do
+    ob = Specimen.create
+    link = Link.create(tail_uuid: users(:active).uuid,
+                       head_uuid: ob.uuid,
+                       link_class: 'test',
+                       name: 'test')
+    assert_raises(ActiveRecord::DeleteRestrictionError,
+                  "should not delete #{ob.uuid} with link #{link.uuid}") do
+      ob.destroy
+    end
+  end
end
--- /dev/null
+require 'test_helper'
+
+# Test referential integrity: ensure we cannot leave any object
+# without owners by deleting a user or group.
+#
+# "o" is an owner.
+# "i" is an item.
+
+class OwnerTest < ActiveSupport::TestCase
+  fixtures :users, :groups, :specimens
+
+  setup do
+    # These checks are about referential integrity, not permissions,
+    # so run everything with full admin credentials.
+    set_user_from_auth :admin_trustedclient
+  end
+
+  # NOTE(review): these class-body calls look intended to force the
+  # model classes to load before the each-loop below enumerates them —
+  # confirm they are actually needed.
+  User.all
+  Group.all
+  [User, Group].each do |o_class|
+    test "create object with legit #{o_class} owner" do
+      o = o_class.create
+      i = Specimen.create(owner_uuid: o.uuid)
+      assert i.valid?, "new item should pass validation"
+      assert i.uuid, "new item should have an ID"
+      assert Specimen.where(uuid: i.uuid).any?, "new item should really be in DB"
+    end
+
+    test "create object with non-existent #{o_class} owner" do
+      # All three creation paths (create!, create, new+valid?) must
+      # reject an owner_uuid that does not exist.
+      assert_raises(ActiveRecord::RecordInvalid,
+                    "create should fail with random owner_uuid") do
+        i = Specimen.create!(owner_uuid: o_class.generate_uuid)
+      end
+
+      i = Specimen.create(owner_uuid: o_class.generate_uuid)
+      assert !i.valid?, "object with random owner_uuid should not be valid?"
+
+      i = Specimen.new(owner_uuid: o_class.generate_uuid)
+      assert !i.valid?, "new item should not pass validation"
+      assert !i.uuid, "new item should not have an ID"
+    end
+
+    # Every (old owner class, new owner class) combination must allow
+    # reassignment to another existing owner.
+    [User, Group].each do |new_o_class|
+      test "change owner from legit #{o_class} to legit #{new_o_class} owner" do
+        o = o_class.create
+        i = Specimen.create(owner_uuid: o.uuid)
+        new_o = new_o_class.create
+        assert(Specimen.where(uuid: i.uuid).any?,
+               "new item should really be in DB")
+        assert(i.update_attributes(owner_uuid: new_o.uuid),
+               "should change owner_uuid from #{o.uuid} to #{new_o.uuid}")
+      end
+    end
+
+    test "delete #{o_class} that owns nothing" do
+      o = o_class.create
+      assert(o_class.where(uuid: o.uuid).any?,
+             "new #{o_class} should really be in DB")
+      assert(o.destroy, "should delete #{o_class} that owns nothing")
+      assert_equal(false, o_class.where(uuid: o.uuid).any?,
+                   "#{o.uuid} should not be in DB after deleting")
+    end
+
+    test "change uuid of #{o_class} that owns nothing" do
+      # (we're relying on our admin credentials here)
+      o = o_class.create
+      assert(o_class.where(uuid: o.uuid).any?,
+             "new #{o_class} should really be in DB")
+      old_uuid = o.uuid
+      # Replace the last 10 characters with a random base-36 string.
+      new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+      assert(o.update_attributes(uuid: new_uuid),
+             "should change #{o_class} uuid from #{old_uuid} to #{new_uuid}")
+      assert_equal(false, o_class.where(uuid: old_uuid).any?,
+                   "#{old_uuid} should disappear when renamed to #{new_uuid}")
+    end
+  end
+
+  # Owners that DO own something must be neither deletable nor
+  # renamable (either would orphan the owned objects).
+  ['users(:active)', 'groups(:afolder)'].each do |ofixt|
+    test "delete #{ofixt} that owns other objects" do
+      o = eval ofixt
+      assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
+                   "need something to be owned by #{o.uuid} for this test")
+
+      assert_raises(ActiveRecord::DeleteRestrictionError,
+                    "should not delete #{ofixt} that owns objects") do
+        o.destroy
+      end
+    end
+
+    test "change uuid of #{ofixt} that owns other objects" do
+      o = eval ofixt
+      assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
+                   "need something to be owned by #{o.uuid} for this test")
+      old_uuid = o.uuid
+      new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+      assert(!o.update_attributes(uuid: new_uuid),
+             "should not change uuid of #{ofixt} that owns objects")
+    end
+  end
+
+  # A user may own itself; self-ownership must not block deletion.
+  test "delete User that owns self" do
+    o = User.create
+    assert User.where(uuid: o.uuid).any?, "new User should really be in DB"
+    assert_equal(true, o.update_attributes(owner_uuid: o.uuid),
+                 "setting owner to self should work")
+    assert(o.destroy, "should delete User that owns self")
+    assert_equal(false, User.where(uuid: o.uuid).any?,
+                 "#{o.uuid} should not be in DB after deleting")
+  end
+
+  # Renaming a self-owned user must update owner_uuid along with uuid.
+  test "change uuid of User that owns self" do
+    o = User.create
+    assert User.where(uuid: o.uuid).any?, "new User should really be in DB"
+    assert_equal(true, o.update_attributes(owner_uuid: o.uuid),
+                 "setting owner to self should work")
+    old_uuid = o.uuid
+    new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+    assert(o.update_attributes(uuid: new_uuid),
+           "should change uuid of User that owns self")
+    assert_equal(false, User.where(uuid: old_uuid).any?,
+                 "#{old_uuid} should not be in DB after deleting")
+    assert_equal(true, User.where(uuid: new_uuid).any?,
+                 "#{new_uuid} should be in DB after renaming")
+    assert_equal(new_uuid, User.where(uuid: new_uuid).first.owner_uuid,
+                 "#{new_uuid} should be its own owner in DB after renaming")
+  end
+
+end
--- /dev/null
+#! /usr/bin/env python
+
+import arvados
+
+import argparse
+import cgi
+import csv
+import json
+import logging
+import math
+import pprint
+import re
+import threading
+import urllib2
+
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+from collections import defaultdict, Counter
+from functools import partial
+from operator import itemgetter
+from SocketServer import ThreadingMixIn
+
+arv = arvados.api('v1')
+
+# Adapted from http://stackoverflow.com/questions/4180980/formatting-data-quantity-capacity-as-string
+byteunits = ('B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')
+def fileSizeFormat(value):
+  """Returns a fixed-width human-readable string (e.g. '  12.34 MiB')
+  for a byte count, using binary (1024-based) units."""
+  exponent = 0 if value == 0 else int(math.log(value, 1024))
+  return "%7.2f %-3s" % (float(value) / pow(1024, exponent),
+                         byteunits[exponent])
+
+def percentageFloor(x):
+  """ Returns a float which is the input rounded down to the nearest 0.01.
+
+e.g. percentageFloor(0.941354) = 0.94
+"""
+  return math.floor(x*100) / 100.0
+
+
+def byteSizeFromValidUuid(valid_uuid):
+ return int(valid_uuid.split('+')[1])
+
+class maxdict(dict):
+  """A dictionary that holds the largest value entered for each key."""
+  def addValue(self, key, value):
+    # dict.get returns None for a missing key; in Python 2 None
+    # compares less than any number, so max() keeps the new value.
+    dict.__setitem__(self, key, max(dict.get(self, key), value))
+  def addValues(self, kv_pairs):
+    # Fold each (key, value) pair into the running per-key maximum.
+    for key,value in kv_pairs:
+      self.addValue(key, value)
+  def addDict(self, d):
+    self.addValues(d.items())
+
+class CollectionInfo:
+ DEFAULT_PERSISTER_REPLICATION_LEVEL=2
+ all_by_uuid = {}
+
+ def __init__(self, uuid):
+ if CollectionInfo.all_by_uuid.has_key(uuid):
+ raise ValueError('Collection for uuid "%s" already exists.' % uuid)
+ self.uuid = uuid
+ self.block_uuids = set() # uuids of keep blocks in this collection
+ self.reader_uuids = set() # uuids of users who can read this collection
+ self.persister_uuids = set() # uuids of users who want this collection saved
+ # map from user uuid to replication level they desire
+ self.persister_replication = maxdict()
+
+ # The whole api response in case we need anything else later.
+ self.api_response = []
+ CollectionInfo.all_by_uuid[uuid] = self
+
+ def byteSize(self):
+ return sum(map(byteSizeFromValidUuid, self.block_uuids))
+
+ def __str__(self):
+ return ('CollectionInfo uuid: %s\n'
+ ' %d block(s) containing %s\n'
+ ' reader_uuids: %s\n'
+ ' persister_replication: %s' %
+ (self.uuid,
+ len(self.block_uuids),
+ fileSizeFormat(self.byteSize()),
+ pprint.pformat(self.reader_uuids, indent = 15),
+ pprint.pformat(self.persister_replication, indent = 15)))
+
+ @staticmethod
+ def get(uuid):
+ if not CollectionInfo.all_by_uuid.has_key(uuid):
+ CollectionInfo(uuid)
+ return CollectionInfo.all_by_uuid[uuid]
+
+
+def extractUuid(candidate):
+ """ Returns a canonical (hash+size) uuid from a valid uuid, or None if candidate is not a valid uuid."""
+ match = re.match('([0-9a-fA-F]{32}\+[0-9]+)(\+[^+]+)*$', candidate)
+ return match and match.group(1)
+
+def checkUserIsAdmin():
+  """Warns if the current API user is not an admin (non-admins will
+  see only a subset of the data); exits with status 1 when
+  --require-admin-user is in effect (the default)."""
+  current_user = arv.users().current().execute()
+
+  if not current_user['is_admin']:
+    log.warning('Current user %s (%s - %s) does not have '
+                'admin access and will not see much of the data.',
+                current_user['full_name'],
+                current_user['email'],
+                current_user['uuid'])
+    if args.require_admin_user:
+      log.critical('Exiting, rerun with --no-require-admin-user '
+                   'if you wish to continue.')
+      exit(1)
+
+def buildCollectionsList():
+ if args.uuid:
+ return [args.uuid,]
+ else:
+ collections_list_response = arv.collections().list(limit=args.max_api_results).execute()
+
+ print ('Returned %d of %d collections.' %
+ (len(collections_list_response['items']),
+ collections_list_response['items_available']))
+
+ return [item['uuid'] for item in collections_list_response['items']]
+
+
+def readCollections(collection_uuids):
+ for collection_uuid in collection_uuids:
+ collection_block_uuids = set()
+ collection_response = arv.collections().get(uuid=collection_uuid).execute()
+ collection_info = CollectionInfo.get(collection_uuid)
+ collection_info.api_response = collection_response
+ manifest_lines = collection_response['manifest_text'].split('\n')
+
+ if args.verbose:
+ print 'Manifest text for %s:' % collection_uuid
+ pprint.pprint(manifest_lines)
+
+ for manifest_line in manifest_lines:
+ if manifest_line:
+ manifest_tokens = manifest_line.split(' ')
+ if args.verbose:
+ print 'manifest tokens: ' + pprint.pformat(manifest_tokens)
+ stream_name = manifest_tokens[0]
+
+ line_block_uuids = set(filter(None,
+ [extractUuid(candidate)
+ for candidate in manifest_tokens[1:]]))
+ collection_info.block_uuids.update(line_block_uuids)
+
+ # file_tokens = [token
+ # for token in manifest_tokens[1:]
+ # if extractUuid(token) is None]
+
+ # # Sort file tokens by start position in case they aren't already
+ # file_tokens.sort(key=lambda file_token: int(file_token.split(':')[0]))
+
+ # if args.verbose:
+ # print 'line_block_uuids: ' + pprint.pformat(line_block_uuids)
+ # print 'file_tokens: ' + pprint.pformat(file_tokens)
+
+
+def readLinks():
+  """Fetches the links pointing at each known collection and records,
+  per collection, which users can read it ('permission' links) and
+  which users want it persisted and at what replication level
+  ('resources' links)."""
+  link_classes = set()
+
+  for collection_uuid,collection_info in CollectionInfo.all_by_uuid.items():
+    # TODO(misha): We may not be seeing all the links, but since items
+    # available does not return an accurate number, I don't know how
+    # to confirm that we saw all of them.
+    collection_links_response = arv.links().list(where={'head_uuid':collection_uuid}).execute()
+    link_classes.update([link['link_class'] for link in collection_links_response['items']])
+    for link in collection_links_response['items']:
+      if link['link_class'] == 'permission':
+        collection_info.reader_uuids.add(link['tail_uuid'])
+      elif link['link_class'] == 'resources':
+        # Desired replication level falls back to the class default
+        # when the link's properties omit it.
+        replication_level = link['properties'].get(
+          'replication',
+          CollectionInfo.DEFAULT_PERSISTER_REPLICATION_LEVEL)
+        collection_info.persister_replication.addValue(
+          link['tail_uuid'],
+          replication_level)
+        collection_info.persister_uuids.add(link['tail_uuid'])
+
+  print 'Found the following link classes:'
+  pprint.pprint(link_classes)
+
+def reportMostPopularCollections():
+ most_popular_collections = sorted(
+ CollectionInfo.all_by_uuid.values(),
+ key=lambda info: len(info.reader_uuids) + 10 * len(info.persister_replication),
+ reverse=True)[:10]
+
+ print 'Most popular Collections:'
+ for collection_info in most_popular_collections:
+ print collection_info
+
+
+def buildMaps():
+ for collection_uuid,collection_info in CollectionInfo.all_by_uuid.items():
+ # Add the block holding the manifest itself for all calculations
+ block_uuids = collection_info.block_uuids.union([collection_uuid,])
+ for block_uuid in block_uuids:
+ block_to_collections[block_uuid].add(collection_uuid)
+ block_to_readers[block_uuid].update(collection_info.reader_uuids)
+ block_to_persisters[block_uuid].update(collection_info.persister_uuids)
+ block_to_persister_replication[block_uuid].addDict(
+ collection_info.persister_replication)
+ for reader_uuid in collection_info.reader_uuids:
+ reader_to_collections[reader_uuid].add(collection_uuid)
+ reader_to_blocks[reader_uuid].update(block_uuids)
+ for persister_uuid in collection_info.persister_uuids:
+ persister_to_collections[persister_uuid].add(collection_uuid)
+ persister_to_blocks[persister_uuid].update(block_uuids)
+
+
+def itemsByValueLength(original):
+ return sorted(original.items(),
+ key=lambda item:len(item[1]),
+ reverse=True)
+
+
+def reportBusiestUsers():
+ busiest_readers = itemsByValueLength(reader_to_collections)
+ print 'The busiest readers are:'
+ for reader,collections in busiest_readers:
+ print '%s reading %d collections.' % (reader, len(collections))
+ busiest_persisters = itemsByValueLength(persister_to_collections)
+ print 'The busiest persisters are:'
+ for persister,collections in busiest_persisters:
+ print '%s reading %d collections.' % (persister, len(collections))
+
+
+def blockDiskUsage(block_uuid):
+ """Returns the disk usage of a block given its uuid.
+
+ Will return 0 before reading the contents of the keep servers.
+ """
+ return byteSizeFromValidUuid(block_uuid) * block_to_replication[block_uuid]
+
+def blockPersistedUsage(user_uuid, block_uuid):
+ return (byteSizeFromValidUuid(block_uuid) *
+ block_to_persister_replication[block_uuid].get(user_uuid, 0))
+
+memo_computeWeightedReplicationCosts = {}
+def computeWeightedReplicationCosts(replication_levels):
+  """Computes the relative cost of varied replication levels.
+
+  replication_levels: a tuple of integers representing the desired
+  replication level. If n users want a replication level of x then x
+  should appear n times in replication_levels.
+
+  Returns a dictionary from replication level to cost.
+
+  The basic thinking is that the cost of replicating at level x should
+  be shared by everyone who wants replication of level x or higher.
+
+  For example, if we have two users who want 1 copy, one user who
+  wants 3 copies and two users who want 6 copies:
+  the input would be [1, 1, 3, 6, 6] (or any permutation)
+
+  The cost of the first copy is shared by all 5 users, so they each
+  pay 1 copy / 5 users = 0.2.
+  The cost of the second and third copies shared by 3 users, so they
+  each pay 2 copies / 3 users = 0.67 (plus the above costs)
+  The cost of the fourth, fifth and sixth copies is shared by two
+  users, so they each pay 3 copies / 2 users = 1.5 (plus the above costs)
+
+  Here are some other examples:
+  computeWeightedReplicationCosts([1,]) -> {1:1.0}
+  computeWeightedReplicationCosts([2,]) -> {2:2.0}
+  computeWeightedReplicationCosts([1,1]) -> {1:0.5}
+  computeWeightedReplicationCosts([2,2]) -> {2:1.0}
+  computeWeightedReplicationCosts([1,2]) -> {1:0.5,2:1.5}
+  computeWeightedReplicationCosts([1,3]) -> {1:0.5,3:2.5}
+  computeWeightedReplicationCosts([1,3,6,6,10]) -> {1:0.2,3:0.7,6:1.7,10:5.7}
+  """
+  replication_level_counts = sorted(Counter(replication_levels).items())
+
+  # Memoize by the sorted (level, count) histogram: the result depends
+  # only on how many users want each level, not on input order.
+  memo_key = str(replication_level_counts)
+
+  if not memo_key in memo_computeWeightedReplicationCosts:
+    last_level = 0
+    current_cost = 0
+    total_interested = float(sum(map(itemgetter(1), replication_level_counts)))
+    cost_for_level = {}
+    for replication_level, count in replication_level_counts:
+      copies_added = replication_level - last_level
+      # compute marginal cost from last level and add it to the last cost
+      current_cost += copies_added / total_interested
+      cost_for_level[replication_level] = current_cost
+      # update invariants
+      last_level = replication_level
+      total_interested -= count
+    memo_computeWeightedReplicationCosts[memo_key] = cost_for_level
+
+  return memo_computeWeightedReplicationCosts[memo_key]
+
+def blockPersistedWeightedUsage(user_uuid, block_uuid):
+ persister_replication_for_block = block_to_persister_replication[block_uuid]
+ user_replication = persister_replication_for_block[user_uuid]
+ return (
+ byteSizeFromValidUuid(block_uuid) *
+ computeWeightedReplicationCosts(
+ persister_replication_for_block.values())[user_replication])
+
+
+def computeUserStorageUsage():
+ for user, blocks in reader_to_blocks.items():
+ user_to_usage[user][UNWEIGHTED_READ_SIZE_COL] = sum(map(
+ byteSizeFromValidUuid,
+ blocks))
+ user_to_usage[user][WEIGHTED_READ_SIZE_COL] = sum(map(
+ lambda block_uuid:(float(byteSizeFromValidUuid(block_uuid))/
+ len(block_to_readers[block_uuid])),
+ blocks))
+ for user, blocks in persister_to_blocks.items():
+ user_to_usage[user][UNWEIGHTED_PERSIST_SIZE_COL] = sum(map(
+ partial(blockPersistedUsage, user),
+ blocks))
+ user_to_usage[user][WEIGHTED_PERSIST_SIZE_COL] = sum(map(
+ partial(blockPersistedWeightedUsage, user),
+ blocks))
+
+def printUserStorageUsage():
+ print ('user: unweighted readable block size, weighted readable block size, '
+ 'unweighted persisted block size, weighted persisted block size:')
+ for user, usage in user_to_usage.items():
+ print ('%s: %s %s %s %s' %
+ (user,
+ fileSizeFormat(usage[UNWEIGHTED_READ_SIZE_COL]),
+ fileSizeFormat(usage[WEIGHTED_READ_SIZE_COL]),
+ fileSizeFormat(usage[UNWEIGHTED_PERSIST_SIZE_COL]),
+ fileSizeFormat(usage[WEIGHTED_PERSIST_SIZE_COL])))
+
+def logUserStorageUsage():
+ for user, usage in user_to_usage.items():
+ body = {}
+ # user could actually represent a user or a group. We don't set
+ # the object_type field since we don't know which we have.
+ body['object_uuid'] = user
+ body['event_type'] = args.user_storage_log_event_type
+ properties = {}
+ properties['read_collections_total_bytes'] = usage[UNWEIGHTED_READ_SIZE_COL]
+ properties['read_collections_weighted_bytes'] = (
+ usage[WEIGHTED_READ_SIZE_COL])
+ properties['persisted_collections_total_bytes'] = (
+ usage[UNWEIGHTED_PERSIST_SIZE_COL])
+ properties['persisted_collections_weighted_bytes'] = (
+ usage[WEIGHTED_PERSIST_SIZE_COL])
+ body['properties'] = properties
+ # TODO(misha): Confirm that this will throw an exception if it
+ # fails to create the log entry.
+ arv.logs().create(body=body).execute()
+
+def getKeepServers():
+ response = arv.keep_disks().list().execute()
+ return [[keep_server['service_host'], keep_server['service_port']]
+ for keep_server in response['items']]
+
+
+def getKeepBlocks(keep_servers):
+ blocks = []
+ for host,port in keep_servers:
+ response = urllib2.urlopen('http://%s:%d/index' % (host, port))
+ server_blocks = [line.split(' ')
+ for line in response.read().split('\n')
+ if line]
+ server_blocks = [(block_id, int(mtime))
+ for block_id, mtime in server_blocks]
+ blocks.append(server_blocks)
+ return blocks
+
+def getKeepStats(keep_servers):
+ MOUNT_COLUMN = 5
+ TOTAL_COLUMN = 1
+ FREE_COLUMN = 3
+ DISK_BLOCK_SIZE = 1024
+ stats = []
+ for host,port in keep_servers:
+ response = urllib2.urlopen('http://%s:%d/status.json' % (host, port))
+
+ parsed_json = json.load(response)
+ df_entries = [line.split()
+ for line in parsed_json['df'].split('\n')
+ if line]
+ keep_volumes = [columns
+ for columns in df_entries
+ if 'keep' in columns[MOUNT_COLUMN]]
+ total_space = DISK_BLOCK_SIZE*sum(map(int,map(itemgetter(TOTAL_COLUMN),
+ keep_volumes)))
+ free_space = DISK_BLOCK_SIZE*sum(map(int,map(itemgetter(FREE_COLUMN),
+ keep_volumes)))
+ stats.append([total_space, free_space])
+ return stats
+
+
+def computeReplication(keep_blocks):
+ for server_blocks in keep_blocks:
+ for block_uuid, _ in server_blocks:
+ block_to_replication[block_uuid] += 1
+ log.debug('Seeing the following replication levels among blocks: %s',
+ str(set(block_to_replication.values())))
+
+
+def computeGarbageCollectionCandidates():
+ for server_blocks in keep_blocks:
+ block_to_latest_mtime.addValues(server_blocks)
+ empty_set = set()
+ garbage_collection_priority = sorted(
+ [(block,mtime)
+ for block,mtime in block_to_latest_mtime.items()
+ if len(block_to_persisters.get(block,empty_set)) == 0],
+ key = itemgetter(1))
+ global garbage_collection_report
+ garbage_collection_report = []
+ cumulative_disk_size = 0
+ for block,mtime in garbage_collection_priority:
+ disk_size = blockDiskUsage(block)
+ cumulative_disk_size += disk_size
+ garbage_collection_report.append(
+ (block,
+ mtime,
+ disk_size,
+ cumulative_disk_size,
+ float(free_keep_space + cumulative_disk_size)/total_keep_space))
+
+ print 'The oldest Garbage Collection Candidates: '
+ pprint.pprint(garbage_collection_report[:20])
+
+
+def outputGarbageCollectionReport(filename):
+ with open(filename, 'wb') as csvfile:
+ gcwriter = csv.writer(csvfile)
+ gcwriter.writerow(['block uuid', 'latest mtime', 'disk size',
+ 'cumulative size', 'disk free'])
+ for line in garbage_collection_report:
+ gcwriter.writerow(line)
+
+def computeGarbageCollectionHistogram():
+ # TODO(misha): Modify this to allow users to specify the number of
+ # histogram buckets through a flag.
+ histogram = []
+ last_percentage = -1
+ for _,mtime,_,_,disk_free in garbage_collection_report:
+ curr_percentage = percentageFloor(disk_free)
+ if curr_percentage > last_percentage:
+ histogram.append( (mtime, curr_percentage) )
+ last_percentage = curr_percentage
+
+ log.info('Garbage collection histogram is: %s', histogram)
+
+ return histogram
+
+
+def logGarbageCollectionHistogram():
+ body = {}
+ # TODO(misha): Decide whether we should specify an object_uuid in
+ # the body and if so, which uuid to use.
+ body['event_type'] = args.block_age_free_space_histogram_log_event_type
+ properties = {}
+ properties['histogram'] = garbage_collection_histogram
+ body['properties'] = properties
+ # TODO(misha): Confirm that this will throw an exception if it
+ # fails to create the log entry.
+ arv.logs().create(body=body).execute()
+
+
+def detectReplicationProblems():
+ blocks_not_in_any_collections.update(
+ set(block_to_replication.keys()).difference(block_to_collections.keys()))
+ underreplicated_persisted_blocks.update(
+ [uuid
+ for uuid, persister_replication in block_to_persister_replication.items()
+ if len(persister_replication) > 0 and
+ block_to_replication[uuid] < max(persister_replication.values())])
+ overreplicated_persisted_blocks.update(
+ [uuid
+ for uuid, persister_replication in block_to_persister_replication.items()
+ if len(persister_replication) > 0 and
+ block_to_replication[uuid] > max(persister_replication.values())])
+
+ log.info('Found %d blocks not in any collections, e.g. %s...',
+ len(blocks_not_in_any_collections),
+ ','.join(list(blocks_not_in_any_collections)[:5]))
+ log.info('Found %d underreplicated blocks, e.g. %s...',
+ len(underreplicated_persisted_blocks),
+ ','.join(list(underreplicated_persisted_blocks)[:5]))
+ log.info('Found %d overreplicated blocks, e.g. %s...',
+ len(overreplicated_persisted_blocks),
+ ','.join(list(overreplicated_persisted_blocks)[:5]))
+
+ # TODO:
+ # Read blocks sorted by mtime
+ # Cache window vs % free space
+ # Collections which candidates will appear in
+ # Youngest underreplicated read blocks that appear in collections.
+ # Report Collections that have blocks which are missing from (or
+ # underreplicated in) keep.
+
+
+# This is the main flow here
+
+parser = argparse.ArgumentParser(description='Report on keep disks.')
+"""The command line argument parser we use.
+
+We only use it in the __main__ block, but leave it outside the block
+in case another package wants to use it or customize it by specifying
+it as a parent to their commandline parser.
+"""
+parser.add_argument('-m',
+ '--max-api-results',
+ type=int,
+ default=5000,
+ help=('The max results to get at once.'))
+parser.add_argument('-p',
+ '--port',
+ type=int,
+ default=9090,
+ help=('The port number to serve on. 0 means no server.'))
+parser.add_argument('-v',
+ '--verbose',
+ help='increase output verbosity',
+ action='store_true')
+parser.add_argument('-u',
+ '--uuid',
+ help='uuid of specific collection to process')
+parser.add_argument('--require-admin-user',
+ action='store_true',
+ default=True,
+ help='Fail if the user is not an admin [default]')
+parser.add_argument('--no-require-admin-user',
+ dest='require_admin_user',
+ action='store_false',
+ help=('Allow users without admin permissions with '
+ 'only a warning.'))
+parser.add_argument('--log-to-workbench',
+ action='store_true',
+ default=False,
+ help='Log findings to workbench')
+parser.add_argument('--no-log-to-workbench',
+ dest='log_to_workbench',
+ action='store_false',
+ help='Don\'t log findings to workbench [default]')
+parser.add_argument('--user-storage-log-event-type',
+ default='user-storage-report',
+ help=('The event type to set when logging user '
+ 'storage usage to workbench.'))
+parser.add_argument('--block-age-free-space-histogram-log-event-type',
+ default='block-age-free-space-histogram',
+ help=('The event type to set when logging user '
+ 'storage usage to workbench.'))
+parser.add_argument('--garbage-collection-file',
+ default='',
+ help=('The file to write a garbage collection report, or '
+ 'leave empty for no report.'))
+
+args = None
+
+# TODO(misha): Think about moving some of this to the __main__ block.
+log = logging.getLogger('arvados.services.datamanager')
+stderr_handler = logging.StreamHandler()
+log.setLevel(logging.INFO)
+stderr_handler.setFormatter(
+ logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s'))
+log.addHandler(stderr_handler)
+
+# Global Data - don't try this at home
+collection_uuids = []
+
+# These maps all map from uuids to a set of uuids
+block_to_collections = defaultdict(set) # keep blocks
+reader_to_collections = defaultdict(set) # collection(s) for which the user has read access
+persister_to_collections = defaultdict(set) # collection(s) which the user has persisted
+block_to_readers = defaultdict(set)
+block_to_persisters = defaultdict(set)
+block_to_persister_replication = defaultdict(maxdict)
+reader_to_blocks = defaultdict(set)
+persister_to_blocks = defaultdict(set)
+
+UNWEIGHTED_READ_SIZE_COL = 0
+WEIGHTED_READ_SIZE_COL = 1
+UNWEIGHTED_PERSIST_SIZE_COL = 2
+WEIGHTED_PERSIST_SIZE_COL = 3
+NUM_COLS = 4
+user_to_usage = defaultdict(lambda : [0,]*NUM_COLS)
+
+keep_servers = []
+keep_blocks = []
+keep_stats = []
+total_keep_space = 0
+free_keep_space = 0
+
+block_to_replication = defaultdict(lambda: 0)
+block_to_latest_mtime = maxdict()
+
+garbage_collection_report = []
+"""A list of non-persisted blocks, sorted by increasing mtime
+
+Each entry is of the form (block uuid, latest mtime, disk size,
+cumulative size)
+
+* block uuid: The id of the block we want to delete
+* latest mtime: The latest mtime of the block across all keep servers.
+* disk size: The total disk space used by this block (block size
+multiplied by current replication level)
+* cumulative disk size: The sum of this block's disk size and all the
+blocks listed above it
+* disk free: The proportion of our disk space that would be free if we
+deleted this block and all the above. So this is (free disk space +
+cumulative disk size) / total disk capacity
+"""
+
+garbage_collection_histogram = []
+""" Shows the tradeoff of keep block age vs keep disk free space.
+
+Each entry is of the form (mtime, Disk Proportion).
+
+An entry of the form (1388747781, 0.52) means that if we deleted the
+oldest non-presisted blocks until we had 52% of the disk free, then
+all blocks with an mtime greater than 1388747781 would be preserved.
+"""
+
+# Stuff to report on
+blocks_not_in_any_collections = set()
+underreplicated_persisted_blocks = set()
+overreplicated_persisted_blocks = set()
+
+all_data_loaded = False
+
+def loadAllData():
+ checkUserIsAdmin()
+
+ log.info('Building Collection List')
+ global collection_uuids
+ collection_uuids = filter(None, [extractUuid(candidate)
+ for candidate in buildCollectionsList()])
+
+ log.info('Reading Collections')
+ readCollections(collection_uuids)
+
+ if args.verbose:
+ pprint.pprint(CollectionInfo.all_by_uuid)
+
+ log.info('Reading Links')
+ readLinks()
+
+ reportMostPopularCollections()
+
+ log.info('Building Maps')
+ buildMaps()
+
+ reportBusiestUsers()
+
+ log.info('Getting Keep Servers')
+ global keep_servers
+ keep_servers = getKeepServers()
+
+ print keep_servers
+
+ log.info('Getting Blocks from each Keep Server.')
+ global keep_blocks
+ keep_blocks = getKeepBlocks(keep_servers)
+
+ log.info('Getting Stats from each Keep Server.')
+ global keep_stats, total_keep_space, free_keep_space
+ keep_stats = getKeepStats(keep_servers)
+
+ total_keep_space = sum(map(itemgetter(0), keep_stats))
+ free_keep_space = sum(map(itemgetter(1), keep_stats))
+
+ # TODO(misha): Delete this hack when the keep servers are fixed!
+ # This hack deals with the fact that keep servers report each other's disks.
+ total_keep_space /= len(keep_stats)
+ free_keep_space /= len(keep_stats)
+
+ log.info('Total disk space: %s, Free disk space: %s (%d%%).' %
+ (fileSizeFormat(total_keep_space),
+ fileSizeFormat(free_keep_space),
+ 100*free_keep_space/total_keep_space))
+
+ computeReplication(keep_blocks)
+
+ log.info('average replication level is %f',
+ (float(sum(block_to_replication.values())) /
+ len(block_to_replication)))
+
+ computeGarbageCollectionCandidates()
+
+ if args.garbage_collection_file:
+ log.info('Writing garbage Collection report to %s',
+ args.garbage_collection_file)
+ outputGarbageCollectionReport(args.garbage_collection_file)
+
+ global garbage_collection_histogram
+ garbage_collection_histogram = computeGarbageCollectionHistogram()
+
+ if args.log_to_workbench:
+ logGarbageCollectionHistogram()
+
+ detectReplicationProblems()
+
+ computeUserStorageUsage()
+ printUserStorageUsage()
+ if args.log_to_workbench:
+ logUserStorageUsage()
+
+ global all_data_loaded
+ all_data_loaded = True
+
+
+class DataManagerHandler(BaseHTTPRequestHandler):
+ USER_PATH = 'user'
+ COLLECTION_PATH = 'collection'
+ BLOCK_PATH = 'block'
+
+ def userLink(self, uuid):
+ return ('<A HREF="/%(path)s/%(uuid)s">%(uuid)s</A>' %
+ {'uuid': uuid,
+ 'path': DataManagerHandler.USER_PATH})
+
+ def collectionLink(self, uuid):
+ return ('<A HREF="/%(path)s/%(uuid)s">%(uuid)s</A>' %
+ {'uuid': uuid,
+ 'path': DataManagerHandler.COLLECTION_PATH})
+
+ def blockLink(self, uuid):
+ return ('<A HREF="/%(path)s/%(uuid)s">%(uuid)s</A>' %
+ {'uuid': uuid,
+ 'path': DataManagerHandler.BLOCK_PATH})
+
+ def writeTop(self, title):
+ self.wfile.write('<HTML><HEAD><TITLE>%s</TITLE></HEAD>\n<BODY>' % title)
+
+ def writeBottom(self):
+ self.wfile.write('</BODY></HTML>\n')
+
+ def writeHomePage(self):
+ self.send_response(200)
+ self.end_headers()
+ self.writeTop('Home')
+ self.wfile.write('<TABLE>')
+ self.wfile.write('<TR><TH>user'
+ '<TH>unweighted readable block size'
+ '<TH>weighted readable block size'
+ '<TH>unweighted persisted block size'
+ '<TH>weighted persisted block size</TR>\n')
+ for user, usage in user_to_usage.items():
+ self.wfile.write('<TR><TD>%s<TD>%s<TD>%s<TD>%s<TD>%s</TR>\n' %
+ (self.userLink(user),
+ fileSizeFormat(usage[UNWEIGHTED_READ_SIZE_COL]),
+ fileSizeFormat(usage[WEIGHTED_READ_SIZE_COL]),
+ fileSizeFormat(usage[UNWEIGHTED_PERSIST_SIZE_COL]),
+ fileSizeFormat(usage[WEIGHTED_PERSIST_SIZE_COL])))
+ self.wfile.write('</TABLE>\n')
+ self.writeBottom()
+
+ def userExists(self, uuid):
+ # Currently this will return false for a user who exists but
+ # doesn't appear on any manifests.
+ # TODO(misha): Figure out if we need to fix this.
+ return user_to_usage.has_key(uuid)
+
+ def writeUserPage(self, uuid):
+ if not self.userExists(uuid):
+ self.send_error(404,
+ 'User (%s) Not Found.' % cgi.escape(uuid, quote=False))
+ else:
+ # Here we assume that since a user exists, they don't need to be
+ # html escaped.
+ self.send_response(200)
+ self.end_headers()
+ self.writeTop('User %s' % uuid)
+ self.wfile.write('<TABLE>')
+ self.wfile.write('<TR><TH>user'
+ '<TH>unweighted readable block size'
+ '<TH>weighted readable block size'
+ '<TH>unweighted persisted block size'
+ '<TH>weighted persisted block size</TR>\n')
+ usage = user_to_usage[uuid]
+ self.wfile.write('<TR><TD>%s<TD>%s<TD>%s<TD>%s<TD>%s</TR>\n' %
+ (self.userLink(uuid),
+ fileSizeFormat(usage[UNWEIGHTED_READ_SIZE_COL]),
+ fileSizeFormat(usage[WEIGHTED_READ_SIZE_COL]),
+ fileSizeFormat(usage[UNWEIGHTED_PERSIST_SIZE_COL]),
+ fileSizeFormat(usage[WEIGHTED_PERSIST_SIZE_COL])))
+ self.wfile.write('</TABLE>\n')
+ self.wfile.write('<P>Persisting Collections: %s\n' %
+ ', '.join(map(self.collectionLink,
+ persister_to_collections[uuid])))
+ self.wfile.write('<P>Reading Collections: %s\n' %
+ ', '.join(map(self.collectionLink,
+ reader_to_collections[uuid])))
+ self.writeBottom()
+
+ def collectionExists(self, uuid):
+ return CollectionInfo.all_by_uuid.has_key(uuid)
+
+ def writeCollectionPage(self, uuid):
+ if not self.collectionExists(uuid):
+ self.send_error(404,
+ 'Collection (%s) Not Found.' % cgi.escape(uuid, quote=False))
+ else:
+ collection = CollectionInfo.get(uuid)
+ # Here we assume that since a collection exists, its id doesn't
+ # need to be html escaped.
+ self.send_response(200)
+ self.end_headers()
+ self.writeTop('Collection %s' % uuid)
+ self.wfile.write('<H1>Collection %s</H1>\n' % uuid)
+ self.wfile.write('<P>Total size %s (not factoring in replication).\n' %
+ fileSizeFormat(collection.byteSize()))
+ self.wfile.write('<P>Readers: %s\n' %
+ ', '.join(map(self.userLink, collection.reader_uuids)))
+
+ if len(collection.persister_replication) == 0:
+ self.wfile.write('<P>No persisters\n')
+ else:
+ replication_to_users = defaultdict(set)
+ for user,replication in collection.persister_replication.items():
+ replication_to_users[replication].add(user)
+ replication_levels = sorted(replication_to_users.keys())
+
+ self.wfile.write('<P>%d persisters in %d replication level(s) maxing '
+ 'out at %dx replication:\n' %
+ (len(collection.persister_replication),
+ len(replication_levels),
+ replication_levels[-1]))
+
+ # TODO(misha): This code is used twice, let's move it to a method.
+ self.wfile.write('<TABLE><TR><TH>%s</TR>\n' %
+ '<TH>'.join(['Replication Level ' + str(x)
+ for x in replication_levels]))
+ self.wfile.write('<TR>\n')
+ for replication_level in replication_levels:
+ users = replication_to_users[replication_level]
+ self.wfile.write('<TD valign="top">%s\n' % '<BR>\n'.join(
+ map(self.userLink, users)))
+ self.wfile.write('</TR></TABLE>\n')
+
+ replication_to_blocks = defaultdict(set)
+ for block in collection.block_uuids:
+ replication_to_blocks[block_to_replication[block]].add(block)
+ replication_levels = sorted(replication_to_blocks.keys())
+ self.wfile.write('<P>%d blocks in %d replication level(s):\n' %
+ (len(collection.block_uuids), len(replication_levels)))
+ self.wfile.write('<TABLE><TR><TH>%s</TR>\n' %
+ '<TH>'.join(['Replication Level ' + str(x)
+ for x in replication_levels]))
+ self.wfile.write('<TR>\n')
+ for replication_level in replication_levels:
+ blocks = replication_to_blocks[replication_level]
+ self.wfile.write('<TD valign="top">%s\n' % '<BR>\n'.join(blocks))
+ self.wfile.write('</TR></TABLE>\n')
+
+
+ def do_GET(self):
+ if not all_data_loaded:
+ self.send_error(503,
+ 'Sorry, but I am still loading all the data I need.')
+ else:
+      # Remove the leading '/' and process the request path
+ split_path = self.path[1:].split('/')
+ request_type = split_path[0]
+ log.debug('path (%s) split as %s with request_type %s' % (self.path,
+ split_path,
+ request_type))
+ if request_type == '':
+ self.writeHomePage()
+ elif request_type == DataManagerHandler.USER_PATH:
+ self.writeUserPage(split_path[1])
+ elif request_type == DataManagerHandler.COLLECTION_PATH:
+ self.writeCollectionPage(split_path[1])
+ else:
+ self.send_error(404, 'Unrecognized request path.')
+ return
+
+class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
+ """Handle requests in a separate thread."""
+
+
+if __name__ == '__main__':
+ args = parser.parse_args()
+
+ if args.port == 0:
+ loadAllData()
+ else:
+ loader = threading.Thread(target = loadAllData, name = 'loader')
+ loader.start()
+
+ server = ThreadedHTTPServer(('localhost', args.port), DataManagerHandler)
+ server.serve_forever()
--- /dev/null
+#! /usr/bin/env python
+
+import datamanager
+import unittest
+
+class TestComputeWeightedReplicationCosts(unittest.TestCase):
+ def test_obvious(self):
+ self.assertEqual(datamanager.computeWeightedReplicationCosts([1,]),
+ {1:1.0})
+
+ def test_simple(self):
+ self.assertEqual(datamanager.computeWeightedReplicationCosts([2,]),
+ {2:2.0})
+
+ def test_even_split(self):
+ self.assertEqual(datamanager.computeWeightedReplicationCosts([1,1]),
+ {1:0.5})
+
+ def test_even_split_bigger(self):
+ self.assertEqual(datamanager.computeWeightedReplicationCosts([2,2]),
+ {2:1.0})
+
+ def test_uneven_split(self):
+ self.assertEqual(datamanager.computeWeightedReplicationCosts([1,2]),
+ {1:0.5, 2:1.5})
+
+ def test_uneven_split_bigger(self):
+ self.assertEqual(datamanager.computeWeightedReplicationCosts([1,3]),
+ {1:0.5, 3:2.5})
+
+ def test_uneven_split_jumble(self):
+ self.assertEqual(datamanager.computeWeightedReplicationCosts([1,3,6,6,10]),
+ {1:0.2, 3:0.7, 6:1.7, 10:5.7})
+
+ def test_documentation_example(self):
+ self.assertEqual(datamanager.computeWeightedReplicationCosts([1,1,3,6,6]),
+ {1:0.2, 3: 0.2 + 2.0 / 3, 6: 0.2 + 2.0 / 3 + 1.5})
+
+
+if __name__ == '__main__':
+ unittest.main()
return i['uuid'] == self.collection_locator
def update(self):
- collection = arvados.CollectionReader(arvados.Keep.get(self.collection_locator))
- for s in collection.all_streams():
- cwd = self
- for part in s.name().split('/'):
- if part != '' and part != '.':
- if part not in cwd._entries:
- cwd._entries[part] = self.inodes.add_entry(Directory(cwd.inode))
- cwd = cwd._entries[part]
- for k, v in s.files().items():
- cwd._entries[k] = self.inodes.add_entry(StreamReaderFile(cwd.inode, v))
- self.fresh()
-
+ try:
+ collection = arvados.CollectionReader(self.collection_locator)
+ for s in collection.all_streams():
+ cwd = self
+ for part in s.name().split('/'):
+ if part != '' and part != '.':
+ if part not in cwd._entries:
+ cwd._entries[part] = self.inodes.add_entry(Directory(cwd.inode))
+ cwd = cwd._entries[part]
+ for k, v in s.files().items():
+ cwd._entries[k] = self.inodes.add_entry(StreamReaderFile(cwd.inode, v))
+ print "found"
+ self.fresh()
+ except Exception as detail:
+ print("%s: error: %s" % (self.collection_locator,detail) )
class MagicDirectory(Directory):
'''A special directory that logically contains the set of all extant keep
llfuse.init(operations, args.mountpoint, opts)
llfuse.main()
else:
+ # Initialize the fuse connection
+ llfuse.init(operations, args.mountpoint, opts)
with daemon.DaemonContext():
- # Initialize the fuse connection
- llfuse.init(operations, args.mountpoint, opts)
llfuse.main()
--- /dev/null
+// Tests for Keep HTTP handlers:
+//
+// GetBlockHandler
+// PutBlockHandler
+// IndexHandler
+//
+// The HTTP handlers are responsible for enforcing permission policy,
+// so these tests must exercise all possible permission permutations.
+
+package main
+
+import (
+ "bytes"
+ "github.com/gorilla/mux"
+ "net/http"
+ "net/http/httptest"
+ "regexp"
+ "testing"
+ "time"
+)
+
+// A RequestTester represents the parameters for an HTTP request to
+// be issued on behalf of a unit test.
+type RequestTester struct {
+ uri string
+ api_token string
+ method string
+ request_body []byte
+}
+
+// Test GetBlockHandler on the following situations:
+// - permissions off, unauthenticated request, unsigned locator
+// - permissions on, authenticated request, signed locator
+// - permissions on, authenticated request, unsigned locator
+// - permissions on, unauthenticated request, signed locator
+// - permissions on, authenticated request, expired locator
+//
+func TestGetHandler(t *testing.T) {
+ defer teardown()
+
+ // Prepare two test Keep volumes. Our block is stored on the second volume.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ vols := KeepVM.Volumes()
+ if err := vols[0].Put(TEST_HASH, TEST_BLOCK); err != nil {
+ t.Error(err)
+ }
+
+ // Set up a REST router for testing the handlers.
+ rest := MakeRESTRouter()
+
+ // Create locators for testing.
+ // Turn on permission settings so we can generate signed locators.
+ enforce_permissions = true
+ PermissionSecret = []byte(known_key)
+ permission_ttl = time.Duration(300) * time.Second
+
+ var (
+ unsigned_locator = "http://localhost:25107/" + TEST_HASH
+ valid_timestamp = time.Now().Add(permission_ttl)
+ expired_timestamp = time.Now().Add(-time.Hour)
+ signed_locator = "http://localhost:25107/" + SignLocator(TEST_HASH, known_token, valid_timestamp)
+ expired_locator = "http://localhost:25107/" + SignLocator(TEST_HASH, known_token, expired_timestamp)
+ )
+
+ // -----------------
+ // Test unauthenticated request with permissions off.
+ enforce_permissions = false
+
+ // Unauthenticated request, unsigned locator
+ // => OK
+ response := IssueRequest(rest,
+ &RequestTester{
+ method: "GET",
+ uri: unsigned_locator,
+ })
+ ExpectStatusCode(t,
+ "Unauthenticated request, unsigned locator", http.StatusOK, response)
+ ExpectBody(t,
+ "Unauthenticated request, unsigned locator",
+ string(TEST_BLOCK),
+ response)
+
+ // ----------------
+ // Permissions: on.
+ enforce_permissions = true
+
+ // Authenticated request, signed locator
+ // => OK
+ response = IssueRequest(rest, &RequestTester{
+ method: "GET",
+ uri: signed_locator,
+ api_token: known_token,
+ })
+ ExpectStatusCode(t,
+ "Authenticated request, signed locator", http.StatusOK, response)
+ ExpectBody(t,
+ "Authenticated request, signed locator", string(TEST_BLOCK), response)
+
+ // Authenticated request, unsigned locator
+ // => PermissionError
+ response = IssueRequest(rest, &RequestTester{
+ method: "GET",
+ uri: unsigned_locator,
+ api_token: known_token,
+ })
+ ExpectStatusCode(t, "unsigned locator", PermissionError.HTTPCode, response)
+
+ // Unauthenticated request, signed locator
+ // => PermissionError
+ response = IssueRequest(rest, &RequestTester{
+ method: "GET",
+ uri: signed_locator,
+ })
+ ExpectStatusCode(t,
+ "Unauthenticated request, signed locator",
+ PermissionError.HTTPCode, response)
+
+ // Authenticated request, expired locator
+ // => ExpiredError
+ response = IssueRequest(rest, &RequestTester{
+ method: "GET",
+ uri: expired_locator,
+ api_token: known_token,
+ })
+ ExpectStatusCode(t,
+ "Authenticated request, expired locator",
+ ExpiredError.HTTPCode, response)
+}
+
+// Test PutBlockHandler on the following situations:
+// - no server key
+// - with server key, authenticated request, unsigned locator
+// - with server key, unauthenticated request, unsigned locator
+//
+func TestPutHandler(t *testing.T) {
+ defer teardown()
+
+ // Prepare two test Keep volumes.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ // Set up a REST router for testing the handlers.
+ rest := MakeRESTRouter()
+
+ // --------------
+ // No server key.
+
+ // Unauthenticated request, no server key
+ // => OK (unsigned response)
+ unsigned_locator := "http://localhost:25107/" + TEST_HASH
+ response := IssueRequest(rest,
+ &RequestTester{
+ method: "PUT",
+ uri: unsigned_locator,
+ request_body: TEST_BLOCK,
+ })
+
+ ExpectStatusCode(t,
+ "Unauthenticated request, no server key", http.StatusOK, response)
+ ExpectBody(t, "Unauthenticated request, no server key", TEST_HASH, response)
+
+ // ------------------
+ // With a server key.
+
+ PermissionSecret = []byte(known_key)
+ permission_ttl = time.Duration(300) * time.Second
+
+ // When a permission key is available, the locator returned
+ // from an authenticated PUT request will be signed.
+
+ // Authenticated PUT, signed locator
+ // => OK (signed response)
+ response = IssueRequest(rest,
+ &RequestTester{
+ method: "PUT",
+ uri: unsigned_locator,
+ request_body: TEST_BLOCK,
+ api_token: known_token,
+ })
+
+ ExpectStatusCode(t,
+ "Authenticated PUT, signed locator, with server key",
+ http.StatusOK, response)
+ if !VerifySignature(response.Body.String(), known_token) {
+ t.Errorf("Authenticated PUT, signed locator, with server key:\n"+
+ "response '%s' does not contain a valid signature",
+ response.Body.String())
+ }
+
+ // Unauthenticated PUT, unsigned locator
+ // => OK
+ response = IssueRequest(rest,
+ &RequestTester{
+ method: "PUT",
+ uri: unsigned_locator,
+ request_body: TEST_BLOCK,
+ })
+
+ ExpectStatusCode(t,
+ "Unauthenticated PUT, unsigned locator, with server key",
+ http.StatusOK, response)
+ ExpectBody(t,
+ "Unauthenticated PUT, unsigned locator, with server key",
+ TEST_HASH, response)
+}
+
+// Test /index requests:
+// - enforce_permissions off | unauthenticated /index request
+// - enforce_permissions off | unauthenticated /index/prefix request
+// - enforce_permissions off | authenticated /index request | non-superuser
+// - enforce_permissions off | authenticated /index/prefix request | non-superuser
+// - enforce_permissions off | authenticated /index request | superuser
+// - enforce_permissions off | authenticated /index/prefix request | superuser
+// - enforce_permissions on | unauthenticated /index request
+// - enforce_permissions on | unauthenticated /index/prefix request
+// - enforce_permissions on | authenticated /index request | non-superuser
+// - enforce_permissions on | authenticated /index/prefix request | non-superuser
+// - enforce_permissions on | authenticated /index request | superuser
+// - enforce_permissions on | authenticated /index/prefix request | superuser
+//
+// The only /index requests that should succeed are those issued by the
+// superuser when enforce_permissions = true.
+//
+func TestIndexHandler(t *testing.T) {
+ defer teardown()
+
+ // Set up Keep volumes and populate them.
+ // Include multiple blocks on different volumes, and
+ // some metadata files (which should be omitted from index listings)
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ vols := KeepVM.Volumes()
+ vols[0].Put(TEST_HASH, TEST_BLOCK)
+ vols[1].Put(TEST_HASH_2, TEST_BLOCK_2)
+ vols[0].Put(TEST_HASH+".meta", []byte("metadata"))
+ vols[1].Put(TEST_HASH_2+".meta", []byte("metadata"))
+
+ // Set up a REST router for testing the handlers.
+ rest := MakeRESTRouter()
+
+ data_manager_token = "DATA MANAGER TOKEN"
+
+ unauthenticated_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index",
+ }
+ authenticated_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index",
+ api_token: known_token,
+ }
+ superuser_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index",
+ api_token: data_manager_token,
+ }
+ unauth_prefix_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index/" + TEST_HASH[0:3],
+ }
+ auth_prefix_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index/" + TEST_HASH[0:3],
+ api_token: known_token,
+ }
+ superuser_prefix_req := &RequestTester{
+ method: "GET",
+ uri: "http://localhost:25107/index/" + TEST_HASH[0:3],
+ api_token: data_manager_token,
+ }
+
+ // ----------------------------
+ // enforce_permissions disabled
+ // All /index requests should fail.
+ enforce_permissions = false
+
+ // unauthenticated /index request
+ // => PermissionError
+ response := IssueRequest(rest, unauthenticated_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, unauthenticated request",
+ PermissionError.HTTPCode,
+ response)
+
+ // unauthenticated /index/prefix request
+ // => PermissionError
+ response = IssueRequest(rest, unauth_prefix_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, unauthenticated /index/prefix request",
+ PermissionError.HTTPCode,
+ response)
+
+ // authenticated /index request, non-superuser
+ // => PermissionError
+ response = IssueRequest(rest, authenticated_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, authenticated request, non-superuser",
+ PermissionError.HTTPCode,
+ response)
+
+ // authenticated /index/prefix request, non-superuser
+ // => PermissionError
+ response = IssueRequest(rest, auth_prefix_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, authenticated /index/prefix request, non-superuser",
+ PermissionError.HTTPCode,
+ response)
+
+ // authenticated /index request, superuser
+ // => PermissionError
+ response = IssueRequest(rest, superuser_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, superuser request",
+ PermissionError.HTTPCode,
+ response)
+
+ // superuser /index/prefix request
+ // => PermissionError
+ response = IssueRequest(rest, superuser_prefix_req)
+ ExpectStatusCode(t,
+ "enforce_permissions off, superuser /index/prefix request",
+ PermissionError.HTTPCode,
+ response)
+
+ // ---------------------------
+ // enforce_permissions enabled
+ // Only the superuser should be allowed to issue /index requests.
+ enforce_permissions = true
+
+ // unauthenticated /index request
+ // => PermissionError
+ response = IssueRequest(rest, unauthenticated_req)
+ ExpectStatusCode(t,
+ "enforce_permissions on, unauthenticated request",
+ PermissionError.HTTPCode,
+ response)
+
+ // unauthenticated /index/prefix request
+ // => PermissionError
+ response = IssueRequest(rest, unauth_prefix_req)
+ ExpectStatusCode(t,
+ "permissions on, unauthenticated /index/prefix request",
+ PermissionError.HTTPCode,
+ response)
+
+ // authenticated /index request, non-superuser
+ // => PermissionError
+ response = IssueRequest(rest, authenticated_req)
+ ExpectStatusCode(t,
+ "permissions on, authenticated request, non-superuser",
+ PermissionError.HTTPCode,
+ response)
+
+ // authenticated /index/prefix request, non-superuser
+ // => PermissionError
+ response = IssueRequest(rest, auth_prefix_req)
+ ExpectStatusCode(t,
+ "permissions on, authenticated /index/prefix request, non-superuser",
+ PermissionError.HTTPCode,
+ response)
+
+ // superuser /index request
+ // => OK
+ response = IssueRequest(rest, superuser_req)
+ ExpectStatusCode(t,
+ "permissions on, superuser request",
+ http.StatusOK,
+ response)
+
+ expected := `^` + TEST_HASH + `\+\d+ \d+\n` +
+ TEST_HASH_2 + `\+\d+ \d+\n$`
+ match, _ := regexp.MatchString(expected, response.Body.String())
+ if !match {
+ t.Errorf(
+ "permissions on, superuser request: expected %s, got:\n%s",
+ expected, response.Body.String())
+ }
+
+ // superuser /index/prefix request
+ // => OK
+ response = IssueRequest(rest, superuser_prefix_req)
+ ExpectStatusCode(t,
+ "permissions on, superuser request",
+ http.StatusOK,
+ response)
+
+ expected = `^` + TEST_HASH + `\+\d+ \d+\n$`
+ match, _ = regexp.MatchString(expected, response.Body.String())
+ if !match {
+ t.Errorf(
+ "permissions on, superuser /index/prefix request: expected %s, got:\n%s",
+ expected, response.Body.String())
+ }
+}
+
+// ====================
+// Helper functions
+// ====================
+
+// IssueRequest executes an HTTP request described by rt, against a
+// specified REST router. It returns the HTTP response to the request.
+func IssueRequest(router *mux.Router, rt *RequestTester) *httptest.ResponseRecorder {
+ response := httptest.NewRecorder()
+ body := bytes.NewReader(rt.request_body)
+ req, _ := http.NewRequest(rt.method, rt.uri, body)
+ if rt.api_token != "" {
+ req.Header.Set("Authorization", "OAuth "+rt.api_token)
+ }
+ router.ServeHTTP(response, req)
+ return response
+}
+
+// ExpectStatusCode checks whether a response has the specified status code,
+// and reports a test failure if not.
+func ExpectStatusCode(
+ t *testing.T,
+ testname string,
+ expected_status int,
+ response *httptest.ResponseRecorder) {
+ if response.Code != expected_status {
+ t.Errorf("%s: expected status %s, got %+v",
+ testname, expected_status, response)
+ }
+}
+
+func ExpectBody(
+ t *testing.T,
+ testname string,
+ expected_body string,
+ response *httptest.ResponseRecorder) {
+ if response.Body.String() != expected_body {
+ t.Errorf("%s: expected response body '%s', got %+v",
+ testname, expected_body, response)
+ }
+}
"io"
"io/ioutil"
"log"
+ "net"
"net/http"
"os"
+ "os/signal"
"regexp"
+ "strconv"
"strings"
"syscall"
+ "time"
)
// ======================
// and/or configuration file settings.
// Default TCP address on which to listen for requests.
+// Initialized by the --listen flag.
const DEFAULT_ADDR = ":25107"
// A Keep "block" is 64MB.
var PROC_MOUNTS = "/proc/mounts"
// The Keep VolumeManager maintains a list of available volumes.
+// Initialized by the --volumes flag (or by FindKeepVolumes).
var KeepVM VolumeManager
+// enforce_permissions controls whether permission signatures
+// should be enforced (affecting GET and DELETE requests).
+// Initialized by the --enforce-permissions flag.
+var enforce_permissions bool
+
+// permission_ttl is the time duration for which new permission
+// signatures (returned by PUT requests) will be valid.
+// Initialized by the --permission-ttl flag.
+var permission_ttl time.Duration
+
+// data_manager_token represents the API token used by the
+// Data Manager, and is required on certain privileged operations.
+// Initialized by the --data-manager-token-file flag.
+var data_manager_token string
+
// ==========
// Error types.
//
}
var (
- CollisionError = &KeepError{400, "Collision"}
- MD5Error = &KeepError{401, "MD5 Failure"}
- CorruptError = &KeepError{402, "Corruption"}
- NotFoundError = &KeepError{404, "Not Found"}
- GenericError = &KeepError{500, "Fail"}
- FullError = &KeepError{503, "Full"}
- TooLongError = &KeepError{504, "Too Long"}
+ CollisionError = &KeepError{400, "Collision"}
+ MD5Error = &KeepError{401, "MD5 Failure"}
+ PermissionError = &KeepError{401, "Permission denied"}
+ CorruptError = &KeepError{402, "Corruption"}
+ ExpiredError = &KeepError{403, "Expired permission signature"}
+ NotFoundError = &KeepError{404, "Not Found"}
+ GenericError = &KeepError{500, "Fail"}
+ FullError = &KeepError{503, "Full"}
+ TooLongError = &KeepError{504, "Too Long"}
)
func (e *KeepError) Error() string {
// data exceeds BLOCKSIZE bytes.
var ReadErrorTooLong = errors.New("Too long")
+// TODO(twp): continue moving as much code as possible out of main
+// so it can be effectively tested. Esp. handling and postprocessing
+// of command line flags (identifying Keep volumes and initializing
+// permission arguments).
+
func main() {
+ log.Println("Keep started: pid", os.Getpid())
+
// Parse command-line flags:
//
// -listen=ipaddr:port
// by looking at currently mounted filesystems for /keep top-level
// directories.
- var listen, volumearg string
- var serialize_io bool
- flag.StringVar(&listen, "listen", DEFAULT_ADDR,
- "interface on which to listen for requests, in the format ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port to listen on all network interfaces.")
- flag.StringVar(&volumearg, "volumes", "",
- "Comma-separated list of directories to use for Keep volumes, e.g. -volumes=/var/keep1,/var/keep2. If empty or not supplied, Keep will scan mounted filesystems for volumes with a /keep top-level directory.")
- flag.BoolVar(&serialize_io, "serialize", false,
- "If set, all read and write operations on local Keep volumes will be serialized.")
+ var (
+ data_manager_token_file string
+ listen string
+ permission_key_file string
+ permission_ttl_sec int
+ serialize_io bool
+ volumearg string
+ )
+ flag.StringVar(
+ &data_manager_token_file,
+ "data-manager-token-file",
+ "",
+ "File with the API token used by the Data Manager. All DELETE "+
+ "requests or GET /index requests must carry this token.")
+ flag.BoolVar(
+ &enforce_permissions,
+ "enforce-permissions",
+ false,
+ "Enforce permission signatures on requests.")
+ flag.StringVar(
+ &listen,
+ "listen",
+ DEFAULT_ADDR,
+ "Interface on which to listen for requests, in the format "+
+ "ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port "+
+ "to listen on all network interfaces.")
+ flag.StringVar(
+ &permission_key_file,
+ "permission-key-file",
+ "",
+ "File containing the secret key for generating and verifying "+
+ "permission signatures.")
+ flag.IntVar(
+ &permission_ttl_sec,
+ "permission-ttl",
+ 300,
+ "Expiration time (in seconds) for newly generated permission "+
+ "signatures.")
+ flag.BoolVar(
+ &serialize_io,
+ "serialize",
+ false,
+ "If set, all read and write operations on local Keep volumes will "+
+ "be serialized.")
+ flag.StringVar(
+ &volumearg,
+ "volumes",
+ "",
+ "Comma-separated list of directories to use for Keep volumes, "+
+ "e.g. -volumes=/var/keep1,/var/keep2. If empty or not "+
+ "supplied, Keep will scan mounted filesystems for volumes "+
+ "with a /keep top-level directory.")
flag.Parse()
// Look for local keep volumes.
log.Fatal("could not find any keep volumes")
}
+ // Initialize data manager token and permission key.
+ // If these tokens are specified but cannot be read,
+ // raise a fatal error.
+ if data_manager_token_file != "" {
+ if buf, err := ioutil.ReadFile(data_manager_token_file); err == nil {
+ data_manager_token = strings.TrimSpace(string(buf))
+ } else {
+ log.Fatalf("reading data manager token: %s\n", err)
+ }
+ }
+ if permission_key_file != "" {
+ if buf, err := ioutil.ReadFile(permission_key_file); err == nil {
+ PermissionSecret = bytes.TrimSpace(buf)
+ } else {
+ log.Fatalf("reading permission key: %s\n", err)
+ }
+ }
+
+ // Initialize permission TTL
+ permission_ttl = time.Duration(permission_ttl_sec) * time.Second
+
+ // If --enforce-permissions is true, we must have a permission key
+ // to continue.
+ if PermissionSecret == nil {
+ if enforce_permissions {
+ log.Fatal("--enforce-permissions requires a permission key")
+ } else {
+ log.Println("Running without a PermissionSecret. Block locators " +
+ "returned by this server will not be signed, and will be rejected " +
+ "by a server that enforces permissions.")
+ log.Println("To fix this, run Keep with --permission-key-file=<path> " +
+ "to define the location of a file containing the permission key.")
+ }
+ }
+
// Start a round-robin VolumeManager with the volumes we have found.
KeepVM = MakeRRVolumeManager(goodvols)
- // Set up REST handlers.
- //
- // Start with a router that will route each URL path to an
- // appropriate handler.
- //
- rest := mux.NewRouter()
- rest.HandleFunc(`/{hash:[0-9a-f]{32}}`, GetBlockHandler).Methods("GET", "HEAD")
- rest.HandleFunc(`/{hash:[0-9a-f]{32}}`, PutBlockHandler).Methods("PUT")
- rest.HandleFunc(`/index`, IndexHandler).Methods("GET", "HEAD")
- rest.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, IndexHandler).Methods("GET", "HEAD")
- rest.HandleFunc(`/status.json`, StatusHandler).Methods("GET", "HEAD")
-
// Tell the built-in HTTP server to direct all requests to the REST
// router.
- http.Handle("/", rest)
+ http.Handle("/", MakeRESTRouter())
+
+ // Set up a TCP listener.
+ listener, err := net.Listen("tcp", listen)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Shut down the server gracefully (by closing the listener)
+ // if SIGTERM is received.
+ term := make(chan os.Signal, 1)
+ go func(sig <-chan os.Signal) {
+ s := <-sig
+ log.Println("caught signal:", s)
+ listener.Close()
+ }(term)
+ signal.Notify(term, syscall.SIGTERM)
// Start listening for requests.
- http.ListenAndServe(listen, nil)
+ srv := &http.Server{Addr: listen}
+ srv.Serve(listener)
+
+ log.Println("shutting down")
+}
+
+// MakeRESTRouter returns a mux.Router that routes each supported
+// request (block GET/PUT, /index, /index/{prefix}, /status.json)
+// to the appropriate handler.
+//
+func MakeRESTRouter() *mux.Router {
+ rest := mux.NewRouter()
+ rest.HandleFunc(
+ `/{hash:[0-9a-f]{32}}`, GetBlockHandler).Methods("GET", "HEAD")
+ rest.HandleFunc(
+ `/{hash:[0-9a-f]{32}}+A{signature:[0-9a-f]+}@{timestamp:[0-9a-f]+}`,
+ GetBlockHandler).Methods("GET", "HEAD")
+ rest.HandleFunc(`/{hash:[0-9a-f]{32}}`, PutBlockHandler).Methods("PUT")
+
+ // For IndexHandler we support:
+ // /index - returns all locators
+ // /index/{prefix} - returns all locators that begin with {prefix}
+	//   {prefix} is a string of between 0 and 32 hexadecimal digits.
+ // If {prefix} is the empty string, return an index of all locators
+ // (so /index and /index/ behave identically)
+ // A client may supply a full 32-digit locator string, in which
+ // case the server will return an index with either zero or one
+ // entries. This usage allows a client to check whether a block is
+ // present, and its size and upload time, without retrieving the
+ // entire block.
+ //
+ rest.HandleFunc(`/index`, IndexHandler).Methods("GET", "HEAD")
+ rest.HandleFunc(
+ `/index/{prefix:[0-9a-f]{0,32}}`, IndexHandler).Methods("GET", "HEAD")
+ rest.HandleFunc(`/status.json`, StatusHandler).Methods("GET", "HEAD")
+ return rest
}
// FindKeepVolumes
for scanner.Scan() {
args := strings.Fields(scanner.Text())
dev, mount := args[0], args[1]
- if (dev == "tmpfs" || strings.HasPrefix(dev, "/dev/")) && mount != "/" {
+ if mount != "/" &&
+ (dev == "tmpfs" || strings.HasPrefix(dev, "/dev/")) {
keep := mount + "/keep"
if st, err := os.Stat(keep); err == nil && st.IsDir() {
vols = append(vols, keep)
return vols
}
-func GetBlockHandler(w http.ResponseWriter, req *http.Request) {
+func GetBlockHandler(resp http.ResponseWriter, req *http.Request) {
hash := mux.Vars(req)["hash"]
+ log.Printf("%s %s", req.Method, hash)
+
+ signature := mux.Vars(req)["signature"]
+ timestamp := mux.Vars(req)["timestamp"]
+
+ // If permission checking is in effect, verify this
+ // request's permission signature.
+ if enforce_permissions {
+ if signature == "" || timestamp == "" {
+ http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
+ return
+ } else if IsExpired(timestamp) {
+ http.Error(resp, ExpiredError.Error(), ExpiredError.HTTPCode)
+ return
+ } else {
+ validsig := MakePermSignature(hash, GetApiToken(req), timestamp)
+ if signature != validsig {
+ http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
+ return
+ }
+ }
+ }
+
block, err := GetBlock(hash)
if err != nil {
- http.Error(w, err.Error(), 404)
+ // This type assertion is safe because the only errors
+ // GetBlock can return are CorruptError or NotFoundError.
+ http.Error(resp, err.Error(), err.(*KeepError).HTTPCode)
return
}
- _, err = w.Write(block)
+ _, err = resp.Write(block)
if err != nil {
log.Printf("GetBlockHandler: writing response: %s", err)
}
return
}
// PutBlockHandler stores the request body as the block named by {hash}
// and, on success, replies with a signed locator for it.
// NOTE(review): diff excerpt — unchanged context lines (e.g. the
// declaration of errmsg and the branch that distinguishes oversized
// requests from other read errors) appear to be elided between the
// log.Println and the errmsg assignment below; verify against the full file.
-func PutBlockHandler(w http.ResponseWriter, req *http.Request) {
+func PutBlockHandler(resp http.ResponseWriter, req *http.Request) {
	hash := mux.Vars(req)["hash"]
+	log.Printf("%s %s", req.Method, hash)
+
	// Read the block data to be stored.
	// If the request exceeds BLOCKSIZE bytes, issue a HTTP 500 error.
	//
	//
	if buf, err := ReadAtMost(req.Body, BLOCKSIZE); err == nil {
		if err := PutBlock(buf, hash); err == nil {
-			w.WriteHeader(http.StatusOK)
+			// Success; sign the locator and return it to the client.
+			api_token := GetApiToken(req)
+			expiry := time.Now().Add(permission_ttl)
+			signed_loc := SignLocator(hash, api_token, expiry)
+			resp.Write([]byte(signed_loc))
		} else {
			// NOTE(review): unchecked type assertion — this panics if
			// PutBlock ever returns an error that is not a *KeepError.
			ke := err.(*KeepError)
-			http.Error(w, ke.Error(), ke.HTTPCode)
+			http.Error(resp, ke.Error(), ke.HTTPCode)
		}
	} else {
		log.Println("error reading request: ", err)
		// the maximum request size.
		errmsg = fmt.Sprintf("Max request size %d bytes", BLOCKSIZE)
	}
-		http.Error(w, errmsg, 500)
+		http.Error(resp, errmsg, 500)
	}
}
// IndexHandler
// A HandleFunc to address /index and /index/{prefix} requests.
//
-func IndexHandler(w http.ResponseWriter, req *http.Request) {
+func IndexHandler(resp http.ResponseWriter, req *http.Request) {
prefix := mux.Vars(req)["prefix"]
+ // Only the data manager may issue /index requests,
+ // and only if enforce_permissions is enabled.
+ // All other requests return 403 Permission denied.
+ api_token := GetApiToken(req)
+ if !enforce_permissions ||
+ api_token == "" ||
+ data_manager_token != api_token {
+ http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
+ return
+ }
var index string
for _, vol := range KeepVM.Volumes() {
index = index + vol.Index(prefix)
}
- w.Write([]byte(index))
+ resp.Write([]byte(index))
}
// StatusHandler
// NOTE(review): diff excerpt — the struct whose tail is visible below
// (a node-status type carrying a per-volume status slice, serialized as
// JSON key "volumes") opens before this excerpt.
	Volumes []*VolumeStatus `json:"volumes"`
}
-func StatusHandler(w http.ResponseWriter, req *http.Request) {
+func StatusHandler(resp http.ResponseWriter, req *http.Request) {
st := GetNodeStatus()
if jstat, err := json.Marshal(st); err == nil {
- w.Write(jstat)
+ resp.Write(jstat)
} else {
log.Printf("json.Marshal: %s\n", err)
log.Printf("NodeStatus = %v\n", st)
- http.Error(w, err.Error(), 500)
+ http.Error(resp, err.Error(), 500)
}
}
	// they should be sent directly to an event manager at high
	// priority or logged as urgent problems.
	//
	// NOTE(review): diff excerpt — interior of a block-read routine;
	// `vol`, `hash`, `filehash` and `buf` are defined above this excerpt.
	// The on-disk data's checksum (filehash) did not match the requested
	// hash, so the caller gets the data plus CorruptError.
-	log.Printf("%s: checksum mismatch for request %s (actual hash %s)\n",
+	log.Printf("%s: checksum mismatch for request %s (actual %s)\n",
		vol, hash, filehash)
	return buf, CorruptError
}
// NOTE(review): diff excerpt — interior of PutBlock (presumably); the
// function header and the code following `return nil` are elided.
// If we already have a block on disk under this identifier, return
// success (but check for MD5 collisions).
// The only errors that GetBlock can return are ErrCorrupt and ErrNotFound.
-	// In either case, we want to write our new (good) block to disk, so there is
-	// nothing special to do if err != nil.
+	// In either case, we want to write our new (good) block to disk,
+	// so there is nothing special to do if err != nil.
	if oldblock, err := GetBlock(hash); err == nil {
		// Byte-for-byte identical content: nothing to store.
		if bytes.Compare(block, oldblock) == 0 {
			return nil
	// NOTE(review): diff excerpt — tail of IsValidLocator; the failed
	// validation (err defined above this excerpt) is logged and the
	// locator rejected.
	log.Printf("IsValidLocator: %s\n", err)
	return false
}
+
+// GetApiToken returns the OAuth token from the Authorization
+// header of a HTTP request, or an empty string if no matching
+// token is found.
+func GetApiToken(req *http.Request) string {
+ if auth, ok := req.Header["Authorization"]; ok {
+ if strings.HasPrefix(auth[0], "OAuth ") {
+ return auth[0][6:]
+ }
+ }
+ return ""
+}
+
+// IsExpired returns true if the given Unix timestamp (expressed as a
+// hexadecimal string) is in the past, or if timestamp_hex cannot be
+// parsed as a hexadecimal string.
+func IsExpired(timestamp_hex string) bool {
+ ts, err := strconv.ParseInt(timestamp_hex, 16, 0)
+ if err != nil {
+ log.Printf("IsExpired: %s\n", err)
+ return true
+ }
+ return time.Unix(ts, 0).Before(time.Now())
+}
	// NOTE(review): diff excerpt — interior of a test function;
	// `expected` (a regexp source), `index` and `t` are defined above
	// this excerpt. The index output must match the expected pattern.
	match, err := regexp.MatchString(expected, index)
	if err == nil {
		if !match {
-			t.Errorf("IndexLocators returned:\n-----\n%s-----\n", index)
+			t.Errorf("IndexLocators returned:\n%s", index)
		}
	} else {
		t.Errorf("regexp.MatchString: %s", err)
// Cleanup to perform after each test.
//
func teardown() {
+ data_manager_token = ""
+ enforce_permissions = false
+ PermissionSecret = nil
KeepVM = nil
}
// key.
var PermissionSecret []byte
-// makePermSignature returns a string representing the signed permission
+// MakePermSignature returns a string representing the signed permission
// hint for the blob identified by blob_hash, api_token and expiration timestamp.
-func makePermSignature(blob_hash string, api_token string, expiry string) string {
+func MakePermSignature(blob_hash string, api_token string, expiry string) string {
	// HMAC-SHA1 keyed with the node-wide PermissionSecret, over
	// "@"-separated fields starting with the blob hash.
	// NOTE(review): diff excerpt — the remaining writes and the final
	// encoding/return of the digest are elided after this excerpt.
	hmac := hmac.New(sha1.New, PermissionSecret)
	hmac.Write([]byte(blob_hash))
	hmac.Write([]byte("@"))
// SignLocator takes a blob_locator, an api_token and an expiry time, and
// returns a signed locator string.
func SignLocator(blob_locator string, api_token string, expiry time.Time) string {
+ // If no permission secret or API token is available,
+ // return an unsigned locator.
+ if PermissionSecret == nil || api_token == "" {
+ return blob_locator
+ }
// Extract the hash from the blob locator, omitting any size hint that may be present.
blob_hash := strings.Split(blob_locator, "+")[0]
// Return the signed locator string.
timestamp_hex := fmt.Sprintf("%08x", expiry.Unix())
return blob_locator +
- "+A" + makePermSignature(blob_hash, api_token, timestamp_hex) +
+ "+A" + MakePermSignature(blob_hash, api_token, timestamp_hex) +
"@" + timestamp_hex
}