var container; // element that receives new content
var src; // url for retrieving content
var scrollHeight;
+ var spinner, colspan;
scrollHeight = scroller.scrollHeight || $('body')[0].scrollHeight;
if ($(scroller).scrollTop() + $(scroller).height()
>
return;
// Don't start another request until this one finishes
$(container).attr('data-infinite-content-href', null);
- $(container).append('<img src="/assets/ajax-loader.gif" class="infinite-scroller-spinner"></img>');
+ spinner = '<div class="spinner spinner-32px spinner-h-center"></div>';
+ if ($(container).is('table,tbody,thead,tfoot')) {
+ // Hack to determine how many columns a new tr should have
+ // in order to reach full width.
+ colspan = $(container).closest('table').
+ find('tr').eq(0).find('td,th').length;
+ if (colspan == 0)
+ colspan = '*';
+ spinner = ('<tr class="spinner"><td colspan="' + colspan + '">' +
+ spinner +
+ '</td></tr>');
+ }
+ $(container).append(spinner);
$.ajax(src,
{dataType: 'json',
type: 'GET',
data: {},
context: {container: container, src: src}}).
+ always(function() {
+ $(this.container).find(".spinner").detach();
+ }).
fail(function(jqxhr, status, error) {
if (jqxhr.readyState == 0 || jqxhr.status == 0) {
message = "Cancelled."
$(this.container).attr('data-infinite-content-href', this.src);
}).
done(function(data, status, jqxhr) {
- $(this.container).find(".infinite-scroller-spinner").detach();
$(this.container).append(data.content);
$(this.container).attr('data-infinite-content-href', data.next_page_href);
});
$('[data-infinite-scroller]').each(function() {
var $scroller = $($(this).attr('data-infinite-scroller'));
if (!$scroller.hasClass('smart-scroll') &&
- 'scroll' != $scroller.css('overflow-y'))
+ 'scroll' != $scroller.css('overflow-y'))
$scroller = $(window);
$scroller.
addClass('infinite-scroller').
var v = lines[a].match(re);
if (v != null) {
- var ts = new Date(Date.UTC(v[2], v[3], v[4], v[6], v[7], v[8]));
+ var ts = new Date(Date.UTC(v[2], v[3]-1, v[4], v[6], v[7], v[8]));
v11 = v[11];
if (typeof v[11] === 'undefined') {
taskid: v11,
node: node,
slot: slot,
- message: message,
+ message: message.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;'),
type: type
});
count += 1;
prop('disabled', !any);
if ($this.hasClass('active')) {
- $(".modal-dialog-preview-pane").html('<img src="/assets/ajax-loader.gif"></img>');
+ $(".modal-dialog-preview-pane").html('<div class="spinner spinner-32px spinner-h-center spinner-v-center"></div>');
$.ajax($this.attr('data-preview-href'),
{dataType: "html"}).
done(function(data, status, jqxhr) {
opacity: 0;
}
+.spinner {
+ /* placeholder for stuff like $.find('.spinner').detach() */
+}
+
+.spinner-32px {
+ background-image: url('<%= asset_path('spinner_32px.gif') %>');
+ background-repeat: no-repeat;
+ width: 32px;
+ height: 32px;
+}
+
+.spinner-h-center {
+ margin-left: auto;
+ margin-right: auto;
+}
+
+.spinner-v-center {
+ position: relative;
+ top: 45%;
+}
+
.rotating {
color: #f00;
/* Chrome and Firefox, at least in Linux, render a horrible shaky
redirect_to @object
end
+ def arv_normalize mt, *opts
+ r = ""
+ IO.popen(['arv-normalize'] + opts, 'w+b') do |io|
+ io.write mt
+ io.close_write
+ while buf = io.read(2**16)
+ r += buf
+ end
+ end
+ r
+ end
+
expose_action :combine_selected_files_into_collection do
lst = []
files = []
params["selection"].each do |s|
- m = CollectionsHelper.match(s)
+ a = ArvadosBase::resource_class_for_uuid s
+ m = nil
+ if a == Link
+ begin
+ m = CollectionsHelper.match(Link.find(s).head_uuid)
+ rescue
+ end
+ else
+ m = CollectionsHelper.match(s)
+ end
+
if m and m[1] and m[2]
lst.append(m[1] + m[2])
files.append(m)
files.each do |m|
mt = chash[m[1]+m[2]].manifest_text
if m[4]
- IO.popen(['arv-normalize', '--extract', m[4][1..-1]], 'w+b') do |io|
- io.write mt
- io.close_write
- while buf = io.read(2**20)
- combined += buf
- end
- end
+ combined += arv_normalize mt, '--extract', m[4][1..-1]
else
combined += chash[m[1]+m[2]].manifest_text
end
end
- normalized = ''
- IO.popen(['arv-normalize'], 'w+b') do |io|
- io.write combined
- io.close_write
- while buf = io.read(2**20)
- normalized += buf
- end
- end
+ normalized = arv_normalize combined
+ normalized_stripped = arv_normalize combined, '--strip'
require 'digest/md5'
d = Digest::MD5.new()
- d << normalized
- newuuid = "#{d.hexdigest}+#{normalized.length}"
+ d << normalized_stripped
+ newuuid = "#{d.hexdigest}+#{normalized_stripped.length}"
env = Hash[ENV].
merge({
})
IO.popen([env, 'arv-put', '--raw'], 'w+b') do |io|
- io.write normalized
+ io.write normalized_stripped
io.close_write
- while buf = io.read(2**20)
-
+ while buf = io.read(2**16)
end
end
ERROR_ACTIONS = [:render_error, :render_not_found]
around_filter :thread_clear
- around_filter :thread_with_mandatory_api_token, except: ERROR_ACTIONS
- around_filter :thread_with_optional_api_token
+ around_filter :set_thread_api_token
+ # Methods that don't require login should
+ # skip_around_filter :require_thread_api_token
+ around_filter :require_thread_api_token, except: ERROR_ACTIONS
before_filter :check_user_agreements, except: ERROR_ACTIONS
before_filter :check_user_notifications, except: ERROR_ACTIONS
before_filter :find_object_by_uuid, except: [:index, :choose] + ERROR_ACTIONS
theme :select_theme
begin
- rescue_from Exception,
- :with => :render_exception
- rescue_from ActiveRecord::RecordNotFound,
- :with => :render_not_found
- rescue_from ActionController::RoutingError,
- :with => :render_not_found
- rescue_from ActionController::UnknownController,
- :with => :render_not_found
- rescue_from ::AbstractController::ActionNotFound,
- :with => :render_not_found
+ rescue_from(ActiveRecord::RecordNotFound,
+ ActionController::RoutingError,
+ ActionController::UnknownController,
+ AbstractController::ActionNotFound,
+ with: :render_not_found)
+ rescue_from(Exception,
+ ActionController::UrlGenerationError,
+ with: :render_exception)
end
def unprocessable(message=nil)
render_error status: 422
end
- def render_error(opts)
- opts = {status: 500}.merge opts
+ def render_error(opts={})
+ opts[:status] ||= 500
respond_to do |f|
# json must come before html here, so it gets used as the
# default format when js is requested by the client. This lets
# ajax:error callback parse the response correctly, even though
# the browser can't.
f.json { render opts.merge(json: {success: false, errors: @errors}) }
- f.html { render opts.merge(controller: 'application', action: 'error') }
+ f.html { render({action: 'error'}.merge(opts)) }
end
end
def render_exception(e)
logger.error e.inspect
logger.error e.backtrace.collect { |x| x + "\n" }.join('') if e.backtrace
- if @object.andand.errors.andand.full_messages.andand.any?
+ err_opts = {status: 422}
+ if e.is_a?(ArvadosApiClient::ApiError)
+ err_opts.merge!(action: 'api_error', locals: {api_error: e})
+ @errors = e.api_response[:errors]
+ elsif @object.andand.errors.andand.full_messages.andand.any?
@errors = @object.errors.full_messages
else
@errors = [e.to_s]
end
- self.render_error status: 422
+ # If the user has an active session, and the API server is available,
+ # make user information available on the error page.
+ begin
+ load_api_token(session[:arvados_api_token])
+ rescue ArvadosApiClient::ApiError
+ load_api_token(nil)
+ end
+ # Preload projects trees for the template. If that fails, set empty
+ # trees so error page rendering can proceed. (It's easier to rescue the
+ # exception here than in a template.)
+ begin
+ build_project_trees
+ rescue ArvadosApiClient::ApiError
+ @my_project_tree ||= []
+ @shared_project_tree ||= []
+ end
+ render_error(err_opts)
end
def render_not_found(e=ActionController::RoutingError.new("Path not found"))
logger.error e.inspect
@errors = ["Path not found"]
- self.render_error status: 404
+ set_thread_api_token do
+ self.render_error(action: '404', status: 404)
+ end
end
def find_objects_for_index
end
def current_user
- return Thread.current[:user] if Thread.current[:user]
-
- if Thread.current[:arvados_api_token]
- if session[:user]
- if session[:user][:is_active] != true
- Thread.current[:user] = User.current
- else
- Thread.current[:user] = User.new(session[:user])
- end
- else
- Thread.current[:user] = User.current
- end
- else
- logger.error "No API token in Thread"
- return nil
- end
+ Thread.current[:user]
end
def model_class
protected
+ # Remove any api_token=... query parameter from +path+, keeping the
+ # leading delimiter (?, & or ;) so the remaining query string stays
+ # valid. Used before building redirect/login URLs so API tokens don't
+ # appear in browser Location bars.
+ def strip_token_from_path(path)
+ path.sub(/([\?&;])api_token=[^&;]*[&;]?/, '\1')
+ end
+
def redirect_to_login
respond_to do |f|
f.html {
if request.method.in? ['GET', 'HEAD']
- redirect_to arvados_api_client.arvados_login_url(return_to: request.url)
+ redirect_to arvados_api_client.arvados_login_url(return_to: strip_token_from_path(request.url))
else
flash[:error] = "Either you are not logged in, or your session has timed out. I can't automatically log you in and re-attempt this request."
redirect_to :back
[:arvados_api_token, :user].each do |key|
start_values[key] = Thread.current[key]
end
- Thread.current[:arvados_api_token] = api_token
- Thread.current[:user] = nil
+ load_api_token(api_token)
begin
yield
ensure
if params[:id] and params[:id].match /\D/
params[:uuid] = params.delete :id
end
- if not model_class
- @object = nil
- elsif params[:uuid].is_a? String
- if params[:uuid].empty?
+ begin
+ if not model_class
+ @object = nil
+ elsif not params[:uuid].is_a?(String)
+ @object = model_class.where(uuid: params[:uuid]).first
+ elsif params[:uuid].empty?
@object = nil
+ elsif (model_class != Link and
+ resource_class_for_uuid(params[:uuid]) == Link)
+ @name_link = Link.find(params[:uuid])
+ @object = model_class.find(@name_link.head_uuid)
else
- if (model_class != Link and
- resource_class_for_uuid(params[:uuid]) == Link)
- @name_link = Link.find(params[:uuid])
- @object = model_class.find(@name_link.head_uuid)
- else
- @object = model_class.find(params[:uuid])
- end
+ @object = model_class.find(params[:uuid])
end
- else
- @object = model_class.where(uuid: params[:uuid]).first
+ rescue ArvadosApiClient::NotFoundException, RuntimeError => error
+ if error.is_a?(RuntimeError) and (error.message !~ /^argument to find\(/)
+ raise
+ end
+ render_not_found(error)
+ return false
end
end
def thread_clear
- Thread.current[:arvados_api_token] = nil
- Thread.current[:user] = nil
+ load_api_token(nil)
Rails.cache.delete_matched(/^request_#{Thread.current.object_id}_/)
yield
Rails.cache.delete_matched(/^request_#{Thread.current.object_id}_/)
end
- def thread_with_api_token(login_optional = false)
+ # Set up the thread with the given API token and associated user object.
+ def load_api_token(new_token)
+ Thread.current[:arvados_api_token] = new_token
+ if new_token.nil?
+ Thread.current[:user] = nil
+ elsif (new_token == session[:arvados_api_token]) and
+ session[:user].andand[:is_active]
+ Thread.current[:user] = User.new(session[:user])
+ else
+ Thread.current[:user] = User.current
+ end
+ end
+
+ # If there's a valid api_token parameter, set up the session with that
+ # user's information. Return true if the method redirects the request
+ # (usually a post-login redirect); false otherwise.
+ def setup_user_session
+ return false unless params[:api_token]
+ Thread.current[:arvados_api_token] = params[:api_token]
begin
- try_redirect_to_login = true
- if params[:api_token]
- try_redirect_to_login = false
- Thread.current[:arvados_api_token] = params[:api_token]
- # Before copying the token into session[], do a simple API
- # call to verify its authenticity.
- if verify_api_token
- session[:arvados_api_token] = params[:api_token]
- u = User.current
- session[:user] = {
- uuid: u.uuid,
- email: u.email,
- first_name: u.first_name,
- last_name: u.last_name,
- is_active: u.is_active,
- is_admin: u.is_admin,
- prefs: u.prefs
- }
- if !request.format.json? and request.method.in? ['GET', 'HEAD']
- # Repeat this request with api_token in the (new) session
- # cookie instead of the query string. This prevents API
- # tokens from appearing in (and being inadvisedly copied
- # and pasted from) browser Location bars.
- redirect_to request.fullpath.sub(%r{([&\?]api_token=)[^&\?]*}, '')
- else
- yield
- end
- else
- @errors = ['Invalid API token']
- self.render_error status: 401
- end
- elsif session[:arvados_api_token]
- # In this case, the token must have already verified at some
- # point, but it might have been revoked since. We'll try
- # using it, and catch the exception if it doesn't work.
- try_redirect_to_login = false
- Thread.current[:arvados_api_token] = session[:arvados_api_token]
- begin
- yield
- rescue ArvadosApiClient::NotLoggedInException
- try_redirect_to_login = true
- end
+ user = User.current
+ rescue ArvadosApiClient::NotLoggedInException
+ false # We may redirect to login, or not, based on the current action.
+ else
+ session[:arvados_api_token] = params[:api_token]
+ session[:user] = {
+ uuid: user.uuid,
+ email: user.email,
+ first_name: user.first_name,
+ last_name: user.last_name,
+ is_active: user.is_active,
+ is_admin: user.is_admin,
+ prefs: user.prefs
+ }
+ if !request.format.json? and request.method.in? ['GET', 'HEAD']
+ # Repeat this request with api_token in the (new) session
+ # cookie instead of the query string. This prevents API
+ # tokens from appearing in (and being inadvisedly copied
+ # and pasted from) browser Location bars.
+ redirect_to strip_token_from_path(request.fullpath)
+ true
else
- logger.debug "No token received, session is #{session.inspect}"
- end
- if try_redirect_to_login
- unless login_optional
- redirect_to_login
- else
- # login is optional for this route so go on to the regular controller
- Thread.current[:arvados_api_token] = nil
- yield
- end
+ false
end
ensure
- # Remove token in case this Thread is used for anything else.
Thread.current[:arvados_api_token] = nil
end
end
- def thread_with_mandatory_api_token
- thread_with_api_token(true) do
- if Thread.current[:arvados_api_token]
- yield
- elsif session[:arvados_api_token]
- # Expired session. Clear it before refreshing login so that,
- # if this login procedure fails, we end up showing the "please
- # log in" page instead of getting stuck in a redirect loop.
- session.delete :arvados_api_token
- redirect_to_login
- else
- render 'users/welcome'
- end
+ # Save the session API token in thread-local storage, and yield.
+ # This method also takes care of session setup if the request
+ # provides a valid api_token parameter.
+ # If a token is unavailable or expired, the block is still run, with
+ # a nil token.
+ def set_thread_api_token
+ if Thread.current[:arvados_api_token]
+ yield # An API token has already been found - pass it through.
+ return
+ elsif setup_user_session
+ return # A new session was set up and received a response.
end
- end
- # This runs after thread_with_mandatory_api_token in the filter chain.
- def thread_with_optional_api_token
- if Thread.current[:arvados_api_token]
- # We are already inside thread_with_mandatory_api_token.
+ begin
+ load_api_token(session[:arvados_api_token])
yield
- else
- # We skipped thread_with_mandatory_api_token. Use the optional version.
- thread_with_api_token(true) do
+ rescue ArvadosApiClient::NotLoggedInException
+ # If we got this error with a token, it must've expired.
+ # Retry the request without a token.
+ unless Thread.current[:arvados_api_token].nil?
+ load_api_token(nil)
yield
end
+ ensure
+ # Remove token in case this Thread is used for anything else.
+ load_api_token(nil)
end
end
- def verify_api_token
- begin
- Link.where(uuid: 'just-verifying-my-api-token')
- true
- rescue ArvadosApiClient::NotLoggedInException
- false
+ # Reroute this request if an API token is unavailable.
+ def require_thread_api_token
+ if Thread.current[:arvados_api_token]
+ yield
+ elsif session[:arvados_api_token]
+ # Expired session. Clear it before refreshing login so that,
+ # if this login procedure fails, we end up showing the "please
+ # log in" page instead of getting stuck in a redirect loop.
+ session.delete :arvados_api_token
+ redirect_to_login
+ else
+ render 'users/welcome'
end
end
def get_n_objects_of_class dataclass, size
@objects_map_for ||= {}
- raise ArgumentError, 'Argument is not a data class' unless dataclass.is_a? Class
+ raise ArgumentError, 'Argument is not a data class' unless dataclass.is_a? Class and dataclass < ArvadosBase
raise ArgumentError, 'Argument is not a valid limit size' unless (size && size>0)
# if the objects_map_for has a value for this dataclass, and the
class CollectionsController < ApplicationController
- skip_around_filter(:thread_with_mandatory_api_token,
+ skip_around_filter(:require_thread_api_token,
only: [:show_file, :show_file_links])
skip_before_filter(:find_object_by_uuid,
only: [:provenance, :show_file, :show_file_links])
def show_file_links
Thread.current[:reader_tokens] = [params[:reader_token]]
- find_object_by_uuid
+ return if false.equal?(find_object_by_uuid)
render layout: false
end
# error we encounter, and return nil.
most_specific_error = [401]
token_list.each do |api_token|
- using_specific_api_token(api_token) do
- begin
+ begin
+ using_specific_api_token(api_token) do
yield
return api_token
- rescue ArvadosApiClient::NotLoggedInException => error
- status = 401
- rescue => error
- status = (error.message =~ /\[API: (\d+)\]$/) ? $1.to_i : nil
- raise unless [401, 403, 404].include?(status)
end
- if status >= most_specific_error.first
- most_specific_error = [status, error]
+ rescue ArvadosApiClient::ApiError => error
+ if error.api_status >= most_specific_error.first
+ most_specific_error = [error.api_status, error]
end
end
end
class SessionsController < ApplicationController
- skip_around_filter :thread_with_mandatory_api_token, :only => [:destroy, :index]
- skip_around_filter :thread_with_optional_api_token, :only => [:destroy, :index]
+ skip_around_filter :require_thread_api_token, :only => [:destroy, :index]
+ skip_around_filter :set_thread_api_token, :only => [:destroy, :index]
skip_before_filter :find_object_by_uuid, :only => [:destroy, :index]
def destroy
else
link_name = object_for_dataclass(resource_class, link_uuid).andand.friendly_link_name
end
- rescue RuntimeError
+ rescue ArvadosApiClient::NotFoundException
# If that lookup failed, the link will too. So don't make one.
return attrvalue
end
selectables = []
attrtext = attrvalue
- if dataclass and dataclass.is_a? Class
+ if dataclass.is_a? Class and dataclass < ArvadosBase
objects = get_n_objects_of_class dataclass, 10
objects.each do |item|
items << item
render opts.merge(partial: "application/#{partial}")
end
end
-
+
def fa_icon_class_for_object object
case object.class.to_s.to_sym
when :User
require 'thread'
class ArvadosApiClient
- class NotLoggedInException < StandardError
+ class ApiError < StandardError
+ attr_reader :api_response, :api_response_s, :api_status, :request_url
+
+ def initialize(request_url, errmsg)
+ @request_url = request_url
+ @api_response ||= {}
+ super(errmsg)
+ end
end
- class InvalidApiResponseException < StandardError
+
+ class NoApiResponseException < ApiError
+ def initialize(request_url, exception)
+ @api_response_s = exception.to_s
+ super(request_url,
+ "#{exception.class.to_s} error connecting to API server")
+ end
end
- class AccessForbiddenException < StandardError
+
+ class InvalidApiResponseException < ApiError
+ def initialize(request_url, api_response)
+ @api_status = api_response.status_code
+ @api_response_s = api_response.content
+ super(request_url, "Unparseable response from API server")
+ end
+ end
+
+ class ApiErrorResponseException < ApiError
+ def initialize(request_url, api_response)
+ @api_status = api_response.status_code
+ @api_response_s = api_response.content
+ @api_response = Oj.load(@api_response_s, :symbol_keys => true)
+ errors = @api_response[:errors]
+ if errors.respond_to?(:join)
+ errors = errors.join("\n\n")
+ else
+ errors = errors.to_s
+ end
+ super(request_url, "#{errors} [API: #{@api_status}]")
+ end
end
+ class AccessForbiddenException < ApiErrorResponseException; end
+ class NotFoundException < ApiErrorResponseException; end
+ class NotLoggedInException < ApiErrorResponseException; end
+
+ ERROR_CODE_CLASSES = {
+ 401 => NotLoggedInException,
+ 403 => AccessForbiddenException,
+ 404 => NotFoundException,
+ }
+
@@profiling_enabled = Rails.configuration.profiling_enabled
@@discovery = nil
profile_checkpoint { "Prepare request #{url} #{query[:uuid]} #{query[:where]} #{query[:filters]}" }
msg = @client_mtx.synchronize do
- @api_client.post(url,
- query,
- header: header)
+ begin
+ @api_client.post(url, query, header: header)
+ rescue => exception
+ raise NoApiResponseException.new(url, exception)
+ end
end
profile_checkpoint 'API transaction'
- if msg.status_code == 401
- raise NotLoggedInException.new
- end
-
- json = msg.content
-
begin
- resp = Oj.load(json, :symbol_keys => true)
+ resp = Oj.load(msg.content, :symbol_keys => true)
rescue Oj::ParseError
- raise InvalidApiResponseException.new json
+ resp = nil
end
if not resp.is_a? Hash
- raise InvalidApiResponseException.new json
- end
- if msg.status_code != 200
- errors = resp[:errors]
- errors = errors.join("\n\n") if errors.is_a? Array
- if msg.status_code == 403
- raise AccessForbiddenException.new "#{errors} [API: #{msg.status_code}]"
- else
- raise "#{errors} [API: #{msg.status_code}]"
- end
+ raise InvalidApiResponseException.new(url, msg)
+ elsif msg.status_code != 200
+ error_class = ERROR_CODE_CLASSES.fetch(msg.status_code, ApiError)
+ raise error_class.new(url, msg)
end
+
if resp[:_profile]
Rails.logger.info "API client: " \
"#{resp.delete(:_profile)[:request_time]} request_time"
end
def self.columns
- return @columns unless @columns.nil?
+ return @columns if @columns.andand.any?
@columns = []
@attribute_info ||= {}
schema = arvados_api_client.discovery[:schemas][self.to_s.to_sym]
--- /dev/null
+<%
+ if (controller.andand.action_name == 'show') and params[:uuid]
+ class_name = controller.model_class.to_s.underscore.humanize(capitalize: false)
+ req_item = safe_join([class_name, " with UUID ",
+ raw("<code>"), params[:uuid], raw("</code>")], "")
+ else
+ req_item = "page you requested"
+ end
+%>
+
+<h2>Not Found</h2>
+
+<p>The <%= req_item %> was not found.
+
+<% if class_name %>
+Perhaps you'd like to
+<%= link_to("browse all #{class_name.pluralize}", action: :index) %>?
+<% end %>
+
+</p>
+
--- /dev/null
+{"errors":<%= raw @errors.to_json %>}
\ No newline at end of file
attr('data-method', '<%= j params[:action_method] %>').
data('action-data', <%= raw params[:action_data] %>);
$(".chooser-show-project").on("click", function() {
- $("#choose-scroll").html("<%=j image_tag 'ajax-loader.gif' %>");
+ $("#choose-scroll").html("<div class=\"spinner spinner-32px spinner-h-center\"></div>");
$(".modal-dialog-preview-pane").html('');
var t = $(this);
var d = {
<%= render(partial: 'show_' + pane.downcase,
locals: { comparable: comparable, objects: @objects }) %>
<% else %>
- <%= image_tag 'ajax-loader.gif' %>
+ <div class="spinner spinner-32px spinner-h-center"></div>
<% end %>
</div>
</div>
-<%if object and (object.class.goes_in_projects? or (object.is_a?(Link) and ArvadosBase::resource_class_for_uuid(object.head_uuid).to_s == 'Collection')) %>
+<%if object and object.uuid and (object.class.goes_in_projects? or (object.is_a?(Link) and ArvadosBase::resource_class_for_uuid(object.head_uuid).to_s == 'Collection')) %>
<% fn = if defined? friendly_name
friendly_name
else
--- /dev/null
+<h2>Oh... fiddlesticks.</h2>
+
+<p>An error occurred when Workbench sent a request to the Arvados API server. Try reloading this page. If the problem is temporary, your request might go through next time.
+
+<% if not api_error %>
+</p>
+<% else %>
+If that doesn't work, the information below can help system administrators track down the problem.
+</p>
+
+<dl>
+ <dt>API request URL</dt>
+ <dd><code><%= api_error.request_url %></code></dd>
+
+ <% if api_error.api_response.empty? %>
+ <dt>Invalid API response</dt>
+ <dd><%= api_error.api_response_s %></dd>
+ <% else %>
+ <dt>API response</dt>
+ <dd><pre><%= Oj.dump(api_error.api_response, indent: 2) %></pre></dd>
+ <% end %>
+</dl>
+<% end %>
--- /dev/null
+{"errors":<%= raw @errors.to_json %>}
\ No newline at end of file
<% @name_links.each do |name_link| %>
- <% puts "looking up #{name_link.head_uuid}" %>
<% if (object = get_object(name_link.head_uuid)) %>
- <% puts "got #{object}" %>
<div class="row filterable selectable <%= 'multiple' if multiple %>" data-object-uuid="<%= name_link.uuid %>"
data-preview-href="<%= url_for object %>?tab_pane=chooser_preview"
style="margin-left: 1em; border-bottom-style: solid; border-bottom-width: 1px; border-bottom-color: #DDDDDD">
<% end %>
</td>
<td>
- <%= raw(distance_of_time_in_words(c.created_at, Time.now).sub('about ','~').sub(' ','&nbsp;')) if c.created_at %>
+ <%= c.created_at.to_s if c.created_at %>
</td>
<td>
<% current_state = @collection_info[c.uuid][:wanted_by_me] ? 'persistent' : 'cache' %>
<th></th>
<th>uuid</th>
<th>contents</th>
- <th>age</th>
+ <th>created at</th>
<th>storage</th>
<th>tags</th>
</tr>
<% logcollection = Collection.find @object.log %>
<% if logcollection %>
$.ajax('<%=j url_for logcollection %>/<%=j logcollection.files[0][1] %>').
- done(function(data, status, jqxhr) {
- logViewer.filter();
- addToLogViewer(logViewer, data.split("\n"), taskState);
- logViewer.filter(makeFilter());
- generateJobOverview("#log-viewer-overview", logViewer, taskState);
- $("#logloadspinner").detach();
- }).
- fail(function(jqxhr, status, error) {
- $("#logloadspinner").detach();
- });
+ done(function(data, status, jqxhr) {
+ logViewer.filter();
+ addToLogViewer(logViewer, data.split("\n"), taskState);
+ logViewer.filter(makeFilter());
+ generateJobOverview("#log-viewer-overview", logViewer, taskState);
+ $("#log-viewer .spinner").detach();
+ }).
+ fail(function(jqxhr, status, error) {
+ $("#log-viewer .spinner").detach();
+ });
<% end %>
<% else %>
<%# Live log loading not implemented yet. %>
</table>
<% if @object.log and logcollection %>
- <%= image_tag 'ajax-loader.gif', id: "logloadspinner" %>
+ <div class="spinner spinner-32px"></div>
<% end %>
</div>
</li>
-->
- <% if current_user %>
<li class="dropdown notification-menu">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" id="notifications-menu">
<span class="badge badge-alert notification-count"><%= @notification_count %></span>
<% end %>
</ul>
</li>
- <% end %>
<li class="dropdown selection-menu">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">
</a>
<ul class="dropdown-menu" role="menu">
<li role="presentation" class="dropdown-header">
- System tools
+ Settings
</li>
<li role="presentation"><a href="/repositories">
<i class="fa fa-lg fa-code-fork fa-fw"></i> Repositories
<% else %>
<li><a href="<%= arvados_api_client.arvados_login_url(return_to: root_url) %>">Log in</a></li>
<% end %>
+
+ <li class="dropdown help-menu">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="arv-help">
+ <span class="fa fa-lg fa-question-circle"></span>
+ </a>
+ <ul class="dropdown-menu">
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> Tutorials and User guide'), "#{Rails.configuration.arvados_docsite}/user", target: "_blank" %></li>
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> API Reference'), "#{Rails.configuration.arvados_docsite}/api", target: "_blank" %></li>
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> SDK Reference'), "#{Rails.configuration.arvados_docsite}/sdk", target: "_blank" %></li>
+ </ul>
+ </li>
</ul>
</div><!-- /.navbar-collapse -->
</nav>
</th><th>
Owner
</th><th>
- Age
+ Created at
</th><th>
</th>
</tr>
</td><td>
<%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
</td><td>
- <%= distance_of_time_in_words(ob.created_at, Time.now) %>
+ <%= ob.created_at.to_s %>
</td><td>
<%= render partial: 'delete_object_button', locals: {object:ob} %>
</td>
<% content_for :tab_line_buttons do %>
- <%= form_tag '/pipeline_instances' do |f| %>
- <%= hidden_field :pipeline_instance, :pipeline_template_uuid, :value => @object.uuid %>
- <%= button_tag "Run this pipeline", {class: 'btn btn-primary pull-right', id: "run-pipeline-button"} %>
-<% end %>
+ <%= button_to(choose_projects_path(id: "run-pipeline-button",
+ title: 'Choose project',
+ editable: true,
+ action_name: 'Choose',
+ action_href: pipeline_instances_path,
+ action_method: 'post',
+ action_data: {selection_param: 'pipeline_instance[owner_uuid]',
+ 'pipeline_instance[pipeline_template_uuid]' => @object.uuid,
+ 'success' => 'redirect-to-created-object'
+ }.to_json),
+ { class: "btn btn-primary btn-sm", remote: true, method: 'get' }
+ ) do %>
+ Run this pipeline
+ <% end %>
<% end %>
<%= render partial: 'pipeline_instances/show_components_editable', locals: {editable: false} %>
<tr>
<td>
- <%= form_tag '/pipeline_instances' do |f| %>
- <%= hidden_field :pipeline_instance, :pipeline_template_uuid, :value => ob.uuid %>
- <%= button_tag nil, {class: "btn btn-default btn-xs", title: "Run #{ob.name}"} do %>
- Run <i class="fa fa-fw fa-play"></i>
- <% end %>
- <% end %>
+ <%= button_to(choose_projects_path(id: "run-pipeline-button",
+ title: 'Choose project',
+ editable: true,
+ action_name: 'Choose',
+ action_href: pipeline_instances_path,
+ action_method: 'post',
+ action_data: {selection_param: 'pipeline_instance[owner_uuid]',
+ 'pipeline_instance[pipeline_template_uuid]' => ob.uuid,
+ 'success' => 'redirect-to-created-object'
+ }.to_json),
+ { class: "btn btn-default btn-xs", title: "Run #{ob.name}", remote: true, method: 'get' }
+ ) do %>
+ <i class="fa fa-fw fa-play"></i> Run
+ <% end %>
</td>
<td>
<%= render :partial => "show_object_button", :locals => {object: ob, size: 'xs'} %>
<% rowtype = projectnode[:object].class %>
<% next if rowtype != Group and !show_root_node %>
<div class="<%= 'project' if rowtype == Group %> row">
- <div class="col-md-12" style="padding-left: <%= projectnode[:depth] - (show_root_node ? 0 : 1) %>em;">
+ <div class="col-md-4" style="padding-left: <%= projectnode[:depth] - (show_root_node ? 0 : 1) %>em;">
<% if show_root_node and rowtype == String %>
<i class="fa fa-fw fa-folder-open-o"></i>
<%= projectnode[:object] %>
<% end %>
<% elsif rowtype == Group %>
<i class="fa fa-fw fa-folder-o"></i>
- <% opts = {} %>
- <% opts[:title] = projectnode[:object].description %>
- <% opts[:'data-toggle'] = 'tooltip' %>
- <% opts[:'data-placement'] = 'bottom' %>
- <%= link_to projectnode[:object], opts do %>
+ <%= link_to projectnode[:object] do %>
<%= projectnode[:object].friendly_link_name %>
<% end %>
<% end %>
</div>
+ <% if not projectnode[:object].description.blank? %>
+ <div class="col-md-8 small"><%= projectnode[:object].description %></div>
+ <% end %>
</div>
<% end %>
</div>
<th>Script</th>
<th>Output</th>
<th>Log</th>
- <th>Age</th>
+ <th>Created at</th>
<th>Status</th>
<th>Progress</th>
</tr>
<td>
<small>
- <%= raw(distance_of_time_in_words(j.created_at, Time.now).sub('about ','~').sub(' ','&nbsp;')) if j.created_at %>
+ <%= j.created_at.to_s if j.created_at %>
</small>
</td>
<tr>
<th>Instance</th>
<th>Template</th>
- <th>Age</th>
+ <th>Created at</th>
<th>Status</th>
<th>Progress</th>
</tr>
<td>
<small>
- <%= raw(distance_of_time_in_words(p.created_at, Time.now).sub('about ','~').sub(' ','&nbsp;')) if p.created_at %>
+ <%= (p.created_at.to_s) if p.created_at %>
</small>
</td>
</td>
<td>
<small>
- <%= raw(distance_of_time_in_words(c.created_at, Time.now).sub('about ','~').sub(' ','&nbsp;')) if c.created_at %>
+ <%= c.created_at.to_s if c.created_at %>
</small>
</td>
<td>
assert users.size == 3, 'Expected two objects in the preloaded hash'
end
+ test "requesting a nonexistent object returns 404" do
+ # We're really testing ApplicationController's find_object_by_uuid.
+ # It's easiest to do that by instantiating a concrete controller.
+ @controller = NodesController.new
+ get(:show, {id: "zzzzz-zzzzz-zzzzzzzzzzzzzzz"}, session_for(:admin))
+ assert_response 404
+ end
end
require 'test_helper'
class CollectionsControllerTest < ActionController::TestCase
+ NONEXISTENT_COLLECTION = "ffffffffffffffffffffffffffffffff+0"
+
def collection_params(collection_name, file_name=nil)
uuid = api_fixture('collections')[collection_name.to_s]['uuid']
params = {uuid: uuid, id: uuid}
"when showing the user agreement.")
assert_response :success
end
+
+  # NONEXISTENT_COLLECTION is a well-formed locator that matches no fixture.
+  test "requesting nonexistent Collection returns 404" do
+    show_collection({uuid: NONEXISTENT_COLLECTION, id: NONEXISTENT_COLLECTION},
+                    :active, 404)
+  end
end
end
test "ignore previously valid token (for deleted user), don't crash" do
- get :welcome, {}, session_for(:valid_token_deleted_user)
+ get :activity, {}, session_for(:valid_token_deleted_user)
assert_response :redirect
assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
assert_nil assigns(:my_jobs)
--- /dev/null
+require 'integration_helper'
+
+# Integration tests for Workbench error pages: they should render with the
+# right content for the current session (logged in, expired token, no
+# login), include an error token traceable in server logs, and show
+# helpful 404 pages.
+class ErrorsTest < ActionDispatch::IntegrationTest
+  # Syntactically valid collection locator that matches no fixture.
+  BAD_UUID = "ffffffffffffffffffffffffffffffff+0"
+
+  test "error page renders user navigation" do
+    visit(page_with_token("active", "/collections/#{BAD_UUID}"))
+    assert(page.has_text?(api_fixture("users")["active"]["email"]),
+           "User information missing from error page")
+    assert(page.has_no_text?(/log ?in/i),
+           "Logged in user prompted to log in on error page")
+  end
+
+  test "no user navigation with expired token" do
+    visit(page_with_token("expired", "/collections/#{BAD_UUID}"))
+    assert(page.has_no_text?(api_fixture("users")["active"]["email"]),
+           "Page visited with expired token included user information")
+    assert(page.has_selector?("a", text: /log ?in/i),
+           "Login prompt missing on expired token error page")
+  end
+
+  test "error page renders without login" do
+    # NOTE(review): relies on @@API_AUTHS being defined by the test
+    # helpers -- confirm integration_helper loads it.
+    visit "/collections/download/#{BAD_UUID}/#{@@API_AUTHS['active']['api_token']}"
+    assert(page.has_no_text?(/\b500\b/),
+           "Error page without login returned 500")
+  end
+
+  test "'object not found' page includes search link" do
+    visit(page_with_token("active", "/collections/#{BAD_UUID}"))
+    assert(all("a").any? { |a| a[:href] =~ %r{/collections/?(\?|$)} },
+           "no search link found on 404 page")
+  end
+
+  # Current time as an integer Unix timestamp (UTC).
+  def now_timestamp
+    Time.now.utc.to_i
+  end
+
+  # True if the page contains an error token "<timestamp>+<8 hex digits>"
+  # with <timestamp> between start_stamp and now.
+  def page_has_error_token?(start_stamp)
+    matching_stamps = (start_stamp .. now_timestamp).to_a.join("|")
+    # Check the page HTML because we really don't care how it's presented.
+    # I think it would even be reasonable to put it in a comment.
+    page.html =~ /\b(#{matching_stamps})\+[0-9A-Fa-f]{8}\b/
+  end
+
+  # We use API tokens with limited scopes as the quickest way to get the API
+  # server to return an error. If Workbench gets smarter about coping when
+  # it has a too-limited token, these tests will need to be adjusted.
+  test "API error page includes error token" do
+    start_stamp = now_timestamp
+    visit(page_with_token("active_readonly", "/authorized_keys"))
+    click_on "Add a new authorized key"
+    assert(page.has_text?(/fiddlesticks/i),
+           "Not on an error page after making an SSH key out of scope")
+    assert(page_has_error_token?(start_stamp), "no error token on 404 page")
+  end
+
+  test "showing a bad UUID returns 404" do
+    visit(page_with_token("active", "/pipeline_templates/zzz"))
+    assert(page.has_no_text?(/fiddlesticks/i),
+           "trying to show a bad UUID rendered a fiddlesticks page, not 404")
+  end
+
+  test "404 page includes information about missing object" do
+    visit(page_with_token("active", "/groups/zazazaz"))
+    assert(page.has_text?(/group with UUID zazazaz/i),
+           "name of searched group missing from 404 page")
+  end
+
+  test "unrouted 404 page works" do
+    visit(page_with_token("active", "/__asdf/ghjk/zxcv"))
+    assert(page.has_text?(/not found/i),
+           "unrouted page missing 404 text")
+    assert(page.has_no_text?(/fiddlesticks/i),
+           "unrouted request returned a generic error page, not 404")
+  end
+end
assert_no_match(/\bapi_token=/, current_path)
end
-  test "can't use expired token" do
+  test "trying to use expired token redirects to login page" do
     visit page_with_token('expired_trustedclient')
-    assert page.has_text? 'Log in'
-  end
-
-  test "expired token yields login page, not error page" do
-    visit page_with_token('expired_trustedclient')
-    # Even the error page has a "Log in" link. We should look for
-    # something that only appears the real login page.
-    assert page.has_text? ' Log in Oh... fiddlesticks. Sorry, I had some trouble handling your request'
+    # Expect exactly one real login button whose href points at the SSO
+    # /login endpoint and does not leak the (expired) api_token.
+    buttons = all("a.btn", text: /Log in/)
+    assert_equal(1, buttons.size, "Failed to find one login button")
+    login_link = buttons.first[:href]
+    assert_match(%r{//[^/]+/login}, login_link)
+    assert_no_match(/\bapi_token=/, login_link)
   end
end
find('a,button', text: 'Run').click
end
+ # project chooser
+ within('.modal-dialog') do
+ find('.selectable', text: 'A Project').click
+ find('button', text: 'Choose').click
+ end
+
# This pipeline needs input. So, Run should be disabled
page.assert_selector 'a.disabled,button.disabled', text: 'Run'
instance_page = current_path
- # put this pipeline instance in "A Project"
- find('button', text: 'Choose a project...').click
- within('.modal-dialog') do
- find('.selectable', text: 'A Project').click
- find('button', text: 'Move').click
- end
-
# Go over to the collections page and select something
visit '/collections'
within('tr', text: 'GNU_General_Public_License') do
within('.modal-dialog') do
find('.btn', text: 'Add').click
end
-
+
find('tr[data-kind="arvados#pipelineInstance"]', text: 'New pipeline instance').
find('a', text: 'Show').
click
click
within('.modal-dialog') do
- find('span', text: 'foo_tag').click
+ first('span', text: 'foo_tag').click
find('button', text: 'OK').click
end
click
within('.modal-dialog') do
- find('span', text: 'foo_tag').click
+ first('span', text: 'foo_tag').click
find('button', text: 'OK').click
end
end
end
-  @@screenshot_count = 0
+  # Counter for the next screenshot filename; starts at 1 and only
+  # advances when a screenshot is actually written, so names stay
+  # consecutive across drivers that cannot take screenshots.
+  @@screenshot_count = 1
  def screenshot
-    image_file = "./tmp/workbench-fail-#{@@screenshot_count += 1}.png"
-    page.save_screenshot image_file
-    puts "Saved #{image_file}"
+    image_file = "./tmp/workbench-fail-#{@@screenshot_count}.png"
+    begin
+      page.save_screenshot image_file
+    rescue Capybara::NotSupportedByDriverError
+      # Some drivers (e.g. Rack::Test) cannot take screenshots.
+      # C'est la vie.
+    else
+      puts "Saved #{image_file}"
+      @@screenshot_count += 1
+    end
  end
teardown do
#!/usr/bin/env python
import arvados
+import md5
+import subst
+import subprocess
+import os
+import hashlib
+# NOTE(review): the md5, subprocess and os imports appear unused in this
+# script (hashlib provides the digest) -- confirm before removing.
-inputs = arvados.current_job()['script_parameters']['input']
-if not isinstance(inputs, (list,tuple)):
-    inputs = [inputs]
+p = arvados.current_job()['script_parameters']
-out_manifest = ''
-for locator in inputs:
-    out_manifest += arvados.CollectionReader(locator).manifest_text()
+# Each input is either "<locator>" (merge the entire collection) or
+# "<locator>/<stream>/<filename>" (merge a single file's manifest).
+# Inputs go through subst parameter expansion first.
+merged = ""
+src = []
+for c in p["input"]:
+    c = subst.do_substitution(p, c)
+    i = c.find('/')
+    if i == -1:
+        src.append(c)
+        merged += arvados.CollectionReader(c).manifest_text()
+    else:
+        src.append(c[0:i])
+        cr = arvados.CollectionReader(c[0:i])
+        j = c.rfind('/')
+        stream = c[i+1:j]
+        if stream == "":
+            stream = "."
+        fn = c[(j+1):]
+        for s in cr.all_streams():
+            if s.name() == stream:
+                if fn in s.files():
+                    merged += s.files()[fn].as_manifest()
+                # NOTE(review): files missing from the named stream are
+                # silently skipped -- confirm that is intended.
-arvados.current_task().set_output(arvados.Keep.put(out_manifest))
+# Compute the result's content address, Keep-locator style:
+# md5 hex digest of the stripped manifest text plus its length.
+crm = arvados.CollectionReader(merged)
+
+combined = crm.manifest_text(strip=True)
+
+m = hashlib.new('md5')
+m.update(combined)
+
+uuid = "{}+{}".format(m.hexdigest(), len(combined))
+
+collection = arvados.api().collections().create(
+    body={
+        'uuid': uuid,
+        'manifest_text': crm.manifest_text(),
+    }).execute()
+
+# Record a provenance link from each source collection to the result.
+for s in src:
+    l = arvados.api().links().create(body={
+        "link": {
+            "tail_uuid": s,
+            "head_uuid": uuid,
+            "link_class": "provenance",
+            "name": "provided"
+        }}).execute()
+
+arvados.current_task().set_output(uuid)
--- /dev/null
+#!/usr/bin/env python
+
+import arvados
+import re
+import os
+import subprocess
+import sys
+import shutil
+import subst
+
+# NOTE(review): the re import appears unused. The 0077 octal literal
+# below means this is a Python 2 script.
+os.umask(0077)
+
+t = arvados.current_task().tmpdir
+
+os.chdir(arvados.current_task().tmpdir)
+os.mkdir("tmpdir")
+os.mkdir("output")
+
+# The command runs with the output directory as its cwd; whatever it
+# leaves here is uploaded as the task output.
+os.chdir("output")
+
+# Task-level parameters override job script_parameters when present.
+if len(arvados.current_task()['parameters']) > 0:
+    p = arvados.current_task()['parameters']
+else:
+    p = arvados.current_job()['script_parameters']
+
+# Symlinks created by $(link ...) substitutions; removed before upload.
+links = []
+
+# "$(link path)": symlink path from the Keep mount into the output
+# directory and substitute the link's basename.
+def sub_link(v):
+    r = os.path.basename(v)
+    os.symlink(os.path.join(os.environ['TASK_KEEPMOUNT'], v) , r)
+    links.append(r)
+    return r
+
+# "$(tmpdir)": scratch directory for this task.
+def sub_tmpdir(v):
+    return os.path.join(arvados.current_task().tmpdir, 'tmpdir')
+
+# Keys follow subst's trailing-space keyword-prefix convention.
+subst.default_subs["link "] = sub_link
+subst.default_subs["tmpdir"] = sub_tmpdir
+
+# Default to failure unless subprocess.call below succeeds.
+rcode = 1
+
+try:
+    cmd = []
+    for c in p["command"]:
+        cmd.append(subst.do_substitution(p, c))
+
+    stdoutname = None
+    stdoutfile = None
+    if "stdout" in p:
+        stdoutname = subst.do_substitution(p, p["stdout"])
+        # NOTE(review): stdoutfile is never explicitly closed -- confirm
+        # that relying on interpreter exit to flush/close is acceptable.
+        stdoutfile = open(stdoutname, "wb")
+
+    print("Running command: {}{}".format(' '.join(cmd), (" > " + stdoutname) if stdoutname != None else ""))
+
+    rcode = subprocess.call(cmd, stdout=stdoutfile)
+
+except Exception as e:
+    print("Caught exception {}".format(e))
+
+finally:
+    # Always upload the output directory contents, even after an error.
+    for l in links:
+        os.unlink(l)
+
+    out = arvados.CollectionWriter()
+    out.write_directory_tree(".", max_manifest_depth=0)
+    arvados.current_task().set_output(out.finish())
+
+# Remove scratch space only on success, leaving it for debugging failures.
+if rcode == 0:
+    os.chdir("..")
+    shutil.rmtree("tmpdir")
+    shutil.rmtree("output")
+
+sys.exit(rcode)
--- /dev/null
+import os
+import glob
+
+# Find the first complete "$(...)" substitution expression in c.
+# A backslash escapes the following character. Returns [start, end],
+# where c[start] is the '$' of the outermost group and c[end] is its
+# matching ')'. Returns None when no complete expression is present;
+# raises if parentheses are left unbalanced.
+def search(c):
+    # Scanner states: DEFAULT = plain text, DOLLAR = just saw '$'.
+    DEFAULT = 0
+    DOLLAR = 1
+
+    i = 0
+    state = DEFAULT
+    start = None
+    depth = 0
+    while i < len(c):
+        if c[i] == '\\':
+            # Skip the escaped character.
+            i += 1
+        elif state == DEFAULT:
+            if c[i] == '$':
+                state = DOLLAR
+                if depth == 0:
+                    start = i
+            elif c[i] == ')':
+                if depth == 1:
+                    return [start, i]
+                if depth > 0:
+                    depth -= 1
+        elif state == DOLLAR:
+            # NOTE(review): a '$' not followed by '(' leaves the scanner
+            # in DOLLAR state until the next '(' appears -- confirm that
+            # is intended.
+            if c[i] == '(':
+                depth += 1
+                state = DEFAULT
+        i += 1
+    if depth != 0:
+        raise Exception("Substitution error, mismatched parentheses {}".format(c))
+    return None
+
+# "$(file path)": absolute path of a file under the task's Keep mount.
+def sub_file(v):
+    return os.path.join(os.environ['TASK_KEEPMOUNT'], v)
+
+# "$(dir path)": absolute Keep-mount path of path's directory component
+# (or of path itself when it has no directory component).
+def sub_dir(v):
+    d = os.path.dirname(v)
+    if d == '':
+        d = v
+    return os.path.join(os.environ['TASK_KEEPMOUNT'], d)
+
+# "$(basename path)": filename without directory or extension.
+def sub_basename(v):
+    return os.path.splitext(os.path.basename(v))[0]
+
+# "$(glob pattern)": first match of a shell glob pattern.
+# NOTE(review): raises IndexError when nothing matches -- confirm that
+# is the intended failure mode.
+def sub_glob(v):
+    return glob.glob(v)[0]
+
+# Substitution keyword prefixes (note the trailing spaces) -> handlers.
+default_subs = {"file ": sub_file,
+                "dir ": sub_dir,
+                "basename ": sub_basename,
+                "glob ": sub_glob}
+
+# Expand every "$(...)" expression in c. The inner text is expanded
+# recursively first, then either dispatched to a handler from subs
+# (matched by keyword prefix) or looked up as a key in the parameter
+# dict p.
+# NOTE(review): the recursive call does not pass subs through, so a
+# custom substitution table only applies at the top level -- confirm
+# that is intended.
+def do_substitution(p, c, subs=default_subs):
+    while True:
+        #print("c is", c)
+        m = search(c)
+        if m != None:
+            # m[0]+2 skips the "$(" prefix; m[1] is the matching ")".
+            v = do_substitution(p, c[m[0]+2 : m[1]])
+            var = True
+            for sub in subs:
+                if v.startswith(sub):
+                    r = subs[sub](v[len(sub):])
+                    var = False
+                    break
+            if var:
+                # No keyword matched: treat v as a parameter name.
+                r = p[v]
+
+            c = c[:m[0]] + r + c[m[1]+1:]
+        else:
+            return c
Dir.chdir(".site") do
`which linkchecker`
if $? == 0
- system "linkchecker index.html --ignore-url='!file://'"
+ system "linkchecker index.html --ignore-url='!file://'" or exit $?.exitstatus
else
puts "Warning: linkchecker not found, skipping run".colorize(:light_red)
end
- admin/index.html.md.liquid
- admin/cheat_sheet.html.textile.liquid
installguide:
- - Install:
+ - Overview:
- install/index.html.textile.liquid
+ - Docker:
+ - install/install-docker.html.textile.liquid
+ - Manual installation:
- install/install-sso.html.textile.liquid
- install/install-api-server.html.textile.liquid
- install/install-workbench-app.html.textile.liquid
--- /dev/null
+<div class="alert alert-block alert-info">
+ <button type="button" class="close" data-dismiss="alert">×</button>
+  <p>The Docker installation is not feature complete. We do not have a Docker container yet for crunch-dispatch and the Arvados compute nodes. This means that running pipelines from a Docker-based Arvados install is currently not supported without additional manual configuration. Without that manual configuration, it is possible to use arv-crunch-job to run a 'local' job against your Docker-based Arvados installation. To do this, please refer to the <a href="{{site.baseurl}}/user/topics/tutorial-job-debug.html">Debugging a Crunch script</a> page.</p>
+</div>
}
</pre></notextile>
+The same behavior, using filters:
+
+<notextile><pre>
+{
+ "job": {
+ "script": "hash.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": {
+ "input": "c1bad4b39ca5a924e481008009d94e32+210"
+ }
+ },
+ "filters": [["script", "=", "hash.py"],
+ ["repository", "=", "<b>you</b>"],
+ ["script_version", "in git", "earlier_version_tag"],
+ ["script_version", "not in git", "blacklisted_version_tag"]],
+ "find_or_create": true
+}
+</pre></notextile>
+
Run the script "crunch_scripts/monte-carlo.py" in the repository "you" using the current "master" commit. Because it is marked as "nondeterministic", this job will not be considered as a suitable candidate for future job submissions that use the "find_or_create" feature.
<notextile><pre>
---
layout: default
navsection: installguide
-title: Overview
+title: Installation overview
...
-{% include 'alert_stub' %}
+Arvados can be installed in multiple ways. It does not depend on any particular cloud operating stack, and runs on one or more GNU/Linux systems; development is done on Debian and Ubuntu GNU/Linux.
+
+The simplest way to try out Arvados is to use the Docker-based installation, which installs Arvados in a series of Docker containers.
+
+For larger scale installations, a manual installation is more appropriate.
+
+h2. Docker
-h2. Installation Overview
+{% include 'alert_docker' %}
+
+"Installing with Docker":install-docker.html
+
+h2. Manual installation
+
+{% include 'alert_stub' %}
# Set up a cluster, or use Amazon
# Create and mount Keep volumes
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Installing with Docker
+...
+
+{% include 'alert_docker' %}
+
+h2. Prerequisites
+
+# A GNU/Linux (virtual) machine
+# A working Docker installation
+
+h2. Download the source tree
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+</code></pre></notextile>
+
+See also: "Downloading the source code":https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
+
+h2. Building the Arvados Docker containers
+
+First we need a suitable @config.yml@ file.
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/docker</span>
+~$ <span class="userinput">cp config.yml.example config.yml</span>
+</code></pre></notextile>
+
+Now it's time to edit the @config.yml@ file and fill in suitable values for at a minimum these parameters:
+
+<pre>
+PUBLIC_KEY_PATH
+API_HOSTNAME
+API_AUTO_ADMIN_USER
+</pre>
+
+h2. Running the Arvados Docker containers
+
+The @arvdock@ command can be used to start and stop the docker containers. It has a number of options:
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">./arvdock -h</span>
+
+usage: ./arvdock (start|stop|restart|test) [options]
+
+./arvdock start/stop/restart options:
+ -d [port], --doc[=port] Documentation server (default port 9898)
+ -w [port], --workbench[=port] Workbench server (default port 9899)
+ -s [port], --sso[=port] SSO server (default port 9901)
+ -a [port], --api[=port] API server (default port 9900)
+ -k, --keep Keep servers
+ --ssh Enable SSH access to server containers
+ -h, --help Display this help and exit
+
+ If no options are given, the action is applied to all servers.
+
+./arvdock test [testname] [testname] ...
+ By default, all tests are run.
+</code>
+</pre>
+</notextile>
+
+The @--ssh@ option can be useful to debug issues with the Docker containers; it allows you to ssh into the running containers as the @root@ user, provided you have access to the private key that matches the public key specified in @config.yml@'s PUBLIC_KEY_PATH variable.
+
+Start the docker containers:
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">./arvdock start</span>
+sso_server
+Starting container:
+ /usr/bin/docker.io run -d -i -t -p 9901:443 --name sso_server arvados/sso
+api_server
+Starting container:
+ /usr/bin/docker.io run -d -i -t -p 9900:443 --name api_server --link sso_server:sso arvados/api
+keep_server_0
+Starting container:
+ /usr/bin/docker.io run -d -i -t -p 25107:25107 --name keep_server_0 -v /tmp/tmp.aCSx8Pq6Wb:/dev/keep-0 --link api_server:api arvados/keep
+keep_server_1
+Starting container:
+ /usr/bin/docker.io run -d -i -t -p 25108:25107 --name keep_server_1 -v /tmp/tmp.m4OQ9WB73G:/dev/keep-0 --link api_server:api arvados/keep
+doc_server
+Starting container:
+ /usr/bin/docker.io run -d -i -t -p 9898:80 --name doc_server arvados/doc
+
+*****************************************************************
+You can access the Arvados documentation at http://localhost:9898
+*****************************************************************
+
+workbench_server
+Starting container:
+ /usr/bin/docker.io run -d -i -t -p 9899:80 --name workbench_server --link api_server:api arvados/workbench
+
+*****************************************************************
+You can access the Arvados workbench at http://localhost:9899
+*****************************************************************
+</code></pre></notextile>
+
+h2. Accessing workbench
+
+Point your browser to the Dockerized workbench:
+
+<notextile>
+<pre><code><span class="userinput">https://localhost:9899</span>
+</code></pre>
+</notextile>
+
+Now use the Google account you specified as @API_AUTO_ADMIN_USER@ in @config.yml@ to log in. Your browser will warn you *twice* that you are accessing a site with an untrusted SSL certificate; this is normal, because by default the Arvados Docker installation uses self-signed SSL certificates for the SSO and API servers.
+
+
# Install generated config files
ADD generated/database.yml /usr/src/arvados/services/api/config/database.yml
-ADD generated/secret_token.rb /usr/src/arvados/services/api/config/initializers/secret_token.rb
ADD generated/omniauth.rb /usr/src/arvados/services/api/config/initializers/omniauth.rb
-ADD generated/production.rb /usr/src/arvados/services/api/config/environments/production.rb
+RUN /bin/cp /usr/src/arvados/services/api/config/environments/production.rb.example /usr/src/arvados/services/api/config/environments/production.rb
+ADD generated/application.yml /usr/src/arvados/services/api/config/application.yml
ADD generated/apache2_vhost /etc/apache2/sites-available/arvados
# Configure Rails databases.
--- /dev/null
+# Copy this file to application.yml and edit to suit.
+#
+# Consult application.default.yml for the full list of configuration
+# settings.
+#
+# The order of precedence is:
+# 1. config/environments/{RAILS_ENV}.rb (deprecated)
+# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
+# 3. Section in application.yml called "common"
+# 4. Section in application.default.yml corresponding to RAILS_ENV
+# 5. Section in application.default.yml called "common"
+
+development:
+ # The blob_signing_key is a string of alphanumeric characters used
+ # to sign permission hints for Keep locators. It must be identical
+ # to the permission key given to Keep. If you run both apiserver
+ # and Keep in development, change this to a hardcoded string and
+ # make sure both systems use the same value.
+ blob_signing_key: ~
+
+production:
+ # At minimum, you need a nice long randomly generated secret_token here.
+ # Use a long string of alphanumeric characters (at least 36).
+ secret_token: @@API_SECRET@@
+
+ # blob_signing_key is required and must be identical to the
+ # permission secret provisioned to Keep.
+ # Use a long string of alphanumeric characters (at least 36).
+ blob_signing_key: @@KEEP_SIGNING_SECRET@@
+
+ uuid_prefix: @@API_HOSTNAME@@
+
+ # The e-mail address of the user you would like to become marked as an admin
+ # user on their first login.
+ # In the default configuration, authentication happens through the Arvados SSO
+ # server, which uses openid against Google's servers, so in that case this
+ # should be an address associated with a Google account.
+ auto_admin_user: @@API_AUTO_ADMIN_USER@@
+
+ # compute_node_domain: example.org
+ # compute_node_nameservers:
+ # - 127.0.0.1
+ # - 192.168.1.1
+ #
+ # The version below is suitable for AWS.
+ # Uncomment and change <%# to <%= to use it.
+ # compute_node_nameservers: <%#
+ # require 'net/http'
+ # ['local', 'public'].collect do |iface|
+ # Net::HTTP.get(URI("http://169.254.169.254/latest/meta-data/#{iface}-ipv4")).match(/^[\d\.]+$/)[0]
+ # end << '172.16.0.23'
+ # %>
+
+test:
+ uuid_prefix: zzzzz
+ secret_token: <%= rand(2**512).to_s(36) %>
+
+common:
+ #git_repositories_dir: /var/cache/git
+ #git_internal_dir: /var/cache/arvados/internal.git
+
-LoadModule passenger_module /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.23/buildout/apache2/mod_passenger.so
-PassengerRoot /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.23
+LoadModule passenger_module /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.41/buildout/apache2/mod_passenger.so
+PassengerRoot /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.41
PassengerDefaultRuby /usr/local/rvm/wrappers/ruby-2.1.0/ruby
+++ /dev/null
-Server::Application.configure do
- # Settings specified here will take precedence over those in config/application.rb
-
- # Code is not reloaded between requests
- config.cache_classes = true
-
- # Full error reports are disabled and caching is turned on
- config.consider_all_requests_local = false
- config.action_controller.perform_caching = true
-
- # Disable Rails's static asset server (Apache or nginx will already do this)
- config.serve_static_assets = false
-
- # Compress JavaScripts and CSS
- config.assets.compress = true
-
- # Don't fallback to assets pipeline if a precompiled asset is missed
- config.assets.compile = false
-
- # Generate digests for assets URLs
- config.assets.digest = true
-
- # Defaults to Rails.root.join("public/assets")
- # config.assets.manifest = YOUR_PATH
-
- # Specifies the header that your server uses for sending files
- # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for apache
- # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for nginx
-
- # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
- # config.force_ssl = true
-
- # See everything in the log (default is :info)
- # config.log_level = :debug
-
- # Use a different logger for distributed setups
- # config.logger = SyslogLogger.new
-
- # Use a different cache store in production
- # config.cache_store = :mem_cache_store
-
- # Enable serving of images, stylesheets, and JavaScripts from an asset server
- # config.action_controller.asset_host = "http://assets.example.com"
-
- # Precompile additional assets (application.js, application.css, and all non-JS/CSS are already added)
- # config.assets.precompile += %w( search.js )
-
- # Disable delivery errors, bad email addresses will be ignored
- # config.action_mailer.raise_delivery_errors = false
- config.action_mailer.raise_delivery_errors = true
- config.action_mailer.perform_deliveries = true
-
- # Enable threaded mode
- # config.threadsafe!
-
- # Enable locale fallbacks for I18n (makes lookups for any locale fall back to
- # the I18n.default_locale when a translation can not be found)
- config.i18n.fallbacks = true
-
- # Send deprecation notices to registered listeners
- config.active_support.deprecation = :notify
-
- config.git_repositories_dir = '/var/cache/git'
-
- config.crunch_job_wrapper = :slurm_immediate
- config.crunch_job_user = 'crunch' # if false, do not set uid when running jobs
-
- # The web service must be able to create/write this file, and
- # crunch-job must be able to stat() it.
- config.crunch_refresh_trigger = '/tmp/crunch_refresh_trigger'
-
- # config.dnsmasq_conf_dir = '/etc/dnsmasq.d'
-
- # config.compute_node_ami = 'ami-cbca41a2'
- # config.compute_node_ec2run_args = '-g arvados-compute'
- # config.compute_node_spot_bid = 0.11
-
- # config.compute_node_domain = `hostname --domain`.strip
-
- # config.compute_node_nameservers = ['1.2.3.4', '1.2.3.5']
- require 'net/http'
- config.compute_node_nameservers = [ '@@ARVADOS_DNS_SERVER@@' ]
- config.compute_node_domain = false
- config.uuid_prefix = '@@API_HOSTNAME@@'
-
- # Authentication stub: hard code pre-approved API tokens.
- # config.accept_api_token = { rand(2**256).to_s(36) => true }
- config.accept_api_token = {}
-
- config.new_users_are_active = false
- config.admin_notifier_email_from = 'arvados@example.com'
- config.email_subject_prefix = '[ARVADOS] '
-
- # The e-mail address of the user you would like to become marked as an admin
- # user on their first login.
- # In the default configuration, authentication happens through the Arvados SSO
- # server, which uses openid against Google's servers, so in that case this
- # should be an address associated with a Google account.
- config.auto_admin_user = '@@API_AUTO_ADMIN_USER@@'
-end
+++ /dev/null
-# Be sure to restart your server when you modify this file.
-
-# Your secret key for verifying the integrity of signed cookies.
-# If you change this key, all old signed cookies will become invalid!
-# Make sure the secret is at least 30 characters and all random,
-# no regular words or you'll be exposed to dictionary attacks.
-Server::Application.config.secret_token = '@@API_SECRET@@'
-
-# The blob_signing_key is a string of alphanumeric characters used
-# to sign permission hints for Keep locators. It must be identical
-# to the permission key given to Keep.
-Server::Application.config.blob_signing_key = '@@KEEP_SIGNING_SECRET@@'
#!/bin/bash
ENABLE_SSH=false
+DOCKER=`which docker.io`
+
+if [[ "$DOCKER" == "" ]]; then
+ DOCKER=`which docker`
+fi
function usage {
echo >&2
function ip_address {
local container=$1
- echo `docker inspect $container |grep IPAddress |cut -f4 -d\"`
+ echo `$DOCKER inspect $container |grep IPAddress |cut -f4 -d\"`
}
function start_container {
args="$args -e ENABLE_SSH=$ENABLE_SSH"
fi
- `docker ps |grep -P "$name[^/]" -q`
+ `$DOCKER ps |grep -P "$name[^/]" -q`
if [[ "$?" == "0" ]]; then
echo "You have a running container with name $name -- skipping."
return
fi
# Remove any existing container by this name.
- docker rm "$name" 2>/dev/null
+ $DOCKER rm "$name" 2>/dev/null
echo "Starting container:"
- echo " docker run $args $image"
- container=`docker run $args $image`
+ echo " $DOCKER run $args $image"
+ container=`$DOCKER run $args $image`
if [[ "$?" != "0" ]]; then
echo "Unable to start container"
exit 1
echo " ssh root@$ip"
echo
fi
+
+ if [[ "$name" == "doc_server" ]]; then
+ echo
+ echo "*****************************************************************"
+ echo "You can access the Arvados documentation at http://localhost:${port%:*}"
+ echo "*****************************************************************"
+ echo
+ fi
+
+ if [[ "$name" == "workbench_server" ]]; then
+ echo
+ echo "*****************************************************************"
+ echo "You can access the Arvados workbench at http://localhost:${port%:*}"
+ echo "*****************************************************************"
+ echo
+ fi
+
+
}
declare -a keep_volumes
# Mount a keep volume if we don't already have one
for mountpoint in $(cut -d ' ' -f 2 /proc/mounts); do
if [[ -d "$mountpoint/keep" && "$mountpoint" != "/" ]]; then
- keep_volumes+=($mountpoint)
+ keep_volumes+=($mountpoint)
fi
done
# Create any keep volumes that do not yet exist.
while [ ${#keep_volumes[*]} -lt 2 ]
do
- new_keep=$(mktemp -d)
- echo >&2 "mounting 512M tmpfs keep volume in $new_keep"
- sudo mount -t tmpfs -o size=512M tmpfs $new_keep
- mkdir $new_keep/keep
- keep_volumes+=($new_keep)
+ new_keep=$(mktemp -d)
+ echo >&2 "mounting 512M tmpfs keep volume in $new_keep"
+ sudo mount -t tmpfs -o size=512M tmpfs $new_keep
+ mkdir $new_keep/keep
+ keep_volumes+=($new_keep)
done
}
while [ $# -ge 1 ]
do
case $1 in
- -d | --doc)
- case "$2" in
- "") start_doc=9898; shift 2 ;;
- *) start_doc=$2; shift 2 ;;
- esac
- ;;
- -s | --sso)
- case "$2" in
- "") start_sso=9901; shift 2 ;;
- *) start_sso=$2; shift 2 ;;
- esac
- ;;
- -a | --api)
- case "$2" in
- "") start_api=9900; shift 2 ;;
- *) start_api=$2; shift 2 ;;
- esac
- ;;
- -w | --workbench)
- case "$2" in
- "") start_workbench=9899; shift 2 ;;
- *) start_workbench=$2; shift 2 ;;
- esac
- ;;
- -k | --keep )
- start_keep=true
- shift
- ;;
- --ssh)
- # ENABLE_SSH is a global variable
- ENABLE_SSH=true
- shift
- ;;
- --)
- shift
- break
- ;;
- *)
- usage
- exit 1
- ;;
- esac
+ -d | --doc)
+ case "$2" in
+ "") start_doc=9898; shift 2 ;;
+ *) start_doc=$2; shift 2 ;;
+ esac
+ ;;
+ -s | --sso)
+ case "$2" in
+ "") start_sso=9901; shift 2 ;;
+ *) start_sso=$2; shift 2 ;;
+ esac
+ ;;
+ -a | --api)
+ case "$2" in
+ "") start_api=9900; shift 2 ;;
+ *) start_api=$2; shift 2 ;;
+ esac
+ ;;
+ -w | --workbench)
+ case "$2" in
+ "") start_workbench=9899; shift 2 ;;
+ *) start_workbench=$2; shift 2 ;;
+ esac
+ ;;
+ -k | --keep )
+ start_keep=true
+ shift
+ ;;
+ --ssh)
+ # ENABLE_SSH is a global variable
+ ENABLE_SSH=true
+ shift
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
done
# If no options were selected, then start all servers.
if [[ $start_doc == false &&
- $start_sso == false &&
- $start_api == false &&
- $start_workbench == false &&
- $start_keep == false ]]
+ $start_sso == false &&
+ $start_api == false &&
+ $start_workbench == false &&
+ $start_keep == false ]]
then
- start_doc=9898
- start_sso=9901
- start_api=9900
- start_workbench=9899
- start_keep=true
+ start_doc=9898
+ start_sso=9901
+ start_api=9900
+ start_workbench=9899
+ start_keep=true
fi
- if [[ $start_doc != false ]]
+ if [[ $start_sso != false ]]
then
- start_container "9898:80" "doc_server" '' '' "arvados/doc"
+ start_container "$start_sso:443" "sso_server" '' '' "arvados/sso"
fi
- if [[ $start_sso != false ]]
+ if [[ $start_api != false ]]
then
- start_container "9901:443" "sso_server" '' '' "arvados/sso"
+ start_container "$start_api:443" "api_server" '' "sso_server:sso" "arvados/api"
fi
- if [[ $start_api != false ]]
+ if [[ $start_keep != false ]]
then
- start_container "9900:443" "api_server" '' "sso_server:sso" "arvados/api"
+ # create `keep_volumes' array with a list of keep mount points
+ # remove any stale metadata from those volumes before starting them
+ make_keep_volumes
+ for v in ${keep_volumes[*]}
+ do
+ [ -f $v/keep/.metadata.yml ] && sudo rm $v/keep/.metadata.yml
+ done
+ start_container "25107:25107" "keep_server_0" \
+ "${keep_volumes[0]}:/dev/keep-0" \
+ "api_server:api" \
+ "arvados/keep"
+ start_container "25108:25107" "keep_server_1" \
+ "${keep_volumes[1]}:/dev/keep-0" \
+ "api_server:api" \
+ "arvados/keep"
fi
- if [[ $start_workbench != false ]]
+ if [[ $start_doc != false ]]
then
- start_container "9899:80" "workbench_server" '' "api_server:api" "arvados/workbench"
+ start_container "$start_doc:80" "doc_server" '' '' "arvados/doc"
fi
- if [[ $start_keep != false ]]
+ if [[ $start_workbench != false ]]
then
- # create `keep_volumes' array with a list of keep mount points
- # remove any stale metadata from those volumes before starting them
- make_keep_volumes
- for v in ${keep_volumes[*]}
- do
- [ -f $v/keep/.metadata.yml ] && sudo rm $v/keep/.metadata.yml
- done
- start_container "25107:25107" "keep_server_0" \
- "${keep_volumes[0]}:/dev/keep-0" \
- "api_server:api" \
- "arvados/warehouse"
- start_container "25108:25107" "keep_server_1" \
- "${keep_volumes[1]}:/dev/keep-0" \
- "api_server:api" \
- "arvados/warehouse"
+ start_container "$start_workbench:80" "workbench_server" '' "api_server:api" "arvados/workbench"
fi
if [ -d $HOME/.config/arvados ] || mkdir -p $HOME/.config/arvados
then
- cat >$HOME/.config/arvados/settings.conf <<EOF
+ cat >$HOME/.config/arvados/settings.conf <<EOF
ARVADOS_API_HOST=$(ip_address "api_server")
ARVADOS_API_HOST_INSECURE=yes
ARVADOS_API_TOKEN=$(cat api/generated/superuser_token)
EOF
fi
+
}
function do_stop {
while [ $# -ge 1 ]
do
case $1 in
- -d | --doc)
- stop_doc=doc_server ; shift 2 ;;
- -s | --sso)
- stop_sso=sso_server ; shift 2 ;;
- -a | --api)
- stop_api=api_server ; shift 2 ;;
- -w | --workbench)
- stop_workbench=workbench_server ; shift 2 ;;
- -k | --keep )
- stop_keep="keep_server_0 keep_server_1" ; shift ;;
- --ssh)
- shift
- ;;
- --)
- shift
- break
- ;;
- *)
- usage
- exit 1
- ;;
- esac
+      -d | --doc)
+        stop_doc=doc_server ; shift ;;
+      -s | --sso)
+        stop_sso=sso_server ; shift ;;
+      -a | --api)
+        stop_api=api_server ; shift ;;
+      -w | --workbench)
+        stop_workbench=workbench_server ; shift ;;
+ -k | --keep )
+ stop_keep="keep_server_0 keep_server_1" ; shift ;;
+ --ssh)
+ shift
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
done
- # If no options were selected, then start all servers.
+ # If no options were selected, then stop all servers.
if [[ $stop_doc == "" &&
- $stop_sso == "" &&
- $stop_api == "" &&
- $stop_workbench == "" &&
- $stop_keep == "" ]]
+ $stop_sso == "" &&
+ $stop_api == "" &&
+ $stop_workbench == "" &&
+ $stop_keep == "" ]]
then
- stop_doc=doc_server
- stop_sso=sso_server
- stop_api=api_server
- stop_workbench=workbench_server
- stop_keep="keep_server_0 keep_server_1"
+ stop_doc=doc_server
+ stop_sso=sso_server
+ stop_api=api_server
+ stop_workbench=workbench_server
+ stop_keep="keep_server_0 keep_server_1"
fi
- docker stop $stop_doc $stop_sso $stop_api $stop_workbench $stop_keep \
- 2>/dev/null
+ $DOCKER stop $stop_doc $stop_sso $stop_api $stop_workbench $stop_keep \
+ 2>/dev/null
}
function do_test {
local alltests
if [ $# -lt 1 ]
then
- alltests="python-sdk api"
+ alltests="python-sdk api"
else
- alltests="$@"
+ alltests="$@"
fi
for testname in $alltests
do
- echo "testing $testname..."
- case $testname in
- python-sdk)
- do_start --api --keep --sso
- export ARVADOS_API_HOST=$(ip_address "api_server")
- export ARVADOS_API_HOST_INSECURE=yes
- export ARVADOS_API_TOKEN=$(cat api/generated/superuser_token)
- python -m unittest discover ../sdk/python
- ;;
- api)
- docker run -t -i arvados/api \
- /usr/src/arvados/services/api/script/rake_test.sh
- ;;
- *)
- echo >&2 "unknown test $testname"
- ;;
- esac
+ echo "testing $testname..."
+ case $testname in
+ python-sdk)
+ do_start --api --keep --sso
+ export ARVADOS_API_HOST=$(ip_address "api_server")
+ export ARVADOS_API_HOST_INSECURE=yes
+ export ARVADOS_API_TOKEN=$(cat api/generated/superuser_token)
+ python -m unittest discover ../sdk/python
+ ;;
+ api)
+ $DOCKER run -t -i arvados/api \
+ /usr/src/arvados/services/api/script/rake_test.sh
+ ;;
+ *)
+ echo >&2 "unknown test $testname"
+ ;;
+ esac
done
}
case $1 in
start)
- shift
- do_start $@
- ;;
+ shift
+ do_start $@
+ ;;
stop)
- shift
- do_stop $@
- ;;
+ shift
+ do_stop $@
+ ;;
restart)
- shift
- do_stop $@
- do_start $@
- ;;
+ shift
+ do_stop $@
+ do_start $@
+ ;;
test)
- shift
- do_test $@
- ;;
+ shift
+ do_test $@
+ ;;
*)
- usage
- exit 1
- ;;
+ usage
+ exit 1
+ ;;
esac
-all: api-image doc-image workbench-image warehouse-image sso-image
+all: api-image doc-image workbench-image keep-image sso-image
# `make clean' removes the files generated in the build directory
# but does not remove any docker images generated in previous builds
# `make realclean' will also remove the docker images and force
# subsequent makes to build the entire chain from the ground up
realclean: clean
- -[ -n "`docker ps -q`" ] && docker stop `docker ps -q`
- -docker rm `docker ps -a -q`
- -docker rmi `docker images -q`
+ -[ -n "`$(DOCKER) ps -q`" ] && $(DOCKER) stop `$(DOCKER) ps -q`
+ -$(DOCKER) rm `$(DOCKER) ps -a -q`
+ -$(DOCKER) rmi `$(DOCKER) images -q`
# ============================================================
# Dependencies for */generated files which are prerequisites
JOBS_DEPS = jobs/Dockerfile
+BWA_SAMTOOLS_DEPS = bwa-samtools/Dockerfile
+
API_DEPS = api/Dockerfile $(API_GENERATED)
DOC_DEPS = doc/Dockerfile doc/apache2_vhost
workbench/passenger.conf \
$(WORKBENCH_GENERATED)
-WAREHOUSE_DEPS = warehouse/Dockerfile \
- warehouse/supervisor.conf \
- $(WAREHOUSE_GENERATED)
+KEEP_DEPS = keep/Dockerfile
SSO_DEPS = sso/passenger.conf $(SSO_GENERATED)
api/generated/config_databases.sh \
api/generated/database.yml \
api/generated/omniauth.rb \
- api/generated/production.rb \
- api/generated/secret_token.rb \
+ api/generated/application.yml \
api/generated/superuser_token
API_GENERATED_IN = \
api/config_databases.sh.in \
api/database.yml.in \
api/omniauth.rb.in \
- api/production.rb.in \
- api/secret_token.rb.in \
+ api/application.yml.in \
api/superuser_token.in
WORKBENCH_GENERATED = \
workbench/generated/apache2_vhost \
- workbench/generated/production.rb \
- workbench/generated/secret_token.rb
+ workbench/generated/application.yml
WORKBENCH_GENERATED_IN = \
workbench/apache2_vhost.in \
- workbench/production.rb.in \
- workbench/secret_token.rb.in
-
-WAREHOUSE_GENERATED = warehouse/generated/warehouse.conf
-
-WAREHOUSE_GENERATED_IN = warehouse/warehouse.conf.in
+ workbench/application.yml.in
SSO_GENERATED = \
sso/generated/apache2_vhost \
# The docker build -q option suppresses verbose build output.
# Necessary to prevent failure on building warehouse; see
# https://github.com/dotcloud/docker/issues/3172
-DOCKER_BUILD = docker build -q
+DOCKER_BUILD = $(DOCKER) build -q
# ============================================================
# The main Arvados servers: api, doc, workbench, warehouse
$(DOCKER_BUILD) -t arvados/doc doc
date >doc-image
+keep-image: debian-image $(BUILD) $(KEEP_DEPS)
+ $(DOCKER_BUILD) -t arvados/keep keep
+ date >keep-image
+
jobs-image: base-image $(BUILD) $(JOBS_DEPS)
$(DOCKER_BUILD) -t arvados/jobs jobs
date >jobs-image
+bwa-samtools-image: jobs-image $(BUILD) $(BWA_SAMTOOLS_DEPS)
+ $(DOCKER_BUILD) -t arvados/jobs-bwa-samtools bwa-samtools
+ date >bwa-samtools-image
+
workbench-image: passenger-image $(BUILD) $(WORKBENCH_DEPS)
mkdir -p workbench/generated
tar -czf workbench/generated/workbench.tar.gz -C build/apps workbench
# - TODO: mount cgroup automatically
# - TODO: start the docker service if not started
- docker_path = %x(which docker).chomp
+ docker_path = %x(which docker.io).chomp
+
+ if docker_path.empty?
+ docker_path = %x(which docker).chomp
+ end
+
if docker_path.empty?
warn "Docker not found."
warn ""
warn ""
warn "Installation instructions for a variety of platforms can be found at"
warn "http://docs.docker.io/en/latest/installation/"
- exit
- elsif not docker_ok?
+ exit 1
+ elsif not docker_ok? docker_path
warn "WARNING: docker could not be run."
warn "Please make sure that:"
warn " * You have permission to read and write /var/run/docker.sock"
warn " * a 'cgroup' volume is mounted on your machine"
warn " * the docker daemon is running"
- exit
+ exit 2
end
# Check that debootstrap is installed.
print "Generating config.yml.\n"
print "Arvados needs to know the email address of the administrative user,\n"
print "so that when that user logs in they are automatically made an admin.\n"
- print "This should be the email address you use to log in to Google.\n"
+ print "This should be an email address associated with a Google account.\n"
print "\n"
admin_email_address = ""
until is_valid_email? admin_email_address
# If all prerequisites are met, go ahead and build.
if ip_forwarding_enabled? and
- docker_ok? and
+ docker_ok? docker_path and
debootstrap_ok? and
File.exists? 'config.yml'
warn "Building Arvados."
- system '/usr/bin/make', '-f', options[:makefile], *ARGV
+ system({"DOCKER" => docker_path}, '/usr/bin/make', '-f', options[:makefile], *ARGV)
end
end
# docker_ok?
# Returns 'true' if docker can be run as the current user.
#
-def docker_ok?
- return system 'docker images > /dev/null 2>&1'
+def docker_ok?(docker_path)
+ return system "#{docker_path} images > /dev/null 2>&1"
end
# find_or_create_ssh_key arvados_name
if not linux_release.match '^1[234]\.'
warn "Arvados requires at least Ubuntu 12.04 (Precise Pangolin)."
warn "Your system is Ubuntu #{linux_release}."
- exit
+ exit 3
end
if linux_release.match '^12' and kernel_release.start_with? '3.2'
# Ubuntu Precise ships with a 3.2 kernel and must be upgraded.
warn " sudo apt-get update"
warn " sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring"
warn " sudo reboot"
- exit
+ exit 4
else
# install AUFS
sudo 'apt-get', 'update'
when 'Debian'
else
warn "Must be running a Debian or Ubuntu release in order to run Docker."
- exit
+ exit 5
end
end
--- /dev/null
+FROM arvados/jobs
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+USER root
+
+RUN cd /tmp && \
+ curl --location http://downloads.sourceforge.net/project/bio-bwa/bwa-0.7.9a.tar.bz2 -o bwa-0.7.9a.tar.bz2 && \
+ tar xjf bwa-0.7.9a.tar.bz2 && \
+ cd bwa-0.7.9a && \
+ make && \
+ (find . -executable -type f -print0 | xargs -0 -I {} mv {} /usr/local/bin) && \
+ rm -r /tmp/bwa-0.7.9a* && \
+ cd /tmp && \
+ curl --location http://downloads.sourceforge.net/project/samtools/samtools/0.1.19/samtools-0.1.19.tar.bz2 -o samtools-0.1.19.tar.bz2 && \
+ tar xjf samtools-0.1.19.tar.bz2 && \
+ cd samtools-0.1.19 && \
+ make && \
+ (find . -executable -type f -print0 | xargs -0 -I {} mv {} /usr/local/bin) && \
+ rm -r /tmp/samtools-0.1.19*
+
+USER crunch
\ No newline at end of file
--- /dev/null
+# Based on Debian Wheezy
+FROM arvados/debian:wheezy
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+RUN echo 'deb http://apt.arvados.org/ wheezy main' > /etc/apt/sources.list.d/apt.arvados.org.list
+
+RUN /usr/bin/apt-key adv --keyserver pgp.mit.edu --recv 1078ECD7
+
+RUN /usr/bin/apt-get update
+
+RUN /usr/bin/apt-get install -q -y keep
+
+# Start keep
+CMD ["/usr/bin/keep"]
fi
# some rudimentary detection for whether we need to "sudo" our docker calls
-docker=''
-if docker version > /dev/null 2>&1; then
- docker='docker'
-elif sudo docker version > /dev/null 2>&1; then
- docker='sudo docker'
-elif command -v docker > /dev/null 2>&1; then
- docker='docker'
+set +e
+docker=`which docker.io`
+if [[ "$docker" == "" ]]; then
+ docker=`which docker`
+fi
+set -e
+
+if $docker version > /dev/null 2>&1; then
+ docker="$docker"
+elif sudo $docker version > /dev/null 2>&1; then
+ docker="sudo $docker"
+elif command -v $docker > /dev/null 2>&1; then
+ docker="$docker"
else
echo >&2 "warning: either docker isn't installed, or your current user cannot run it;"
echo >&2 " this script is not likely to work as expected"
-LoadModule passenger_module /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.23/buildout/apache2/mod_passenger.so
-PassengerRoot /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.23
+LoadModule passenger_module /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.41/buildout/apache2/mod_passenger.so
+PassengerRoot /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.41
PassengerDefaultRuby /usr/local/rvm/wrappers/ruby-2.1.0/ruby
+++ /dev/null
-FROM arvados/base
-MAINTAINER Tim Pierce <twp@curoverse.com>
-
-RUN apt-get update && \
- apt-get -q -y install dpkg-dev debhelper libdbi-perl libwww-perl \
- libtest-pod-perl libtest-pod-coverage-perl libjson-perl flex \
- libgnupg-interface-perl libunix-syslog-perl libbsd-resource-perl \
- bioperl perlmagick imagemagick gnuplot libbz2-dev libfftw3-3 libfftw3-dev \
- xsltproc realpath supervisor libgpgme11-dev libcache-memcached-perl \
- libio-compress-perl
-
-# Install warehouse-apps source and build.
-# Remove failing test t/pod-coverage.t.
-RUN \
- perl -MCPAN -e 'install MogileFS::Client;' \
- -e 'install Crypt::GpgME' && \
- git clone https://github.com/curoverse/warehouse-apps.git /usr/src/warehouse-apps && \
- rm /usr/src/warehouse-apps/libwarehouse-perl/t/pod-coverage.t && \
- cd /usr/src/warehouse-apps && \
- sh install.sh /usr/local && \
- dpkg -i libwarehouse-perl*.deb && \
- /bin/mkdir -p /data/keep-0
-
-ADD supervisor.conf /etc/supervisor/conf.d/keepd.conf
-ADD generated/warehouse.conf /etc/warehouse/warehouse-client.conf
-
-# Start the supervisor.
-CMD ["/usr/bin/supervisord", "-n"]
+++ /dev/null
-[program:keepd]
-user=root
-command=/usr/src/warehouse-apps/libwarehouse-perl/server/keepd
+++ /dev/null
-#!/usr/bin/perl
-
-$Warehouse::warehouses = [
- {'name'=>'@@API_HOSTNAME@@',
- 'keep_name'=>'@@API_HOSTNAME@@',
- 'keeps' => [
- 'keep0.@@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@:25107',
- 'keep0.@@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@:25108',
- ],
- 'api_auth_token'=>'@@API_SUPERUSER_SECRET@@',
- },
-];
# Update Arvados source
RUN /bin/mkdir -p /usr/src/arvados/apps
ADD generated/workbench.tar.gz /usr/src/arvados/apps/
-ADD generated/secret_token.rb /usr/src/arvados/apps/workbench/config/initializers/secret_token.rb
-ADD generated/production.rb /usr/src/arvados/apps/workbench/config/environments/production.rb
+RUN /bin/cp /usr/src/arvados/apps/workbench/config/environments/production.rb.example /usr/src/arvados/apps/workbench/config/environments/production.rb
+ADD generated/application.yml /usr/src/arvados/apps/workbench/config/application.yml
ADD passenger.conf /etc/apache2/conf.d/passenger
touch /usr/src/arvados/apps/workbench/log/production.log && \
chmod 666 /usr/src/arvados/apps/workbench/log/production.log && \
touch /usr/src/arvados/apps/workbench/db/production.sqlite3 && \
- bundle install --gemfile=/usr/src/arvados/apps/workbench/Gemfile && \
cd /usr/src/arvados/apps/workbench && \
rake assets:precompile && \
chown -R www-data:www-data /usr/src/arvados/apps/workbench
/etc/init.d/ssh start
fi
+# Override the default API server address if necessary.
+if [[ "$API_PORT_443_TCP_ADDR" != "" ]]; then
+ sed -i "s/localhost:9900/$API_PORT_443_TCP_ADDR/" /usr/src/arvados/apps/workbench/config/application.yml
+fi
+
source /etc/apache2/envvars
/usr/sbin/apache2 -D FOREGROUND
--- /dev/null
+# Copy this file to application.yml and edit to suit.
+#
+# Consult application.default.yml for the full list of configuration
+# settings.
+#
+# The order of precedence is:
+# 1. config/environments/{RAILS_ENV}.rb (deprecated)
+# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
+# 3. Section in application.yml called "common"
+# 4. Section in application.default.yml corresponding to RAILS_ENV
+# 5. Section in application.default.yml called "common"
+
+common:
+ # At minimum, you need a nice long randomly generated secret_token here.
+ secret_token: @@WORKBENCH_SECRET@@
+
+ # You probably also want to point to your API server.
+ arvados_login_base: 'https://localhost:9900/login'
+ arvados_v1_base: 'https://localhost:9900/arvados/v1'
+ arvados_insecure_https: @@WORKBENCH_INSECURE_HTTPS@@
+
+ data_import_dir: @@WORKBENCH_DATA_IMPORT_DIR@@
+ data_export_dir: @@WORKBENCH_DATA_EXPORT_DIR@@
+
+ site_name: @@WORKBENCH_SITE_NAME@@
+ activation_contact_link: @@WORKBENCH_ACTIVATION_CONTACT_LINK@@
+
-LoadModule passenger_module /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.23/buildout/apache2/mod_passenger.so
-PassengerRoot /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.23
+LoadModule passenger_module /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.41/buildout/apache2/mod_passenger.so
+PassengerRoot /usr/local/rvm/gems/ruby-2.1.0/gems/passenger-4.0.41
PassengerDefaultRuby /usr/local/rvm/wrappers/ruby-2.1.0/ruby
s.add_runtime_dependency 'andand', '~> 1.3', '>= 1.3.3'
s.add_runtime_dependency 'oj', '~> 2.0', '>= 2.0.3'
s.add_runtime_dependency 'curb', '~> 0.8'
+ s.add_runtime_dependency('jwt', '>= 0.1.5', '< 1.0.0')
s.homepage =
'http://arvados.org'
end
EOS
end
-# read authentication data from arvados configuration file if present
-lineno = 0
-config_file = File.expand_path('~/.config/arvados/settings.conf')
-if File.exist? config_file then
- File.open(config_file, 'r').each do |line|
- lineno = lineno + 1
- # skip comments
- if line.match('^\s*#') then
- next
- end
- var, val = line.chomp.split('=', 2)
- # allow environment settings to override config files.
- if var and val
- ENV[var] ||= val
- else
- warn "#{config_file}: #{lineno}: could not parse `#{line}'"
- end
- end
-end
-
-case ARGV[0]
-when 'keep'
- ARGV.shift
- @sub = ARGV.shift
- if ['get', 'put', 'ls', 'normalize'].index @sub then
- # Native Arvados
- exec `which arv-#{@sub}`.strip, *ARGV
- elsif ['less', 'check'].index @sub then
- # wh* shims
- exec `which wh#{@sub}`.strip, *ARGV
- elsif @sub == 'docker'
- exec `which arv-keepdocker`.strip, *ARGV
- else
- puts "Usage: \n" +
- "#{$0} keep ls\n" +
- "#{$0} keep get\n" +
- "#{$0} keep put\n" +
- "#{$0} keep less\n" +
- "#{$0} keep check\n" +
- "#{$0} keep docker\n"
- end
- abort
-when 'pipeline'
- ARGV.shift
- @sub = ARGV.shift
- if ['run'].index @sub then
- exec `which arv-run-pipeline-instance`.strip, *ARGV
- else
- puts "Usage: \n" +
- "#{$0} pipeline run [...]\n" +
- "(see arv-run-pipeline-instance --help for details)\n"
- end
- abort
-when 'tag'
- ARGV.shift
- exec `which arv-tag`.strip, *ARGV
-when 'ws'
- ARGV.shift
- exec `which arv-ws`.strip, *ARGV
-end
-
-ENV['ARVADOS_API_VERSION'] ||= 'v1'
-
-if not ENV.include?('ARVADOS_API_HOST') or not ENV.include?('ARVADOS_API_TOKEN') then
- abort <<-EOS
-ARVADOS_API_HOST and ARVADOS_API_TOKEN need to be defined as environment variables.
- EOS
-end
-
begin
require 'curb'
require 'rubygems'
Please install all required gems:
- gem install activesupport andand curb google-api-client json oj trollop
+ gem install activesupport andand curb google-api-client json oj trollop yaml
EOS
end
+# Search for 'ENTRY POINT' to see where things get going
+
ActiveSupport::Inflector.inflections do |inflect|
inflect.irregular 'specimen', 'specimens'
inflect.irregular 'human', 'humans'
end
end
-# do this if you're testing with a dev server and you don't care about SSL certificate checks:
-if ENV['ARVADOS_API_HOST_INSECURE']
- suppress_warnings { OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE }
-end
-
class Google::APIClient
def discovery_document(api, version)
api = api.to_s
return @discovery_documents["#{api}:#{version}"] ||=
begin
# fetch new API discovery doc if stale
- cached_doc = File.expand_path '~/.cache/arvados/discovery_uri.json'
- if not File.exist?(cached_doc) or (Time.now - File.mtime(cached_doc)) > 86400
+ cached_doc = File.expand_path '~/.cache/arvados/discovery_uri.json' rescue nil
+
+ if cached_doc.nil? or not File.exist?(cached_doc) or (Time.now - File.mtime(cached_doc)) > 86400
response = self.execute!(:http_method => :get,
:uri => self.discovery_uri(api, version),
:authenticated => false)
- FileUtils.makedirs(File.dirname cached_doc)
- File.open(cached_doc, 'w') do |f|
- f.puts response.body
+
+ begin
+ FileUtils.makedirs(File.dirname cached_doc)
+ File.open(cached_doc, 'w') do |f|
+ f.puts response.body
+ end
+ rescue
+ return JSON.load response.body
end
end
end
end
-begin
- client = ArvadosClient.new(:host => ENV['ARVADOS_API_HOST'], :application_name => 'arvados-cli', :application_version => '1.0')
- arvados = client.discovered_api('arvados', ENV['ARVADOS_API_VERSION'])
-rescue Exception => e
- puts "Failed to connect to Arvados API server: #{e}"
- exit 1
+def init_config
+ # read authentication data from arvados configuration file if present
+ lineno = 0
+ config_file = File.expand_path('~/.config/arvados/settings.conf') rescue nil
+ if not config_file.nil? and File.exist? config_file then
+ File.open(config_file, 'r').each do |line|
+ lineno = lineno + 1
+ # skip comments
+ if line.match('^\s*#') then
+ next
+ end
+ var, val = line.chomp.split('=', 2)
+ # allow environment settings to override config files.
+ if var and val
+ ENV[var] ||= val
+ else
+ warn "#{config_file}: #{lineno}: could not parse `#{line}'"
+ end
+ end
+ end
+end
+
+subcommands = %w(keep pipeline tag ws edit)
+
+def check_subcommands client, arvados, subcommand, global_opts, remaining_opts
+ case subcommand
+ when 'keep'
+ @sub = remaining_opts.shift
+ if ['get', 'put', 'ls', 'normalize'].index @sub then
+ # Native Arvados
+ exec `which arv-#{@sub}`.strip, *remaining_opts
+ elsif ['less', 'check'].index @sub then
+ # wh* shims
+ exec `which wh#{@sub}`.strip, *remaining_opts
+ elsif @sub == 'docker'
+ exec `which arv-keepdocker`.strip, *remaining_opts
+ else
+ puts "Usage: arv keep [method] [--parameters]\n"
+ puts "Use 'arv keep [method] --help' to get more information about specific methods.\n\n"
+ puts "Available methods: ls, get, put, less, check, docker"
+ end
+ abort
+ when 'pipeline'
+ sub = remaining_opts.shift
+ if sub == 'run'
+ exec `which arv-run-pipeline-instance`.strip, *remaining_opts
+ else
+ puts "Usage: arv pipeline [method] [--parameters]\n"
+ puts "Use 'arv pipeline [method] --help' to get more information about specific methods.\n\n"
+ puts "Available methods: run"
+ end
+ abort
+ when 'tag'
+ exec `which arv-tag`.strip, *remaining_opts
+ when 'ws'
+ exec `which arv-ws`.strip, *remaining_opts
+ when 'edit'
+ arv_edit client, arvados, global_opts, remaining_opts
+ end
+end
+
+def arv_edit_save_tmp tmp
+ FileUtils::cp tmp.path, tmp.path + ".saved"
+ puts "Saved contents to " + tmp.path + ".saved"
+end
+
+def arv_edit client, arvados, global_opts, remaining_opts
+ uuid = remaining_opts.shift
+ if uuid.nil? or uuid == "-h" or uuid == "--help"
+ puts head_banner
+ puts "Usage: arv edit [uuid] [fields...]\n\n"
+ puts "Fetch the specified Arvados object, select the specified fields, \n"
+ puts "open an interactive text editor on a text representation (json or\n"
+ puts "yaml, use --format) and then update the object. Will use 'nano'\n"
+ puts "by default, customize with the EDITOR or VISUAL environment variable.\n"
+ exit 255
+ end
+
+ if not $stdout.tty?
+ puts "Not connected to a TTY, cannot run interactive editor."
+ exit 1
+ end
+
+ # determine controller
+
+ m = /([a-z0-9]{5})-([a-z0-9]{5})-([a-z0-9]{15})/.match uuid
+ if !m
+ if /^[a-f0-9]{32}/.match uuid
+ abort "Arvados collections are not editable."
+ else
+      abort "#{uuid} does not appear to be an Arvados uuid"
+ end
+ end
+
+ rsc = nil
+ arvados.discovery_document["resources"].each do |k,v|
+ klass = k.singularize.camelize
+ dig = Digest::MD5.hexdigest(klass).to_i(16).to_s(36)[-5..-1]
+ if dig == m[2]
+ rsc = k
+ end
+ end
+
+ if rsc.nil?
+ abort "Could not determine resource type #{m[2]}"
+ end
+
+ api_method = 'arvados.' + rsc + '.get'
+
+ result = client.execute(:api_method => eval(api_method),
+ :parameters => {"uuid" => uuid},
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
+ begin
+ results = JSON.parse result.body
+ rescue JSON::ParserError => e
+ abort "Failed to parse server response:\n" + e.to_s
+ end
+
+ if remaining_opts.length > 0
+ results.select! { |k, v| remaining_opts.include? k }
+ end
+
+ content = ""
+
+ case global_opts[:format]
+ when 'json'
+ content = Oj.dump(results, :indent => 1)
+ when 'yaml'
+ content = results.to_yaml
+ end
+
+ require 'tempfile'
+
+ tmp = Tempfile.new([uuid, "." + global_opts[:format]])
+ tmp.write(content)
+ tmp.close
+
+ need_edit = true
+
+ while need_edit
+ pid = Process::fork
+ if pid.nil?
+ editor ||= ENV["VISUAL"]
+ editor ||= ENV["EDITOR"]
+ editor ||= "nano"
+ exec editor, tmp.path
+ else
+ Process.wait pid
+ end
+
+ if $?.exitstatus == 0
+ tmp.open
+ newcontent = tmp.read()
+
+ newobj = {}
+ begin
+ case global_opts[:format]
+ when 'json'
+ newobj = Oj.load(newcontent)
+ when 'yaml'
+ newobj = YAML.load(newcontent)
+ end
+ need_edit = false
+ rescue Exception => e
+ puts "Parse error! " + e.to_s
+ n = 1
+ newcontent.each_line do |line|
+ puts "#{n.to_s.rjust 4} #{line}"
+ n += 1
+ end
+ puts "\nTry again (y/n)? "
+ yn = "X"
+ while not ["y", "Y", "n", "N"].include?(yn)
+ yn = $stdin.read 1
+ end
+ if yn == 'n' or yn == 'N'
+ arv_edit_save_tmp tmp
+ abort
+ end
+ end
+ else
+ puts "Editor exited with status #{$?.exitstatus}"
+ exit $?.exitstatus
+ end
+ end
+
+ begin
+ if newobj != results
+ api_method = 'arvados.' + rsc + '.update'
+ dumped = Oj.dump(newobj)
+
+ begin
+ result = client.execute(:api_method => eval(api_method),
+ :parameters => {"uuid" => uuid},
+ :body => { rsc.singularize => dumped },
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
+ rescue Exception => e
+ puts "Error communicating with server, error was #{e}"
+ puts "Update body was:"
+ puts dumped
+ arv_edit_save_tmp tmp
+ abort
+ end
+
+ begin
+ results = JSON.parse result.body
+ rescue JSON::ParserError => e
+ abort "Failed to parse server response:\n" + e.to_s
+ end
+
+ if result.response.status != 200
+ puts "Update failed. Server responded #{result.response.status}: #{results['errors']} "
+ puts "Update body was:"
+ puts dumped
+ arv_edit_save_tmp tmp
+ abort
+ end
+ else
+ puts "Object is unchanged, did not update."
+ end
+ ensure
+ tmp.close(true)
+ end
+
+ exit 0
end
def to_boolean(s)
!!(s =~ /^(true|t|yes|y|1)$/i)
end
+def head_banner
+ "Arvados command line client\n"
+end
+
def help_methods(discovery_document, resource, method=nil)
- banner = "\n"
- banner += "The #{resource} resource type supports the following methods:"
+ banner = head_banner
+ banner += "Usage: arv #{resource} [method] [--parameters]\n"
+ banner += "Use 'arv #{resource} [method] --help' to get more information about specific methods.\n\n"
+ banner += "The #{resource} resource supports the following methods:"
banner += "\n\n"
discovery_document["resources"][resource.pluralize]["methods"].
each do |k,v|
banner += "\n"
STDERR.puts banner
- if not method.nil? and method != '--help' then
- Trollop::die ("Unknown method #{method.inspect} " +
- "for resource #{resource.inspect}")
+ if not method.nil? and method != '--help' and method != '-h' then
+ abort "Unknown method #{method.inspect} " +
+ "for resource #{resource.inspect}"
end
exit 255
end
-def help_resources(discovery_document, resource)
- banner = "\n"
- banner += "This Arvados instance supports the following resource types:"
- banner += "\n\n"
- discovery_document["resources"].each do |k,v|
- description = ''
- resource_info = discovery_document["schemas"][k.singularize.capitalize]
- if resource_info and resource_info.include?('description')
- # add only the first line of the discovery doc description
- description = ' ' + resource_info["description"].split("\n").first.chomp
- end
- banner += " #{sprintf("%30s",k.singularize)}#{description}\n"
- end
- banner += "\n"
- STDERR.puts banner
+def help_resources(option_parser, discovery_document, resource)
+ option_parser.educate
if not resource.nil? and resource != '--help' then
Trollop::die "Unknown resource type #{resource.inspect}"
exit 255
end
-def parse_arguments(discovery_document)
+def parse_arguments(discovery_document, subcommands)
resource_types = Array.new()
discovery_document["resources"].each do |k,v|
resource_types << k.singularize
end
- global_opts = Trollop::options do
+ resource_types += subcommands
+
+ option_parser = Trollop::Parser.new do
version __FILE__
- banner "arv: the Arvados CLI tool"
+ banner head_banner
+ banner "Usage: arv [--flags] subcommand|resource [method] [--parameters]"
+ banner ""
+ banner "Available flags:"
+
opt :dry_run, "Don't actually do anything", :short => "-n"
opt :verbose, "Print some things on stderr"
opt :format,
:type => :string,
:default => 'json'
opt :short, "Return only UUIDs (equivalent to --format=uuid)"
- opt :resources, "Display list of resources known to this Arvados instance."
+
+ banner ""
+ banner "Use 'arv subcommand|resource --help' to get more information about a particular command or resource."
+ banner ""
+ banner "Available subcommands: #{subcommands.join(', ')}"
+ banner ""
+
+ banner "Available resources: #{discovery_document['resources'].keys.map { |k| k.singularize }.join(', ')}"
+
+ banner ""
+ banner "Additional options:"
+
conflicts :short, :format
stop_on resource_types
end
+ global_opts = Trollop::with_standard_exception_handling option_parser do
+ o = option_parser.parse ARGV
+ end
+
unless %w(json yaml uuid).include?(global_opts[:format])
$stderr.puts "#{$0}: --format must be one of json, yaml or uuid."
$stderr.puts "Use #{$0} --help for more information."
end
resource = ARGV.shift
- if global_opts[:resources] or not resource_types.include?(resource)
- help_resources(discovery_document, resource)
- end
- method = ARGV.shift
- if not (discovery_document["resources"][resource.pluralize]["methods"].
- include?(method))
- help_methods(discovery_document, resource, method)
- end
+ if not subcommands.include? resource
+ if not resource_types.include?(resource)
+ puts "Resource or subcommand '#{resource}' is not recognized.\n\n" if !resource.nil?
+ help_resources(option_parser, discovery_document, resource)
+ end
+
+ method = ARGV.shift
+ if not (discovery_document["resources"][resource.pluralize]["methods"].
+ include?(method))
+ help_methods(discovery_document, resource, method)
+ end
- discovered_params = discovery_document\
+ discovered_params = discovery_document\
["resources"][resource.pluralize]\
["methods"][method]["parameters"]
- method_opts = Trollop::options do
- discovered_params.each do |k,v|
- opts = Hash.new()
- opts[:type] = v["type"].to_sym if v.include?("type")
- if [:datetime, :text, :object, :array].index opts[:type]
- opts[:type] = :string # else trollop bork
+ method_opts = Trollop::options do
+ banner head_banner
+ banner "Usage: arv #{resource} #{method} [--parameters]"
+ banner ""
+ banner "This method supports the following parameters:"
+ banner ""
+ discovered_params.each do |k,v|
+ opts = Hash.new()
+ opts[:type] = v["type"].to_sym if v.include?("type")
+ if [:datetime, :text, :object, :array].index opts[:type]
+ opts[:type] = :string # else trollop bork
+ end
+ opts[:default] = v["default"] if v.include?("default")
+ opts[:default] = v["default"].to_i if opts[:type] == :integer
+ opts[:default] = to_boolean(v["default"]) if opts[:type] == :boolean
+ opts[:required] = true if v.include?("required") and v["required"]
+ description = ''
+ description = ' ' + v["description"] if v.include?("description")
+ opt k.to_sym, description, opts
end
- opts[:default] = v["default"] if v.include?("default")
- opts[:default] = v["default"].to_i if opts[:type] == :integer
- opts[:default] = to_boolean(v["default"]) if opts[:type] == :boolean
- opts[:required] = true if v.include?("required") and v["required"]
- description = ''
- description = ' ' + v["description"] if v.include?("description")
- opt k.to_sym, description, opts
- end
- body_object = discovery_document["resources"][resource.pluralize]["methods"][method]["request"]
- if body_object and discovered_params[resource].nil?
- is_required = true
- if body_object["required"] == false
- is_required = false
+
+ body_object = discovery_document["resources"][resource.pluralize]["methods"][method]["request"]
+ if body_object and discovered_params[resource].nil?
+ is_required = true
+ if body_object["required"] == false
+ is_required = false
+ end
+ opt resource.to_sym, "#{resource} (request body)", {
+ required: is_required,
+ type: :string
+ }
end
- opt resource.to_sym, "#{resource} (request body)", {
- required: is_required,
- type: :string
- }
end
- end
- discovered_params.each do |k,v|
- k = k.to_sym
- if ['object', 'array'].index(v["type"]) and method_opts.has_key? k
- if method_opts[k].andand.match /^\//
- method_opts[k] = File.open method_opts[k], 'rb' do |f| f.read end
+ discovered_params.each do |k,v|
+ k = k.to_sym
+ if ['object', 'array'].index(v["type"]) and method_opts.has_key? k
+ if method_opts[k].andand.match /^\//
+ method_opts[k] = File.open method_opts[k], 'rb' do |f| f.read end
+ end
end
end
end
+
return resource, method, method_opts, global_opts, ARGV
end
-resource_schema, method, method_opts, global_opts, remaining_opts = parse_arguments(arvados.discovery_document)
+#
+# ENTRY POINT
+#
+
+init_config
+
+ENV['ARVADOS_API_VERSION'] ||= 'v1'
+
+if not ENV.include?('ARVADOS_API_HOST') or not ENV.include?('ARVADOS_API_TOKEN') then
+ abort <<-EOS
+ARVADOS_API_HOST and ARVADOS_API_TOKEN need to be defined as environment variables.
+ EOS
+end
+
+# do this if you're testing with a dev server and you don't care about SSL certificate checks:
+if ENV['ARVADOS_API_HOST_INSECURE']
+ suppress_warnings { OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE }
+end
+
+begin
+ client = ArvadosClient.new(:host => ENV['ARVADOS_API_HOST'], :application_name => 'arvados-cli', :application_version => '1.0')
+ arvados = client.discovered_api('arvados', ENV['ARVADOS_API_VERSION'])
+rescue Exception => e
+ puts "Failed to connect to Arvados API server: #{e}"
+ exit 1
+end
+
+# Parse arguments here
+resource_schema, method, method_opts, global_opts, remaining_opts = parse_arguments(arvados.discovery_document, subcommands)
+
+check_subcommands client, arvados, resource_schema, global_opts, remaining_opts
+
controller = resource_schema.pluralize
api_method = 'arvados.' + controller + '.' + method
--- /dev/null
+../../python/bin/arv-keepdocker
\ No newline at end of file
-../../python/bin/arv-mount
\ No newline at end of file
+../../../services/fuse/bin/arv-mount
\ No newline at end of file
abort "#{$0}: syntax error: --instance cannot be combined with --template or --submit."
end
elsif not $options[:template]
- abort "#{$0}: syntax error: you must supply a --template or --instance."
+ puts "error: you must supply a --template or --instance."
+ p.educate
+ abort
end
if $options[:run_here] == $options[:submit]
:repository => c[:repository],
:nondeterministic => c[:nondeterministic],
:output_is_persistent => c[:output_is_persistent] || false,
+ :runtime_constraints => c[:runtime_constraints],
:owner_uuid => owner_uuid,
- # TODO: Delete the following three attributes when
- # supporting pre-20140418 API servers is no longer
- # important. New API servers take these as flags that
- # control behavior of create, rather than job attributes.
- :minimum_script_version => c[:minimum_script_version],
- :exclude_script_versions => c[:exclude_minimum_script_versions],
- :no_reuse => @options[:no_reuse] || c[:nondeterministic],
}, {
# This is the right place to put these attributes when
# dealing with new API servers.
:minimum_script_version => c[:minimum_script_version],
:exclude_script_versions => c[:exclude_minimum_script_versions],
:find_or_create => !(@options[:no_reuse] || c[:nondeterministic]),
+ :filters => c[:filters]
})
if job
debuglog "component #{cname} new job #{job[:uuid]}"
ended += 1
if c[:job][:success] == true
succeeded += 1
- elsif c[:job][:success] == false
+ elsif c[:job][:success] == false or c[:job][:cancelled_at]
failed += 1
end
end
$ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
$ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
$ENV{"TASK_WORK"} = $ENV{"JOB_WORK"}."/$id.$$";
+ $ENV{"HOME"} = $ENV{"TASK_WORK"};
$ENV{"TASK_KEEPMOUNT"} = $ENV{"TASK_WORK"}.".keep";
$ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
$ENV{"CRUNCH_NODE_SLOTS"} = $slot[$childslot]->{node}->{ncpus};
my $command =
"if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
+ ."&& chmod og+wrx $ENV{TASK_WORK}"
."&& cd $ENV{CRUNCH_TMP} ";
if ($build_script)
{
$command .=
q{$(ip -o address show scope global |
gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
- foreach my $env_key (qw(CRUNCH_SRC CRUNCH_TMP TASK_KEEPMOUNT))
- {
- $command .= "-v \Q$ENV{$env_key}:$ENV{$env_key}:rw\E ";
- }
+ $command .= "-v \Q$ENV{TASK_WORK}:/tmp/crunch-job:rw\E ";
+ $command .= "-v \Q$ENV{CRUNCH_SRC}:/tmp/crunch-src:ro\E ";
+ $command .= "-v \Q$ENV{TASK_KEEPMOUNT}:/mnt:ro\E ";
+ $command .= "-e \QHOME=/tmp/crunch-job\E ";
while (my ($env_key, $env_val) = each %ENV)
{
if ($env_key =~ /^(ARVADOS|JOB|TASK)_/) {
- $command .= "-e \Q$env_key=$env_val\E ";
+ if ($env_key eq "TASK_WORK") {
+ $command .= "-e \QTASK_WORK=/tmp/crunch-job\E ";
+ }
+ elsif ($env_key eq "TASK_KEEPMOUNT") {
+ $command .= "-e \QTASK_KEEPMOUNT=/mnt\E ";
+ }
+ elsif ($env_key eq "CRUNCH_SRC") {
+ $command .= "-e \QCRUNCH_SRC=/tmp/crunch-src\E ";
+ }
+ else {
+ $command .= "-e \Q$env_key=$env_val\E ";
+ }
}
}
$command .= "\Q$docker_hash\E ";
+ $command .= "stdbuf -o0 -e0 ";
+ $command .= "/tmp/crunch-src/crunch_scripts/" . $Job->{"script"};
} else {
- $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 "
+ # Non-docker run
+ $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 ";
+ $command .= "stdbuf -o0 -e0 ";
+ $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
}
- $command .= "stdbuf -o0 -e0 ";
- $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+
my @execargs = ('bash', '-c', $command);
srun (\@srunargs, \@execargs, undef, $build_script_to_send);
exit (111);
use Arvados;
$arv = Arvados->new(apiHost => 'arvados.local');
-
+
my $instances = $arv->{'pipeline_instances'}->{'list'}->execute();
print "UUID is ", $instances->{'items'}->[0]->{'uuid'}, "\n";
-
+
$uuid = 'eiv0u-arx5y-2c5ovx43zw90gvh';
$instance = $arv->{'pipeline_instances'}->{'get'}->execute('uuid' => $uuid);
print "ETag is ", $instance->{'etag'}, "\n";
-
+
$instance->{'active'} = 1;
$instance->{'name'} = '';
$instance->save();
=cut
package Arvados;
+
+use Net::SSL (); # From Crypt-SSLeay
+BEGIN {
+ $Net::HTTPS::SSL_SOCKET_CLASS = "Net::SSL"; # Force use of Net::SSL
+}
+
use JSON;
-use Data::Dumper;
-use IO::Socket::SSL;
use Carp;
use Arvados::ResourceAccessor;
use Arvados::ResourceMethod;
use Arvados::ResourceProxy;
use Arvados::ResourceProxyList;
use Arvados::Request;
+use Data::Dumper;
$Arvados::VERSION = 0.1;
$config = load_config_file("$ENV{HOME}/.config/arvados/settings.conf");
- $self->{'authToken'} ||=
+ $self->{'authToken'} ||=
$ENV{ARVADOS_API_TOKEN} || $config->{ARVADOS_API_TOKEN};
$self->{'apiHost'} ||=
$ENV{ARVADOS_API_HOST} || $config->{ARVADOS_API_HOST};
+ $self->{'noVerifyHostname'} ||=
+ $ENV{ARVADOS_API_HOST_INSECURE};
+
$self->{'apiProtocolScheme'} ||=
$ENV{ARVADOS_API_PROTOCOL_SCHEME} ||
$config->{ARVADOS_API_PROTOCOL_SCHEME};
{
my $self = shift;
local $ENV{'PERL_LWP_SSL_VERIFY_HOSTNAME'};
- if ($opts{'noVerifyHostname'} || ($host =~ /\.local$/)) {
+ if ($self->{'noVerifyHostname'} || ($host =~ /\.local$/)) {
$ENV{'PERL_LWP_SSL_VERIFY_HOSTNAME'} = 0;
}
Arvados::Request->new();
blocks[b[arvados.LOCATOR]] = streamoffset
streamoffset += b[arvados.BLOCKSIZE]
+ if len(stream_tokens) == 1:
+ stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)
+
for f in sortedfiles:
current_span = None
fout = f.replace(' ', '\\040')
for f in s.all_files():
yield f
- def manifest_text(self):
+ def manifest_text(self, strip=False):
self._populate()
- return self._manifest_text
+ if strip:
+ m = ''.join([StreamReader(stream).manifest_text(strip=True) for stream in self._streams])
+ return m
+ else:
+ return self._manifest_text
class CollectionWriter(object):
KEEP_BLOCK_SIZE = 2**26
path, stream_name, max_manifest_depth = self._queued_trees[0]
make_dirents = (util.listdir_recursive if (max_manifest_depth == 0)
else os.listdir)
- self._queue_dirents(stream_name, make_dirents(path))
+ d = make_dirents(path)
+ if len(d) > 0:
+ self._queue_dirents(stream_name, d)
+ else:
+ self._queued_trees.popleft()
def _queue_file(self, source, filename=None):
assert (self._queued_file is None), "tried to queue more than one file"
for x in fields[1:-1] ]
clean += fields[0] + ' ' + ' '.join(locators) + ' ' + fields[-1] + "\n"
return clean
-
+
def manifest_text(self):
self.finish_current_stream()
manifest = ''
manifest += ' ' + ' '.join("%d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040')) for sfile in stream[2])
manifest += "\n"
- #print 'writer',manifest
- #print 'after reader',CollectionReader(manifest).manifest_text()
-
- return CollectionReader(manifest).manifest_text()
+ if len(manifest) > 0:
+ return CollectionReader(manifest).manifest_text()
+ else:
+ return ""
def data_locators(self):
ret = []
block_size = data_locators[i][BLOCKSIZE]
block_start = data_locators[i][OFFSET]
block_end = block_start + block_size
-
+
while i < len(data_locators):
locator, block_size, block_start = data_locators[i]
block_end = block_start + block_size
dc = bz2.BZ2Decompressor()
return self.decompress(lambda segment: dc.decompress(segment), size)
elif re.search('\.gz$', self._name):
- dc = zlib.decompressobj(16+zlib.MAX_WBITS)
+ dc = zlib.decompressobj(16+zlib.MAX_WBITS)
return self.decompress(lambda segment: dc.decompress(dc.unconsumed_tail + segment), size)
else:
return self.readall(size)
self._keep = keep
else:
self._keep = Keep.global_client_object()
-
+
streamoffset = 0L
# parse stream
for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self._data_locators, start, size):
data += self._keep.get(locator)[segmentoffset:segmentoffset+segmentsize]
return data
-
- def manifest_text(self):
+
+ def manifest_text(self, strip=False):
manifest_text = [self.name().replace(' ', '\\040')]
- manifest_text.extend([d[LOCATOR] for d in self._data_locators])
- manifest_text.extend([' '.join(["{}:{}:{}".format(seg[LOCATOR], seg[BLOCKSIZE], f.name().replace(' ', '\\040'))
+ if strip:
+ for d in self._data_locators:
+ m = re.match(r'^[0-9a-f]{32}\+\d+', d[LOCATOR])
+ manifest_text.append(m.group(0))
+ else:
+ manifest_text.extend([d[LOCATOR] for d in self._data_locators])
+ manifest_text.extend([' '.join(["{}:{}:{}".format(seg[LOCATOR], seg[BLOCKSIZE], f.name().replace(' ', '\\040'))
for seg in f.segments])
for f in self._files.values()])
return ' '.join(manifest_text) + '\n'
kwargs.setdefault('close_fds', True)
kwargs.setdefault('shell', False)
p = subprocess.Popen(execargs, **kwargs)
- stdoutdata, stderrdata = p.communicate(None)
+ if kwargs['stdout'] == subprocess.PIPE:
+ stdoutdata, stderrdata = p.communicate(None)
+ else:
+ p.wait()
if p.returncode != 0:
raise errors.CommandFailedError(
"run_command %s exit %d:\n%s" %
break
zip_file.write(buf)
zip_file.close()
-
+
p = subprocess.Popen(["unzip",
"-q", "-o",
"-d", path,
description='Read manifest on standard input and put normalized manifest on standard output.')
parser.add_argument('--extract', type=str, help="The file to extract from the input manifest")
+parser.add_argument('--strip', action='store_true', help="Strip authorization tokens")
args = parser.parse_args()
import arvados
r = sys.stdin.read()
-
+
cr = arvados.CollectionReader(r)
if args.extract:
if fn in s.files():
sys.stdout.write(s.files()[fn].as_manifest())
else:
- sys.stdout.write(cr.manifest_text())
+ sys.stdout.write(cr.manifest_text(args.strip))
cwriter.write_file('/dev/null')
cwriter.cache_state()
self.assertTrue(self.cache.load())
- self.assertEquals(". 0:0:null\n", cwriter.manifest_text())
+ self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
def test_writer_works_without_cache(self):
cwriter = arv_put.ArvPutCollectionWriter()
cwriter.write_file('/dev/null')
- self.assertEquals(". 0:0:null\n", cwriter.manifest_text())
+ self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
def test_writer_resumes_from_cache(self):
cwriter = arv_put.ArvPutCollectionWriter(self.cache)
cwriter.write_file(testfile.name, 'test')
new_writer = arv_put.ArvPutCollectionWriter.from_cache(self.cache)
new_writer.write_file('/dev/null')
- self.assertEquals(". 0:0:null\n", new_writer.manifest_text())
+ self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", new_writer.manifest_text())
def test_new_writer_from_empty_cache(self):
cwriter = arv_put.ArvPutCollectionWriter.from_cache(self.cache)
cwriter.write_file('/dev/null')
- self.assertEquals(". 0:0:null\n", cwriter.manifest_text())
+ self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
def test_writer_resumable_after_arbitrary_bytes(self):
cwriter = arv_put.ArvPutCollectionWriter(self.cache)
cw.start_new_file('zero.txt')
cw.write('')
- self.assertEqual(cw.manifest_text(), ". 0:0:zero.txt\n")
+ self.assertEqual(cw.manifest_text(), ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:zero.txt\n")
self.check_manifest_file_sizes(cw.manifest_text(), [0])
cw = arvados.CollectionWriter()
cw.start_new_file('zero.txt')
s.add_dependency('activesupport', '>= 3.2.13')
s.add_dependency('json', '>= 1.7.7')
s.add_dependency('andand')
+ s.add_runtime_dependency('jwt', '>= 0.1.5', '< 1.0.0')
s.homepage =
'http://arvados.org'
end
gem 'themes_for_rails'
-gem 'arvados-cli', '>= 0.1.20140328152103'
+gem 'arvados-cli', '>= 0.1.20140703110309'
# pg_power lets us use partial indexes in schema.rb in Rails 3
gem 'pg_power'
addressable (2.3.6)
andand (1.3.3)
arel (3.0.3)
- arvados (0.1.20140513131358)
+ arvados (0.1.20140703110309)
activesupport (>= 3.2.13)
andand
google-api-client (~> 0.6.3)
json (>= 1.7.7)
- arvados-cli (0.1.20140513131358)
+ jwt (>= 0.1.5, < 1.0.0)
+ arvados-cli (0.1.20140703110309)
activesupport (~> 3.2, >= 3.2.13)
andand (~> 1.3, >= 1.3.3)
arvados (~> 0.1.0)
curb (~> 0.8)
google-api-client (~> 0.6.3)
json (~> 1.7, >= 1.7.7)
+ jwt (>= 0.1.5, < 1.0.0)
oj (~> 2.0, >= 2.0.3)
trollop (~> 2.0)
autoparse (0.3.3)
DEPENDENCIES
acts_as_api
andand
- arvados-cli (>= 0.1.20140328152103)
+ arvados-cli (>= 0.1.20140703110309)
coffee-rails (~> 3.2.0)
database_cleaner
faye-websocket
include LoadParam
include RecordFilters
- ERROR_ACTIONS = [:render_error, :render_not_found]
-
-
respond_to :json
protect_from_forgery
+ ERROR_ACTIONS = [:render_error, :render_not_found]
+
before_filter :respond_with_json_by_default
before_filter :remote_ip
before_filter :load_read_auths
attr_accessor :resource_attrs
+ begin
+ rescue_from(Exception,
+ ArvadosModel::PermissionDeniedError,
+ :with => :render_error)
+ rescue_from(ActiveRecord::RecordNotFound,
+ ActionController::RoutingError,
+ ActionController::UnknownController,
+ AbstractController::ActionNotFound,
+ :with => :render_not_found)
+ end
+
def index
@objects.uniq!(&:id) if @select.nil? or @select.include? "id"
if params[:eager] and params[:eager] != '0' and params[:eager] != 0 and params[:eager] != ''
end
end
- begin
- rescue_from Exception,
- :with => :render_error
- rescue_from ActiveRecord::RecordNotFound,
- :with => :render_not_found
- rescue_from ActionController::RoutingError,
- :with => :render_not_found
- rescue_from ActionController::UnknownController,
- :with => :render_not_found
- rescue_from AbstractController::ActionNotFound,
- :with => :render_not_found
- rescue_from ArvadosModel::PermissionDeniedError,
- :with => :render_error
- end
-
def render_404_if_no_object
render_not_found "Object not found" if !@object
end
errors = [e.inspect]
end
status = e.respond_to?(:http_status) ? e.http_status : 422
- render json: { errors: errors }, status: status
+ send_error(*errors, status: status)
end
def render_not_found(e=ActionController::RoutingError.new("Path not found"))
logger.error e.inspect
- render json: { errors: ["Path not found"] }, status: 404
+ send_error("Path not found", status: 404)
end
protected
+ def send_error(*args)
+ if args.last.is_a? Hash
+ err = args.pop
+ else
+ err = {}
+ end
+ err[:errors] ||= args
+ err[:error_token] = [Time.now.utc.to_i, "%08x" % rand(16 ** 8)].join("+")
+ status = err.delete(:status) || 422
+ logger.error "Error #{err[:error_token]}: #{status}"
+ render json: err, status: status
+ end
+
def find_objects_for_index
@objects ||= model_class.readable_by(*@read_users)
apply_where_limit_order_params
def require_login
if not current_user
respond_to do |format|
- format.json {
- render :json => { errors: ['Not logged in'] }.to_json, status: 401
- }
- format.html {
- redirect_to '/auth/joshid'
- }
+ format.json { send_error("Not logged in", status: 401) }
+ format.html { redirect_to '/auth/joshid' }
end
false
end
def admin_required
unless current_user and current_user.is_admin
- render :json => { errors: ['Forbidden'] }.to_json, status: 403
+ send_error("Forbidden", status: 403)
end
end
def require_auth_scope
if @read_auths.empty?
if require_login != false
- render :json => { errors: ['Forbidden'] }.to_json, status: 403
+ send_error("Forbidden", status: 403)
end
false
end
def current_api_client_is_trusted
unless Thread.current[:api_client].andand.is_trusted
- render :json => { errors: ['Forbidden: this API client cannot manipulate other clients\' access tokens.'] }.to_json, status: 403
+ send_error('Forbidden: this API client cannot manipulate other clients\' access tokens.',
+ status: 403)
end
end
end
def create
[:repository, :script, :script_version, :script_parameters].each do |r|
if !resource_attrs[r]
- return render json: {
- :errors => ["#{r} attribute must be specified"]
- }, status: :unprocessable_entity
+ return send_error("#{r} attribute must be specified",
+ status: :unprocessable_entity)
end
end
def check_uuid_kind uuid, kind
if kind and ArvadosModel::resource_class_for_uuid(uuid).andand.kind != kind
- render :json => { errors: ["'#{kind}' does not match uuid '#{uuid}', expected '#{ArvadosModel::resource_class_for_uuid(uuid).andand.kind}'"] }.to_json, status: 422
+ send_error("'#{kind}' does not match uuid '#{uuid}', expected '#{ArvadosModel::resource_class_for_uuid(uuid).andand.kind}'",
+ status: 422)
nil
else
true
end
timestamp = opts[:expire]
else
- timestamp = Time.now.to_i + (opts[:ttl] || 600)
+ timestamp = Time.now.to_i + (opts[:ttl] || 1209600)
end
timestamp_hex = timestamp.to_s(16)
# => "53163cb4"
end
def groups_i_can(verb)
- self.group_permissions.select { |uuid, mask| mask[verb] }.keys
+ my_groups = self.group_permissions.select { |uuid, mask| mask[verb] }.keys
+ if verb == :read
+ my_groups << anonymous_group_uuid
+ end
+ my_groups
end
def can?(actions)
--- /dev/null
+class AnonymousGroup < ActiveRecord::Migration
+ include CurrentApiClient
+
+ def up
+ # create the anonymous group and user
+ anonymous_group
+ anonymous_user
+ end
+
+ def down
+ act_as_system_user do
+ anonymous_user.destroy
+ anonymous_group.destroy
+ end
+ end
+
+end
#
# It's strongly recommended to check this file into your version control system.
-ActiveRecord::Schema.define(:version => 20140611173003) do
+ActiveRecord::Schema.define(:version => 20140627210837) do
#
# It is invoked by `rake db:seed` and `rake db:setup`.
-# These two methods would create the system user and group objects on
-# demand later anyway, but it's better form to create them up front.
+# These methods would create the system and anonymous users/groups on
+# demand later anyway, but it's better form to create them up front.
include CurrentApiClient
system_user
system_group
+anonymous_group
+anonymous_user
'000000000000000'].join('-')
end
+ def anonymous_group_uuid
+ [Server::Application.config.uuid_prefix,
+ Group.uuid_prefix,
+ 'anonymouspublic'].join('-')
+ end
+
+ def anonymous_user_uuid
+ [Server::Application.config.uuid_prefix,
+ User.uuid_prefix,
+ 'anonymouspublic'].join('-')
+ end
+
def system_user
if not $system_user
real_current_user = Thread.current[:user]
Thread.current[:user] = system_user
end
end
+
+ def anonymous_group
+ if not $anonymous_group
+ act_as_system_user do
+ ActiveRecord::Base.transaction do
+ $anonymous_group = Group.
+ where(uuid: anonymous_group_uuid).first_or_create do |g|
+ g.update_attributes(name: "Anonymous group",
+ description: "Anonymous group")
+ end
+ end
+ end
+ end
+ $anonymous_group
+ end
+
+ def anonymous_user
+ if not $anonymous_user
+ act_as_system_user do
+ $anonymous_user = User.where('uuid=?', anonymous_user_uuid).first
+ if !$anonymous_user
+ $anonymous_user = User.new(uuid: anonymous_user_uuid,
+ is_active: false,
+ is_admin: false,
+ email: 'anonymouspublic',
+ first_name: 'anonymouspublic',
+ last_name: 'anonymouspublic')
+ $anonymous_user.save!
+ $anonymous_user.reload
+ end
+
+ group_perms = Link.where(tail_uuid: anonymous_user_uuid,
+ head_uuid: anonymous_group_uuid,
+ link_class: 'permission',
+ name: 'can_read')
+
+ if !group_perms.any?
+ group_perm = Link.create!(tail_uuid: anonymous_user_uuid,
+ head_uuid: anonymous_group_uuid,
+ link_class: 'permission',
+ name: 'can_read')
+ end
+ end
+ end
+ $anonymous_user
+ end
+
end
#!/usr/bin/env ruby
+
if ENV["CRUNCH_DISPATCH_LOCKFILE"]
lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
require File.dirname(__FILE__) + '/../config/boot'
require File.dirname(__FILE__) + '/../config/environment'
-def cancel_stale_jobs
- Job.running.each do |jobrecord|
- f = Log.where("object_uuid=?", jobrecord.uuid).limit(1).order("created_at desc").first
- if f
- age = (Time.now - f.created_at)
- if age > 300
- $stderr.puts "dispatch: failing orphan job #{jobrecord.uuid}, last log is #{age} seconds old"
- # job is marked running, but not known to crunch-dispatcher, and
- # hasn't produced any log entries for 5 minutes, so mark it as failed.
- jobrecord.running = false
- jobrecord.cancelled_at ||= Time.now
- jobrecord.finished_at ||= Time.now
- if jobrecord.success.nil?
- jobrecord.success = false
+class CancelJobs
+ include ApplicationHelper
+
+ def cancel_stale_jobs
+ act_as_system_user do
+ Job.running.each do |jobrecord|
+ f = Log.where("object_uuid=?", jobrecord.uuid).limit(1).order("created_at desc").first
+ if f
+ age = (Time.now - f.created_at)
+ if age > 300
+ $stderr.puts "dispatch: failing orphan job #{jobrecord.uuid}, last log is #{age} seconds old"
+ # job is marked running, but not known to crunch-dispatcher, and
+ # hasn't produced any log entries for 5 minutes, so mark it as failed.
+ jobrecord.running = false
+ jobrecord.cancelled_at ||= Time.now
+ jobrecord.finished_at ||= Time.now
+ if jobrecord.success.nil?
+ jobrecord.success = false
+ end
+ jobrecord.save!
+ end
end
- jobrecord.save!
end
end
end
end
-cancel_stale_jobs
+CancelJobs.new.cancel_stale_jobs
cmd_args = nil
case Server::Application.config.crunch_job_wrapper
when :none
+ if @running.size > 0
+ # Don't run more than one at a time.
+ return
+ end
cmd_args = []
when :slurm_immediate
nodelist = nodes_available_for_job(job)
next
end
- $stderr.puts `cd #{arvados_internal.shellescape} && git fetch --no-tags #{src_repo.shellescape} && git tag #{job.uuid.shellescape} #{job.script_version.shellescape}`
+ $stderr.puts `cd #{arvados_internal.shellescape} && git fetch-pack --all #{src_repo.shellescape} && git tag #{job.uuid.shellescape} #{job.script_version.shellescape}`
cmd_args << crunch_job_bin
cmd_args << '--job-api-token'
--- /dev/null
+#!/usr/bin/env ruby
+
+# Get or Create an anonymous user token.
+# If the --get option is used, an existing anonymous user token is returned;
+# if none exists, a new token is created and returned.
+
+require 'trollop'
+
+opts = Trollop::options do
+ banner ''
+ banner "Usage: get_anonymous_user_token "
+ banner ''
+ opt :get, <<-eos
+Get an existing anonymous user token. If no such token exists \
+or if this option is omitted, a new token is created and returned.
+ eos
+end
+
+get_existing = opts[:get]
+
+require File.dirname(__FILE__) + '/../config/environment'
+
+include ApplicationHelper
+act_as_system_user
+
+def create_api_client_auth
+ api_client_auth = ApiClientAuthorization.
+ new(user: anonymous_user,
+ api_client_id: 0,
+ expires_at: Time.now + 100.years,
+ scopes: ['GET /'])
+ api_client_auth.save!
+ api_client_auth.reload
+end
+
+if get_existing
+ api_client_auth = ApiClientAuthorization.
+ where('user_id=?', anonymous_user.id.to_i).
+ where('expires_at>?', Time.now).
+ select { |auth| auth.scopes == ['GET /'] }.
+ first
+end
+
+# Create a new token if --get was not given, or if no existing token was found.
+if !api_client_auth
+ api_client_auth = create_api_client_auth
+end
+
+# print it to the console
+puts api_client_auth.api_token
scopes: ["GET /arvados/v1/api_client_authorizations",
"POST /arvados/v1/api_client_authorizations"]
+active_readonly:
+ api_client: untrusted
+ user: active
+ api_token: activereadonlyabcdefghijklmnopqrstuvwxyz1234568790
+ expires_at: 2038-01-01 00:00:00
+ scopes: ["GET /"]
+
spectator:
api_client: untrusted
user: spectator
user_id: 1234567
api_token: tewfa58099sndckyqhlgd37za6e47o6h03r9l1vpll23hudm8b
expires_at: 2038-01-01 00:00:00
+
+anonymous:
+ api_client: untrusted
+ user: anonymous
+ api_token: 4kg6k6lzmp9kj4cpkcoxie964cmvjahbt4fod9zru44k4jqdmi
+ expires_at: 2038-01-01 00:00:00
+ scopes: ["GET /"]
modified_at: 2014-05-03 18:50:08 -0400
updated_at: 2014-05-03 18:50:08 -0400
name: Owned by bad group a
+
+anonymous_group:
+ uuid: zzzzz-j7d0g-anonymouspublic
+ owner_uuid: zzzzz-tpzed-000000000000000
+ name: Anonymous group
+ description: Anonymous group
+
+anonymously_accessible_project:
+ uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ name: Unrestricted public data
+ group_class: project
+ description: An anonymously accessible project
head_uuid: ~
properties: {}
+anonymous_group_can_read_anonymously_accessible_project:
+ uuid: zzzzz-o0j2j-15gpzezqjg4bc4z
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-05-30 14:30:00.184389725 Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-05-30 14:30:00.184019565 Z
+ updated_at: 2014-05-30 14:30:00.183829316 Z
+ link_class: permission
+ name: can_read
+ tail_uuid: zzzzz-j7d0g-anonymouspublic
+ head_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+ properties: {}
+
+user_agreement_in_anonymously_accessible_project:
+ uuid: zzzzz-o0j2j-k0ukddp35mt6ok1
+ owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+ created_at: 2014-06-13 20:42:26 -0800
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_at: 2014-06-13 20:42:26 -0800
+ updated_at: 2014-06-13 20:42:26 -0800
+ link_class: name
+ name: GNU General Public License, version 3
+ tail_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+ head_uuid: b519d9cb706a29fc7ea24dbea2f05851+249025
+ properties: {}
+
+user_agreement_readable_by_anonymously_accessible_project:
+ uuid: zzzzz-o0j2j-o5ds5gvhkztdc8h
+ owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+ created_at: 2014-06-13 20:42:26 -0800
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_at: 2014-06-13 20:42:26 -0800
+ updated_at: 2014-06-13 20:42:26 -0800
+ link_class: permission
+ name: can_read
+
active_user_permission_to_docker_image_collection:
uuid: zzzzz-o0j2j-dp1d8395ldqw33s
owner_uuid: zzzzz-tpzed-000000000000000
is_admin: false
prefs: {}
+anonymous:
+ uuid: zzzzz-tpzed-anonymouspublic
+ email: anonymouspublic
+ first_name: anonymouspublic
+ last_name: anonymouspublic
+ is_active: false
+ is_admin: false
+ prefs: {}
--- /dev/null
+require 'test_helper'
+
+class ApplicationControllerTest < ActionController::TestCase
+ BAD_UUID = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+
+ def now_timestamp
+ Time.now.utc.to_i
+ end
+
+ setup do
+ # These tests are meant to check behavior in ApplicationController.
+ # We instantiate a small concrete controller for convenience.
+ @controller = Arvados::V1::SpecimensController.new
+ @start_stamp = now_timestamp
+ end
+
+ def check_error_token
+ token = json_response['error_token']
+ assert_not_nil token
+ token_time = token.split('+', 2).first.to_i
+ assert_operator(token_time, :>=, @start_stamp, "error token too old")
+ assert_operator(token_time, :<=, now_timestamp, "error token too new")
+ end
+
+ def check_404(errmsg="Path not found")
+ assert_response 404
+ assert_equal([errmsg], json_response['errors'])
+ check_error_token
+ end
+
+ test "requesting nonexistent object returns 404 error" do
+ authorize_with :admin
+ get(:show, id: BAD_UUID)
+ check_404
+ end
+
+ test "requesting object without read permission returns 404 error" do
+ authorize_with :spectator
+ get(:show, id: specimens(:owned_by_active_user).uuid)
+ check_404
+ end
+
+ test "submitting bad object returns error" do
+ authorize_with :spectator
+ post(:create, specimen: {badattr: "badvalue"})
+ assert_response 422
+ check_error_token
+ end
+end
assert @uninvited_user.can? :write=>"#{@uninvited_user.uuid}"
assert @uninvited_user.can? :manage=>"#{@uninvited_user.uuid}"
- assert @uninvited_user.groups_i_can(:read).size == 0, "inactive and uninvited user should not be able read any groups"
+ assert @uninvited_user.groups_i_can(:read).size == 1, "inactive and uninvited user can only read anonymous user group"
+ assert @uninvited_user.groups_i_can(:read).first.ends_with? 'anonymouspublic' , "inactive and uninvited user can only read anonymous user group"
assert @uninvited_user.groups_i_can(:write).size == 0, "inactive and uninvited user should not be able write to any groups"
assert @uninvited_user.groups_i_can(:manage).size == 0, "inactive and uninvited user should not be able manage any groups"
end
if op == "Read" {
disk[device].last_read = disk[device].next_read
disk[device].next_read = next
- if disk[device].last_read > 0 {
+ if disk[device].last_read > 0 && (disk[device].next_read != disk[device].last_read) {
stderr <- fmt.Sprintf("crunchstat: blkio.io_service_bytes %s read %v", device, disk[device].next_read-disk[device].last_read)
}
}
if op == "Write" {
disk[device].last_write = disk[device].next_write
disk[device].next_write = next
- if disk[device].last_write > 0 {
+ if disk[device].last_write > 0 && (disk[device].next_write != disk[device].last_write) {
stderr <- fmt.Sprintf("crunchstat: blkio.io_service_bytes %s write %v", device, disk[device].next_write-disk[device].last_write)
}
}
import re
import apiclient
import json
+import logging
from time import time
from llfuse import FUSEError
try:
self.update()
except apiclient.errors.HttpError as e:
- print e
+ logging.debug(e)
def __getitem__(self, item):
self.checkupdate()
cwd = cwd._entries[part]
for k, v in s.files().items():
cwd._entries[k] = self.inodes.add_entry(StreamReaderFile(cwd.inode, v))
- print "found"
self.fresh()
+ return True
except Exception as detail:
- print("%s: error: %s" % (self.collection_locator,detail) )
+ logging.debug("arv-mount %s: error: %s" % (self.collection_locator,detail))
+ return False
class MagicDirectory(Directory):
'''A special directory that logically contains the set of all extant keep
if k in self._entries:
return True
try:
- if arvados.Keep.get(k):
+ e = self.inodes.add_entry(CollectionDirectory(self.inode, self.inodes, k))
+ if e.update():
+ self._entries[k] = e
return True
else:
return False
except Exception as e:
- #print 'exception keep', e
+ logging.debug('arv-mount exception keep %s', e)
return False
def __getitem__(self, item):
- if item not in self._entries:
- self._entries[item] = self.inodes.add_entry(CollectionDirectory(self.inode, self.inodes, item))
- return self._entries[item]
-
+ if item in self:
+ return self._entries[item]
+ else:
+ raise KeyError("No collection with id " + item)
class TagsDirectory(Directory):
'''A special directory that contains as subdirectories all tags visible to the user.'''
def update(self):
tags = self.api.links().list(filters=[['link_class', '=', 'tag']], select=['name'], distinct = True).execute()
- self.merge(tags['items'],
- lambda i: i['name'],
- lambda a, i: a.tag == i,
- lambda i: TagDirectory(self.inode, self.inodes, self.api, i['name'], poll=self._poll, poll_time=self._poll_time))
+ if "items" in tags:
+ self.merge(tags['items'],
+ lambda i: i['name'],
+ lambda a, i: a.tag == i,
+ lambda i: TagDirectory(self.inode, self.inodes, self.api, i['name'], poll=self._poll, poll_time=self._poll_time))
class TagDirectory(Directory):
'''A special directory that contains as subdirectories all collections visible
so request handlers do not run concurrently unless the lock is explicitly released
with llfuse.lock_released.'''
- def __init__(self, uid, gid):
+ def __init__(self, uid, gid, debug=False):
super(Operations, self).__init__()
+ if debug:
+ logging.basicConfig(level=logging.DEBUG)
+ logging.info("arv-mount debug enabled")
+
self.inodes = Inodes()
self.uid = uid
self.gid = gid
return entry
def lookup(self, parent_inode, name):
- #print "lookup: parent_inode", parent_inode, "name", name
+ logging.debug("arv-mount lookup: parent_inode %i name %s", parent_inode, name)
inode = None
if name == '.':
return fh
def read(self, fh, off, size):
- #print "read", fh, off, size
+ logging.debug("arv-mount read %i %i %i", fh, off, size)
if fh in self._filehandles:
handle = self._filehandles[fh]
else:
del self._filehandles[fh]
def opendir(self, inode):
- #print "opendir: inode", inode
+ logging.debug("arv-mount opendir: inode %i", inode)
if inode in self.inodes:
p = self.inodes[inode]
return fh
def readdir(self, fh, off):
- #print "readdir: fh", fh, "off", off
+ logging.debug("arv-mount readdir: fh %i off %i", fh, off)
if fh in self._filehandles:
handle = self._filehandles[fh]
else:
raise llfuse.FUSEError(errno.EBADF)
- #print "handle.entry", handle.entry
+ logging.debug("arv-mount handle.entry %s", handle.entry)
e = off
while e < len(handle.entry):
#!/usr/bin/env python
-from arvados_fuse import *
-import arvados
-import subprocess
import argparse
+import arvados
import daemon
+import os
import signal
+import subprocess
+
+from arvados_fuse import *
if __name__ == '__main__':
# Handle command line parameters
args = parser.parse_args()
# Create the request handler
- operations = Operations(os.getuid(), os.getgid())
+ operations = Operations(os.getuid(), os.getgid(), args.debug)
- if args.groups:
- api = arvados.api('v1')
- e = operations.inodes.add_entry(GroupsDirectory(llfuse.ROOT_INODE, operations.inodes, api))
- elif args.tags:
+ if args.debug:
+ arvados.config.settings()['ARVADOS_DEBUG'] = 'true'
+
+ try:
api = arvados.api('v1')
- e = operations.inodes.add_entry(TagsDirectory(llfuse.ROOT_INODE, operations.inodes, api))
- elif args.collection != None:
- # Set up the request handler with the collection at the root
- e = operations.inodes.add_entry(CollectionDirectory(llfuse.ROOT_INODE, operations.inodes, args.collection))
- else:
- # Set up the request handler with the 'magic directory' at the root
- operations.inodes.add_entry(MagicDirectory(llfuse.ROOT_INODE, operations.inodes))
+
+ if args.groups:
+ e = operations.inodes.add_entry(GroupsDirectory(llfuse.ROOT_INODE, operations.inodes, api))
+ elif args.tags:
+ e = operations.inodes.add_entry(TagsDirectory(llfuse.ROOT_INODE, operations.inodes, api))
+ elif args.collection != None:
+ # Set up the request handler with the collection at the root
+ e = operations.inodes.add_entry(CollectionDirectory(llfuse.ROOT_INODE, operations.inodes, args.collection))
+ else:
+ # Set up the request handler with the 'magic directory' at the root
+ operations.inodes.add_entry(MagicDirectory(llfuse.ROOT_INODE, operations.inodes))
+ except Exception as ex:
+ print("arv-mount: %s" % ex)
+ exit(1)
# FUSE options, see mount.fuse(8)
opts = [optname for optname in ['allow_other', 'debug']
exit(rc)
else:
- if args.foreground:
- # Initialize the fuse connection
- llfuse.init(operations, args.mountpoint, opts)
- llfuse.main()
- else:
- # Initialize the fuse connection
- llfuse.init(operations, args.mountpoint, opts)
- with daemon.DaemonContext():
- llfuse.main()
+ os.chdir(args.mountpoint)
+ if not args.foreground:
+ daemon_ctx = daemon.DaemonContext(working_directory='.')
+ daemon_ctx.open()
+ llfuse.init(operations, '.', opts)
+ llfuse.main()
flag.IntVar(
&permission_ttl_sec,
"permission-ttl",
- 300,
+ 1209600,
"Expiration time (in seconds) for newly generated permission "+
"signatures.")
flag.BoolVar(