# Skip require_thread_api_token if this is a show action
# for an object uuid that supports anonymous access.
skip_around_action :require_thread_api_token, if: proc { |ctrl|
- Rails.configuration.anonymous_user_token and
+ !Rails.configuration.Users.AnonymousUserToken.empty? and
'show' == ctrl.action_name and
params['uuid'] and
model_class.in?([Collection, Group, Job, PipelineInstance, PipelineTemplate])
def missing_required_profile?
missing_required = false
- profile_config = Rails.configuration.user_profile_form_fields
- if current_user && profile_config
+ profile_config = Rails.configuration.Workbench.UserProfileFormFields
+ if current_user && !profile_config.empty?
current_user_profile = current_user.prefs[:profile]
- profile_config.kind_of?(Array) && profile_config.andand.each do |entry|
- if entry['required']
+ profile_config.each do |k, entry|
+ if entry['Required']
if !current_user_profile ||
- !current_user_profile[entry['key'].to_sym] ||
- current_user_profile[entry['key'].to_sym].empty?
+ !current_user_profile[k] ||
+ current_user_profile[k].empty?
missing_required = true
break
end
end
def select_theme
- return Rails.configuration.arvados_theme
+ return Rails.configuration.Workbench.Theme
end
@@notification_tests = []
@@notification_tests.push lambda { |controller, current_user|
- return nil if Rails.configuration.shell_in_a_box_url
+ return nil if Rails.configuration.Services.WebShell.ExternalURL != URI("")
AuthorizedKey.limit(1).where(authorized_user_uuid: current_user.uuid).each do
return nil
end
helper_method :user_notifications
def user_notifications
@errors = nil if !defined?(@errors)
- return [] if @errors or not current_user.andand.is_active or not Rails.configuration.show_user_notifications
+ return [] if @errors or not current_user.andand.is_active or not Rails.configuration.Workbench.ShowUserNotifications
@notifications ||= @@notification_tests.map do |t|
t.call(self, current_user)
end.compact
include ActionController::Live
skip_around_action :require_thread_api_token, if: proc { |ctrl|
- Rails.configuration.anonymous_user_token and
+ !Rails.configuration.Users.AnonymousUserToken.empty? and
'show' == ctrl.action_name
}
skip_around_action(:require_thread_api_token,
# Otherwise, it's impossible to know whether any other request succeeded
# because of the reader token.
coll = nil
- tokens = [(Rails.configuration.anonymous_user_token || nil),
+ tokens = [(if !Rails.configuration.Users.AnonymousUserToken.empty? then
+ Rails.configuration.Users.AnonymousUserToken else nil end),
params[:reader_token],
Thread.current[:arvados_api_token]].compact
usable_token = find_usable_token(tokens) do
opts = {}
if usable_token == params[:reader_token]
opts[:path_token] = usable_token
- elsif usable_token == Rails.configuration.anonymous_user_token
+ elsif usable_token == Rails.configuration.Users.AnonymousUserToken
# Don't pass a token at all
else
# We pass the current user's real token only if it's necessary
def keep_web_url(uuid_or_pdh, file, opts)
munged_id = uuid_or_pdh.sub('+', '-')
- fmt = {uuid_or_pdh: munged_id}
- tmpl = Rails.configuration.keep_web_url
- if Rails.configuration.keep_web_download_url and
- (!tmpl or opts[:disposition] == 'attachment')
+ tmpl = Rails.configuration.Services.WebDAV.ExternalURL.to_s
+
+ if Rails.configuration.Services.WebDAVDownload.ExternalURL != URI("") and
+ (tmpl.empty? or opts[:disposition] == 'attachment')
# Prefer the attachment-only-host when we want an attachment
# (and when there is no preview link configured)
- tmpl = Rails.configuration.keep_web_download_url
- elsif not Rails.configuration.trust_all_content
- check_uri = URI.parse(tmpl % fmt)
+ tmpl = Rails.configuration.Services.WebDAVDownload.ExternalURL.to_s
+ elsif not Rails.configuration.Workbench.TrustAllContent
+ check_uri = URI.parse(tmpl.sub("*", munged_id))
if opts[:query_token] and
+ (check_uri.host.nil? or (
not check_uri.host.start_with?(munged_id + "--") and
- not check_uri.host.start_with?(munged_id + ".")
+ not check_uri.host.start_with?(munged_id + ".")))
# We're about to pass a token in the query string, but
# keep-web can't accept that safely at a single-origin URL
# template (unless it's -attachment-only-host).
- tmpl = Rails.configuration.keep_web_download_url
- if not tmpl
+ tmpl = Rails.configuration.Services.WebDAVDownload.ExternalURL.to_s
+ if tmpl.empty?
raise ArgumentError, "Download precluded by site configuration"
end
logger.warn("Using download link, even though inline content " \
end
end
- if tmpl == Rails.configuration.keep_web_download_url
+ if tmpl == Rails.configuration.Services.WebDAVDownload.ExternalURL.to_s
# This takes us to keep-web's -attachment-only-host so there is
# no need to add ?disposition=attachment.
opts.delete :disposition
end
- uri = URI.parse(tmpl % fmt)
+ uri = URI.parse(tmpl.sub("*", munged_id))
+ if tmpl.index("*").nil?
+ uri.path = "/c=#{munged_id}"
+ end
uri.path += '/' unless uri.path.end_with? '/'
if opts[:path_token]
uri.path += 't=' + opts[:path_token] + '/'
class ContainerRequestsController < ApplicationController
skip_around_action :require_thread_api_token, if: proc { |ctrl|
- Rails.configuration.anonymous_user_token and
+ !Rails.configuration.Users.AnonymousUserToken.empty? and
'show' == ctrl.action_name
}
class ContainersController < ApplicationController
skip_around_action :require_thread_api_token, if: proc { |ctrl|
- Rails.configuration.anonymous_user_token and
+ !Rails.configuration.Users.AnonymousUserToken.empty? and
'show' == ctrl.action_name
}
mgmt_token = Rails.configuration.ManagementToken
auth_header = request.headers['Authorization']
- if !mgmt_token
+ if mgmt_token.empty?
render :json => {:errors => "disabled"}, :status => 404
elsif !auth_header
render :json => {:errors => "authorization required"}, :status => 401
class JobsController < ApplicationController
skip_around_action :require_thread_api_token, if: proc { |ctrl|
- Rails.configuration.anonymous_user_token and
+ !Rails.configuration.Users.AnonymousUserToken.empty? and
'show' == ctrl.action_name
}
def logs
@logs = @object.
- stderr_log_query(Rails.configuration.running_job_log_records_to_fetch).
+ stderr_log_query(Rails.configuration.Workbench.RunningJobLogRecordsToFetch).
map { |e| e.serializable_hash.merge({ 'prepend' => true }) }
respond_to do |format|
format.json { render json: @logs }
skip_before_action :find_object_by_uuid, only: :compare
before_action :find_objects_by_uuid, only: :compare
skip_around_action :require_thread_api_token, if: proc { |ctrl|
- Rails.configuration.anonymous_user_token and
+ !Rails.configuration.Users.AnonymousUserToken.empty? and
'show' == ctrl.action_name
}
class PipelineTemplatesController < ApplicationController
skip_around_action :require_thread_api_token, if: proc { |ctrl|
- Rails.configuration.anonymous_user_token and
+ !Rails.configuration.Users.AnonymousUserToken.empty? and
'show' == ctrl.action_name
}
class ProjectsController < ApplicationController
before_action :set_share_links, if: -> { defined? @object and @object}
skip_around_action :require_thread_api_token, if: proc { |ctrl|
- Rails.configuration.anonymous_user_token and
+ !Rails.configuration.Users.AnonymousUserToken.empty? and
%w(show tab_counts public).include? ctrl.action_name
}
end
def public # Yes 'public' is the name of the action for public projects
- return render_not_found if not Rails.configuration.anonymous_user_token or not Rails.configuration.enable_public_projects_page
- @objects = using_specific_api_token Rails.configuration.anonymous_user_token do
+ return render_not_found if Rails.configuration.Users.AnonymousUserToken.empty? or not Rails.configuration.Workbench.EnablePublicProjectsPage
+ @objects = using_specific_api_token Rails.configuration.Users.AnonymousUserToken do
Group.where(group_class: 'project').order("modified_at DESC")
end
end
respond_to do |format|
if current_user.andand.is_admin
setup_params = {}
- setup_params[:send_notification_email] = "#{Rails.configuration.send_user_setup_notification_email}"
+ setup_params[:send_notification_email] = "#{Rails.configuration.Mail.SendUserSetupNotificationEmail}"
if params['user_uuid'] && params['user_uuid'].size>0
setup_params[:uuid] = params['user_uuid']
end
end
def webshell
- return render_not_found if not Rails.configuration.shell_in_a_box_url
- @webshell_url = Rails.configuration.shell_in_a_box_url % {
- uuid: @object.uuid,
- hostname: @object.hostname,
- }
+ # Gate on Services.WebShell.ExternalURL — the same key the shell-access
+ # notification check uses — instead of Workbench.ShellInABoxURL, which is
+ # not part of the migrated Services-based config schema.
+ return render_not_found if Rails.configuration.Services.WebShell.ExternalURL == URI("")
+ webshell_url = URI(Rails.configuration.Services.WebShell.ExternalURL)
+ # A "*" in the host is a per-VM wildcard; otherwise route by path.
+ if webshell_url.host.index("*") != nil
+ webshell_url.host = webshell_url.host.sub("*", @object.hostname)
+ else
+ webshell_url.path = "/#{@object.hostname}"
+ end
+ @webshell_url = webshell_url.to_s
render layout: false
end
class WorkUnitsController < ApplicationController
skip_around_action :require_thread_api_token, if: proc { |ctrl|
- Rails.configuration.anonymous_user_token and
+ !Rails.configuration.Users.AnonymousUserToken.empty? and
'show_child_component' == ctrl.action_name
}
class WorkflowsController < ApplicationController
skip_around_action :require_thread_api_token, if: proc { |ctrl|
- Rails.configuration.anonymous_user_token and
+ !Rails.configuration.Users.AnonymousUserToken.empty? and
'show' == ctrl.action_name
}
end
def current_api_host
- Rails.configuration.arvados_v1_base.gsub(/https?:\/\/|\/arvados\/v1/, '')
+ "#{Rails.configuration.Services.Controller.ExternalURL.hostname}:#{Rails.configuration.Services.Controller.ExternalURL.port}"
end
def current_uuid_prefix
- current_api_host[0..4]
+ Rails.configuration.ClusterID
end
def render_markup(markup)
display_value = link.name
elsif value_info[:link_name]
display_value = value_info[:link_name]
- elsif value_info[:selection_name]
- display_value = value_info[:selection_name]
+ elsif (sn = value_info[:selection_name]) && sn != ""
+ display_value = sn
end
end
if (attr == :components) and (subattr.size > 2)
elsif (file_type.raw_media_type == "text") || (file_type.raw_media_type == "image")
true
elsif (file_type.raw_media_type == "application") &&
- (Rails.configuration.application_mimetypes_with_view_icon.include? (file_type.sub_type))
+ Rails.configuration.Workbench.ApplicationMimetypesWithViewIcon[file_type.sub_type]
true
else
false
# SPDX-License-Identifier: AGPL-3.0
class IssueReporter < ActionMailer::Base
- default from: Rails.configuration.issue_reporter_email_from
- default to: Rails.configuration.issue_reporter_email_to
+ default from: Rails.configuration.Mail.IssueReporterEmailFrom
+ default to: Rails.configuration.Mail.IssueReporterEmailTo
def send_report(user, params)
@user = user
# SPDX-License-Identifier: AGPL-3.0
class RequestShellAccessReporter < ActionMailer::Base
- default from: Rails.configuration.email_from
- default to: Rails.configuration.support_email_address
+ default from: Rails.configuration.Mail.EmailFrom
+ default to: Rails.configuration.Mail.SupportEmailAddress
def send_request(user, params)
@user = user
404 => NotFoundException,
}
- @@profiling_enabled = Rails.configuration.profiling_enabled
+ @@profiling_enabled = Rails.configuration.Workbench.ProfilingEnabled
@@discovery = nil
# An API client object suitable for handling API requests on behalf
if not @api_client
@client_mtx.synchronize do
@api_client = HTTPClient.new
- @api_client.ssl_config.timeout = Rails.configuration.api_client_connect_timeout
- @api_client.connect_timeout = Rails.configuration.api_client_connect_timeout
- @api_client.receive_timeout = Rails.configuration.api_client_receive_timeout
- if Rails.configuration.arvados_insecure_https
+ @api_client.ssl_config.timeout = Rails.configuration.Workbench.APIClientConnectTimeout
+ @api_client.connect_timeout = Rails.configuration.Workbench.APIClientConnectTimeout
+ @api_client.receive_timeout = Rails.configuration.Workbench.APIClientReceiveTimeout
+ if Rails.configuration.TLS.Insecure
@api_client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
else
# Use system CA certificates
.select { |ca_path| File.readable?(ca_path) }
.each { |ca_path| @api_client.ssl_config.add_trust_ca(ca_path) }
end
- if Rails.configuration.api_response_compression
+ if Rails.configuration.Workbench.APIResponseCompression
@api_client.transparent_gzip_decompression = true
end
end
# Clean up /arvados/v1/../../discovery/v1 to /discovery/v1
url.sub! '/arvados/v1/../../', '/'
+ anon_tokens = [Rails.configuration.Users.AnonymousUserToken].select { |x| !x.empty? && include_anon_token }
+
query = {
'reader_tokens' => ((tokens[:reader_tokens] ||
Thread.current[:reader_tokens] ||
[]) +
- (include_anon_token ? [Rails.configuration.anonymous_user_token] : [])).to_json,
+ anon_tokens).to_json,
}
if !data.nil?
data.each do |k,v|
end
def arvados_login_url(params={})
- if Rails.configuration.respond_to? :arvados_login_base
- uri = Rails.configuration.arvados_login_base
- else
- uri = self.arvados_v1_base.sub(%r{/arvados/v\d+.*}, '/login')
- end
- if params.size > 0
- uri += '?' << params.collect { |k,v|
- CGI.escape(k.to_s) + '=' + CGI.escape(v.to_s)
- }.join('&')
+ uri = URI.parse(Rails.configuration.Services.Controller.ExternalURL.to_s)
+ if Rails.configuration.testing_override_login_url
+ uri = URI(Rails.configuration.testing_override_login_url)
end
+ uri.path = "/login"
+ # Only attach a query string when there are params; an unconditional
+ # assignment would set query to "" and emit a URL with a dangling "?"
+ # (the old implementation only appended "?" for non-empty params).
+ uri.query = URI.encode_www_form(params) unless params.empty?
+ uri.to_s
end
def arvados_logout_url(params={})
end
def arvados_v1_base
- Rails.configuration.arvados_v1_base
+ # workaround Ruby 2.3 bug, can't duplicate URI objects
+ # https://github.com/httprb/http/issues/388
+ u = URI.parse(Rails.configuration.Services.Controller.ExternalURL.to_s)
+ u.path = "/arvados/v1"
+ u.to_s
end
def discovery
# non-zero.
def run_git *gitcmd
if not @workdir
- workdir = File.expand_path uuid+'.git', Rails.configuration.repository_cache
+ workdir = File.expand_path uuid+'.git', Rails.configuration.Workbench.RepositoryCache
if not File.exists? workdir
- FileUtils.mkdir_p Rails.configuration.repository_cache
+ FileUtils.mkdir_p Rails.configuration.Workbench.RepositoryCache
[['git', 'init', '--bare', workdir],
].each do |cmd|
system *cmd
'!cred(){ cat >/dev/null; if [ "$1" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred'],
['git', '--git-dir', @workdir, 'config', '--local',
'http.sslVerify',
- Rails.configuration.arvados_insecure_https ? 'false' : 'true'],
+ Rails.configuration.TLS.Insecure ? 'false' : 'true'],
].each do |cmd|
system *cmd
raise GitCommandError.new($?.to_s) unless $?.exitstatus == 0
<%= Thread.current[:arvados_api_token] %>
EOF
export ARVADOS_API_TOKEN ARVADOS_API_HOST=<%= current_api_host %>
-<% if Rails.configuration.arvados_insecure_https %>
+<% if Rails.configuration.TLS.Insecure %>
export ARVADOS_API_HOST_INSECURE=true
<% else %>
unset ARVADOS_API_HOST_INSECURE
<i class="glyphicon fa-fw glyphicon-search"></i> Search all projects ...
<% end %>
</li>
- <% if Rails.configuration.anonymous_user_token and Rails.configuration.enable_public_projects_page %>
+ <% if !Rails.configuration.Users.AnonymousUserToken.empty? and Rails.configuration.Workbench.EnablePublicProjectsPage %>
<li role="menuitem"><a href="/projects/public" role="menuitem"><i class="fa fa-fw fa-list"></i> Browse public projects </a>
</li>
<% end %>
or
-<%= mail_to(Rails.configuration.support_email_address, "email us",
+<%= mail_to(Rails.configuration.Mail.SupportEmailAddress, "email us",
subject: "Workbench problem report",
body: "Problem while viewing page #{request.url}") %>
<%
generated_at = arvados_api_client.discovery[:generatedAt]
- arvados_base = Rails.configuration.arvados_v1_base
- support_email = Rails.configuration.support_email_address
+ arvados_base = Rails.configuration.Services.Controller.ExternalURL.to_s + "/arvados/v1"
+ support_email = Rails.configuration.Mail.SupportEmailAddress
additional_info = {}
additional_info['Current location'] = params[:current_location]
choose_filters = {
"groups" => [["group_class", "=", "role"]],
}
- if not Rails.configuration.anonymous_user_token
+ if Rails.configuration.Users.AnonymousUserToken.empty?
# It would be ideal to filter out the anonymous group by UUID,
# but that's not readily doable. Workbench can't generate the
# UUID for a != filter, because it can't introspect the API
<head>
<meta charset="utf-8">
<title>
- <%= coll_name %> / <%= Rails.configuration.site_name %>
+ <%= coll_name %> / <%= Rails.configuration.Workbench.SiteName %>
</title>
<meta name="description" content="">
<meta name="author" content="">
<li>
Also known as a “workflow” in other systems
</li><li>
- A list of well-documented public pipelines can be found in the upper right corner by clicking the <span class="fa fa-lg fa-question-circle"></span> > <a href="<%= Rails.configuration.arvados_public_data_doc_url %>">Public Pipelines and Datasets</a>
+ A list of well-documented public pipelines can be found in the upper right corner by clicking the <span class="fa fa-lg fa-question-circle"></span> > <a href="<%= Rails.configuration.Workbench.ArvadosPublicDataDocURL %>">Public Pipelines and Datasets</a>
</li><li>
Pro-tip: A Pipeline contains Jobs which contain Tasks
</li><li>
<pre id="event_log_div"
class="arv-log-event-listener arv-log-event-handler-append-logs arv-job-log-window"
data-object-uuid="<%= @object.uuid %>"
- ><%= @object.stderr_log_lines(Rails.configuration.running_job_log_records_to_fetch).join("\n") %>
+ ><%= @object.stderr_log_lines(Rails.configuration.Workbench.RunningJobLogRecordsToFetch).join("\n") %>
</pre>
<%# Applying a long throttle suppresses the auto-refresh of this
<% logcollection = Collection.find @object.log %>
<% if logcollection %>
var log_size = <%= logcollection.files[0][2] %>
- var log_maxbytes = <%= Rails.configuration.log_viewer_max_bytes %>;
+ var log_maxbytes = <%= Rails.configuration.Workbench.LogViewerMaxBytes %>;
var logcollection_url = '<%=j url_for logcollection %>/<%=j logcollection.files[0][1] %>';
$("#log-viewer-download-url").attr('href', logcollection_url);
$("#log-viewer-download-pane").show();
<meta charset="utf-8">
<title>
<% if content_for? :page_title %>
- <%= yield :page_title %> / <%= Rails.configuration.site_name %>
+ <%= yield :page_title %> / <%= Rails.configuration.Workbench.SiteName %>
<% else %>
- <%= Rails.configuration.site_name %>
+ <%= Rails.configuration.Workbench.SiteName %>
<% end %>
</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="author" content="">
<% if current_user %>
<% content_for :js do %>
- window.defaultSession = <%=raw({baseURL: Rails.configuration.arvados_v1_base.sub(/\/arvados\/v1$/, '/'), token: Thread.current[:arvados_api_token], user: current_user}.to_json)%>
+ window.defaultSession = <%=raw({baseURL: Rails.configuration.Services.Controller.ExternalURL.to_s, token: Thread.current[:arvados_api_token], user: current_user}.to_json)%>
<% end %>
<% end %>
<% if current_user and $arvados_api_client.discovery[:websocketUrl] %>
social graph that search engines can use. http://ogp.me/ %>
<meta property="og:type" content="article" />
<meta property="og:url" content="<%= request.url %>" />
- <meta property="og:site_name" content="<%= Rails.configuration.site_name %>" />
+ <meta property="og:site_name" content="<%= Rails.configuration.Workbench.SiteName %>" />
<% if defined?(@object) && @object %>
<% if @object.respond_to?(:name) and @object.name.present? %>
<meta property="og:title" content="<%= @object.name%>" />
<%= yield :head %>
<%= javascript_tag do %>
angular.module('Arvados').value('arvadosApiToken', '<%=Thread.current[:arvados_api_token]%>');
- angular.module('Arvados').value('arvadosDiscoveryUri', '<%= Rails.configuration.arvados_v1_base.sub '/arvados/v1', '/discovery/v1/apis/arvados/v1/rest' %>');
+ angular.module('Arvados').value('arvadosDiscoveryUri', '<%= Rails.configuration.Services.Controller.ExternalURL.to_s + '/discovery/v1/apis/arvados/v1/rest' %>');
<%= yield :js %>
<% end %>
<style>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
- <% site_name = Rails.configuration.site_name.downcase rescue Rails.application.class.parent_name %>
+ <% site_name = Rails.configuration.Workbench.SiteName.downcase rescue Rails.application.class.parent_name %>
<% if current_user %>
<a class="navbar-brand" href="/" data-push=true><%= site_name %></a>
<% else %>
<% if current_user %>
<% if current_user.is_active %>
- <% if Rails.configuration.multi_site_search %>
+ <% if !Rails.configuration.Workbench.MultiSiteSearch.empty? %>
<li>
<form class="navbar-form">
<%=
- target = Rails.configuration.multi_site_search
- if target == true
+ target = Rails.configuration.Workbench.MultiSiteSearch
+ if target == "true"
target = {controller: 'search', action: 'index'}
end
link_to("Multi-site search", target, {class: 'btn btn-default'}) %>
</li>
<% if current_user.is_active %>
<li role="menuitem"><a href="/projects/<%=current_user.uuid%>" role="menuitem"><i class="fa fa-lg fa-home fa-fw"></i> Home project </a></li>
- <% if Rails.configuration.composer_url %>
+ <% if Rails.configuration.Services.Composer.ExternalURL != URI("") %>
<li role="menuitem">
- <form action="<%= Rails.configuration.composer_url %>" method="GET">
+ <form action="<%= Rails.configuration.Services.Composer.ExternalURL.to_s %>" method="GET">
<input type="hidden" name="api_token" value="<%= Thread.current[:arvados_api_token] %>" />
<button role="menuitem" type="submit">
<i class="fa fa-lg fa-share-alt fa-fw"></i> Workflow Composer
</form>
</li>
<% end %>
- <% if Rails.configuration.workbench2_url %>
+ <% if Rails.configuration.Services.Workbench2.ExternalURL != URI("") %>
<li role="menuitem">
<%
- wb2_url = Rails.configuration.workbench2_url
+ wb2_url = Rails.configuration.Services.Workbench2.ExternalURL.to_s
wb2_url += '/' if wb2_url[-1] != '/'
wb2_url += 'token'
%>
<i class="fa fa-lg fa-terminal fa-fw"></i> Virtual machines
<% end %>
</li>
- <% if Rails.configuration.repositories %>
+ <% if Rails.configuration.Workbench.Repositories %>
<li role="menuitem"><a href="/repositories" role="menuitem"><i class="fa fa-lg fa-code-fork fa-fw"></i> Repositories </a></li>
<% end -%>
<li role="menuitem"><a href="/current_token" role="menuitem"><i class="fa fa-lg fa-ticket fa-fw"></i> Current token</a></li>
<% end %>
</li>
<li role="menuitem"><a href="/users/link_account" role="menuitem"><i class="fa fa-lg fa-link fa-fw"></i> Link account </a></li>
- <% if Rails.configuration.user_profile_form_fields %>
+ <% if !Rails.configuration.Workbench.UserProfileFormFields.empty? %>
<li role="menuitem"><a href="/users/<%=current_user.uuid%>/profile" role="menuitem"><i class="fa fa-lg fa-user fa-fw"></i> Manage profile</a></li>
<% end %>
<% end %>
<li role="presentation" class="dropdown-header">
Admin Settings
</li>
- <% if Rails.configuration.repositories %>
+ <% if Rails.configuration.Workbench.Repositories %>
<li role="menuitem"><a href="/repositories">
<i class="fa fa-lg fa-code-fork fa-fw"></i> Repositories
</a></li>
</li>
<% end %>
<% else %>
- <% if Rails.configuration.anonymous_user_token and Rails.configuration.enable_public_projects_page %>
+ <% if !Rails.configuration.Users.AnonymousUserToken.empty? and Rails.configuration.Workbench.EnablePublicProjectsPage %>
<li><%= link_to 'Browse public projects', "/projects/public" %></li>
<% end %>
<li class="dropdown hover-dropdown login-menu">
<li role="presentation" class="dropdown-header">
Help
</li>
- <% if Rails.configuration.enable_getting_started_popup %>
+ <% if Rails.configuration.Workbench.EnableGettingStartedPopup %>
<li>
<%= link_to raw('<i class="fa fa-fw fa-info"></i> Getting Started ...'), "#",
{'data-toggle' => "modal", 'data-target' => '#getting-started-modal-window'} %>
</li>
<% end %>
- <% if Rails.configuration.arvados_public_data_doc_url %>
- <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> Public Pipelines and Data sets'), "#{Rails.configuration.arvados_public_data_doc_url}", target: "_blank" %></li>
+ <% if !Rails.configuration.Workbench.ArvadosPublicDataDocURL.empty? %>
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> Public Pipelines and Data sets'), "#{Rails.configuration.Workbench.ArvadosPublicDataDocURL}", target: "_blank" %></li>
<% end %>
- <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> Tutorials and User guide'), "#{Rails.configuration.arvados_docsite}/user", target: "_blank" %></li>
- <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> API Reference'), "#{Rails.configuration.arvados_docsite}/api", target: "_blank" %></li>
- <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> SDK Reference'), "#{Rails.configuration.arvados_docsite}/sdk", target: "_blank" %></li>
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> Tutorials and User guide'), "#{Rails.configuration.Workbench.ArvadosDocsite}/user", target: "_blank" %></li>
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> API Reference'), "#{Rails.configuration.Workbench.ArvadosDocsite}/api", target: "_blank" %></li>
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> SDK Reference'), "#{Rails.configuration.Workbench.ArvadosDocsite}/sdk", target: "_blank" %></li>
<li role="presentation" class="divider"></li>
<li> <%= link_to report_issue_popup_path(popup_type: 'version', current_location: request.url, current_path: request.fullpath, action_method: 'post'),
{class: 'report-issue-modal-window', remote: true, return_to: request.url} do %>
<div id="report-issue-modal-window"></div>
<script src="/browser_unsupported.js"></script>
-<% if Rails.configuration.enable_getting_started_popup and current_user and !current_user.prefs[:getting_started_shown] and
+<% if Rails.configuration.Workbench.EnableGettingStartedPopup and current_user and !current_user.prefs[:getting_started_shown] and
!request.url.include?("/profile") and
!request.url.include?("/user_agreements") and
!request.url.include?("/inactive")%>
<%= image_tag "dax.png", class: "dax" %>
<p>
- Hi, I noticed you haven't uploaded a new collection yet.
- <%= link_to "Click here to learn how to upload data to Arvados Keep.",
- "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-keep.html",
+ Hi, I noticed you haven't uploaded a new collection yet.
+ <%= link_to "Click here to learn how to upload data to Arvados Keep.",
+ "#{Rails.configuration.Workbench.ArvadosDocsite}/user/tutorials/tutorial-keep.html",
style: "font-weight: bold", target: "_blank" %>
</p>
SPDX-License-Identifier: AGPL-3.0 %>
<p><%= image_tag "dax.png", class: "dax" %>
- Hi, I noticed you haven't run a job yet.
- <%= link_to "Click here to learn how to run an Arvados Crunch job.",
- "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-job1.html",
+ Hi, I noticed you haven't run a job yet.
+ <%= link_to "Click here to learn how to run an Arvados Crunch job.",
+ "#{Rails.configuration.Workbench.ArvadosDocsite}/user/tutorials/tutorial-job1.html",
style: "font-weight: bold",
target: "_blank" %>
</p>
-
SPDX-License-Identifier: AGPL-3.0 %>
<p><%= image_tag "dax.png", class: "dax" %>
- Hi, I noticed you haven't run a pipeline yet.
- <%= link_to "Click here to learn how to run an Arvados Crunch pipeline.",
- "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-pipeline-workbench.html",
+ Hi, I noticed you haven't run a pipeline yet.
+ <%= link_to "Click here to learn how to run an Arvados Crunch pipeline.",
+ "#{Rails.configuration.Workbench.ArvadosDocsite}/user/tutorials/tutorial-pipeline-workbench.html",
style: "font-weight: bold",
target: "_blank" %>
</p>
collection_pdhs = outputs.select {|x| !(m = CollectionsHelper.match(x)).nil?}.uniq.compact
collection_uuids = outputs - collection_pdhs
- if Rails.configuration.show_recent_collections_on_dashboard
+ if Rails.configuration.Workbench.ShowRecentCollectionsOnDashboard
recent_cs = recent_collections(8)
collection_uuids = collection_uuids + recent_cs[:collections].collect {|c| c.uuid}
collection_uuids.flatten.uniq
show_node_status = false
# Recent processes panel should take the entire width when is the only one
# being rendered.
- if !Rails.configuration.show_recent_collections_on_dashboard
+ if !Rails.configuration.Workbench.ShowRecentCollectionsOnDashboard
recent_procs_panel_width = 12
end
else
</div>
</div>
<% end %>
- <% if Rails.configuration.show_recent_collections_on_dashboard %>
+ <% if Rails.configuration.Workbench.ShowRecentCollectionsOnDashboard %>
<div class="panel panel-default">
<div class="panel-heading"><span class="panel-title">Recent collections</span>
<span class="pull-right">
<p>
See also:
<%= link_to raw('Arvados Docs → User Guide → SSH access'),
- "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
- target: "_blank"%> and
+ "#{Rails.configuration.Workbench.ArvadosDocsite}/user/getting_started/ssh-access-unix.html",
+ target: "_blank"%> and
<%= link_to raw('Arvados Docs → User Guide → Writing a Crunch
Script'),
- "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-firstscript.html",
+ "#{Rails.configuration.Workbench.ArvadosDocsite}/user/tutorials/tutorial-firstscript.html",
target: "_blank"%>.
</p>
<div class="alert alert-info">
<strong>Please check <%= n_files > 1 ? 'each' : 'the' %> box below</strong> to indicate that you have read and accepted the user agreement<%= 's' if n_files > 1 %>.
</div>
- <% if n_files == 1 and (Rails.configuration.show_user_agreement_inline rescue false) %>
+ <% if n_files == 1 and (Rails.configuration.Workbench.ShowUserAgreementInline rescue false) %>
<% ua = unsigned_user_agreements.first; file = ua.files.first %>
<object data="<%= url_for(controller: 'collections', action: 'show_file', uuid: ua.uuid, file: "#{file[0]}/#{file[1]}") %>" type="<%= Rack::Mime::MIME_TYPES[file[1].match(/\.\w+$/)[0]] rescue '' %>" width="100%" height="400px">
</object>
<div class="modal-body">
<div> <%= link_to "Click here to learn about SSH keys in Arvados.",
- "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+ "#{Rails.configuration.Workbench.ArvadosDocsite}/user/getting_started/ssh-access-unix.html",
style: "font-weight: bold",
target: "_blank" %>
</div>
</div>
<div id="#manage_current_token" class="panel-body">
-<p>The Arvados API token is a secret key that enables the Arvados SDKs to access Arvados with the proper permissions. For more information see <%= link_to raw('Getting an API token'), "#{Rails.configuration.arvados_docsite}/user/reference/api-tokens.html", target: "_blank"%>.</p>
+<p>The Arvados API token is a secret key that enables the Arvados SDKs to access Arvados with the proper permissions. For more information see <%= link_to raw('Getting an API token'), "#{Rails.configuration.Workbench.ArvadosDocsite}/user/reference/api-tokens.html", target: "_blank"%>.</p>
<p>Paste the following lines at a shell prompt to set up the necessary environment for Arvados SDKs to authenticate to your <b><%= current_user.username %></b> account.</p>
<pre>
HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'
export ARVADOS_API_TOKEN=<%= Thread.current[:arvados_api_token] %>
export ARVADOS_API_HOST=<%= current_api_host %>
-<% if Rails.configuration.arvados_insecure_https %>
+<% if Rails.configuration.TLS.Insecure %>
export ARVADOS_API_HOST_INSECURE=true
<% else %>
unset ARVADOS_API_HOST_INSECURE
value="<%=identity_url_prefix%>" disabled=true>
<% else %>
<input class="form-control" id="openid_prefix" maxlength="250" name="openid_prefix" type="text"
- value="<%= Rails.configuration.default_openid_prefix %>">
+ value="<%= Rails.configuration.Workbench.DefaultOpenIdPrefix %>">
<% end %>
</div>
<div class="form-group">
<div id="manage_ssh_keys" class="panel-body">
<% if !@my_ssh_keys.any? %>
<p> You have not yet set up an SSH public key for use with Arvados. <%= link_to "Learn more.",
- "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+ "#{Rails.configuration.Workbench.ArvadosDocsite}/user/getting_started/ssh-access-unix.html",
style: "font-weight: bold",
target: "_blank" %>
</p>
Your account must be activated by an Arvados administrator. If this
is your first time accessing Arvados and would like to request
access, or you believe you are seeing the page in error, please
- <%= link_to "contact us", Rails.configuration.activation_contact_link %>.
+ <%= link_to "contact us", Rails.configuration.Workbench.ActivationContactLink %>.
You should receive an email at the address you used to log in when
your account is activated. In the mean time, you can
<%= link_to "learn more about Arvados", "https://arvados.org/projects/arvados/wiki/Introduction_to_Arvados" %>,
</p>
<p style="padding-bottom: 1em">
<%= link_to raw('Contact us ✉'),
- Rails.configuration.activation_contact_link, class: "pull-right btn btn-primary" %></p>
+ Rails.configuration.Workbench.ActivationContactLink, class: "pull-right btn btn-primary" %></p>
</div>
</div>
</div>
<div id="manage_virtual_machines" class="panel-body">
<p>
For more information see <%= link_to raw('Arvados Docs → User Guide → VM access'),
- "#{Rails.configuration.arvados_docsite}/user/getting_started/vm-login-with-webshell.html",
+ "#{Rails.configuration.Workbench.ArvadosDocsite}/user/getting_started/vm-login-with-webshell.html",
target: "_blank"%>.
</p>
<th> Host name </th>
<th> Login name </th>
<th> Command line </th>
- <% if Rails.configuration.shell_in_a_box_url %>
+ <% if Rails.configuration.Services.WebShell.ExternalURL != URI("") %>
<th> Web shell <span class="label label-info">beta</span></th>
<% end %>
</tr>
<% end %>
<% end %>
</td>
- <% if Rails.configuration.shell_in_a_box_url %>
+ <% if Rails.configuration.Services.WebShell.ExternalURL != URI("") %>
<td>
<% @my_vm_logins[vm[:uuid]].andand.each do |login| %>
<%= link_to webshell_virtual_machine_path(vm, login: login), title: "Open a terminal session in your browser", class: 'btn btn-xs btn-default', target: "_blank" do %>
SPDX-License-Identifier: AGPL-3.0 %>
<%
- profile_config = Rails.configuration.user_profile_form_fields
+ profile_config = []
+ Rails.configuration.Workbench.UserProfileFormFields.each do |k, v|
+ r = v.dup
+ r["Key"] = k
+ profile_config << r
+ end
+ profile_config.sort_by! { |v| v["Position"] }
+
current_user_profile = current_user.prefs[:profile]
show_save_button = false
- profile_message = Rails.configuration.user_profile_form_message
+ profile_message = Rails.configuration.Workbench.UserProfileFormMessage
%>
<div>
</div>
<% profile_config.kind_of?(Array) && profile_config.andand.each do |entry| %>
- <% if entry['key'] %>
+ <% if entry['Key'] %>
<%
show_save_button = true
- label = entry['required'] ? '* ' : ''
- label += entry['form_field_title']
- value = current_user_profile[entry['key'].to_sym] if current_user_profile
+ label = entry['Required'] ? '* ' : ''
+ label += entry['FormFieldTitle']
+ value = current_user_profile[entry['Key'].to_sym] if current_user_profile
%>
<div class="form-group">
- <label for="<%=entry['key']%>"
+ <label for="<%=entry['Key']%>"
class="col-sm-3 control-label"
- style=<%="color:red" if entry['required']&&(!value||value.empty?)%>> <%=label%>
+ style=<%="color:red" if entry['Required']&&(!value||value.empty?)%>> <%=label%>
</label>
- <% if entry['type'] == 'select' %>
+ <% if entry['Type'] == 'select' %>
<div class="col-sm-8">
- <select class="form-control" name="user[prefs][profile][<%=entry['key']%>]">
- <% entry['options'].each do |option| %>
+ <select class="form-control" name="user[prefs][profile][<%=entry['Key']%>]">
+ <% entry['Options'].each do |option, _| %>
<option value="<%=option%>" <%='selected' if option==value%>><%=option%></option>
<% end %>
</select>
</div>
<% else %>
<div class="col-sm-8">
- <input type="text" class="form-control" name="user[prefs][profile][<%=entry['key']%>]" placeholder="<%=entry['form_field_description']%>" value="<%=value%>" ></input>
+ <input type="text" class="form-control" name="user[prefs][profile][<%=entry['Key']%>]" placeholder="<%=entry['FormFieldDescription']%>" value="<%=value%>" ></input>
</div>
<% end %>
</div>
The "Log in" button below will show you a Google sign-in page.
After you assure Google that you want to log in here with your
Google account, you will be redirected back here to
- <%= Rails.configuration.site_name %>.
+ <%= Rails.configuration.Workbench.SiteName %>.
</p><p>
- If you have never used <%= Rails.configuration.site_name %>
+ If you have never used <%= Rails.configuration.Workbench.SiteName %>
before, logging in for the first time will automatically
create a new account.
</p><p>
- <i><%= Rails.configuration.site_name %> uses your name and
+ <i><%= Rails.configuration.Workbench.SiteName %> uses your name and
email address only for identification, and does not retrieve
any other personal information from Google.</i>
now, don't provide 'auth_provider' to get the default one. %>
<div class="pull-right">
<%= link_to arvados_api_client.arvados_login_url(return_to: request.url), class: "btn btn-primary" do %>
- Log in to <%= Rails.configuration.site_name %>
+ Log in to <%= Rails.configuration.Workbench.SiteName %>
<i class="fa fa-fw fa-arrow-circle-right"></i>
<% end %>
</div>
<p>
See also:
<%= link_to raw('Arvados Docs → User Guide → SSH access'),
- "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+ "#{Rails.configuration.Workbench.ArvadosDocsite}/user/getting_started/ssh-access-unix.html",
target: "_blank"%>.
</p>
SPDX-License-Identifier: AGPL-3.0 %>
<html>
- <title><%= @object.hostname %> / <%= Rails.configuration.site_name %></title>
+ <title><%= @object.hostname %> / <%= Rails.configuration.Workbench.SiteName %></title>
<link rel="stylesheet" href="<%= asset_path 'webshell/styles.css' %>" type="text/css">
<style type="text/css">
body {
</div>
<% end %>
-<% live_log_lines = wu.live_log_lines(Rails.configuration.running_job_log_records_to_fetch).join("\n") %>
+<% live_log_lines = wu.live_log_lines(Rails.configuration.Workbench.RunningJobLogRecordsToFetch).join("\n") %>
<% if !render_log or (live_log_lines.size > 0) %>
<%# Still running, or recently finished and logs are still available from logs table %>
<%# Show recent logs in terminal window %>
action_dispatch.best_standards_support: :builtin
assets.debug: true
profiling_enabled: true
- site_name: Arvados Workbench (dev)
-
- # API server configuration
- arvados_login_base: ~
- arvados_v1_base: ~
- arvados_insecure_https: ~
production:
force_ssl: true
profiling_enabled: false
log_level: info
- arvados_insecure_https: false
-
- data_import_dir: /data/arvados-workbench-upload/data
- data_export_dir: /data/arvados-workbench-download/data
-
- # API server configuration
- arvados_login_base: ~
- arvados_v1_base: ~
- arvados_insecure_https: ~
-
- site_name: Arvados Workbench
-
test:
cache_classes: true
eager_load: false
- IT
- Other
+ repository_cache: <%= File.expand_path 'tmp/git', Rails.root %>
+
common:
assets.js_compressor: false
assets.css_compressor: false
- data_import_dir: /tmp/arvados-workbench-upload
- data_export_dir: /tmp/arvados-workbench-download
- arvados_login_base: https://arvados.local/login
- arvados_v1_base: https://arvados.local/arvados/v1
- arvados_insecure_https: true
- activation_contact_link: mailto:info@arvados.org
- arvados_docsite: http://doc.arvados.org
- arvados_public_data_doc_url: http://arvados.org/projects/arvados/wiki/Public_Pipelines_and_Datasets
- arvados_theme: default
- show_user_agreement_inline: false
- secret_token: ~
- secret_key_base: false
- default_openid_prefix: https://www.google.com/accounts/o8/id
- send_user_setup_notification_email: true
-
- # Scratch directory used by the remote repository browsing
- # feature. If it doesn't exist, it (and any missing parents) will be
- # created using mkdir_p.
- repository_cache: <%= File.expand_path 'tmp/git', Rails.root %>
-
- # Set user_profile_form_fields to enable and configure the user
- # profile page. Default is set to false. A commented example with
- # full description is provided below.
- user_profile_form_fields: false
-
- # Below is a sample setting of user_profile_form_fields config parameter.
- # This configuration parameter should be set to either false (to disable) or
- # to an array as shown below.
- # Configure the list of input fields to be displayed in the profile page
- # using the attribute "key" for each of the input fields.
- # This sample shows configuration with one required and one optional form fields.
- # For each of these input fields:
- # You can specify "type" as "text" or "select".
- # List the "options" to be displayed for each of the "select" menu.
- # Set "required" as "true" for any of these fields to make them required.
- # If any of the required fields are missing in the user's profile, the user will be
- # redirected to the profile page before they can access any Workbench features.
- #user_profile_form_fields:
- # - key: organization
- # type: text
- # form_field_title: Institution/Company
- # form_field_description: Your organization
- # required: true
- # - key: role
- # type: select
- # form_field_title: Your role
- # form_field_description: Choose the category that best describes your role in your organization.
- # options:
- # - Bio-informatician
- # - Computational biologist
- # - Biologist or geneticist
- # - Software developer
- # - IT
- # - Other
-
- # Use "user_profile_form_message" to configure the message you want to display on
- # the profile page.
- user_profile_form_message: Welcome to Arvados. All <span style="color:red">required fields</span> must be completed before you can proceed.
# Override the automatic version string. With the default value of
# false, the version string is read from git-commit.version in
# Rails.root (included in vendor packages).
package_version: false
- # report notification to and from addresses
- issue_reporter_email_from: arvados@example.com
- issue_reporter_email_to: arvados@example.com
- support_email_address: arvados@example.com
-
- # generic issue email from
- email_from: arvados@example.com
-
- # Mimetypes of applications for which the view icon
- # would be enabled in a collection's show page.
- # It is sufficient to list only applications here.
- # No need to list text and image types.
- application_mimetypes_with_view_icon: [cwl, fasta, go, javascript, json, pdf, python, x-python, r, rtf, sam, x-sh, vnd.realvnc.bed, xml, xsl]
-
- # the maximum number of bytes to load in the log viewer
- log_viewer_max_bytes: 1000000
-
- # Set anonymous_user_token to enable anonymous user access. You can get
- # the token by running "bundle exec ./script/get_anonymous_user_token.rb"
- # in the directory where your API server is running.
- anonymous_user_token: false
-
- # when anonymous_user_token is configured, show public projects page
- enable_public_projects_page: true
-
- # by default, disable the "Getting Started" popup which is specific to the public beta install
- enable_getting_started_popup: false
-
- # Ask Arvados API server to compress its response payloads.
- api_response_compression: true
-
- # Timeouts for API requests.
- api_client_connect_timeout: 120
- api_client_receive_timeout: 300
-
- # ShellInABox service endpoint URL for a given VM. If false, do not
- # offer web shell logins.
- #
- # E.g., using a path-based proxy server to forward connections to shell hosts:
- # https://webshell.uuid_prefix.arvadosapi.com/%{hostname}
- #
- # E.g., using a name-based proxy server to forward connections to shell hosts:
- # https://%{hostname}.webshell.uuid_prefix.arvadosapi.com/
- shell_in_a_box_url: false
-
- # Format of preview links. If false, use keep_web_download_url
- # instead, and disable inline preview.
- # If both are false, Workbench won't start, this is a mandatory configuration.
- #
- # Examples:
- # keep_web_url: https://%{uuid_or_pdh}.collections.uuid_prefix.arvadosapi.com
- # keep_web_url: https://%{uuid_or_pdh}--collections.uuid_prefix.arvadosapi.com
- #
- # Example supporting only public data and collection-sharing links
- # (other data will be handled as downloads via keep_web_download_url):
- # keep_web_url: https://collections.uuid_prefix.arvadosapi.com/c=%{uuid_or_pdh}
- keep_web_url: false
-
- # Format of download links. If false, use keep_web_url with
- # disposition=attachment query param.
- #
- # The host part of the keep_web_download_url value here must match
- # the -attachment-only-host argument given to keep-web: if
- # keep_web_download_url is "https://FOO.EXAMPLE/c=..." then keep-web
- # must run with "-attachment-only-host=FOO.EXAMPLE".
- #
- # If keep_web_download_url is false, and keep_web_url uses a
- # single-origin form, then Workbench will show an error page
- # when asked to download or preview private data.
- #
- # Example:
- # keep_web_download_url: https://download.uuid_prefix.arvadosapi.com/c=%{uuid_or_pdh}
- keep_web_download_url: false
-
- # In "trust all content" mode, Workbench will redirect download
- # requests to keep-web, even in the cases when keep-web would have
- # to expose XSS vulnerabilities in order to handle the redirect.
- #
- # When enabling this setting, the -trust-all-content flag on the
- # keep-web server must also be enabled. For more detail, see
- # https://godoc.org/github.com/curoverse/arvados/services/keep-web
- #
- # This setting has no effect in the recommended configuration, where
- # the host part of keep_web_url begins with %{uuid_or_pdh}: in this
- # case XSS protection is provided by browsers' same-origin policy.
- #
- # The default setting (false) is appropriate for a multi-user site.
- trust_all_content: false
-
- # Maximum number of historic log records of a running job to fetch
- # and display in the Log tab, while subscribing to web sockets.
- running_job_log_records_to_fetch: 2000
-
- # In systems with many shared projects, loading of dashboard and topnav
- # cab be slow due to collections indexing; use the following parameters
- # to suppress these properties
- show_recent_collections_on_dashboard: true
- show_user_notifications: true
-
- # Token to be included in all healthcheck requests. Disabled by default.
- # Workbench expects request header of the format "Authorization: Bearer xxx"
- ManagementToken: false
-
- # Enable/disable "multi-site search" in top nav (true/false), or
- # link it to the multi-site search on a remote Workbench site.
- #
- # Example:
- # multi_site_search: https://workbench.qr1hi.arvadosapi.com/collections/multisite
- multi_site_search: false
-
- #
- # Link to use for Arvados Workflow Composer app, or false if not available.
- #
- composer_url: false
-
- #
- # Should workbench allow management of local git repositories? Set to false if
- # the jobs api is disabled and there are no local git repositories.
- #
- repositories: true
-
- #
- # Add an item to the user menu pointing to workbench2_url, if not false.
- #
- # Example:
- # workbench2_url: https://workbench2.qr1hi.arvadosapi.com
- #
- workbench2_url: false
+ # only used by tests
+ testing_override_login_url: false
module ArvadosWorkbench
class Application < Rails::Application
+
+ require_relative "arvados_config.rb"
+
# Settings in config/environments/* take precedence over those specified here.
# Application configuration should go into files in config/initializers
# -- all .rb files in that directory are automatically loaded.
config.assets.paths << Rails.root.join('node_modules')
end
end
-
-require File.expand_path('../load_config', __FILE__)
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+#
+# Load Arvados configuration from /etc/arvados/config.yml, using defaults
+# from config.default.yml
+#
+# Existing application.yml is migrated into the new config structure.
+# Keys in the legacy application.yml take precedence.
+#
+# Use "bundle exec rake config:dump" to get the complete active configuration
+#
+# Use "bundle exec rake config:migrate" to migrate application.yml to
+# config.yml. After adding the output of config:migrate to
+# /etc/arvados/config.yml, you will be able to delete application.yml.
+
+require 'config_loader'
+require 'config_validators'
+require 'open3'
+
+# Load the defaults, used by config:migrate and fallback loading
+# legacy application.yml.  Feeding a minimal cluster stanza to
+# "arvados-server config-dump" makes it emit every default value.
+# NOTE(review): assumes arvados-server is on PATH (the packaging diff
+# below adds it as a dependency) -- confirm for dev environments.
+Open3.popen2("arvados-server", "config-dump", "-config=-") do |stdin, stdout, status_thread|
+  stdin.write("Clusters: {xxxxx: {}}")
+  stdin.close
+  # NOTE(review): deserialize_symbols is not a stock Psych option --
+  # presumably provided by a YAML patch loaded elsewhere; confirm.
+  confs = YAML.load(stdout, deserialize_symbols: false)
+  clusterID, clusterConfig = confs["Clusters"].first
+  $arvados_config_defaults = clusterConfig
+  $arvados_config_defaults["ClusterID"] = clusterID
+end
+
+# Load the global config file (/etc/arvados/config.yml, if present).
+Open3.popen2("arvados-server", "config-dump") do |stdin, stdout, status_thread|
+  confs = YAML.load(stdout, deserialize_symbols: false)
+  if confs && !confs.empty?
+    # config-dump merges defaults with user configuration, so every
+    # key should be set.
+    clusterID, clusterConfig = confs["Clusters"].first
+    $arvados_config_global = clusterConfig
+    $arvados_config_global["ClusterID"] = clusterID
+  else
+    # config-dump failed, assume we will be loading from legacy
+    # application.yml, initialize with defaults.
+    $arvados_config_global = $arvados_config_defaults.deep_dup
+  end
+end
+
+# Now make a copy that the legacy application.yml migration (below)
+# can modify without touching the pristine global config.
+$arvados_config = $arvados_config_global.deep_dup
+
+# Declare all our configuration items.
+arvcfg = ConfigLoader.new
+
+arvcfg.declare_config "ManagementToken", String, :ManagementToken
+arvcfg.declare_config "TLS.Insecure", Boolean, :arvados_insecure_https
+arvcfg.declare_config "Collections.TrustAllContent", Boolean, :trust_all_content
+
+arvcfg.declare_config "Services.Controller.ExternalURL", URI, :arvados_v1_base, ->(cfg, k, v) {
+ u = URI(v)
+ u.path = ""
+ ConfigLoader.set_cfg cfg, "Services.Controller.ExternalURL", u
+}
+
+arvcfg.declare_config "Services.WebShell.ExternalURL", URI, :shell_in_a_box_url, ->(cfg, k, v) {
+ v ||= ""
+ u = URI(v.sub("%{hostname}", "*"))
+ u.path = ""
+ ConfigLoader.set_cfg cfg, "Services.WebShell.ExternalURL", u
+}
+
+arvcfg.declare_config "Services.WebDAV.ExternalURL", URI, :keep_web_url, ->(cfg, k, v) {
+ v ||= ""
+ u = URI(v.sub("%{uuid_or_pdh}", "*"))
+ u.path = ""
+ ConfigLoader.set_cfg cfg, "Services.WebDAV.ExternalURL", u
+}
+
+arvcfg.declare_config "Services.WebDAVDownload.ExternalURL", URI, :keep_web_download_url, ->(cfg, k, v) {
+ v ||= ""
+ u = URI(v.sub("%{uuid_or_pdh}", "*"))
+ u.path = ""
+ ConfigLoader.set_cfg cfg, "Services.WebDAVDownload.ExternalURL", u
+}
+
+arvcfg.declare_config "Services.Composer.ExternalURL", URI, :composer_url
+arvcfg.declare_config "Services.Workbench2.ExternalURL", URI, :workbench2_url
+
+arvcfg.declare_config "Users.AnonymousUserToken", String, :anonymous_user_token
+
+arvcfg.declare_config "Workbench.SecretKeyBase", String, :secret_key_base
+
+arvcfg.declare_config "Workbench.ApplicationMimetypesWithViewIcon", Hash, :application_mimetypes_with_view_icon, ->(cfg, k, v) {
+ mimetypes = {}
+ v.each do |m|
+ mimetypes[m] = {}
+ end
+ ConfigLoader.set_cfg cfg, "Workbench.ApplicationMimetypesWithViewIcon", mimetypes
+}
+
+arvcfg.declare_config "Workbench.RunningJobLogRecordsToFetch", Integer, :running_job_log_records_to_fetch
+arvcfg.declare_config "Workbench.LogViewerMaxBytes", Integer, :log_viewer_max_bytes
+arvcfg.declare_config "Workbench.ProfilingEnabled", Boolean, :profiling_enabled
+arvcfg.declare_config "Workbench.APIResponseCompression", Boolean, :api_response_compression
+arvcfg.declare_config "Workbench.UserProfileFormFields", Hash, :user_profile_form_fields, ->(cfg, k, v) {
+ if !v
+ v = []
+ end
+ entries = {}
+ v.each_with_index do |s,i|
+ entries[s["key"]] = {
+ "Type" => s["type"],
+ "FormFieldTitle" => s["form_field_title"],
+ "FormFieldDescription" => s["form_field_description"],
+ "Required" => s["required"],
+ "Position": i
+ }
+ if s["options"]
+ entries[s["key"]]["Options"] = {}
+ s["options"].each do |o|
+ entries[s["key"]]["Options"][o] = {}
+ end
+ end
+ end
+ ConfigLoader.set_cfg cfg, "Workbench.UserProfileFormFields", entries
+}
+arvcfg.declare_config "Workbench.UserProfileFormMessage", String, :user_profile_form_message
+arvcfg.declare_config "Workbench.Theme", String, :arvados_theme
+arvcfg.declare_config "Workbench.ShowUserNotifications", Boolean, :show_user_notifications
+arvcfg.declare_config "Workbench.ShowUserAgreementInline", Boolean, :show_user_agreement_inline
+arvcfg.declare_config "Workbench.RepositoryCache", String, :repository_cache
+arvcfg.declare_config "Workbench.Repositories", Boolean, :repositories
+arvcfg.declare_config "Workbench.APIClientConnectTimeout", ActiveSupport::Duration, :api_client_connect_timeout
+arvcfg.declare_config "Workbench.APIClientReceiveTimeout", ActiveSupport::Duration, :api_client_receive_timeout
+arvcfg.declare_config "Workbench.APIResponseCompression", Boolean, :api_response_compression
+arvcfg.declare_config "Workbench.SiteName", String, :site_name
+arvcfg.declare_config "Workbench.MultiSiteSearch", String, :multi_site_search, ->(cfg, k, v) {
+ if !v
+ v = ""
+ end
+ ConfigLoader.set_cfg cfg, "Workbench.MultiSiteSearch", v.to_s
+}
+arvcfg.declare_config "Workbench.EnablePublicProjectsPage", Boolean, :enable_public_projects_page
+arvcfg.declare_config "Workbench.EnableGettingStartedPopup", Boolean, :enable_getting_started_popup
+arvcfg.declare_config "Workbench.ArvadosPublicDataDocURL", String, :arvados_public_data_doc_url
+arvcfg.declare_config "Workbench.ArvadosDocsite", String, :arvados_docsite
+arvcfg.declare_config "Workbench.ShowRecentCollectionsOnDashboard", Boolean, :show_recent_collections_on_dashboard
+arvcfg.declare_config "Workbench.ActivationContactLink", String, :activation_contact_link
+arvcfg.declare_config "Workbench.DefaultOpenIdPrefix", String, :default_openid_prefix
+
+arvcfg.declare_config "Mail.SendUserSetupNotificationEmail", Boolean, :send_user_setup_notification_email
+arvcfg.declare_config "Mail.IssueReporterEmailFrom", String, :issue_reporter_email_from
+arvcfg.declare_config "Mail.IssueReporterEmailTo", String, :issue_reporter_email_to
+arvcfg.declare_config "Mail.SupportEmailAddress", String, :support_email_address
+arvcfg.declare_config "Mail.EmailFrom", String, :email_from
+
+# Gather the legacy application.yml settings: defaults first, then the
+# site file; within each file the Rails-environment section overrides
+# "common".
+application_config = {}
+%w(application.default application).each do |cfgfile|
+  path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
+  confs = ConfigLoader.load(path, erb: true)
+  # Ignore empty YAML file (YAML.load returns false for an empty
+  # document):
+  next if confs == false
+  application_config.deep_merge!(confs['common'] || {})
+  application_config.deep_merge!(confs[::Rails.env.to_s] || {})
+end
+
+# Migrate recognized legacy keys into $arvados_config; keys we don't
+# recognize are returned and copied into the Rails config verbatim in
+# the configure block below.
+$remaining_config = arvcfg.migrate_config(application_config, $arvados_config)
+
+# Checks for wrongly typed configuration items, coerces properties
+# into correct types (such as Duration), and optionally raise error
+# for essential configuration that can't be empty.
+arvcfg.coercion_and_check $arvados_config_defaults, check_nonempty: false
+arvcfg.coercion_and_check $arvados_config_global, check_nonempty: false
+arvcfg.coercion_and_check $arvados_config, check_nonempty: true
+
+# * $arvados_config_defaults is the defaults
+# * $arvados_config_global is $arvados_config_defaults merged with the contents of /etc/arvados/config.yml
+# These are used by the rake config: tasks
+#
+# * $arvados_config is $arvados_config_global merged with the migrated contents of application.yml
+# This is what actually gets copied into the Rails configuration object.
+
+ArvadosWorkbench::Application.configure do
+  # Copy into the Rails config object. This also turns Hash into
+  # OrderedOptions so that application code can use
+  # Rails.configuration.API.Blah instead of
+  # Rails.configuration.API["Blah"]
+  ConfigLoader.copy_into_config $arvados_config, config
+  ConfigLoader.copy_into_config $remaining_config, config
+  # secret_key_base must live in Rails secrets, not plain config.
+  secrets.secret_key_base = $arvados_config["Workbench"]["SecretKeyBase"]
+  # Fail fast at boot on malformed Workbench2/keep-web URLs.
+  ConfigValidators.validate_wb2_url_config()
+  ConfigValidators.validate_download_config()
+
+end
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# This file must be loaded _after_ secret_token.rb if secret_token is
-# defined there instead of in config/application.yml.
-
-$application_config = {}
-
-%w(application.default application).each do |cfgfile|
- path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
- if File.exist? path
- yaml = ERB.new(IO.read path).result(binding)
- confs = YAML.load(yaml, deserialize_symbols: true)
- $application_config.merge!(confs['common'] || {})
- $application_config.merge!(confs[::Rails.env.to_s] || {})
- end
-end
-
-ArvadosWorkbench::Application.configure do
- nils = []
- $application_config.each do |k, v|
- # "foo.bar: baz" --> { config.foo.bar = baz }
- cfg = config
- ks = k.split '.'
- k = ks.pop
- ks.each do |kk|
- cfg = cfg.send(kk)
- end
- if v.nil? and cfg.respond_to?(k) and !cfg.send(k).nil?
- # Config is nil in *.yml, but has been set already in
- # environments/*.rb (or has a Rails default). Don't overwrite
- # the default/upstream config with nil.
- #
- # After config files have been migrated, this mechanism should
- # be removed.
- Rails.logger.warn <<EOS
-DEPRECATED: Inheriting config.#{ks.join '.'} from Rails config.
- Please move this config into config/application.yml.
-EOS
- elsif v.nil?
- # Config variables are not allowed to be nil. Make a "naughty"
- # list, and present it below.
- nils << k
- else
- cfg.send "#{k}=", v
- end
- end
- if !nils.empty? and not ::Rails.groups.include?('assets')
- raise <<EOS
-#{::Rails.groups.include?('assets')}
-Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
-
-The following configuration settings must be specified in
-config/application.yml:
-* #{nils.join "\n* "}
-
-EOS
- end
- # Refuse to start if keep-web isn't configured
- if not (config.keep_web_url or config.keep_web_download_url) and not ::Rails.groups.include?('assets')
- raise <<EOS
-Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
-
-Keep-web service must be configured in config/application.yml:
-* keep_web_url
-* keep_web_download_url
-
-EOS
- end
-end
case "$TARGET" in
centos*)
- fpm_depends+=(git)
+ fpm_depends+=(git arvados-server)
;;
debian* | ubuntu*)
- fpm_depends+=(git g++)
+ fpm_depends+=(git g++ arvados-server)
;;
esac
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Teach Psych (YAML) to serialize the non-primitive value types stored
+# in the config tree as plain scalars, so the rake config: tasks dump
+# clean YAML instead of Ruby object tags.
+module Psych
+  module Visitors
+    class YAMLTree < Psych::Visitors::Visitor
+      # Emit an ActiveSupport::Duration as an "XhYmZs" string (the
+      # same suffixes ConfigLoader.parse_duration accepts), "0s" for a
+      # zero duration.  Sub-second precision is dropped by to_i.
+      def visit_ActiveSupport_Duration o
+        seconds = o.to_i
+        outstr = ""
+        if seconds / 3600 > 0
+          outstr += "#{seconds / 3600}h"
+          seconds = seconds % 3600
+        end
+        if seconds / 60 > 0
+          outstr += "#{seconds / 60}m"
+          seconds = seconds % 60
+        end
+        if seconds > 0
+          outstr += "#{seconds}s"
+        end
+        if outstr == ""
+          outstr = "0s"
+        end
+        @emitter.scalar outstr, nil, nil, true, false, Nodes::Scalar::ANY
+      end
+
+      # URIs and Pathnames serialize as their plain string form.
+      def visit_URI_Generic o
+        @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+      end
+
+      def visit_URI_HTTP o
+        @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+      end
+
+      def visit_Pathname o
+        @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+      end
+    end
+  end
+end
+
+
+# Marker module mixed into TrueClass/FalseClass so config items can be
+# type-checked with `is_a? Boolean` (see ConfigLoader#coercion_and_check).
+module Boolean; end
+class TrueClass; include Boolean; end
+class FalseClass; include Boolean; end
+
+# Marker String subtype for config items that must be non-empty when
+# coercion_and_check runs with check_nonempty: true.
+class NonemptyString < String
+end
+
+class ConfigLoader
+  def initialize
+    # legacy application.yml key (Symbol) -> migration lambda
+    @config_migrate_map = {}
+    # new dotted config key ("A.B.C") -> expected Ruby type
+    @config_types = {}
+  end
+
+  # Register a config item.  assign_to is the dotted new-config key and
+  # configtype its expected type; migrate_from (optional) is the legacy
+  # application.yml key, and migrate_fn an optional custom migration
+  # (default: copy the legacy value straight across).
+  def declare_config(assign_to, configtype, migrate_from=nil, migrate_fn=nil)
+    if migrate_from
+      @config_migrate_map[migrate_from] = migrate_fn || ->(cfg, k, v) {
+        ConfigLoader.set_cfg cfg, assign_to, v
+      }
+    end
+    @config_types[assign_to] = configtype
+  end
+
+
+  # Apply the registered migrations from from_config (legacy) into
+  # to_config; return a hash of the keys we did not recognize.
+  def migrate_config from_config, to_config
+    remainders = {}
+    from_config.each do |k, v|
+      if @config_migrate_map[k.to_sym]
+        begin
+          @config_migrate_map[k.to_sym].call to_config, k, v
+        rescue => e
+          raise "Error migrating '#{k}: #{v}' got error #{e}"
+        end
+      else
+        remainders[k] = v
+      end
+    end
+    remainders
+  end
+
+  # Walk every declared config item in check_cfg, coercing values into
+  # their declared types (duration strings, URI strings, size-suffixed
+  # integers) and raising on missing items or type mismatches.  With
+  # check_nonempty, NonemptyString items must be present and non-empty.
+  def coercion_and_check check_cfg, check_nonempty: true
+    @config_types.each do |cfgkey, cfgtype|
+      begin
+        cfg = check_cfg
+        k = cfgkey
+        ks = k.split '.'
+        k = ks.pop
+        ks.each do |kk|
+          cfg = cfg[kk]
+          if cfg.nil?
+            break
+          end
+        end
+
+        if cfg.nil?
+          raise "missing #{cfgkey}"
+        end
+
+        if cfgtype == String and !cfg[k]
+          cfg[k] = ""
+        end
+
+        if cfgtype == String and cfg[k].is_a? Symbol
+          cfg[k] = cfg[k].to_s
+        end
+
+        if cfgtype == Pathname and cfg[k].is_a? String
+
+          if cfg[k] == ""
+            cfg[k] = Pathname.new("")
+          else
+            cfg[k] = Pathname.new(cfg[k])
+            if !cfg[k].exist?
+              raise "#{cfgkey} path #{cfg[k]} does not exist"
+            end
+          end
+        end
+
+        if cfgtype == NonemptyString
+          if (!cfg[k] || cfg[k] == "") && check_nonempty
+            raise "#{cfgkey} cannot be empty"
+          end
+          if cfg[k].is_a? String
+            # A plain String satisfies NonemptyString; skip the final
+            # is_a? check below.
+            next
+          end
+        end
+
+        if cfgtype == ActiveSupport::Duration
+          if cfg[k].is_a? Integer
+            cfg[k] = cfg[k].seconds
+          elsif cfg[k].is_a? String
+            cfg[k] = ConfigLoader.parse_duration(cfg[k], cfgkey: cfgkey)
+          end
+        end
+
+        if cfgtype == URI
+          if cfg[k]
+            cfg[k] = URI(cfg[k])
+          else
+            cfg[k] = URI("")
+          end
+        end
+
+        # Accept Go-style size suffixes ("1M", "2Gi", "512KiB") for
+        # integer config items.
+        if cfgtype == Integer && cfg[k].is_a?(String)
+          v = cfg[k].sub(/B\s*$/, '')
+          if mt = /(-?\d*\.?\d+)\s*([KMGTPE]i?)$/.match(v)
+            if mt[1].index('.')
+              v = mt[1].to_f
+            else
+              v = mt[1].to_i
+            end
+            cfg[k] = v * {
+              'K' => 1000,
+              'Ki' => 1 << 10,
+              'M' => 1000000,
+              'Mi' => 1 << 20,
+              "G" => 1000000000,
+              "Gi" => 1 << 30,
+              "T" => 1000000000000,
+              "Ti" => 1 << 40,
+              "P" => 1000000000000000,
+              "Pi" => 1 << 50,
+              "E" => 1000000000000000000,
+              "Ei" => 1 << 60,
+            }[mt[2]]
+          end
+        end
+
+      rescue => e
+        # cfg may still be nil here (the "missing ..." path above), so
+        # guard the interpolation -- otherwise nil[k] raises a
+        # NoMethodError that masks the original error.
+        raise "#{cfgkey} expected #{cfgtype} but '#{cfg && cfg[k]}' got error #{e}"
+      end
+
+      if !cfg[k].is_a? cfgtype
+        raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
+      end
+    end
+  end
+
+  # "foo.bar = baz" --> { cfg["foo"]["bar"] = baz }
+  # Silently does nothing if an intermediate key is missing.
+  def self.set_cfg cfg, k, v
+    ks = k.split '.'
+    k = ks.pop
+    ks.each do |kk|
+      cfg = cfg[kk]
+      if cfg.nil?
+        break
+      end
+    end
+    if !cfg.nil?
+      cfg[k] = v
+    end
+  end
+
+  # Parse a Go-style duration string ("1h30m", "-10s", "0.5h") into an
+  # ActiveSupport::Duration.
+  def self.parse_duration durstr, cfgkey:
+    # Anchor at the start and keep the sign inside the capture: the
+    # previous pattern (/-?(\d+...)/) consumed a leading "-" but left
+    # it out of the captured number, so "-5s" silently parsed as +5s,
+    # and an unanchored match could mis-slice on garbage prefixes.
+    duration_re = /\A(-?\d+(\.\d+)?)(s|m|h)/
+    multiplier = {s: 1, m: 60, h: 3600}
+    dursec = 0
+    while durstr != ""
+      mt = duration_re.match durstr
+      if !mt
+        raise "#{cfgkey} not a valid duration: '#{durstr}', accepted suffixes are s, m, h"
+      end
+      dursec += (Float(mt[1]) * multiplier[mt[3].to_sym])
+      durstr = durstr[mt[0].length..-1]
+    end
+    return dursec.seconds
+  end
+
+  # Copy a plain-Hash config tree into the Rails config object,
+  # converting nested Hashes to OrderedOptions so application code can
+  # use dotted access (Rails.configuration.Workbench.SiteName).
+  def self.copy_into_config src, dst
+    src.each do |k, v|
+      dst.send "#{k}=", self.to_OrderedOptions(v)
+    end
+  end
+
+  def self.to_OrderedOptions confs
+    if confs.is_a? Hash
+      opts = ActiveSupport::OrderedOptions.new
+      confs.each do |k,v|
+        opts[k] = self.to_OrderedOptions(v)
+      end
+      opts
+    elsif confs.is_a? Array
+      confs.map { |v| self.to_OrderedOptions v }
+    else
+      confs
+    end
+  end
+
+  # Load a YAML file, optionally ERB-expanded first; returns {} if the
+  # file does not exist.  (YAML.load returns false for an empty
+  # document; callers must handle that.)
+  def self.load path, erb: false
+    if File.exist? path
+      yaml = IO.read path
+      if erb
+        yaml = ERB.new(yaml).result(binding)
+      end
+      # NOTE(review): deserialize_symbols is not a stock Psych option;
+      # presumably provided by a YAML patch loaded elsewhere -- confirm.
+      YAML.load(yaml, deserialize_symbols: false)
+    else
+      {}
+    end
+  end
+
+end
require 'uri'
module ConfigValidators
- def validate_wb2_url_config
- if Rails.configuration.workbench2_url
- begin
- if !URI.parse(Rails.configuration.workbench2_url).is_a?(URI::HTTP)
- Rails.logger.warn("workbench2_url config is not an HTTP URL: #{Rails.configuration.workbench2_url}")
- Rails.configuration.workbench2_url = false
- elsif /.*[\/]{2,}$/.match(Rails.configuration.workbench2_url)
- Rails.logger.warn("workbench2_url config shouldn't have multiple trailing slashes: #{Rails.configuration.workbench2_url}")
- Rails.configuration.workbench2_url = false
- else
- return true
- end
- rescue URI::InvalidURIError
- Rails.logger.warn("workbench2_url config invalid URL: #{Rails.configuration.workbench2_url}")
- Rails.configuration.workbench2_url = false
- end
- end
- return false
+ def self.validate_wb2_url_config
+ if Rails.configuration.Services.Workbench2.ExternalURL != URI("")
+ if !Rails.configuration.Services.Workbench2.ExternalURL.is_a?(URI::HTTP)
+ raise "workbench2_url config is not an HTTP URL: #{Rails.configuration.Services.Workbench2.ExternalURL}"
+ elsif /.*[\/]{2,}$/.match(Rails.configuration.Services.Workbench2.ExternalURL.to_s)
+ raise "workbench2_url config shouldn't have multiple trailing slashes: #{Rails.configuration.Services.Workbench2.ExternalURL}"
+ else
+ return true
+ end
end
-end
+ return false
+ end
+ def self.validate_download_config
+ if Rails.configuration.Services.WebDAV.ExternalURL == URI("") and Rails.configuration.Services.WebDAVDownload.ExternalURL == URI("")
+ raise "Keep-web service must be configured in Services.WebDAV and/or Services.WebDAVDownload"
+ end
+ end
+end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+def diff_hash base, final
+ diffed = {}
+ base.each do |k,v|
+ bk = base[k]
+ fk = final[k]
+ if bk.is_a? Hash
+ d = diff_hash bk, fk
+ if d.length > 0
+ diffed[k] = d
+ end
+ else
+ if bk.to_yaml != fk.to_yaml
+ diffed[k] = fk
+ end
+ end
+ end
+ diffed
+end
+
+namespace :config do
+ desc 'Print items that differ between legacy application.yml and system config.yml'
+ task diff: :environment do
+ diffed = diff_hash $arvados_config_global, $arvados_config
+ cfg = { "Clusters" => {}}
+ cfg["Clusters"][$arvados_config["ClusterID"]] = diffed.select {|k,v| k != "ClusterID"}
+ if cfg["Clusters"][$arvados_config["ClusterID"]].empty?
+ puts "No migrations required for /etc/arvados/config.yml"
+ else
+ puts cfg.to_yaml
+ end
+ end
+
+ desc 'Print config.yml after merging with legacy application.yml'
+ task migrate: :environment do
+ diffed = diff_hash $arvados_config_defaults, $arvados_config
+ cfg = { "Clusters" => {}}
+ cfg["Clusters"][$arvados_config["ClusterID"]] = diffed.select {|k,v| k != "ClusterID"}
+ puts cfg.to_yaml
+ end
+
+ desc 'Print configuration as accessed through Rails.configuration'
+ task dump: :environment do
+ combined = $arvados_config.deep_dup
+ combined.update $remaining_config
+ puts combined.to_yaml
+ end
+
+ desc 'Legacy config check task -- it is a noop now'
+ task check: :environment do
+ # This exists so that build/rails-package-scripts/postinst.sh doesn't fail.
+ end
+end
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-namespace :config do
- desc 'Ensure site configuration has all required settings'
- task check: :environment do
- $application_config.sort.each do |k, v|
- if ENV.has_key?('QUIET') then
- # Make sure we still check for the variable to exist
- eval("Rails.configuration.#{k}")
- else
- if /(password|secret)/.match(k) then
- # Make sure we still check for the variable to exist, but don't print the value
- eval("Rails.configuration.#{k}")
- $stderr.puts "%-32s %s" % [k, '*********']
- else
- $stderr.puts "%-32s %s" % [k, eval("Rails.configuration.#{k}")]
- end
- end
- end
- end
-end
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-namespace :config do
- desc 'Show site configuration'
- task dump: :environment do
- puts $application_config.to_yaml
- end
-end
['pipeline_templates', 'pipeline_template_in_publicly_accessible_project'],
].each do |dm, fixture|
test "access show method for public #{dm} and expect to see page" do
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
get(:show, params: {uuid: api_fixture(dm)[fixture]['uuid']})
assert_response :redirect
if dm == 'groups'
['traits', 'owned_by_aproject_with_no_name', :redirect],
].each do |dm, fixture, expected|
test "access show method for non-public #{dm} and expect #{expected}" do
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
get(:show, params: {uuid: api_fixture(dm)[fixture]['uuid']})
assert_response expected
if expected == 404
# Each pdh has more than one collection; however, we should get only one for each
assert collections.size == 2, 'Expected two objects in the preloaded collection hash'
assert collections[pdh1], 'Expected collections for the passed in pdh #{pdh1}'
- assert_equal collections[pdh1].size, 1, 'Expected one collection for the passed in pdh #{pdh1}'
+ assert_equal collections[pdh1].size, 1, "Expected one collection for the passed in pdh #{pdh1}"
assert collections[pdh2], 'Expected collections for the passed in pdh #{pdh2}'
- assert_equal collections[pdh2].size, 1, 'Expected one collection for the passed in pdh #{pdh2}'
+ assert_equal collections[pdh2].size, 1, "Expected one collection for the passed in pdh #{pdh2}"
end
test "requesting a nonexistent object returns 404" do
true
end.returns fake_api_response('{}', 200, {})
- Rails.configuration.anonymous_user_token =
+ Rails.configuration.Users.AnonymousUserToken =
api_fixture("api_client_authorizations", "anonymous", "api_token")
@controller = ProjectsController.new
test_uuid = "zzzzz-j7d0g-zzzzzzzzzzzzzzz"
].each do |css_selector|
test "login link at #{css_selector.inspect} includes return_to param" do
# Without an anonymous token, we're immediately redirected to login.
- Rails.configuration.anonymous_user_token =
+ Rails.configuration.Users.AnonymousUserToken =
api_fixture("api_client_authorizations", "anonymous", "api_token")
@controller = ProjectsController.new
test_uuid = "zzzzz-j7d0g-zzzzzzzzzzzzzzz"
# We're really testing ApplicationController's render_exception.
# Our primary concern is that it doesn't raise an error and
# return 500.
- orig_api_server = Rails.configuration.arvados_v1_base
+ orig_api_server = Rails.configuration.Services.Controller.ExternalURL
begin
# The URL should look valid in all respects, and avoid talking over a
# network. 100::/64 is the IPv6 discard prefix, so it's perfect.
- Rails.configuration.arvados_v1_base = "https://[100::f]:1/"
+ Rails.configuration.Services.Controller.ExternalURL = "https://[100::f]:1/"
@controller = NodesController.new
get(:index, params: {}, session: session_for(:active))
assert_includes(405..422, @response.code.to_i,
"bad response code when API server is unreachable")
ensure
- Rails.configuration.arvados_v1_base = orig_api_server
+ Rails.configuration.Services.Controller.ExternalURL = orig_api_server
end
end
].each do |controller, fixture, anon_config=true|
test "#{controller} show method with anonymous config #{anon_config ? '' : 'not '}enabled" do
if anon_config
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
else
- Rails.configuration.anonymous_user_token = false
+ Rails.configuration.Users.AnonymousUserToken = ""
end
@controller = controller
false,
].each do |config|
test "invoke show with include_accept_encoding_header config #{config}" do
- Rails.configuration.include_accept_encoding_header_in_api_requests = config
+ Rails.configuration.APIResponseCompression = config
@controller = CollectionsController.new
get(:show, params: {id: api_fixture('collections')['foo_file']['uuid']}, session: session_for(:admin))
NONEXISTENT_COLLECTION = "ffffffffffffffffffffffffffffffff+0"
def config_anonymous enable
- Rails.configuration.anonymous_user_token =
+ Rails.configuration.Users.AnonymousUserToken =
if enable
api_token('anonymous')
else
- false
+ ""
end
end
assert_not_includes @response.body, '<a href="#Upload"'
end
- def setup_for_keep_web cfg='https://%{uuid_or_pdh}.example', dl_cfg=false
- Rails.configuration.keep_web_url = cfg
- Rails.configuration.keep_web_download_url = dl_cfg
+ def setup_for_keep_web cfg='https://*.example', dl_cfg=""
+ Rails.configuration.Services.WebDAV.ExternalURL = URI(cfg)
+ Rails.configuration.Services.WebDAVDownload.ExternalURL = URI(dl_cfg)
end
%w(uuid portable_data_hash).each do |id_type|
end
test "Redirect to keep_web_download_url via #{id_type}" do
- setup_for_keep_web('https://collections.example/c=%{uuid_or_pdh}',
- 'https://download.example/c=%{uuid_or_pdh}')
+ setup_for_keep_web('https://collections.example',
+ 'https://download.example')
tok = api_token('active')
id = api_fixture('collections')['w_a_z_file'][id_type]
get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
end
test "Redirect to keep_web_url via #{id_type} when trust_all_content enabled" do
- Rails.configuration.trust_all_content = true
- setup_for_keep_web('https://collections.example/c=%{uuid_or_pdh}',
- 'https://download.example/c=%{uuid_or_pdh}')
+ Rails.configuration.Workbench.TrustAllContent = true
+ setup_for_keep_web('https://collections.example',
+ 'https://download.example')
tok = api_token('active')
id = api_fixture('collections')['w_a_z_file'][id_type]
get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
test "Redirect download to keep_web_download_url, anon #{anon}" do
config_anonymous anon
- setup_for_keep_web('https://collections.example/c=%{uuid_or_pdh}',
- 'https://download.example/c=%{uuid_or_pdh}')
+ setup_for_keep_web('https://collections.example/',
+ 'https://download.example/')
tok = api_token('active')
id = api_fixture('collections')['public_text_file']['uuid']
get :show_file, params: {
test "Error if file is impossible to retrieve from keep_web_url" do
# Cannot pass a session token using a single-origin keep-web URL,
# cannot read this collection without a session token.
- setup_for_keep_web 'https://collections.example/c=%{uuid_or_pdh}', false
+ setup_for_keep_web 'https://collections.example/', ""
id = api_fixture('collections')['w_a_z_file']['uuid']
get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
assert_response 422
[false, true].each do |trust_all_content|
test "Redirect preview to keep_web_download_url when preview is disabled and trust_all_content is #{trust_all_content}" do
- Rails.configuration.trust_all_content = trust_all_content
- setup_for_keep_web false, 'https://download.example/c=%{uuid_or_pdh}'
+ Rails.configuration.Workbench.TrustAllContent = trust_all_content
+ setup_for_keep_web "", 'https://download.example/'
tok = api_token('active')
id = api_fixture('collections')['w_a_z_file']['uuid']
get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar\?" # locator on command
assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/foo" # mount input1
assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/bar" # mount input2
- assert_includes @response.body, "href=\"\/collections/1fd08fc162a5c6413070a8bd0bffc818+150" # mount workflow
+ assert_includes @response.body, "href=\"\/collections/f9ddda46bb293b6847da984e3aa735db+290" # mount workflow
assert_includes @response.body, "href=\"#Log\""
assert_includes @response.body, "href=\"#Provenance\""
end
test "project tabs as user #{user} when pipeline related index APIs are disabled" do
@controller = ProjectsController.new
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
dd = ArvadosApiClient.new_or_current.discovery.deep_dup
dd[:resources][:pipeline_templates][:methods].delete(:index)
end
test "visit non-public project as anonymous when anonymous browsing is enabled and expect page not found" do
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
get(:show, params: {id: api_fixture('groups')['aproject']['uuid']})
assert_response 404
assert_match(/log ?in/i, @response.body)
end
test "visit home page as anonymous when anonymous browsing is enabled and expect login" do
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
get(:index)
assert_response :redirect
assert_match /\/users\/welcome/, @response.redirect_url
:active,
].each do |user|
test "visit public projects page when anon config is enabled, as user #{user}, and expect page" do
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
if user
get :public, params: {}, session: session_for(user)
end
test "visit public projects page when anon config is not enabled as active user and expect 404" do
- Rails.configuration.anonymous_user_token = nil
- Rails.configuration.enable_public_projects_page = false
+ Rails.configuration.Users.AnonymousUserToken = ""
+ Rails.configuration.Workbench.EnablePublicProjectsPage = false
get :public, params: {}, session: session_for(:active)
assert_response 404
end
test "visit public projects page when anon config is enabled but public projects page is disabled as active user and expect 404" do
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
- Rails.configuration.enable_public_projects_page = false
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Workbench.EnablePublicProjectsPage = false
get :public, params: {}, session: session_for(:active)
assert_response 404
end
test "visit public projects page when anon config is not enabled as anonymous and expect login page" do
- Rails.configuration.anonymous_user_token = nil
- Rails.configuration.enable_public_projects_page = false
+ Rails.configuration.Users.AnonymousUserToken = ""
+ Rails.configuration.Workbench.EnablePublicProjectsPage = false
get :public
assert_response :redirect
assert_match /\/users\/welcome/, @response.redirect_url
end
test "visit public projects page when anon config is enabled and public projects page is disabled and expect login page" do
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
- Rails.configuration.enable_public_projects_page = false
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Workbench.EnablePublicProjectsPage = false
get :index
assert_response :redirect
assert_match /\/users\/welcome/, @response.redirect_url
end
test "visit public projects page when anon config is not enabled and public projects page is enabled and expect login page" do
- Rails.configuration.enable_public_projects_page = true
+ Rails.configuration.Workbench.EnablePublicProjectsPage = true
get :index
assert_response :redirect
assert_match /\/users\/welcome/, @response.redirect_url
end
test "visit a public project and verify the public projects page link exists" do
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
uuid = api_fixture('groups')['anonymously_accessible_project']['uuid']
get :show, params: {id: uuid}
project = assigns(:object)
test "ignore previously valid token (for deleted user), don't crash" do
get :activity, params: {}, session: session_for(:valid_token_deleted_user)
assert_response :redirect
- assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
+ assert_match /^#{Rails.configuration.Services.Controller.ExternalURL}\/login/, @response.redirect_url
assert_nil assigns(:my_jobs)
assert_nil assigns(:my_ssh_keys)
end
id: api_fixture('users')['active']['uuid']
}, session: session_for(:expired_trustedclient)
assert_response :redirect
- assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
+ assert_match /^#{Rails.configuration.Services.Controller.ExternalURL}\/login/, @response.redirect_url
assert_nil assigns(:my_jobs)
assert_nil assigns(:my_ssh_keys)
end
setup do
need_javascript
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
end
PUBLIC_PROJECT = "/projects/#{api_fixture('groups')['anonymously_accessible_project']['uuid']}"
assert_text 'indicate that you have read and accepted the user agreement'
end
within('.navbar-fixed-top') do
- assert_selector 'a', text: Rails.configuration.site_name.downcase
+ assert_selector 'a', text: Rails.configuration.Workbench.SiteName.downcase
assert(page.has_link?("notifications-menu"), 'no user menu')
page.find("#notifications-menu").click
within('.dropdown-menu') do
else # anonymous
assert_text 'Unrestricted public data'
within('.navbar-fixed-top') do
- assert_text Rails.configuration.site_name.downcase
- assert_no_selector 'a', text: Rails.configuration.site_name.downcase
+ assert_text Rails.configuration.Workbench.SiteName.downcase
+ assert_no_selector 'a', text: Rails.configuration.Workbench.SiteName.downcase
assert_selector 'a', text: 'Log in'
assert_selector 'a', text: 'Browse public projects'
end
end
def verify_homepage user, invited, has_profile
- profile_config = Rails.configuration.user_profile_form_fields
+ profile_config = Rails.configuration.Workbench.UserProfileFormFields
if !user
assert page.has_text?('Please log in'), 'Not found text - Please log in'
assert page.has_text?('The "Log in" button below will show you a Google sign-in page'), 'Not found text - google sign in page'
assert page.has_no_text?('My projects'), 'Found text - My projects'
- assert page.has_link?("Log in to #{Rails.configuration.site_name}"), 'Not found text - log in to'
+ assert page.has_link?("Log in to #{Rails.configuration.Workbench.SiteName}"), 'Not found text - log in to'
elsif user['is_active']
if profile_config && !has_profile
assert page.has_text?('Save profile'), 'No text - Save profile'
within('.navbar-fixed-top') do
if !user
- assert_text Rails.configuration.site_name.downcase
- assert_no_selector 'a', text: Rails.configuration.site_name.downcase
+ assert_text Rails.configuration.Workbench.SiteName.downcase
+ assert_no_selector 'a', text: Rails.configuration.Workbench.SiteName.downcase
assert page.has_link?('Log in'), 'Not found link - Log in'
else
# my account menu
- assert_selector 'a', text: Rails.configuration.site_name.downcase
+ assert_selector 'a', text: Rails.configuration.Workbench.SiteName.downcase
assert(page.has_link?("notifications-menu"), 'no user menu')
page.find("#notifications-menu").click
within('.dropdown-menu') do
end
[
- [false, false],
+ ["", false],
['http://wb2.example.org//', false],
['ftp://wb2.example.org', false],
['wb2.example.org', false],
['https://wb2.example.org/', true],
].each do |wb2_url_config, wb2_menu_appear|
test "workbench2_url=#{wb2_url_config} should#{wb2_menu_appear ? '' : ' not'} show WB2 menu" do
- Rails.configuration.workbench2_url = wb2_url_config
- assert_equal wb2_menu_appear, ConfigValidators::validate_wb2_url_config()
+ Rails.configuration.Services.Workbench2.ExternalURL = URI(wb2_url_config)
+ if !wb2_menu_appear and !wb2_url_config.empty?
+ assert_raises RuntimeError do
+ ConfigValidators.validate_wb2_url_config()
+ end
+ Rails.configuration.Services.Workbench2.ExternalURL = URI("")
+ end
visit page_with_token('active')
within('.navbar-fixed-top') do
['active_with_prefs_profile_no_getting_started_shown', false],
].each do |token, getting_started_shown|
test "getting started help menu item #{getting_started_shown}" do
- Rails.configuration.enable_getting_started_popup = true
+ Rails.configuration.Workbench.EnableGettingStartedPopup = true
visit page_with_token(token)
end
test "test arvados_public_data_doc_url config unset" do
- Rails.configuration.arvados_public_data_doc_url = false
+ Rails.configuration.Workbench.ArvadosPublicDataDocURL = ""
visit page_with_token('active')
within '.navbar-fixed-top' do
end
test "no SSH public key notification when shell_in_a_box_url is configured" do
- Rails.configuration.shell_in_a_box_url = 'example.com'
+ Rails.configuration.Services.WebShell.ExternalURL = URI('http://example.com')
visit page_with_token('job_reader')
click_link 'notifications-menu'
assert_no_selector 'a', text:'Click here to set up an SSH public key for use with Arvados.'
end
test "preview anonymous content from keep-web by #{id_type}" do
- Rails.configuration.anonymous_user_token =
+ Rails.configuration.Users.AnonymousUserToken =
api_fixture('api_client_authorizations')['anonymous']['api_token']
uuid_or_pdh =
api_fixture('collections')['public_text_file'][id_type]
end
test "download anonymous content from keep-web by #{id_type}" do
- Rails.configuration.anonymous_user_token =
+ Rails.configuration.Users.AnonymousUserToken =
api_fixture('api_client_authorizations')['anonymous']['api_token']
uuid_or_pdh =
api_fixture('collections')['public_text_file'][id_type]
test "API error page has Report problem button" do
# point to a bad api server url to generate fiddlesticks error
- original_arvados_v1_base = Rails.configuration.arvados_v1_base
- Rails.configuration.arvados_v1_base = "https://[::1]:1/"
+ original_arvados_v1_base = Rails.configuration.Services.Controller.ExternalURL
+ Rails.configuration.Services.Controller.ExternalURL = URI("https://[::1]:1/")
visit page_with_token("active")
assert_text 'fiddlesticks'
# reset api server base config to let the popup rendering to work
- Rails.configuration.arvados_v1_base = original_arvados_v1_base
+ Rails.configuration.Services.Controller.ExternalURL = original_arvados_v1_base
click_link 'Report problem'
use_keep_web_config
# This config will be restored during teardown by ../test_helper.rb:
- Rails.configuration.log_viewer_max_bytes = 100
+ Rails.configuration.Workbench.LogViewerMaxBytes = 100
logdata = fakepipe_with_log_data.read
job_uuid = api_fixture('jobs')['running']['uuid']
need_javascript
end
+ teardown do
+ Rails.configuration.testing_override_login_url = false
+ end
+
def start_sso_stub token
port = available_port('sso_stub')
test "Add another login to this account" do
visit page_with_token('active_trustedclient')
- stub = start_sso_stub(api_fixture('api_client_authorizations')['project_viewer_trustedclient']['api_token'])
- Rails.configuration.arvados_login_base = stub + "login"
+ Rails.configuration.testing_override_login_url = start_sso_stub(api_fixture('api_client_authorizations')['project_viewer_trustedclient']['api_token'])
find("#notifications-menu").click
assert_text "active-user@arvados.local"
test "Use this login to access another account" do
visit page_with_token('project_viewer_trustedclient')
- stub = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
- Rails.configuration.arvados_login_base = stub + "login"
+ Rails.configuration.testing_override_login_url = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
find("#notifications-menu").click
assert_text "project-viewer@arvados.local"
test "Link login of inactive user to this account" do
visit page_with_token('active_trustedclient')
- stub = start_sso_stub(api_fixture('api_client_authorizations')['inactive_uninvited_trustedclient']['api_token'])
- Rails.configuration.arvados_login_base = stub + "login"
+ Rails.configuration.testing_override_login_url = start_sso_stub(api_fixture('api_client_authorizations')['inactive_uninvited_trustedclient']['api_token'])
find("#notifications-menu").click
assert_text "active-user@arvados.local"
test "Cannot link to inactive user" do
visit page_with_token('active_trustedclient')
- stub = start_sso_stub(api_fixture('api_client_authorizations')['inactive_uninvited_trustedclient']['api_token'])
- Rails.configuration.arvados_login_base = stub + "login"
+ Rails.configuration.testing_override_login_url = start_sso_stub(api_fixture('api_client_authorizations')['inactive_uninvited_trustedclient']['api_token'])
find("#notifications-menu").click
assert_text "active-user@arvados.local"
test "Inactive user can link to active account" do
visit page_with_token('inactive_uninvited_trustedclient')
- stub = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
- Rails.configuration.arvados_login_base = stub + "login"
+ Rails.configuration.testing_override_login_url = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
find("#notifications-menu").click
assert_text "inactive-uninvited-user@arvados.local"
test "Admin cannot link to non-admin" do
visit page_with_token('admin_trustedclient')
- stub = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
- Rails.configuration.arvados_login_base = stub + "login"
+ Rails.configuration.testing_override_login_url = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
find("#notifications-menu").click
assert_text "admin@arvados.local"
def create_and_run_pipeline_in_aproject in_aproject, template_name, collection_fixture, choose_file=false
# collection in aproject to be used as input
collection = api_fixture('collections', collection_fixture)
+ collection['name'] ||= '' # API response is "" even if fixture attr is null
# create a pipeline instance
find('.btn', text: 'Run a process').click
if collection_fixture == 'foo_collection_in_aproject'
first('span', text: 'foo_tag').click
- elsif collection['name']
+ elsif collection['name'] != ''
first('span', text: "#{collection['name']}").click
else
collection_uuid = collection['uuid']
end
test "projects not publicly sharable when anonymous browsing disabled" do
- Rails.configuration.anonymous_user_token = false
+ Rails.configuration.Users.AnonymousUserToken = ""
open_groups_sharing
# Check for a group we do expect first, to make sure the modal's loaded.
assert_selector(".modal-container .selectable",
end
test "projects publicly sharable when anonymous browsing enabled" do
- Rails.configuration.anonymous_user_token = "testonlytoken"
+ Rails.configuration.Users.AnonymousUserToken = "testonlytoken"
open_groups_sharing
assert_selector(".modal-container .selectable",
text: group_name("anonymous_group"))
end
test "error while loading tab" do
- original_arvados_v1_base = Rails.configuration.arvados_v1_base
+ original_arvados_v1_base = Rails.configuration.Services.Controller.ExternalURL
visit page_with_token 'active', '/projects/' + api_fixture('groups')['aproject']['uuid']
# Point to a bad api server url to generate error
- Rails.configuration.arvados_v1_base = "https://[::1]:1/"
+ Rails.configuration.Services.Controller.ExternalURL = "https://[::1]:1/"
click_link 'Other objects'
within '#Other_objects' do
# Error
assert_selector('a', text: 'Reload tab')
# Now point back to the orig api server and reload tab
- Rails.configuration.arvados_v1_base = original_arvados_v1_base
+ Rails.configuration.Services.Controller.ExternalURL = original_arvados_v1_base
click_link 'Reload tab'
assert_no_selector('a', text: 'Reload tab')
assert_selector('button', text: 'Selection')
class ReportIssueTest < ActionDispatch::IntegrationTest
setup do
need_javascript
- @user_profile_form_fields = Rails.configuration.user_profile_form_fields
+ @user_profile_form_fields = Rails.configuration.Workbench.UserProfileFormFields
end
teardown do
- Rails.configuration.user_profile_form_fields = @user_profile_form_fields
+ Rails.configuration.Workbench.UserProfileFormFields = @user_profile_form_fields
end
# test version info and report issue from help menu
class UserProfileTest < ActionDispatch::IntegrationTest
setup do
need_javascript
- @user_profile_form_fields = Rails.configuration.user_profile_form_fields
+ @user_profile_form_fields = Rails.configuration.Workbench.UserProfileFormFields
end
teardown do
- Rails.configuration.user_profile_form_fields = @user_profile_form_fields
+ Rails.configuration.Workbench.UserProfileFormFields = @user_profile_form_fields
end
def verify_homepage_with_profile user, invited, has_profile
- profile_config = Rails.configuration.user_profile_form_fields
+ profile_config = Rails.configuration.Workbench.UserProfileFormFields
if !user
assert_text('Please log in')
elsif user['is_active']
- if profile_config && !has_profile
+ if !profile_config.empty? && !has_profile
assert_text('Save profile')
add_profile user
else
assert_selector('a', text: 'Current token')
assert_selector('a', text: 'SSH keys')
- if profile_config
+ if !profile_config.empty?
assert_selector('a', text: 'Manage profile')
else
assert_no_selector('a', text: 'Manage profile')
assert_text('Save profile')
# This time fill in required field and then save. Expect to go to requested page after that.
- profile_message = Rails.configuration.user_profile_form_message
+ profile_message = Rails.configuration.Workbench.UserProfileFormMessage
required_field_title = ''
required_field_key = ''
- profile_config = Rails.configuration.user_profile_form_fields
- profile_config.each do |entry|
- if entry['required']
- required_field_key = entry['key']
- required_field_title = entry['form_field_title']
+ profile_config = Rails.configuration.Workbench.UserProfileFormFields
+ profile_config.each do |k, entry|
+ if entry['Required']
+ required_field_key = k.to_s
+ required_field_title = entry['FormFieldTitle']
break
end
end
[true, false].each do |profile_required|
test "visit #{token} home page when profile is #{'not ' if !profile_required}configured" do
if !profile_required
- Rails.configuration.user_profile_form_fields = false
+ Rails.configuration.Workbench.UserProfileFormFields = []
else
# Our test config enabled profile by default. So, no need to update config
end
- Rails.configuration.enable_getting_started_popup = true
+ Rails.configuration.Workbench.EnableGettingStartedPopup = true
if !token
visit ('/')
test "pipeline notification shown even though public pipelines exist" do
skip "created_by doesn't work that way"
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
visit page_with_token 'job_reader'
click_link 'notifications-menu'
assert_selector 'a', text: 'Click here to learn how to run an Arvados Crunch pipeline'
['active'],
].each do |user, *expect|
test "user settings menu for #{user} with notifications #{expect.inspect}" do
- Rails.configuration.anonymous_user_token = false
+ Rails.configuration.Users.AnonymousUserToken = ""
visit page_with_token(user)
click_link 'notifications-menu'
if expect.include? :ssh
visit page_with_token("active", "/jobs/#{job['uuid']}\#Log")
# Expect "all" historic log records because we have less than
- # default Rails.configuration.running_job_log_records_to_fetch count
+ # default Rails.configuration.Workbench.RunningJobLogRecordsToFetch
assert_text 'Historic log message'
# Create new log record and expect it to show up in log tab
test "test running job with too many previous log records" do
max = 5
- Rails.configuration.running_job_log_records_to_fetch = max
+ Rails.configuration.Workbench.RunningJobLogRecordsToFetch = max
job = api_fixture("jobs")['running']
# Create max+1 log records
if token
visit page_with_token token, "/#{type}/#{obj['uuid']}"
else
- Rails.configuration.anonymous_user_token =
+ Rails.configuration.Users.AnonymousUserToken =
api_fixture("api_client_authorizations", "anonymous", "api_token")
visit "/#{type}/#{obj['uuid']}"
end
def use_keep_web_config
@kwport = getport 'keep-web-ssl'
@kwdport = getport 'keep-web-dl-ssl'
- Rails.configuration.keep_web_url = "https://localhost:#{@kwport}/c=%{uuid_or_pdh}"
- Rails.configuration.keep_web_download_url = "https://localhost:#{@kwdport}/c=%{uuid_or_pdh}"
+ Rails.configuration.Services.WebDAV.ExternalURL = URI("https://localhost:#{@kwport}")
+ Rails.configuration.Services.WebDAVDownload.ExternalURL = URI("https://localhost:#{@kwdport}")
end
end
# didn't make a significant difference.
[true].each do |compress|
test "crud cycle for collection with big manifest (compress=#{compress})" do
- Rails.configuration.api_response_compression = compress
+ Rails.configuration.Workbench.APIResponseCompression = compress
Thread.current[:arvados_api_client] = nil
crudtest
end
end
def self.reset_application_config
- $application_config.each do |k,v|
- if k.match /^[^.]*$/
- Rails.configuration.send (k + '='), v
- end
- end
+ # Restore configuration settings changed during tests
+ ConfigLoader.copy_into_config $arvados_config, Rails.configuration
+ ConfigLoader.copy_into_config $remaining_config, Rails.configuration
+ Rails.configuration.Services.Controller.ExternalURL = URI("https://#{ENV['ARVADOS_API_HOST']}")
+ Rails.configuration.TLS.Insecure = true
end
end
end
run_test_server
- $application_config['arvados_login_base'] = "https://#{ENV['ARVADOS_API_HOST']}/login"
- $application_config['arvados_v1_base'] = "https://#{ENV['ARVADOS_API_HOST']}/arvados/v1"
- $application_config['arvados_insecure_host'] = true
ActiveSupport::TestCase.reset_application_config
@@server_is_running = true
test "User.current doesn't return anonymous user when using invalid token" do
# Set up anonymous user token
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
# First, try with a valid user
use_token :active
u = User.current
reset_api_fixtures :after_each_test, false
setup do
- Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.Users.AnonymousUserToken = api_fixture('api_client_authorizations')['anonymous']['api_token']
end
[
#
# SPDX-License-Identifier: AGPL-3.0
-all: centos7/generated debian8/generated debian9/generated ubuntu1404/generated ubuntu1604/generated ubuntu1804/generated
+all: centos7/generated debian9/generated ubuntu1604/generated ubuntu1804/generated
centos7/generated: common-generated-all
test -d centos7/generated || mkdir centos7/generated
cp -rlt centos7/generated common-generated/*
-debian8/generated: common-generated-all
- test -d debian8/generated || mkdir debian8/generated
- cp -rlt debian8/generated common-generated/*
-
debian9/generated: common-generated-all
test -d debian9/generated || mkdir debian9/generated
cp -rlt debian9/generated common-generated/*
-ubuntu1404/generated: common-generated-all
- test -d ubuntu1404/generated || mkdir ubuntu1404/generated
- cp -rlt ubuntu1404/generated common-generated/*
-
ubuntu1604/generated: common-generated-all
test -d ubuntu1604/generated || mkdir ubuntu1604/generated
cp -rlt ubuntu1604/generated common-generated/*
test -d ubuntu1804/generated || mkdir ubuntu1804/generated
cp -rlt ubuntu1804/generated common-generated/*
-GOTARBALL=go1.10.1.linux-amd64.tar.gz
+GOTARBALL=go1.12.7.linux-amd64.tar.gz
NODETARBALL=node-v6.11.2-linux-x64.tar.xz
RVMKEY1=mpapis.asc
RVMKEY2=pkuczynski.asc
common-generated-all: common-generated/$(GOTARBALL) common-generated/$(NODETARBALL) common-generated/$(RVMKEY1) common-generated/$(RVMKEY2)
common-generated/$(GOTARBALL): common-generated
- wget -cqO common-generated/$(GOTARBALL) http://storage.googleapis.com/golang/$(GOTARBALL)
+ wget -cqO common-generated/$(GOTARBALL) https://dl.google.com/go/$(GOTARBALL)
common-generated/$(NODETARBALL): common-generated
wget -cqO common-generated/$(NODETARBALL) https://nodejs.org/dist/v6.11.2/$(NODETARBALL)
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
-ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.12.7.linux-amd64.tar.gz /usr/local/
RUN ln -s /usr/local/go/bin/go /usr/local/bin/
# Install nodejs and npm
RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
RUN rpm -ivh epel-release-latest-7.noarch.rpm
-RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle
+
+# Workbench depends on arvados-server for config manipulation
+ENV GOPATH /tmp
+RUN mkdir -p $GOPATH/src/git.curoverse.com && ln -sT /tmp/arvados $GOPATH/src/git.curoverse.com/arvados.git && cd $GOPATH/src/git.curoverse.com/arvados.git/cmd/arvados-server && go get -v github.com/kardianos/govendor && $GOPATH/bin/govendor sync && go get && go build && cp arvados-server /usr/local/bin/ && rm -rf /tmp/arvados
# The version of setuptools that comes with CentOS is way too old
RUN pip install --upgrade setuptools
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
-ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.12.7.linux-amd64.tar.gz /usr/local/
RUN ln -s /usr/local/go/bin/go /usr/local/bin/
# Install nodejs and npm
ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
-RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle
+
+# Workbench depends on arvados-server for config manipulation
+ENV GOPATH /tmp
+RUN mkdir -p $GOPATH/src/git.curoverse.com && ln -sT /tmp/arvados $GOPATH/src/git.curoverse.com/arvados.git && cd $GOPATH/src/git.curoverse.com/arvados.git/cmd/arvados-server && go get -v github.com/kardianos/govendor && $GOPATH/bin/govendor sync && go get && go build && cp arvados-server /usr/local/bin/ && rm -rf /tmp/arvados
ENV WORKSPACE /arvados
CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "debian8"]
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
-ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.12.7.linux-amd64.tar.gz /usr/local/
RUN ln -s /usr/local/go/bin/go /usr/local/bin/
# Install nodejs and npm
ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
-RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle
+
+# Workbench depends on arvados-server for config manipulation
+ENV GOPATH /tmp
+RUN mkdir -p $GOPATH/src/git.curoverse.com && ln -sT /tmp/arvados $GOPATH/src/git.curoverse.com/arvados.git && cd $GOPATH/src/git.curoverse.com/arvados.git/cmd/arvados-server && go get -v github.com/kardianos/govendor && $GOPATH/bin/govendor sync && go get && go build && cp arvados-server /usr/local/bin/ && rm -rf /tmp/arvados
ENV WORKSPACE /arvados
CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "debian9"]
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
-ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.12.7.linux-amd64.tar.gz /usr/local/
RUN ln -s /usr/local/go/bin/go /usr/local/bin/
# Install nodejs and npm
ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
-RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle
+
+# Workbench depends on arvados-server for config manipulation
+ENV GOPATH /tmp
+RUN mkdir -p $GOPATH/src/git.curoverse.com && ln -sT /tmp/arvados $GOPATH/src/git.curoverse.com/arvados.git && cd $GOPATH/src/git.curoverse.com/arvados.git/cmd/arvados-server && go get -v github.com/kardianos/govendor && $GOPATH/bin/govendor sync && go get && go build && cp arvados-server /usr/local/bin/ && rm -rf /tmp/arvados
ENV WORKSPACE /arvados
CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1404"]
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
-ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.12.7.linux-amd64.tar.gz /usr/local/
RUN ln -s /usr/local/go/bin/go /usr/local/bin/
# Install nodejs and npm
ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
-RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle
+
+# Workbench depends on arvados-server for config manipulation
+ENV GOPATH /tmp
+RUN mkdir -p $GOPATH/src/git.curoverse.com && ln -sT /tmp/arvados $GOPATH/src/git.curoverse.com/arvados.git && cd $GOPATH/src/git.curoverse.com/arvados.git/cmd/arvados-server && go get -v github.com/kardianos/govendor && $GOPATH/bin/govendor sync && go get && go build && cp arvados-server /usr/local/bin/ && rm -rf /tmp/arvados
ENV WORKSPACE /arvados
CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1604"]
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
-ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+ADD generated/go1.12.7.linux-amd64.tar.gz /usr/local/
RUN ln -s /usr/local/go/bin/go /usr/local/bin/
# Install nodejs and npm
ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
-RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle
+
+# Workbench depends on arvados-server for config manipulation
+ENV GOPATH /tmp
+RUN mkdir -p $GOPATH/src/git.curoverse.com && ln -sT /tmp/arvados $GOPATH/src/git.curoverse.com/arvados.git && cd $GOPATH/src/git.curoverse.com/arvados.git/cmd/arvados-server && go get -v github.com/kardianos/govendor && $GOPATH/bin/govendor sync && go get && go build && cp arvados-server /usr/local/bin/ && rm -rf /tmp/arvados
ENV WORKSPACE /arvados
CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1804"]
#
# SPDX-License-Identifier: AGPL-3.0
-all: centos7/generated debian8/generated debian9/generated ubuntu1404/generated ubuntu1604/generated ubuntu1804/generated
+all: centos7/generated debian9/generated ubuntu1604/generated ubuntu1804/generated
centos7/generated: common-generated-all
test -d centos7/generated || mkdir centos7/generated
cp -rlt centos7/generated common-generated/*
-debian8/generated: common-generated-all
- test -d debian8/generated || mkdir debian8/generated
- cp -rlt debian8/generated common-generated/*
-
debian9/generated: common-generated-all
test -d debian9/generated || mkdir debian9/generated
cp -rlt debian9/generated common-generated/*
-ubuntu1404/generated: common-generated-all
- test -d ubuntu1404/generated || mkdir ubuntu1404/generated
- cp -rlt ubuntu1404/generated common-generated/*
-
ubuntu1604/generated: common-generated-all
test -d ubuntu1604/generated || mkdir ubuntu1604/generated
cp -rlt ubuntu1604/generated common-generated/*
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-FROM debian:8
-MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
-
-RUN perl -ni~ -e 'print unless /jessie-updates/' /etc/apt/sources.list
-
-ENV DEBIAN_FRONTEND noninteractive
-
-# Install dependencies
-RUN apt-get update && \
- apt-get -y install --no-install-recommends curl ca-certificates
-
-# Install RVM
-ADD generated/mpapis.asc /tmp/
-ADD generated/pkuczynski.asc /tmp/
-RUN gpg --import --no-tty /tmp/mpapis.asc && \
- gpg --import --no-tty /tmp/pkuczynski.asc && \
- curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.5 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.5
-
-# udev daemon can't start in a container, so don't try.
-RUN mkdir -p /etc/udev/disabled
-
-RUN echo "deb file:///arvados/packages/debian8/ /" >>/etc/apt/sources.list
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-FROM ubuntu:trusty
-MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-# Install dependencies
-RUN apt-get update && \
- apt-get -y install --no-install-recommends curl ca-certificates python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip binutils build-essential ca-certificates
-
-# Install RVM
-ADD generated/mpapis.asc /tmp/
-ADD generated/pkuczynski.asc /tmp/
-RUN gpg --import --no-tty /tmp/mpapis.asc && \
- gpg --import --no-tty /tmp/pkuczynski.asc && \
- curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.3 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.3
-
-# udev daemon can't start in a container, so don't try.
-RUN mkdir -p /etc/udev/disabled
-
-RUN echo "deb file:///arvados/packages/ubuntu1404/ /" >>/etc/apt/sources.list
fi
report_not_ready "$DATABASE_READY" "$CONFIG_PATH/database.yml"
-report_not_ready "$APPLICATION_READY" "$CONFIG_PATH/application.yml"
+if printf '%s\n' "$CONFIG_PATH" | grep -Fqe "sso"; then
+ report_not_ready "$APPLICATION_READY" "$CONFIG_PATH/application.yml"
+else
+ report_not_ready "$APPLICATION_READY" "/etc/arvados/config.yml"
+fi
fi
echo
echo "START: $p test on $IMAGE" >&2
- # ulimit option can be removed when debian8 and ubuntu1404 are retired
- if docker run --ulimit nofile=4096:4096 \
+ if docker run \
--rm \
"${docker_volume_args[@]}" \
--env ARVADOS_DEBUG=$ARVADOS_DEBUG \
mv -f ${WORKSPACE}/packages/${TARGET}/* ${WORKSPACE}/packages/${TARGET}/processed/ 2>/dev/null
set -e
set -x
- # Build packages. ulimit option can be removed when debian8 and ubuntu1404 are retired
- if docker run --ulimit nofile=4096:4096 \
+ # Build packages.
+ if docker run \
--rm \
"${docker_volume_args[@]}" \
--env ARVADOS_BUILDING_VERSION="$ARVADOS_BUILDING_VERSION" \
GEM_BUILD_FAILURES=${#failures[@]}
fi
-python_wrapper arvados-pam "$WORKSPACE/sdk/pam"
python_wrapper arvados-python-client "$WORKSPACE/sdk/python"
+python_wrapper arvados-pam "$WORKSPACE/sdk/pam"
python_wrapper arvados-cwl-runner "$WORKSPACE/sdk/cwl"
python_wrapper arvados_fuse "$WORKSPACE/services/fuse"
python_wrapper arvados-node-manager "$WORKSPACE/services/nodemanager"
--debug
Output debug information (default: false)
--target <target>
- Distribution to build packages for (default: debian8)
+ Distribution to build packages for (default: debian9)
--only-build <package>
Build only a specific package (or $ONLY_BUILD from environment)
--command
DEBUG=${ARVADOS_DEBUG:-0}
EXITCODE=0
-TARGET=debian8
+TARGET=debian9
COMMAND=
PARSEDOPTS=$(getopt --name "$0" --longoptions \
package_go_binary tools/keep-exercise keep-exercise \
"Performance testing tool for Arvados Keep"
-# The Python SDK
+# The Python SDK - Should be built first because it's needed by others
fpm_build_virtualenv "arvados-python-client" "sdk/python"
-fpm_build_virtualenv "arvados-python-client" "sdk/python" "python3"
# Arvados cwl runner
fpm_build_virtualenv "arvados-cwl-runner" "sdk/cwl"
# The node manager
fpm_build_virtualenv "arvados-node-manager" "services/nodemanager"
-# The Docker image cleaner
-fpm_build_virtualenv "arvados-docker-cleaner" "services/dockercleaner" "python3"
-
# The Arvados crunchstat-summary tool
fpm_build_virtualenv "crunchstat-summary" "tools/crunchstat-summary"
+# The Python SDK - Python3 package
+fpm_build_virtualenv "arvados-python-client" "sdk/python" "python3"
+
+# The Docker image cleaner
+fpm_build_virtualenv "arvados-docker-cleaner" "services/dockercleaner" "python3"
+
# The cwltest package, which lives out of tree
cd "$WORKSPACE"
if [[ -e "$WORKSPACE/cwltest" ]]; then
rm -rf tmp
mkdir tmp
- # Set up application.yml and production.rb so that asset precompilation works
- \cp config/application.yml.example config/application.yml -f
- \cp config/environments/production.rb.example config/environments/production.rb -f
- sed -i 's/secret_token: ~/secret_token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/' config/application.yml
- sed -i 's/keep_web_url: false/keep_web_url: exampledotcom/' config/application.yml
+ # Set up an appropriate config.yml
+ arvados-server config-dump -config <(cat /etc/arvados/config.yml 2>/dev/null || echo "Clusters: {zzzzz: {}}") > /tmp/x
+ mkdir -p /etc/arvados/
+ mv /tmp/x /etc/arvados/config.yml
+ perl -p -i -e 'BEGIN{undef $/;} s/WebDAV(.*?):\n( *)ExternalURL: ""/WebDAV$1:\n$2ExternalURL: "example.com"/g' /etc/arvados/config.yml
RAILS_ENV=production RAILS_GROUPS=assets bundle exec rake npm:install >/dev/null
RAILS_ENV=production RAILS_GROUPS=assets bundle exec rake assets:precompile >/dev/null
# Remove generated configuration files so they don't go in the package.
- rm config/application.yml config/environments/production.rb
+ rm -rf /etc/arvados/
)
if [[ "$?" != "0" ]]; then
if [[ "$FORMAT" == "deb" ]]; then
declare -A dd
- dd[debian8]=jessie
dd[debian9]=stretch
dd[debian10]=buster
- dd[ubuntu1404]=trusty
dd[ubuntu1604]=xenial
dd[ubuntu1804]=bionic
D=${dd[$TARGET]}
if [[ "$pkgname" != "arvados-workbench" ]]; then
exclude_list+=('config/database.yml')
fi
- # for arvados-api-server, we need to dereference the
- # config/config.default.yml file. There is no fpm way to do that, sadly
- # (excluding the existing symlink and then adding the file from its source
- # path doesn't work, sadly.
- if [[ "$pkgname" == "arvados-api-server" ]]; then
- mv /arvados/services/api/config/config.default.yml /arvados/services/api/config/config.default.yml.bu
- cp -p /arvados/lib/config/config.default.yml /arvados/services/api/config/
- exclude_list+=('config/config.default.yml.bu')
- fi
for exclude in ${exclude_list[@]}; do
switches+=(-x "$exclude_root/$exclude")
done
-x "$exclude_root/vendor/cache-*" \
-x "$exclude_root/vendor/bundle" "$@" "$license_arg"
rm -rf "$scripts_dir"
- # Undo the deferencing we did above
- if [[ "$pkgname" == "arvados-api-server" ]]; then
- rm -f /arvados/services/api/config/config.default.yml
- mv /arvados/services/api/config/config.default.yml.bu /arvados/services/api/config/config.default.yml
- fi
}
# Build python packages with a virtualenv built-in
PYTHON_PKG=$PKG
fi
- if [[ -n "$ONLY_BUILD" ]] && [[ "$PYTHON_PKG" != "$ONLY_BUILD" ]] && [[ "$PKG" != "$ONLY_BUILD" ]]; then
+ # arvados-python-client sdist should always be built, to be available
+ for other dependent packages.
+ if [[ -n "$ONLY_BUILD" ]] && [[ "arvados-python-client" != "$PKG" ]] && [[ "$PYTHON_PKG" != "$ONLY_BUILD" ]] && [[ "$PKG" != "$ONLY_BUILD" ]]; then
return 0
fi
PACKAGE_PATH=`(cd dist; ls *tar.gz)`
+ if [[ "arvados-python-client" == "$PKG" ]]; then
+ PYSDK_PATH=`pwd`/dist/
+ fi
+
+ if [[ -n "$ONLY_BUILD" ]] && [[ "$PYTHON_PKG" != "$ONLY_BUILD" ]] && [[ "$PKG" != "$ONLY_BUILD" ]]; then
+ return 0
+ fi
+
# Determine the package version from the generated sdist archive
PYTHON_VERSION=${ARVADOS_BUILDING_VERSION:-$(awk '($1 == "Version:"){print $2}' *.egg-info/PKG-INFO)}
echo "wheel version: `build/usr/share/$python/dist/$PYTHON_PKG/bin/wheel version`"
if [[ "$TARGET" != "centos7" ]] || [[ "$PYTHON_PKG" != "python-arvados-fuse" ]]; then
- build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG $PACKAGE_PATH
+ build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -f $PYSDK_PATH $PACKAGE_PATH
else
# centos7 needs these special tweaks to install python-arvados-fuse
build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG docutils
- PYCURL_SSL_LIBRARY=nss build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG $PACKAGE_PATH
+ PYCURL_SSL_LIBRARY=nss build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -f $PYSDK_PATH $PACKAGE_PATH
fi
if [[ "$?" != "0" ]]; then
echo "Error, unable to run"
- echo " build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG $PACKAGE_PATH"
+ echo " build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -f $PYSDK_PATH $PACKAGE_PATH"
exit 1
fi
lib/cli
lib/cmd
lib/controller
+lib/controller/federation
+lib/controller/railsproxy
+lib/controller/router
+lib/controller/rpc
lib/crunchstat
lib/cloud
lib/cloud/azure
+lib/cloud/cloudtest
lib/dispatchcloud
lib/dispatchcloud/container
lib/dispatchcloud/scheduler
echo -n 'go: '
go version \
|| fatal "No go binary. See http://golang.org/doc/install"
- [[ $(go version) =~ go1.([0-9]+) ]] && [[ ${BASH_REMATCH[1]} -ge 10 ]] \
- || fatal "Go >= 1.10 required. See http://golang.org/doc/install"
+ [[ $(go version) =~ go1.([0-9]+) ]] && [[ ${BASH_REMATCH[1]} -ge 12 ]] \
+ || fatal "Go >= 1.12 required. See http://golang.org/doc/install"
echo -n 'gcc: '
gcc --version | egrep ^gcc \
|| fatal "No gcc. Try: apt-get install build-essential"
echo -n 'libpq libpq-fe.h: '
find /usr/include -path '*/postgresql/libpq-fe.h' | egrep --max-count=1 . \
|| fatal "No libpq libpq-fe.h. Try: apt-get install libpq-dev"
- echo -n 'services/api/config/database.yml: '
- if [[ ! -f "$WORKSPACE/services/api/config/database.yml" ]]; then
- fatal "Please provide a database.yml file for the test suite"
- else
- echo "OK"
- fi
echo -n 'postgresql: '
psql --version || fatal "No postgresql. Try: apt-get install postgresql postgresql-client-common"
echo -n 'phantomjs: '
else
"$venvdest/bin/pip" install --no-cache-dir 'setuptools>=18.5' 'pip>=7'
fi
- # ubuntu1404 can't seem to install mock via tests_require, but it can do this.
- "$venvdest/bin/pip" install --no-cache-dir 'mock>=1.0' 'pbr<1.7.0'
}
initialize() {
export R_LIBS
export GOPATH
+ # Make sure our compiled binaries under test override anything
+ # else that might be in the environment.
+ export PATH=$GOPATH/bin:$PATH
# Jenkins config requires that glob tmp/*.log match something. Ensure
# that happens even if we don't end up running services that set up
unset http_proxy https_proxy no_proxy
-
# Note: this must be the last time we change PATH, otherwise rvm will
# whine a lot.
setup_ruby_environment
+ if [[ -s "$CONFIGSRC/config.yml" ]] ; then
+ cp "$CONFIGSRC/config.yml" "$temp/test-config.yml"
+ export ARVADOS_CONFIG="$temp/test-config.yml"
+ else
+ if [[ -s /etc/arvados/config.yml ]] ; then
+ python > "$temp/test-config.yml" <<EOF
+import yaml
+import json
+v = list(yaml.safe_load(open('/etc/arvados/config.yml'))['Clusters'].values())[0]['PostgreSQL']
+v['Connection']['dbname'] = 'arvados_test'
+print(json.dumps({"Clusters": { "zzzzz": {'PostgreSQL': v}}}))
+EOF
+ export ARVADOS_CONFIG="$temp/test-config.yml"
+ else
+ if [[ ! -f "$WORKSPACE/services/api/config/database.yml" ]]; then
+ fatal "Please provide a database.yml file for the test suite"
+ fi
+ fi
+ fi
+
echo "PATH is $PATH"
}
services/api)
stop_services
;;
- gofmt | govendor | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
+ gofmt | govendor | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cloud/cloudtest | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
# don't care whether services are running
;;
*)
# before trying "go test". Otherwise, coverage-reporting
# mode makes Go show the wrong line numbers when reporting
# compilation errors.
- go get -ldflags "-X main.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1" && \
+ go get -ldflags "-X git.curoverse.com/arvados.git/lib/cmd.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1" && \
cd "$GOPATH/src/git.curoverse.com/arvados.git/$1" && \
if [[ -n "${testargs[$1]}" ]]
then
result=1
elif [[ "$2" == "go" ]]
then
- go get -ldflags "-X main.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1"
+ go get -ldflags "-X git.curoverse.com/arvados.git/lib/cmd.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1"
elif [[ "$2" == "pip" ]]
then
# $3 can name a path directory for us to use, including trailing
)
declare -a gostuff
-gostuff=(
- cmd/arvados-client
- cmd/arvados-server
- lib/cli
- lib/cmd
- lib/controller
- lib/crunchstat
- lib/cloud
- lib/cloud/azure
- lib/cloud/ec2
- lib/config
- lib/dispatchcloud
- lib/dispatchcloud/container
- lib/dispatchcloud/scheduler
- lib/dispatchcloud/ssh_executor
- lib/dispatchcloud/worker
- lib/service
- sdk/go/arvados
- sdk/go/arvadosclient
- sdk/go/auth
- sdk/go/blockdigest
- sdk/go/dispatch
- sdk/go/health
- sdk/go/httpserver
- sdk/go/manifest
- sdk/go/asyncbuf
- sdk/go/crunchrunner
- sdk/go/stats
- services/arv-git-httpd
- services/crunchstat
- services/health
- services/keep-web
- services/keepstore
- sdk/go/keepclient
- services/keep-balance
- services/keepproxy
- services/crunch-dispatch-local
- services/crunch-dispatch-slurm
- services/crunch-run
- services/ws
- tools/keep-block-check
- tools/keep-exercise
- tools/keep-rsync
- tools/sync-groups
-)
+gostuff=($(cd "$WORKSPACE" && git grep -lw func | grep \\.go | sed -e 's/\/[^\/]*$//' | sort -u))
install_apps/workbench() {
cd "$WORKSPACE/apps/workbench" \
}
test_apps/workbench_units() {
+ local TASK="test:units"
+ if [[ -n "${testargs[apps/workbench]}" ]] || [[ -n "${testargs[apps/workbench_units]}" ]]; then
+ TASK="test"
+ fi
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_units]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake ${TASK} TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_units]}
}
test_apps/workbench_functionals() {
+ local TASK="test:functionals"
+ if [[ -n "${testargs[apps/workbench]}" ]] || [[ -n "${testargs[apps/workbench_functionals]}" ]]; then
+ TASK="test"
+ fi
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_functionals]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake ${TASK} TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_functionals]}
}
test_apps/workbench_integration() {
+ local TASK="test:integration"
+ if [[ -n "${testargs[apps/workbench]}" ]] || [[ -n "${testargs[apps/workbench_integration]}" ]]; then
+ TASK="test"
+ fi
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_integration]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake ${TASK} TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_integration]}
}
test_apps/workbench_benchmark() {
+ local TASK="test:benchmark"
+ if [[ -n "${testargs[apps/workbench]}" ]] || [[ -n "${testargs[apps/workbench_benchmark]}" ]]; then
+ TASK="test"
+ fi
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:benchmark ${testargs[apps/workbench_benchmark]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake ${TASK} ${testargs[apps/workbench_benchmark]}
}
test_apps/workbench_profile() {
+ local TASK="test:profile"
+ if [[ -n "${testargs[apps/workbench]}" ]] || [[ -n "${testargs[apps/workbench_profile]}" ]]; then
+ TASK="test"
+ fi
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:profile ${testargs[apps/workbench_profile]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake ${TASK} ${testargs[apps/workbench_profile]}
}
install_deps() {
fi
fi
done
- do_install services/api
for g in "${gostuff[@]}"
do
do_install "$g" go
done
+ do_install services/api
do_install apps/workbench
}
done
testfuncargs["sdk/cli"]="sdk/cli"
+testfuncargs["sdk/R"]="sdk/R"
+testfuncargs["sdk/java-v2"]="sdk/java-v2"
+testfuncargs["apps/workbench_units"]="apps/workbench_units"
+testfuncargs["apps/workbench_functionals"]="apps/workbench_functionals"
+testfuncargs["apps/workbench_integration"]="apps/workbench_integration"
+testfuncargs["apps/workbench_benchmark"]="apps/workbench_benchmark"
+testfuncargs["apps/workbench_profile"]="apps/workbench_profile"
if [[ -z ${interactive} ]]; then
install_all
# assume emacs, or something, is offering a history buffer
# and pre-populating the command will only cause trouble
nextcmd=
- elif [[ "$nextcmd" != "install deps" ]]; then
- :
- elif [[ -e "$VENVDIR/bin/activate" ]]; then
- nextcmd="test lib/cmd"
- else
+ elif [[ ! -e "$VENVDIR/bin/activate" ]]; then
nextcmd="install deps"
+ else
+ nextcmd=""
fi
}
echo
help_interactive
nextcmd="install deps"
setnextcmd
- while read -p 'What next? ' -e -i "${nextcmd}" nextcmd; do
+ HISTFILE="$WORKSPACE/tmp/.history"
+ history -r
+ while read -p 'What next? ' -e -i "$nextcmd" nextcmd; do
+ history -s "$nextcmd"
+ history -w
read verb target opts <<<"${nextcmd}"
target="${target%/}"
target="${target/\/:/:}"
)
var (
- version = "dev"
handler = cmd.Multi(map[string]cmd.Handler{
- "-e": cmd.Version(version),
- "version": cmd.Version(version),
- "-version": cmd.Version(version),
- "--version": cmd.Version(version),
+ "-e": cmd.Version,
+ "version": cmd.Version,
+ "-version": cmd.Version,
+ "--version": cmd.Version,
"copy": cli.Copy,
"create": cli.Create,
import (
"os"
+ "git.curoverse.com/arvados.git/lib/cloud/cloudtest"
"git.curoverse.com/arvados.git/lib/cmd"
"git.curoverse.com/arvados.git/lib/config"
"git.curoverse.com/arvados.git/lib/controller"
)
var (
- version = "dev"
handler = cmd.Multi(map[string]cmd.Handler{
- "version": cmd.Version(version),
- "-version": cmd.Version(version),
- "--version": cmd.Version(version),
+ "version": cmd.Version,
+ "-version": cmd.Version,
+ "--version": cmd.Version,
- "config-check": config.CheckCommand,
- "config-dump": config.DumpCommand,
- "controller": controller.Command,
- "dispatch-cloud": dispatchcloud.Command,
+ "cloudtest": cloudtest.Command,
+ "config-check": config.CheckCommand,
+ "config-dump": config.DumpCommand,
+ "config-defaults": config.DumpDefaultsCommand,
+ "controller": controller.Command,
+ "dispatch-cloud": dispatchcloud.Command,
})
)
- Cloud:
- admin/storage-classes.html.textile.liquid
- admin/spot-instances.html.textile.liquid
+ - admin/cloudtest.html.textile.liquid
- Data Management:
- admin/collection-versioning.html.textile.liquid
- admin/collection-managed-properties.html.textile.liquid
- admin/federation.html.textile.liquid
- admin/controlling-container-reuse.html.textile.liquid
- admin/logs-table-management.html.textile.liquid
+ - admin/troubleshooting.html.textile.liquid
installguide:
- Overview:
- install/index.html.textile.liquid
filters = [['properties.responsible_person_uuid', 'exists', False]]
cols = util.list_all(arvados.api().collections().list, filters=filters, select=['uuid', 'name'])
-print("Found {} collections:".format(len(cols)))
+print('Found {} collections:'.format(len(cols)))
for c in cols:
print('{}, "{}"'.format(c['uuid'], c['name']))
\ No newline at end of file
f = [['properties.responsible_person_uuid', 'exists', False],
['owner_uuid', '=', p_uuid]]
cols = get_cols(api, f)
- print("Found {} collections owned by {}".format(len(cols), p_uuid))
+ print('Found {} collections owned by {}'.format(len(cols), p_uuid))
for c in cols:
- print(" - Updating collection {}".format(c["uuid"]))
+ print(' - Updating collection {}'.format(c['uuid']))
props = c['properties']
props['responsible_person_uuid'] = responsible_uuid
api.collections().update(uuid=c['uuid'], body={'properties': props}).execute()
\ No newline at end of file
import arvados
import arvados.util as util
-old_uuid = "zzzzz-tpzed-xxxxxxxxxxxxxxx"
-new_uuid = "zzzzz-tpzed-yyyyyyyyyyyyyyy"
+old_uuid = 'zzzzz-tpzed-xxxxxxxxxxxxxxx'
+new_uuid = 'zzzzz-tpzed-yyyyyyyyyyyyyyy'
api = arvados.api()
filters = [['properties.responsible_person_uuid', '=', '{}'.format(old_uuid)]]
cols = util.list_all(api.collections().list, filters=filters, select=['uuid', 'properties'])
-print("Found {} collections".format(len(cols)))
+print('Found {} collections'.format(len(cols)))
for c in cols:
- print("Updating collection {}".format(c["uuid"]))
+ print('Updating collection {}'.format(c['uuid']))
props = c['properties']
props['responsible_person_uuid'] = new_uuid
api.collections().update(uuid=c['uuid'], body={'properties': props}).execute()
\ No newline at end of file
--- /dev/null
+---
+layout: default
+navsection: admin
+title: Testing cloud configuration
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The @arvados-server@ package includes a @cloudtest@ tool that checks compatibility between your Arvados configuration, your cloud driver, your cloud provider's API, your cloud provider's VM instances, and the worker image you use with the *experimental* "cloud dispatcher":../install/install-dispatch-cloud.html.
+
+@arvados-server cloudtest@ performs the following steps:
+# Create a new instance
+# Wait for it to finish booting
+# Run a shell command on the new instance (optional)
+# Pause while you log in to the new instance and do other tests yourself (optional)
+# Shut down the instance
+
+This is an easy way to expose problems like these:
+* Configured cloud credentials don't work
+* Configured image types don't work
+* Configured driver is not compatible with your cloud API/region
+* Newly created instances are not usable due to a network problem or misconfiguration
+* Newly created instances do not accept the configured SSH private key
+* Selected machine image does not boot properly
+* Selected machine image is incompatible with some instance types
+* Driver has bugs
+
+h2. Typical uses
+
+Before bringing up the @arvados-dispatch-cloud@ service for the first time, we recommend running @cloudtest@ to check your configuration:
+
+<notextile><pre>
+$ <span class="userinput">arvados-server cloudtest -command "crunch-run --list"</span>
+</pre></notextile>
+
+Before updating your configuration to use a new VM image, we recommend running @cloudtest@ with the new image:
+
+<notextile><pre>
+$ <span class="userinput">arvados-server cloudtest -image-id <b>new_image_id</b> -command "crunch-run --list"</span>
+</pre></notextile>
+
+After adding an instance type to your configuration, we recommend running @cloudtest@ with the new instance type:
+
+<notextile><pre>
+$ <span class="userinput">arvados-server cloudtest -instance-type <b>new_instance_type_name</b></span>
+</pre></notextile>
+
+For a full list of options, use the @-help@ flag:
+
+<notextile><pre>
+$ <span class="userinput">arvados-server cloudtest -help</span>
+Usage:
+ -command string
+ Run an interactive shell command on the test instance when it boots
+ -config file
+ Site configuration file (default "/etc/arvados/config.yml")
+ -destroy-existing
+ Destroy any existing instances tagged with our InstanceSetID, instead of erroring out
+ -image-id string
+ Image ID to use when creating the test instance (if empty, use cluster config)
+ -instance-set-id value
+ InstanceSetID tag value to use on the test instance (default "cloudtest-user@hostname.example")
+ -instance-type string
+ Instance type to create (if empty, use cheapest type in config)
+ -pause-before-destroy
+ Prompt and wait before destroying the test instance
+</pre></notextile>
Change to the API server directory and use the following commands:
<pre>
-$ bundle exec rake config:migrate > config.yml
+$ RAILS_ENV=production bundle exec rake config:migrate > config.yml
$ cp config.yml /etc/arvados/config.yml
</pre>
-This will print the contents of @config.yml@ after merging with legacy @application.yml@. It may then be redirected to a file and copied to @/etc/arvados/config.yml@.
+This will print the contents of @config.yml@ after merging the legacy @application.yml@ and @database.yml@ into the existing systemwide @config.yml@. It may be redirected to a file and copied to @/etc/arvados/config.yml@ (it is safe to copy over, all configuration items from the existing @/etc/arvados/config.yml@ will be included in the migrated output).
If you wish to update @config.yml@ configuration by hand, or check that everything has been migrated, use @config:diff@ to print configuration items that differ between @application.yml@ and the system @config.yml@.
<pre>
-$ bundle exec rake config:diff
+$ RAILS_ENV=production bundle exec rake config:diff
+</pre>
+
+This command will also report if no migrations are required.
+
+h2. Workbench
+
+The legacy workbench configuration is stored in @config/application.yml@. After migration to @/etc/arvados/config.yml@, this file should be moved out of the way and/or deleted.
+
+Change to the workbench server directory and use the following commands:
+
+<pre>
+$ RAILS_ENV=production bundle exec rake config:migrate > config.yml
+$ cp config.yml /etc/arvados/config.yml
+</pre>
+
+This will print the contents of @config.yml@ after merging the legacy @application.yml@ into the existing systemwide @config.yml@. It may be redirected to a file and copied to @/etc/arvados/config.yml@ (it is safe to copy over, all configuration items from the existing @/etc/arvados/config.yml@ will be included in the migrated output).
+
+If you wish to update @config.yml@ configuration by hand, or check that everything has been migrated, use @config:diff@ to print configuration items that differ between @application.yml@ and the system @config.yml@.
+
+<pre>
+$ RAILS_ENV=production bundle exec rake config:diff
</pre>
This command will also report if no migrations are required.
h2. arvados-controller
-Only supports centralized config file. No migration needed.
+Already uses centralized config exclusively. No migration needed.
h2. arvados-dispatch-cloud
-Only supports centralized config file. No migration needed.
+Already uses centralized config exclusively. No migration needed.
--- /dev/null
+---
+layout: default
+navsection: admin
+title: Troubleshooting
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Using a distributed system with several services working together sometimes makes it difficult to find the root cause of errors, as a single client request usually means several different requests to more than one service.
+
+To deal with this difficulty, Arvados creates a request ID that gets carried over different services as the requests take place. This ID has a specific format and consists of the prefix "@req-@" followed by 20 random alphanumeric characters:
+
+<pre>req-frdyrcgdh4rau1ajiq5q</pre>
+
+This ID gets propagated via an HTTP @X-Request-Id@ header, and gets logged on every service.
+
+h3. API Server error reporting and logging
+
+In addition to providing the request ID on every HTTP response, the API Server adds it to every error message so that all clients show enough information to the user to be able to track a particular issue. As an example, let's suppose that we get the following error when trying to create a collection using the CLI tools:
+
+<pre>
+$ arv collection create --collection '{}'
+Error: #<RuntimeError: Whoops, something bad happened> (req-ku5ct9ehw0y71f1c5p79)
+</pre>
+
+The API Server logs every request in JSON format on the @production.log@ (usually under @/var/www/arvados-api/current/log/@ when installing from packages) file, so we can retrieve more information about this by using @grep@ and @jq@ tools:
+
+<pre>
+# grep req-ku5ct9ehw0y71f1c5p79 /var/www/arvados-api/current/log/production.log | jq .
+{
+ "method": "POST",
+ "path": "/arvados/v1/collections",
+ "format": "json",
+ "controller": "Arvados::V1::CollectionsController",
+ "action": "create",
+ "status": 422,
+ "duration": 1.52,
+ "view": 0.25,
+ "db": 0,
+ "request_id": "req-ku5ct9ehw0y71f1c5p79",
+ "client_ipaddr": "127.0.0.1",
+ "client_auth": "zzzzz-gj3su-jllemyj9v3s5emu",
+ "exception": "#<RuntimeError: Whoops, something bad happened>",
+ "exception_backtrace": "/var/www/arvados-api/current/app/controllers/arvados/v1/collections_controller.rb:43:in `create'\n/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/action_controller/metal/basic_implicit_render.rb:4:in `send_action'\n ...[snipped]",
+ "params": {
+ "collection": "{}",
+ "_profile": "true",
+ "cluster_id": "",
+ "collection_given": "true",
+ "ensure_unique_name": "false",
+ "help": "false"
+ },
+ "@timestamp": "2019-07-15T16:40:41.726634182Z",
+ "@version": "1",
+ "message": "[422] POST /arvados/v1/collections (Arvados::V1::CollectionsController#create)"
+}
+</pre>
+
+When logging a request that produced an error, the API Server adds @exception@ and @exception_backtrace@ keys to the JSON log. The latter includes the complete error stack trace as a string, and can be displayed in a more readable form like so:
+
+<pre>
+# grep req-ku5ct9ehw0y71f1c5p79 /var/www/arvados-api/current/log/production.log | jq -r .exception_backtrace
+/var/www/arvados-api/current/app/controllers/arvados/v1/collections_controller.rb:43:in `create'
+/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/action_controller/metal/basic_implicit_render.rb:4:in `send_action'
+/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/abstract_controller/base.rb:188:in `process_action'
+/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/action_controller/metal/rendering.rb:30:in `process_action'
+/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/abstract_controller/callbacks.rb:20:in `block in process_action'
+/var/lib/gems/ruby/2.3.0/gems/activesupport-5.0.7.2/lib/active_support/callbacks.rb:126:in `call'
+...
+</pre>
\ No newline at end of file
Subsequently, the <code class="userinput">psql -d 'arvados_production' -c '\dx'</code> command will display the installed extensions for the arvados_production database. This list should now contain @pg_trgm@.
+h4. Migrating to centralized config.yml
+
+See "Migrating Configuration":config-migration.html for notes on migrating legacy per-component configuration files to the new centralized @/etc/arvados/config.yml@. To ensure a smooth transition, the per-component config files continue to be read, and take precedence over the centralized configuration.
+
h3(#v1_4_0). v1.4.0 (2019-06-05)
h4. Populating the new file_count and file_size_total columns on the collections table
|@is_a@|string|Arvados object type|@["head_uuid","is_a","arvados#collection"]@|
|@exists@|string|Test if a subproperty is present.|@["properties","exists","my_subproperty"]@|
+
+h4(#substringsearchfilter). Filtering using substring search
+
+Resources can also be filtered by searching for a substring in attributes of type @string@, @array of strings@, @text@, and @hash@, which are indexed in the database specifically for search. To use substring search, the filter must:
+
+* Specify @any@ as the attribute
+* Use either the @like@ or @ilike@ operator
+* Have an operand of type @string@ that is wrapped in the SQL pattern match wildcard character @%@
+
+For example, the @["any", "like", "%foo%"]@ filter will return all resources that contain @foo@ in the content of at least one attribute of the previously defined types. This is the recommended way to do keyword and file name search across the entire database. Note that only exact substring matches are returned and results are unranked and returned in the order specified by the @list@ @order@ argument.
+
h4(#subpropertyfilters). Filtering on subproperties
-Some record type have an additional @properties@ attribute that allows recording and filtering on additional key-value pairs. To filter on a subproperty, the value in the @attribute@ position has the form @properties.user_property@. You may also use JSON-LD / RDF style URIs for property keys by enclosing them in @<...>@ for example @properties.<http://example.com/user_property>@. Alternately you may also provide a JSON-LD "@context" field, however at this time JSON-LD contexts are not interpreted by Arvados.
+Some record types have an additional @properties@ attribute that allows recording and filtering on additional key-value pairs. To filter on a subproperty, the value in the @attribute@ position has the form @properties.user_property@. You may also use JSON-LD / RDF style URIs for property keys by enclosing them in @<...>@ for example @properties.<http://example.com/user_property>@. Alternately you may also provide a JSON-LD "@context" field, however at this time JSON-LD contexts are not interpreted by Arvados.
table(table table-bordered table-condensed).
|_. Operator|_. Operand type|_. Description|_. Example|
h3. contents
-Retrieve a list of items owned by the group.
+Retrieve a list of items owned by the group. Use "recursive" to list objects within subprojects as well.
Arguments:
|order|array|Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order. Sort within a resource type by prefixing the attribute with the resource name and a period.|query|@["collections.modified_at desc"]@|
|filters|array|Conditions for filtering items.|query|@[["uuid", "is_a", "arvados#job"]]@|
|recursive|boolean (default false)|Include items owned by subprojects.|query|@true@|
-|exclude_home_project|boolean (default false)|Only return items which are visible to the user but not accessible within the user's home project. Use this to get a list of items that are shared with the user.|query|@true@|
+|exclude_home_project|boolean (default false)|Only return items which are visible to the user but not accessible within the user's home project. Use this to get a list of items that are shared with the user. Uses the logic described under the "shared" endpoint.|query|@true@|
+|include|string|If provided with the value "owner_uuid", this will return owner objects in the "included" field of the response.|query||
-Note: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in listed collections. If you need it, request a "list of collections":{{site.baseurl}}/api/methods/collections.html with the filter @["owner_uuid", "=", GROUP_UUID]@, and @"manifest_text"@ listed in the select parameter.
+Notes:
+
+Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in listed collections. If you need it, request a "list of collections":{{site.baseurl}}/api/methods/collections.html with the filter @["owner_uuid", "=", GROUP_UUID]@, and @"manifest_text"@ listed in the select parameter.
+
+Use filters with the attribute format @<item type>.<field name>@ to filter items of a specific type. For example: @["container_requests.state", "=", "Final"]@ to filter @container_requests@ where @state@ is @Final@. All other types of items owned by this group will be unimpacted by this filter and will still be included.
+
+When called with "include=owner_uuid", the @included@ field of the response is populated with users, projects, or other groups that own the objects returned in @items@. This can be used to fetch an object and its parent with a single API call.
-Note: Use filters with the attribute format @<item type>.<field name>@ to filter items of a specific type. For example: @["pipeline_instances.state", "=", "Complete"]@ to filter @pipeline_instances@ where @state@ is @Complete@. All other types of items owned by this group will be unimpacted by this filter and will still be included.
h3. create
h3. shared
-This endpoint returns the toplevel set of groups to which access is granted through a chain of one or more permission links rather than through direct ownership by the current user account. This is useful for clients which wish to browse the list of projects the user has permission to read which are not part of the "home" project tree.
-
-When called with "include=owner_uuid" this also returns (in the "included" field) the objects that own those projects (users or non-project groups).
+This endpoint returns the toplevel set of groups to which access is granted through a chain of one or more permission links rather than through direct ownership by the current user account. This is useful for clients which wish to browse the list of projects the user has permission to read which are not part of the "home" project tree. Similar behavior is also available with the @exclude_home_project@ option of the "contents" endpoint.
Specifically, the logic is:
the owner_uuid is a group but group_class is not a project)
</pre>
-In addition to the "include" parameter this endpoint also supports the same parameters as the "list method.":{{site.baseurl}}/api/methods.html#index
-
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|include|string|If provided with the value "owner_uuid", this will return owner objects in the "included" field of the response.|query|?include=owner_uuid|
+|include|string|If provided with the value "owner_uuid", this will return owner objects in the @included@ field of the response.|query||
+
+Notes:
+
+When called with "include=owner_uuid", the @included@ field of the response is populated with users and non-project groups that own the objects returned in @items@.
+
+In addition to the "include" parameter this endpoint also supports the same parameters as the "list method.":{{site.baseurl}}/api/methods.html#index
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-Arvados components run on GNU/Linux systems, and supports multiple cloud operating stacks. Arvados supports Debian and derivatives such as Ubuntu, as well as Red Hat and derivatives such as CentOS.
+Arvados components run on GNU/Linux systems and support multiple cloud operating stacks. Arvados supports Debian and derivatives such as Ubuntu, as well as Red Hat and derivatives such as CentOS. Although Arvados development is sponsored by Veritas Genetics, which offers commercial support, "Arvados is Free Software":{{site.baseurl}}/copying/copying.html and we encourage self-supported/community-supported installations.
Arvados components can be installed and configured in a number of different ways.
{% include 'install_git' %}
-h2(#configure). Set up the database
-
-Configure the API server to connect to your database by updating @/etc/arvados/api/database.yml@. Replace the @xxxxxxxx@ database password placeholder with the "password you generated during database setup":install-postgresql.html#api. Be sure to update the @production@ section.
-
-<notextile>
-<pre><code>~$ <span class="userinput">editor /etc/arvados/api/database.yml</span>
-</code></pre></notextile>
-
h2(#configure_application). Configure the API server
-Edit @/etc/arvados/api/application.yml@ to configure the settings described in the following sections. The API server reads both @application.yml@ and its own @config/application.default.yml@ file. The settings in @application.yml@ take precedence over the defaults that are defined in @config/application.default.yml@. The @config/application.yml.example@ file is not read by the API server and is provided as a starting template only.
+Edit @/etc/arvados/config.yml@ to set the keys below. Only the most important configuration options are listed here. The example configuration fragments given below should be merged into a single configuration structure. Correct indentation is important. The full set of configuration options are listed in "config.yml":{{site.baseurl}}/admin/config.html
-@config/application.default.yml@ documents additional configuration settings not listed here. You can "view the current source version":https://dev.arvados.org/projects/arvados/repository/revisions/master/entry/services/api/config/application.default.yml for reference.
+h3(#uuid_prefix). ClusterID
-Only put local configuration in @application.yml@. Do not edit @application.default.yml@.
+The @ClusterID@ is used for all database identifiers to identify the record as originating from this site. It is the first key under @Clusters@ in @config.yml@. It must be exactly 5 lowercase ASCII letters and digits. All configuration items go under the cluster id key (replace @zzzzz@ with your cluster id in the examples below).
-h3(#uuid_prefix). uuid_prefix
+<notextile>
+<pre><code>Clusters:
+ <span class="userinput">zzzzz</span>:
+ ...</code></pre>
+</notextile>
-Define your @uuid_prefix@ in @application.yml@ by setting the @uuid_prefix@ field in the section for your environment. This prefix is used for all database identifiers to identify the record as originating from this site. It must be exactly 5 lowercase ASCII letters and digits.
+h3(#configure). PostgreSQL.Connection
-Example @application.yml@:
+Replace the @xxxxxxxx@ database password placeholder with the "password you generated during database setup":install-postgresql.html#api.
<notextile>
-<pre><code> uuid_prefix: <span class="userinput">zzzzz</span></code></pre>
+<pre><code>Clusters:
+ zzzzz:
+ PostgreSQL:
+ Connection:
+ host: <span class="userinput">localhost</span>
+ user: <span class="userinput">arvados</span>
+ password: <span class="userinput">xxxxxxxx</span>
+ dbname: <span class="userinput">arvados_production</span>
+ </code></pre>
</notextile>
-h3. secret_token
+h3. API.RailsSessionSecretToken
-The @secret_token@ is used for for signing cookies. IMPORTANT: This is a site secret. It should be at least 50 characters. Generate a random value and set it in @application.yml@:
+The @API.RailsSessionSecretToken@ is used for signing cookies. IMPORTANT: This is a site secret. It should be at least 50 characters. Generate a random value and set it in @config.yml@:
<notextile>
<pre><code>~$ <span class="userinput">ruby -e 'puts rand(2**400).to_s(36)'</span>
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
</code></pre></notextile>
-Example @application.yml@:
+Example @config.yml@:
<notextile>
-<pre><code> secret_token: <span class="userinput">yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy</span></code></pre>
+<pre><code>Clusters:
+ zzzzz:
+ API:
+ RailsSessionSecretToken: <span class="userinput">yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy</span></code></pre>
</notextile>
-h3(#blob_signing_key). blob_signing_key
+h3(#blob_signing_key). Collections.BlobSigningKey
-The @blob_signing_key@ is used to enforce access control to Keep blocks. This same key must be provided to the Keepstore daemons when "installing Keepstore servers.":install-keepstore.html IMPORTANT: This is a site secret. It should be at least 50 characters. Generate a random value and set it in @application.yml@:
+The @Collections.BlobSigningKey@ is used to enforce access control to Keep blocks. This same key must be provided to the Keepstore daemons when "installing Keepstore servers.":install-keepstore.html IMPORTANT: This is a site secret. It should be at least 50 characters. Generate a random value and set it in @config.yml@:
<notextile>
<pre><code>~$ <span class="userinput">ruby -e 'puts rand(2**400).to_s(36)'</span>
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
</code></pre></notextile>
-Example @application.yml@:
+Example @config.yml@:
<notextile>
-<pre><code> blob_signing_key: <span class="userinput">xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx</span></code></pre>
+<pre><code>Clusters:
+ zzzzz:
+ Collections:
+ BlobSigningKey: <span class="userinput">xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx</span></code></pre>
</notextile>
-h3(#omniauth). sso_app_secret, sso_app_id, sso_provider_url
+h3(#omniauth). Login.ProviderAppID, Login.ProviderAppSecret, Services.SSO.ExternalURL
The following settings enable the API server to communicate with the "Single Sign On (SSO) server":install-sso.html to authenticate user log in.
-Set @sso_provider_url@ to the base URL where your SSO server is installed. This should be a URL consisting of the scheme and host (and optionally, port), without a trailing slash.
+Set @Services.SSO.ExternalURL@ to the base URL where your SSO server is installed. This should be a URL consisting of the scheme and host (and optionally, port), without a trailing slash.
-Set @sso_app_secret@ and @sso_app_id@ to the corresponding values for @app_secret@ and @app_id@ used in the "Create arvados-server client for Single Sign On (SSO)":install-sso.html#client step.
+Set @Login.ProviderAppID@ and @Login.ProviderAppSecret@ to the corresponding values for @app_id@ and @app_secret@ used in the "Create arvados-server client for Single Sign On (SSO)":install-sso.html#client step.
-Example @application.yml@:
+Example @config.yml@:
<notextile>
-<pre><code> sso_app_id: <span class="userinput">arvados-server</span>
- sso_app_secret: <span class="userinput">wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww</span>
- sso_provider_url: <span class="userinput">https://sso.example.com</span>
-</code></pre>
+<pre><code>Clusters:
+ zzzzz:
+ Services:
+ SSO:
+ ExternalURL: <span class="userinput">https://sso.example.com</span>
+ Login:
+ ProviderAppID: <span class="userinput">arvados-server</span>
+ ProviderAppSecret: <span class="userinput">wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww</span></code></pre>
</notextile>
-h3. workbench_address
+h3. Services.Workbench1.ExternalURL
-Set @workbench_address@ to the URL of your workbench application after following "Install Workbench.":install-workbench-app.html
+Set @Services.Workbench1.ExternalURL@ to the URL of your workbench application after following "Install Workbench.":install-workbench-app.html
-Example @application.yml@:
+Example @config.yml@:
<notextile>
-<pre><code> workbench_address: <span class="userinput">https://workbench.zzzzz.example.com</span></code></pre>
+<pre><code>Clusters:
+ zzzzz:
+ Services:
+ Workbench1:
+ ExternalURL: <span class="userinput">https://workbench.zzzzz.example.com</span></code></pre>
</notextile>
-h3. websocket_address
+h3. Services.Websocket.ExternalURL
-Set @websocket_address@ to the @wss://@ URL of the API server websocket endpoint after following "Set up Web servers":#set_up. The path of the default endpoint is @/websocket@.
+Set @Services.Websocket.ExternalURL@ to the @wss://@ URL of the API server websocket endpoint after following "Install the websocket server":install-ws.html .
-Example @application.yml@:
+Example @config.yml@:
<notextile>
-<pre><code> websocket_address: <span class="userinput">wss://ws.zzzzz.example.com</span>/websocket</code></pre>
+<pre><code>Clusters:
+ zzzzz:
+ Services:
+ Websocket:
+ ExternalURL: <span class="userinput">wss://ws.zzzzz.example.com</span></code></pre>
</notextile>
-h3(#git_repositories_dir). git_repositories_dir
+h3(#git_repositories_dir). Git.Repositories
-The @git_repositories_dir@ setting specifies the directory where user git repositories will be stored.
+The @Git.Repositories@ setting specifies the directory where user git repositories will be stored.
The git server setup process is covered on "its own page":install-arv-git-httpd.html. For now, create an empty directory in the default location:
<pre><code>~$ <span class="userinput">sudo mkdir -p /var/lib/arvados/git/repositories</span>
</code></pre></notextile>
-If you intend to store your git repositories in a different location, specify that location in @application.yml@.
-
-Default setting in @application.default.yml@:
+If you intend to store your git repositories in a different location, specify that location in @config.yml@. Example:
<notextile>
-<pre><code> git_repositories_dir: <span class="userinput">/var/lib/arvados/git/repositories</span>
-</code></pre>
+<pre><code>Clusters:
+ zzzzz:
+ Git:
+ Repositories: <span class="userinput">/var/lib/arvados/git/repositories</span></code></pre>
</notextile>
-h3(#git_internal_dir). git_internal_dir
+h3(#enable_legacy_jobs_api). Containers.JobsAPI.Enable
+
+Enable the legacy "Jobs API":install-crunch-dispatch.html . Note: new installations should use the "Containers API":crunch2-slurm/install-prerequisites.html
-The @git_internal_dir@ setting specifies the location of Arvados' internal git repository. By default this is @/var/lib/arvados/internal.git@. This repository stores git commits that have been used to run Crunch jobs. It should _not_ be a subdirectory of @git_repositories_dir@.
+Disabling the jobs API means methods involving @jobs@, @job_tasks@, @pipeline_templates@ and @pipeline_instances@ are disabled. This functionality is superseded by the containers API, which consists of @container_requests@, @containers@ and @workflows@. Arvados clients (such as @arvados-cwl-runner@) detect which APIs are available and adjust behavior accordingly. Note the configuration value must be a quoted string.
-Example @application.yml@:
+* 'auto' -- (default) enable the Jobs API only if it has been used before (i.e., there are job records in the database), otherwise disable the Jobs API.
+* 'true' -- enable the Jobs API even if there are no existing job records.
+* 'false' -- disable the Jobs API even in the presence of existing job records.
<notextile>
-<pre><code> git_internal_dir: <span class="userinput">/var/lib/arvados/internal.git</span>
-</code></pre>
+<pre><code>Clusters:
+ zzzzz:
+ Containers:
+ JobsAPI:
+ Enable: <span class="userinput">'auto'</span></code></pre>
</notextile>
-h3(#enable_legacy_jobs_api). enable_legacy_jobs_api
+h4(#git_internal_dir). Containers.JobsAPI.GitInternalDir
-Enable the legacy "Jobs API":install-crunch-dispatch.html . Note: new installations should use the "Containers API":crunch2-slurm/install-prerequisites.html
+Only required if the legacy "Jobs API" is enabled, otherwise you should skip this.
-Disabling the jobs API means methods involving @jobs@, @job_tasks@, @pipeline_templates@ and @pipeline_instances@ are disabled. This functionality is superceded by the containers API which consists of @container_requests@, @containers@ and @workflows@. Arvados clients (such as @arvados-cwl-runner@) detect which APIs are available and adjust behavior accordingly.
+The @Containers.JobsAPI.GitInternalDir@ setting specifies the location of Arvados' internal git repository. By default this is @/var/lib/arvados/internal.git@. This repository stores git commits that have been used to run Crunch jobs. It should _not_ be a subdirectory of the directory in @Git.Repositories@.
-* auto -- (default) enable the Jobs API only if it has been used before (i.e., there are job records in the database), otherwise disable jobs API .
-* true -- enable the Jobs API even if there are no existing job records.
-* false -- disable the Jobs API even in the presence of existing job records.
+Example @config.yml@:
<notextile>
-<pre><code> enable_legacy_jobs_api: <span class="userinput">auto</span>
-</code></pre>
+<pre><code>Clusters:
+ zzzzz:
+ Containers:
+ JobsAPI:
+ GitInternalDir: <span class="userinput">/var/lib/arvados/internal.git</span></code></pre>
</notextile>
h2(#set_up). Set up Nginx and Passenger
# also ensure the following settings match it:
# * `client_max_body_size` in the server section below
# * `client_max_body_size` in the Workbench Nginx configuration (twice)
- # * `max_request_size` in the API server's application.yml file
+ # * `API.MaxRequestSize` in config.yml
client_max_body_size 128m;
}
<notextile><pre>fatal: Not a git repository (or any of the parent directories): .git</pre></notextile>
{% include 'notebox_end' %}
+
+h2. Troubleshooting
+
+Once you have the API server up and running, you may need to revisit this installation when troubleshooting client-related issues. Please read our "admin troubleshooting notes":{{site.baseurl}}/admin/troubleshooting.html on how requests can be tracked down between services.
\ No newline at end of file
h2. Configure the dispatcher
-Add or update the following portions of your cluster configuration file, @/etc/arvados/config.yml@. Refer to "config.defaults.yml":https://dev.arvados.org/projects/arvados/repository/revisions/13996-new-api-config/entry/lib/config/config.defaults.yml for information about additional configuration options.
+Add or update the following portions of your cluster configuration file, @/etc/arvados/config.yml@. Refer to "config.defaults.yml":{{site.baseurl}}/admin/config.html for information about additional configuration options.
<notextile>
<pre><code>Clusters:
</code></pre>
</notextile>
-h2. Install the dispatcher
+h2. Test your configuration
First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+Next, install the arvados-server package.
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install arvados-server</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install arvados-server</span>
+</code></pre>
+</notextile>
+
+Run the @cloudtest@ tool to verify that your configuration works. This creates a new cloud VM, confirms that it boots correctly and accepts your configured SSH private key, and shuts it down.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-server cloudtest && echo "OK!"</span>
+</code></pre>
+</notextile>
+
+Refer to the "cloudtest tool documentation":../admin/cloudtest.html for more information.
+
+h2. Install the dispatcher
+
On Red Hat-based systems:
<notextile>
table(table table-bordered table-condensed).
|_. Distribution|_. State|_. Last supported version|
|CentOS 7|Supported|Latest|
-|Debian 8 ("jessie")|Supported|Latest|
|Debian 9 ("stretch")|Supported|Latest|
-|Ubuntu 14.04 ("trusty")|Supported|Latest|
|Ubuntu 16.04 ("xenial")|Supported|Latest|
|Ubuntu 18.04 ("bionic")|Supported|Latest|
+|Ubuntu 14.04 ("trusty")|EOL|5f943cd451acfbdcddd84e791738c3aa5926bfed (2019-07-10)|
+|Debian 8 ("jessie")|EOL|5f943cd451acfbdcddd84e791738c3aa5926bfed (2019-07-10)|
|Ubuntu 12.04 ("precise")|EOL|8ed7b6dd5d4df93a3f37096afe6d6f81c2a7ef6e (2017-05-03)|
|Debian 7 ("wheezy")|EOL|997479d1408139e96ecdb42a60b4f727f814f6c9 (2016-12-28)|
|CentOS 6 |EOL|997479d1408139e96ecdb42a60b4f727f814f6c9 (2016-12-28)|
+Arvados packages are published for current Debian releases (until the EOL date), current Ubuntu LTS releases (until the end of standard support), and the latest version of CentOS.
+
h2(#repos). Arvados package repositories
On any host where you install Arvados software, you'll need to set up an Arvados package repository. They're available for several popular distributions.
h3. Debian and Ubuntu
-Packages are available for Debian 8 ("jessie"), Debian 9 ("stretch"), Ubuntu 14.04 ("trusty"), Ubuntu 16.04 ("xenial") and Ubuntu 18.04 ("bionic").
+Packages are available for Debian 9 ("stretch"), Ubuntu 16.04 ("xenial") and Ubuntu 18.04 ("bionic").
First, register the Curoverse signing key in apt's database:
table(table table-bordered table-condensed).
|_. OS version|_. Command|
-|Debian 8 ("jessie")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ jessie main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
|Debian 9 ("stretch")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ stretch main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
-|Ubuntu 14.04 ("trusty")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ trusty main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
|Ubuntu 16.04 ("xenial")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ xenial main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
|Ubuntu 18.04 ("bionic")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ bionic main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
# ssh_key = path
# The GCE image name and network zone name to use when creating new nodes.
-image = debian-7
+image = debian
# network = your_network_name
# JSON string of service account authorizations for this cluster.
h2(#configure). Configure Workbench
-Edit @/etc/arvados/workbench/application.yml@ following the instructions below. Workbench reads both @application.yml@ and its own @config/application.defaults.yml@ file. Values in @application.yml@ take precedence over the defaults that are defined in @config/application.defaults.yml@. The @config/application.yml.example@ file is not read by Workbench and is provided for installation convenience only.
+Edit @/etc/arvados/config.yml@ to set the keys below. Only the most important configuration options are listed here. The full set of configuration options are in the "Workbench section of config.yml":{{site.baseurl}}/admin/config.html
-Consult @config/application.default.yml@ for a full list of configuration options. Always put your local configuration in @/etc/arvados/workbench/application.yml@—never edit @config/application.default.yml@.
-
-h3. secret_token
+h3. Workbench.SecretKeyBase
This application needs a secret token. Generate a new secret:
</code></pre>
</notextile>
-Then put that value in the @secret_token@ field.
+Then put that value in the @Workbench.SecretKeyBase@ field.
+
+<notextile>
+<pre><code>Clusters:
+ zzzzz:
+ Workbench:
+ SecretKeyBase: <span class="userinput">aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa</span>
+</code></pre>
+</notextile>
-h3. arvados_login_base and arvados_v1_base
+h3. Services.Controller.ExternalURL
-Point @arvados_login_base@ and @arvados_v1_base@ at your "API server":install-api-server.html. For example like this:
+Ensure that @Services.Controller.ExternalURL@ is configured for "Arvados Controller":install-controller.html . For example:
<notextile>
-<pre><code>arvados_login_base: https://prefix_uuid.your.domain/login
-arvados_v1_base: https://prefix_uuid.your.domain/arvados/v1
+<pre><code>Clusters:
+ zzzzz:
+ Services:
+ Controller:
+ ExternalURL: <span class="userinput">https://prefix_uuid.your.domain</span>
</code></pre>
</notextile>
-h3. site_name
+h3. Workbench.SiteName
+
+@Workbench.SiteName@ can be set to any arbitrary string. It is used to identify this Workbench to people visiting it.
-@site_name@ can be set to any arbitrary string. It is used to identify this Workbench to people visiting it.
-h3. arvados_insecure_https
+<notextile>
+<pre><code>Clusters:
+ zzzzz:
+ Workbench:
+ SiteName: <span class="userinput">My Arvados</span>
+</code></pre>
+</notextile>
-If the SSL certificate you use for your API server isn't an official certificate signed by a CA, make sure @arvados_insecure_https@ is @true@.
+h3. TLS.Insecure
-h3. Other options
+For testing only. Allows use of self-signed certificates. If true, Workbench will not verify the TLS certificate of the Arvados Controller.
-Consult @application.default.yml@ for a full list of configuration options. Always put your local configuration in @application.yml@ instead of editing @application.default.yml@.
+<notextile>
+<pre><code>Clusters:
+ zzzzz:
+ TLS:
+ Insecure: <span class="userinput">false</span>
+</code></pre>
+</notextile>
-h2. Configure Piwik
+h2. Configure Piwik (optional)
-In @/var/www/arvados-workbench/current/config@, copy @piwik.yml.example@ to @piwik.yml@ and edit to suit.
+Piwik can be used to gather usage analytics. In @/var/www/arvados-workbench/current/config@, copy @piwik.yml.example@ to @piwik.yml@ and edit to suit.
h2. Set up Web server
#passenger_ruby /usr/local/rvm/wrappers/default/ruby;
# `client_max_body_size` should match the corresponding setting in
- # the API server's Nginx configuration.
+ # the API.MaxRequestSize setting and the Controller's Nginx configuration.
client_max_body_size 128m;
}
index index.html index.htm index.php;
# `client_max_body_size` should match the corresponding setting in
- # the API server's Nginx configuration.
+ # the API.MaxRequestSize setting and the Controller's Nginx configuration.
client_max_body_size 128m;
location / {
@arv collection list --filters '[["name", "=", "PGP VAR inputs"], ["created_at", ">=", "2014-10-01"]]'@
-will return a list of all collections visible to the current user which are named "PGP VAR inputs" and were created on or after October 1, 2014.
+will return a list of all collections visible to the current user which are named "PGP VAR inputs" and were created on or after October 1, 2014. See the "Common resource methods":{{site.baseurl}}/api/methods.html#index page for more details on using @list@ and @--filters@.
h3. 7. Set Docker image, base command, and input port for "sort" tool
-The "Docker Repository" is the name:tag of a "Docker image uploaded Arvados.":{{site.baseurl}}/user/topics/arv-docker.html (Use @arv-keepdocker --pull debian:8@) You can also find prepackaged bioinformatics tools on various sites, such as http://dockstore.org and http://biocontainers.pro/ .
+The "Docker Repository" is the name:tag of a "Docker image uploaded Arvados.":{{site.baseurl}}/user/topics/arv-docker.html (Use @arv-keepdocker --pull debian:9@) You can also find prepackaged bioinformatics tools on various sites, such as http://dockstore.org and http://biocontainers.pro/ .
!(screenshot)c6.png!
<pre>
requirements:
DockerRequirement:
- dockerPull: "debian:8"
+ dockerPull: "debian:9"
arv:dockerCollectionPDH: "feaf1fc916103d7cdab6489e1f8c3a2b+174"
</pre>
<notextile>
<pre><code>root@fbf1d0f529d5:/# apt-get update
-Hit http://security.debian.org jessie/updates InRelease
-Ign http://httpredir.debian.org jessie InRelease
-Ign http://apt.arvados.org jessie InRelease
-Hit http://apt.arvados.org jessie Release.gpg
-Get:1 http://security.debian.org jessie/updates/main amd64 Packages [431 kB]
-Hit http://apt.arvados.org jessie Release
-Hit http://httpredir.debian.org jessie-updates InRelease
-Get:2 http://apt.arvados.org jessie/main amd64 Packages [257 kB]
-Get:3 http://httpredir.debian.org jessie-updates/main amd64 Packages [17.6 kB]
-Hit http://httpredir.debian.org jessie Release.gpg
-Hit http://httpredir.debian.org jessie Release
-Get:4 http://httpredir.debian.org jessie/main amd64 Packages [9049 kB]
-Fetched 9755 kB in 2s (3894 kB/s)
+Get:2 http://apt.arvados.org stretch-dev InRelease [3260 B]
+Get:1 http://security-cdn.debian.org/debian-security stretch/updates InRelease [94.3 kB]
+Ign:3 http://cdn-fastly.deb.debian.org/debian stretch InRelease
+Get:4 http://cdn-fastly.deb.debian.org/debian stretch-updates InRelease [91.0 kB]
+Get:5 http://apt.arvados.org stretch-dev/main amd64 Packages [208 kB]
+Get:6 http://cdn-fastly.deb.debian.org/debian stretch Release [118 kB]
+Get:7 http://security-cdn.debian.org/debian-security stretch/updates/main amd64 Packages [499 kB]
+Get:8 http://cdn-fastly.deb.debian.org/debian stretch Release.gpg [2434 B]
+Get:9 http://cdn-fastly.deb.debian.org/debian stretch-updates/main amd64 Packages.diff/Index [10.6 kB]
+Get:10 http://cdn-fastly.deb.debian.org/debian stretch-updates/main amd64 Packages 2019-07-08-0821.07.pdiff [445 B]
+Get:10 http://cdn-fastly.deb.debian.org/debian stretch-updates/main amd64 Packages 2019-07-08-0821.07.pdiff [445 B]
+Fetched 1026 kB in 0s (1384 kB/s)
Reading package lists... Done
</code></pre>
</notextile>
Reading package lists... Done
Building dependency tree
Reading state information... Done
-The following extra packages will be installed:
- [...]
- libxxf86vm1 make patch r-base-core r-base-dev r-cran-boot r-cran-class
- r-cran-cluster r-cran-codetools r-cran-foreign r-cran-kernsmooth
- r-cran-lattice r-cran-mass r-cran-matrix r-cran-mgcv r-cran-nlme r-cran-nnet
- r-cran-rpart r-cran-spatial r-cran-survival r-doc-html r-recommended
- [...]
-Suggested packages:
- [...]
-The following NEW packages will be installed:
- [...]
- libxxf86vm1 make patch r-base-core r-base-dev r-cran-boot r-cran-class
- r-cran-cluster r-cran-codetools r-cran-foreign r-cran-kernsmooth
- r-cran-lattice r-cran-mass r-cran-matrix r-cran-mgcv r-cran-nlme r-cran-nnet
- r-cran-rpart r-cran-spatial r-cran-survival r-doc-html r-recommended
- [...]
-0 upgraded, 203 newly installed, 0 to remove and 39 not upgraded.
-Need to get 124 MB of archives.
-After this operation, 334 MB of additional disk space will be used.
-Do you want to continue [Y/n]? y
-[...]
-Get:130 http://httpredir.debian.org/debian/ jessie/main r-cran-cluster amd64 1.15.3-1 [475 kB]
-Get:131 http://httpredir.debian.org/debian/ jessie/main r-base-dev all 3.1.1-1 [4018 B]
-Get:132 http://httpredir.debian.org/debian/ jessie/main r-cran-boot all 1.3-13-1 [571 kB]
-Get:133 http://httpredir.debian.org/debian/ jessie/main r-cran-codetools all 0.2-9-1 [45.7 kB]
-Get:134 http://httpredir.debian.org/debian/ jessie/main r-cran-rpart amd64 4.1-8-1 [862 kB]
-Get:135 http://httpredir.debian.org/debian/ jessie/main r-cran-foreign amd64 0.8.61-1 [213 kB]
-[...]
-Fetched 124 MB in 52s (2380 kB/s)
-debconf: delaying package configuration, since apt-utils is not installed
-[...]
-Unpacking r-base-core (3.1.1-1+b2) ...
-Selecting previously unselected package r-base-dev.
-Preparing to unpack .../r-base-dev_3.1.1-1_all.deb ...
-Unpacking r-base-dev (3.1.1-1) ...
-Selecting previously unselected package r-cran-boot.
-Preparing to unpack .../r-cran-boot_1.3-13-1_all.deb ...
-Unpacking r-cran-boot (1.3-13-1) ...
-Selecting previously unselected package r-cran-mass.
-[...]
-Setting up r-base-core (3.1.1-1+b2) ...
-
-Creating config file /etc/R/Renviron with new version
-Setting up r-base-dev (3.1.1-1) ...
-Setting up r-cran-boot (1.3-13-1) ...
-Setting up r-cran-mass (7.3-34-1) ...
-Setting up r-cran-class (7.3-11-1) ...
+The following additional packages will be installed:
[...]
+done.
</code></pre>
</notextile>
<notextile>
<pre><code>root@fbf1d0f529d5:/# <span class="userinput">R</span>
-R version 3.1.1 (2014-07-10) -- "Sock it to Me"
-Copyright (C) 2014 The R Foundation for Statistical Computing
+R version 3.3.3 (2017-03-06) -- "Another Canoe"
+Copyright (C) 2017 The R Foundation for Statistical Computing
Platform: x86_64-pc-linux-gnu (64-bit)
R is free software and comes with ABSOLUTELY NO WARRANTY.
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package cloudtest
+
+import (
+ "bufio"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+
+ "git.curoverse.com/arvados.git/lib/cloud"
+ "git.curoverse.com/arvados.git/lib/config"
+ "git.curoverse.com/arvados.git/lib/dispatchcloud"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ "golang.org/x/crypto/ssh"
+)
+
+var Command command
+
+type command struct{}
+
+func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+ var err error
+ defer func() {
+ if err != nil {
+ fmt.Fprintf(stderr, "%s\n", err)
+ }
+ }()
+
+ flags := flag.NewFlagSet("", flag.ContinueOnError)
+ flags.SetOutput(stderr)
+ configFile := flags.String("config", arvados.DefaultConfigFile, "Site configuration `file`")
+ instanceSetID := flags.String("instance-set-id", "zzzzz-zzzzz-zzzzzzcloudtest", "InstanceSetID tag `value` to use on the test instance")
+ imageID := flags.String("image-id", "", "Image ID to use when creating the test instance (if empty, use cluster config)")
+ instanceType := flags.String("instance-type", "", "Instance type to create (if empty, use cheapest type in config)")
+ destroyExisting := flags.Bool("destroy-existing", false, "Destroy any existing instances tagged with our InstanceSetID, instead of erroring out")
+ shellCommand := flags.String("command", "", "Run an interactive shell command on the test instance when it boots")
+ pauseBeforeDestroy := flags.Bool("pause-before-destroy", false, "Prompt and wait before destroying the test instance")
+ err = flags.Parse(args)
+ if err == flag.ErrHelp {
+ err = nil
+ return 0
+ } else if err != nil {
+ return 2
+ }
+
+ if len(flags.Args()) != 0 {
+ flags.Usage()
+ return 2
+ }
+ logger := ctxlog.New(stderr, "text", "info")
+ defer func() {
+ if err != nil {
+ logger.WithError(err).Error("fatal")
+ // suppress output from the other error-printing func
+ err = nil
+ }
+ logger.Info("exiting")
+ }()
+
+ loader := config.NewLoader(stdin, logger)
+ loader.Path = *configFile
+ cfg, err := loader.Load()
+ if err != nil {
+ return 1
+ }
+ cluster, err := cfg.GetCluster("")
+ if err != nil {
+ return 1
+ }
+ key, err := ssh.ParsePrivateKey([]byte(cluster.Containers.DispatchPrivateKey))
+ if err != nil {
+ err = fmt.Errorf("error parsing configured Containers.DispatchPrivateKey: %s", err)
+ return 1
+ }
+ driver, ok := dispatchcloud.Drivers[cluster.Containers.CloudVMs.Driver]
+ if !ok {
+ err = fmt.Errorf("unsupported cloud driver %q", cluster.Containers.CloudVMs.Driver)
+ return 1
+ }
+ if *imageID == "" {
+ *imageID = cluster.Containers.CloudVMs.ImageID
+ }
+ it, err := chooseInstanceType(cluster, *instanceType)
+ if err != nil {
+ return 1
+ }
+ tags := cloud.SharedResourceTags(cluster.Containers.CloudVMs.ResourceTags)
+ tagKeyPrefix := cluster.Containers.CloudVMs.TagKeyPrefix
+ tags[tagKeyPrefix+"CloudTestPID"] = fmt.Sprintf("%d", os.Getpid())
+ if !(&tester{
+ Logger: logger,
+ Tags: tags,
+ TagKeyPrefix: tagKeyPrefix,
+ SetID: cloud.InstanceSetID(*instanceSetID),
+ DestroyExisting: *destroyExisting,
+ ProbeInterval: cluster.Containers.CloudVMs.ProbeInterval.Duration(),
+ SyncInterval: cluster.Containers.CloudVMs.SyncInterval.Duration(),
+ TimeoutBooting: cluster.Containers.CloudVMs.TimeoutBooting.Duration(),
+ Driver: driver,
+ DriverParameters: cluster.Containers.CloudVMs.DriverParameters,
+ ImageID: cloud.ImageID(*imageID),
+ InstanceType: it,
+ SSHKey: key,
+ SSHPort: cluster.Containers.CloudVMs.SSHPort,
+ BootProbeCommand: cluster.Containers.CloudVMs.BootProbeCommand,
+ ShellCommand: *shellCommand,
+ PauseBeforeDestroy: func() {
+ if *pauseBeforeDestroy {
+ logger.Info("waiting for operator to press Enter")
+ fmt.Fprint(stderr, "Press Enter to continue: ")
+ bufio.NewReader(stdin).ReadString('\n')
+ }
+ },
+ }).Run() {
+ return 1
+ }
+ return 0
+}
+
+// Return the named instance type, or the cheapest type if name=="".
+func chooseInstanceType(cluster *arvados.Cluster, name string) (arvados.InstanceType, error) {
+ if len(cluster.InstanceTypes) == 0 {
+ return arvados.InstanceType{}, errors.New("no instance types are configured")
+ } else if name == "" {
+ first := true
+ var best arvados.InstanceType
+ for _, it := range cluster.InstanceTypes {
+ if first || best.Price > it.Price {
+ best = it
+ first = false
+ }
+ }
+ return best, nil
+ } else if it, ok := cluster.InstanceTypes[name]; !ok {
+ return it, fmt.Errorf("requested instance type %q is not configured", name)
+ } else {
+ return it, nil
+ }
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package cloudtest
+
+import (
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "git.curoverse.com/arvados.git/lib/cloud"
+ "git.curoverse.com/arvados.git/lib/dispatchcloud/ssh_executor"
+ "git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
+)
+
+var (
+ errTestInstanceNotFound = errors.New("test instance missing from cloud provider's list")
+)
+
+// A tester does a sequence of operations to test a cloud driver and
+// configuration. Run() should be called only once, after assigning
+// suitable values to public fields.
+type tester struct {
+ Logger logrus.FieldLogger
+ Tags cloud.SharedResourceTags
+ TagKeyPrefix string
+ SetID cloud.InstanceSetID
+ DestroyExisting bool
+ ProbeInterval time.Duration
+ SyncInterval time.Duration
+ TimeoutBooting time.Duration
+ Driver cloud.Driver
+ DriverParameters json.RawMessage
+ InstanceType arvados.InstanceType
+ ImageID cloud.ImageID
+ SSHKey ssh.Signer
+ SSHPort string
+ BootProbeCommand string
+ ShellCommand string
+ PauseBeforeDestroy func()
+
+ is cloud.InstanceSet
+ testInstance *worker.TagVerifier
+ secret string
+ executor *ssh_executor.Executor
+ showedLoginInfo bool
+
+ failed bool
+}
+
+// Run the test suite as specified, clean up as needed, and return
+// true (everything is OK) or false (something went wrong).
+func (t *tester) Run() bool {
+ // This flag gets set when we encounter a non-fatal error, so
+ // we can continue doing more tests but remember to return
+ // false (failure) at the end.
+ deferredError := false
+
+ var err error
+ t.is, err = t.Driver.InstanceSet(t.DriverParameters, t.SetID, t.Tags, t.Logger)
+ if err != nil {
+ t.Logger.WithError(err).Info("error initializing driver")
+ return false
+ }
+
+ for {
+ // Don't send the driver any filters when getting the
+ // initial instance list. This way we can log an
+ // instance count (N=...) that includes all instances
+ // in this service account, even if they don't have
+ // the same InstanceSetID.
+ insts, err := t.getInstances(nil)
+ if err != nil {
+ t.Logger.WithError(err).Info("error getting list of instances")
+ return false
+ }
+
+ foundExisting := false
+ for _, i := range insts {
+ if i.Tags()[t.TagKeyPrefix+"InstanceSetID"] != string(t.SetID) {
+ continue
+ }
+ lgr := t.Logger.WithFields(logrus.Fields{
+ "Instance": i.ID(),
+ "InstanceSetID": t.SetID,
+ })
+ foundExisting = true
+ if t.DestroyExisting {
+ lgr.Info("destroying existing instance with our InstanceSetID")
+ t0 := time.Now()
+ err := i.Destroy()
+ lgr := lgr.WithField("Duration", time.Since(t0))
+ if err != nil {
+ lgr.WithError(err).Error("error destroying existing instance")
+ } else {
+ lgr.Info("Destroy() call succeeded")
+ }
+ } else {
+ lgr.Error("found existing instance with our InstanceSetID")
+ }
+ }
+ if !foundExisting {
+ break
+ } else if t.DestroyExisting {
+ t.sleepSyncInterval()
+ } else {
+ t.Logger.Error("cannot continue with existing instances -- clean up manually, use -destroy-existing=true, or choose a different -instance-set-id")
+ return false
+ }
+ }
+
+ t.secret = randomHex(40)
+
+ tags := cloud.InstanceTags{}
+ for k, v := range t.Tags {
+ tags[k] = v
+ }
+ tags[t.TagKeyPrefix+"InstanceSetID"] = string(t.SetID)
+ tags[t.TagKeyPrefix+"InstanceSecret"] = t.secret
+
+ defer t.destroyTestInstance()
+
+ bootDeadline := time.Now().Add(t.TimeoutBooting)
+ initCommand := worker.TagVerifier{nil, t.secret}.InitCommand()
+
+ t.Logger.WithFields(logrus.Fields{
+ "InstanceType": t.InstanceType.Name,
+ "ProviderInstanceType": t.InstanceType.ProviderType,
+ "ImageID": t.ImageID,
+ "Tags": tags,
+ "InitCommand": initCommand,
+ }).Info("creating instance")
+ t0 := time.Now()
+ inst, err := t.is.Create(t.InstanceType, t.ImageID, tags, initCommand, t.SSHKey.PublicKey())
+ lgrC := t.Logger.WithField("Duration", time.Since(t0))
+ if err != nil {
+ // Create() might have failed due to a bug or network
+ // error even though the creation was successful, so
+ // it's safer to wait a bit for an instance to appear.
+ deferredError = true
+ lgrC.WithError(err).Error("error creating test instance")
+ t.Logger.WithField("Deadline", bootDeadline).Info("waiting for instance to appear anyway, in case the Create response was incorrect")
+ for err = t.refreshTestInstance(); err != nil; err = t.refreshTestInstance() {
+ if time.Now().After(bootDeadline) {
+ t.Logger.Error("timed out")
+ return false
+ } else {
+ t.sleepSyncInterval()
+ }
+ }
+ t.Logger.WithField("Instance", t.testInstance.ID()).Info("new instance appeared")
+ t.showLoginInfo()
+ } else {
+ // Create() succeeded. Make sure the new instance
+ // appears right away in the Instances() list.
+ lgrC.WithField("Instance", inst.ID()).Info("created instance")
+ t.testInstance = &worker.TagVerifier{inst, t.secret}
+ t.showLoginInfo()
+ err = t.refreshTestInstance()
+ if err == errTestInstanceNotFound {
+ t.Logger.WithError(err).Error("cloud/driver Create succeeded, but instance is not in list")
+ deferredError = true
+ } else if err != nil {
+ t.Logger.WithError(err).Error("error getting list of instances")
+ return false
+ }
+ }
+
+ if !t.checkTags() {
+ // checkTags() already logged the errors
+ deferredError = true
+ }
+
+ if !t.waitForBoot(bootDeadline) {
+ deferredError = true
+ }
+
+ if t.ShellCommand != "" {
+ err = t.runShellCommand(t.ShellCommand)
+ if err != nil {
+ t.Logger.WithError(err).Error("shell command failed")
+ deferredError = true
+ }
+ }
+
+ if fn := t.PauseBeforeDestroy; fn != nil {
+ t.showLoginInfo()
+ fn()
+ }
+
+ return !deferredError
+}
+
+// If the test instance has an address, log an "ssh user@host" command
+// line that the operator can paste into another terminal, and set
+// t.showedLoginInfo.
+//
+// If the test instance doesn't have an address yet, do nothing.
+func (t *tester) showLoginInfo() {
+ t.updateExecutor()
+ host, port := t.executor.TargetHostPort()
+ if host == "" {
+ return
+ }
+ user := t.testInstance.RemoteUser()
+ t.Logger.WithField("Command", fmt.Sprintf("ssh -p%s %s@%s", port, user, host)).Info("showing login information")
+ t.showedLoginInfo = true
+}
+
+// Get the latest instance list from the driver. If our test instance
+// is found, assign it to t.testInstance.
+func (t *tester) refreshTestInstance() error {
+ insts, err := t.getInstances(cloud.InstanceTags{t.TagKeyPrefix + "InstanceSetID": string(t.SetID)})
+ if err != nil {
+ return err
+ }
+ for _, i := range insts {
+ if t.testInstance == nil {
+ // Filter by InstanceSetID tag value
+ if i.Tags()[t.TagKeyPrefix+"InstanceSetID"] != string(t.SetID) {
+ continue
+ }
+ } else {
+ // Filter by instance ID
+ if i.ID() != t.testInstance.ID() {
+ continue
+ }
+ }
+ t.Logger.WithFields(logrus.Fields{
+ "Instance": i.ID(),
+ "Address": i.Address(),
+ }).Info("found our instance in returned list")
+ t.testInstance = &worker.TagVerifier{i, t.secret}
+ if !t.showedLoginInfo {
+ t.showLoginInfo()
+ }
+ return nil
+ }
+ return errTestInstanceNotFound
+}
+
+// Get the list of instances, passing the given tags to the cloud
+// driver to filter results.
+//
+// Return only the instances that have our InstanceSetID tag.
+func (t *tester) getInstances(tags cloud.InstanceTags) ([]cloud.Instance, error) {
+ var ret []cloud.Instance
+ t.Logger.WithField("FilterTags", tags).Info("getting instance list")
+ t0 := time.Now()
+ insts, err := t.is.Instances(tags)
+ if err != nil {
+ return nil, err
+ }
+ t.Logger.WithFields(logrus.Fields{
+ "Duration": time.Since(t0),
+ "N": len(insts),
+ }).Info("got instance list")
+ for _, i := range insts {
+ if i.Tags()[t.TagKeyPrefix+"InstanceSetID"] == string(t.SetID) {
+ ret = append(ret, i)
+ }
+ }
+ return ret, nil
+}
+
+// Check that t.testInstance has every tag in t.Tags. If not, log an
+// error and return false.
+func (t *tester) checkTags() bool {
+ ok := true
+ for k, v := range t.Tags {
+ if got := t.testInstance.Tags()[k]; got != v {
+ ok = false
+ t.Logger.WithFields(logrus.Fields{
+ "Key": k,
+ "ExpectedValue": v,
+ "GotValue": got,
+ }).Error("tag is missing from test instance")
+ }
+ }
+ if ok {
+ t.Logger.Info("all expected tags are present")
+ }
+ return ok
+}
+
+// Run t.BootProbeCommand on t.testInstance until it succeeds or the
+// deadline arrives.
+func (t *tester) waitForBoot(deadline time.Time) bool {
+ for time.Now().Before(deadline) {
+ err := t.runShellCommand(t.BootProbeCommand)
+ if err == nil {
+ return true
+ }
+ t.sleepProbeInterval()
+ t.refreshTestInstance()
+ }
+ t.Logger.Error("timed out")
+ return false
+}
+
+// Create t.executor and/or update its target to t.testInstance's
+// current address.
+func (t *tester) updateExecutor() {
+ if t.executor == nil {
+ t.executor = ssh_executor.New(t.testInstance)
+ t.executor.SetTargetPort(t.SSHPort)
+ t.executor.SetSigners(t.SSHKey)
+ } else {
+ t.executor.SetTarget(t.testInstance)
+ }
+}
+
+func (t *tester) runShellCommand(cmd string) error {
+ t.updateExecutor()
+ t.Logger.WithFields(logrus.Fields{
+ "Command": cmd,
+ }).Info("executing remote command")
+ t0 := time.Now()
+ stdout, stderr, err := t.executor.Execute(nil, cmd, nil)
+ lgr := t.Logger.WithFields(logrus.Fields{
+ "Duration": time.Since(t0),
+ "Command": cmd,
+ "stdout": string(stdout),
+ "stderr": string(stderr),
+ })
+ if err != nil {
+ lgr.WithError(err).Info("remote command failed")
+ } else {
+ lgr.Info("remote command succeeded")
+ }
+ return err
+}
+
+// currently, this tries forever until it can return true (success).
+func (t *tester) destroyTestInstance() bool {
+ if t.testInstance == nil {
+ return true
+ }
+ for {
+ lgr := t.Logger.WithField("Instance", t.testInstance.ID())
+ lgr.Info("destroying instance")
+ t0 := time.Now()
+
+ err := t.testInstance.Destroy()
+ lgrDur := lgr.WithField("Duration", time.Since(t0))
+ if err != nil {
+ lgrDur.WithError(err).Error("error destroying instance")
+ } else {
+ lgrDur.Info("destroyed instance")
+ }
+
+ err = t.refreshTestInstance()
+ if err == errTestInstanceNotFound {
+ lgr.Info("instance no longer appears in list")
+ t.testInstance = nil
+ return true
+ } else if err == nil {
+ lgr.Info("instance still exists after calling Destroy")
+ t.sleepSyncInterval()
+ continue
+ } else {
+ t.Logger.WithError(err).Error("error getting list of instances")
+ continue
+ }
+ }
+}
+
+func (t *tester) sleepSyncInterval() {
+ t.Logger.WithField("Duration", t.SyncInterval).Info("waiting SyncInterval")
+ time.Sleep(t.SyncInterval)
+}
+
+// sleepProbeInterval pauses for t.ProbeInterval, logging the wait.
+func (t *tester) sleepProbeInterval() {
+	t.Logger.WithField("Duration", t.ProbeInterval).Info("waiting ProbeInterval")
+	time.Sleep(t.ProbeInterval)
+}
+
+// Return a random string of n hexadecimal digits (n*4 random bits). n
+// must be even.
+//
+// NOTE(review): rand.Read presumably refers to crypto/rand (panicking
+// on failure supports that) -- confirm against this file's imports.
+// If n is odd, the result has n-1 digits due to integer division.
+func randomHex(n int) string {
+	buf := make([]byte, n/2)
+	_, err := rand.Read(buf)
+	if err != nil {
+		// A failing system entropy source is unrecoverable here.
+		panic(err)
+	}
+	return fmt.Sprintf("%x", buf)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package cloudtest
+
+import (
+ "bytes"
+ "testing"
+ "time"
+
+ "git.curoverse.com/arvados.git/lib/cloud"
+ "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ "golang.org/x/crypto/ssh"
+ check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate: route "go test" through gocheck's runner.
+func Test(t *testing.T) {
+	check.TestingT(t)
+}
+
+var _ = check.Suite(&TesterSuite{})
+
+// TesterSuite holds the stub cloud driver, cluster config, the tester
+// under test, and a buffer capturing its log output for assertions.
+type TesterSuite struct {
+	stubDriver *test.StubDriver
+	cluster *arvados.Cluster
+	tester *tester
+	log bytes.Buffer
+}
+
+// SetUpTest builds a fresh stub driver, cluster config, and tester
+// before each test case, so cases cannot leak state into each other.
+func (s *TesterSuite) SetUpTest(c *check.C) {
+	// Dispatcher keypair for SSH auth; VM key as the stub host key.
+	pubkey, privkey := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
+	_, privhostkey := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_vm")
+	s.stubDriver = &test.StubDriver{
+		HostKey: privhostkey,
+		AuthorizedKeys: []ssh.PublicKey{pubkey},
+		// Inject occasional Destroy failures to exercise retry.
+		ErrorRateDestroy: 0.1,
+		MinTimeBetweenCreateCalls: time.Millisecond,
+	}
+	tagKeyPrefix := "tagprefix:"
+	// Short intervals keep the full boot/probe/destroy cycle fast.
+	s.cluster = &arvados.Cluster{
+		ManagementToken: "test-management-token",
+		Containers: arvados.ContainersConfig{
+			CloudVMs: arvados.CloudVMsConfig{
+				SyncInterval: arvados.Duration(10 * time.Millisecond),
+				TimeoutBooting: arvados.Duration(150 * time.Millisecond),
+				TimeoutProbe: arvados.Duration(15 * time.Millisecond),
+				ProbeInterval: arvados.Duration(5 * time.Millisecond),
+				ResourceTags: map[string]string{"testtag": "test value"},
+			},
+		},
+		InstanceTypes: arvados.InstanceTypeMap{
+			test.InstanceType(1).Name: test.InstanceType(1),
+			test.InstanceType(2).Name: test.InstanceType(2),
+			test.InstanceType(3).Name: test.InstanceType(3),
+		},
+	}
+	// Log to s.log so tests can match on emitted messages.
+	s.tester = &tester{
+		Logger: ctxlog.New(&s.log, "text", "info"),
+		Tags: cloud.SharedResourceTags{"testtagkey": "testtagvalue"},
+		TagKeyPrefix: tagKeyPrefix,
+		SetID: cloud.InstanceSetID("test-instance-set-id"),
+		ProbeInterval: 5 * time.Millisecond,
+		SyncInterval: 10 * time.Millisecond,
+		TimeoutBooting: 150 * time.Millisecond,
+		Driver: s.stubDriver,
+		DriverParameters: nil,
+		InstanceType: test.InstanceType(2),
+		ImageID: "test-image-id",
+		SSHKey: privkey,
+		BootProbeCommand: "crunch-run --list",
+		ShellCommand: "true",
+	}
+}
+
+// TestSuccess: with working boot-probe and shell commands, the full
+// run should report success.
+func (s *TesterSuite) TestSuccess(c *check.C) {
+	// Attach log output to this test instead of s.log.
+	s.tester.Logger = ctxlog.TestLogger(c)
+	ok := s.tester.Run()
+	c.Check(ok, check.Equals, true)
+}
+
+// TestBootFail: a nonexistent boot probe command should make the run
+// fail, and the "command not found" error should appear in the log.
+func (s *TesterSuite) TestBootFail(c *check.C) {
+	s.tester.BootProbeCommand = "falsey"
+	ok := s.tester.Run()
+	c.Check(ok, check.Equals, false)
+	c.Check(s.log.String(), check.Matches, `(?ms).*\\"falsey\\": command not found.*`)
+}
+
+// TestShellCommandFail: a nonexistent shell command should make the
+// run fail, and the "command not found" error should appear in the log.
+func (s *TesterSuite) TestShellCommandFail(c *check.C) {
+	s.tester.ShellCommand = "falsey"
+	ok := s.tester.Run()
+	c.Check(ok, check.Equals, false)
+	c.Check(s.log.String(), check.Matches, `(?ms).*\\"falsey\\": command not found.*`)
+}
return f(prog, args, stdin, stdout, stderr)
}
-type Version string
+// Version is a Handler that prints the package version (set at build
+// time using -ldflags) and Go runtime version to stdout, and returns
+// 0.
+var Version versionCommand
-func (v Version) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+var version = "dev"
+
+type versionCommand struct{}
+
+func (versionCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
prog = regexp.MustCompile(` -*version$`).ReplaceAllLiteralString(prog, "")
- fmt.Fprintf(stdout, "%s %s (%s)\n", prog, v, runtime.Version())
+ fmt.Fprintf(stdout, "%s %s (%s)\n", prog, version, runtime.Version())
return 0
}
os.Stdout, os.Stderr = stdout, stderr
for i, tmpfile := range tmpfiles {
- c.Log("checking %s", i)
+ c.Logf("checking %s", i)
_, err := tmpfile.Seek(0, io.SeekStart)
c.Assert(err, check.IsNil)
leaked, err := ioutil.ReadAll(tmpfile)
"flag"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
- "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/ctxlog"
"github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
)
var DumpCommand dumpCommand
}
}()
+ loader := &Loader{
+ Stdin: stdin,
+ Logger: ctxlog.New(stderr, "text", "info"),
+ }
+
flags := flag.NewFlagSet("", flag.ContinueOnError)
flags.SetOutput(stderr)
- configFile := flags.String("config", arvados.DefaultConfigFile, "Site configuration `file`")
+ loader.SetupFlags(flags)
+
err = flags.Parse(args)
if err == flag.ErrHelp {
err = nil
flags.Usage()
return 2
}
- log := ctxlog.New(stderr, "text", "info")
- cfg, err := loadFileOrStdin(*configFile, stdin, log)
+
+ cfg, err := loader.Load()
if err != nil {
return 1
}
func (checkCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
var err error
+ var logbuf = &bytes.Buffer{}
defer func() {
+ io.Copy(stderr, logbuf)
if err != nil {
fmt.Fprintf(stderr, "%s\n", err)
}
}()
+ logger := logrus.New()
+ logger.Out = logbuf
+ loader := &Loader{
+ Stdin: stdin,
+ Logger: logger,
+ }
+
flags := flag.NewFlagSet("", flag.ContinueOnError)
flags.SetOutput(stderr)
- configFile := flags.String("config", arvados.DefaultConfigFile, "Site configuration `file`")
+ loader.SetupFlags(flags)
+
err = flags.Parse(args)
if err == flag.ErrHelp {
err = nil
flags.Usage()
return 2
}
- log := &plainLogger{w: stderr}
- var buf []byte
- if *configFile == "-" {
- buf, err = ioutil.ReadAll(stdin)
- } else {
- buf, err = ioutil.ReadFile(*configFile)
- }
- if err != nil {
- return 1
- }
- withoutDepr, err := load(bytes.NewBuffer(buf), log, false)
+
+ // Load the config twice -- once without loading deprecated
+ // keys/files, once with -- and then compare the two resulting
+ // configs. This reveals whether the deprecated keys/files
+ // have any effect on the final configuration.
+ //
+ // If they do, show the operator how to update their config
+ // such that the deprecated keys/files are superfluous and can
+ // be deleted.
+ loader.SkipDeprecated = true
+ withoutDepr, err := loader.Load()
if err != nil {
return 1
}
- withDepr, err := load(bytes.NewBuffer(buf), nil, true)
+ loader.SkipDeprecated = false
+ withDepr, err := loader.Load()
if err != nil {
return 1
}
if bytes.HasPrefix(diff, []byte("--- ")) {
fmt.Fprintln(stdout, "Your configuration is relying on deprecated entries. Suggest making the following changes.")
stdout.Write(diff)
+ err = nil
return 1
} else if len(diff) > 0 {
fmt.Fprintf(stderr, "Unexpected diff output:\n%s", diff)
} else if err != nil {
return 1
}
- if log.used {
+ if logbuf.Len() > 0 {
return 1
}
return 0
}
-type plainLogger struct {
- w io.Writer
- used bool
-}
+var DumpDefaultsCommand defaultsCommand
+
+type defaultsCommand struct{}
-func (pl *plainLogger) Warnf(format string, args ...interface{}) {
- pl.used = true
- fmt.Fprintf(pl.w, format+"\n", args...)
+func (defaultsCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+ _, err := stdout.Write(DefaultYAML)
+ if err != nil {
+ fmt.Fprintln(stderr, err)
+ return 1
+ }
+ return 0
}
import (
"bytes"
+ "io"
+ "io/ioutil"
+ "os"
"git.curoverse.com/arvados.git/lib/cmd"
check "gopkg.in/check.v1"
z1234:
API:
MaxItemsPerResponse: 1234
+ PostgreSQL:
+ Connection:
+ sslmode: require
+ Services:
+ RailsAPI:
+ InternalURLs:
+ "http://0.0.0.0:8000": {}
+ Workbench:
+ UserProfileFormFields:
+ color:
+ Type: select
+ Options:
+ fuchsia: {}
+ ApplicationMimetypesWithViewIcon:
+ whitespace: {}
`
code := CheckCommand.RunCommand("arvados config-check", []string{"-config", "-"}, bytes.NewBufferString(in), &stdout, &stderr)
c.Check(code, check.Equals, 0)
c.Check(stdout.String(), check.Matches, `(?ms).*\n\- +.*MaxItemsPerResponse: 1000\n\+ +MaxItemsPerResponse: 1234\n.*`)
}
+// TestCheckOldKeepstoreConfigFile: a legacy keepstore config with
+// Debug:true should make config-check report a LogLevel diff and warn
+// the operator to remove the legacy file.
+func (s *CommandSuite) TestCheckOldKeepstoreConfigFile(c *check.C) {
+	f, err := ioutil.TempFile("", "")
+	c.Assert(err, check.IsNil)
+	defer os.Remove(f.Name())
+
+	// Write and close the legacy file before config-check reads it
+	// (also avoids leaking the descriptor for the rest of the test).
+	_, err = io.WriteString(f, "Debug: true\n")
+	c.Assert(err, check.IsNil)
+	c.Assert(f.Close(), check.IsNil)
+
+	var stdout, stderr bytes.Buffer
+	in := `
+Clusters:
+ z1234:
+  SystemLogs:
+   LogLevel: info
+`
+	code := CheckCommand.RunCommand("arvados config-check", []string{"-config", "-", "-legacy-keepstore-config", f.Name()}, bytes.NewBufferString(in), &stdout, &stderr)
+	c.Check(code, check.Equals, 1)
+	c.Check(stdout.String(), check.Matches, `(?ms).*\n\- +.*LogLevel: info\n\+ +LogLevel: debug\n.*`)
+	c.Check(stderr.String(), check.Matches, `.*you should remove the legacy keepstore config file.*\n`)
+}
+
func (s *CommandSuite) TestCheckUnknownKey(c *check.C) {
var stdout, stderr bytes.Buffer
in := `
code := CheckCommand.RunCommand("arvados config-check", []string{"-config", "-"}, bytes.NewBufferString(in), &stdout, &stderr)
c.Log(stderr.String())
c.Check(code, check.Equals, 1)
- c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.Bogus1\n.*`)
- c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.BogusSection\n.*`)
- c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.API.Bogus3\n.*`)
- c.Check(stderr.String(), check.Matches, `(?ms).*unexpected object in config entry: Clusters.z1234.PostgreSQL.ConnectionPool\n.*`)
+ c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.Bogus1"\n.*`)
+ c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.BogusSection"\n.*`)
+ c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.API.Bogus3"\n.*`)
+ c.Check(stderr.String(), check.Matches, `(?ms).*unexpected object in config entry: Clusters.z1234.PostgreSQL.ConnectionPool"\n.*`)
}
func (s *CommandSuite) TestDumpFormatting(c *check.C) {
ManagementToken: ""
Services:
+
+ # In each of the service sections below, the keys under
+ # InternalURLs are the endpoints where the service should be
+ # listening, and reachable from other hosts in the cluster.
+ SAMPLE:
+ InternalURLs:
+ "http://example.host:12345": {}
+ SAMPLE: {}
+ ExternalURL: "-"
+
RailsAPI:
InternalURLs: {}
ExternalURL: "-"
ExternalURL: ""
WebDAV:
InternalURLs: {}
+ # Base URL for Workbench inline preview. If blank, use
+ # WebDAVDownload instead, and disable inline preview.
+ # If both are empty, downloading collections from workbench
+ # will be impossible.
+ #
+ # It is important to properly configure the download service
+ # to mitigate cross-site-scripting (XSS) attacks. An HTML page
+ # can be stored in collection. If an attacker causes a victim
+ # to visit that page through Workbench, it will be rendered by
+ # the browser. If all collections are served at the same
+ # domain, the browser will consider collections as coming from
+ # the same origin and having access to the same browsing data,
+ # enabling malicious Javascript on that page to access Arvados
+ # on behalf of the victim.
+ #
+ # This is mitigated by having separate domains for each
+ # collection, or limiting preview to circumstances where the
+ # collection is not accessed with the user's regular
+ # full-access token.
+ #
+ # Serve preview links using uuid or pdh in subdomain
+ # (requires wildcard DNS and TLS certificate)
+ # https://*.collections.uuid_prefix.arvadosapi.com
+ #
+ # Serve preview links using uuid or pdh in main domain
+ # (requires wildcard DNS and TLS certificate)
+ # https://*--collections.uuid_prefix.arvadosapi.com
+ #
+ # Serve preview links by setting uuid or pdh in the path.
+ # This configuration only allows previews of public data or
+ # collection-sharing links, because these use the anonymous
+ # user token or the token is already embedded in the URL.
+ # Other data must be handled as downloads via WebDAVDownload:
+ # https://collections.uuid_prefix.arvadosapi.com
+ #
ExternalURL: ""
+
WebDAVDownload:
InternalURLs: {}
+ # Base URL for download links. If blank, serve links to WebDAV
+ # with disposition=attachment query param. Unlike preview links,
+ # browsers do not render attachments, so there is no risk of XSS.
+ #
+ # If WebDAVDownload is blank, and WebDAV uses a
+ # single-origin form, then Workbench will show an error page
+ #
+ # Serve download links by setting uuid or pdh in the path:
+ # https://download.uuid_prefix.arvadosapi.com
+ #
ExternalURL: ""
+
Keepstore:
InternalURLs: {}
ExternalURL: "-"
ExternalURL: ""
WebShell:
InternalURLs: {}
+ # ShellInABox service endpoint URL for a given VM. If empty, do not
+ # offer web shell logins.
+ #
+ # E.g., using a path-based proxy server to forward connections to shell hosts:
+ # https://webshell.uuid_prefix.arvadosapi.com
+ #
+ # E.g., using a name-based proxy server to forward connections to shell hosts:
+ # https://*.webshell.uuid_prefix.arvadosapi.com
ExternalURL: ""
Workbench1:
InternalURLs: {}
user: ""
password: ""
dbname: ""
+ SAMPLE: ""
API:
# Maximum size (in bytes) allowed for a single API request. This
# limit is published in the discovery document for use by clients.
NewUserNotificationRecipients: []
NewInactiveUserNotificationRecipients: []
+ # Set AnonymousUserToken to enable anonymous user access. You can get
+ # the token by running "bundle exec ./script/get_anonymous_user_token.rb"
+ # in the directory where your API server is running.
+ AnonymousUserToken: ""
+
AuditLogs:
# Time to keep audit logs, in seconds. (An audit log is a row added
# to the "logs" table in the PostgreSQL database each time an
# one another!
BlobSigning: true
- # blob_signing_key is a string of alphanumeric characters used to
+ # BlobSigningKey is a string of alphanumeric characters used to
# generate permission signatures for Keep locators. It must be
# identical to the permission key given to Keep. IMPORTANT: This is
# a site secret. It should be at least 50 characters.
ManagedProperties:
SAMPLE: {Function: original_owner, Protected: true}
+ # In "trust all content" mode, Workbench will redirect download
+ # requests to WebDAV preview link, even in the cases when
+ # WebDAV would have to expose XSS vulnerabilities in order to
+ # handle the redirect (see discussion on Services.WebDAV).
+ #
+ # This setting has no effect in the recommended configuration,
+ # where the WebDAV is configured to have a separate domain for
+ # every collection; in this case XSS protection is provided by
+ # browsers' same-origin policy.
+ #
+ # The default setting (false) is appropriate for a multi-user site.
+ TrustAllContent: false
+
Login:
# These settings are provided by your OAuth2 provider (e.g.,
# sso-provider).
# Shell command to execute on each worker to determine whether
# the worker is booted and ready to run containers. It should
# exit zero if the worker is ready.
- BootProbeCommand: "docker ps"
+ BootProbeCommand: "docker ps -q"
# Minimum interval between consecutive probes to a single
# worker.
Mail:
MailchimpAPIKey: ""
MailchimpListID: ""
- SendUserSetupNotificationEmail: ""
- IssueReporterEmailFrom: ""
- IssueReporterEmailTo: ""
- SupportEmailAddress: ""
- EmailFrom: ""
+ SendUserSetupNotificationEmail: true
+
+ # Bug/issue report notification to and from addresses
+ IssueReporterEmailFrom: "arvados@example.com"
+ IssueReporterEmailTo: "arvados@example.com"
+ SupportEmailAddress: "arvados@example.com"
+
+ # Generic issue email from
+ EmailFrom: "arvados@example.com"
RemoteClusters:
"*":
Host: ""
ArvadosDocsite: https://doc.arvados.org
ArvadosPublicDataDocURL: https://playground.arvados.org/projects/public
ShowUserAgreementInline: false
- SecretToken: ""
SecretKeyBase: ""
+
+ # Scratch directory used by the remote repository browsing
+ # feature. If it doesn't exist, it (and any missing parents) will be
+ # created using mkdir_p.
RepositoryCache: /var/www/arvados-workbench/current/tmp/git
+
+ # Below is a sample setting of the UserProfileFormFields config parameter.
+ # This configuration parameter should be set to either false (to disable) or
+ # to a map as shown below.
+ # Configure the map of input fields to be displayed in the profile page
+ # using the attribute "key" for each of the input fields.
+ # This sample shows configuration with one required and one optional form fields.
+ # For each of these input fields:
+ # You can specify "Type" as "text" or "select".
+ # List the "Options" to be displayed for each of the "select" menu.
+ # Set "Required" as "true" for any of these fields to make them required.
+ # If any of the required fields are missing in the user's profile, the user will be
+ # redirected to the profile page before they can access any Workbench features.
UserProfileFormFields:
SAMPLE:
- Type: text
- FormFieldTitle: ""
- FormFieldDescription: ""
- Required: true
+ Type: select
+ FormFieldTitle: Best color
+ FormFieldDescription: your favorite color
+ Required: false
+ Position: 1
+ Options:
+ red: {}
+ blue: {}
+ green: {}
+ SAMPLE: {}
+
+ # exampleTextValue: # key that will be set in properties
+ # Type: text #
+ # FormFieldTitle: ""
+ # FormFieldDescription: ""
+ # Required: true
+ # Position: 1
+ # exampleOptionsValue:
+ # Type: select
+ # FormFieldTitle: ""
+ # FormFieldDescription: ""
+ # Required: true
+ # Position: 1
+ # Options:
+ # red: {}
+ # blue: {}
+ # yellow: {}
+
+ # Use "UserProfileFormMessage" to configure the message you want
+ # to display on the profile page.
UserProfileFormMessage: 'Welcome to Arvados. All <span style="color:red">required fields</span> must be completed before you can proceed.'
+
+ # Mimetypes of applications for which the view icon
+ # would be enabled in a collection's show page.
+ # It is sufficient to list only applications here.
+ # No need to list text and image types.
ApplicationMimetypesWithViewIcon:
cwl: {}
fasta: {}
vnd.realvnc.bed: {}
xml: {}
xsl: {}
+ SAMPLE: {}
+
+ # The maximum number of bytes to load in the log viewer
LogViewerMaxBytes: 1M
+
+ # When AnonymousUserToken is configured, show the public projects page
EnablePublicProjectsPage: true
+
+ # By default, disable the "Getting Started" popup which is specific to Arvados playground
EnableGettingStartedPopup: false
+
+ # Ask Arvados API server to compress its response payloads.
APIResponseCompression: true
+
+ # Timeouts for API requests.
APIClientConnectTimeout: 2m
APIClientReceiveTimeout: 5m
+
+ # Maximum number of historic log records of a running job to fetch
+ # and display in the Log tab, while subscribing to web sockets.
RunningJobLogRecordsToFetch: 2000
+
+ # In systems with many shared projects, loading of dashboard and topnav
+ # can be slow due to collections indexing; use the following parameters
+ # to suppress these properties
ShowRecentCollectionsOnDashboard: true
ShowUserNotifications: true
- MultiSiteSearch: false
+
+ # Enable/disable "multi-site search" in top nav ("true"/"false"), or
+ # a link to the multi-site search page on a "home" Workbench site.
+ #
+ # Example:
+ # https://workbench.qr1hi.arvadosapi.com/collections/multisite
+ MultiSiteSearch: ""
+
+ # Should workbench allow management of local git repositories? Set to false if
+ # the jobs api is disabled and there are no local git repositories.
Repositories: true
+
SiteName: Arvados Workbench
+ ProfilingEnabled: false
+
+ # This is related to obsolete Google OpenID 1.0 login
+ # but some workbench stuff still expects it to be set.
+ DefaultOpenIdPrefix: "https://www.google.com/accounts/o8/id"
# Workbench2 configs
VocabularyURL: ""
FileViewersConfigURL: ""
+
+ # Use experimental controller code (see https://dev.arvados.org/issues/14287)
+ EnableBetaController14287: false
import (
"fmt"
+ "io/ioutil"
"os"
"strings"
Insecure bool
}
-func applyDeprecatedConfig(cfg *arvados.Config, configdata []byte, log logger) error {
+func (ldr *Loader) applyDeprecatedConfig(cfg *arvados.Config) error {
var dc deprecatedConfig
- err := yaml.Unmarshal(configdata, &dc)
+ err := yaml.Unmarshal(ldr.configdata, &dc)
if err != nil {
return err
}
for name, np := range dcluster.NodeProfiles {
if name == "*" || name == os.Getenv("ARVADOS_NODE_PROFILE") || name == hostname {
name = "localhost"
- } else if log != nil {
- log.Warnf("overriding Clusters.%s.Services using Clusters.%s.NodeProfiles.%s (guessing %q is a hostname)", id, id, name, name)
+ } else if ldr.Logger != nil {
+ ldr.Logger.Warnf("overriding Clusters.%s.Services using Clusters.%s.NodeProfiles.%s (guessing %q is a hostname)", id, id, name, name)
}
applyDeprecatedNodeProfile(name, np.RailsAPI, &cluster.Services.RailsAPI)
applyDeprecatedNodeProfile(name, np.Controller, &cluster.Services.Controller)
}
svc.InternalURLs[arvados.URL{Scheme: scheme, Host: host}] = arvados.ServiceInstance{}
}
+
+const defaultKeepstoreConfigPath = "/etc/arvados/keepstore/keepstore.yml"
+
+// oldKeepstoreConfig holds the subset of the legacy keepstore config
+// file that still affects cluster configuration. Pointer fields
+// distinguish "not set" (nil) from an explicit false.
+type oldKeepstoreConfig struct {
+	Debug *bool
+}
+
+// update config using values from an old-style keepstore config file.
+//
+// No-op if ldr.KeepstorePath is empty, or if it is the default path
+// and no file exists there. Otherwise the file is loaded and a
+// warning reminds the operator to delete it after migrating.
+func (ldr *Loader) loadOldKeepstoreConfig(cfg *arvados.Config) error {
+	path := ldr.KeepstorePath
+	if path == "" {
+		return nil
+	}
+	buf, err := ioutil.ReadFile(path)
+	if os.IsNotExist(err) && path == defaultKeepstoreConfigPath {
+		// Absence of the file at the default location just
+		// means there is no legacy config to migrate.
+		return nil
+	} else if err != nil {
+		return err
+	} else {
+		ldr.Logger.Warnf("you should remove the legacy keepstore config file (%s) after migrating all config keys to the cluster configuration file (%s)", path, ldr.Path)
+	}
+	cluster, err := cfg.GetCluster("")
+	if err != nil {
+		return err
+	}
+
+	var oc oldKeepstoreConfig
+	err = yaml.Unmarshal(buf, &oc)
+	if err != nil {
+		return fmt.Errorf("%s: %s", path, err)
+	}
+
+	// Apply the legacy Debug flag only when it was explicitly set
+	// and disagrees with the current LogLevel.
+	if v := oc.Debug; v == nil {
+	} else if *v && cluster.SystemLogs.LogLevel != "debug" {
+		cluster.SystemLogs.LogLevel = "debug"
+	} else if !*v && cluster.SystemLogs.LogLevel != "info" {
+		cluster.SystemLogs.LogLevel = "info"
+	}
+
+	cfg.Clusters[cluster.ClusterID] = *cluster
+	return nil
+}
// exists.
var whitelist = map[string]bool{
// | sort -t'"' -k2,2
- "API": true,
- "API.AsyncPermissionsUpdateInterval": false,
- "API.DisabledAPIs": false,
- "API.MaxIndexDatabaseRead": false,
- "API.MaxItemsPerResponse": true,
- "API.MaxRequestAmplification": false,
- "API.MaxRequestSize": true,
- "API.RailsSessionSecretToken": false,
- "API.RequestTimeout": true,
- "AuditLogs": false,
- "AuditLogs.MaxAge": false,
- "AuditLogs.MaxDeleteBatch": false,
- "AuditLogs.UnloggedAttributes": false,
- "Collections": true,
- "Collections.BlobSigning": true,
- "Collections.BlobSigningKey": false,
- "Collections.BlobSigningTTL": true,
- "Collections.CollectionVersioning": false,
- "Collections.DefaultReplication": true,
- "Collections.DefaultTrashLifetime": true,
- "Collections.ManagedProperties": true,
- "Collections.ManagedProperties.*": true,
- "Collections.ManagedProperties.*.*": true,
- "Collections.PreserveVersionIfIdle": true,
- "Collections.TrashSweepInterval": false,
- "Containers": true,
- "Containers.CloudVMs": false,
- "Containers.DefaultKeepCacheRAM": true,
- "Containers.DispatchPrivateKey": false,
- "Containers.JobsAPI": true,
- "Containers.JobsAPI.CrunchJobUser": false,
- "Containers.JobsAPI.CrunchJobWrapper": false,
- "Containers.JobsAPI.CrunchRefreshTrigger": false,
- "Containers.JobsAPI.DefaultDockerImage": false,
- "Containers.JobsAPI.Enable": true,
- "Containers.JobsAPI.GitInternalDir": false,
- "Containers.JobsAPI.ReuseJobIfOutputsDiffer": false,
- "Containers.Logging": false,
- "Containers.LogReuseDecisions": false,
- "Containers.MaxComputeVMs": false,
- "Containers.MaxDispatchAttempts": false,
- "Containers.MaxRetryAttempts": true,
- "Containers.SLURM": false,
- "Containers.StaleLockTimeout": false,
- "Containers.SupportedDockerImageFormats": true,
- "Containers.UsePreemptibleInstances": true,
- "Git": false,
- "InstanceTypes": true,
- "InstanceTypes.*": true,
- "InstanceTypes.*.*": true,
- "Login": false,
- "Mail": false,
- "ManagementToken": false,
- "PostgreSQL": false,
- "RemoteClusters": true,
- "RemoteClusters.*": true,
- "RemoteClusters.*.ActivateUsers": true,
- "RemoteClusters.*.Host": true,
- "RemoteClusters.*.Insecure": true,
- "RemoteClusters.*.Proxy": true,
- "RemoteClusters.*.Scheme": true,
- "Services": true,
- "Services.*": true,
- "Services.*.ExternalURL": true,
- "Services.*.InternalURLs": false,
- "SystemLogs": false,
- "SystemRootToken": false,
- "TLS": false,
- "Users": false,
- "Workbench": false,
+ "API": true,
+ "API.AsyncPermissionsUpdateInterval": false,
+ "API.DisabledAPIs": false,
+ "API.MaxIndexDatabaseRead": false,
+ "API.MaxItemsPerResponse": true,
+ "API.MaxRequestAmplification": false,
+ "API.MaxRequestSize": true,
+ "API.RailsSessionSecretToken": false,
+ "API.RequestTimeout": true,
+ "AuditLogs": false,
+ "AuditLogs.MaxAge": false,
+ "AuditLogs.MaxDeleteBatch": false,
+ "AuditLogs.UnloggedAttributes": false,
+ "Collections": true,
+ "Collections.BlobSigning": true,
+ "Collections.BlobSigningKey": false,
+ "Collections.BlobSigningTTL": true,
+ "Collections.CollectionVersioning": false,
+ "Collections.DefaultReplication": true,
+ "Collections.DefaultTrashLifetime": true,
+ "Collections.ManagedProperties": true,
+ "Collections.ManagedProperties.*": true,
+ "Collections.ManagedProperties.*.*": true,
+ "Collections.PreserveVersionIfIdle": true,
+ "Collections.TrashSweepInterval": false,
+ "Collections.TrustAllContent": false,
+ "Containers": true,
+ "Containers.CloudVMs": false,
+ "Containers.DefaultKeepCacheRAM": true,
+ "Containers.DispatchPrivateKey": false,
+ "Containers.JobsAPI": true,
+ "Containers.JobsAPI.CrunchJobUser": false,
+ "Containers.JobsAPI.CrunchJobWrapper": false,
+ "Containers.JobsAPI.CrunchRefreshTrigger": false,
+ "Containers.JobsAPI.DefaultDockerImage": false,
+ "Containers.JobsAPI.Enable": true,
+ "Containers.JobsAPI.GitInternalDir": false,
+ "Containers.JobsAPI.ReuseJobIfOutputsDiffer": false,
+ "Containers.Logging": false,
+ "Containers.LogReuseDecisions": false,
+ "Containers.MaxComputeVMs": false,
+ "Containers.MaxDispatchAttempts": false,
+ "Containers.MaxRetryAttempts": true,
+ "Containers.SLURM": false,
+ "Containers.StaleLockTimeout": false,
+ "Containers.SupportedDockerImageFormats": true,
+ "Containers.UsePreemptibleInstances": true,
+ "EnableBetaController14287": false,
+ "Git": false,
+ "InstanceTypes": true,
+ "InstanceTypes.*": true,
+ "InstanceTypes.*.*": true,
+ "Login": false,
+ "Mail": false,
+ "ManagementToken": false,
+ "PostgreSQL": false,
+ "RemoteClusters": true,
+ "RemoteClusters.*": true,
+ "RemoteClusters.*.ActivateUsers": true,
+ "RemoteClusters.*.Host": true,
+ "RemoteClusters.*.Insecure": true,
+ "RemoteClusters.*.Proxy": true,
+ "RemoteClusters.*.Scheme": true,
+ "Services": true,
+ "Services.*": true,
+ "Services.*.ExternalURL": true,
+ "Services.*.InternalURLs": false,
+ "SystemLogs": false,
+ "SystemRootToken": false,
+ "TLS": false,
+ "Users": true,
+ "Users.AnonymousUserToken": true,
+ "Users.AdminNotifierEmailFrom": false,
+ "Users.AutoAdminFirstUser": false,
+ "Users.AutoAdminUserWithEmail": false,
+ "Users.AutoSetupNewUsers": false,
+ "Users.AutoSetupNewUsersWithRepository": false,
+ "Users.AutoSetupNewUsersWithVmUUID": false,
+ "Users.AutoSetupUsernameBlacklist": false,
+ "Users.EmailSubjectPrefix": false,
+ "Users.NewInactiveUserNotificationRecipients": false,
+ "Users.NewUserNotificationRecipients": false,
+ "Users.NewUsersAreActive": false,
+ "Users.UserNotifierEmailFrom": false,
+ "Users.UserProfileNotificationAddress": false,
+ "Workbench": true,
+ "Workbench.ActivationContactLink": false,
+ "Workbench.APIClientConnectTimeout": true,
+ "Workbench.APIClientReceiveTimeout": true,
+ "Workbench.APIResponseCompression": true,
+ "Workbench.ApplicationMimetypesWithViewIcon": true,
+ "Workbench.ApplicationMimetypesWithViewIcon.*": true,
+ "Workbench.ArvadosDocsite": true,
+ "Workbench.ArvadosPublicDataDocURL": true,
+ "Workbench.DefaultOpenIdPrefix": false,
+ "Workbench.EnableGettingStartedPopup": true,
+ "Workbench.EnablePublicProjectsPage": true,
+ "Workbench.FileViewersConfigURL": true,
+ "Workbench.LogViewerMaxBytes": true,
+ "Workbench.MultiSiteSearch": true,
+ "Workbench.ProfilingEnabled": true,
+ "Workbench.Repositories": false,
+ "Workbench.RepositoryCache": false,
+ "Workbench.RunningJobLogRecordsToFetch": true,
+ "Workbench.SecretKeyBase": false,
+ "Workbench.ShowRecentCollectionsOnDashboard": true,
+ "Workbench.ShowUserAgreementInline": true,
+ "Workbench.ShowUserNotifications": true,
+ "Workbench.SiteName": true,
+ "Workbench.Theme": true,
+ "Workbench.UserProfileFormFields": true,
+ "Workbench.UserProfileFormFields.*": true,
+ "Workbench.UserProfileFormFields.*.*": true,
+ "Workbench.UserProfileFormFields.*.*.*": true,
+ "Workbench.UserProfileFormMessage": true,
+ "Workbench.VocabularyURL": true,
}
func redactUnsafe(m map[string]interface{}, mPrefix, lookupPrefix string) error {
"regexp"
"strings"
- "git.curoverse.com/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
)
type ExportSuite struct{}
func (s *ExportSuite) TestExport(c *check.C) {
- confdata := bytes.Replace(DefaultYAML, []byte("SAMPLE"), []byte("testkey"), -1)
- cfg, err := Load(bytes.NewBuffer(confdata), ctxlog.TestLogger(c))
+ confdata := strings.Replace(string(DefaultYAML), "SAMPLE", "testkey", -1)
+ cfg, err := testLoader(c, confdata, nil).Load()
c.Assert(err, check.IsNil)
cluster := cfg.Clusters["xxxxx"]
cluster.ManagementToken = "abcdefg"
ManagementToken: ""
Services:
+
+ # In each of the service sections below, the keys under
+ # InternalURLs are the endpoints where the service should be
+ # listening, and reachable from other hosts in the cluster.
+ SAMPLE:
+ InternalURLs:
+ "http://example.host:12345": {}
+ SAMPLE: {}
+ ExternalURL: "-"
+
RailsAPI:
InternalURLs: {}
ExternalURL: "-"
ExternalURL: ""
WebDAV:
InternalURLs: {}
+ # Base URL for Workbench inline preview. If blank, use
+ # WebDAVDownload instead, and disable inline preview.
+ # If both are empty, downloading collections from workbench
+ # will be impossible.
+ #
+ # It is important to properly configure the download service
+ # to mitigate cross-site-scripting (XSS) attacks. An HTML page
+ # can be stored in a collection. If an attacker causes a victim
+ # to visit that page through Workbench, it will be rendered by
+ # the browser. If all collections are served at the same
+ # domain, the browser will consider collections as coming from
+ # the same origin and having access to the same browsing data,
+ # enabling malicious Javascript on that page to access Arvados
+ # on behalf of the victim.
+ #
+ # This is mitigated by having separate domains for each
+ # collection, or limiting preview to circumstances where the
+ # collection is not accessed with the user's regular
+ # full-access token.
+ #
+ # Serve preview links using uuid or pdh in subdomain
+ # (requires wildcard DNS and TLS certificate)
+ # https://*.collections.uuid_prefix.arvadosapi.com
+ #
+ # Serve preview links using uuid or pdh in main domain
+ # (requires wildcard DNS and TLS certificate)
+ # https://*--collections.uuid_prefix.arvadosapi.com
+ #
+ # Serve preview links by setting uuid or pdh in the path.
+ # This configuration only allows previews of public data or
+ # collection-sharing links, because these use the anonymous
+ # user token or the token is already embedded in the URL.
+ # Other data must be handled as downloads via WebDAVDownload:
+ # https://collections.uuid_prefix.arvadosapi.com
+ #
ExternalURL: ""
+
WebDAVDownload:
InternalURLs: {}
+ # Base URL for download links. If blank, serve links to WebDAV
+ # with disposition=attachment query param. Unlike preview links,
+ # browsers do not render attachments, so there is no risk of XSS.
+ #
+ # If WebDAVDownload is blank, and WebDAV uses a
+ # single-origin form, then Workbench will show an error page
+ #
+ # Serve download links by setting uuid or pdh in the path:
+ # https://download.uuid_prefix.arvadosapi.com
+ #
ExternalURL: ""
+
Keepstore:
InternalURLs: {}
ExternalURL: "-"
ExternalURL: ""
WebShell:
InternalURLs: {}
+ # ShellInABox service endpoint URL for a given VM. If empty, do not
+ # offer web shell logins.
+ #
+ # E.g., using a path-based proxy server to forward connections to shell hosts:
+ # https://webshell.uuid_prefix.arvadosapi.com
+ #
+ # E.g., using a name-based proxy server to forward connections to shell hosts:
+ # https://*.webshell.uuid_prefix.arvadosapi.com
ExternalURL: ""
Workbench1:
InternalURLs: {}
user: ""
password: ""
dbname: ""
+ SAMPLE: ""
API:
# Maximum size (in bytes) allowed for a single API request. This
# limit is published in the discovery document for use by clients.
NewUserNotificationRecipients: []
NewInactiveUserNotificationRecipients: []
+ # Set AnonymousUserToken to enable anonymous user access. You can get
+ # the token by running "bundle exec ./script/get_anonymous_user_token.rb"
+ # in the directory where your API server is running.
+ AnonymousUserToken: ""
+
AuditLogs:
# Time to keep audit logs, in seconds. (An audit log is a row added
# to the "logs" table in the PostgreSQL database each time an
# one another!
BlobSigning: true
- # blob_signing_key is a string of alphanumeric characters used to
+ # BlobSigningKey is a string of alphanumeric characters used to
# generate permission signatures for Keep locators. It must be
# identical to the permission key given to Keep. IMPORTANT: This is
# a site secret. It should be at least 50 characters.
ManagedProperties:
SAMPLE: {Function: original_owner, Protected: true}
+ # In "trust all content" mode, Workbench will redirect download
+ # requests to WebDAV preview link, even in the cases when
+ # WebDAV would have to expose XSS vulnerabilities in order to
+ # handle the redirect (see discussion on Services.WebDAV).
+ #
+ # This setting has no effect in the recommended configuration,
+ # where the WebDAV is configured to have a separate domain for
+ # every collection; in this case XSS protection is provided by
+ # browsers' same-origin policy.
+ #
+ # The default setting (false) is appropriate for a multi-user site.
+ TrustAllContent: false
+
Login:
# These settings are provided by your OAuth2 provider (e.g.,
# sso-provider).
# Shell command to execute on each worker to determine whether
# the worker is booted and ready to run containers. It should
# exit zero if the worker is ready.
- BootProbeCommand: "docker ps"
+ BootProbeCommand: "docker ps -q"
# Minimum interval between consecutive probes to a single
# worker.
Mail:
MailchimpAPIKey: ""
MailchimpListID: ""
- SendUserSetupNotificationEmail: ""
- IssueReporterEmailFrom: ""
- IssueReporterEmailTo: ""
- SupportEmailAddress: ""
- EmailFrom: ""
+ SendUserSetupNotificationEmail: true
+
+ # Bug/issue report notification to and from addresses
+ IssueReporterEmailFrom: "arvados@example.com"
+ IssueReporterEmailTo: "arvados@example.com"
+ SupportEmailAddress: "arvados@example.com"
+
+ # Generic issue email from
+ EmailFrom: "arvados@example.com"
RemoteClusters:
"*":
Host: ""
ArvadosDocsite: https://doc.arvados.org
ArvadosPublicDataDocURL: https://playground.arvados.org/projects/public
ShowUserAgreementInline: false
- SecretToken: ""
SecretKeyBase: ""
+
+ # Scratch directory used by the remote repository browsing
+ # feature. If it doesn't exist, it (and any missing parents) will be
+ # created using mkdir_p.
RepositoryCache: /var/www/arvados-workbench/current/tmp/git
+
+ # Below is a sample setting of the UserProfileFormFields config parameter.
+ # This configuration parameter should be set to either false (to disable) or
+ # to a map as shown below.
+ # Configure the map of input fields to be displayed in the profile page
+ # using the attribute "key" for each of the input fields.
+ # This sample shows configuration with one required and one optional form fields.
+ # For each of these input fields:
+ # You can specify "Type" as "text" or "select".
+ # List the "Options" to be displayed for each of the "select" menu.
+ # Set "Required" as "true" for any of these fields to make them required.
+ # If any of the required fields are missing in the user's profile, the user will be
+ # redirected to the profile page before they can access any Workbench features.
UserProfileFormFields:
SAMPLE:
- Type: text
- FormFieldTitle: ""
- FormFieldDescription: ""
- Required: true
+ Type: select
+ FormFieldTitle: Best color
+ FormFieldDescription: your favorite color
+ Required: false
+ Position: 1
+ Options:
+ red: {}
+ blue: {}
+ green: {}
+ SAMPLE: {}
+
+ # exampleTextValue: # key that will be set in properties
+ # Type: text #
+ # FormFieldTitle: ""
+ # FormFieldDescription: ""
+ # Required: true
+ # Position: 1
+ # exampleOptionsValue:
+ # Type: select
+ # FormFieldTitle: ""
+ # FormFieldDescription: ""
+ # Required: true
+ # Position: 1
+ # Options:
+ # red: {}
+ # blue: {}
+ # yellow: {}
+
+ # Use "UserProfileFormMessage" to configure the message you want
+ # to display on the profile page.
UserProfileFormMessage: 'Welcome to Arvados. All <span style="color:red">required fields</span> must be completed before you can proceed.'
+
+ # Mimetypes of applications for which the view icon
+ # would be enabled in a collection's show page.
+ # It is sufficient to list only applications here.
+ # No need to list text and image types.
ApplicationMimetypesWithViewIcon:
cwl: {}
fasta: {}
vnd.realvnc.bed: {}
xml: {}
xsl: {}
+ SAMPLE: {}
+
+ # The maximum number of bytes to load in the log viewer
LogViewerMaxBytes: 1M
+
+ # When AnonymousUserToken is configured, show the public projects page
EnablePublicProjectsPage: true
+
+ # By default, disable the "Getting Started" popup which is specific to Arvados playground
EnableGettingStartedPopup: false
+
+ # Ask Arvados API server to compress its response payloads.
APIResponseCompression: true
+
+ # Timeouts for API requests.
APIClientConnectTimeout: 2m
APIClientReceiveTimeout: 5m
+
+ # Maximum number of historic log records of a running job to fetch
+ # and display in the Log tab, while subscribing to web sockets.
RunningJobLogRecordsToFetch: 2000
+
+ # In systems with many shared projects, loading of dashboard and topnav
+ # can be slow due to collections indexing; use the following parameters
+ # to suppress these properties
ShowRecentCollectionsOnDashboard: true
ShowUserNotifications: true
- MultiSiteSearch: false
+
+ # Enable/disable "multi-site search" in top nav ("true"/"false"), or
+ # a link to the multi-site search page on a "home" Workbench site.
+ #
+ # Example:
+ # https://workbench.qr1hi.arvadosapi.com/collections/multisite
+ MultiSiteSearch: ""
+
+ # Should workbench allow management of local git repositories? Set to false if
+ # the jobs api is disabled and there are no local git repositories.
Repositories: true
+
SiteName: Arvados Workbench
+ ProfilingEnabled: false
+
+ # This is related to obsolete Google OpenID 1.0 login
+ # but some workbench stuff still expects it to be set.
+ DefaultOpenIdPrefix: "https://www.google.com/accounts/o8/id"
# Workbench2 configs
VocabularyURL: ""
FileViewersConfigURL: ""
+
+ # Use experimental controller code (see https://dev.arvados.org/issues/14287)
+ EnableBetaController14287: false
`)
"bytes"
"encoding/json"
"errors"
+ "flag"
"fmt"
"io"
"io/ioutil"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"github.com/ghodss/yaml"
"github.com/imdario/mergo"
+ "github.com/sirupsen/logrus"
)
-type logger interface {
- Warnf(string, ...interface{})
+var ErrNoClustersDefined = errors.New("config does not define any clusters")
+
+type Loader struct {
+ Stdin io.Reader
+ Logger logrus.FieldLogger
+ SkipDeprecated bool // Don't load legacy/deprecated config keys/files
+
+ Path string
+ KeepstorePath string
+
+ configdata []byte
}
-func loadFileOrStdin(path string, stdin io.Reader, log logger) (*arvados.Config, error) {
- if path == "-" {
- return load(stdin, log, true)
- } else {
- return LoadFile(path, log)
+// NewLoader returns a new Loader with Stdin and Logger set to the
+// given values, and all config paths set to their default values.
+func NewLoader(stdin io.Reader, logger logrus.FieldLogger) *Loader {
+ ldr := &Loader{Stdin: stdin, Logger: logger}
+ // Calling SetupFlags on a throwaway FlagSet has the side
+ // effect of assigning default values to the configurable
+ // fields.
+ ldr.SetupFlags(flag.NewFlagSet("", flag.ContinueOnError))
+ return ldr
+}
+
+// SetupFlags configures a flagset so arguments like -config X can be
+// used to change the loader's Path fields.
+//
+// ldr := NewLoader(os.Stdin, logrus.New())
+// flagset := flag.NewFlagSet("", flag.ContinueOnError)
+// ldr.SetupFlags(flagset)
+// // ldr.Path == "/etc/arvados/config.yml"
+// flagset.Parse([]string{"-config", "/tmp/c.yaml"})
+// // ldr.Path == "/tmp/c.yaml"
+func (ldr *Loader) SetupFlags(flagset *flag.FlagSet) {
+ flagset.StringVar(&ldr.Path, "config", arvados.DefaultConfigFile, "Site configuration `file` (default may be overridden by setting an ARVADOS_CONFIG environment variable)")
+ flagset.StringVar(&ldr.KeepstorePath, "legacy-keepstore-config", defaultKeepstoreConfigPath, "Legacy keepstore configuration `file`")
+}
+
+// MungeLegacyConfigArgs checks args for a -config flag whose argument
+// is a regular file (or a symlink to one), but doesn't have a
+// top-level "Clusters" key and therefore isn't a valid cluster
+// configuration file. If it finds such a flag, it replaces -config
+// with legacyConfigArg (e.g., "-legacy-keepstore-config").
+//
+// This is used by programs that still need to accept "-config" as a
+// way to specify a per-component config file until their config has
+// been migrated.
+//
+// If any errors are encountered while reading or parsing a config
+// file, the given args are not munged. We presume the same errors
+// will be encountered again and reported later on when trying to load
+// cluster configuration from the same file, regardless of which
+// struct we end up using.
+func (ldr *Loader) MungeLegacyConfigArgs(lgr logrus.FieldLogger, args []string, legacyConfigArg string) []string {
+ munged := append([]string(nil), args...)
+ for i := 0; i < len(args); i++ {
+ if !strings.HasPrefix(args[i], "-") || strings.SplitN(strings.TrimPrefix(args[i], "-"), "=", 2)[0] != "config" {
+ continue
+ }
+ var operand string
+ if strings.Contains(args[i], "=") {
+ operand = strings.SplitN(args[i], "=", 2)[1]
+ } else if i+1 < len(args) && !strings.HasPrefix(args[i+1], "-") {
+ i++
+ operand = args[i]
+ } else {
+ continue
+ }
+ if fi, err := os.Stat(operand); err != nil || !fi.Mode().IsRegular() {
+ continue
+ }
+ f, err := os.Open(operand)
+ if err != nil {
+ continue
+ }
+ defer f.Close()
+ buf, err := ioutil.ReadAll(f)
+ if err != nil {
+ continue
+ }
+ var cfg arvados.Config
+ err = yaml.Unmarshal(buf, &cfg)
+ if err != nil {
+ continue
+ }
+ if len(cfg.Clusters) == 0 {
+ lgr.Warnf("%s is not a cluster config file -- interpreting %s as %s (please migrate your config!)", operand, "-config", legacyConfigArg)
+ if operand == args[i] {
+ munged[i-1] = legacyConfigArg
+ } else {
+ munged[i] = legacyConfigArg + "=" + operand
+ }
+ }
}
+ return munged
}
-func LoadFile(path string, log logger) (*arvados.Config, error) {
+func (ldr *Loader) loadBytes(path string) ([]byte, error) {
+ if path == "-" {
+ return ioutil.ReadAll(ldr.Stdin)
+ }
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
- return Load(f, log)
+ return ioutil.ReadAll(f)
}
-func Load(rdr io.Reader, log logger) (*arvados.Config, error) {
- return load(rdr, log, true)
-}
-
-func load(rdr io.Reader, log logger, useDeprecated bool) (*arvados.Config, error) {
- buf, err := ioutil.ReadAll(rdr)
- if err != nil {
- return nil, err
+func (ldr *Loader) Load() (*arvados.Config, error) {
+ if ldr.configdata == nil {
+ buf, err := ldr.loadBytes(ldr.Path)
+ if err != nil {
+ return nil, err
+ }
+ ldr.configdata = buf
}
// Load the config into a dummy map to get the cluster ID
var dummy struct {
Clusters map[string]struct{}
}
- err = yaml.Unmarshal(buf, &dummy)
+ err := yaml.Unmarshal(ldr.configdata, &dummy)
if err != nil {
return nil, err
}
if len(dummy.Clusters) == 0 {
- return nil, errors.New("config does not define any clusters")
+ return nil, ErrNoClustersDefined
}
// We can't merge deep structs here; instead, we unmarshal the
}
}
var src map[string]interface{}
- err = yaml.Unmarshal(buf, &src)
+ err = yaml.Unmarshal(ldr.configdata, &src)
if err != nil {
return nil, fmt.Errorf("loading config data: %s", err)
}
- logExtraKeys(log, merged, src, "")
+ ldr.logExtraKeys(merged, src, "")
removeSampleKeys(merged)
err = mergo.Merge(&merged, src, mergo.WithOverride)
if err != nil {
return nil, fmt.Errorf("transcoding config data: %s", err)
}
- if useDeprecated {
- err = applyDeprecatedConfig(&cfg, buf, log)
+ if !ldr.SkipDeprecated {
+ err = ldr.applyDeprecatedConfig(&cfg)
if err != nil {
return nil, err
}
+ for _, err := range []error{
+ ldr.loadOldKeepstoreConfig(&cfg),
+ } {
+ if err != nil {
+ return nil, err
+ }
+ }
}
// Check for known mistakes
}
}
-func logExtraKeys(log logger, expected, supplied map[string]interface{}, prefix string) {
- if log == nil {
+func (ldr *Loader) logExtraKeys(expected, supplied map[string]interface{}, prefix string) {
+ if ldr.Logger == nil {
return
}
allowed := map[string]interface{}{}
allowed[strings.ToLower(k)] = v
}
for k, vsupp := range supplied {
+ if k == "SAMPLE" {
+ // entry will be dropped in removeSampleKeys anyway
+ continue
+ }
vexp, ok := allowed[strings.ToLower(k)]
- if !ok && expected["SAMPLE"] != nil {
+ if expected["SAMPLE"] != nil {
vexp = expected["SAMPLE"]
} else if !ok {
- log.Warnf("deprecated or unknown config entry: %s%s", prefix, k)
+ ldr.Logger.Warnf("deprecated or unknown config entry: %s%s", prefix, k)
continue
}
if vsupp, ok := vsupp.(map[string]interface{}); !ok {
// will be caught elsewhere; see TestBadType.
continue
} else if vexp, ok := vexp.(map[string]interface{}); !ok {
- log.Warnf("unexpected object in config entry: %s%s", prefix, k)
+ ldr.Logger.Warnf("unexpected object in config entry: %s%s", prefix, k)
} else {
- logExtraKeys(log, vexp, vsupp, prefix+k+".")
+ ldr.logExtraKeys(vexp, vsupp, prefix+k+".")
}
}
}
import (
"bytes"
+ "fmt"
"io"
+ "io/ioutil"
"os"
"os/exec"
+ "reflect"
"strings"
"testing"
var _ = check.Suite(&LoadSuite{})
+// Return a new Loader that reads cluster config from configdata
+// (instead of the usual default /etc/arvados/config.yml), and logs to
+// logdst or (if that's nil) c.Log.
+func testLoader(c *check.C, configdata string, logdst io.Writer) *Loader {
+ logger := ctxlog.TestLogger(c)
+ if logdst != nil {
+ lgr := logrus.New()
+ lgr.Out = logdst
+ logger = lgr
+ }
+ ldr := NewLoader(bytes.NewBufferString(configdata), logger)
+ ldr.Path = "-"
+ return ldr
+}
+
type LoadSuite struct{}
func (s *LoadSuite) TestEmpty(c *check.C) {
- cfg, err := Load(&bytes.Buffer{}, ctxlog.TestLogger(c))
+ cfg, err := testLoader(c, "", nil).Load()
c.Check(cfg, check.IsNil)
c.Assert(err, check.ErrorMatches, `config does not define any clusters`)
}
func (s *LoadSuite) TestNoConfigs(c *check.C) {
- cfg, err := Load(bytes.NewBufferString(`Clusters: {"z1111": {}}`), ctxlog.TestLogger(c))
+ cfg, err := testLoader(c, `Clusters: {"z1111": {}}`, nil).Load()
c.Assert(err, check.IsNil)
c.Assert(cfg.Clusters, check.HasLen, 1)
cc, err := cfg.GetCluster("z1111")
c.Check(cc.API.MaxItemsPerResponse, check.Equals, 1000)
}
+func (s *LoadSuite) TestMungeLegacyConfigArgs(c *check.C) {
+ f, err := ioutil.TempFile("", "")
+ c.Check(err, check.IsNil)
+ defer os.Remove(f.Name())
+ io.WriteString(f, "Debug: true\n")
+ oldfile := f.Name()
+
+ f, err = ioutil.TempFile("", "")
+ c.Check(err, check.IsNil)
+ defer os.Remove(f.Name())
+ io.WriteString(f, "Clusters: {aaaaa: {}}\n")
+ newfile := f.Name()
+
+ for _, trial := range []struct {
+ argsIn []string
+ argsOut []string
+ }{
+ {
+ []string{"-config", oldfile},
+ []string{"-old-config", oldfile},
+ },
+ {
+ []string{"-config=" + oldfile},
+ []string{"-old-config=" + oldfile},
+ },
+ {
+ []string{"-config", newfile},
+ []string{"-config", newfile},
+ },
+ {
+ []string{"-config=" + newfile},
+ []string{"-config=" + newfile},
+ },
+ {
+ []string{"-foo", oldfile},
+ []string{"-foo", oldfile},
+ },
+ {
+ []string{"-foo=" + oldfile},
+ []string{"-foo=" + oldfile},
+ },
+ {
+ []string{"-foo", "-config=" + oldfile},
+ []string{"-foo", "-old-config=" + oldfile},
+ },
+ {
+ []string{"-foo", "bar", "-config=" + oldfile},
+ []string{"-foo", "bar", "-old-config=" + oldfile},
+ },
+ {
+ []string{"-foo=bar", "baz", "-config=" + oldfile},
+ []string{"-foo=bar", "baz", "-old-config=" + oldfile},
+ },
+ {
+ []string{"-config=/dev/null"},
+ []string{"-config=/dev/null"},
+ },
+ {
+ []string{"-config=-"},
+ []string{"-config=-"},
+ },
+ {
+ []string{"-config="},
+ []string{"-config="},
+ },
+ {
+ []string{"-foo=bar", "baz", "-config"},
+ []string{"-foo=bar", "baz", "-config"},
+ },
+ {
+ []string{},
+ nil,
+ },
+ } {
+ var logbuf bytes.Buffer
+ logger := logrus.New()
+ logger.Out = &logbuf
+
+ var ldr Loader
+ args := ldr.MungeLegacyConfigArgs(logger, trial.argsIn, "-old-config")
+ c.Check(args, check.DeepEquals, trial.argsOut)
+ if fmt.Sprintf("%v", trial.argsIn) != fmt.Sprintf("%v", trial.argsOut) {
+ c.Check(logbuf.String(), check.Matches, `.*`+oldfile+` is not a cluster config file -- interpreting -config as -old-config.*\n`)
+ }
+ }
+}
+
func (s *LoadSuite) TestSampleKeys(c *check.C) {
for _, yaml := range []string{
`{"Clusters":{"z1111":{}}}`,
`{"Clusters":{"z1111":{"InstanceTypes":{"Foo":{"RAM": "12345M"}}}}}`,
} {
- cfg, err := Load(bytes.NewBufferString(yaml), ctxlog.TestLogger(c))
+ cfg, err := testLoader(c, yaml, nil).Load()
c.Assert(err, check.IsNil)
cc, err := cfg.GetCluster("z1111")
_, hasSample := cc.InstanceTypes["SAMPLE"]
}
func (s *LoadSuite) TestMultipleClusters(c *check.C) {
- cfg, err := Load(bytes.NewBufferString(`{"Clusters":{"z1111":{},"z2222":{}}}`), ctxlog.TestLogger(c))
+ cfg, err := testLoader(c, `{"Clusters":{"z1111":{},"z2222":{}}}`, nil).Load()
c.Assert(err, check.IsNil)
c1, err := cfg.GetCluster("z1111")
c.Assert(err, check.IsNil)
func (s *LoadSuite) TestDeprecatedOrUnknownWarning(c *check.C) {
var logbuf bytes.Buffer
- logger := logrus.New()
- logger.Out = &logbuf
- _, err := Load(bytes.NewBufferString(`
+ _, err := testLoader(c, `
Clusters:
zzzzz:
postgresql: {}
Host: z2222.arvadosapi.com
Proxy: true
BadKey: badValue
-`), logger)
+`, &logbuf).Load()
c.Assert(err, check.IsNil)
logs := strings.Split(strings.TrimSuffix(logbuf.String(), "\n"), "\n")
for _, log := range logs {
c.Check(logs, check.HasLen, 2)
}
+func (s *LoadSuite) checkSAMPLEKeys(c *check.C, path string, x interface{}) {
+ v := reflect.Indirect(reflect.ValueOf(x))
+ switch v.Kind() {
+ case reflect.Map:
+ var stringKeys, sampleKey bool
+ iter := v.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ if k.Kind() == reflect.String {
+ stringKeys = true
+ if k.String() == "SAMPLE" || k.String() == "xxxxx" {
+ sampleKey = true
+ s.checkSAMPLEKeys(c, path+"."+k.String(), iter.Value().Interface())
+ }
+ }
+ }
+ if stringKeys && !sampleKey {
+ c.Errorf("%s is a map with string keys (type %T) but config.default.yml has no SAMPLE key", path, x)
+ }
+ return
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ val := v.Field(i)
+ if val.CanInterface() {
+ s.checkSAMPLEKeys(c, path+"."+v.Type().Field(i).Name, val.Interface())
+ }
+ }
+ }
+}
+
+func (s *LoadSuite) TestDefaultConfigHasAllSAMPLEKeys(c *check.C) {
+ var logbuf bytes.Buffer
+ loader := testLoader(c, string(DefaultYAML), &logbuf)
+ cfg, err := loader.Load()
+ c.Assert(err, check.IsNil)
+ s.checkSAMPLEKeys(c, "", cfg)
+}
+
func (s *LoadSuite) TestNoUnrecognizedKeysInDefaultConfig(c *check.C) {
var logbuf bytes.Buffer
- logger := logrus.New()
- logger.Out = &logbuf
var supplied map[string]interface{}
yaml.Unmarshal(DefaultYAML, &supplied)
- cfg, err := Load(bytes.NewBuffer(DefaultYAML), logger)
+
+ loader := testLoader(c, string(DefaultYAML), &logbuf)
+ cfg, err := loader.Load()
c.Assert(err, check.IsNil)
var loaded map[string]interface{}
buf, err := yaml.Marshal(cfg)
err = yaml.Unmarshal(buf, &loaded)
c.Assert(err, check.IsNil)
- logExtraKeys(logger, loaded, supplied, "")
+ loader.logExtraKeys(loaded, supplied, "")
c.Check(logbuf.String(), check.Equals, "")
}
var logbuf bytes.Buffer
logger := logrus.New()
logger.Out = &logbuf
- cfg, err := Load(bytes.NewBufferString(`{"Clusters":{"zzzzz":{}}}`), logger)
+ cfg, err := testLoader(c, `{"Clusters":{"zzzzz":{}}}`, &logbuf).Load()
c.Assert(err, check.IsNil)
yaml, err := yaml.Marshal(cfg)
c.Assert(err, check.IsNil)
- cfgDumped, err := Load(bytes.NewBuffer(yaml), logger)
+ cfgDumped, err := testLoader(c, string(yaml), &logbuf).Load()
c.Assert(err, check.IsNil)
c.Check(cfg, check.DeepEquals, cfgDumped)
c.Check(logbuf.String(), check.Equals, "")
}
func (s *LoadSuite) TestPostgreSQLKeyConflict(c *check.C) {
- _, err := Load(bytes.NewBufferString(`
+ _, err := testLoader(c, `
Clusters:
zzzzz:
postgresql:
connection:
DBName: dbname
Host: host
-`), ctxlog.TestLogger(c))
+`, nil).Load()
c.Check(err, check.ErrorMatches, `Clusters.zzzzz.PostgreSQL.Connection: multiple entries for "(dbname|host)".*`)
}
`,
} {
c.Log(data)
- v, err := Load(bytes.NewBufferString(data), ctxlog.TestLogger(c))
+ v, err := testLoader(c, data, nil).Load()
if v != nil {
c.Logf("%#v", v.Clusters["zzzzz"].PostgreSQL.ConnectionPool)
}
}
func (s *LoadSuite) checkEquivalent(c *check.C, goty, expectedy string) {
- got, err := Load(bytes.NewBufferString(goty), ctxlog.TestLogger(c))
+ got, err := testLoader(c, goty, nil).Load()
c.Assert(err, check.IsNil)
- expected, err := Load(bytes.NewBufferString(expectedy), ctxlog.TestLogger(c))
+ expected, err := testLoader(c, expectedy, nil).Load()
c.Assert(err, check.IsNil)
if !c.Check(got, check.DeepEquals, expected) {
cmd := exec.Command("diff", "-u", "--label", "expected", "--label", "got", "/dev/fd/3", "/dev/fd/4")
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package federation
+
+import (
+ "context"
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "git.curoverse.com/arvados.git/lib/controller/railsproxy"
+ "git.curoverse.com/arvados.git/lib/controller/rpc"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/auth"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+)
+
+type Conn struct {
+ cluster *arvados.Cluster
+ local backend
+ remotes map[string]backend
+}
+
+func New(cluster *arvados.Cluster) arvados.API {
+ local := railsproxy.NewConn(cluster)
+ remotes := map[string]backend{}
+ for id, remote := range cluster.RemoteClusters {
+ if !remote.Proxy {
+ continue
+ }
+ remotes[id] = rpc.NewConn(id, &url.URL{Scheme: remote.Scheme, Host: remote.Host}, remote.Insecure, saltedTokenProvider(local, id))
+ }
+
+ return &Conn{
+ cluster: cluster,
+ local: local,
+ remotes: remotes,
+ }
+}
+
+// Return a new rpc.TokenProvider that takes the client-provided
+// tokens from an incoming request context, determines whether they
+// should (and can) be salted for the given remoteID, and returns the
+// resulting tokens.
+func saltedTokenProvider(local backend, remoteID string) rpc.TokenProvider {
+ return func(ctx context.Context) ([]string, error) {
+ var tokens []string
+ incoming, ok := auth.FromContext(ctx)
+ if !ok {
+ return nil, errors.New("no token provided")
+ }
+ for _, token := range incoming.Tokens {
+ salted, err := auth.SaltToken(token, remoteID)
+ switch err {
+ case nil:
+ tokens = append(tokens, salted)
+ case auth.ErrSalted:
+ tokens = append(tokens, token)
+ case auth.ErrObsoleteToken:
+ ctx := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{token}})
+ aca, err := local.APIClientAuthorizationCurrent(ctx, arvados.GetOptions{})
+ if errStatus(err) == http.StatusUnauthorized {
+ // pass through unmodified
+ tokens = append(tokens, token)
+ continue
+ } else if err != nil {
+ return nil, err
+ }
+ salted, err := auth.SaltToken(aca.TokenV2(), remoteID)
+ if err != nil {
+ return nil, err
+ }
+ tokens = append(tokens, salted)
+ default:
+ return nil, err
+ }
+ }
+ return tokens, nil
+ }
+}
+
+// Return suitable backend for a query about the given cluster ID
+// ("aaaaa") or object UUID ("aaaaa-dz642-abcdefghijklmno").
+func (conn *Conn) chooseBackend(id string) backend {
+ if len(id) == 27 {
+ id = id[:5]
+ } else if len(id) != 5 {
+ // PDH or bogus ID
+ return conn.local
+ }
+ if id == conn.cluster.ClusterID {
+ return conn.local
+ } else if be, ok := conn.remotes[id]; ok {
+ return be
+ } else {
+ // TODO: return an "always error" backend?
+ return conn.local
+ }
+}
+
+// Call fn with the local backend; then, if fn returned 404, call fn
+// on the available remote backends (possibly concurrently) until one
+// succeeds.
+//
+// The second argument to fn is the cluster ID of the remote backend,
+// or "" for the local backend.
+//
+// A non-nil error means all backends failed.
+func (conn *Conn) tryLocalThenRemotes(ctx context.Context, fn func(context.Context, string, backend) error) error {
+ if err := fn(ctx, "", conn.local); err == nil || errStatus(err) != http.StatusNotFound {
+ return err
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ errchan := make(chan error, len(conn.remotes))
+ for remoteID, be := range conn.remotes {
+ remoteID, be := remoteID, be
+ go func() {
+ errchan <- fn(ctx, remoteID, be)
+ }()
+ }
+ all404 := true
+ var errs []error
+ for i := 0; i < cap(errchan); i++ {
+ err := <-errchan
+ if err == nil {
+ return nil
+ }
+ all404 = all404 && errStatus(err) == http.StatusNotFound
+ errs = append(errs, err)
+ }
+ if all404 {
+ return notFoundError{}
+ }
+ // FIXME: choose appropriate HTTP status
+ return fmt.Errorf("errors: %v", errs)
+}
+
+func (conn *Conn) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {
+ return conn.chooseBackend(options.ClusterID).CollectionCreate(ctx, options)
+}
+
+func (conn *Conn) CollectionUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Collection, error) {
+ return conn.chooseBackend(options.UUID).CollectionUpdate(ctx, options)
+}
+
+func rewriteManifest(mt, remoteID string) string {
+ return regexp.MustCompile(` [0-9a-f]{32}\+[^ ]*`).ReplaceAllStringFunc(mt, func(tok string) string {
+ return strings.Replace(tok, "+A", "+R"+remoteID+"-", -1)
+ })
+}
+
+// this could be in sdk/go/arvados
+func portableDataHash(mt string) string {
+ h := md5.New()
+ blkRe := regexp.MustCompile(`^ [0-9a-f]{32}\+\d+`)
+ size := 0
+ _ = regexp.MustCompile(` ?[^ ]*`).ReplaceAllFunc([]byte(mt), func(tok []byte) []byte {
+ if m := blkRe.Find(tok); m != nil {
+ // write hash+size, ignore remaining block hints
+ tok = m
+ }
+ n, err := h.Write(tok)
+ if err != nil {
+ panic(err)
+ }
+ size += n
+ return nil
+ })
+ return fmt.Sprintf("%x+%d", h.Sum(nil), size)
+}
+
+func (conn *Conn) CollectionGet(ctx context.Context, options arvados.GetOptions) (arvados.Collection, error) {
+ if len(options.UUID) == 27 {
+ // UUID is really a UUID
+ c, err := conn.chooseBackend(options.UUID).CollectionGet(ctx, options)
+ if err == nil && options.UUID[:5] != conn.cluster.ClusterID {
+ c.ManifestText = rewriteManifest(c.ManifestText, options.UUID[:5])
+ }
+ return c, err
+ } else {
+ // UUID is a PDH
+ first := make(chan arvados.Collection, 1)
+ err := conn.tryLocalThenRemotes(ctx, func(ctx context.Context, remoteID string, be backend) error {
+ c, err := be.CollectionGet(ctx, options)
+ if err != nil {
+ return err
+ }
+ // options.UUID is either hash+size or
+ // hash+size+hints; only hash+size need to
+ // match the computed PDH.
+ if pdh := portableDataHash(c.ManifestText); pdh != options.UUID && !strings.HasPrefix(options.UUID, pdh+"+") {
+ ctxlog.FromContext(ctx).Warnf("bad portable data hash %q received from remote %q (expected %q)", pdh, remoteID, options.UUID)
+ return notFoundError{}
+ }
+ if remoteID != "" {
+ c.ManifestText = rewriteManifest(c.ManifestText, remoteID)
+ }
+ select {
+ case first <- c:
+ return nil
+ default:
+ // lost race, return value doesn't matter
+ return nil
+ }
+ })
+ if err != nil {
+ return arvados.Collection{}, err
+ }
+ return <-first, nil
+ }
+}
+
+func (conn *Conn) CollectionList(ctx context.Context, options arvados.ListOptions) (arvados.CollectionList, error) {
+ return conn.local.CollectionList(ctx, options)
+}
+
+func (conn *Conn) CollectionProvenance(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {
+ return conn.chooseBackend(options.UUID).CollectionProvenance(ctx, options)
+}
+
+func (conn *Conn) CollectionUsedBy(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {
+ return conn.chooseBackend(options.UUID).CollectionUsedBy(ctx, options)
+}
+
+func (conn *Conn) CollectionDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {
+ return conn.chooseBackend(options.UUID).CollectionDelete(ctx, options)
+}
+
+func (conn *Conn) CollectionTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {
+ return conn.chooseBackend(options.UUID).CollectionTrash(ctx, options)
+}
+
+func (conn *Conn) CollectionUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Collection, error) {
+ return conn.chooseBackend(options.UUID).CollectionUntrash(ctx, options)
+}
+
+func (conn *Conn) ContainerCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Container, error) {
+ return conn.chooseBackend(options.ClusterID).ContainerCreate(ctx, options)
+}
+
+func (conn *Conn) ContainerUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {
+ return conn.chooseBackend(options.UUID).ContainerUpdate(ctx, options)
+}
+
+func (conn *Conn) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
+ return conn.chooseBackend(options.UUID).ContainerGet(ctx, options)
+}
+
+func (conn *Conn) ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {
+ return conn.local.ContainerList(ctx, options)
+}
+
+func (conn *Conn) ContainerDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Container, error) {
+ return conn.chooseBackend(options.UUID).ContainerDelete(ctx, options)
+}
+
+func (conn *Conn) ContainerLock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
+ return conn.chooseBackend(options.UUID).ContainerLock(ctx, options)
+}
+
+func (conn *Conn) ContainerUnlock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
+ return conn.chooseBackend(options.UUID).ContainerUnlock(ctx, options)
+}
+
+func (conn *Conn) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
+ return conn.chooseBackend(options.ClusterID).SpecimenCreate(ctx, options)
+}
+
+func (conn *Conn) SpecimenUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Specimen, error) {
+ return conn.chooseBackend(options.UUID).SpecimenUpdate(ctx, options)
+}
+
+func (conn *Conn) SpecimenGet(ctx context.Context, options arvados.GetOptions) (arvados.Specimen, error) {
+ return conn.chooseBackend(options.UUID).SpecimenGet(ctx, options)
+}
+
+func (conn *Conn) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
+ return conn.local.SpecimenList(ctx, options)
+}
+
+func (conn *Conn) SpecimenDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Specimen, error) {
+ return conn.chooseBackend(options.UUID).SpecimenDelete(ctx, options)
+}
+
+func (conn *Conn) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {
+ return conn.chooseBackend(options.UUID).APIClientAuthorizationCurrent(ctx, options)
+}
+
+type backend interface{ arvados.API }
+
+type notFoundError struct{}
+
+func (notFoundError) HTTPStatus() int { return http.StatusNotFound }
+func (notFoundError) Error() string { return "not found" }
+
+func errStatus(err error) int {
+ if httpErr, ok := err.(interface{ HTTPStatus() int }); ok {
+ return httpErr.HTTPStatus()
+ } else {
+ return http.StatusInternalServerError
+ }
+}
import (
"bytes"
+ "context"
"encoding/json"
"fmt"
"io"
// provided by the integration test environment.
remoteServer *httpserver.Server
// remoteMock ("zmock") appends each incoming request to
- // remoteMockRequests, and returns an empty 200 response.
+ // remoteMockRequests, and returns 200 with an empty JSON
+ // object.
remoteMock *httpserver.Server
remoteMockRequests []http.Request
}
c.Assert(s.remoteMock.Start(), check.IsNil)
cluster := &arvados.Cluster{
- ClusterID: "zhome",
- PostgreSQL: integrationTestCluster().PostgreSQL,
+ ClusterID: "zhome",
+ PostgreSQL: integrationTestCluster().PostgreSQL,
+ EnableBetaController14287: enableBetaController14287,
}
cluster.TLS.Insecure = true
cluster.API.MaxItemsPerResponse = 1000
arvadostest.SetServiceURL(&cluster.Services.Controller, "http://localhost:/")
s.testHandler = &Handler{Cluster: cluster}
s.testServer = newServerFromIntegrationTestEnv(c)
- s.testServer.Server.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(s.log, s.testHandler))
+ s.testServer.Server.Handler = httpserver.HandlerWithContext(
+ ctxlog.Context(context.Background(), s.log),
+ httpserver.AddRequestIDs(httpserver.LogRequests(s.testHandler)))
cluster.RemoteClusters = map[string]arvados.RemoteCluster{
"zzzzz": {
req.Body.Close()
req.Body = ioutil.NopCloser(b)
s.remoteMockRequests = append(s.remoteMockRequests, *req)
+	// Respond 200 with a valid JSON object
+ fmt.Fprint(w, "{}")
}
func (s *FederationSuite) TearDownTest(c *check.C) {
}
}
-func (s *FederationSuite) testRequest(req *http.Request) *http.Response {
+func (s *FederationSuite) testRequest(req *http.Request) *httptest.ResponseRecorder {
resp := httptest.NewRecorder()
s.testServer.Server.Handler.ServeHTTP(resp, req)
- return resp.Result()
+ return resp
}
func (s *FederationSuite) TestLocalRequest(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zhome-", 1), nil)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
s.checkHandledLocally(c, resp)
}
func (s *FederationSuite) TestNoAuth(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)
- s.checkJSONErrorMatches(c, resp, `Not logged in`)
+ s.checkJSONErrorMatches(c, resp, `Not logged in.*`)
}
func (s *FederationSuite) TestBadAuth(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
req.Header.Set("Authorization", "Bearer aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)
- s.checkJSONErrorMatches(c, resp, `Not logged in`)
+ s.checkJSONErrorMatches(c, resp, `Not logged in.*`)
}
func (s *FederationSuite) TestNoAccess(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.SpectatorToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
- s.checkJSONErrorMatches(c, resp, `.*not found`)
+ s.checkJSONErrorMatches(c, resp, `.*not found.*`)
}
func (s *FederationSuite) TestGetUnknownRemote(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zz404-", 1), nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
s.checkJSONErrorMatches(c, resp, `.*no proxy available for cluster zz404`)
}
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
s.checkJSONErrorMatches(c, resp, `.*HTTP response to HTTPS client`)
}
func (s *FederationSuite) TestGetRemoteWorkflow(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var wf arvados.Workflow
c.Check(json.NewDecoder(resp.Body).Decode(&wf), check.IsNil)
func (s *FederationSuite) TestOptionsMethod(c *check.C) {
req := httptest.NewRequest("OPTIONS", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
req.Header.Set("Origin", "https://example.com")
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
body, err := ioutil.ReadAll(resp.Body)
c.Check(err, check.IsNil)
func (s *FederationSuite) TestRemoteWithTokenInQuery(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1)+"?api_token="+arvadostest.ActiveToken, nil)
- s.testRequest(req)
+ s.testRequest(req).Result()
c.Assert(s.remoteMockRequests, check.HasLen, 1)
pr := s.remoteMockRequests[0]
// Token is salted and moved from query to Authorization header.
}
func (s *FederationSuite) TestLocalTokenSalted(c *check.C) {
- req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1), nil)
- req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- s.testRequest(req)
- c.Assert(s.remoteMockRequests, check.HasLen, 1)
- pr := s.remoteMockRequests[0]
- // The salted token here has a "zzzzz-" UUID instead of a
- // "ztest-" UUID because ztest's local database has the
- // "zzzzz-" test fixtures. The "secret" part is HMAC(sha1,
- // arvadostest.ActiveToken, "zmock") = "7fd3...".
- c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc")
+ defer s.localServiceReturns404(c).Close()
+ for _, path := range []string{
+ // During the transition to the strongly typed
+ // controller implementation (#14287), workflows and
+ // collections test different code paths.
+ "/arvados/v1/workflows/" + strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1),
+ "/arvados/v1/collections/" + strings.Replace(arvadostest.UserAgreementCollection, "zzzzz-", "zmock-", 1),
+ } {
+ c.Log("testing path ", path)
+ s.remoteMockRequests = nil
+ req := httptest.NewRequest("GET", path, nil)
+ req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+ s.testRequest(req).Result()
+ c.Assert(s.remoteMockRequests, check.HasLen, 1)
+ pr := s.remoteMockRequests[0]
+ // The salted token here has a "zzzzz-" UUID instead of a
+ // "ztest-" UUID because ztest's local database has the
+ // "zzzzz-" test fixtures. The "secret" part is HMAC(sha1,
+ // arvadostest.ActiveToken, "zmock") = "7fd3...".
+ c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc")
+ }
}
func (s *FederationSuite) TestRemoteTokenNotSalted(c *check.C) {
+ defer s.localServiceReturns404(c).Close()
// remoteToken can be any v1 token that doesn't appear in
// ztest's local db.
remoteToken := "abcdef00000000000000000000000000000000000000000000"
- req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1), nil)
- req.Header.Set("Authorization", "Bearer "+remoteToken)
- s.testRequest(req)
- c.Assert(s.remoteMockRequests, check.HasLen, 1)
- pr := s.remoteMockRequests[0]
- c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer "+remoteToken)
+
+ for _, path := range []string{
+ // During the transition to the strongly typed
+ // controller implementation (#14287), workflows and
+ // collections test different code paths.
+ "/arvados/v1/workflows/" + strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1),
+ "/arvados/v1/collections/" + strings.Replace(arvadostest.UserAgreementCollection, "zzzzz-", "zmock-", 1),
+ } {
+ c.Log("testing path ", path)
+ s.remoteMockRequests = nil
+ req := httptest.NewRequest("GET", path, nil)
+ req.Header.Set("Authorization", "Bearer "+remoteToken)
+ s.testRequest(req).Result()
+ c.Assert(s.remoteMockRequests, check.HasLen, 1)
+ pr := s.remoteMockRequests[0]
+ c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer "+remoteToken)
+ }
}
func (s *FederationSuite) TestWorkflowCRUD(c *check.C) {
- wf := arvados.Workflow{
- Description: "TestCRUD",
- }
+ var wf arvados.Workflow
{
- body := &strings.Builder{}
- json.NewEncoder(body).Encode(&wf)
req := httptest.NewRequest("POST", "/arvados/v1/workflows", strings.NewReader(url.Values{
- "workflow": {body.String()},
+ "workflow": {`{"description": "TestCRUD"}`},
}.Encode()))
req.Header.Set("Content-type", "application/x-www-form-urlencoded")
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req := httptest.NewRequest(method, "/arvados/v1/workflows/"+wf.UUID, strings.NewReader(form.Encode()))
req.Header.Set("Content-type", "application/x-www-form-urlencoded")
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
s.checkResponseOK(c, resp)
err := json.NewDecoder(resp.Body).Decode(&wf)
c.Check(err, check.IsNil)
{
req := httptest.NewRequest("DELETE", "/arvados/v1/workflows/"+wf.UUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
s.checkResponseOK(c, resp)
err := json.NewDecoder(resp.Body).Decode(&wf)
c.Check(err, check.IsNil)
{
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+wf.UUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
}
func (s *FederationSuite) localServiceReturns404(c *check.C) *httpserver.Server {
return s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- w.WriteHeader(404)
+ if req.URL.Path == "/arvados/v1/api_client_authorizations/current" {
+ if req.Header.Get("Authorization") == "Bearer "+arvadostest.ActiveToken {
+ json.NewEncoder(w).Encode(arvados.APIClientAuthorization{UUID: arvadostest.ActiveTokenUUID, APIToken: arvadostest.ActiveToken})
+ } else {
+ w.WriteHeader(http.StatusUnauthorized)
+ }
+ } else {
+ w.WriteHeader(404)
+ }
}))
}
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementCollection, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var col arvados.Collection
}).Encode()))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
- resp = s.testRequest(req)
+ resp = s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
col = arvados.Collection{}
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementCollection, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var col arvados.Collection
c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
req := httptest.NewRequest("GET", "/arvados/v1/collections/zzzzz-4zz18-fakefakefakefak", nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var col arvados.Collection
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
defer resp.Body.Close()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
defer resp.Body.Close()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
req.Header.Set("Authorization", "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065")
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var col arvados.Collection
req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
req.Header.Set("Authorization", "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065")
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
defer s.localServiceReturns404(c).Close()
req := httptest.NewRequest("GET", "/arvados/v1/container_requests/"+arvadostest.QueuedContainerRequestUUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cr arvados.ContainerRequest
c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
strings.NewReader(fmt.Sprintf(`{"container_request": {"priority": %d}}`, pri)))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-type", "application/json")
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cr arvados.ContainerRequest
c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
`))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-type", "application/json")
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cr arvados.ContainerRequest
c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
s.testHandler.Cluster.ClusterID = "zzzzz"
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cr struct {
arvados.ContainerRequest `json:"container_request"`
`))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-type", "application/json")
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cr struct {
arvados.ContainerRequest `json:"container_request"`
`))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2+"/zzzzz-dz642-parentcontainer")
req.Header.Set("Content-type", "application/json")
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cr struct {
arvados.ContainerRequest `json:"container_request"`
`))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-type", "application/json")
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
req := httptest.NewRequest("GET", "/arvados/v1/containers/"+arvadostest.QueuedContainerUUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req)
- c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ c.Check(resp.Code, check.Equals, http.StatusOK)
var cn arvados.Container
c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
c.Check(cn.UUID, check.Equals, arvadostest.QueuedContainerUUID)
req := httptest.NewRequest("GET", "/arvados/v1/containers?count=none&filters="+
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v"]]]`, arvadostest.QueuedContainerUUID)), nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cn arvados.ContainerList
c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
+ c.Assert(cn.Items, check.HasLen, 1)
c.Check(cn.Items[0].UUID, check.Equals, arvadostest.QueuedContainerUUID)
}
url.QueryEscape(`["uuid", "command"]`)),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cn arvados.ContainerList
c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
url.QueryEscape(`["uuid", "command"]`)),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
s.checkJSONErrorMatches(c, resp, `error fetching from zhome \(404 Not Found\): EOF`)
}
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
c.Check(callCount, check.Equals, 2)
var cn arvados.ContainerList
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
c.Check(callCount, check.Equals, 2)
var cn arvados.ContainerList
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
s.checkJSONErrorMatches(c, resp, `Federated multi-object request for 2 objects which is more than max page size 1.`)
}
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
s.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)
}
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
s.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)
}
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
s.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)
}
url.QueryEscape(`["command"]`)),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- resp := s.testRequest(req)
+ resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
s.checkJSONErrorMatches(c, resp, `Federated multi-object request must include 'uuid' in 'select'`)
}
"time"
"git.curoverse.com/arvados.git/lib/config"
+ "git.curoverse.com/arvados.git/lib/controller/railsproxy"
+ "git.curoverse.com/arvados.git/lib/controller/router"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/health"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
func (h *Handler) CheckHealth() error {
h.setupOnce.Do(h.setup)
- _, _, err := findRailsAPI(h.Cluster)
+ _, _, err := railsproxy.FindRailsAPI(h.Cluster)
return err
}
io.Copy(w, &buf)
}))
+ if h.Cluster.EnableBetaController14287 {
+ rtr := router.New(h.Cluster)
+ mux.Handle("/arvados/v1/collections", rtr)
+ mux.Handle("/arvados/v1/collections/", rtr)
+ }
+
hs := http.NotFoundHandler()
hs = prepend(hs, h.proxyRailsAPI)
hs = h.setupProxyRemoteCluster(hs)
}
func (h *Handler) localClusterRequest(req *http.Request) (*http.Response, error) {
- urlOut, insecure, err := findRailsAPI(h.Cluster)
+ urlOut, insecure, err := railsproxy.FindRailsAPI(h.Cluster)
if err != nil {
return nil, err
}
check "gopkg.in/check.v1"
)
+var enableBetaController14287 bool
+
// Gocheck boilerplate
func Test(t *testing.T) {
- check.TestingT(t)
+ for _, enableBetaController14287 = range []bool{false, true} {
+ check.TestingT(t)
+ }
}
var _ = check.Suite(&HandlerSuite{})
s.cluster = &arvados.Cluster{
ClusterID: "zzzzz",
PostgreSQL: integrationTestCluster().PostgreSQL,
+
+ EnableBetaController14287: enableBetaController14287,
}
s.cluster.TLS.Insecure = true
arvadostest.SetServiceURL(&s.cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
return h.Message
}
-// headers that shouldn't be forwarded when proxying. See
-// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers
var dropHeaders = map[string]bool{
+ // Headers that shouldn't be forwarded when proxying. See
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers
"Connection": true,
"Keep-Alive": true,
"Proxy-Authenticate": true,
"Proxy-Authorization": true,
- // this line makes gofmt 1.10 and 1.11 agree
- "TE": true,
- "Trailer": true,
- "Transfer-Encoding": true, // *-Encoding headers interfer with Go's automatic compression/decompression
- "Content-Encoding": true,
+ // (comment/space here makes gofmt1.10 agree with gofmt1.11)
+ "TE": true,
+ "Trailer": true,
+ "Upgrade": true,
+
+ // Headers that would interfere with Go's automatic
+ // compression/decompression if we forwarded them.
"Accept-Encoding": true,
- "Upgrade": true,
+ "Content-Encoding": true,
+ "Transfer-Encoding": true,
}
type ResponseFilter func(*http.Response, error) (*http.Response, error)
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Package railsproxy implements Arvados APIs by proxying to the
+// RailsAPI server on the local machine.
+package railsproxy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "git.curoverse.com/arvados.git/lib/controller/rpc"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/auth"
+)
+
+// For now, FindRailsAPI always uses the rails API running on this
+// node.
+func FindRailsAPI(cluster *arvados.Cluster) (*url.URL, bool, error) {
+ var best *url.URL
+ for target := range cluster.Services.RailsAPI.InternalURLs {
+ target := url.URL(target)
+ best = &target
+ if strings.HasPrefix(target.Host, "localhost:") || strings.HasPrefix(target.Host, "127.0.0.1:") || strings.HasPrefix(target.Host, "[::1]:") {
+ break
+ }
+ }
+ if best == nil {
+ return nil, false, fmt.Errorf("Services.RailsAPI.InternalURLs is empty")
+ }
+ return best, cluster.TLS.Insecure, nil
+}
+
+func NewConn(cluster *arvados.Cluster) *rpc.Conn {
+ url, insecure, err := FindRailsAPI(cluster)
+ if err != nil {
+ panic(err)
+ }
+ return rpc.NewConn(cluster.ClusterID, url, insecure, provideIncomingToken)
+}
+
+func provideIncomingToken(ctx context.Context) ([]string, error) {
+ incoming, ok := auth.FromContext(ctx)
+ if !ok {
+ return nil, errors.New("no token provided")
+ }
+ return incoming.Tokens, nil
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package router
+
+import (
+ "reflect"
+ "runtime"
+
+ check "gopkg.in/check.v1"
+)
+
+// a Gocheck checker for testing the name of a function. Used with
+// (*arvadostest.APIStub).Calls() to check that an HTTP request has
+// been routed to the correct arvados.API method.
+//
+// c.Check(bytes.NewBuffer(nil).Read, isMethodNamed, "Read")
+var isMethodNamed check.Checker = &chkIsMethodNamed{
+ CheckerInfo: &check.CheckerInfo{
+ Name: "isMethodNamed",
+ Params: []string{"obtained", "expected"},
+ },
+}
+
+type chkIsMethodNamed struct{ *check.CheckerInfo }
+
+func (*chkIsMethodNamed) Check(params []interface{}, names []string) (bool, string) {
+ methodName := runtime.FuncForPC(reflect.ValueOf(params[0]).Pointer()).Name()
+ regex := `.*\)\.` + params[1].(string) + `(-.*)?`
+ return check.Matches.Check([]interface{}{methodName, regex}, names)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package router
+
+type errorWithStatus struct {
+ code int
+ error
+}
+
+func (err errorWithStatus) HTTPStatus() int {
+ return err.code
+}
+
+func httpError(code int, err error) error {
+ return errorWithStatus{code: code, error: err}
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package router
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "mime"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/julienschmidt/httprouter"
+)
+
+// Parse req as an Arvados V1 API request and return the request
+// parameters.
+//
+// If the request has a parameter whose name is attrsKey (e.g.,
+// "collection"), it is renamed to "attrs".
+func (rtr *router) loadRequestParams(req *http.Request, attrsKey string) (map[string]interface{}, error) {
+ err := req.ParseForm()
+ if err != nil {
+ return nil, httpError(http.StatusBadRequest, err)
+ }
+ params := map[string]interface{}{}
+
+ // Load parameters from req.Form, which (after
+ // req.ParseForm()) includes the query string and -- when
+ // Content-Type is application/x-www-form-urlencoded -- the
+ // request body.
+ for k, values := range req.Form {
+ // All of these form values arrive as strings, so we
+ // need some type-guessing to accept non-string
+ // inputs:
+ //
+ // Values for parameters that take ints (limit=1) or
+ // bools (include_trash=1) are parsed accordingly.
+ //
+ // "null" and "" are nil.
+ //
+ // Values that look like JSON objects, arrays, or
+ // strings are parsed as JSON.
+ //
+ // The rest are left as strings.
+ for _, v := range values {
+ switch {
+ case intParams[k]:
+ params[k], err = strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ case boolParams[k]:
+ params[k] = stringToBool(v)
+ case v == "null" || v == "":
+ params[k] = nil
+ case strings.HasPrefix(v, "["):
+ var j []interface{}
+ err := json.Unmarshal([]byte(v), &j)
+ if err != nil {
+ return nil, err
+ }
+ params[k] = j
+ case strings.HasPrefix(v, "{"):
+ var j map[string]interface{}
+ err := json.Unmarshal([]byte(v), &j)
+ if err != nil {
+ return nil, err
+ }
+ params[k] = j
+ case strings.HasPrefix(v, "\""):
+ var j string
+ err := json.Unmarshal([]byte(v), &j)
+ if err != nil {
+ return nil, err
+ }
+ params[k] = j
+ default:
+ params[k] = v
+ }
+ // TODO: Need to accept "?foo[]=bar&foo[]=baz"
+ // as foo=["bar","baz"]?
+ }
+ }
+
+ // Decode body as JSON if Content-Type request header is
+ // missing or application/json.
+ mt := req.Header.Get("Content-Type")
+ if ct, _, err := mime.ParseMediaType(mt); err != nil && mt != "" {
+ return nil, fmt.Errorf("error parsing media type %q: %s", mt, err)
+ } else if (ct == "application/json" || mt == "") && req.ContentLength != 0 {
+ jsonParams := map[string]interface{}{}
+ err := json.NewDecoder(req.Body).Decode(&jsonParams)
+ if err != nil {
+ return nil, httpError(http.StatusBadRequest, err)
+ }
+ for k, v := range jsonParams {
+ params[k] = v
+ }
+ if attrsKey != "" && params[attrsKey] == nil {
+ // Copy top-level parameters from JSON request
+ // body into params[attrsKey]. Some SDKs rely
+ // on this Rails API feature; see
+ // https://api.rubyonrails.org/v5.2.1/classes/ActionController/ParamsWrapper.html
+ params[attrsKey] = jsonParams
+ }
+ }
+
+ routeParams, _ := req.Context().Value(httprouter.ParamsKey).(httprouter.Params)
+ for _, p := range routeParams {
+ params[p.Key] = p.Value
+ }
+
+ if v, ok := params[attrsKey]; ok && attrsKey != "" {
+ params["attrs"] = v
+ delete(params, attrsKey)
+ }
+
+ if order, ok := params["order"].(string); ok {
+ // We must accept strings ("foo, bar desc") and arrays
+ // (["foo", "bar desc"]) because RailsAPI does.
+ // Convert to an array here before trying to unmarshal
+ // into options structs.
+ if order == "" {
+ delete(params, "order")
+ } else {
+ params["order"] = strings.Split(order, ",")
+ }
+ }
+
+ return params, nil
+}
+
+// Copy src to dst, using json as an intermediate format in order to
+// invoke src's json-marshaling and dst's json-unmarshaling behaviors.
+func (rtr *router) transcode(src interface{}, dst interface{}) error {
+ var errw error
+ pr, pw := io.Pipe()
+ go func() {
+ defer pw.Close()
+ errw = json.NewEncoder(pw).Encode(src)
+ }()
+ defer pr.Close()
+ err := json.NewDecoder(pr).Decode(dst)
+ if errw != nil {
+ return errw
+ }
+ return err
+}
+
+var intParams = map[string]bool{
+ "limit": true,
+ "offset": true,
+}
+
+var boolParams = map[string]bool{
+ "distinct": true,
+ "ensure_unique_name": true,
+ "include_trash": true,
+ "include_old_versions": true,
+}
+
+func stringToBool(s string) bool {
+ switch s {
+ case "", "false", "0":
+ return false
+ default:
+ return true
+ }
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package router
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+ check "gopkg.in/check.v1"
+)
+
+type testReq struct {
+ method string
+ path string
+ token string // default is ActiveTokenV2; use noToken to omit
+ param map[string]interface{}
+ attrs map[string]interface{}
+ attrsKey string
+ header http.Header
+
+ // variations on request formatting
+ json bool
+ jsonAttrsTop bool
+ jsonStringParam bool
+ tokenInBody bool
+ tokenInQuery bool
+ noContentType bool
+
+ body *bytes.Buffer
+}
+
+const noToken = "(no token)"
+
+// Request builds an *http.Request according to the testReq fields,
+// encoding params/attrs into tr.body (unless the caller supplied a
+// body) and setting Authorization and Content-Type headers.
+func (tr *testReq) Request() *http.Request {
+	param := map[string]interface{}{}
+	for k, v := range tr.param {
+		param[k] = v
+	}
+
+	if tr.body != nil {
+		// caller provided a buffer
+	} else if tr.json {
+		if tr.jsonAttrsTop {
+			for k, v := range tr.attrs {
+				param[k] = v
+			}
+		} else if tr.attrs != nil {
+			param[tr.attrsKey] = tr.attrs
+		}
+		tr.body = bytes.NewBuffer(nil)
+		err := json.NewEncoder(tr.body).Encode(param)
+		if err != nil {
+			panic(err)
+		}
+	} else {
+		// Form encoding: non-string values (and strings, if
+		// jsonStringParam is set) are JSON-encoded into the
+		// form value.
+		values := make(url.Values)
+		for k, v := range param {
+			if vs, ok := v.(string); ok && !tr.jsonStringParam {
+				values.Set(k, vs)
+			} else {
+				jv, err := json.Marshal(v)
+				if err != nil {
+					panic(err)
+				}
+				values.Set(k, string(jv))
+			}
+		}
+		if tr.attrs != nil {
+			jattrs, err := json.Marshal(tr.attrs)
+			if err != nil {
+				panic(err)
+			}
+			values.Set(tr.attrsKey, string(jattrs))
+		}
+		tr.body = bytes.NewBuffer(nil)
+		io.WriteString(tr.body, values.Encode())
+	}
+	method := tr.method
+	if method == "" {
+		method = "GET"
+	}
+	path := tr.path
+	if path == "" {
+		path = "example/test/path"
+	}
+	req := httptest.NewRequest(method, "https://an.example/"+path, tr.body)
+	token := tr.token
+	if token == "" {
+		token = arvadostest.ActiveTokenV2
+	}
+	if token != noToken {
+		req.Header.Set("Authorization", "Bearer "+token)
+	}
+	if tr.json {
+		req.Header.Set("Content-Type", "application/json")
+	} else {
+		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	}
+	// Caller-supplied headers override/extend the defaults above.
+	for k, v := range tr.header {
+		req.Header[k] = append([]string(nil), v...)
+	}
+	return req
+}
+
+// bodyContent returns the encoded request body as a string. It
+// returns "" if no body has been built yet (previously this would
+// panic with a nil pointer dereference when called before Request()).
+func (tr *testReq) bodyContent() string {
+	if tr.body == nil {
+		return ""
+	}
+	// Buffer.String() is the idiomatic equivalent of
+	// string(tr.body.Bytes()).
+	return tr.body.String()
+}
+
+// TestAttrsInBody checks that attrs supplied under the model-name key
+// -- or merged into the top level of a JSON body -- both end up under
+// params["attrs"].
+func (s *RouterSuite) TestAttrsInBody(c *check.C) {
+	attrs := map[string]interface{}{"foo": "bar"}
+	for _, tr := range []testReq{
+		{attrsKey: "model_name", json: true, attrs: attrs},
+		{attrsKey: "model_name", json: true, attrs: attrs, jsonAttrsTop: true},
+	} {
+		c.Logf("tr: %#v", tr)
+		req := tr.Request()
+		params, err := s.rtr.loadRequestParams(req, tr.attrsKey)
+		c.Logf("params: %#v", params)
+		c.Assert(err, check.IsNil)
+		c.Check(params, check.NotNil)
+		c.Assert(params["attrs"], check.FitsTypeOf, map[string]interface{}{})
+		c.Check(params["attrs"].(map[string]interface{})["foo"], check.Equals, "bar")
+	}
+}
+
+// TestBoolParam checks that boolean params are decoded consistently
+// whether sent as JSON booleans or as form-encoded strings
+// ("false"/"0"/"" are false; "true"/"1" are true).
+func (s *RouterSuite) TestBoolParam(c *check.C) {
+	testKey := "ensure_unique_name"
+
+	for i, tr := range []testReq{
+		{method: "POST", param: map[string]interface{}{testKey: false}, json: true},
+		{method: "POST", param: map[string]interface{}{testKey: false}},
+		{method: "POST", param: map[string]interface{}{testKey: "false"}},
+		{method: "POST", param: map[string]interface{}{testKey: "0"}},
+		{method: "POST", param: map[string]interface{}{testKey: ""}},
+	} {
+		c.Logf("#%d, tr: %#v", i, tr)
+		req := tr.Request()
+		c.Logf("tr.body: %s", tr.bodyContent())
+		params, err := s.rtr.loadRequestParams(req, tr.attrsKey)
+		c.Logf("params: %#v", params)
+		c.Assert(err, check.IsNil)
+		c.Check(params, check.NotNil)
+		c.Check(params[testKey], check.Equals, false)
+	}
+
+	for i, tr := range []testReq{
+		{method: "POST", param: map[string]interface{}{testKey: true}, json: true},
+		{method: "POST", param: map[string]interface{}{testKey: true}},
+		{method: "POST", param: map[string]interface{}{testKey: "true"}},
+		{method: "POST", param: map[string]interface{}{testKey: "1"}},
+	} {
+		c.Logf("#%d, tr: %#v", i, tr)
+		req := tr.Request()
+		c.Logf("tr.body: %s", tr.bodyContent())
+		params, err := s.rtr.loadRequestParams(req, tr.attrsKey)
+		c.Logf("params: %#v", params)
+		c.Assert(err, check.IsNil)
+		c.Check(params, check.NotNil)
+		c.Check(params[testKey], check.Equals, true)
+	}
+}
+
+// TestOrderParam checks the "order" parameter is accepted as an empty
+// value, a comma-separated string, a JSON-encoded array string, or a
+// real array, in both JSON and form encodings.
+func (s *RouterSuite) TestOrderParam(c *check.C) {
+	for i, tr := range []testReq{
+		{method: "POST", param: map[string]interface{}{"order": ""}, json: true},
+		{method: "POST", param: map[string]interface{}{"order": ""}, json: false},
+		{method: "POST", param: map[string]interface{}{"order": []string{}}, json: true},
+		{method: "POST", param: map[string]interface{}{"order": []string{}}, json: false},
+		{method: "POST", param: map[string]interface{}{}, json: true},
+		{method: "POST", param: map[string]interface{}{}, json: false},
+	} {
+		c.Logf("#%d, tr: %#v", i, tr)
+		req := tr.Request()
+		params, err := s.rtr.loadRequestParams(req, tr.attrsKey)
+		c.Assert(err, check.IsNil)
+		c.Assert(params, check.NotNil)
+		// An empty order param may be omitted or decoded as an
+		// empty list; either is acceptable.
+		if order, ok := params["order"]; ok && order != nil {
+			c.Check(order, check.DeepEquals, []interface{}{})
+		}
+	}
+
+	for i, tr := range []testReq{
+		{method: "POST", param: map[string]interface{}{"order": "foo,bar desc"}, json: true},
+		{method: "POST", param: map[string]interface{}{"order": "foo,bar desc"}, json: false},
+		{method: "POST", param: map[string]interface{}{"order": "[\"foo\", \"bar desc\"]"}, json: false},
+		{method: "POST", param: map[string]interface{}{"order": []string{"foo", "bar desc"}}, json: true},
+		{method: "POST", param: map[string]interface{}{"order": []string{"foo", "bar desc"}}, json: false},
+	} {
+		c.Logf("#%d, tr: %#v", i, tr)
+		req := tr.Request()
+		params, err := s.rtr.loadRequestParams(req, tr.attrsKey)
+		c.Assert(err, check.IsNil)
+		if _, ok := params["order"].([]string); ok {
+			c.Check(params["order"], check.DeepEquals, []string{"foo", "bar desc"})
+		} else {
+			c.Check(params["order"], check.DeepEquals, []interface{}{"foo", "bar desc"})
+		}
+	}
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package router
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "regexp"
+ "strings"
+ "time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
+const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// responseOptions are the request options that affect response
+// formatting (rather than the API call itself).
+type responseOptions struct {
+	Select []string // attribute names to include in the response
+	Count  string   // "none" suppresses items_available in lists
+}
+
+// responseOptions extracts response-formatting options from the
+// decoded request options. Option types without select/count fields
+// yield the zero value.
+func (rtr *router) responseOptions(opts interface{}) (responseOptions, error) {
+	var rOpts responseOptions
+	switch opts := opts.(type) {
+	case *arvados.GetOptions:
+		rOpts.Select = opts.Select
+	case *arvados.ListOptions:
+		rOpts.Select = opts.Select
+		rOpts.Count = opts.Count
+	}
+	return rOpts, nil
+}
+
+// applySelectParam returns a copy of orig containing only the
+// attributes named in selectParam (plus "kind", which is always
+// preserved). An empty selectParam returns orig unchanged.
+func applySelectParam(selectParam []string, orig map[string]interface{}) map[string]interface{} {
+	if len(selectParam) == 0 {
+		return orig
+	}
+	selected := map[string]interface{}{}
+	for _, attr := range selectParam {
+		if v, ok := orig[attr]; ok {
+			selected[attr] = v
+		}
+	}
+	// Preserve "kind" even if not requested
+	if v, ok := orig["kind"]; ok {
+		selected["kind"] = v
+	}
+	return selected
+}
+
+// sendResponse encodes resp as a JSON object and writes it to w,
+// after (1) adding the "kind" fields Rails API clients expect,
+// (2) applying the caller's select/count options, and (3) rewriting
+// *_at timestamps with fixed-width nanoseconds.
+func (rtr *router) sendResponse(w http.ResponseWriter, resp interface{}, opts responseOptions) {
+	var tmp map[string]interface{}
+
+	err := rtr.transcode(resp, &tmp)
+	if err != nil {
+		rtr.sendError(w, err)
+		return
+	}
+
+	respKind := kind(resp)
+	if respKind != "" {
+		tmp["kind"] = respKind
+	}
+	defaultItemKind := ""
+	if strings.HasSuffix(respKind, "List") {
+		defaultItemKind = strings.TrimSuffix(respKind, "List")
+	}
+
+	if items, ok := tmp["items"].([]interface{}); ok {
+		for i, item := range items {
+			// Fill in "kind" by inspecting UUID/PDH if
+			// possible; fall back on assuming each
+			// Items[] entry in an "arvados#fooList"
+			// response should have kind="arvados#foo".
+			item, ok := item.(map[string]interface{})
+			if !ok {
+				// Entry is not a JSON object (e.g.,
+				// null). Skip it: assigning
+				// item["kind"] below would panic
+				// (assignment to entry in nil map).
+				continue
+			}
+			infix := ""
+			if uuid, _ := item["uuid"].(string); len(uuid) == 27 {
+				infix = uuid[6:11]
+			}
+			if k := kind(infixMap[infix]); k != "" {
+				item["kind"] = k
+			} else if pdh, _ := item["portable_data_hash"].(string); pdh != "" {
+				item["kind"] = "arvados#collection"
+			} else if defaultItemKind != "" {
+				item["kind"] = defaultItemKind
+			}
+			items[i] = applySelectParam(opts.Select, item)
+		}
+		if opts.Count == "none" {
+			delete(tmp, "items_available")
+		}
+	} else {
+		tmp = applySelectParam(opts.Select, tmp)
+	}
+
+	// Format non-nil timestamps as rfc3339NanoFixed (by default
+	// they will have been encoded to time.RFC3339Nano, which
+	// omits trailing zeroes).
+	for k, v := range tmp {
+		if !strings.HasSuffix(k, "_at") {
+			continue
+		}
+		switch tv := v.(type) {
+		case *time.Time:
+			if tv == nil {
+				break
+			}
+			tmp[k] = tv.Format(rfc3339NanoFixed)
+		case time.Time:
+			tmp[k] = tv.Format(rfc3339NanoFixed)
+		case string:
+			t, err := time.Parse(time.RFC3339Nano, tv)
+			if err != nil {
+				break
+			}
+			tmp[k] = t.Format(rfc3339NanoFixed)
+		}
+	}
+	w.Header().Set("Content-Type", "application/json")
+	json.NewEncoder(w).Encode(tmp)
+}
+
+// sendError writes err as a JSON error response, using the error's
+// own HTTP status if it provides one, otherwise 500.
+func (rtr *router) sendError(w http.ResponseWriter, err error) {
+	code := http.StatusInternalServerError
+	if err, ok := err.(interface{ HTTPStatus() int }); ok {
+		code = err.HTTPStatus()
+	}
+	httpserver.Error(w, err.Error(), code)
+}
+
+// infixMap maps the 5-character UUID type infix (chars 6-10 of a
+// 27-character UUID) to a zero value of the corresponding resource
+// type, used by kind() to derive the "kind" string.
+var infixMap = map[string]interface{}{
+	"4zz18": arvados.Collection{},
+	"j7d0g": arvados.Group{},
+}
+
+// mungeKind matches the "." and following character in a type name
+// like "arvados.CollectionList", so kind() can rewrite them.
+var mungeKind = regexp.MustCompile(`\..`)
+
+// kind returns the Rails-style "kind" string for an SDK type, e.g.
+// arvados.CollectionList => "arvados#collectionList". It returns ""
+// for non-arvados types (including nil).
+func kind(resp interface{}) string {
+	t := fmt.Sprintf("%T", resp)
+	if !strings.HasPrefix(t, "arvados.") {
+		return ""
+	}
+	return mungeKind.ReplaceAllStringFunc(t, func(s string) string {
+		// "arvados.CollectionList" => "arvados#collectionList"
+		return "#" + strings.ToLower(s[1:])
+	})
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package router
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "git.curoverse.com/arvados.git/lib/controller/federation"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/auth"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ "git.curoverse.com/arvados.git/sdk/go/httpserver"
+ "github.com/julienschmidt/httprouter"
+ "github.com/sirupsen/logrus"
+)
+
+// router dispatches incoming API requests to the federation-aware
+// backend implementing arvados.API.
+type router struct {
+	mux *httprouter.Router
+	fed arvados.API
+}
+
+// New returns a router with all API routes registered, backed by a
+// federation conn for the given cluster.
+func New(cluster *arvados.Cluster) *router {
+	rtr := &router{
+		mux: httprouter.New(),
+		fed: federation.New(cluster),
+	}
+	rtr.addRoutes()
+	return rtr
+}
+
+type routableFunc func(ctx context.Context, opts interface{}) (interface{}, error)
+
+// addRoutes registers one route per API endpoint. Each route pairs
+// the endpoint with a factory for its default options struct and a
+// closure that forwards the decoded options to the federation
+// backend. PATCH endpoints are also registered under PUT.
+func (rtr *router) addRoutes() {
+	for _, route := range []struct {
+		endpoint    arvados.APIEndpoint
+		defaultOpts func() interface{}
+		exec        routableFunc
+	}{
+		{
+			arvados.EndpointCollectionCreate,
+			func() interface{} { return &arvados.CreateOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.CollectionCreate(ctx, *opts.(*arvados.CreateOptions))
+			},
+		},
+		{
+			arvados.EndpointCollectionUpdate,
+			func() interface{} { return &arvados.UpdateOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.CollectionUpdate(ctx, *opts.(*arvados.UpdateOptions))
+			},
+		},
+		{
+			arvados.EndpointCollectionGet,
+			func() interface{} { return &arvados.GetOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.CollectionGet(ctx, *opts.(*arvados.GetOptions))
+			},
+		},
+		{
+			arvados.EndpointCollectionList,
+			func() interface{} { return &arvados.ListOptions{Limit: -1} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.CollectionList(ctx, *opts.(*arvados.ListOptions))
+			},
+		},
+		{
+			arvados.EndpointCollectionProvenance,
+			func() interface{} { return &arvados.GetOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.CollectionProvenance(ctx, *opts.(*arvados.GetOptions))
+			},
+		},
+		{
+			arvados.EndpointCollectionUsedBy,
+			func() interface{} { return &arvados.GetOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.CollectionUsedBy(ctx, *opts.(*arvados.GetOptions))
+			},
+		},
+		{
+			arvados.EndpointCollectionDelete,
+			func() interface{} { return &arvados.DeleteOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.CollectionDelete(ctx, *opts.(*arvados.DeleteOptions))
+			},
+		},
+		{
+			arvados.EndpointCollectionTrash,
+			func() interface{} { return &arvados.DeleteOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.CollectionTrash(ctx, *opts.(*arvados.DeleteOptions))
+			},
+		},
+		{
+			arvados.EndpointCollectionUntrash,
+			func() interface{} { return &arvados.UntrashOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.CollectionUntrash(ctx, *opts.(*arvados.UntrashOptions))
+			},
+		},
+		{
+			arvados.EndpointContainerCreate,
+			func() interface{} { return &arvados.CreateOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.ContainerCreate(ctx, *opts.(*arvados.CreateOptions))
+			},
+		},
+		{
+			arvados.EndpointContainerUpdate,
+			func() interface{} { return &arvados.UpdateOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.ContainerUpdate(ctx, *opts.(*arvados.UpdateOptions))
+			},
+		},
+		{
+			arvados.EndpointContainerGet,
+			func() interface{} { return &arvados.GetOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.ContainerGet(ctx, *opts.(*arvados.GetOptions))
+			},
+		},
+		{
+			arvados.EndpointContainerList,
+			func() interface{} { return &arvados.ListOptions{Limit: -1} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.ContainerList(ctx, *opts.(*arvados.ListOptions))
+			},
+		},
+		{
+			arvados.EndpointContainerDelete,
+			func() interface{} { return &arvados.DeleteOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.ContainerDelete(ctx, *opts.(*arvados.DeleteOptions))
+			},
+		},
+		{
+			arvados.EndpointContainerLock,
+			func() interface{} {
+				// Lock/Unlock respond with a subset of
+				// container attributes by default.
+				return &arvados.GetOptions{Select: []string{"uuid", "state", "priority", "auth_uuid", "locked_by_uuid"}}
+			},
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.ContainerLock(ctx, *opts.(*arvados.GetOptions))
+			},
+		},
+		{
+			arvados.EndpointContainerUnlock,
+			func() interface{} {
+				return &arvados.GetOptions{Select: []string{"uuid", "state", "priority", "auth_uuid", "locked_by_uuid"}}
+			},
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.ContainerUnlock(ctx, *opts.(*arvados.GetOptions))
+			},
+		},
+		{
+			arvados.EndpointSpecimenCreate,
+			func() interface{} { return &arvados.CreateOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.SpecimenCreate(ctx, *opts.(*arvados.CreateOptions))
+			},
+		},
+		{
+			arvados.EndpointSpecimenUpdate,
+			func() interface{} { return &arvados.UpdateOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.SpecimenUpdate(ctx, *opts.(*arvados.UpdateOptions))
+			},
+		},
+		{
+			arvados.EndpointSpecimenGet,
+			func() interface{} { return &arvados.GetOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.SpecimenGet(ctx, *opts.(*arvados.GetOptions))
+			},
+		},
+		{
+			arvados.EndpointSpecimenList,
+			func() interface{} { return &arvados.ListOptions{Limit: -1} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.SpecimenList(ctx, *opts.(*arvados.ListOptions))
+			},
+		},
+		{
+			arvados.EndpointSpecimenDelete,
+			func() interface{} { return &arvados.DeleteOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.fed.SpecimenDelete(ctx, *opts.(*arvados.DeleteOptions))
+			},
+		},
+	} {
+		rtr.addRoute(route.endpoint, route.defaultOpts, route.exec)
+		if route.endpoint.Method == "PATCH" {
+			// Accept PUT as a synonym for PATCH.
+			endpointPUT := route.endpoint
+			endpointPUT.Method = "PUT"
+			rtr.addRoute(endpointPUT, route.defaultOpts, route.exec)
+		}
+	}
+	rtr.mux.NotFound = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		httpserver.Errors(w, []string{"API endpoint not found"}, http.StatusNotFound)
+	})
+	rtr.mux.MethodNotAllowed = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		httpserver.Errors(w, []string{"API endpoint not found"}, http.StatusMethodNotAllowed)
+	})
+}
+
+// addRoute registers a handler that decodes request params into the
+// endpoint's options struct, builds an authenticated context (tokens
+// from headers plus any reader_tokens in the request), invokes exec,
+// and writes the response or error.
+func (rtr *router) addRoute(endpoint arvados.APIEndpoint, defaultOpts func() interface{}, exec routableFunc) {
+	rtr.mux.HandlerFunc(endpoint.Method, "/"+endpoint.Path, func(w http.ResponseWriter, req *http.Request) {
+		logger := ctxlog.FromContext(req.Context())
+		params, err := rtr.loadRequestParams(req, endpoint.AttrsKey)
+		if err != nil {
+			logger.WithFields(logrus.Fields{
+				"req":      req,
+				"method":   endpoint.Method,
+				"endpoint": endpoint,
+			}).WithError(err).Debug("error loading request params")
+			rtr.sendError(w, err)
+			return
+		}
+		opts := defaultOpts()
+		err = rtr.transcode(params, opts)
+		if err != nil {
+			logger.WithField("params", params).WithError(err).Debugf("error transcoding params to %T", opts)
+			rtr.sendError(w, err)
+			return
+		}
+		respOpts, err := rtr.responseOptions(opts)
+		if err != nil {
+			logger.WithField("opts", opts).WithError(err).Debugf("error getting response options from %T", opts)
+			rtr.sendError(w, err)
+			return
+		}
+
+		// Credentials come from the usual headers/cookies, plus
+		// any extra tokens supplied via the reader_tokens param.
+		creds := auth.CredentialsFromRequest(req)
+		if rt, _ := params["reader_tokens"].([]interface{}); len(rt) > 0 {
+			for _, t := range rt {
+				if t, ok := t.(string); ok {
+					creds.Tokens = append(creds.Tokens, t)
+				}
+			}
+		}
+		ctx := auth.NewContext(req.Context(), creds)
+		ctx = arvados.ContextWithRequestID(ctx, req.Header.Get("X-Request-Id"))
+		logger.WithFields(logrus.Fields{
+			"apiEndpoint": endpoint,
+			"apiOptsType": fmt.Sprintf("%T", opts),
+			"apiOpts":     opts,
+		}).Debug("exec")
+		resp, err := exec(ctx, opts)
+		if err != nil {
+			logger.WithError(err).Debugf("returning error type %T", err)
+			rtr.sendError(w, err)
+			return
+		}
+		rtr.sendResponse(w, resp, respOpts)
+	})
+}
+
+// ServeHTTP adds permissive CORS headers (except on login/logout/auth
+// paths), answers OPTIONS preflight requests directly, honors the
+// Rails-style "_method" form param as a method override, and then
+// dispatches to the route mux.
+func (rtr *router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	switch strings.SplitN(strings.TrimLeft(r.URL.Path, "/"), "/", 2)[0] {
+	case "login", "logout", "auth":
+	default:
+		w.Header().Set("Access-Control-Allow-Origin", "*")
+		w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, PUT, POST, DELETE")
+		w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type")
+		// NOTE(review): 86486400 (~1001 days) looks like a typo
+		// for 86400 (24h), but matches the value used elsewhere
+		// in this codebase -- confirm before changing.
+		w.Header().Set("Access-Control-Max-Age", "86486400")
+	}
+	if r.Method == "OPTIONS" {
+		return
+	}
+	r.ParseForm()
+	if m := r.FormValue("_method"); m != "" {
+		// Copy the request before mutating Method, so the
+		// caller's *http.Request is left unchanged.
+		r2 := *r
+		r = &r2
+		r.Method = m
+	}
+	rtr.mux.ServeHTTP(w, r)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package router
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+ "github.com/julienschmidt/httprouter"
+ check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate: hook the gocheck suites into "go test".
+func Test(t *testing.T) {
+	check.TestingT(t)
+}
+
+var _ = check.Suite(&RouterSuite{})
+
+// RouterSuite exercises the router against an in-memory API stub (no
+// Rails server needed).
+type RouterSuite struct {
+	rtr  *router
+	stub arvadostest.APIStub
+}
+
+// SetUpTest gives each test a fresh stub backend and router.
+func (s *RouterSuite) SetUpTest(c *check.C) {
+	s.stub = arvadostest.APIStub{}
+	s.rtr = &router{
+		mux: httprouter.New(),
+		fed: &s.stub,
+	}
+	s.rtr.addRoutes()
+}
+
+// TestOptions checks that each HTTP method/path/body combination is
+// routed to the expected backend method with the expected decoded
+// options (including the _method override and query/body merging).
+func (s *RouterSuite) TestOptions(c *check.C) {
+	token := arvadostest.ActiveToken
+	for _, trial := range []struct {
+		method       string
+		path         string
+		header       http.Header
+		body         string
+		shouldStatus int // zero value means 200
+		shouldCall   string
+		withOptions  interface{}
+	}{
+		{
+			method:      "GET",
+			path:        "/arvados/v1/collections/" + arvadostest.FooCollection,
+			shouldCall:  "CollectionGet",
+			withOptions: arvados.GetOptions{UUID: arvadostest.FooCollection},
+		},
+		{
+			method:      "PUT",
+			path:        "/arvados/v1/collections/" + arvadostest.FooCollection,
+			shouldCall:  "CollectionUpdate",
+			withOptions: arvados.UpdateOptions{UUID: arvadostest.FooCollection},
+		},
+		{
+			method:      "PATCH",
+			path:        "/arvados/v1/collections/" + arvadostest.FooCollection,
+			shouldCall:  "CollectionUpdate",
+			withOptions: arvados.UpdateOptions{UUID: arvadostest.FooCollection},
+		},
+		{
+			method:      "DELETE",
+			path:        "/arvados/v1/collections/" + arvadostest.FooCollection,
+			shouldCall:  "CollectionDelete",
+			withOptions: arvados.DeleteOptions{UUID: arvadostest.FooCollection},
+		},
+		{
+			method:      "POST",
+			path:        "/arvados/v1/collections",
+			shouldCall:  "CollectionCreate",
+			withOptions: arvados.CreateOptions{},
+		},
+		{
+			method:      "GET",
+			path:        "/arvados/v1/collections",
+			shouldCall:  "CollectionList",
+			withOptions: arvados.ListOptions{Limit: -1},
+		},
+		{
+			method:      "GET",
+			path:        "/arvados/v1/collections?limit=123&offset=456&include_trash=true&include_old_versions=1",
+			shouldCall:  "CollectionList",
+			withOptions: arvados.ListOptions{Limit: 123, Offset: 456, IncludeTrash: true, IncludeOldVersions: true},
+		},
+		{
+			// _method=GET in query turns this POST into a
+			// list call; params merge from query + JSON body.
+			method:      "POST",
+			path:        "/arvados/v1/collections?limit=123&_method=GET",
+			body:        `{"offset":456,"include_trash":true,"include_old_versions":true}`,
+			shouldCall:  "CollectionList",
+			withOptions: arvados.ListOptions{Limit: 123, Offset: 456, IncludeTrash: true, IncludeOldVersions: true},
+		},
+		{
+			method:       "POST",
+			path:         "/arvados/v1/collections?limit=123",
+			body:         "offset=456&include_trash=true&include_old_versions=1&_method=GET",
+			header:       http.Header{"Content-Type": {"application/x-www-form-urlencoded"}},
+			shouldCall:   "CollectionList",
+			withOptions:  arvados.ListOptions{Limit: 123, Offset: 456, IncludeTrash: true, IncludeOldVersions: true},
+		},
+		{
+			method:       "PATCH",
+			path:         "/arvados/v1/collections",
+			shouldStatus: http.StatusMethodNotAllowed,
+		},
+		{
+			method:       "PUT",
+			path:         "/arvados/v1/collections",
+			shouldStatus: http.StatusMethodNotAllowed,
+		},
+		{
+			method:       "DELETE",
+			path:         "/arvados/v1/collections",
+			shouldStatus: http.StatusMethodNotAllowed,
+		},
+	} {
+		// Reset calls captured in previous trial
+		s.stub = arvadostest.APIStub{}
+
+		c.Logf("trial: %#v", trial)
+		_, rr, _ := doRequest(c, s.rtr, token, trial.method, trial.path, trial.header, bytes.NewBufferString(trial.body))
+		if trial.shouldStatus == 0 {
+			c.Check(rr.Code, check.Equals, http.StatusOK)
+		} else {
+			c.Check(rr.Code, check.Equals, trial.shouldStatus)
+		}
+		calls := s.stub.Calls(nil)
+		if trial.shouldCall == "" {
+			c.Check(calls, check.HasLen, 0)
+		} else if len(calls) != 1 {
+			c.Check(calls, check.HasLen, 1)
+		} else {
+			c.Check(calls[0].Method, isMethodNamed, trial.shouldCall)
+			c.Check(calls[0].Options, check.DeepEquals, trial.withOptions)
+		}
+	}
+}
+
+var _ = check.Suite(&RouterIntegrationSuite{})
+
+// RouterIntegrationSuite runs the router against a real Rails API
+// server (located via ARVADOS_TEST_API_HOST).
+type RouterIntegrationSuite struct {
+	rtr *router
+}
+
+// SetUpTest builds a router backed by the test Rails API server.
+func (s *RouterIntegrationSuite) SetUpTest(c *check.C) {
+	cluster := &arvados.Cluster{}
+	cluster.TLS.Insecure = true
+	arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
+	s.rtr = New(cluster)
+}
+
+// TearDownSuite resets the test database so fixture changes made by
+// this suite don't leak into other suites.
+func (s *RouterIntegrationSuite) TearDownSuite(c *check.C) {
+	err := arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil)
+	c.Check(err, check.IsNil)
+}
+
+// TestCollectionResponses checks that list/create responses carry the
+// expected "kind" fields and that the select param filters attributes
+// without dropping "kind".
+func (s *RouterIntegrationSuite) TestCollectionResponses(c *check.C) {
+	token := arvadostest.ActiveTokenV2
+
+	// Check "get collection" response has "kind" key
+	_, rr, jresp := doRequest(c, s.rtr, token, "GET", `/arvados/v1/collections`, nil, bytes.NewBufferString(`{"include_trash":true}`))
+	c.Check(rr.Code, check.Equals, http.StatusOK)
+	c.Check(jresp["items"], check.FitsTypeOf, []interface{}{})
+	c.Check(jresp["kind"], check.Equals, "arvados#collectionList")
+	c.Check(jresp["items"].([]interface{})[0].(map[string]interface{})["kind"], check.Equals, "arvados#collection")
+
+	// Check items in list response have a "kind" key regardless
+	// of whether a uuid/pdh is selected.
+	for _, selectj := range []string{
+		``,
+		`,"select":["portable_data_hash"]`,
+		`,"select":["name"]`,
+		`,"select":["uuid"]`,
+	} {
+		_, rr, jresp = doRequest(c, s.rtr, token, "GET", `/arvados/v1/collections`, nil, bytes.NewBufferString(`{"where":{"uuid":["`+arvadostest.FooCollection+`"]}`+selectj+`}`))
+		c.Check(rr.Code, check.Equals, http.StatusOK)
+		c.Check(jresp["items"], check.FitsTypeOf, []interface{}{})
+		c.Check(jresp["items_available"], check.FitsTypeOf, float64(0))
+		c.Check(jresp["kind"], check.Equals, "arvados#collectionList")
+		item0 := jresp["items"].([]interface{})[0].(map[string]interface{})
+		c.Check(item0["kind"], check.Equals, "arvados#collection")
+		if selectj == "" || strings.Contains(selectj, "portable_data_hash") {
+			c.Check(item0["portable_data_hash"], check.Equals, arvadostest.FooCollectionPDH)
+		} else {
+			c.Check(item0["portable_data_hash"], check.IsNil)
+		}
+		if selectj == "" || strings.Contains(selectj, "name") {
+			c.Check(item0["name"], check.FitsTypeOf, "")
+		} else {
+			c.Check(item0["name"], check.IsNil)
+		}
+		if selectj == "" || strings.Contains(selectj, "uuid") {
+			c.Check(item0["uuid"], check.Equals, arvadostest.FooCollection)
+		} else {
+			c.Check(item0["uuid"], check.IsNil)
+		}
+	}
+
+	// Check "create collection" response has "kind" key
+	_, rr, jresp = doRequest(c, s.rtr, token, "POST", `/arvados/v1/collections`, http.Header{"Content-Type": {"application/x-www-form-urlencoded"}}, bytes.NewBufferString(`ensure_unique_name=true`))
+	c.Check(rr.Code, check.Equals, http.StatusOK)
+	c.Check(jresp["uuid"], check.FitsTypeOf, "")
+	c.Check(jresp["kind"], check.Equals, "arvados#collection")
+}
+
+// TestContainerList checks limit=0, select-filtered, and unfiltered
+// container list responses.
+func (s *RouterIntegrationSuite) TestContainerList(c *check.C) {
+	token := arvadostest.ActiveTokenV2
+
+	_, rr, jresp := doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers?limit=0`, nil, nil)
+	c.Check(rr.Code, check.Equals, http.StatusOK)
+	c.Check(jresp["items_available"], check.FitsTypeOf, float64(0))
+	c.Check(jresp["items_available"].(float64) > 2, check.Equals, true)
+	c.Check(jresp["items"], check.HasLen, 0)
+
+	_, rr, jresp = doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers?limit=2&select=["uuid","command"]`, nil, nil)
+	c.Check(rr.Code, check.Equals, http.StatusOK)
+	c.Check(jresp["items_available"], check.FitsTypeOf, float64(0))
+	c.Check(jresp["items_available"].(float64) > 2, check.Equals, true)
+	c.Check(jresp["items"], check.HasLen, 2)
+	item0 := jresp["items"].([]interface{})[0].(map[string]interface{})
+	c.Check(item0["uuid"], check.HasLen, 27)
+	c.Check(item0["command"], check.FitsTypeOf, []interface{}{})
+	c.Check(item0["command"].([]interface{})[0], check.FitsTypeOf, "")
+	c.Check(item0["mounts"], check.IsNil)
+
+	_, rr, jresp = doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers`, nil, nil)
+	c.Check(rr.Code, check.Equals, http.StatusOK)
+	c.Check(jresp["items_available"], check.FitsTypeOf, float64(0))
+	c.Check(jresp["items_available"].(float64) > 2, check.Equals, true)
+	avail := int(jresp["items_available"].(float64))
+	c.Check(jresp["items"], check.HasLen, avail)
+	item0 = jresp["items"].([]interface{})[0].(map[string]interface{})
+	c.Check(item0["uuid"], check.HasLen, 27)
+	c.Check(item0["command"], check.FitsTypeOf, []interface{}{})
+	c.Check(item0["command"].([]interface{})[0], check.FitsTypeOf, "")
+	c.Check(item0["mounts"], check.NotNil)
+}
+
+// TestContainerLock checks the lock/unlock state machine: lock, lock
+// again (422), unlock, unlock again (422), and the trimmed response
+// attributes.
+func (s *RouterIntegrationSuite) TestContainerLock(c *check.C) {
+	uuid := arvadostest.QueuedContainerUUID
+	token := arvadostest.AdminToken
+	_, rr, jresp := doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/lock", nil, nil)
+	c.Check(rr.Code, check.Equals, http.StatusOK)
+	c.Check(jresp["uuid"], check.HasLen, 27)
+	c.Check(jresp["state"], check.Equals, "Locked")
+	_, rr, jresp = doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/lock", nil, nil)
+	c.Check(rr.Code, check.Equals, http.StatusUnprocessableEntity)
+	c.Check(rr.Body.String(), check.Not(check.Matches), `.*"uuid":.*`)
+	_, rr, jresp = doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/unlock", nil, nil)
+	c.Check(rr.Code, check.Equals, http.StatusOK)
+	c.Check(jresp["uuid"], check.HasLen, 27)
+	c.Check(jresp["state"], check.Equals, "Queued")
+	c.Check(jresp["environment"], check.IsNil)
+	_, rr, jresp = doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/unlock", nil, nil)
+	c.Check(rr.Code, check.Equals, http.StatusUnprocessableEntity)
+	c.Check(jresp["uuid"], check.IsNil)
+}
+
+// TestFullTimestampsInResponse checks that *_at fields are rendered
+// with full (fixed-width) nanosecond precision.
+func (s *RouterIntegrationSuite) TestFullTimestampsInResponse(c *check.C) {
+	uuid := arvadostest.CollectionReplicationDesired2Confirmed2UUID
+	token := arvadostest.ActiveTokenV2
+
+	_, rr, jresp := doRequest(c, s.rtr, token, "GET", `/arvados/v1/collections/`+uuid, nil, nil)
+	c.Check(rr.Code, check.Equals, http.StatusOK)
+	c.Check(jresp["uuid"], check.Equals, uuid)
+	expectNS := map[string]int{
+		"created_at":  596506000, // fixture says 596506247, but truncated by postgresql
+		"modified_at": 596338000, // fixture says 596338465, but truncated by postgresql
+	}
+	for key, ns := range expectNS {
+		mt, ok := jresp[key].(string)
+		c.Logf("jresp[%q] == %q", key, mt)
+		c.Assert(ok, check.Equals, true)
+		t, err := time.Parse(time.RFC3339Nano, mt)
+		c.Check(err, check.IsNil)
+		c.Check(t.Nanosecond(), check.Equals, ns)
+	}
+}
+
+// TestSelectParam checks that select filters single-object responses,
+// tolerating duplicate and empty entries, and always keeps "kind".
+func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
+	uuid := arvadostest.QueuedContainerUUID
+	token := arvadostest.ActiveTokenV2
+	for _, sel := range [][]string{
+		{"uuid", "command"},
+		{"uuid", "command", "uuid"},
+		{"", "command", "uuid"},
+	} {
+		j, err := json.Marshal(sel)
+		c.Assert(err, check.IsNil)
+		_, rr, resp := doRequest(c, s.rtr, token, "GET", "/arvados/v1/containers/"+uuid+"?select="+string(j), nil, nil)
+		c.Check(rr.Code, check.Equals, http.StatusOK)
+
+		c.Check(resp["kind"], check.Equals, "arvados#container")
+		c.Check(resp["uuid"], check.HasLen, 27)
+		c.Check(resp["command"], check.HasLen, 2)
+		c.Check(resp["mounts"], check.IsNil)
+		_, hasMounts := resp["mounts"]
+		c.Check(hasMounts, check.Equals, false)
+	}
+}
+
+// TestRouteNotFound checks that unknown paths yield a 404 with a
+// JSON "errors" array.
+func (s *RouterIntegrationSuite) TestRouteNotFound(c *check.C) {
+	token := arvadostest.ActiveTokenV2
+	req := (&testReq{
+		method: "POST",
+		path:   "arvados/v1/collections/" + arvadostest.FooCollection + "/error404pls",
+		token:  token,
+	}).Request()
+	rr := httptest.NewRecorder()
+	s.rtr.ServeHTTP(rr, req)
+	c.Check(rr.Code, check.Equals, http.StatusNotFound)
+	c.Logf("body: %q", rr.Body.String())
+	var j map[string]interface{}
+	err := json.Unmarshal(rr.Body.Bytes(), &j)
+	c.Check(err, check.IsNil)
+	c.Logf("decoded: %v", j)
+	c.Assert(j["errors"], check.FitsTypeOf, []interface{}{})
+	c.Check(j["errors"].([]interface{})[0], check.Equals, "API endpoint not found")
+}
+
+// TestCORS checks that CORS headers are present on API paths and
+// absent on login/logout/auth paths, for both OPTIONS preflight and
+// ordinary requests.
+func (s *RouterIntegrationSuite) TestCORS(c *check.C) {
+	token := arvadostest.ActiveTokenV2
+	req := (&testReq{
+		method: "OPTIONS",
+		path:   "arvados/v1/collections/" + arvadostest.FooCollection,
+		header: http.Header{"Origin": {"https://example.com"}},
+		token:  token,
+	}).Request()
+	rr := httptest.NewRecorder()
+	s.rtr.ServeHTTP(rr, req)
+	c.Check(rr.Code, check.Equals, http.StatusOK)
+	c.Check(rr.Body.String(), check.HasLen, 0)
+	c.Check(rr.Result().Header.Get("Access-Control-Allow-Origin"), check.Equals, "*")
+	for _, hdr := range []string{"Authorization", "Content-Type"} {
+		c.Check(rr.Result().Header.Get("Access-Control-Allow-Headers"), check.Matches, ".*"+hdr+".*")
+	}
+	for _, method := range []string{"GET", "HEAD", "PUT", "POST", "DELETE"} {
+		c.Check(rr.Result().Header.Get("Access-Control-Allow-Methods"), check.Matches, ".*"+method+".*")
+	}
+
+	for _, unsafe := range []string{"login", "logout", "auth", "auth/foo", "login/?blah"} {
+		req := (&testReq{
+			method: "OPTIONS",
+			path:   unsafe,
+			header: http.Header{"Origin": {"https://example.com"}},
+			token:  token,
+		}).Request()
+		rr := httptest.NewRecorder()
+		s.rtr.ServeHTTP(rr, req)
+		c.Check(rr.Code, check.Equals, http.StatusOK)
+		c.Check(rr.Body.String(), check.HasLen, 0)
+		c.Check(rr.Result().Header.Get("Access-Control-Allow-Origin"), check.Equals, "")
+		c.Check(rr.Result().Header.Get("Access-Control-Allow-Methods"), check.Equals, "")
+		c.Check(rr.Result().Header.Get("Access-Control-Allow-Headers"), check.Equals, "")
+
+		req = (&testReq{
+			method: "POST",
+			path:   unsafe,
+			header: http.Header{"Origin": {"https://example.com"}},
+			token:  token,
+		}).Request()
+		rr = httptest.NewRecorder()
+		s.rtr.ServeHTTP(rr, req)
+		c.Check(rr.Result().Header.Get("Access-Control-Allow-Origin"), check.Equals, "")
+		c.Check(rr.Result().Header.Get("Access-Control-Allow-Methods"), check.Equals, "")
+		c.Check(rr.Result().Header.Get("Access-Control-Allow-Headers"), check.Equals, "")
+	}
+}
+
+// doRequest sends one request through rtr and returns the request,
+// the recorded response, and the response body decoded as JSON (the
+// decode error is only Check'ed, so jresp may be nil on failure).
+func doRequest(c *check.C, rtr http.Handler, token, method, path string, hdrs http.Header, body io.Reader) (*http.Request, *httptest.ResponseRecorder, map[string]interface{}) {
+	req := httptest.NewRequest(method, path, body)
+	for k, v := range hdrs {
+		req.Header[k] = v
+	}
+	req.Header.Set("Authorization", "Bearer "+token)
+	rr := httptest.NewRecorder()
+	rtr.ServeHTTP(rr, req)
+	c.Logf("response body: %s", rr.Body.String())
+	var jresp map[string]interface{}
+	err := json.Unmarshal(rr.Body.Bytes(), &jresp)
+	c.Check(err, check.IsNil)
+	return req, rr, jresp
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package rpc
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+type TokenProvider func(context.Context) ([]string, error)
+
+type Conn struct {
+ clusterID string
+ httpClient http.Client
+ baseURL url.URL
+ tokenProvider TokenProvider
+}
+
+func NewConn(clusterID string, url *url.URL, insecure bool, tp TokenProvider) *Conn {
+ transport := http.DefaultTransport
+ if insecure {
+ // It's not safe to copy *http.DefaultTransport
+ // because it has a mutex (which might be locked)
+ // protecting a private map (which might not be nil).
+ // So we build our own, using the Go 1.12 default
+ // values, ignoring any changes the application has
+ // made to http.DefaultTransport.
+ transport = &http.Transport{
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ }
+ return &Conn{
+ clusterID: clusterID,
+ httpClient: http.Client{Transport: transport},
+ baseURL: *url,
+ tokenProvider: tp,
+ }
+}
+
+func (conn *Conn) requestAndDecode(ctx context.Context, dst interface{}, ep arvados.APIEndpoint, body io.Reader, opts interface{}) error {
+ aClient := arvados.Client{
+ Client: &conn.httpClient,
+ Scheme: conn.baseURL.Scheme,
+ APIHost: conn.baseURL.Host,
+ }
+ tokens, err := conn.tokenProvider(ctx)
+ if err != nil {
+ return err
+ } else if len(tokens) > 0 {
+ ctx = arvados.ContextWithAuthorization(ctx, "Bearer "+tokens[0])
+ } else {
+ // Use a non-empty auth string to ensure we override
+ // any default token set on aClient -- and to avoid
+ // having the remote prompt us to send a token by
+ // responding 401.
+ ctx = arvados.ContextWithAuthorization(ctx, "Bearer -")
+ }
+
+ // Encode opts to JSON and decode from there to a
+ // map[string]interface{}, so we can munge the query params
+ // using the JSON key names specified by opts' struct tags.
+ j, err := json.Marshal(opts)
+ if err != nil {
+ return fmt.Errorf("%T: requestAndDecode: Marshal opts: %s", conn, err)
+ }
+ var params map[string]interface{}
+ err = json.Unmarshal(j, &params)
+ if err != nil {
+ return fmt.Errorf("%T: requestAndDecode: Unmarshal opts: %s", conn, err)
+ }
+ if attrs, ok := params["attrs"]; ok && ep.AttrsKey != "" {
+ params[ep.AttrsKey] = attrs
+ delete(params, "attrs")
+ }
+ if limit, ok := params["limit"].(float64); ok && limit < 0 {
+ // Negative limit means "not specified" here, but some
+ // servers/versions do not accept that, so we need to
+ // remove it entirely.
+ delete(params, "limit")
+ }
+ if len(tokens) > 1 {
+ params["reader_tokens"] = tokens[1:]
+ }
+ path := ep.Path
+ if strings.Contains(ep.Path, "/:uuid") {
+ uuid, _ := params["uuid"].(string)
+ path = strings.Replace(path, "/:uuid", "/"+uuid, 1)
+ delete(params, "uuid")
+ }
+ return aClient.RequestAndDecodeContext(ctx, dst, ep.Method, path, body, params)
+}
+
+func (conn *Conn) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {
+ ep := arvados.EndpointCollectionCreate
+ var resp arvados.Collection
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) CollectionUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Collection, error) {
+ ep := arvados.EndpointCollectionUpdate
+ var resp arvados.Collection
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) CollectionGet(ctx context.Context, options arvados.GetOptions) (arvados.Collection, error) {
+ ep := arvados.EndpointCollectionGet
+ var resp arvados.Collection
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) CollectionList(ctx context.Context, options arvados.ListOptions) (arvados.CollectionList, error) {
+ ep := arvados.EndpointCollectionList
+ var resp arvados.CollectionList
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) CollectionProvenance(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {
+ ep := arvados.EndpointCollectionProvenance
+ var resp map[string]interface{}
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) CollectionUsedBy(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {
+ ep := arvados.EndpointCollectionUsedBy
+ var resp map[string]interface{}
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) CollectionDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {
+ ep := arvados.EndpointCollectionDelete
+ var resp arvados.Collection
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) CollectionTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {
+ ep := arvados.EndpointCollectionTrash
+ var resp arvados.Collection
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) CollectionUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Collection, error) {
+ ep := arvados.EndpointCollectionUntrash
+ var resp arvados.Collection
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Container, error) {
+ ep := arvados.EndpointContainerCreate
+ var resp arvados.Container
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {
+ ep := arvados.EndpointContainerUpdate
+ var resp arvados.Container
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
+ ep := arvados.EndpointContainerGet
+ var resp arvados.Container
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {
+ ep := arvados.EndpointContainerList
+ var resp arvados.ContainerList
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Container, error) {
+ ep := arvados.EndpointContainerDelete
+ var resp arvados.Container
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerLock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
+ ep := arvados.EndpointContainerLock
+ var resp arvados.Container
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerUnlock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
+ ep := arvados.EndpointContainerUnlock
+ var resp arvados.Container
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
+ ep := arvados.EndpointSpecimenCreate
+ var resp arvados.Specimen
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) SpecimenUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Specimen, error) {
+ ep := arvados.EndpointSpecimenUpdate
+ var resp arvados.Specimen
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) SpecimenGet(ctx context.Context, options arvados.GetOptions) (arvados.Specimen, error) {
+ ep := arvados.EndpointSpecimenGet
+ var resp arvados.Specimen
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
+ ep := arvados.EndpointSpecimenList
+ var resp arvados.SpecimenList
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) SpecimenDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Specimen, error) {
+ ep := arvados.EndpointSpecimenDelete
+ var resp arvados.Specimen
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {
+ ep := arvados.EndpointAPIClientAuthorizationCurrent
+ var resp arvados.APIClientAuthorization
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package rpc
+
+import (
+ "context"
+ "net/url"
+ "os"
+ "testing"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ "github.com/sirupsen/logrus"
+ check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+var _ = check.Suite(&RPCSuite{})
+
+const contextKeyTestTokens = "testTokens"
+
+type RPCSuite struct {
+ log logrus.FieldLogger
+ ctx context.Context
+ conn *Conn
+}
+
+func (s *RPCSuite) SetUpTest(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ s.ctx = context.WithValue(ctx, contextKeyTestTokens, []string{arvadostest.ActiveToken})
+ s.conn = NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_TEST_API_HOST")}, true, func(ctx context.Context) ([]string, error) {
+ return ctx.Value(contextKeyTestTokens).([]string), nil
+ })
+}
+
+func (s *RPCSuite) TestCollectionCreate(c *check.C) {
+ coll, err := s.conn.CollectionCreate(s.ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
+ "owner_uuid": arvadostest.ActiveUserUUID,
+ "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+ }})
+ c.Check(err, check.IsNil)
+ c.Check(coll.UUID, check.HasLen, 27)
+}
+
+func (s *RPCSuite) TestSpecimenCRUD(c *check.C) {
+ sp, err := s.conn.SpecimenCreate(s.ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
+ "owner_uuid": arvadostest.ActiveUserUUID,
+ "properties": map[string]string{"foo": "bar"},
+ }})
+ c.Check(err, check.IsNil)
+ c.Check(sp.UUID, check.HasLen, 27)
+ c.Check(sp.Properties, check.HasLen, 1)
+ c.Check(sp.Properties["foo"], check.Equals, "bar")
+
+ spGet, err := s.conn.SpecimenGet(s.ctx, arvados.GetOptions{UUID: sp.UUID})
+ c.Check(spGet.UUID, check.Equals, sp.UUID)
+ c.Check(spGet.Properties["foo"], check.Equals, "bar")
+
+ spList, err := s.conn.SpecimenList(s.ctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", sp.UUID}}})
+ c.Check(spList.ItemsAvailable, check.Equals, 1)
+ c.Assert(spList.Items, check.HasLen, 1)
+ c.Check(spList.Items[0].UUID, check.Equals, sp.UUID)
+ c.Check(spList.Items[0].Properties["foo"], check.Equals, "bar")
+
+ anonCtx := context.WithValue(context.Background(), contextKeyTestTokens, []string{arvadostest.AnonymousToken})
+ spList, err = s.conn.SpecimenList(anonCtx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", sp.UUID}}})
+ c.Check(spList.ItemsAvailable, check.Equals, 0)
+ c.Check(spList.Items, check.HasLen, 0)
+
+ spDel, err := s.conn.SpecimenDelete(s.ctx, arvados.DeleteOptions{UUID: sp.UUID})
+ c.Check(spDel.UUID, check.Equals, sp.UUID)
+}
package controller
import (
+ "context"
"net/http"
"os"
"path/filepath"
handler := &Handler{Cluster: &arvados.Cluster{
ClusterID: "zzzzz",
PostgreSQL: integrationTestCluster().PostgreSQL,
+
+ EnableBetaController14287: enableBetaController14287,
}}
handler.Cluster.TLS.Insecure = true
arvadostest.SetServiceURL(&handler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
srv := &httpserver.Server{
Server: http.Server{
- Handler: httpserver.AddRequestIDs(httpserver.LogRequests(log, handler)),
+ Handler: httpserver.HandlerWithContext(
+ ctxlog.Context(context.Background(), log),
+ httpserver.AddRequestIDs(httpserver.LogRequests(handler))),
},
Addr: ":",
}
defer wg.Done()
err := cq.Unlock(uuid)
c.Check(err, check.NotNil)
- c.Check(err, check.ErrorMatches, ".*cannot unlock when Queued*.")
+ c.Check(err, check.ErrorMatches, ".*cannot unlock when Queued.*")
err = cq.Lock(uuid)
c.Check(err, check.IsNil)
// a fake queue and cloud driver. The fake cloud driver injects
// artificial errors in order to exercise a variety of code paths.
func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
- drivers["test"] = s.stubDriver
+ Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
queue := &test.Queue{
ChooseType: func(ctr *arvados.Container) (arvados.InstanceType, error) {
func (s *DispatcherSuite) TestAPIPermissions(c *check.C) {
s.cluster.ManagementToken = "abcdefgh"
- drivers["test"] = s.stubDriver
+ Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
s.disp.queue = &test.Queue{}
go s.disp.run()
func (s *DispatcherSuite) TestAPIDisabled(c *check.C) {
s.cluster.ManagementToken = ""
- drivers["test"] = s.stubDriver
+ Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
s.disp.queue = &test.Queue{}
go s.disp.run()
func (s *DispatcherSuite) TestInstancesAPI(c *check.C) {
s.cluster.ManagementToken = "abcdefgh"
s.cluster.Containers.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
- drivers["test"] = s.stubDriver
+ Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
s.disp.queue = &test.Queue{}
go s.disp.run()
"golang.org/x/crypto/ssh"
)
-var drivers = map[string]cloud.Driver{
+// Map of available cloud drivers.
+// Clusters.*.Containers.CloudVMs.Driver configuration values
+// correspond to keys in this map.
+var Drivers = map[string]cloud.Driver{
"azure": azure.Driver,
"ec2": ec2.Driver,
}
func newInstanceSet(cluster *arvados.Cluster, setID cloud.InstanceSetID, logger logrus.FieldLogger, reg *prometheus.Registry) (cloud.InstanceSet, error) {
- driver, ok := drivers[cluster.Containers.CloudVMs.Driver]
+ driver, ok := Drivers[cluster.Containers.CloudVMs.Driver]
if !ok {
return nil, fmt.Errorf("unsupported cloud driver %q", cluster.Containers.CloudVMs.Driver)
}
return is.InstanceSet.Create(it, image, allTags, init, pk)
}
-// Filters the instances returned by the wrapped InstanceSet's
+// Filter the instances returned by the wrapped InstanceSet's
// Instances() method (in case the wrapped InstanceSet didn't do this
// itself).
type filteringInstanceSet struct {
return exr.client, exr.clientErr
}
-// Create a new SSH client.
-func (exr *Executor) setupSSHClient() (*ssh.Client, error) {
- target := exr.Target()
- addr := target.Address()
+func (exr *Executor) TargetHostPort() (string, string) {
+ addr := exr.Target().Address()
if addr == "" {
- return nil, errors.New("instance has no address")
+ return "", ""
}
- if h, p, err := net.SplitHostPort(addr); err != nil || p == "" {
+ h, p, err := net.SplitHostPort(addr)
+ if err != nil || p == "" {
// Target address does not specify a port. Use
// targetPort, or "ssh".
if h == "" {
if p = exr.targetPort; p == "" {
p = "ssh"
}
- addr = net.JoinHostPort(h, p)
+ }
+ return h, p
+}
+
+// Create a new SSH client.
+func (exr *Executor) setupSSHClient() (*ssh.Client, error) {
+ addr := net.JoinHostPort(exr.TargetHostPort())
+ if addr == ":" {
+ return nil, errors.New("instance has no address")
}
var receivedKey ssh.PublicKey
client, err := ssh.Dial("tcp", addr, &ssh.ClientConfig{
- User: target.RemoteUser(),
+ User: exr.Target().RemoteUser(),
Auth: []ssh.AuthMethod{
ssh.PublicKeys(exr.signers...),
},
}
if exr.hostKey == nil || !bytes.Equal(exr.hostKey.Marshal(), receivedKey.Marshal()) {
- err = target.VerifyHostKey(receivedKey, client)
+ err = exr.Target().VerifyHostKey(receivedKey, client)
if err != nil {
return nil, err
}
wp.tagKeyPrefix + tagKeyIdleBehavior: string(IdleBehaviorRun),
wp.tagKeyPrefix + tagKeyInstanceSecret: secret,
}
- initCmd := cloud.InitCommand(fmt.Sprintf("umask 0177 && echo -n %q >%s", secret, instanceSecretFilename))
+ initCmd := TagVerifier{nil, secret}.InitCommand()
inst, err := wp.instanceSet.Create(it, wp.imageID, tags, initCmd, wp.installPublicKey)
wp.mtx.Lock()
defer wp.mtx.Unlock()
// Caller must have lock.
func (wp *Pool) updateWorker(inst cloud.Instance, it arvados.InstanceType) (*worker, bool) {
secret := inst.Tags()[wp.tagKeyPrefix+tagKeyInstanceSecret]
- inst = tagVerifier{inst, secret}
+ inst = TagVerifier{inst, secret}
id := inst.ID()
if wkr := wp.workers[id]; wkr != nil {
wkr.executor.SetTarget(inst)
instanceSecretLength = 40 // hex digits
)
-type tagVerifier struct {
+type TagVerifier struct {
cloud.Instance
- secret string
+ Secret string
}
-func (tv tagVerifier) VerifyHostKey(pubKey ssh.PublicKey, client *ssh.Client) error {
- if err := tv.Instance.VerifyHostKey(pubKey, client); err != cloud.ErrNotImplemented || tv.secret == "" {
+func (tv TagVerifier) InitCommand() cloud.InitCommand {
+ return cloud.InitCommand(fmt.Sprintf("umask 0177 && echo -n %q >%s", tv.Secret, instanceSecretFilename))
+}
+
+func (tv TagVerifier) VerifyHostKey(pubKey ssh.PublicKey, client *ssh.Client) error {
+ if err := tv.Instance.VerifyHostKey(pubKey, client); err != cloud.ErrNotImplemented || tv.Secret == "" {
// If the wrapped instance indicates it has a way to
// verify the key, return that decision.
return err
if err != nil {
return err
}
- if stdout.String() != tv.secret {
+ if stdout.String() != tv.Secret {
return errBadInstanceSecret
}
return nil
"flag"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
"net/url"
log.WithError(err).Info("exiting")
}
}()
+
flags := flag.NewFlagSet("", flag.ContinueOnError)
flags.SetOutput(stderr)
- configFile := flags.String("config", arvados.DefaultConfigFile, "Site configuration `file`")
+
+ loader := config.NewLoader(stdin, log)
+ loader.SetupFlags(flags)
+ versionFlag := flags.Bool("version", false, "Write version information to stdout and exit 0")
+
err = flags.Parse(args)
if err == flag.ErrHelp {
err = nil
return 0
} else if err != nil {
return 2
+ } else if *versionFlag {
+ return cmd.Version.RunCommand(prog, args, stdin, stdout, stderr)
}
- // Logged warnings are discarded for now: the config template
- // is incomplete, which causes extra warnings about keys that
- // are really OK.
- cfg, err := config.LoadFile(*configFile, ctxlog.New(ioutil.Discard, "json", "error"))
+
+ cfg, err := loader.Load()
if err != nil {
return 1
}
}
srv := &httpserver.Server{
Server: http.Server{
- Handler: httpserver.AddRequestIDs(httpserver.LogRequests(log, handler)),
+ Handler: httpserver.HandlerWithContext(ctx,
+ httpserver.AddRequestIDs(httpserver.LogRequests(handler))),
},
Addr: listen,
}
patternsegments = rest.split("/")
return sorted(self._match(collection, patternsegments, "keep:" + collection.manifest_locator()))
- def open(self, fn, mode):
+ def open(self, fn, mode, encoding=None):
collection, rest = self.get_collection(fn)
if collection is not None:
- return collection.open(rest, mode)
+ return collection.open(rest, mode, encoding=encoding)
else:
return super(CollectionFsAccess, self).open(self._abs(fn), mode)
def fetch_text(self, url):
if url.startswith("keep:"):
- with self.fsaccess.open(url, "r") as f:
+ with self.fsaccess.open(url, "r", encoding="utf-8") as f:
return f.read()
if url.startswith("arvwf:"):
record = self.api_client.workflows().get(uuid=url[6:]).execute(num_retries=self.num_retries)
import arvados_version
version = arvados_version.get_version(SETUP_DIR, "arvados_cwl")
+if os.environ.get('ARVADOS_BUILDING_VERSION', False):
+ pysdk_dep = "=={}".format(version)
+else:
+ # On dev releases, arvados-python-client may have a different timestamp
+ pysdk_dep = "<={}".format(version)
setup(name='arvados-cwl-runner',
version=version,
'schema-salad==4.2.20190417121603',
'typing >= 3.6.4',
'ruamel.yaml >=0.15.54, <=0.15.77',
- 'arvados-python-client>=1.3.0.20190205182514',
+ 'arvados-python-client{}'.format(pysdk_dep),
'setuptools',
'ciso8601 >= 2.0.0',
'networkx < 2.3'
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "context"
+
+type APIEndpoint struct {
+ Method string
+ Path string
+ // "new attributes" key for create/update requests
+ AttrsKey string
+}
+
+var (
+ EndpointCollectionCreate = APIEndpoint{"POST", "arvados/v1/collections", "collection"}
+ EndpointCollectionUpdate = APIEndpoint{"PATCH", "arvados/v1/collections/:uuid", "collection"}
+ EndpointCollectionGet = APIEndpoint{"GET", "arvados/v1/collections/:uuid", ""}
+ EndpointCollectionList = APIEndpoint{"GET", "arvados/v1/collections", ""}
+ EndpointCollectionProvenance = APIEndpoint{"GET", "arvados/v1/collections/:uuid/provenance", ""}
+ EndpointCollectionUsedBy = APIEndpoint{"GET", "arvados/v1/collections/:uuid/used_by", ""}
+ EndpointCollectionDelete = APIEndpoint{"DELETE", "arvados/v1/collections/:uuid", ""}
+ EndpointCollectionTrash = APIEndpoint{"POST", "arvados/v1/collections/:uuid/trash", ""}
+ EndpointCollectionUntrash = APIEndpoint{"POST", "arvados/v1/collections/:uuid/untrash", ""}
+ EndpointSpecimenCreate = APIEndpoint{"POST", "arvados/v1/specimens", "specimen"}
+ EndpointSpecimenUpdate = APIEndpoint{"PATCH", "arvados/v1/specimens/:uuid", "specimen"}
+ EndpointSpecimenGet = APIEndpoint{"GET", "arvados/v1/specimens/:uuid", ""}
+ EndpointSpecimenList = APIEndpoint{"GET", "arvados/v1/specimens", ""}
+ EndpointSpecimenDelete = APIEndpoint{"DELETE", "arvados/v1/specimens/:uuid", ""}
+ EndpointContainerCreate = APIEndpoint{"POST", "arvados/v1/containers", "container"}
+ EndpointContainerUpdate = APIEndpoint{"PATCH", "arvados/v1/containers/:uuid", "container"}
+ EndpointContainerGet = APIEndpoint{"GET", "arvados/v1/containers/:uuid", ""}
+ EndpointContainerList = APIEndpoint{"GET", "arvados/v1/containers", ""}
+ EndpointContainerDelete = APIEndpoint{"DELETE", "arvados/v1/containers/:uuid", ""}
+ EndpointContainerLock = APIEndpoint{"POST", "arvados/v1/containers/:uuid/lock", ""}
+ EndpointContainerUnlock = APIEndpoint{"POST", "arvados/v1/containers/:uuid/unlock", ""}
+ EndpointAPIClientAuthorizationCurrent = APIEndpoint{"GET", "arvados/v1/api_client_authorizations/current", ""}
+)
+
+type GetOptions struct {
+ UUID string `json:"uuid"`
+ Select []string `json:"select"`
+ IncludeTrash bool `json:"include_trash"`
+}
+
+type UntrashOptions struct {
+ UUID string `json:"uuid"`
+ EnsureUniqueName bool `json:"ensure_unique_name"`
+}
+
+type ListOptions struct {
+ ClusterID string `json:"cluster_id"`
+ Select []string `json:"select"`
+ Filters []Filter `json:"filters"`
+ Where map[string]interface{} `json:"where"`
+ Limit int `json:"limit"`
+ Offset int `json:"offset"`
+ Order []string `json:"order"`
+ Distinct bool `json:"distinct"`
+ Count string `json:"count"`
+ IncludeTrash bool `json:"include_trash"`
+ IncludeOldVersions bool `json:"include_old_versions"`
+}
+
+type CreateOptions struct {
+ ClusterID string `json:"cluster_id"`
+ EnsureUniqueName bool `json:"ensure_unique_name"`
+ Select []string `json:"select"`
+ Attrs map[string]interface{} `json:"attrs"`
+}
+
+type UpdateOptions struct {
+ UUID string `json:"uuid"`
+ Attrs map[string]interface{} `json:"attrs"`
+}
+
+type DeleteOptions struct {
+ UUID string `json:"uuid"`
+}
+
+type API interface {
+ CollectionCreate(ctx context.Context, options CreateOptions) (Collection, error)
+ CollectionUpdate(ctx context.Context, options UpdateOptions) (Collection, error)
+ CollectionGet(ctx context.Context, options GetOptions) (Collection, error)
+ CollectionList(ctx context.Context, options ListOptions) (CollectionList, error)
+ CollectionProvenance(ctx context.Context, options GetOptions) (map[string]interface{}, error)
+ CollectionUsedBy(ctx context.Context, options GetOptions) (map[string]interface{}, error)
+ CollectionDelete(ctx context.Context, options DeleteOptions) (Collection, error)
+ CollectionTrash(ctx context.Context, options DeleteOptions) (Collection, error)
+ CollectionUntrash(ctx context.Context, options UntrashOptions) (Collection, error)
+ ContainerCreate(ctx context.Context, options CreateOptions) (Container, error)
+ ContainerUpdate(ctx context.Context, options UpdateOptions) (Container, error)
+ ContainerGet(ctx context.Context, options GetOptions) (Container, error)
+ ContainerList(ctx context.Context, options ListOptions) (ContainerList, error)
+ ContainerDelete(ctx context.Context, options DeleteOptions) (Container, error)
+ ContainerLock(ctx context.Context, options GetOptions) (Container, error)
+ ContainerUnlock(ctx context.Context, options GetOptions) (Container, error)
+ SpecimenCreate(ctx context.Context, options CreateOptions) (Specimen, error)
+ SpecimenUpdate(ctx context.Context, options UpdateOptions) (Specimen, error)
+ SpecimenGet(ctx context.Context, options GetOptions) (Specimen, error)
+ SpecimenList(ctx context.Context, options ListOptions) (SpecimenList, error)
+ SpecimenDelete(ctx context.Context, options DeleteOptions) (Specimen, error)
+ APIClientAuthorizationCurrent(ctx context.Context, options GetOptions) (APIClientAuthorization, error)
+}
// APIClientAuthorization is an arvados#apiClientAuthorization resource.
type APIClientAuthorization struct {
- UUID string `json:"uuid,omitempty"`
- APIToken string `json:"api_token,omitempty"`
- ExpiresAt string `json:"expires_at,omitempty"`
- Scopes []string `json:"scopes,omitempty"`
+ UUID string `json:"uuid"`
+ APIToken string `json:"api_token"`
+ ExpiresAt string `json:"expires_at"`
+ Scopes []string `json:"scopes"`
}
// APIClientAuthorizationList is an arvados#apiClientAuthorizationList resource.
"io"
"io/ioutil"
"log"
- "math"
"net/http"
"net/url"
"os"
// DefaultSecureClient or InsecureHTTPClient will be used.
Client *http.Client `json:"-"`
+ // Protocol scheme: "http", "https", or "" (https)
+ Scheme string
+
// Hostname (or host:port) of Arvados API server.
APIHost string
return nil, fmt.Errorf("no host in config Services.Controller.ExternalURL: %v", ctrlURL)
}
return &Client{
+ Scheme: ctrlURL.Scheme,
APIHost: ctrlURL.Host,
Insecure: cluster.TLS.Insecure,
}, nil
insecure = true
}
return &Client{
+ Scheme: "https",
APIHost: os.Getenv("ARVADOS_API_HOST"),
AuthToken: os.Getenv("ARVADOS_API_TOKEN"),
Insecure: insecure,
// Do adds Authorization and X-Request-Id headers and then calls
// (*http.Client)Do().
func (c *Client) Do(req *http.Request) (*http.Response, error) {
- if c.AuthToken != "" {
+ if auth, _ := req.Context().Value(contextKeyAuthorization{}).(string); auth != "" {
+ req.Header.Add("Authorization", auth)
+ } else if c.AuthToken != "" {
req.Header.Add("Authorization", "OAuth2 "+c.AuthToken)
}
if req.Header.Get("X-Request-Id") == "" {
- reqid, _ := c.context().Value(contextKeyRequestID).(string)
+ reqid, _ := req.Context().Value(contextKeyRequestID{}).(string)
+ if reqid == "" {
+ reqid, _ = c.context().Value(contextKeyRequestID{}).(string)
+ }
if reqid == "" {
reqid = reqIDGen.Next()
}
return nil, err
}
var generic map[string]interface{}
- err = json.Unmarshal(j, &generic)
+ dec := json.NewDecoder(bytes.NewBuffer(j))
+ dec.UseNumber()
+ err = dec.Decode(&generic)
if err != nil {
return nil, err
}
urlValues.Set(k, v)
continue
}
- if v, ok := v.(float64); ok {
- // Unmarshal decodes all numbers as float64,
- // which can be written as 1.2345e4 in JSON,
- // but this form is not accepted for ints in
- // url params. If a number fits in an int64,
- // encode it as int64 rather than float64.
- if v, frac := math.Modf(v); frac == 0 && v <= math.MaxInt64 && v >= math.MinInt64 {
- urlValues.Set(k, fmt.Sprintf("%d", int64(v)))
- continue
+ if v, ok := v.(json.Number); ok {
+ urlValues.Set(k, v.String())
+ continue
+ }
+ if v, ok := v.(bool); ok {
+ if v {
+ urlValues.Set(k, "true")
+ } else {
+ // "foo=false", "foo=0", and "foo="
+ // are all taken as true strings, so
+ // don't send false values at all --
+ // rely on the default being false.
}
+ continue
}
j, err := json.Marshal(v)
if err != nil {
return nil, err
}
+ if bytes.Equal(j, []byte("null")) {
+ // don't add it to urlValues at all
+ continue
+ }
urlValues.Set(k, string(j))
}
return urlValues, nil
//
// path must not contain a query string.
func (c *Client) RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error {
+ return c.RequestAndDecodeContext(c.context(), dst, method, path, body, params)
+}
+
+func (c *Client) RequestAndDecodeContext(ctx context.Context, dst interface{}, method, path string, body io.Reader, params interface{}) error {
if body, ok := body.(io.Closer); ok {
// Ensure body is closed even if we error out early
defer body.Close()
if err != nil {
return err
}
+ req = req.WithContext(ctx)
req.Header.Set("Content-type", "application/x-www-form-urlencoded")
return c.DoAndDecode(dst, req)
}
return bytes.NewBufferString(v.Encode())
}
-type contextKey string
-
-var contextKeyRequestID contextKey = "X-Request-Id"
-
+// WithRequestID returns a new shallow copy of c that sends the given
+// X-Request-Id value (instead of a new randomly generated one) with
+// each subsequent request that doesn't provide its own via context or
+// header.
func (c *Client) WithRequestID(reqid string) *Client {
cc := *c
- cc.ctx = context.WithValue(cc.context(), contextKeyRequestID, reqid)
+ cc.ctx = ContextWithRequestID(cc.context(), reqid)
return &cc
}
}
func (c *Client) apiURL(path string) string {
- return "https://" + c.APIHost + "/" + path
+ scheme := c.Scheme
+ if scheme == "" {
+ scheme = "https"
+ }
+ return scheme + "://" + c.APIHost + "/" + path
}
// DiscoveryDocument is the Arvados server's description of itself.
// Collection is an arvados#collection resource.
type Collection struct {
- UUID string `json:"uuid,omitempty"`
- OwnerUUID string `json:"owner_uuid,omitempty"`
- TrashAt *time.Time `json:"trash_at,omitempty"`
- ManifestText string `json:"manifest_text"`
- UnsignedManifestText string `json:"unsigned_manifest_text,omitempty"`
- Name string `json:"name,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- ModifiedAt *time.Time `json:"modified_at,omitempty"`
- PortableDataHash string `json:"portable_data_hash,omitempty"`
- ReplicationConfirmed *int `json:"replication_confirmed,omitempty"`
- ReplicationConfirmedAt *time.Time `json:"replication_confirmed_at,omitempty"`
- ReplicationDesired *int `json:"replication_desired,omitempty"`
- StorageClassesDesired []string `json:"storage_classes_desired,omitempty"`
- StorageClassesConfirmed []string `json:"storage_classes_confirmed,omitempty"`
- StorageClassesConfirmedAt *time.Time `json:"storage_classes_confirmed_at,omitempty"`
- DeleteAt *time.Time `json:"delete_at,omitempty"`
- IsTrashed bool `json:"is_trashed,omitempty"`
+ UUID string `json:"uuid"`
+ Etag string `json:"etag"`
+ OwnerUUID string `json:"owner_uuid"`
+ TrashAt *time.Time `json:"trash_at"`
+ ManifestText string `json:"manifest_text"`
+ UnsignedManifestText string `json:"unsigned_manifest_text"`
+ Name string `json:"name"`
+ CreatedAt *time.Time `json:"created_at"`
+ ModifiedAt *time.Time `json:"modified_at"`
+ PortableDataHash string `json:"portable_data_hash"`
+ ReplicationConfirmed *int `json:"replication_confirmed"`
+ ReplicationConfirmedAt *time.Time `json:"replication_confirmed_at"`
+ ReplicationDesired *int `json:"replication_desired"`
+ StorageClassesDesired []string `json:"storage_classes_desired"`
+ StorageClassesConfirmed []string `json:"storage_classes_confirmed"`
+ StorageClassesConfirmedAt *time.Time `json:"storage_classes_confirmed_at"`
+ DeleteAt *time.Time `json:"delete_at"`
+ IsTrashed bool `json:"is_trashed"`
+ Properties map[string]interface{} `json:"properties"`
}
func (c Collection) resourceName() string {
return sds, scanner.Err()
}
-// CollectionList is an arvados#collectionList resource.
type CollectionList struct {
Items []Collection `json:"items"`
ItemsAvailable int `json:"items_available"`
"errors"
"fmt"
"net/url"
+ "os"
"git.curoverse.com/arvados.git/sdk/go/config"
)
-const DefaultConfigFile = "/etc/arvados/config.yml"
+var DefaultConfigFile = func() string {
+ if path := os.Getenv("ARVADOS_CONFIG"); path != "" {
+ return path
+ } else {
+ return "/etc/arvados/config.yml"
+ }
+}()
type Config struct {
Clusters map[string]Cluster
UnloggedAttributes []string
}
Collections struct {
- BlobSigning bool
- BlobSigningKey string
- BlobSigningTTL Duration
- CollectionVersioning bool
- DefaultTrashLifetime Duration
- DefaultReplication int
- ManagedProperties map[string]interface{}
+ BlobSigning bool
+ BlobSigningKey string
+ BlobSigningTTL Duration
+ CollectionVersioning bool
+ DefaultTrashLifetime Duration
+ DefaultReplication int
+ ManagedProperties map[string]struct {
+ Value interface{}
+ Function string
+ Protected bool
+ }
PreserveVersionIfIdle Duration
TrashSweepInterval Duration
+ TrustAllContent bool
}
Git struct {
Repositories string
Mail struct {
MailchimpAPIKey string
MailchimpListID string
- SendUserSetupNotificationEmail string
+ SendUserSetupNotificationEmail bool
IssueReporterEmailFrom string
IssueReporterEmailTo string
SupportEmailAddress string
Insecure bool
}
Users struct {
+ AnonymousUserToken string
AdminNotifierEmailFrom string
AutoAdminFirstUser bool
AutoAdminUserWithEmail string
ApplicationMimetypesWithViewIcon map[string]struct{}
ArvadosDocsite string
ArvadosPublicDataDocURL string
+ DefaultOpenIdPrefix string
EnableGettingStartedPopup bool
EnablePublicProjectsPage bool
FileViewersConfigURL string
LogViewerMaxBytes ByteSize
- MultiSiteSearch bool
+ MultiSiteSearch string
+ ProfilingEnabled bool
Repositories bool
RepositoryCache string
RunningJobLogRecordsToFetch int
SecretKeyBase string
- SecretToken string
ShowRecentCollectionsOnDashboard bool
ShowUserAgreementInline bool
ShowUserNotifications bool
FormFieldTitle string
FormFieldDescription string
Required bool
+ Position int
+ Options map[string]struct{}
}
UserProfileFormMessage string
VocabularyURL string
}
+
+ EnableBetaController14287 bool
}
type Services struct {
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "context"
+)
+
+type contextKeyRequestID struct{}
+type contextKeyAuthorization struct{}
+
+func ContextWithRequestID(ctx context.Context, reqid string) context.Context {
+ return context.WithValue(ctx, contextKeyRequestID{}, reqid)
+}
+
+// ContextWithAuthorization returns a child context that (when used
+// with (*Client)RequestAndDecodeContext) sends the given
+// Authorization header value instead of the Client's default
+// AuthToken.
+func ContextWithAuthorization(ctx context.Context, value string) context.Context {
+ return context.WithValue(ctx, contextKeyAuthorization{}, value)
+}
return
}
+func (e TransactionError) HTTPStatus() int {
+ return e.StatusCode
+}
+
func newTransactionError(req *http.Request, resp *http.Response, buf []byte) *TransactionError {
var e TransactionError
if json.Unmarshal(buf, &e) != nil {
type apiClient interface {
RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error
- UpdateBody(rsc resource) io.Reader
}
UUID: fs.uuid,
ManifestText: txt,
}
- err = fs.RequestAndDecode(nil, "PUT", "arvados/v1/collections/"+fs.uuid, fs.UpdateBody(coll), map[string]interface{}{"select": []string{"uuid"}})
+ err = fs.RequestAndDecode(nil, "PUT", "arvados/v1/collections/"+fs.uuid, nil, map[string]interface{}{
+ "collection": map[string]string{
+ "manifest_text": coll.ManifestText,
+ },
+ "select": []string{"uuid"},
+ })
if err != nil {
return fmt.Errorf("sync failed: update %s: %s", fs.uuid, err)
}
}
func (s *SiteFSSuite) TestSlashInName(c *check.C) {
- badCollection := Collection{
- Name: "bad/collection",
- OwnerUUID: fixtureAProjectUUID,
- }
- err := s.client.RequestAndDecode(&badCollection, "POST", "arvados/v1/collections", s.client.UpdateBody(&badCollection), nil)
+ var badCollection Collection
+ err := s.client.RequestAndDecode(&badCollection, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+ "collection": map[string]string{
+ "name": "bad/collection",
+ "owner_uuid": fixtureAProjectUUID,
+ },
+ })
c.Assert(err, check.IsNil)
defer s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+badCollection.UUID, nil, nil)
- badProject := Group{
- Name: "bad/project",
- GroupClass: "project",
- OwnerUUID: fixtureAProjectUUID,
- }
- err = s.client.RequestAndDecode(&badProject, "POST", "arvados/v1/groups", s.client.UpdateBody(&badProject), nil)
+ var badProject Group
+ err = s.client.RequestAndDecode(&badProject, "POST", "arvados/v1/groups", nil, map[string]interface{}{
+ "group": map[string]string{
+ "name": "bad/project",
+ "group_class": "project",
+ "owner_uuid": fixtureAProjectUUID,
+ },
+ })
c.Assert(err, check.IsNil)
defer s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/groups/"+badProject.UUID, nil, nil)
_, err = s.fs.Open("/home/A Project/oob")
c.Check(err, check.NotNil)
- oob := Collection{
- Name: "oob",
- OwnerUUID: fixtureAProjectUUID,
- }
- err = s.client.RequestAndDecode(&oob, "POST", "arvados/v1/collections", s.client.UpdateBody(&oob), nil)
+ var oob Collection
+ err = s.client.RequestAndDecode(&oob, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+ "collection": map[string]string{
+ "name": "oob",
+ "owner_uuid": fixtureAProjectUUID,
+ },
+ })
c.Assert(err, check.IsNil)
defer s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+oob.UUID, nil, nil)
c.Check(err, check.IsNil)
// Delete test.txt behind s.fs's back by updating the
- // collection record with the old (empty) ManifestText.
- err = s.client.RequestAndDecode(nil, "PATCH", "arvados/v1/collections/"+oob.UUID, s.client.UpdateBody(&oob), nil)
+ // collection record with an empty ManifestText.
+ err = s.client.RequestAndDecode(nil, "PATCH", "arvados/v1/collections/"+oob.UUID, nil, map[string]interface{}{
+ "collection": map[string]string{
+ "manifest_text": "",
+ "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+ },
+ })
c.Assert(err, check.IsNil)
err = project.Sync()
// Group is an arvados#group record
type Group struct {
- UUID string `json:"uuid,omitempty"`
- Name string `json:"name,omitempty"`
- OwnerUUID string `json:"owner_uuid,omitempty"`
+ UUID string `json:"uuid"`
+ Name string `json:"name"`
+ OwnerUUID string `json:"owner_uuid"`
GroupClass string `json:"group_class"`
}
// Link is an arvados#link record
type Link struct {
-	UUID      string `json:"uuid,omiempty"`
+	UUID      string `json:"uuid"`
- OwnerUUID string `json:"owner_uuid,omitempty"`
- Name string `json:"name,omitempty"`
- LinkClass string `json:"link_class,omitempty"`
- HeadUUID string `json:"head_uuid,omitempty"`
- HeadKind string `json:"head_kind,omitempty"`
- TailUUID string `json:"tail_uuid,omitempty"`
- TailKind string `json:"tail_kind,omitempty"`
+ OwnerUUID string `json:"owner_uuid"`
+ Name string `json:"name"`
+ LinkClass string `json:"link_class"`
+ HeadUUID string `json:"head_uuid"`
+ HeadKind string `json:"head_kind"`
+ TailUUID string `json:"tail_uuid"`
+ TailKind string `json:"tail_kind"`
}
// UserList is an arvados#userList resource.
// Log is an arvados#log record
type Log struct {
- ID uint64 `json:"id,omitempty"`
- UUID string `json:"uuid,omitempty"`
- ObjectUUID string `json:"object_uuid,omitempty"`
- ObjectOwnerUUID string `json:"object_owner_uuid,omitempty"`
- EventType string `json:"event_type,omitempty"`
- EventAt *time.Time `json:"event,omitempty"`
- Properties map[string]interface{} `json:"properties,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
+ ID uint64 `json:"id"`
+ UUID string `json:"uuid"`
+ ObjectUUID string `json:"object_uuid"`
+ ObjectOwnerUUID string `json:"object_owner_uuid"`
+ EventType string `json:"event_type"`
+ EventAt *time.Time `json:"event"`
+ Properties map[string]interface{} `json:"properties"`
+ CreatedAt *time.Time `json:"created_at"`
}
// LogList is an arvados#logList resource.
Domain string `json:"domain"`
Hostname string `json:"hostname"`
IPAddress string `json:"ip_address"`
- LastPingAt *time.Time `json:"last_ping_at,omitempty"`
+ LastPingAt *time.Time `json:"last_ping_at"`
SlotNumber int `json:"slot_number"`
Status string `json:"status"`
- JobUUID string `json:"job_uuid,omitempty"`
+ JobUUID string `json:"job_uuid"`
Properties NodeProperties `json:"properties"`
}
package arvados
-import "encoding/json"
+import (
+ "encoding/json"
+ "fmt"
+)
// ResourceListParams expresses which results are requested in a
// list/index API.
Operand interface{}
}
-// MarshalJSON encodes a Filter in the form expected by the API.
+// MarshalJSON encodes a Filter to a JSON array.
func (f *Filter) MarshalJSON() ([]byte, error) {
return json.Marshal([]interface{}{f.Attr, f.Operator, f.Operand})
}
+
+// UnmarshalJSON decodes a JSON array to a Filter.
+func (f *Filter) UnmarshalJSON(data []byte) error {
+ var elements []interface{}
+ err := json.Unmarshal(data, &elements)
+ if err != nil {
+ return err
+ }
+ if len(elements) != 3 {
+ return fmt.Errorf("invalid filter %q: must have 3 elements", data)
+ }
+ attr, ok := elements[0].(string)
+ if !ok {
+ return fmt.Errorf("invalid filter attr %q", elements[0])
+ }
+ op, ok := elements[1].(string)
+ if !ok {
+ return fmt.Errorf("invalid filter operator %q", elements[1])
+ }
+ operand := elements[2]
+ switch operand.(type) {
+	case string, float64, []interface{}, bool, nil:
+ default:
+ return fmt.Errorf("invalid filter operand %q", elements[2])
+ }
+ *f = Filter{attr, op, operand}
+ return nil
+}
t.Errorf("Encoded as %q, expected %q", buf, expect)
}
}
+
+func TestMarshalFiltersWithNil(t *testing.T) {
+ buf, err := json.Marshal([]Filter{
+ {Attr: "modified_at", Operator: "=", Operand: nil}})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if expect := []byte(`[["modified_at","=",null]]`); 0 != bytes.Compare(buf, expect) {
+ t.Errorf("Encoded as %q, expected %q", buf, expect)
+ }
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "time"
+
+type Specimen struct {
+ UUID string `json:"uuid"`
+ OwnerUUID string `json:"owner_uuid"`
+ CreatedAt time.Time `json:"created_at"`
+ ModifiedAt time.Time `json:"modified_at"`
+ UpdatedAt time.Time `json:"updated_at"`
+ Properties map[string]interface{} `json:"properties"`
+}
+
+type SpecimenList struct {
+ Items []Specimen `json:"items"`
+ ItemsAvailable int `json:"items_available"`
+ Offset int `json:"offset"`
+ Limit int `json:"limit"`
+}
// User is an arvados#user record
type User struct {
- UUID string `json:"uuid,omitempty"`
+ UUID string `json:"uuid"`
IsActive bool `json:"is_active"`
IsAdmin bool `json:"is_admin"`
- Username string `json:"username,omitempty"`
- Email string `json:"email,omitempty"`
+ Username string `json:"username"`
+ Email string `json:"email"`
}
// UserList is an arvados#userList resource.
// Workflow is an arvados#workflow resource.
type Workflow struct {
- UUID string `json:"uuid,omitempty"`
- OwnerUUID string `json:"owner_uuid,omitempty"`
- Name string `json:"name,omitempty"`
- Description string `json:"description,omitempty"`
- Definition string `json:"definition,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- ModifiedAt *time.Time `json:"modified_at,omitempty"`
+ UUID string `json:"uuid"`
+ OwnerUUID string `json:"owner_uuid"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Definition string `json:"definition"`
+ CreatedAt *time.Time `json:"created_at"`
+ ModifiedAt *time.Time `json:"modified_at"`
}
// WorkflowList is an arvados#workflowList resource.
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+import (
+ "context"
+ "errors"
+ "sync"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+var ErrStubUnimplemented = errors.New("stub unimplemented")
+
+type APIStub struct {
+ // The error to return from every stubbed API method.
+ Error error
+ calls []APIStubCall
+ mtx sync.Mutex
+}
+
+func (as *APIStub) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {
+ as.appendCall(as.CollectionCreate, ctx, options)
+ return arvados.Collection{}, as.Error
+}
+func (as *APIStub) CollectionUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Collection, error) {
+ as.appendCall(as.CollectionUpdate, ctx, options)
+ return arvados.Collection{}, as.Error
+}
+func (as *APIStub) CollectionGet(ctx context.Context, options arvados.GetOptions) (arvados.Collection, error) {
+ as.appendCall(as.CollectionGet, ctx, options)
+ return arvados.Collection{}, as.Error
+}
+func (as *APIStub) CollectionList(ctx context.Context, options arvados.ListOptions) (arvados.CollectionList, error) {
+ as.appendCall(as.CollectionList, ctx, options)
+ return arvados.CollectionList{}, as.Error
+}
+func (as *APIStub) CollectionProvenance(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {
+ as.appendCall(as.CollectionProvenance, ctx, options)
+ return nil, as.Error
+}
+func (as *APIStub) CollectionUsedBy(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {
+ as.appendCall(as.CollectionUsedBy, ctx, options)
+ return nil, as.Error
+}
+func (as *APIStub) CollectionDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {
+ as.appendCall(as.CollectionDelete, ctx, options)
+ return arvados.Collection{}, as.Error
+}
+func (as *APIStub) CollectionTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {
+ as.appendCall(as.CollectionTrash, ctx, options)
+ return arvados.Collection{}, as.Error
+}
+func (as *APIStub) CollectionUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Collection, error) {
+ as.appendCall(as.CollectionUntrash, ctx, options)
+ return arvados.Collection{}, as.Error
+}
+func (as *APIStub) ContainerCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Container, error) {
+ as.appendCall(as.ContainerCreate, ctx, options)
+ return arvados.Container{}, as.Error
+}
+func (as *APIStub) ContainerUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {
+ as.appendCall(as.ContainerUpdate, ctx, options)
+ return arvados.Container{}, as.Error
+}
+func (as *APIStub) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
+ as.appendCall(as.ContainerGet, ctx, options)
+ return arvados.Container{}, as.Error
+}
+func (as *APIStub) ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {
+ as.appendCall(as.ContainerList, ctx, options)
+ return arvados.ContainerList{}, as.Error
+}
+func (as *APIStub) ContainerDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Container, error) {
+ as.appendCall(as.ContainerDelete, ctx, options)
+ return arvados.Container{}, as.Error
+}
+func (as *APIStub) ContainerLock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
+ as.appendCall(as.ContainerLock, ctx, options)
+ return arvados.Container{}, as.Error
+}
+func (as *APIStub) ContainerUnlock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
+ as.appendCall(as.ContainerUnlock, ctx, options)
+ return arvados.Container{}, as.Error
+}
+func (as *APIStub) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
+ as.appendCall(as.SpecimenCreate, ctx, options)
+ return arvados.Specimen{}, as.Error
+}
+func (as *APIStub) SpecimenUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Specimen, error) {
+ as.appendCall(as.SpecimenUpdate, ctx, options)
+ return arvados.Specimen{}, as.Error
+}
+func (as *APIStub) SpecimenGet(ctx context.Context, options arvados.GetOptions) (arvados.Specimen, error) {
+ as.appendCall(as.SpecimenGet, ctx, options)
+ return arvados.Specimen{}, as.Error
+}
+func (as *APIStub) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
+ as.appendCall(as.SpecimenList, ctx, options)
+ return arvados.SpecimenList{}, as.Error
+}
+func (as *APIStub) SpecimenDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Specimen, error) {
+ as.appendCall(as.SpecimenDelete, ctx, options)
+ return arvados.Specimen{}, as.Error
+}
+func (as *APIStub) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {
+ as.appendCall(as.APIClientAuthorizationCurrent, ctx, options)
+ return arvados.APIClientAuthorization{}, as.Error
+}
+
+func (as *APIStub) appendCall(method interface{}, ctx context.Context, options interface{}) {
+ as.mtx.Lock()
+ defer as.mtx.Unlock()
+ as.calls = append(as.calls, APIStubCall{method, ctx, options})
+}
+
+func (as *APIStub) Calls(method interface{}) []APIStubCall {
+ as.mtx.Lock()
+ defer as.mtx.Unlock()
+ var calls []APIStubCall
+ for _, call := range as.calls {
+ if method == nil || call.Method == method {
+ calls = append(calls, call)
+ }
+ }
+ return calls
+}
+
+type APIStubCall struct {
+ Method interface{}
+ Context context.Context
+ Options interface{}
+}
ASubprojectUUID = "zzzzz-j7d0g-axqo7eu9pwvna1x"
FooAndBarFilesInDirUUID = "zzzzz-4zz18-foonbarfilesdir"
- FooAndBarFilesInDirPDH = "6bbac24198d09a93975f60098caf0bdf+62"
+ FooAndBarFilesInDirPDH = "870369fc72738603c2fad16664e50e2d+58"
Dispatch1Token = "kwi8oowusvbutahacwk2geulqewy5oaqmpalczfna4b6bb0hfw"
Dispatch1AuthUUID = "zzzzz-gj3su-k9dvestay1plssr"
FooCollectionSharingToken = "iknqgmunrhgsyfok8uzjlwun9iscwm3xacmzmg65fa1j1lpdss"
WorkflowWithDefinitionYAMLUUID = "zzzzz-7fd4e-validworkfloyml"
+
+ CollectionReplicationDesired2Confirmed2UUID = "zzzzz-4zz18-434zv1tnnf2rygp"
)
// PathologicalManifest : A valid manifest designed to test
package auth
import (
+ "context"
"encoding/base64"
"net/http"
"net/url"
return &Credentials{Tokens: []string{}}
}
+func NewContext(ctx context.Context, c *Credentials) context.Context {
+ return context.WithValue(ctx, contextKeyCredentials{}, c)
+}
+
+func FromContext(ctx context.Context) (*Credentials, bool) {
+ c, ok := ctx.Value(contextKeyCredentials{}).(*Credentials)
+ return c, ok
+}
+
func CredentialsFromRequest(r *http.Request) *Credentials {
- if c, ok := r.Context().Value(contextKeyCredentials).(*Credentials); ok {
+ if c, ok := FromContext(r.Context()); ok {
// preloaded by middleware
return c
}
"net/http"
)
-type contextKey string
-
-var contextKeyCredentials contextKey = "credentials"
+type contextKeyCredentials struct{}
// LoadToken wraps the next handler, adding credentials to the request
// context so subsequent handlers can access them efficiently via
// CredentialsFromRequest.
func LoadToken(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if _, ok := r.Context().Value(contextKeyCredentials).(*Credentials); !ok {
- r = r.WithContext(context.WithValue(r.Context(), contextKeyCredentials, CredentialsFromRequest(r)))
+ if _, ok := r.Context().Value(contextKeyCredentials{}).(*Credentials); !ok {
+ r = r.WithContext(context.WithValue(r.Context(), contextKeyCredentials{}, CredentialsFromRequest(r)))
}
next.ServeHTTP(w, r)
})
}
func Error(w http.ResponseWriter, error string, code int) {
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("X-Content-Type-Options", "nosniff")
- w.WriteHeader(code)
- json.NewEncoder(w).Encode(ErrorResponse{Errors: []string{error}})
+ Errors(w, []string{error}, code)
}
func Errors(w http.ResponseWriter, errors []string, code int) {
"net/http"
"time"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
"git.curoverse.com/arvados.git/sdk/go/stats"
"github.com/sirupsen/logrus"
)
var (
requestTimeContextKey = contextKey{"requestTime"}
- loggerContextKey = contextKey{"logger"}
)
+// HandlerWithContext returns an http.Handler that changes the request
+// context to ctx (replacing http.Server's default
+// context.Background()), then calls next.
+func HandlerWithContext(ctx context.Context, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+
// LogRequests wraps an http.Handler, logging each request and
-// response via logger.
-func LogRequests(logger logrus.FieldLogger, h http.Handler) http.Handler {
- if logger == nil {
- logger = logrus.StandardLogger()
- }
+// response.
+func LogRequests(h http.Handler) http.Handler {
return http.HandlerFunc(func(wrapped http.ResponseWriter, req *http.Request) {
w := &responseTimer{ResponseWriter: WrapResponseWriter(wrapped)}
- lgr := logger.WithFields(logrus.Fields{
+ lgr := ctxlog.FromContext(req.Context()).WithFields(logrus.Fields{
"RequestID": req.Header.Get("X-Request-Id"),
"remoteAddr": req.RemoteAddr,
"reqForwardedFor": req.Header.Get("X-Forwarded-For"),
})
ctx := req.Context()
ctx = context.WithValue(ctx, &requestTimeContextKey, time.Now())
- ctx = context.WithValue(ctx, &loggerContextKey, lgr)
+ ctx = ctxlog.Context(ctx, lgr)
req = req.WithContext(ctx)
logRequest(w, req, lgr)
}
func Logger(req *http.Request) logrus.FieldLogger {
- if lgr, ok := req.Context().Value(&loggerContextKey).(logrus.FieldLogger); ok {
- return lgr
- } else {
- return logrus.StandardLogger()
- }
+ return ctxlog.FromContext(req.Context())
}
func logRequest(w *responseTimer, req *http.Request, lgr *logrus.Entry) {
import (
"bytes"
+ "context"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"time"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
"github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
log.Formatter = &logrus.JSONFormatter{
TimestampFormat: time.RFC3339Nano,
}
+ ctx := ctxlog.Context(context.Background(), log)
+
+ h := AddRequestIDs(LogRequests(
+ http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.Write([]byte("hello world"))
+ })))
- h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- w.Write([]byte("hello world"))
- })
req, err := http.NewRequest("GET", "https://foo.example/bar", nil)
req.Header.Set("X-Forwarded-For", "1.2.3.4:12345")
c.Assert(err, check.IsNil)
resp := httptest.NewRecorder()
- AddRequestIDs(LogRequests(log, h)).ServeHTTP(resp, req)
+
+ HandlerWithContext(ctx, h).ServeHTTP(resp, req)
dec := json.NewDecoder(captured)
//
// For the metrics to be accurate, the caller must ensure every
// request passed to the Handler also passes through
-// LogRequests(logger, ...), and vice versa.
+// LogRequests(...), and vice versa.
//
// If registry is nil, a new registry is created.
//
// It's not safe to copy *http.DefaultTransport
// because it has a mutex (which might be locked)
// protecting a private map (which might not be nil).
- // So we build our own, using the Go 1.10 default
+ // So we build our own, using the Go 1.12 default
// values, ignoring any changes the application has
// made to http.DefaultTransport.
Transport: &http.Transport{
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: tlsTimeout,
- ExpectContinueTimeout: time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: arvadosclient.MakeTLSConfig(kc.Arvados.ApiInsecure),
},
}
import arvados_version
version = arvados_version.get_version(SETUP_DIR, "arvados_pam")
+if os.environ.get('ARVADOS_BUILDING_VERSION', False):
+ pysdk_dep = "=={}".format(version)
+else:
+ # On dev releases, arvados-python-client may have a different timestamp
+ pysdk_dep = "<={}".format(version)
short_tests_only = False
if '--short-tests-only' in sys.argv:
('share/doc/arvados-pam/examples', glob.glob('examples/*')),
],
install_requires=[
- 'arvados-python-client>=0.1.20150801000000',
+ 'arvados-python-client{}'.format(pysdk_dep),
],
test_suite='tests',
tests_require=['pbr<1.7.0', 'mock>=1.0', 'python-pam'],
}
http {
log_format customlog
- '[$time_local] $server_name $status $body_bytes_sent $request_time $request_method "$scheme://$http_host$request_uri" $remote_addr:$remote_port '
+ '[$time_local] "$http_x_request_id" $server_name $status $body_bytes_sent $request_time $request_method "$scheme://$http_host$request_uri" $remote_addr:$remote_port '
'"$http_referer" "$http_user_agent"';
access_log "{{ACCESSLOG}}" customlog;
client_body_temp_path "{{TMPDIR}}";
f.write("""
Clusters:
zzzzz:
+ EnableBetaController14287: {beta14287}
ManagementToken: e687950a23c3a9bceec28c6223a06c79
API:
RequestTimeout: 30s
+ Logging:
+ Level: "{loglevel}"
+ HTTPRequestTimeout: 30s
PostgreSQL:
ConnectionPool: 32
Connection:
InternalURLs:
"https://localhost:{railsport}": {{}}
""".format(
+ beta14287=('true' if '14287' in os.environ.get('ARVADOS_EXPERIMENTAL', '') else 'false'),
+ loglevel=('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug'),
dbhost=_dbconfig('host'),
- dbname=_dbconfig('database'),
- dbuser=_dbconfig('username'),
+ dbname=_dbconfig('dbname'),
+ dbuser=_dbconfig('user'),
dbpass=_dbconfig('password'),
controllerport=port,
railsport=rails_api_port,
port,
('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug'),
_dbconfig('host'),
- _dbconfig('database'),
- _dbconfig('username'),
+ _dbconfig('dbname'),
+ _dbconfig('user'),
_dbconfig('password')))
logf = open(_logfilename('ws'), 'a')
ws = subprocess.Popen(
def _dbconfig(key):
global _cached_db_config
if not _cached_db_config:
- _cached_db_config = yaml.safe_load(open(os.path.join(
- SERVICES_SRC_DIR, 'api', 'config', 'database.yml')))
- return _cached_db_config['test'][key]
+        if "ARVADOS_CONFIG" in os.environ:
+            _cached_db_config = list(yaml.safe_load(open(os.environ["ARVADOS_CONFIG"]))["Clusters"].values())[0]["PostgreSQL"]["Connection"]
+        else:
+            _cached_db_config = yaml.safe_load(open(os.path.join(
+                SERVICES_SRC_DIR, 'api', 'config', 'database.yml')))["test"]
+            _cached_db_config["dbname"] = _cached_db_config["database"]
+            _cached_db_config["user"] = _cached_db_config["username"]
+    return _cached_db_config[key]
def _apiconfig(key):
global _cached_config
def render_error(e)
logger.error e.inspect
- if !e.is_a? RequestError and (e.respond_to? :backtrace and e.backtrace)
- logger.error e.backtrace.collect { |x| x + "\n" }.join('')
+ if e.respond_to? :backtrace and e.backtrace
+ # This will be cleared by lograge after adding it to the log.
+ # Usually lograge would get the exceptions, but in our case we're catching
+ # all of them with exception handlers that cannot re-raise them because they
+ # don't get propagated.
+ Thread.current[:exception] = e.inspect
+ Thread.current[:backtrace] = e.backtrace.collect { |x| x + "\n" }.join('')
end
if (@object.respond_to? :errors and
@object.errors.andand.full_messages.andand.any?)
err = {}
end
err[:errors] ||= args
+    err[:errors].map! do |message|
+      "#{message} (#{Thread.current[:request_id]})"
+    end
err[:error_token] = [Time.now.utc.to_i, "%08x" % rand(16 ** 8)].join("+")
status = err.delete(:status) || 422
logger.error "Error #{err[:error_token]}: #{status}"
recursive: {
type: 'boolean', required: false, description: 'Include contents from child groups recursively.'
},
+ include: {
+ type: 'string', required: false, description: 'Include objects referred to by listed field in "included" (only owner_uuid)'
+ }
})
params.delete(:select)
params
before_validation :check_manifest_validity
before_validation :check_signatures
before_validation :strip_signatures_and_update_replication_confirmed
+ before_validation :name_null_if_empty
validate :ensure_pdh_matches_manifest_text
validate :ensure_storage_classes_desired_is_not_empty
validate :ensure_storage_classes_contain_non_empty_strings
around_update :manage_versioning, unless: :is_past_version?
api_accessible :user, extend: :common do |t|
- t.add :name
+ t.add lambda { |x| x.name || "" }, as: :name
t.add :description
t.add :properties
t.add :portable_data_hash
# correct timestamp in signed_manifest_text.
'manifest_text' => ['manifest_text', 'trash_at', 'is_trashed'],
'unsigned_manifest_text' => ['manifest_text'],
+ 'name' => ['name'],
)
end
end
end
+ def name_null_if_empty
+ if name == ""
+ self.name = nil
+ end
+ end
+
def set_file_names
if self.manifest_text_changed?
self.file_names = manifest_files
# delete application.yml and database.yml.
require 'config_loader'
+require 'open3'
begin
# If secret_token.rb exists here, we need to load it first.
WARNED_OMNIAUTH_CONFIG = true
end
-# Load the defaults
-$arvados_config_defaults = ConfigLoader.load "#{::Rails.root.to_s}/config/config.default.yml"
-if $arvados_config_defaults.empty?
- raise "Missing #{::Rails.root.to_s}/config/config.default.yml"
-end
-
-def remove_sample_entries(h)
- return unless h.is_a? Hash
- h.delete("SAMPLE")
- h.each { |k, v| remove_sample_entries(v) }
+# Load the defaults, used by config:migrate and fallback loading
+# legacy application.yml
+# Ask arvados-server to dump the fully-defaulted config for a stub
+# cluster ("xxxxx"); the resulting cluster section becomes
+# $arvados_config_defaults.
+Open3.popen2("arvados-server", "config-dump", "-config=-") do |stdin, stdout, status_thread|
+  stdin.write("Clusters: {xxxxx: {}}")
+  stdin.close
+  # NOTE(review): deserialize_symbols is not a stock Psych keyword —
+  # confirm the bundled YAML/safe_yaml version accepts it.
+  confs = YAML.load(stdout, deserialize_symbols: false)
+  clusterID, clusterConfig = confs["Clusters"].first
+  $arvados_config_defaults = clusterConfig
+  $arvados_config_defaults["ClusterID"] = clusterID
+end
-remove_sample_entries($arvados_config_defaults)
-
-clusterID, clusterConfig = $arvados_config_defaults["Clusters"].first
-$arvados_config_defaults = clusterConfig
-$arvados_config_defaults["ClusterID"] = clusterID
-
-# Initialize the global config with the defaults
-$arvados_config_global = $arvados_config_defaults.deep_dup
# Load the global config file
-confs = ConfigLoader.load "/etc/arvados/config.yml"
-if !confs.empty?
- clusterID, clusterConfig = confs["Clusters"].first
- $arvados_config_global["ClusterID"] = clusterID
-
- # Copy the cluster config over the defaults
- $arvados_config_global.deep_merge!(clusterConfig)
+# Open3.popen2 does not raise on a nonzero exit status; if config-dump
+# fails, stdout is simply empty and YAML.load returns a falsy value,
+# which is handled by the else branch below.
+Open3.popen2("arvados-server", "config-dump") do |stdin, stdout, status_thread|
+  confs = YAML.load(stdout, deserialize_symbols: false)
+  if confs && !confs.empty?
+    # config-dump merges defaults with user configuration, so every
+    # key should be set.
+    clusterID, clusterConfig = confs["Clusters"].first
+    $arvados_config_global = clusterConfig
+    $arvados_config_global["ClusterID"] = clusterID
+  else
+    # config-dump failed, assume we will be loading from legacy
+    # application.yml, initialize with defaults.
+    $arvados_config_global = $arvados_config_defaults.deep_dup
+  end
+end
end
# Now make a copy
+++ /dev/null
-../../../lib/config/config.default.yml
\ No newline at end of file
client_ipaddr: event.payload[:client_ipaddr],
client_auth: event.payload[:client_auth],
}
+
+ # Lograge adds exceptions not being rescued to event.payload, but we're
+ # catching all errors on ApplicationController so we look for backtraces
+ # elsewhere.
+ if !Thread.current[:backtrace].nil?
+ payload.merge!(
+ {
+ exception: Thread.current[:exception],
+ exception_backtrace: Thread.current[:backtrace],
+ }
+ )
+ Thread.current[:exception] = nil
+ Thread.current[:backtrace] = nil
+ end
+
exceptions = %w(controller action format id)
params = event.payload[:params].except(*exceptions)
case "$TARGET" in
centos*)
- fpm_depends+=(libcurl-devel postgresql-devel)
+ fpm_depends+=(libcurl-devel postgresql-devel arvados-server)
;;
debian* | ubuntu*)
- fpm_depends+=(libcurl-ssl-dev libpq-dev g++)
+ fpm_depends+=(libcurl-ssl-dev libpq-dev g++ arvados-server)
;;
esac
w_a_z_file:
uuid: zzzzz-4zz18-25k12570yk134b3
current_version_uuid: zzzzz-4zz18-25k12570yk134b3
- portable_data_hash: 8706aadd12a0ebc07d74cae88762ba9e+56
+ portable_data_hash: 44a8da9ec82098323895cd14e178386f+56
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-09T10:53:38Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
multilevel_collection_1:
uuid: zzzzz-4zz18-pyw8yp9g3pr7irn
current_version_uuid: zzzzz-4zz18-pyw8yp9g3pr7irn
- portable_data_hash: 1fd08fc162a5c6413070a8bd0bffc818+150
+ portable_data_hash: f9ddda46bb293b6847da984e3aa735db+290
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
uuid: zzzzz-4zz18-45xf9hw1sxkhl6q
current_version_uuid: zzzzz-4zz18-45xf9hw1sxkhl6q
# All of this collection's files are deep in subdirectories.
- portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+ portable_data_hash: 8591cc5caeca80fc62fd529ba1d63bf3+118
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
unique_expired_collection2:
uuid: zzzzz-4zz18-mto52zx1s7sn3jr
current_version_uuid: zzzzz-4zz18-mto52zx1s7sn3jr
- portable_data_hash: 4ad199f90029935844dc3f098f4fca2b+49
+ portable_data_hash: 64a2bed1ef0f40fe3a7d39bcf2584cb8+50
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
is_trashed: true
trash_at: 2001-01-01T00:00:00Z
delete_at: 2038-01-01T00:00:00Z
- manifest_text: ". 29d7797f1888013986899bc9083783fa+3 0:3:expired\n"
+ manifest_text: ". 29d7797f1888013986899bc9083783fa+3 0:3:expired2\n"
name: unique_expired_collection2
# a collection with a log file that can be parsed by the log viewer
uuid: zzzzz-4zz18-filesinsubdir00
current_version_uuid: zzzzz-4zz18-filesinsubdir00
name: collection_files_in_subdir
- portable_data_hash: 85877ca2d7e05498dd3d109baf2df106+95
+ portable_data_hash: 7eb64275355980ebc93411b44050c137+281
owner_uuid: zzzzz-tpzed-user1withloadab
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-user1withloadab
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
- manifest_text: ". 85877ca2d7e05498dd3d109baf2df106+95 0:95:file_in_subdir1\n./subdir2/subdir3 2bbc341c702df4d8f42ec31f16c10120+64 0:32:file1_in_subdir3.txt 32:32:file2_in_subdir3.txt\n./subdir2/subdir3/subdir4 2bbc341c702df4d8f42ec31f16c10120+64 0:32:file1_in_subdir4.txt 32:32:file2_in_subdir4.txt"
+ manifest_text: ". 85877ca2d7e05498dd3d109baf2df106+95 0:95:file_in_subdir1\n./subdir2/subdir3 2bbc341c702df4d8f42ec31f16c10120+64 0:32:file1_in_subdir3.txt 32:32:file2_in_subdir3.txt\n./subdir2/subdir3/subdir4 2bbc341c702df4d8f42ec31f16c10120+64 0:32:file1_in_subdir4.txt 32:32:file2_in_subdir4.txt\n"
graph_test_collection1:
uuid: zzzzz-4zz18-bv31uwvy3neko22
collection_with_repeated_filenames_and_contents_in_two_dirs_1:
uuid: zzzzz-4zz18-duplicatenames1
current_version_uuid: zzzzz-4zz18-duplicatenames1
- portable_data_hash: f3a67fad3a19c31c658982fb8158fa58+144
+ portable_data_hash: ce437b12aa73ab34f7af5227f556c9e6+142
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
foo_and_bar_files_in_dir:
uuid: zzzzz-4zz18-foonbarfilesdir
current_version_uuid: zzzzz-4zz18-foonbarfilesdir
- portable_data_hash: 6bbac24198d09a93975f60098caf0bdf+62
+ portable_data_hash: 870369fc72738603c2fad16664e50e2d+58
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
collection_not_readable_by_active:
uuid: zzzzz-4zz18-cd42uwvy3neko21
current_version_uuid: zzzzz-4zz18-cd42uwvy3neko21
- portable_data_hash: bb89eb5140e2848d39b416daeef4ffc5+45
+ portable_data_hash: b9e51a238ce08a698e7d7f8f101aee18+55
owner_uuid: zzzzz-tpzed-000000000000000
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-02-03T17:22:54Z
updated_at: 2014-02-03T17:22:54Z
- manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+ manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar 0:0:empty\n"
name: collection_not_readable_by_active
collection_to_remove_and_rename_files:
uuid: zzzzz-4zz18-a21ux3541sxa8sf
current_version_uuid: zzzzz-4zz18-a21ux3541sxa8sf
- portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+ portable_data_hash: 21aed8fd508bd6263704b673455949ba+57
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
trashed_collection_to_test_name_conflict_on_untrash:
uuid: zzzzz-4zz18-trashedcolnamec
current_version_uuid: zzzzz-4zz18-trashedcolnamec
- portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+ portable_data_hash: 21aed8fd508bd6263704b673455949ba+57
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
same_name_as_trashed_coll_to_test_name_conflict_on_untrash:
uuid: zzzzz-4zz18-namesameastrash
current_version_uuid: zzzzz-4zz18-namesameastrash
- portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+ portable_data_hash: 21aed8fd508bd6263704b673455949ba+57
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
collection_in_trashed_subproject:
uuid: zzzzz-4zz18-trashedproj2col
current_version_uuid: zzzzz-4zz18-trashedproj2col
- portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+ portable_data_hash: 21aed8fd508bd6263704b673455949ba+57
owner_uuid: zzzzz-j7d0g-trashedproject2
created_at: 2014-02-03T17:22:54Z
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
basename: bar
class: File
location: "keep:fa7aeb5140e2848d39b416daeef4ffc5+45/bar"
- /var/lib/cwl/workflow.json: "keep:1fd08fc162a5c6413070a8bd0bffc818+150"
+ /var/lib/cwl/workflow.json: "keep:f9ddda46bb293b6847da984e3aa735db+290"
uncommitted:
uuid: zzzzz-xvhdp-cr4uncommittedc
token_time = token.split('+', 2).first.to_i
assert_operator(token_time, :>=, @start_stamp, "error token too old")
assert_operator(token_time, :<=, now_timestamp, "error token too new")
+ json_response['errors'].each do |err|
+ assert_match(/req-[a-z0-9]{20}/, err, "X-Request-Id value missing on error message")
+ end
end
def check_404(errmsg="Path not found")
assert_response 404
- assert_equal([errmsg], json_response['errors'])
+ json_response['errors'].each do |err|
+ assert(err.include?(errmsg), "error message '#{err}' expected to include '#{errmsg}'")
+ end
check_error_token
end
end
end
end
+
+  # ApplicationController rescues every exception, so lograge never sees
+  # them directly; the error handler stashes the exception and backtrace
+  # in Thread-locals instead. Verify that the stashed backtrace ends up
+  # in the structured log line under the exception_backtrace key.
+  test "exceptions with backtraces get logged at exception_backtrace key" do
+    Group.stubs(:new).raises(Exception, 'Whoops')
+    # The log entry must contain both the exception message and the
+    # serialized exception_backtrace key.
+    Rails.logger.expects(:info).with(any_parameters) do |param|
+      param.include?('Whoops') and param.include?('"exception_backtrace":')
+    end
+    @controller = Arvados::V1::GroupsController.new
+    authorize_with :active
+    post :create, params: {
+      group: {},
+    }
+  end
end
group_index_params = discovery_doc['resources']['groups']['methods']['index']['parameters']
group_contents_params = discovery_doc['resources']['groups']['methods']['contents']['parameters']
- assert_equal group_contents_params.keys.sort, (group_index_params.keys - ['select'] + ['uuid', 'recursive']).sort
+ assert_equal group_contents_params.keys.sort, (group_index_params.keys - ['select'] + ['uuid', 'recursive', 'include']).sort
recursive_param = group_contents_params['recursive']
assert_equal 'boolean', recursive_param['type']
params: {specimen: {}},
headers: {'HTTP_ACCEPT' => ''})
assert_response 401
- assert_includes(json_response['errors'], "Not logged in")
+ json_response['errors'].each do |err|
+ assert(err.include?("Not logged in"), "error message '#{err}' expected to include 'Not logged in'")
+ end
end
test "login prompt respects JSON Accept header" do
params: {specimen: {}},
headers: {'HTTP_ACCEPT' => 'application/json'})
assert_response 401
- assert_includes(json_response['errors'], "Not logged in")
+ json_response['errors'].each do |err|
+ assert(err.include?("Not logged in"), "error message '#{err}' expected to include 'Not logged in'")
+ end
end
test "login prompt respects HTML Accept header" do
assert_empty Collection.where(uuid: uuid)
end
+  # name_null_if_empty converts "" to NULL before validation, and NULL
+  # names are not covered by the (owner_uuid, name) uniqueness index —
+  # so any number of unnamed collections may share a project, while
+  # duplicate non-empty names must still be rejected.
+  test "empty names are exempt from name uniqueness" do
+    act_as_user users(:active) do
+      c1 = Collection.new(name: nil, manifest_text: '', owner_uuid: groups(:aproject).uuid)
+      assert c1.save
+      c2 = Collection.new(name: '', manifest_text: '', owner_uuid: groups(:aproject).uuid)
+      assert c2.save
+      # Two empty-string names in the same project must not collide...
+      c3 = Collection.new(name: '', manifest_text: '', owner_uuid: groups(:aproject).uuid)
+      assert c3.save
+      c4 = Collection.new(name: 'c4', manifest_text: '', owner_uuid: groups(:aproject).uuid)
+      assert c4.save
+      # ...but two identical non-empty names still must.
+      c5 = Collection.new(name: 'c4', manifest_text: '', owner_uuid: groups(:aproject).uuid)
+      assert_raises(ActiveRecord::RecordNotUnique) do
+        c5.save
+      end
+    end
+  end
+
test "create collections with managed properties" do
Rails.configuration.Collections.ManagedProperties = {
'default_prop1' => {'Value' => 'prop1_value'},
go func() {
_, err := io.Copy(response.Conn, stdinRdr)
if err != nil {
- runner.CrunchLog.Printf("While writing stdin collection to docker container %q", err)
+ runner.CrunchLog.Printf("While writing stdin collection to docker container: %v", err)
runner.stop(nil)
}
stdinRdr.Close()
go func() {
_, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
if err != nil {
- runner.CrunchLog.Printf("While writing stdin json to docker container %q", err)
+ runner.CrunchLog.Printf("While writing stdin json to docker container: %v", err)
runner.stop(nil)
}
response.CloseWrite()
import arvados_version
version = arvados_version.get_version(SETUP_DIR, "arvados_fuse")
+if os.environ.get('ARVADOS_BUILDING_VERSION', False):
+ pysdk_dep = "=={}".format(version)
+else:
+ # On dev releases, arvados-python-client may have a different timestamp
+ pysdk_dep = "<={}".format(version)
short_tests_only = False
if '--short-tests-only' in sys.argv:
('share/doc/arvados_fuse', ['agpl-3.0.txt', 'README.rst']),
],
install_requires=[
- 'arvados-python-client >= 0.1.20151118035730',
- 'llfuse >=1.2, <=1.3.6',
+ 'arvados-python-client{}'.format(pysdk_dep),
+ 'llfuse >= 1.3.6',
'future',
'python-daemon',
'ciso8601 >= 2.0.0',
var (
version = "dev"
- command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
+ command cmd.Handler = service.Command(arvados.ServiceNameHealth, newHandler)
)
func newHandler(ctx context.Context, cluster *arvados.Cluster, _ string) service.Handler {
var lastMod time.Time
sawUUID := make(map[string]bool)
err := EachCollection(&s.config.Client, pageSize, func(c arvados.Collection) error {
- got[trial] = append(got[trial], c.UUID)
if c.ModifiedAt == nil {
return nil
}
// dup
return nil
}
+ got[trial] = append(got[trial], c.UUID)
sawUUID[c.UUID] = true
if lastMod == *c.ModifiedAt {
streak++
package main
import (
+ "context"
"fmt"
"net/http"
"os"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/auth"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
"github.com/sirupsen/logrus"
)
if srv.config.Listen == "" {
return nil
}
+ ctx := ctxlog.Context(context.Background(), srv.Logger)
server := &httpserver.Server{
Server: http.Server{
- Handler: httpserver.LogRequests(srv.Logger,
- auth.RequireLiteralToken(srv.config.ManagementToken,
- srv.metrics.Handler(srv.Logger))),
+ Handler: httpserver.HandlerWithContext(ctx,
+ httpserver.LogRequests(
+ auth.RequireLiteralToken(srv.config.ManagementToken,
+ srv.metrics.Handler(srv.Logger)))),
},
Addr: srv.config.Listen,
}
}
var updated arvados.Collection
defer c.pdhs.Remove(coll.UUID)
- err := client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, client.UpdateBody(coll), nil)
+ err := client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
+ "collection": map[string]string{
+ "manifest_text": coll.ManifestText,
+ },
+ })
if err == nil {
c.collections.Add(client.AuthToken+"\000"+coll.PortableDataHash, &cachedCollection{
expire: time.Now().Add(time.Duration(c.TTL)),
"fmt"
"io"
"io/ioutil"
- "net/url"
"os"
"os/exec"
"path/filepath"
var newCollection arvados.Collection
arv := arvados.NewClientFromEnv()
arv.AuthToken = arvadostest.ActiveToken
- err = arv.RequestAndDecode(&newCollection, "POST", "arvados/v1/collections", bytes.NewBufferString(url.Values{"collection": {"{}"}}.Encode()), nil)
+ err = arv.RequestAndDecode(&newCollection, "POST", "arvados/v1/collections", nil, map[string]interface{}{"collection": map[string]interface{}{}})
c.Assert(err, check.IsNil)
readPath, writePath, pdhPath := pathFunc(newCollection)
f.Close()
mtxt, err := fs.MarshalManifest(".")
c.Assert(err, check.IsNil)
- coll := arvados.Collection{ManifestText: mtxt}
- err = client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", client.UpdateBody(coll), nil)
+ var coll arvados.Collection
+ err = client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+ "collection": map[string]string{
+ "manifest_text": mtxt,
+ },
+ })
c.Assert(err, check.IsNil)
u, _ := url.Parse("http://download.example.com/c=" + coll.UUID + "/")
func (s *IntegrationSuite) TestDeleteLastFile(c *check.C) {
arv := arvados.NewClientFromEnv()
var newCollection arvados.Collection
- err := arv.RequestAndDecode(&newCollection, "POST", "arvados/v1/collections", arv.UpdateBody(&arvados.Collection{
- OwnerUUID: arvadostest.ActiveUserUUID,
- ManifestText: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt 0:3:bar.txt\n",
- Name: "keep-web test collection",
- }), map[string]bool{"ensure_unique_name": true})
+ err := arv.RequestAndDecode(&newCollection, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+ "collection": map[string]string{
+ "owner_uuid": arvadostest.ActiveUserUUID,
+ "manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt 0:3:bar.txt\n",
+ "name": "keep-web test collection",
+ },
+ "ensure_unique_name": true,
+ })
c.Assert(err, check.IsNil)
defer arv.RequestAndDecode(&newCollection, "DELETE", "arvados/v1/collections/"+newCollection.UUID, nil, nil)
package main
import (
+ "context"
"net/http"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
)
type server struct {
h := &handler{Config: srv.Config}
reg := prometheus.NewRegistry()
h.Config.Cache.registry = reg
- mh := httpserver.Instrument(reg, nil, httpserver.AddRequestIDs(httpserver.LogRequests(nil, h)))
+ ctx := ctxlog.Context(context.Background(), logrus.StandardLogger())
+ mh := httpserver.Instrument(reg, nil, httpserver.HandlerWithContext(ctx, httpserver.AddRequestIDs(httpserver.LogRequests(h))))
h.MetricsAPI = mh.ServeAPI(h.Config.ManagementToken, http.NotFoundHandler())
srv.Handler = mh
srv.Addr = srv.Config.Listen
// Start serving requests.
router = MakeRESTRouter(!cfg.DisableGet, !cfg.DisablePut, kc, time.Duration(cfg.Timeout), cfg.ManagementToken)
- http.Serve(listener, httpserver.AddRequestIDs(httpserver.LogRequests(nil, router)))
+ http.Serve(listener, httpserver.AddRequestIDs(httpserver.LogRequests(router)))
log.Println("shutting down")
}
"time"
"git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
"git.curoverse.com/arvados.git/sdk/go/health"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
"github.com/gorilla/mux"
rtr.metrics.setupWorkQueueMetrics(trashq, "trash")
rtr.metrics.setupRequestMetrics(rtr.limiter)
- instrumented := httpserver.Instrument(rtr.metrics.reg, nil,
- httpserver.AddRequestIDs(httpserver.LogRequests(nil, rtr.limiter)))
+ instrumented := httpserver.Instrument(rtr.metrics.reg, log,
+ httpserver.HandlerWithContext(
+ ctxlog.Context(context.Background(), log),
+ httpserver.AddRequestIDs(httpserver.LogRequests(rtr.limiter))))
return instrumented.ServeAPI(theConfig.ManagementToken, instrumented)
}
PATH
remote: .
specs:
- arvados-login-sync (1.3.3.20190528194843)
+ arvados-login-sync (1.4.0.20190701162225)
arvados (~> 1.3.0, >= 1.3.0)
GEM
mocha (1.8.0)
metaclass (~> 0.0.1)
multi_json (1.13.1)
- multipart-post (2.1.1)
- os (1.0.1)
+ multipart-post (2.0.0)
+ os (1.0.0)
public_suffix (3.0.3)
rake (12.3.2)
retriable (1.4.1)
import arvados_version
version = arvados_version.get_version(SETUP_DIR, "arvnodeman")
+if os.environ.get('ARVADOS_BUILDING_VERSION', False):
+ pysdk_dep = "=={}".format(version)
+else:
+ # On dev releases, arvados-python-client may have a different timestamp
+ pysdk_dep = "<={}".format(version)
short_tests_only = False
if '--short-tests-only' in sys.argv:
],
install_requires=[
'apache-libcloud>=2.3.1.dev1',
- 'arvados-python-client>=0.1.20170731145219',
+ 'arvados-python-client{}'.format(pysdk_dep),
'future',
'pykka < 2',
'python-daemon',
ac := arvados.NewClientFromEnv()
ac.AuthToken = s.token
- coll := &arvados.Collection{ManifestText: ""}
- err := ac.RequestAndDecode(coll, "POST", "arvados/v1/collections", s.jsonBody("collection", coll), map[string]interface{}{"ensure_unique_name": true})
+ var coll arvados.Collection
+ err := ac.RequestAndDecode(&coll, "POST", "arvados/v1/collections", s.jsonBody("collection", `{"manifest_text":""}`), map[string]interface{}{"ensure_unique_name": true})
c.Assert(err, check.IsNil)
s.ignoreLogID = s.lastLogID(c)
wf := &arvados.Workflow{
Name: "ws_test",
}
- err := ac.RequestAndDecode(wf, "POST", "arvados/v1/workflows", s.jsonBody("workflow", wf), map[string]interface{}{"ensure_unique_name": true})
+ err := ac.RequestAndDecode(wf, "POST", "arvados/v1/workflows", s.jsonBody("workflow", `{"name":"ws_test"}`), map[string]interface{}{"ensure_unique_name": true})
if err != nil {
panic(err)
}
uuidChan <- wf.UUID
}
lg := &arvados.Log{}
- err = ac.RequestAndDecode(lg, "POST", "arvados/v1/logs", s.jsonBody("log", &arvados.Log{
- ObjectUUID: wf.UUID,
- EventType: "blip",
- Properties: map[string]interface{}{
+ err = ac.RequestAndDecode(lg, "POST", "arvados/v1/logs", s.jsonBody("log", map[string]interface{}{
+ "object_uuid": wf.UUID,
+ "event_type": "blip",
+ "properties": map[string]interface{}{
"beep": "boop",
},
}), nil)
if err != nil {
panic(err)
}
- err = ac.RequestAndDecode(wf, "PUT", "arvados/v1/workflows/"+wf.UUID, s.jsonBody("workflow", wf), nil)
+ err = ac.RequestAndDecode(wf, "PUT", "arvados/v1/workflows/"+wf.UUID, s.jsonBody("workflow", `{"name":"ws_test"}`), nil)
if err != nil {
panic(err)
}
}
func (s *v0Suite) jsonBody(rscName string, ob interface{}) io.Reader {
+	// ob is either a ready-made JSON string, or any value to be
+	// marshaled here. Either way the JSON text is sent as a single
+	// form field named rscName.
-	j, err := json.Marshal(ob)
-	if err != nil {
-		panic(err)
+	val, ok := ob.(string)
+	if !ok {
+		j, err := json.Marshal(ob)
+		if err != nil {
+			panic(err)
+		}
+		val = string(j)
	}
	v := url.Values{}
-	v[rscName] = []string{string(j)}
+	v[rscName] = []string{val}
	return bytes.NewBufferString(v.Encode())
}
clone)
if test -n "$2" ; then
- cp -r "$ARVBOX_BASE/$1" "$ARVBOX_BASE/$2"
+ mkdir -p "$ARVBOX_BASE/$2"
+ cp -a "$ARVBOX_BASE/$1/passenger" \
+ "$ARVBOX_BASE/$1/gems" \
+ "$ARVBOX_BASE/$1/pip" \
+ "$ARVBOX_BASE/$1/npm" \
+ "$ARVBOX_BASE/$1/gopath" \
+ "$ARVBOX_BASE/$1/Rlibs" \
+ "$ARVBOX_BASE/$1/arvados" \
+ "$ARVBOX_BASE/$1/sso-devise-omniauth-provider" \
+ "$ARVBOX_BASE/$1/composer" \
+ "$ARVBOX_BASE/$1/workbench2" \
+ "$ARVBOX_BASE/$2"
echo "Created new arvbox $2"
echo "export ARVBOX_CONTAINER=$2"
else
ENV GEM_PATH /var/lib/gems
ENV PATH $PATH:/var/lib/gems/bin
-ENV GOVERSION 1.11.5
+ENV GOVERSION 1.12.7
# Install golang binary
RUN curl -f http://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz | \
keep-setup.sh common.sh createusers.sh \
logger runsu.sh waitforpostgres.sh \
yml_override.py api-setup.sh \
- go-setup.sh devenv.sh \
+ go-setup.sh devenv.sh cluster-config.sh \
/usr/local/lib/arvbox/
ADD runit /etc/runit
set -ex -o pipefail
. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
cd /usr/src/arvados/services/api
echo $vm_uuid > /var/lib/arvados/vm-uuid
fi
+if ! test -f /var/lib/arvados/api_database_pw ; then
+ ruby -e 'puts rand(2**128).to_s(36)' > /var/lib/arvados/api_database_pw
+fi
+database_pw=$(cat /var/lib/arvados/api_database_pw)
+
+if ! (psql postgres -c "\du" | grep "^ arvados ") >/dev/null ; then
+ psql postgres -c "create user arvados with password '$database_pw'"
+fi
+psql postgres -c "ALTER USER arvados WITH SUPERUSER;"
+
+if test -a /usr/src/arvados/services/api/config/arvados_config.rb ; then
+ rm -f config/application.yml config/database.yml
+ flock /var/lib/arvados/cluster_config.yml.lock /usr/local/lib/arvbox/cluster-config.sh
+else
cat >config/application.yml <<EOF
$RAILS_ENV:
uuid_prefix: $uuid_prefix
EOF
(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
-
-if ! test -f /var/lib/arvados/api_database_pw ; then
- ruby -e 'puts rand(2**128).to_s(36)' > /var/lib/arvados/api_database_pw
-fi
-database_pw=$(cat /var/lib/arvados/api_database_pw)
-
-if ! (psql postgres -c "\du" | grep "^ arvados ") >/dev/null ; then
- psql postgres -c "create user arvados with password '$database_pw'"
-fi
-psql postgres -c "ALTER USER arvados WITH SUPERUSER;"
-
sed "s/password:.*/password: $database_pw/" <config/database.yml.example >config/database.yml
+fi
if ! test -f /var/lib/arvados/api_database_setup ; then
bundle exec rake db:setup
--- /dev/null
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Generate the cluster-wide /etc/arvados/config.yml from the secrets
+# and service ports created by the other arvbox setup scripts.
+
+exec 2>&1
+set -ex -o pipefail
+
+# Leave an existing non-empty config untouched.
+if [[ -s /etc/arvados/config.yml ]] ; then
+  exit
+fi
+
+# All of these files are written by earlier setup steps; with set -e a
+# failed cat (missing file) aborts the script immediately.
+uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
+secret_token=$(cat /var/lib/arvados/api_secret_token)
+blob_signing_key=$(cat /var/lib/arvados/blob_signing_key)
+management_token=$(cat /var/lib/arvados/management_token)
+sso_app_secret=$(cat /var/lib/arvados/sso_app_secret)
+vm_uuid=$(cat /var/lib/arvados/vm-uuid)
+database_pw=$(cat /var/lib/arvados/api_database_pw)
+
+workbench_secret_key_base=$(cat /var/lib/arvados/workbench_secret_token)
+
+if test -s /var/lib/arvados/api_rails_env ; then
+  database_env=$(cat /var/lib/arvados/api_rails_env)
+else
+  database_env=development
+fi
+
+# NOTE(review): $localip and the services[] port map are presumably set
+# by a sourced common.sh before this script runs — confirm it is only
+# invoked via the arvbox service wrappers.
+cat >/var/lib/arvados/cluster_config.yml <<EOF
+Clusters:
+  ${uuid_prefix}:
+    ManagementToken: $management_token
+    Services:
+      Workbench1:
+        ExternalURL: "https://$localip:${services[workbench]}"
+      Workbench2:
+        ExternalURL: "https://$localip:${services[workbench2-ssl]}"
+      SSO:
+        ExternalURL: "https://$localip:${services[sso]}"
+      Websocket:
+        ExternalURL: "wss://$localip:${services[websockets-ssl]}/websocket"
+      GitSSH:
+        ExternalURL: "ssh://git@$localip:"
+      GitHTTP:
+        ExternalURL: "http://$localip:${services[arv-git-httpd]}/"
+      WebDAV:
+        ExternalURL: "https://$localip:${services[keep-web-ssl]}/"
+      Composer:
+        ExternalURL: "http://$localip:${services[composer]}"
+      Controller:
+        ExternalURL: "https://$localip:${services[controller-ssl]}"
+    NodeProfiles:  # to be deprecated in favor of "Services" section
+      "*":
+        arvados-controller:
+          Listen: ":${services[controller]}" # choose a port
+        arvados-api-server:
+          Listen: ":${services[api]}" # must match Rails server port in your Nginx config
+    PostgreSQL:
+      ConnectionPool: 32 # max concurrent connections per arvados server daemon
+      Connection:
+        # All parameters here are passed to the PG client library in a connection string;
+        # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+        host: localhost
+        user: arvados
+        password: ${database_pw}
+        dbname: arvados_${database_env}
+        client_encoding: utf8
+    API:
+      RailsSessionSecretToken: $secret_token
+    Collections:
+      BlobSigningKey: $blob_signing_key
+      DefaultReplication: 1
+    Containers:
+      SupportedDockerImageFormats: ["v2"]
+    Login:
+      ProviderAppSecret: $sso_app_secret
+      ProviderAppID: arvados-server
+    Users:
+      NewUsersAreActive: true
+      AutoAdminFirstUser: true
+      AutoSetupNewUsers: true
+      AutoSetupNewUsersWithVmUUID: $vm_uuid
+      AutoSetupNewUsersWithRepository: true
+    Workbench:
+      SecretKeyBase: $workbench_secret_key_base
+      ArvadosDocsite: http://$localip:${services[doc]}/
+EOF
+
+# Apply any local overrides, then install the result system-wide.
+/usr/local/lib/arvbox/yml_override.py /var/lib/arvados/cluster_config.yml
+
+cp /var/lib/arvados/cluster_config.yml /etc/arvados/config.yml
localip=$(ip addr show $defaultdev | grep 'inet ' | sed 's/ *inet \(.*\)\/.*/\1/')
fi
+root_cert=/var/lib/arvados/root-cert.pem
+root_cert_key=/var/lib/arvados/root-cert.key
+server_cert=/var/lib/arvados/server-cert-${localip}.pem
+server_cert_key=/var/lib/arvados/server-cert-${localip}.key
+
declare -A services
services=(
[workbench]=443
export GOPATH=/var/lib/gopath
EOF
+ mkdir -p /etc/arvados
+ chown -R arvbox:arvbox /etc/arvados
fi
if ! grep "^fuse:" /etc/group >/dev/null 2>/dev/null ; then
cd "$GOPATH/src/git.curoverse.com/arvados.git"
flock /var/lib/gopath/gopath.lock go get -v -d ...
flock /var/lib/gopath/gopath.lock "$GOPATH/bin/govendor" sync
+
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/cmd/arvados-server"
+install $GOPATH/bin/arvados-server /usr/local/bin
uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
-if test ! -s /var/lib/arvados/root-cert.pem ; then
+if ! openssl verify -CAfile $root_cert $root_cert ; then
# req signing request sub-command
# -new new certificate request
# -nodes "no des" don't encrypt key
-extensions x509_ext \
-config <(cat /etc/ssl/openssl.cnf \
<(printf "\n[x509_ext]\nbasicConstraints=critical,CA:true,pathlen:0\nkeyUsage=critical,keyCertSign,cRLSign")) \
- -out /var/lib/arvados/root-cert.pem \
- -keyout /var/lib/arvados/root-cert.key \
+ -out $root_cert \
+ -keyout $root_cert_key \
-days 365
- chown arvbox:arvbox /var/lib/arvados/root-cert.*
+ chown arvbox:arvbox $root_cert $root_cert_key
+ rm -f $server_cert $server_cert_key
fi
-if test ! -s /var/lib/arvados/server-cert-${localip}.pem ; then
+cp $root_cert /usr/local/share/ca-certificates/arvados-testing-cert.crt
+update-ca-certificates
+
+if ! openssl verify -CAfile $root_cert $server_cert ; then
+
+ rm -f $server_cert $server_cert_key
if [[ $localip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
san=IP:$localip
-config <(cat /etc/ssl/openssl.cnf \
<(printf "\n[x509_ext]\nkeyUsage=critical,digitalSignature,keyEncipherment\nsubjectAltName=DNS:localhost,$san")) \
-out /var/lib/arvados/server-cert-${localip}.csr \
- -keyout /var/lib/arvados/server-cert-${localip}.key \
+ -keyout $server_cert_key \
-days 365
openssl x509 \
-req \
-in /var/lib/arvados/server-cert-${localip}.csr \
- -CA /var/lib/arvados/root-cert.pem \
- -CAkey /var/lib/arvados/root-cert.key \
- -out /var/lib/arvados/server-cert-${localip}.pem \
+ -CA $root_cert \
+ -CAkey $root_cert_key \
+ -out $server_cert \
-set_serial $RANDOM$RANDOM \
-extfile <(cat /etc/ssl/openssl.cnf \
<(printf "\n[x509_ext]\nkeyUsage=critical,digitalSignature,keyEncipherment\nsubjectAltName=DNS:localhost,$san")) \
-extensions x509_ext \
-days 365
- chown arvbox:arvbox /var/lib/arvados/server-cert-${localip}.*
+ chown arvbox:arvbox $server_cert $server_cert_key
fi
-cp /var/lib/arvados/root-cert.pem /usr/local/share/ca-certificates/arvados-testing-cert.crt
-update-ca-certificates
-
sv stop certificate
. /usr/local/lib/arvbox/common.sh
. /usr/local/lib/arvbox/go-setup.sh
-flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/cmd/arvados-server"
-install $GOPATH/bin/arvados-server /usr/local/bin
(cd /usr/local/bin && ln -sf arvados-server arvados-controller)
if test "$1" = "--only-deps" ; then
exit
fi
-uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
-secret_token=$(cat /var/lib/arvados/api_secret_token)
-blob_signing_key=$(cat /var/lib/arvados/blob_signing_key)
-management_token=$(cat /var/lib/arvados/management_token)
-sso_app_secret=$(cat /var/lib/arvados/sso_app_secret)
-vm_uuid=$(cat /var/lib/arvados/vm-uuid)
-database_pw=$(cat /var/lib/arvados/api_database_pw)
-
-if test -s /var/lib/arvados/api_rails_env ; then
- database_env=$(cat /var/lib/arvados/api_rails_env)
-else
- database_env=development
-fi
-
-mkdir -p /etc/arvados
-
-cat >/var/lib/arvados/cluster_config.yml <<EOF
-Clusters:
- ${uuid_prefix}:
- ManagementToken: $management_token
- Services:
- Workbench1:
- ExternalURL: "https://$localip:${services[workbench]}"
- Workbench2:
- ExternalURL: "https://$localip:${services[workbench2-ssl]}"
- SSO:
- ExternalURL: "https://$localip:${services[sso]}"
- Websocket:
- ExternalURL: "wss://$localip:${services[websockets-ssl]}/websocket"
- GitSSH:
- ExternalURL: "ssh://git@$localip:"
- GitHTTP:
- ExternalURL: "http://$localip:${services[arv-git-httpd]}/"
- WebDAV:
- ExternalURL: "https://$localip:${services[keep-web-ssl]}/"
- NodeProfiles: # to be deprecated in favor of "Services" section
- "*":
- arvados-controller:
- Listen: ":${services[controller]}" # choose a port
- arvados-api-server:
- Listen: ":${services[api]}" # must match Rails server port in your Nginx config
- PostgreSQL:
- ConnectionPool: 32 # max concurrent connections per arvados server daemon
- Connection:
- # All parameters here are passed to the PG client library in a connection string;
- # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
- host: localhost
- user: arvados
- password: ${database_pw}
- dbname: arvados_${database_env}
- client_encoding: utf8
- API:
- RailsSessionSecretToken: $secret_token
- Collections:
- BlobSigningKey: $blob_signing_key
- DefaultReplication: 1
- Containers:
- SupportedDockerImageFormats: ["v2"]
- Login:
- ProviderAppSecret: $sso_app_secret
- ProviderAppID: arvados-server
- Users:
- NewUsersAreActive: true
- AutoAdminFirstUser: true
- AutoSetupNewUsers: true
- AutoSetupNewUsersWithVmUUID: $vm_uuid
- AutoSetupNewUsersWithRepository: true
-EOF
-
-/usr/local/lib/arvbox/yml_override.py /var/lib/arvados/cluster_config.yml
-
-cp /var/lib/arvados/cluster_config.yml /etc/arvados/config.yml
+flock /var/lib/arvados/cluster_config.yml.lock /usr/local/lib/arvbox/cluster-config.sh
exec /usr/local/lib/arvbox/runsu.sh /usr/local/bin/arvados-controller
. /usr/local/lib/arvbox/common.sh
+openssl verify -CAfile $root_cert $server_cert
+
cat <<EOF >/var/lib/arvados/nginx.conf
worker_processes auto;
pid /var/lib/arvados/nginx.pid;
server {
listen *:${services[controller-ssl]} ssl default_server;
server_name controller;
- ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
- ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+ ssl_certificate "${server_cert}";
+ ssl_certificate_key "${server_cert_key}";
location / {
proxy_pass http://controller;
proxy_set_header Host \$http_host;
proxy_read_timeout 300s;
ssl on;
- ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
- ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+ ssl_certificate "${server_cert}";
+ ssl_certificate_key "${server_cert_key}";
location / {
proxy_pass http://arvados-ws;
server {
listen *:${services[workbench2-ssl]} ssl default_server;
server_name workbench2;
- ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
- ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+ ssl_certificate "${server_cert}";
+ ssl_certificate_key "${server_cert_key}";
location / {
proxy_pass http://workbench2;
proxy_set_header Host \$http_host;
server {
listen *:${services[keep-web-ssl]} ssl default_server;
server_name keep-web;
- ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
- ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+ ssl_certificate "${server_cert}";
+ ssl_certificate_key "${server_cert_key}";
location / {
proxy_pass http://keep-web;
proxy_set_header Host \$http_host;
fi
secret_token=$(cat /var/lib/arvados/sso_secret_token)
-test -s /var/lib/arvados/server-cert-${localip}.pem
+openssl verify -CAfile $root_cert $server_cert
cat >config/application.yml <<EOF
$RAILS_ENV:
fi
if test "$1" != "--only-deps" ; then
+ openssl verify -CAfile $root_cert $server_cert
exec bundle exec passenger start --port=${services[workbench]} \
--ssl --ssl-certificate=/var/lib/arvados/server-cert-${localip}.pem \
--ssl-certificate-key=/var/lib/arvados/server-cert-${localip}.key \
bundle exec passenger-config build-native-support
bundle exec passenger-config install-standalone-runtime
mkdir -p /usr/src/arvados/apps/workbench/tmp
-RAILS_GROUPS=assets bundle exec rake npm:install
if test "$1" = "--only-deps" ; then
- exit
+ # Workaround for validation that asserts there's a download URL
+ # configured, which breaks rake if it is missing.
+cat >config/application.yml <<EOF
+$RAILS_ENV:
+ keep_web_url: https://example.com/c=%{uuid_or_pdh}
+EOF
+ RAILS_GROUPS=assets bundle exec rake npm:install
+ rm config/application.yml
+ exit
fi
set -u
fi
secret_token=$(cat /var/lib/arvados/workbench_secret_token)
+if test -e /usr/src/arvados/apps/workbench/config/arvados_config.rb ; then
+ rm -f config/application.yml
+else
cat >config/application.yml <<EOF
$RAILS_ENV:
secret_token: $secret_token
workbench2_url: https://$localip:${services[workbench2-ssl]}
EOF
-bundle exec rake assets:precompile
-
(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
+fi
+
+RAILS_GROUPS=assets bundle exec rake npm:install
+bundle exec rake assets:precompile
def headHTML(self):
return '<link rel="stylesheet" href="{}">\n'.format(self.CSS)
- def chartdata(self, label, tasks, stat):
+ def chartdata(self, label, tasks, stats):
+        '''For Crunch2, label is the name of the container request,
+        tasks is the top level container, and
+        stats is a tuple of (category, [metric names]).
+        '''
return {
- 'data': self._collate_data(tasks, stat),
+ 'data': self._collate_data(tasks, stats),
'options': {
+ 'legend': 'always',
'connectSeparatedPoints': True,
- 'labels': ['elapsed']+[uuid for uuid, _ in tasks.items()],
- 'title': '{}: {} {}'.format(label, stat[0], stat[1]),
+ 'labels': ['elapsed'] + stats[1],
+ 'title': '{}: {}'.format(label, stats[0]),
},
}
- def _collate_data(self, tasks, stat):
+ def _collate_data(self, tasks, stats):
data = []
nulls = []
+ # uuid is category for crunch2
for uuid, task in tasks.items():
- for pt in task.series[stat]:
- data.append([pt[0].total_seconds()] + nulls + [pt[1]])
+ # All stats in a category are assumed to have the same time base and same number of samples
+ category = stats[0]
+ series_names = stats[1]
+ sn0 = series_names[0]
+ series = task.series[(category,sn0)]
+ for i in range(len(series)):
+ pt = series[i]
+ vals = [task.series[(category,stat)][i][1] for stat in series_names[1:]]
+ data.append([pt[0].total_seconds()] + nulls + [pt[1]] + vals)
nulls.append(None)
return sorted(data)
try:
self.label = m.group('job_uuid')
except IndexError:
- self.label = 'container'
- if m.group('category').endswith(':'):
+ self.label = 'label #1'
+ category = m.group('category')
+ if category.endswith(':'):
# "stderr crunchstat: notice: ..."
continue
- elif m.group('category') in ('error', 'caught'):
+ elif category in ('error', 'caught'):
continue
- elif m.group('category') in ('read', 'open', 'cgroup', 'CID', 'Running'):
+ elif category in ('read', 'open', 'cgroup', 'CID', 'Running'):
# "stderr crunchstat: read /proc/1234/net/dev: ..."
# (old logs are less careful with unprefixed error messages)
continue
if group == 'interval' and this_interval_s:
stat = stat + '__rate'
val = val / this_interval_s
- if stat in ['user+sys__rate', 'tx+rx__rate']:
+ if stat in ['user+sys__rate', 'user__rate', 'sys__rate', 'tx+rx__rate', 'rx__rate', 'tx__rate']:
task.series[category, stat].append(
(timestamp - self.starttime, val))
else:
- if stat in ['rss']:
+ if stat in ['rss','used','total']:
task.series[category, stat].append(
(timestamp - self.starttime, val))
self.task_stats[task_id][category][stat] = val
(float(self.job_tot['blkio:0:0']['read']) /
float(self.job_tot['net:keep0']['rx']))
if self.job_tot['net:keep0']['rx'] > 0 else 0,
- lambda x: x * 100.0)):
+ lambda x: x * 100.0),
+ ('Temp disk utilization {}%',
+ (float(self.job_tot['statfs']['used']) /
+ float(self.job_tot['statfs']['total']))
+ if self.job_tot['statfs']['total'] > 0 else 0,
+ lambda x: x * 100.0),
+ ):
format_string, val, transform = args
if val == float('-Inf'):
continue
return itertools.chain(
self._recommend_cpu(),
self._recommend_ram(),
- self._recommend_keep_cache())
+ self._recommend_keep_cache(),
+ self._recommend_temp_disk(),
+ )
def _recommend_cpu(self):
"""Recommend asking for 4 cores if max CPU usage was 333%"""
math.ceil(asked_cache * 2 / self._runtime_constraint_mem_unit()))
+ def _recommend_temp_disk(self):
+ """Recommend decreasing temp disk if utilization < 50%"""
+ total = float(self.job_tot['statfs']['total'])
+ utilization = (float(self.job_tot['statfs']['used']) / total) if total > 0 else 0.0
+
+    if utilization < 0.5 and total > 0:
+ yield (
+ '#!! {} max temp disk utilization was {:.0f}% of {:.0f} MiB -- '
+ 'consider reducing "tmpdirMin" and/or "outdirMin"'
+ ).format(
+ self.label,
+ utilization * 100.0,
+ total / MB)
+
+
def _format(self, val):
"""Return a string representation of a stat.
'label': s.long_label(),
'charts': [
self.chartdata(s.label, s.tasks, stat)
- for stat in (('cpu', 'user+sys__rate'),
- ('mem', 'rss'),
- ('net:eth0', 'tx+rx__rate'),
- ('net:keep0', 'tx+rx__rate'))],
+ for stat in (('cpu', ['user+sys__rate', 'user__rate', 'sys__rate']),
+ ('mem', ['rss']),
+ ('net:eth0', ['tx+rx__rate','rx__rate','tx__rate']),
+ ('net:keep0', ['tx+rx__rate','rx__rate','tx__rate']),
+ ('statfs', ['used', 'total']),
+ )
+ ],
}
for s in self.summarizers]
# Max network speed in a single interval: 0.00MB/s
# Keep cache miss rate 0.00%
# Keep cache utilization 0.00%
+# Temp disk utilization 0.00%
# Max network speed in a single interval: 0.00MB/s
# Keep cache miss rate 0.00%
# Keep cache utilization 0.00%
-#!! container max RSS was 67 MiB -- try reducing runtime_constraints to "ram":1020054732
+# Temp disk utilization 1.21%
+#!! label #1 max RSS was 67 MiB -- try reducing runtime_constraints to "ram":1020054732
+#!! label #1 max temp disk utilization was 1% of 383960 MiB -- consider reducing "tmpdirMin" and/or "outdirMin"
# Max network speed in a single interval: 0.00MB/s
# Keep cache miss rate 0.00%
# Keep cache utilization 0.00%
+# Temp disk utilization 1.21%
#!! container max RSS was 67 MiB -- try reducing runtime_constraints to "ram":1020054732
+#!! container max temp disk utilization was 1% of 383960 MiB -- consider reducing "tmpdirMin" and/or "outdirMin"
# Max network speed in a single interval: 42.58MB/s
# Keep cache miss rate 0.00%
# Keep cache utilization 0.00%
+# Temp disk utilization 0.00%
#!! 4xphq-8i9sb-jq0ekny1xou3zoh max RSS was 334 MiB -- try reducing runtime_constraints to "min_ram_mb_per_node":972
# Max network speed in a single interval: 0.00MB/s
# Keep cache miss rate 0.00%
# Keep cache utilization 0.00%
+# Temp disk utilization 0.00%
#!! 4xphq-8i9sb-zvb2ocfycpomrup max RSS was 1 MiB -- try reducing runtime_constraints to "min_ram_mb_per_node":972
# Max network speed in a single interval: 0.00MB/s
# Keep cache miss rate 0.00%
# Keep cache utilization 0.00%
+# Temp disk utilization 0.00%
#!! 4xphq-8i9sb-v831jm2uq0g2g9x max RSS was 1 MiB -- try reducing runtime_constraints to "min_ram_mb_per_node":972
return UTF8Decode(gzip.open(self.arvmountlog))
mock_cr().open.side_effect = _open
args = crunchstat_summary.command.ArgumentParser().parse_args(
- ['--job', self.fake_request['uuid']])
+ ['--container', self.fake_request['uuid']])
cmd = crunchstat_summary.command.Command(args)
cmd.run()
self.diff_known_report(self.reportfile, cmd)