source 'https://rubygems.org'
-gem 'rails', '~> 4.2.0'
+gem 'rails', '~> 5.0.0'
gem 'arvados', '>= 0.1.20150511150219'
-gem 'activerecord-nulldb-adapter'
+gem 'activerecord-nulldb-adapter', git: 'https://github.com/curoverse/nulldb'
gem 'multi_json'
gem 'oj'
gem 'sass'
# Gems used only for assets and not required
# in production environments by default.
group :assets do
- gem 'sass-rails'
+ gem 'sassc-rails'
gem 'uglifier', '~> 2.0'
# See https://github.com/sstephenson/execjs#readme for more supported runtimes
end
group :test, :performance do
+ gem 'byebug'
gem 'rails-perftest'
gem 'ruby-prof'
gem 'rvm-capistrano'
gem 'simplecov', '~> 0.7', require: false
gem 'simplecov-rcov', require: false
gem 'mocha', require: false
+ gem 'rails-controller-testing'
end
gem 'jquery-rails'
-gem 'bootstrap-sass', '~> 3.1.0'
+gem 'bootstrap-sass', '~> 3.4.1'
gem 'bootstrap-x-editable-rails'
gem 'bootstrap-tab-history-rails'
gem 'less'
gem 'less-rails'
-gem 'wiselinks'
+
+# Wiselinks hasn't been updated for many years and relies on deprecated
+# Rails methods, so it has been removed from this application. If it is
+# ever needed again, use the curoverse fork until this upstream PR is accepted:
+# https://github.com/igor-alexandrov/wiselinks/pull/116
+# gem 'wiselinks', git: 'https://github.com/curoverse/wiselinks.git', branch: 'rails-5.1-compatibility'
+
gem 'sshkey'
# To use ActiveModel has_secure_password
+GIT
+ remote: https://github.com/curoverse/nulldb
+ revision: d8e0073b665acdd2537c5eb15178a60f02f4b413
+ specs:
+ activerecord-nulldb-adapter (0.3.9)
+ activerecord (>= 2.0.0)
+
GIT
remote: https://github.com/curoverse/themes_for_rails
- revision: 61154877047d2346890bda0b7be5827cf51a6a76
+ revision: ddf6e592b3b6493ea0c2de7b5d3faa120ed35be0
specs:
themes_for_rails (0.5.1)
rails (>= 3.0.0)
remote: https://rubygems.org/
specs:
RedCloth (4.3.2)
- actionmailer (4.2.11)
- actionpack (= 4.2.11)
- actionview (= 4.2.11)
- activejob (= 4.2.11)
+ actioncable (5.0.7.2)
+ actionpack (= 5.0.7.2)
+ nio4r (>= 1.2, < 3.0)
+ websocket-driver (~> 0.6.1)
+ actionmailer (5.0.7.2)
+ actionpack (= 5.0.7.2)
+ actionview (= 5.0.7.2)
+ activejob (= 5.0.7.2)
mail (~> 2.5, >= 2.5.4)
- rails-dom-testing (~> 1.0, >= 1.0.5)
- actionpack (4.2.11)
- actionview (= 4.2.11)
- activesupport (= 4.2.11)
- rack (~> 1.6)
- rack-test (~> 0.6.2)
- rails-dom-testing (~> 1.0, >= 1.0.5)
+ rails-dom-testing (~> 2.0)
+ actionpack (5.0.7.2)
+ actionview (= 5.0.7.2)
+ activesupport (= 5.0.7.2)
+ rack (~> 2.0)
+ rack-test (~> 0.6.3)
+ rails-dom-testing (~> 2.0)
rails-html-sanitizer (~> 1.0, >= 1.0.2)
- actionview (4.2.11)
- activesupport (= 4.2.11)
+ actionview (5.0.7.2)
+ activesupport (= 5.0.7.2)
builder (~> 3.1)
erubis (~> 2.7.0)
- rails-dom-testing (~> 1.0, >= 1.0.5)
+ rails-dom-testing (~> 2.0)
rails-html-sanitizer (~> 1.0, >= 1.0.3)
- activejob (4.2.11)
- activesupport (= 4.2.11)
- globalid (>= 0.3.0)
- activemodel (4.2.11)
- activesupport (= 4.2.11)
- builder (~> 3.1)
- activerecord (4.2.11)
- activemodel (= 4.2.11)
- activesupport (= 4.2.11)
- arel (~> 6.0)
- activerecord-nulldb-adapter (0.3.8)
- activerecord (>= 2.0.0)
- activesupport (4.2.11)
- i18n (~> 0.7)
+ activejob (5.0.7.2)
+ activesupport (= 5.0.7.2)
+ globalid (>= 0.3.6)
+ activemodel (5.0.7.2)
+ activesupport (= 5.0.7.2)
+ activerecord (5.0.7.2)
+ activemodel (= 5.0.7.2)
+ activesupport (= 5.0.7.2)
+ arel (~> 7.0)
+ activesupport (5.0.7.2)
+ concurrent-ruby (~> 1.0, >= 1.0.2)
+ i18n (>= 0.7, < 2)
minitest (~> 5.1)
- thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
- addressable (2.5.2)
+ addressable (2.6.0)
public_suffix (>= 2.0.2, < 4.0)
andand (1.3.3)
angularjs-rails (1.3.15)
- arel (6.0.4)
- arvados (0.1.20180302192246)
+ arel (7.1.4)
+ arvados (1.3.1.20190320201707)
activesupport (>= 3)
andand (~> 1.3, >= 1.3.3)
- google-api-client (>= 0.7, < 0.8.9)
+ arvados-google-api-client (>= 0.7, < 0.8.9)
i18n (~> 0)
json (>= 1.7.7, < 3)
jwt (>= 0.1.5, < 2)
+ arvados-google-api-client (0.8.7.2)
+ activesupport (>= 3.2, < 5.1)
+ addressable (~> 2.3)
+ autoparse (~> 0.3)
+ extlib (~> 0.9)
+ faraday (~> 0.9)
+ googleauth (~> 0.3)
+ launchy (~> 2.4)
+ multi_json (~> 1.10)
+ retriable (~> 1.4)
+ signet (~> 0.6)
autoparse (0.3.3)
addressable (>= 2.3.1)
extlib (>= 0.9.15)
multi_json (>= 1.0.0)
- bootstrap-sass (3.1.1.1)
- sass (~> 3.2)
+ autoprefixer-rails (9.5.1.1)
+ execjs
+ bootstrap-sass (3.4.1)
+ autoprefixer-rails (>= 5.2.1)
+ sassc (>= 2.0.0)
bootstrap-tab-history-rails (0.1.0)
railties (>= 3.1)
bootstrap-x-editable-rails (1.5.1.1)
railties (>= 3.0)
builder (3.2.3)
- byebug (10.0.0)
+ byebug (11.0.1)
capistrano (2.15.9)
highline
net-scp (>= 1.0.0)
execjs
coffee-script-source (1.12.2)
commonjs (0.2.7)
- concurrent-ruby (1.1.4)
+ concurrent-ruby (1.1.5)
crass (1.0.4)
deep_merge (1.2.1)
- docile (1.1.5)
+ docile (1.3.1)
erubis (2.7.0)
execjs (2.7.0)
extlib (0.9.16)
- faraday (0.14.0)
+ faraday (0.15.4)
multipart-post (>= 1.2, < 3)
- ffi (1.9.25)
+ ffi (1.10.0)
flamegraph (0.9.5)
- globalid (0.4.1)
+ globalid (0.4.2)
activesupport (>= 4.2.0)
- google-api-client (0.8.7)
- activesupport (>= 3.2, < 5.0)
- addressable (~> 2.3)
- autoparse (~> 0.3)
- extlib (~> 0.9)
- faraday (~> 0.9)
- googleauth (~> 0.3)
- launchy (~> 2.4)
- multi_json (~> 1.10)
- retriable (~> 1.4)
- signet (~> 0.6)
- googleauth (0.6.2)
+ googleauth (0.8.1)
faraday (~> 0.12)
jwt (>= 1.4, < 3.0)
- logging (~> 2.0)
- memoist (~> 0.12)
+ memoist (~> 0.16)
multi_json (~> 1.11)
- os (~> 0.9)
+ os (>= 0.9, < 2.0)
signet (~> 0.7)
- grease (0.3.1)
headless (1.0.2)
- highline (1.7.10)
+ highline (2.0.2)
httpclient (2.8.3)
i18n (0.9.5)
concurrent-ruby (~> 1.0)
- jquery-rails (3.1.4)
- railties (>= 3.0, < 5.0)
+ jquery-rails (4.3.3)
+ rails-dom-testing (>= 1, < 3)
+ railties (>= 4.2.0)
thor (>= 0.14, < 2.0)
- json (2.1.0)
+ json (2.2.0)
jwt (1.5.6)
launchy (2.4.3)
addressable (~> 2.3)
less (2.6.0)
commonjs (~> 0.2.7)
- less-rails (3.0.0)
- actionpack (>= 4.0)
- grease
+ less-rails (4.0.0)
+ actionpack (>= 4)
less (~> 2.6.0)
- sprockets (> 2, < 4)
- tilt
+ sprockets (>= 2)
libv8 (3.16.14.19)
- little-plugger (1.1.4)
- logging (2.2.2)
- little-plugger (~> 1.1)
- multi_json (~> 1.10)
- lograge (0.9.0)
+ lograge (0.10.0)
actionpack (>= 4)
activesupport (>= 4)
railties (>= 4)
mini_mime (>= 0.1.1)
memoist (0.16.0)
metaclass (0.0.4)
- mime-types (3.1)
+ method_source (0.9.2)
+ mime-types (3.2.2)
mime-types-data (~> 3.2015)
- mime-types-data (3.2016.0521)
+ mime-types-data (3.2019.0331)
mini_mime (1.0.1)
mini_portile2 (2.4.0)
minitest (5.10.3)
- mocha (1.3.0)
+ mocha (1.8.0)
metaclass (~> 0.0.1)
morrisjs-rails (0.5.1.2)
railties (> 3.1, < 6)
multi_json (1.13.1)
multipart-post (2.0.0)
- net-scp (1.2.1)
- net-ssh (>= 2.6.5)
+ net-scp (2.0.0)
+ net-ssh (>= 2.6.5, < 6.0.0)
net-sftp (2.1.2)
net-ssh (>= 2.6.5)
- net-ssh (4.2.0)
+ net-ssh (5.2.0)
net-ssh-gateway (2.0.0)
net-ssh (>= 4.0.0)
- nokogiri (1.9.1)
+ nio4r (2.3.1)
+ nokogiri (1.10.2)
mini_portile2 (~> 2.4.0)
npm-rails (0.2.1)
rails (>= 3.2)
- oj (3.6.4)
- os (0.9.6)
- passenger (5.2.1)
+ oj (3.7.11)
+ os (1.0.0)
+ passenger (6.0.2)
rack
rake (>= 0.8.1)
piwik_analytics (1.0.2)
cliver (~> 0.3.1)
multi_json (~> 1.0)
websocket-driver (>= 0.2.0)
- public_suffix (3.0.2)
- rack (1.6.11)
- rack-mini-profiler (0.10.7)
+ public_suffix (3.0.3)
+ rack (2.0.7)
+ rack-mini-profiler (1.0.2)
rack (>= 1.2.0)
rack-test (0.6.3)
rack (>= 1.0)
- rails (4.2.11)
- actionmailer (= 4.2.11)
- actionpack (= 4.2.11)
- actionview (= 4.2.11)
- activejob (= 4.2.11)
- activemodel (= 4.2.11)
- activerecord (= 4.2.11)
- activesupport (= 4.2.11)
- bundler (>= 1.3.0, < 2.0)
- railties (= 4.2.11)
- sprockets-rails
- rails-deprecated_sanitizer (1.0.3)
- activesupport (>= 4.2.0.alpha)
- rails-dom-testing (1.0.9)
- activesupport (>= 4.2.0, < 5.0)
- nokogiri (~> 1.6)
- rails-deprecated_sanitizer (>= 1.0.1)
+ rails (5.0.7.2)
+ actioncable (= 5.0.7.2)
+ actionmailer (= 5.0.7.2)
+ actionpack (= 5.0.7.2)
+ actionview (= 5.0.7.2)
+ activejob (= 5.0.7.2)
+ activemodel (= 5.0.7.2)
+ activerecord (= 5.0.7.2)
+ activesupport (= 5.0.7.2)
+ bundler (>= 1.3.0)
+ railties (= 5.0.7.2)
+ sprockets-rails (>= 2.0.0)
+ rails-controller-testing (1.0.4)
+ actionpack (>= 5.0.1.x)
+ actionview (>= 5.0.1.x)
+ activesupport (>= 5.0.1.x)
+ rails-dom-testing (2.0.3)
+ activesupport (>= 4.2.0)
+ nokogiri (>= 1.6)
rails-html-sanitizer (1.0.4)
loofah (~> 2.2, >= 2.2.2)
rails-perftest (0.0.7)
- railties (4.2.11)
- actionpack (= 4.2.11)
- activesupport (= 4.2.11)
+ railties (5.0.7.2)
+ actionpack (= 5.0.7.2)
+ activesupport (= 5.0.7.2)
+ method_source
rake (>= 0.8.7)
thor (>= 0.18.1, < 2.0)
rake (12.3.2)
raphael-rails (2.1.2)
rb-fsevent (0.10.3)
- rb-inotify (0.9.10)
- ffi (>= 0.5.0, < 2)
+ rb-inotify (0.10.0)
+ ffi (~> 1.0)
ref (2.0.0)
- request_store (1.4.0)
+ request_store (1.4.1)
rack (>= 1.4)
- responders (2.4.0)
- actionpack (>= 4.2.0, < 5.3)
- railties (>= 4.2.0, < 5.3)
+ responders (2.4.1)
+ actionpack (>= 4.2.0, < 6.0)
+ railties (>= 4.2.0, < 6.0)
retriable (1.4.1)
ruby-debug-passenger (0.2.0)
ruby-prof (0.17.0)
rubyzip (1.2.2)
rvm-capistrano (1.5.6)
capistrano (~> 2.15.4)
- safe_yaml (1.0.4)
- sass (3.5.5)
+ safe_yaml (1.0.5)
+ sass (3.7.4)
sass-listen (~> 4.0.0)
sass-listen (4.0.0)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
- sass-rails (5.0.7)
- railties (>= 4.0.0, < 6)
- sass (~> 3.1)
- sprockets (>= 2.8, < 4.0)
- sprockets-rails (>= 2.0, < 4.0)
- tilt (>= 1.1, < 3)
- selenium-webdriver (3.14.1)
+ sassc (2.0.1)
+ ffi (~> 1.9)
+ rake
+ sassc-rails (2.1.0)
+ railties (>= 4.0.0)
+ sassc (>= 2.0)
+ sprockets (> 3.0)
+ sprockets-rails
+ tilt
+ selenium-webdriver (3.141.0)
childprocess (~> 0.5)
rubyzip (~> 1.2, >= 1.2.2)
- signet (0.8.1)
+ signet (0.11.0)
addressable (~> 2.3)
faraday (~> 0.9)
jwt (>= 1.5, < 3.0)
multi_json (~> 1.10)
- simplecov (0.15.1)
- docile (~> 1.1.0)
+ simplecov (0.16.1)
+ docile (~> 1.1)
json (>= 1.8, < 3)
simplecov-html (~> 0.10.0)
simplecov-html (0.10.2)
actionpack (>= 4.0)
activesupport (>= 4.0)
sprockets (>= 3.0.0)
- sshkey (1.9.0)
+ sshkey (2.0.0)
therubyracer (0.12.3)
libv8 (~> 3.16.14.15)
ref
thor (0.20.3)
thread_safe (0.3.6)
- tilt (2.0.8)
+ tilt (2.0.9)
tzinfo (1.2.5)
thread_safe (~> 0.1)
uglifier (2.7.2)
execjs (>= 0.3.0)
json (>= 1.8.0)
- websocket-driver (0.7.0)
+ websocket-driver (0.6.5)
websocket-extensions (>= 0.1.0)
websocket-extensions (0.1.3)
- wiselinks (1.2.1)
xpath (2.1.0)
nokogiri (~> 1.3)
DEPENDENCIES
RedCloth
- activerecord-nulldb-adapter
+ activerecord-nulldb-adapter!
andand
angularjs-rails (~> 1.3.8)
arvados (>= 0.1.20150511150219)
- bootstrap-sass (~> 3.1.0)
+ bootstrap-sass (~> 3.4.1)
bootstrap-tab-history-rails
bootstrap-x-editable-rails
byebug
piwik_analytics
poltergeist (~> 1.5.1)
rack-mini-profiler
- rails (~> 4.2.0)
+ rails (~> 5.0.0)
+ rails-controller-testing
rails-perftest
raphael-rails
responders (~> 2.0)
rvm-capistrano
safe_yaml
sass
- sass-rails
+ sassc-rails
selenium-webdriver (~> 3)
simplecov (~> 0.7)
simplecov-rcov
themes_for_rails!
therubyracer
uglifier (~> 2.0)
- wiselinks
BUNDLED WITH
- 1.17.2
+ 1.17.3
//= require bootstrap/button
//= require bootstrap3-editable/bootstrap-editable
//= require bootstrap-tab-history
-//= require wiselinks
//= require angular
//= require raphael
//= require morris
* compiled file, but it's generally better to create a new file per style scope.
*
*= require_self
- *= require bootstrap
*= require bootstrap3-editable/bootstrap-editable
*= require morris
*= require awesomplete
*= require_tree .
*/
+@import "bootstrap-sprockets";
+@import "bootstrap";
+
.contain-align-left {
text-align: left;
}
# SPDX-License-Identifier: AGPL-3.0
require "arvados/collection"
+require "app_version"
class ActionsController < ApplicationController
# Skip require_thread_api_token if this is a show action
# for an object uuid that supports anonymous access.
- skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ skip_around_action :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
'show' == ctrl.action_name and
params['uuid'] and
model_class.in?([Collection, Group, Job, PipelineInstance, PipelineTemplate])
}
- skip_filter :require_thread_api_token, only: [:report_issue_popup, :report_issue]
- skip_filter :check_user_agreements, only: [:report_issue_popup, :report_issue]
+ skip_around_action :require_thread_api_token, only: [:report_issue_popup, :report_issue]
+ skip_before_action :check_user_agreements, only: [:report_issue_popup, :report_issue]
@@exposed_actions = {}
def self.expose_action method, &block
respond_to do |format|
IssueReporter.send_report(current_user, params).deliver
- format.js {render nothing: true}
+ format.js {render body: nil}
end
end
ERROR_ACTIONS = [:render_error, :render_not_found]
- around_filter :thread_clear
- around_filter :set_current_request_id
- around_filter :set_thread_api_token
+ around_action :thread_clear
+ around_action :set_current_request_id
+ around_action :set_thread_api_token
# Methods that don't require login should
- # skip_around_filter :require_thread_api_token
- around_filter :require_thread_api_token, except: ERROR_ACTIONS
- before_filter :ensure_arvados_api_exists, only: [:index, :show]
- before_filter :set_cache_buster
- before_filter :accept_uuid_as_id_param, except: ERROR_ACTIONS
- before_filter :check_user_agreements, except: ERROR_ACTIONS
- before_filter :check_user_profile, except: ERROR_ACTIONS
- before_filter :load_filters_and_paging_params, except: ERROR_ACTIONS
- before_filter :find_object_by_uuid, except: [:create, :index, :choose] + ERROR_ACTIONS
+ # skip_around_action :require_thread_api_token
+ around_action :require_thread_api_token, except: ERROR_ACTIONS
+ before_action :ensure_arvados_api_exists, only: [:index, :show]
+ before_action :set_cache_buster
+ before_action :accept_uuid_as_id_param, except: ERROR_ACTIONS
+ before_action :check_user_agreements, except: ERROR_ACTIONS
+ before_action :check_user_profile, except: ERROR_ACTIONS
+ before_action :load_filters_and_paging_params, except: ERROR_ACTIONS
+ before_action :find_object_by_uuid, except: [:create, :index, :choose] + ERROR_ACTIONS
theme :select_theme
begin
def update
@updates ||= params[@object.resource_param_name.to_sym]
+ if @updates.is_a? ActionController::Parameters
+ @updates = @updates.to_unsafe_hash
+ end
@updates.keys.each do |attr|
if @object.send(attr).is_a? Hash
if @updates[attr].is_a? String
if params[:merge] || params["merge_#{attr}".to_sym]
# Merge provided Hash with current Hash, instead of
# replacing.
+ if @updates[attr].is_a? ActionController::Parameters
+ @updates[attr] = @updates[attr].to_unsafe_hash
+ end
@updates[attr] = @object.send(attr).with_indifferent_access.
deep_merge(@updates[attr].with_indifferent_access)
end
class CollectionsController < ApplicationController
include ActionController::Live
- skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ skip_around_action :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
'show' == ctrl.action_name
}
- skip_around_filter(:require_thread_api_token,
+ skip_around_action(:require_thread_api_token,
only: [:show_file, :show_file_links])
- skip_before_filter(:find_object_by_uuid,
+ skip_before_action(:find_object_by_uuid,
only: [:provenance, :show_file, :show_file_links])
# We depend on show_file to display the user agreement:
- skip_before_filter :check_user_agreements, only: :show_file
- skip_before_filter :check_user_profile, only: :show_file
+ skip_before_action :check_user_agreements, only: :show_file
+ skip_before_action :check_user_profile, only: :show_file
RELATION_LIMIT = 5
end
def update
- updated_attr = params[:collection].each.select {|a| a[0].andand.start_with? 'rename-file-path:'}
+ updated_attr = params[:collection].to_unsafe_hash.each.select {|a| a[0].andand.start_with? 'rename-file-path:'}
if updated_attr.size > 0
# Is it file rename?
# SPDX-License-Identifier: AGPL-3.0
class ContainerRequestsController < ApplicationController
- skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ skip_around_action :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
'show' == ctrl.action_name
}
# SPDX-License-Identifier: AGPL-3.0
class ContainersController < ApplicationController
- skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ skip_around_action :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
'show' == ctrl.action_name
}
# SPDX-License-Identifier: AGPL-3.0
class HealthcheckController < ApplicationController
- skip_around_filter :thread_clear
- skip_around_filter :set_thread_api_token
- skip_around_filter :require_thread_api_token
- skip_before_filter :ensure_arvados_api_exists
- skip_before_filter :accept_uuid_as_id_param
- skip_before_filter :check_user_agreements
- skip_before_filter :check_user_profile
- skip_before_filter :load_filters_and_paging_params
- skip_before_filter :find_object_by_uuid
+ skip_around_action :thread_clear
+ skip_around_action :set_thread_api_token
+ skip_around_action :require_thread_api_token
+ skip_before_action :ensure_arvados_api_exists
+ skip_before_action :accept_uuid_as_id_param
+ skip_before_action :check_user_agreements
+ skip_before_action :check_user_profile
+ skip_before_action :load_filters_and_paging_params
+ skip_before_action :find_object_by_uuid
- before_filter :check_auth_header
+ before_action :check_auth_header
def check_auth_header
mgmt_token = Rails.configuration.ManagementToken
# SPDX-License-Identifier: AGPL-3.0
class JobsController < ApplicationController
- skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ skip_around_action :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
'show' == ctrl.action_name
}
# SPDX-License-Identifier: AGPL-3.0
class LogsController < ApplicationController
- before_filter :ensure_current_user_is_admin
+ before_action :ensure_current_user_is_admin
end
# SPDX-License-Identifier: AGPL-3.0
class PipelineInstancesController < ApplicationController
- skip_before_filter :find_object_by_uuid, only: :compare
- before_filter :find_objects_by_uuid, only: :compare
- skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ skip_before_action :find_object_by_uuid, only: :compare
+ before_action :find_objects_by_uuid, only: :compare
+ skip_around_action :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
'show' == ctrl.action_name
}
end
def update
- @updates ||= params[@object.class.to_s.underscore.singularize.to_sym]
+ @updates ||= params.to_unsafe_hash[@object.class.to_s.underscore.singularize.to_sym]
if (components = @updates[:components])
components.each do |cname, component|
if component[:script_parameters]
# SPDX-License-Identifier: AGPL-3.0
class PipelineTemplatesController < ApplicationController
- skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ skip_around_action :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
'show' == ctrl.action_name
}
# SPDX-License-Identifier: AGPL-3.0
class ProjectsController < ApplicationController
- before_filter :set_share_links, if: -> { defined? @object and @object}
- skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ before_action :set_share_links, if: -> { defined? @object and @object}
+ skip_around_action :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
%w(show tab_counts public).include? ctrl.action_name
}
# SPDX-License-Identifier: AGPL-3.0
class RepositoriesController < ApplicationController
- before_filter :set_share_links, if: -> { defined? @object }
+ before_action :set_share_links, if: -> { defined? @object }
def index_pane_list
%w(repositories help)
# SPDX-License-Identifier: AGPL-3.0
class SearchController < ApplicationController
- skip_before_filter :ensure_arvados_api_exists
+ skip_before_action :ensure_arvados_api_exists
def find_objects_for_index
search_what = Group
# SPDX-License-Identifier: AGPL-3.0
class SessionsController < ApplicationController
- skip_around_filter :require_thread_api_token, :only => [:destroy, :logged_out]
- skip_around_filter :set_thread_api_token, :only => [:destroy, :logged_out]
- skip_before_filter :find_object_by_uuid
- skip_before_filter :find_objects_for_index
- skip_before_filter :ensure_arvados_api_exists
+ skip_around_action :require_thread_api_token, :only => [:destroy, :logged_out]
+ skip_around_action :set_thread_api_token, :only => [:destroy, :logged_out]
+ skip_before_action :find_object_by_uuid
+ skip_before_action :find_objects_for_index, raise: false
+ skip_before_action :ensure_arvados_api_exists
def destroy
session.clear
#
# SPDX-License-Identifier: AGPL-3.0
+require "app_version"
+
class StatusController < ApplicationController
- skip_around_filter :require_thread_api_token
- skip_before_filter :find_object_by_uuid
+ skip_around_action :require_thread_api_token
+ skip_before_action :find_object_by_uuid
def status
# Allow non-credentialed cross-origin requests
headers['Access-Control-Allow-Origin'] = '*'
# SPDX-License-Identifier: AGPL-3.0
class TestsController < ApplicationController
- skip_before_filter :find_object_by_uuid
+ skip_before_action :find_object_by_uuid
def mithril
end
end
# SPDX-License-Identifier: AGPL-3.0
class UserAgreementsController < ApplicationController
- skip_before_filter :check_user_agreements
- skip_before_filter :find_object_by_uuid
- skip_before_filter :check_user_profile
+ skip_before_action :check_user_agreements
+ skip_before_action :find_object_by_uuid
+ skip_before_action :check_user_profile
def index
if unsigned_user_agreements.empty?
# SPDX-License-Identifier: AGPL-3.0
class UsersController < ApplicationController
- skip_around_filter :require_thread_api_token, only: :welcome
- skip_before_filter :check_user_agreements, only: [:welcome, :inactive, :link_account, :merge]
- skip_before_filter :check_user_profile, only: [:welcome, :inactive, :profile, :link_account, :merge]
- skip_before_filter :find_object_by_uuid, only: [:welcome, :activity, :storage]
- before_filter :ensure_current_user_is_admin, only: [:sudo, :unsetup, :setup]
+ skip_around_action :require_thread_api_token, only: :welcome
+ skip_before_action :check_user_agreements, only: [:welcome, :inactive, :link_account, :merge]
+ skip_before_action :check_user_profile, only: [:welcome, :inactive, :profile, :link_account, :merge]
+ skip_before_action :find_object_by_uuid, only: [:welcome, :activity, :storage]
+ before_action :ensure_current_user_is_admin, only: [:sudo, :unsetup, :setup]
def show
if params[:uuid] == current_user.uuid
# SPDX-License-Identifier: AGPL-3.0
class WebsocketController < ApplicationController
- skip_before_filter :find_objects_for_index
+ skip_before_action :find_objects_for_index, raise: false
def index
end
# SPDX-License-Identifier: AGPL-3.0
class WorkUnitsController < ApplicationController
- skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ skip_around_action :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
'show_child_component' == ctrl.action_name
}
# SPDX-License-Identifier: AGPL-3.0
class WorkflowsController < ApplicationController
- skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ skip_around_action :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
'show' == ctrl.action_name
}
end
def human_readable_bytes_html(n)
- return h(n) unless n.is_a? Fixnum
+ return h(n) unless n.is_a? Integer
return "0 bytes" if (n == 0)
orders = {
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ApplicationRecord < ActiveRecord::Base
+ self.abstract_class = true
+end
\ No newline at end of file
#
# SPDX-License-Identifier: AGPL-3.0
-class ArvadosBase < ActiveRecord::Base
- self.abstract_class = true
+class ArvadosBase
+ include ActiveModel::Validations
+ include ActiveModel::Conversion
+ include ActiveModel::Serialization
+ include ActiveModel::Dirty
+ include ActiveModel::AttributeAssignment
+ extend ActiveModel::Naming
+
+ Column = Struct.new("Column", :name)
+
attr_accessor :attribute_sortkey
attr_accessor :create_params
+ class Error < StandardError; end
+
+ module Type
+ class Hash < ActiveModel::Type::Value
+ def type
+ :hash
+ end
+
+ def default_value
+ {}
+ end
+
+ private
+ def cast_value(value)
+ (value.class == String) ? ::JSON.parse(value) : value
+ end
+ end
+
+ class Array < ActiveModel::Type::Value
+ def type
+ :array
+ end
+
+ def default_value
+ []
+ end
+
+ private
+ def cast_value(value)
+ (value.class == String) ? ::JSON.parse(value) : value
+ end
+ end
+ end
+
def self.arvados_api_client
ArvadosApiClient.new_or_current
end
end
def initialize raw_params={}, create_params={}
- super self.class.permit_attribute_params(raw_params)
+ self.class.permit_attribute_params(raw_params)
@create_params = create_params
@attribute_sortkey ||= {
'id' => nil,
'uuid' => '999',
}
@loaded_attributes = {}
+ attributes = self.class.columns.map { |c| [c.name.to_sym, nil] }.to_h.merge(raw_params)
+ attributes.symbolize_keys.each do |name, value|
+ send("#{name}=", value)
+ end
end
def self.columns
else
# Hash, Array
@discovered_columns << column(k, coldef[:type], coldef[:type].constantize.new)
- serialize k, coldef[:type].constantize
- end
- define_method k do
- unless new_record? or @loaded_attributes.include? k.to_s
- Rails.logger.debug "BUG: access non-loaded attribute #{k}"
- # We should...
- # raise ActiveModel::MissingAttributeError, "missing attribute: #{k}"
- end
- super()
end
+ attr_reader k
@attribute_info[k] = coldef
end
end
@discovered_columns
end
+ def new_record?
+ # Object#dup copies @uuid, so a duplicated record would otherwise appear
+ # persisted; initialize_dup sets @new_record so the copy counts as new.
+ @uuid.nil? || @new_record || false
+ end
+
+ def initialize_dup(other)
+ super
+ @new_record = true
+ @created_at = nil
+ end
+
def self.column(name, sql_type = nil, default = nil, null = true)
- if sql_type == 'datetime'
- cast_type = "ActiveRecord::Type::DateTime".constantize.new
- else
- cast_type = ActiveRecord::Base.connection.lookup_cast_type(sql_type)
+ caster = case sql_type
+ when 'integer'
+ ActiveModel::Type::Integer
+ when 'string', 'text'
+ ActiveModel::Type::String
+ when 'float'
+ ActiveModel::Type::Float
+ when 'datetime'
+ ActiveModel::Type::DateTime
+ when 'boolean'
+ ActiveModel::Type::Boolean
+ when 'Hash'
+ ArvadosBase::Type::Hash
+ when 'Array'
+ ArvadosBase::Type::Array
+ when 'jsonb'
+ ArvadosBase::Type::Hash
+ else
+ raise ArvadosBase::Error.new("Type unknown: #{sql_type}")
+ end
+ define_method "#{name}=" do |val|
+ val = default if val.nil?
+ casted_value = caster.new.cast(val)
+ attribute_will_change!(name) if send(name) != casted_value
+ set_attribute_after_cast(name, casted_value)
end
- ActiveRecord::ConnectionAdapters::Column.new(name.to_s, default, cast_type, sql_type.to_s, null)
+ Column.new(name.to_s)
+ end
+
+ def set_attribute_after_cast(name, casted_value)
+ instance_variable_set("@#{name}", casted_value)
+ end
+
+ def [](attr_name)
+ begin
+ send(attr_name)
+ rescue
+ Rails.logger.debug "BUG: access non-loaded attribute #{attr_name}"
+ nil
+ end
+ end
+
+ def []=(attr_name, attr_val)
+ send("#{attr_name}=", attr_val)
end
def self.attribute_info
# The following permit! is necessary even with
# "ActionController::Parameters.permit_all_parameters = true",
# because permit_all does not permit nested attributes.
- ActionController::Parameters.new(raw_params).permit!
+ if !raw_params.is_a? ActionController::Parameters
+ raw_params = ActionController::Parameters.new(raw_params)
+ end
+ raw_params.permit!
end
def self.create raw_params={}, create_params={}
- x = super(permit_attribute_params(raw_params))
- x.create_params = create_params
+ x = new(permit_attribute_params(raw_params), create_params)
+ x.save
x
end
+ def self.create! raw_params={}, create_params={}
+ x = new(permit_attribute_params(raw_params), create_params)
+ x.save!
+ x
+ end
+
+ def self.table_name
+ self.name.underscore.pluralize.downcase
+ end
+
def update_attributes raw_params={}
- super(self.class.permit_attribute_params(raw_params))
+ assign_attributes(self.class.permit_attribute_params(raw_params))
+ save
+ end
+
+ def update_attributes! raw_params={}
+ assign_attributes(self.class.permit_attribute_params(raw_params))
+ save!
end
def save
obdata.delete :uuid
resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
else
- postdata.merge!(@create_params) if @create_params
+ if @create_params
+ @create_params = @create_params.to_unsafe_hash if @create_params.is_a? ActionController::Parameters
+ postdata.merge!(@create_params)
+ end
resp = arvados_api_client.api(self.class, '', postdata)
end
return false if !resp[:etag] || !resp[:uuid]
self.save or raise Exception.new("Save failed")
end
+ def persisted?
+ (!new_record? && !destroyed?) ? true : false
+ end
+
+ def destroyed?
+ !(new_record? || etag || uuid)
+ end
+
def destroy
if etag || uuid
postdata = { '_method' => 'DELETE' }
forget_uuid!
end
+ def attributes
+ kv = self.class.columns.collect {|c| c.name}.map {|key| [key, send(key)]}
+ kv.to_h
+ end
+
def attributes_for_display
self.attributes.reject { |k,v|
attribute_sortkey.has_key?(k) and !attribute_sortkey[k]
false
end
- def self.creatable?
+ def self.creatable?
current_user and current_user.is_admin
- end
+ end
end
<% else %>
data-object-uuid="<%= @object.uuid %>"
<% end %>
- data-pane-content-url="<%= url_for(params.merge(tab_pane: pane_name)) %>"
+ data-pane-content-url="<%= url_for(params.permit!.merge(tab_pane: pane_name)) %>"
style="margin-top:0.5em;"
>
<div class="pane-content">
<% content_for :tab_line_buttons do %>
<div class="pane-loaded arv-log-event-listener arv-refresh-on-state-change"
- data-pane-content-url="<%= url_for(params.merge(tab_pane: "job_buttons")) %>"
+ data-pane-content-url="<%= url_for(params.permit!.merge(tab_pane: "job_buttons")) %>"
data-object-uuid="<%= @object.uuid %>"
style="display: inline">
<%= render partial: 'show_job_buttons', locals: {object: @object}%>
<div id="pipeline-instance-tab-buttons"
class="pane-loaded arv-log-event-listener arv-refresh-on-state-change"
- data-pane-content-url="<%= url_for(params.merge(tab_pane: "tab_buttons")) %>"
+ data-pane-content-url="<%= url_for(params.permit!.merge(tab_pane: "tab_buttons")) %>"
data-object-uuid="<%= @object.uuid %>"
>
<%= render partial: 'show_tab_buttons', locals: {object: @object}%>
--- /dev/null
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__)
+load Gem.bin_path('bundler', 'bundle')
--- /dev/null
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+APP_PATH = File.expand_path('../config/application', __dir__)
+require_relative '../config/boot'
+require 'rails/commands'
--- /dev/null
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require_relative '../config/boot'
+require 'rake'
+Rake.application.run
--- /dev/null
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'pathname'
+require 'fileutils'
+include FileUtils
+
+# path to your application root.
+APP_ROOT = Pathname.new File.expand_path('../../', __FILE__)
+
+def system!(*args)
+ system(*args) || abort("\n== Command #{args} failed ==")
+end
+
+chdir APP_ROOT do
+  # This script is a starting point to set up your application.
+ # Add necessary setup steps to this file.
+
+ puts '== Installing dependencies =='
+ system! 'gem install bundler --conservative'
+ system('bundle check') || system!('bundle install')
+
+ # puts "\n== Copying sample files =="
+ # unless File.exist?('config/database.yml')
+ # cp 'config/database.yml.sample', 'config/database.yml'
+ # end
+
+ puts "\n== Preparing database =="
+ system! 'bin/rails db:setup'
+
+ puts "\n== Removing old logs and tempfiles =="
+ system! 'bin/rails log:clear tmp:clear'
+
+ puts "\n== Restarting application server =="
+ system! 'bin/rails restart'
+end
--- /dev/null
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'pathname'
+require 'fileutils'
+include FileUtils
+
+# path to your application root.
+APP_ROOT = Pathname.new File.expand_path('../../', __FILE__)
+
+def system!(*args)
+ system(*args) || abort("\n== Command #{args} failed ==")
+end
+
+chdir APP_ROOT do
+ # This script is a way to update your development environment automatically.
+ # Add necessary update steps to this file.
+
+ puts '== Installing dependencies =='
+ system! 'gem install bundler --conservative'
+ system('bundle check') || system!('bundle install')
+
+ puts "\n== Updating database =="
+ system! 'bin/rails db:migrate'
+
+ puts "\n== Removing old logs and tempfiles =="
+ system! 'bin/rails log:clear tmp:clear'
+
+ puts "\n== Restarting application server =="
+ system! 'bin/rails restart'
+end
eager_load: true
consider_all_requests_local: false
action_controller.perform_caching: true
- serve_static_files: false
assets.compile: false
assets.digest: true
i18n.fallbacks: true
test:
cache_classes: true
eager_load: false
- serve_static_files: true
- static_cache_control: public, max-age=3600
consider_all_requests_local: true
action_controller.perform_caching: false
action_dispatch.show_exceptions: false
# would be enabled in a collection's show page.
# It is sufficient to list only applications here.
# No need to list text and image types.
- application_mimetypes_with_view_icon: [cwl, fasta, go, javascript, json, pdf, python, r, rtf, sam, x-sh, vnd.realvnc.bed, xml, xsl]
+ application_mimetypes_with_view_icon: [cwl, fasta, go, javascript, json, pdf, python, x-python, r, rtf, sam, x-sh, vnd.realvnc.bed, xml, xsl]
# the maximum number of bytes to load in the log viewer
log_viewer_max_bytes: 1000000
require File.expand_path('../boot', __FILE__)
-require 'rails/all'
+require "rails"
+# Pick only the frameworks we need:
+require "active_model/railtie"
+require "active_job/railtie"
+require "active_record/railtie"
+require "action_controller/railtie"
+require "action_mailer/railtie"
+require "action_view/railtie"
+# Skip ActionCable (new in Rails 5.0) as it adds '/cable' routes that we're not using
+# require "action_cable/engine"
+require "sprockets/railtie"
+require "rails/test_unit/railtie"
Bundler.require(:default, Rails.env)
# -- all .rb files in that directory are automatically loaded.
# Custom directories with classes and modules you want to be autoloadable.
+ # Autoload paths shouldn't be used anymore since Rails 5.0
+ # See #15258 and https://github.com/rails/rails/issues/13142#issuecomment-74586224
# config.autoload_paths += %W(#{config.root}/extras)
- config.autoload_paths += %W(#{config.root}/lib)
# Only load the plugins named here, in the order given (default is alphabetical).
# :all can be used as a placeholder for all plugins not explicitly named.
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+development:
+ adapter: async
+
+test:
+ adapter: async
+
+production:
+ adapter: redis
+ url: redis://localhost:6379/1
# SPDX-License-Identifier: AGPL-3.0
# Load the rails application
-require File.expand_path('../application', __FILE__)
+require_relative 'application'
# Initialize the rails application
-ArvadosWorkbench::Application.initialize!
+Rails.application.initialize!
config.action_controller.perform_caching = true
# Disable Rails's static asset server (Apache or nginx will already do this)
- config.serve_static_files = false
+ config.public_file_server.enabled = false
# Compress JavaScripts and CSS
config.assets.js_compressor = :uglifier
config.cache_classes = true
# Configure static asset server for tests with Cache-Control for performance
- config.serve_static_files = true
- config.static_cache_control = "public, max-age=3600"
+ config.public_file_server.enabled = true
+ config.public_file_server.headers = { 'Cache-Control' => 'public, max-age=3600' }
# Show full error reports and disable caching
config.consider_all_requests_local = true
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# ActiveSupport::Reloader.to_prepare do
+# ApplicationController.renderer.defaults.merge!(
+# http_host: 'example.org',
+# https: false
+# )
+# end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Version of your assets, change this if you want to expire all your assets.
+Rails.application.config.assets.version = '1.0'
+
+# Add additional assets to the asset load path
+# Rails.application.config.assets.paths << Emoji.images_path
+
+# Precompile additional assets.
+# application.js, application.css, and all non-JS/CSS in app/assets folder are already added.
+# Rails.application.config.assets.precompile += %w( search.js )
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Specify a serializer for the signed and encrypted cookie jars.
+# Valid options are :json, :marshal, and :hybrid.
+Rails.application.config.action_dispatch.cookies_serializer = :marshal
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Configure sensitive parameters which will be filtered from the log file.
+Rails.application.config.filter_parameters += [:password]
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+#
+# This file contains migration options to ease your Rails 5.0 upgrade.
+#
+# Once upgraded flip defaults one by one to migrate to the new default.
+#
+# Read the Guide for Upgrading Ruby on Rails for more info on each option.
+
+Rails.application.config.action_controller.raise_on_unfiltered_parameters = true
+
+# Per-form CSRF tokens. The new Rails 5 default is true; keep the previous
+# behavior (false) until the app is verified against per-form tokens.
+Rails.application.config.action_controller.per_form_csrf_tokens = false
+
+# Origin-checking CSRF mitigation. The new Rails 5 default is true; keep the
+# previous behavior (false) for now.
+Rails.application.config.action_controller.forgery_protection_origin_check = false
+
+# Whether Ruby 2.4 preserves the timezone of the receiver when calling
+# `to_time`. The new default is true; keep the previous behavior (false).
+ActiveSupport.to_time_preserves_timezone = false
+
+# Whether `belongs_to` associations are required by default. The new Rails 5
+# default is true; keep the previous behavior (false) for now.
+Rails.application.config.active_record.belongs_to_required_by_default = false
+
+# Whether callback chains halt when a callback returns false. The new Rails 5
+# default is false (do not halt); keep the previous behavior (true) for now.
+ActiveSupport.halt_callback_chains_on_return_false = true
# Be sure to restart your server when you modify this file.
-ArvadosWorkbench::Application.config.session_store :cookie_store, key: '_arvados_workbench_session'
+Rails.application.config.session_store :cookie_store, key: '_arvados_workbench_session'
# Use the database for sessions instead of the cookie-based default,
# which shouldn't be used to store highly confidential information
#
# SPDX-License-Identifier: AGPL-3.0
+require 'config_validators'
+
include ConfigValidators
ConfigValidators::validate_wb2_url_config()
\ No newline at end of file
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Puma can serve each request in a thread from an internal thread pool.
+# The `threads` method setting takes two numbers: a minimum and a maximum.
+# Any libraries that use thread pools should be configured to match
+# the maximum value specified for Puma. Default is set to 5 threads for minimum
+# and maximum, this matches the default thread size of Active Record.
+#
+threads_count = ENV.fetch("RAILS_MAX_THREADS") { 5 }.to_i
+threads threads_count, threads_count
+
+# Specifies the `port` that Puma will listen on to receive requests, default is 3000.
+#
+port ENV.fetch("PORT") { 3000 }
+
+# Specifies the `environment` that Puma will run in.
+#
+environment ENV.fetch("RAILS_ENV") { "development" }
+
+# Specifies the number of `workers` to boot in clustered mode.
+# Workers are forked webserver processes. If using threads and workers together
+# the concurrency of the application would be max `threads` * `workers`.
+# Workers do not work on JRuby or Windows (both of which do not support
+# processes).
+#
+# workers ENV.fetch("WEB_CONCURRENCY") { 2 }
+
+# Use the `preload_app!` method when specifying a `workers` number.
+# This directive tells Puma to first boot the application and load code
+# before forking the application. This takes advantage of Copy On Write
+# process behavior so workers use less memory. If you use this option
+# you need to make sure to reconnect any threads in the `on_worker_boot`
+# block.
+#
+# preload_app!
+
+# The code in the `on_worker_boot` will be called if you are using
+# clustered mode by specifying a number of `workers`. After each worker
+# process is booted this block will be run, if you are using `preload_app!`
+# option you will want to use this block to reconnect to any threads
+# or connections that may have been created at application boot; Ruby
+# cannot share connections between processes.
+#
+# on_worker_boot do
+# ActiveRecord::Base.establish_connection if defined?(ActiveRecord)
+# end
+
+# Allow puma to be restarted by `rails restart` command.
+plugin :tmp_restart
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Your secret key is used for verifying the integrity of signed cookies.
+# If you change this key, all old signed cookies will become invalid!
+
+# Make sure the secret is at least 30 characters and all random,
+# no regular words or you'll be exposed to dictionary attacks.
+# You can use `rails secret` to generate a secure secret key.
+
+# Make sure the secrets in this file are kept private
+# if you're sharing your code publicly.
+
+development:
+ secret_key_base: 33e2d171ec6c67cf8e9a9fbfadc1071328bdab761297e2fe28b9db7613dd542c1ba3bdb3bd3e636d1d6f74ab73a2d90c4e9c0ecc14fde8ccd153045f94e9cc41
+
+test:
+ secret_key_base: d4c07cab3530fccf5d86565ecdc359eb2a853b8ede3b06edb2885e4423d7a726f50a3e415bb940fd4861e8fec16459665fd377acc8cdd98ea63294d2e0d12bb2
+
+# Do not keep production secrets in the repository,
+# instead read values from the environment.
+production:
+ secret_key_base: <%= ENV["SECRET_KEY_BASE"] %>
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+%w(
+ .ruby-version
+ .rbenv-vars
+ tmp/restart.txt
+ tmp/caching-dev.txt
+).each { |path| Spring.watch(path) }
class ActionsControllerTest < ActionController::TestCase
test "send report" do
- post :report_issue, {format: 'js'}, session_for(:admin)
+ post :report_issue, params: {format: 'js'}, session: session_for(:admin)
assert_response :success
found_email = false
end
test "combine files into new collection" do
- post(:combine_selected_files_into_collection, {
+ post(:combine_selected_files_into_collection, params: {
selection: ['zzzzz-4zz18-znfnqtbbv4spc3w/foo',
'zzzzz-4zz18-ehbhgtheo8909or/bar',
'zzzzz-4zz18-y9vne9npefyxh8g/baz',
'7a6ef4c162a5c6413070a8bd0bffc818+150'],
format: "json"},
- session_for(:active))
+ session: session_for(:active))
assert_response 302 # collection created and redirected to new collection page
end
test "combine files with repeated names into new collection" do
- post(:combine_selected_files_into_collection, {
+ post(:combine_selected_files_into_collection, params: {
selection: ['zzzzz-4zz18-znfnqtbbv4spc3w/foo',
'zzzzz-4zz18-00000nonamecoll/foo',
'zzzzz-4zz18-abcd6fx123409f7/foo',
'zzzzz-4zz18-y9vne9npefyxh8g/baz',
'7a6ef4c162a5c6413070a8bd0bffc818+150'],
format: "json"},
- session_for(:active))
+ session: session_for(:active))
assert_response 302 # collection created and redirected to new collection page
end
test "combine collections with repeated filenames in almost similar directories and expect files with proper suffixes" do
- post(:combine_selected_files_into_collection, {
+ post(:combine_selected_files_into_collection, params: {
selection: ['zzzzz-4zz18-duplicatenames1',
'zzzzz-4zz18-duplicatenames2',
'zzzzz-4zz18-znfnqtbbv4spc3w/foo',
'zzzzz-4zz18-00000nonamecoll/foo',],
format: "json"},
- session_for(:active))
+ session: session_for(:active))
assert_response 302 # collection created and redirected to new collection page
end
test "combine collections with same filename in two different streams and expect no suffixes for filenames" do
- post(:combine_selected_files_into_collection, {
+ post(:combine_selected_files_into_collection, params: {
selection: ['zzzzz-4zz18-znfnqtbbv4spc3w',
'zzzzz-4zz18-foonbarfilesdir'],
format: "json"},
- session_for(:active))
+ session: session_for(:active))
assert_response 302 # collection created and redirected to new collection page
end
test "combine foo files from two different collection streams and expect proper filename suffixes" do
- post(:combine_selected_files_into_collection, {
+ post(:combine_selected_files_into_collection, params: {
selection: ['zzzzz-4zz18-znfnqtbbv4spc3w/foo',
'zzzzz-4zz18-foonbarfilesdir/dir1/foo'],
format: "json"},
- session_for(:active))
+ session: session_for(:active))
assert_response 302 # collection created and redirected to new collection page
].each do |dm, fixture|
test "access show method for public #{dm} and expect to see page" do
Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
- get(:show, {uuid: api_fixture(dm)[fixture]['uuid']})
+ get(:show, params: {uuid: api_fixture(dm)[fixture]['uuid']})
assert_response :redirect
if dm == 'groups'
assert_includes @response.redirect_url, "projects/#{fixture['uuid']}"
].each do |dm, fixture, expected|
test "access show method for non-public #{dm} and expect #{expected}" do
Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
- get(:show, {uuid: api_fixture(dm)[fixture]['uuid']})
+ get(:show, params: {uuid: api_fixture(dm)[fixture]['uuid']})
assert_response expected
if expected == 404
assert_includes @response.inspect, 'Log in'
# We're really testing ApplicationController's find_object_by_uuid.
# It's easiest to do that by instantiating a concrete controller.
@controller = NodesController.new
- get(:show, {id: "zzzzz-zzzzz-zzzzzzzzzzzzzzz"}, session_for(:admin))
+ get(:show, params: {id: "zzzzz-zzzzz-zzzzzzzzzzzzzzz"}, session: session_for(:admin))
assert_response 404
end
api_fixture("api_client_authorizations", "anonymous", "api_token")
@controller = ProjectsController.new
test_uuid = "zzzzz-j7d0g-zzzzzzzzzzzzzzz"
- get(:show, {id: test_uuid})
+ get(:show, params: {id: test_uuid})
assert_not_nil got_header
assert_includes got_header, 'X-Request-Id'
test "current request_id is nil after a request" do
@controller = NodesController.new
- get(:index, {}, session_for(:active))
+ get(:index, params: {}, session: session_for(:active))
assert_nil Thread.current[:request_id]
end
test "X-Request-Id header" do
@controller = NodesController.new
- get(:index, {}, session_for(:active))
+ get(:index, params: {}, session: session_for(:active))
assert_match /^req-[0-9a-zA-Z]{20}$/, response.headers['X-Request-Id']
end
api_fixture("api_client_authorizations", "anonymous", "api_token")
@controller = ProjectsController.new
test_uuid = "zzzzz-j7d0g-zzzzzzzzzzzzzzz"
- get(:show, {id: test_uuid})
+ get(:show, params: {id: test_uuid})
login_link = css_select(css_selector).first
assert_not_nil(login_link, "failed to select login link")
login_href = URI.unescape(login_link.attributes["href"].value)
# network. 100::/64 is the IPv6 discard prefix, so it's perfect.
Rails.configuration.arvados_v1_base = "https://[100::f]:1/"
@controller = NodesController.new
- get(:index, {}, session_for(:active))
+ get(:index, params: {}, session: session_for(:active))
assert_includes(405..422, @response.code.to_i,
"bad response code when API server is unreachable")
ensure
@controller = controller
- get(:show, {id: fixture['uuid']})
+ get(:show, params: {id: fixture['uuid']})
if anon_config
assert_response 200
Rails.configuration.include_accept_encoding_header_in_api_requests = config
@controller = CollectionsController.new
- get(:show, {id: api_fixture('collections')['foo_file']['uuid']}, session_for(:admin))
+ get(:show, params: {id: api_fixture('collections')['foo_file']['uuid']}, session: session_for(:admin))
assert_equal([['.', 'foo', 3]], assigns(:object).files)
end
test 'Edit name and verify that a duplicate is not created' do
@controller = ProjectsController.new
project = api_fixture("groups")["aproject"]
- post :update, {
+ post :update, params: {
id: project["uuid"],
project: {
name: 'test name'
},
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_includes @response.body, 'test name'
updated = assigns(:object)
assert_equal updated.uuid, project["uuid"]
test "access #{controller.controller_name} index as admin and verify Home link is#{' not' if !expect_home_link} shown" do
@controller = controller
- get :index, {}, session_for(:admin)
+ get :index, params: {}, session: session_for(:admin)
assert_response 200
assert_includes @response.body, expect_str
test "access #{controller.controller_name} index as admin and verify Delete option is#{' not' if !expect_delete_link} shown" do
@controller = controller
- get :index, {}, session_for(:admin)
+ get :index, params: {}, session: session_for(:admin)
assert_response 200
assert_includes @response.body, expect_str
def assert_hash_includes(actual_hash, expected_hash, msg=nil)
expected_hash.each do |key, value|
- assert_equal(value, actual_hash[key], msg)
+ if value.nil?
+ assert_nil(actual_hash[key], msg)
+ else
+ assert_equal(value, actual_hash[key], msg)
+ end
end
end
def show_collection(params, session={}, response=:success)
params = collection_params(params) if not params.is_a? Hash
session = session_for(session) if not session.is_a? Hash
- get(:show, params, session)
+ get(:show, params: params, session: session)
assert_response response
end
test "download a file with spaces in filename" do
setup_for_keep_web
collection = api_fixture('collections')['w_a_z_file']
- get :show_file, {
+ get :show_file, params: {
uuid: collection['uuid'],
file: 'w a z'
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :redirect
assert_match /w%20a%20z/, response.redirect_url
end
test "viewing collection files with a reader token" do
params = collection_params(:foo_file)
params[:reader_token] = api_token("active_all_collections")
- get(:show_file_links, params)
+ get(:show_file_links, params: params)
assert_response :redirect
assert_no_session
end
setup_for_keep_web
params = collection_params(:foo_file, "foo")
params[:reader_token] = api_token("active_all_collections")
- get(:show_file, params)
+ get(:show_file, params: params)
assert_response :redirect
assert_match /foo/, response.redirect_url
assert_no_session
test "reader token Collection links end with trailing slash" do
# Testing the fix for #2937.
session = session_for(:active_trustedclient)
- post(:share, collection_params(:foo_file), session)
+ post(:share, params: collection_params(:foo_file), session: session)
assert(@controller.download_link.ends_with? '/',
"Collection share link does not end with slash for wget")
end
setup_for_keep_web
params = collection_params(:foo_file, 'foo')
sess = session_for(:active)
- get(:show_file, params, sess)
+ get(:show_file, params: params, session: sess)
assert_response :redirect
assert_match /foo/, response.redirect_url
end
test 'anonymous download' do
setup_for_keep_web
config_anonymous true
- get :show_file, {
+ get :show_file, params: {
uuid: api_fixture('collections')['user_agreement_in_anonymously_accessible_project']['uuid'],
file: 'GNU_General_Public_License,_version_3.pdf',
}
test "can't get a file from Keep without permission" do
params = collection_params(:foo_file, 'foo')
sess = session_for(:spectator)
- get(:show_file, params, sess)
+ get(:show_file, params: params, session: sess)
assert_response 404
end
params = collection_params(:foo_file, 'foo')
read_token = api_token('active')
params[:reader_token] = read_token
- get(:show_file, params)
+ get(:show_file, params: params)
assert_response :redirect
assert_match /foo/, response.redirect_url
assert_not_equal(read_token, session[:arvados_api_token],
params = collection_params(:foo_file, 'foo')
params[:reader_token] =
api_token('active_noscope')
- get(:show_file, params)
+ get(:show_file, params: params)
if anon
# Some files can be shown without a valid token, but not this one.
assert_response 404
sess = session_for(:expired)
read_token = api_token('active')
params[:reader_token] = read_token
- get(:show_file, params, sess)
+ get(:show_file, params: params, session: sess)
assert_response :redirect
assert_not_equal(read_token, session[:arvados_api_token],
"using a reader token set the session's API token")
ua_collection = api_fixture('collections')['user_agreement']
# Here we don't test whether the agreement can be retrieved from
# Keep. We only test that show_file decides to send file content.
- get :show_file, {
+ get :show_file, params: {
uuid: ua_collection['uuid'],
file: ua_collection['manifest_text'].match(/ \d+:\d+:(\S+)/)[1]
- }, session_for(:inactive)
+ }, session: session_for(:inactive)
assert_nil(assigns(:unsigned_user_agreements),
"Did not skip check_user_agreements filter " +
"when showing the user agreement.")
test "show file in a subdirectory of a collection" do
setup_for_keep_web
params = collection_params(:collection_with_files_in_subdir, 'subdir2/subdir3/subdir4/file1_in_subdir4.txt')
- get(:show_file, params, session_for(:user1_with_load))
+ get(:show_file, params: params, session: session_for(:user1_with_load))
assert_response :redirect
assert_match /subdir2\/subdir3\/subdir4\/file1_in_subdir4\.txt/, response.redirect_url
end
show_collection(fixture_name, :active)
fixture = api_fixture('collections')[fixture_name.to_s]
assert_equal(fixture['name'], assigns(:object).name)
- assert_equal(fixture['properties'][0], assigns(:object).properties[0])
+ assert_equal(fixture['properties'].values[0], assigns(:object).properties.values[0])
end
test "create collection with properties" do
- post :create, {
+ post :create, params: {
collection: {
name: 'collection created with properties',
manifest_text: '',
},
},
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
assert_not_nil assigns(:object).uuid
assert_equal 'collection created with properties', assigns(:object).name
test "update description and check manifest_text is not lost" do
collection = api_fixture("collections")["multilevel_collection_1"]
- post :update, {
+ post :update, params: {
id: collection["uuid"],
collection: {
description: 'test description update'
},
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
assert_not_nil assigns(:object)
# Ensure the Workbench response still has the original manifest_text
test "anonymous user accesses collection in shared project" do
config_anonymous true
collection = api_fixture('collections')['public_text_file']
- get(:show, {id: collection['uuid']})
+ get(:show, params: {id: collection['uuid']})
response_object = assigns(:object)
assert_equal collection['name'], response_object['name']
end
test "can view empty collection" do
- get :show, {id: 'd41d8cd98f00b204e9800998ecf8427e+0'}, session_for(:active)
+ get :show, params: {id: 'd41d8cd98f00b204e9800998ecf8427e+0'}, session: session_for(:active)
assert_includes @response.body, 'The following collections have this content'
end
test "collection portable data hash redirect" do
di = api_fixture('collections')['docker_image']
- get :show, {id: di['portable_data_hash']}, session_for(:active)
+ get :show, params: {id: di['portable_data_hash']}, session: session_for(:active)
assert_match /\/collections\/#{di['uuid']}/, @response.redirect_url
end
test "collection portable data hash with multiple matches" do
pdh = api_fixture('collections')['foo_file']['portable_data_hash']
- get :show, {id: pdh}, session_for(:admin)
+ get :show, params: {id: pdh}, session: session_for(:admin)
matches = api_fixture('collections').select {|k,v| v["portable_data_hash"] == pdh}
assert matches.size > 1
test "collection page renders name" do
collection = api_fixture('collections')['foo_file']
- get :show, {id: collection['uuid']}, session_for(:active)
+ get :show, params: {id: collection['uuid']}, session: session_for(:active)
assert_includes @response.body, collection['name']
assert_match /not authorized to manage collection sharing links/, @response.body
end
test "No Upload tab on non-writable collection" do
- get :show, {id: api_fixture('collections')['user_agreement']['uuid']}, session_for(:active)
+ get :show,
+ params: {id: api_fixture('collections')['user_agreement']['uuid']},
+ session: session_for(:active)
assert_not_includes @response.body, '<a href="#Upload"'
end
setup_for_keep_web
tok = api_token('active')
id = api_fixture('collections')['w_a_z_file'][id_type]
- get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+ get :show_file,
+ params: {uuid: id, file: "w a z"},
+ session: session_for(:active)
assert_response :redirect
assert_equal "https://#{id.sub '+', '-'}.example/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
end
setup_for_keep_web
tok = api_token('active')
id = api_fixture('collections')['w_a_z_file'][id_type]
- get :show_file, {uuid: id, file: "w a z", reader_token: tok}, session_for(:expired)
+ get :show_file,
+ params: {uuid: id, file: "w a z", reader_token: tok},
+ session: session_for(:expired)
assert_response :redirect
assert_equal "https://#{id.sub '+', '-'}.example/t=#{URI.escape tok}/_/w%20a%20z", @response.redirect_url
end
setup_for_keep_web
config_anonymous true
id = api_fixture('collections')['public_text_file'][id_type]
- get :show_file, {uuid: id, file: "Hello World.txt"}
+ get :show_file, params: {uuid: id, file: "Hello World.txt"}
assert_response :redirect
assert_equal "https://#{id.sub '+', '-'}.example/_/Hello%20World.txt", @response.redirect_url
end
setup_for_keep_web
config_anonymous true
id = api_fixture('collections')['public_text_file'][id_type]
- get :show_file, {
+ get :show_file, params: {
uuid: id,
file: "Hello World.txt",
disposition: 'attachment',
'https://download.example/c=%{uuid_or_pdh}')
tok = api_token('active')
id = api_fixture('collections')['w_a_z_file'][id_type]
- get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+ get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
assert_response :redirect
assert_equal "https://download.example/c=#{id.sub '+', '-'}/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
end
'https://download.example/c=%{uuid_or_pdh}')
tok = api_token('active')
id = api_fixture('collections')['w_a_z_file'][id_type]
- get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+ get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
assert_response :redirect
assert_equal "https://collections.example/c=#{id.sub '+', '-'}/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
end
setup_for_keep_web
config_anonymous anon
id = api_fixture('collections')['w_a_z_file']['uuid']
- get :show_file, {uuid: id, file: "w a z"}, session_for(:spectator)
+ get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:spectator)
assert_response 404
end
'https://download.example/c=%{uuid_or_pdh}')
tok = api_token('active')
id = api_fixture('collections')['public_text_file']['uuid']
- get :show_file, {
+ get :show_file, params: {
uuid: id,
file: 'Hello world.txt',
disposition: 'attachment',
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :redirect
expect_url = "https://download.example/c=#{id.sub '+', '-'}/_/Hello%20world.txt"
if not anon
# cannot read this collection without a session token.
setup_for_keep_web 'https://collections.example/c=%{uuid_or_pdh}', false
id = api_fixture('collections')['w_a_z_file']['uuid']
- get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+ get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
assert_response 422
end
setup_for_keep_web false, 'https://download.example/c=%{uuid_or_pdh}'
tok = api_token('active')
id = api_fixture('collections')['w_a_z_file']['uuid']
- get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+ get :show_file, params: {uuid: id, file: "w a z"}, session: session_for(:active)
assert_response :redirect
assert_equal "https://download.example/c=#{id.sub '+', '-'}/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
end
assert_includes(collection['manifest_text'], "0:0:file1")
# now remove all files named 'file1' from the collection
- post :remove_selected_files, {
+ post :remove_selected_files, params: {
id: collection['uuid'],
selection: ["#{collection['uuid']}/file1",
"#{collection['uuid']}/dir1/file1"],
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
+ use_token :active
# verify no 'file1' in the updated collection
collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
assert_not_includes(collection['manifest_text'], "0:0:file1")
assert_includes(collection['manifest_text'], "0:0:file1")
# now remove all files from "dir1" subdir of the collection
- post :remove_selected_files, {
+ post :remove_selected_files, params: {
id: collection['uuid'],
selection: ["#{collection['uuid']}/dir1/file1",
"#{collection['uuid']}/dir1/file2"],
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
# verify that "./dir1" no longer exists in this collection's manifest text
+ use_token :active
collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
assert_match /. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1 0:0:file2\n$/, collection['manifest_text']
assert_not_includes(collection['manifest_text'], 'dir1')
assert_includes(collection['manifest_text'], "0:0:file1")
# rename 'file1' as 'file1renamed' and verify
- post :update, {
+ post :update, params: {
id: collection['uuid'],
collection: {
'rename-file-path:file1' => 'file1renamed'
},
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
+ use_token :active
collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
assert_match /. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed 0:0:file2\n.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1 0:0:dir1file2 0:0:dir1imagefile.png\n$/, collection['manifest_text']
# now rename 'file2' such that it is moved into 'dir1'
@test_counter = 0
- post :update, {
+ post :update, params: {
id: collection['uuid'],
collection: {
'rename-file-path:file2' => 'dir1/file2'
},
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
+ use_token :active
collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
assert_match /. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed\n.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1 0:0:dir1file2 0:0:dir1imagefile.png 0:0:file2\n$/, collection['manifest_text']
# now rename 'dir1/dir1file1' such that it is moved into a new subdir
@test_counter = 0
- post :update, {
+ post :update, params: {
id: collection['uuid'],
collection: {
'rename-file-path:dir1/dir1file1' => 'dir2/dir3/dir1file1moved'
},
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
+ use_token :active
collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
assert_match /. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed\n.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file2 0:0:dir1imagefile.png 0:0:file2\n.\/dir2\/dir3 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1moved\n$/, collection['manifest_text']
# now rename the image file 'dir1/dir1imagefile.png'
@test_counter = 0
- post :update, {
+ post :update, params: {
id: collection['uuid'],
collection: {
'rename-file-path:dir1/dir1imagefile.png' => 'dir1/dir1imagefilerenamed.png'
},
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
+ use_token :active
collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
assert_match /. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed\n.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file2 0:0:dir1imagefilerenamed.png 0:0:file2\n.\/dir2\/dir3 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1moved\n$/, collection['manifest_text']
end
use_token :active
# rename 'file2' as 'file1' and expect error
- post :update, {
+ post :update, params: {
id: 'zzzzz-4zz18-pyw8yp9g3pr7irn',
collection: {
'rename-file-path:file2' => 'file1'
},
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response 422
assert_includes json_response['errors'], 'Duplicate file path'
end
use_token :active
# rename 'file1' as 'dir1/file1' and expect error
- post :update, {
+ post :update, params: {
id: 'zzzzz-4zz18-pyw8yp9g3pr7irn',
collection: {
'rename-file-path:file1' => 'dir1/file1'
},
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response 422
assert_includes json_response['errors'], 'Duplicate file path'
end
container_uuid = cr['container_uuid']
container = Container.find(container_uuid)
- get :show, {id: cr['uuid'], tab_pane: 'Log'}, session_for(:active)
+ get :show, params: {id: cr['uuid'], tab_pane: 'Log'}, session: session_for(:active)
assert_response :success
assert_select "a", {:href=>"/collections/#{container['log']}", :text=>"Download the log"}
container_uuid = cr['container_uuid']
container = Container.find(container_uuid)
- get :show, {id: cr['uuid'], tab_pane: 'Log'}, session_for(:active)
+ get :show, params: {id: cr['uuid'], tab_pane: 'Log'}, session: session_for(:active)
assert_response :success
assert_includes @response.body, '<pre id="event_log_div"'
uuid = api_fixture('container_requests')['completed']['uuid']
- get :show, {id: uuid}, session_for(:active)
+ get :show, params: {id: uuid}, session: session_for(:active)
assert_response :success
assert_includes @response.body, "action=\"/container_requests/#{uuid}/copy\""
test "cancel request for queued container" do
cr_fixture = api_fixture('container_requests')['queued']
- post :cancel, {id: cr_fixture['uuid']}, session_for(:active)
+ post :cancel, params: {id: cr_fixture['uuid']}, session: session_for(:active)
assert_response 302
use_token 'active'
if reuse_enabled
copy_params.merge!({use_existing: true})
end
- post(:copy, copy_params, session_for(:active))
+ post(:copy, params: copy_params, session: session_for(:active))
assert_response 302
copied_cr = assigns(:object)
assert_not_nil copied_cr
assert_equal 'Uncommitted', copied_cr[:state]
assert_equal "Copy of #{completed_cr['name']}", copied_cr['name']
- assert_equal completed_cr['cmd'], copied_cr['cmd']
assert_equal completed_cr['runtime_constraints']['ram'], copied_cr['runtime_constraints'][:ram]
if reuse_enabled
assert copied_cr[:use_existing]
cr = api_fixture('container_requests')[cr_fixture]
assert_not_nil cr
get(:show,
- {id: cr['uuid']},
- session_for(:active))
+ params: {id: cr['uuid']},
+ session: session_for(:active))
assert_response :success
if should_show
assert_includes @response.body, "href=\"#Provenance\""
cr = api_fixture('container_requests')['completed_with_input_mounts']
- get :show, {id: cr['uuid']}, session_for(:active)
+ get :show, params: {id: cr['uuid']}, session: session_for(:active)
assert_response :success
assert_match /hello/, @response.body
container = api_fixture('containers')['completed']
- get :show, {id: container['uuid'], tab_pane: 'Log'}, session_for(:active)
+ get :show,
+ params: {id: container['uuid'], tab_pane: 'Log'},
+ session: session_for(:active)
assert_response :success
assert_select "a", {:href=>"/collections/#{container['log']}", :text=>"Download the log"}
dd[:resources][:pipeline_instances][:methods].delete(:index)
ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
- get :index, {}, session_for(:active)
+ get :index, params: {}, session: session_for(:active)
assert_includes @response.body, "zzzzz-xvhdp-cr4runningcntnr" # expect crs
assert_not_includes @response.body, "zzzzz-d1hrv-" # expect no pipelines
assert_includes @response.body, "Run a process"
dd[:resources][:pipeline_instances][:methods].delete(:index)
ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
- get :index, {}, session_for(:active)
+ get :index, params: {}, session: session_for(:active)
assert_not_includes @response.body, "compute-node-summary-pane"
end
dd[:resources][ctrl_name][:methods].delete(:index)
ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
- get :index, {}, session_for(:active)
+ get :index, params: {}, session: session_for(:active)
assert_response 404
end
end
proj_uuid = api_fixture('groups')['anonymously_accessible_project']['uuid']
if user
- get(:show, {id: proj_uuid}, session_for(user))
+ get(:show, params: {id: proj_uuid}, session: session_for(user))
else
- get(:show, {id: proj_uuid})
+ get(:show, params: {id: proj_uuid})
end
resp = @response.body
class JobsControllerTest < ActionController::TestCase
test "visit jobs index page" do
- get :index, {}, session_for(:active)
+ get :index, params: {}, session: session_for(:active)
assert_response :success
end
test "job page lists pipelines and jobs in which it is used" do
get(:show,
- {id: api_fixture('jobs')['completed_job_in_publicly_accessible_project']['uuid']},
- session_for(:active))
+ params: {id: api_fixture('jobs')['completed_job_in_publicly_accessible_project']['uuid']},
+ session: session_for(:active))
assert_response :success
assert_select "div.used-in-pipelines" do
- assert_select "a[href=/pipeline_instances/zzzzz-d1hrv-n68vc490mloy4fi]"
+ assert_select "a[href=\"/pipeline_instances/zzzzz-d1hrv-n68vc490mloy4fi\"]"
end
assert_select "div.used-in-jobs" do
- assert_select "a[href=/jobs/zzzzz-8i9sb-with2components]"
+ assert_select "a[href=\"/jobs/zzzzz-8i9sb-with2components\"]"
end
end
end
def create_instance_long_enough_to(instance_attrs={})
# create 'two_part' pipeline with the given instance attributes
pt_fixture = api_fixture('pipeline_templates')['two_part']
- post :create, {
+ post :create, params: {
pipeline_instance: instance_attrs.merge({
pipeline_template_uuid: pt_fixture['uuid']
}),
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
pi_uuid = assigns(:object).uuid
assert_not_nil assigns(:object)
test "can render pipeline instance with tagged collections" do
# Make sure to pass in a tagged collection to test that part of the rendering behavior.
get(:show,
- {id: api_fixture("pipeline_instances")["pipeline_with_tagged_collection_input"]["uuid"]},
- session_for(:active))
+ params: {id: api_fixture("pipeline_instances")["pipeline_with_tagged_collection_input"]["uuid"]},
+ session: session_for(:active))
assert_response :success
end
test "update script_parameters one at a time using merge param" do
template_fixture = api_fixture('pipeline_templates')['two_part']
- post :update, {
+ post :update, params: {
id: api_fixture("pipeline_instances")["pipeline_to_merge_params"]["uuid"],
pipeline_instance: {
components: {
},
merge: true,
format: :json
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
assert_not_nil assigns(:object)
orig_params = template_fixture['components']['part-two']['script_parameters']
test "component rendering copes with unexpected components format" do
get(:show,
- {id: api_fixture("pipeline_instances")["components_is_jobspec"]["uuid"]},
- session_for(:active))
+ params: {id: api_fixture("pipeline_instances")["components_is_jobspec"]["uuid"]},
+ session: session_for(:active))
assert_response :success
end
test "dates in JSON components are parsed" do
get(:show,
- {id: api_fixture('pipeline_instances')['has_component_with_completed_jobs']['uuid']},
- session_for(:active))
+ params: {id: api_fixture('pipeline_instances')['has_component_with_completed_jobs']['uuid']},
+ session: session_for(:active))
assert_response :success
assert_not_nil assigns(:object)
assert_not_nil assigns(:object).components[:foo][:job]
test "copy pipeline instance with components=use_latest" do
post(:copy,
- {
+ params: {
id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
components: 'use_latest',
script: 'use_latest',
state: 'RunningOnServer'
}
},
- session_for(:active))
+ session: session_for(:active))
assert_response 302
assert_not_nil assigns(:object)
test "copy pipeline instance on newer template works with script=use_same" do
post(:copy,
- {
+ params: {
id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
components: 'use_latest',
script: 'use_same',
state: 'RunningOnServer'
}
},
- session_for(:active))
+ session: session_for(:active))
assert_response 302
assert_not_nil assigns(:object)
class PipelineTemplatesControllerTest < ActionController::TestCase
test "component rendering copes with unexpeceted components format" do
get(:show,
- {id: api_fixture("pipeline_templates")["components_is_jobspec"]["uuid"]},
- session_for(:active))
+ params: {id: api_fixture("pipeline_templates")["components_is_jobspec"]["uuid"]},
+ session: session_for(:active))
assert_response :success
end
end
include ShareObjectHelper
test "invited user is asked to sign user agreements on front page" do
- get :index, {}, session_for(:inactive)
+ get :index, params: {}, session: session_for(:inactive)
assert_response :redirect
assert_match(/^#{Regexp.escape(user_agreements_url)}\b/,
@response.redirect_url,
end
test "uninvited user is asked to wait for activation" do
- get :index, {}, session_for(:inactive_uninvited)
+ get :index, params: {}, session: session_for(:inactive_uninvited)
assert_response :redirect
assert_match(/^#{Regexp.escape(inactive_users_url)}\b/,
@response.redirect_url,
[:project_viewer, false]].each do |which_user, should_show|
test "create subproject button #{'not ' unless should_show} shown to #{which_user}" do
readonly_project_uuid = api_fixture('groups')['aproject']['uuid']
- get :show, {
+ get :show, params: {
id: readonly_project_uuid
- }, session_for(which_user)
+ }, session: session_for(which_user)
buttons = css_select('[data-method=post]').select do |el|
el.attributes['data-remote-href'].value.match /project.*owner_uuid.*#{readonly_project_uuid}/
end
test "sharing a project with a user and group" do
uuid_list = [api_fixture("groups")["future_project_viewing_group"]["uuid"],
api_fixture("users")["future_project_user"]["uuid"]]
- post(:share_with, {
+ post(:share_with, params: {
id: api_fixture("groups")["asubproject"]["uuid"],
uuids: uuid_list,
format: "json"},
- session_for(:active))
+ session: session_for(:active))
assert_response :success
assert_equal(uuid_list, json_response["success"])
end
test "user with project read permission can't add permissions" do
share_uuid = api_fixture("users")["spectator"]["uuid"]
- post(:share_with, {
+ post(:share_with, params: {
id: api_fixture("groups")["aproject"]["uuid"],
uuids: [share_uuid],
format: "json"},
- session_for(:project_viewer))
+ session: session_for(:project_viewer))
assert_response 422
assert(json_response["errors"].andand.
any? { |msg| msg.start_with?("#{share_uuid}: ") },
# detected. The test passes quickly, but fails slowly.
Timeout::timeout 10 do
get(:show,
- { id: api_fixture("groups")["project_owns_itself"]["uuid"] },
- session_for(:admin))
+ params: { id: api_fixture("groups")["project_owns_itself"]["uuid"] },
+ session: session_for(:admin))
end
assert_response :success
end
coll_key = "collection_to_remove_from_subproject"
coll_uuid = api_fixture("collections")[coll_key]["uuid"]
delete(:remove_item,
- { id: api_fixture("groups")["asubproject"]["uuid"],
+ params: { id: api_fixture("groups")["asubproject"]["uuid"],
item_uuid: coll_uuid,
format: "js" },
- session_for(:subproject_admin))
+ session: session_for(:subproject_admin))
assert_response :success
assert_match(/\b#{coll_uuid}\b/, @response.body,
"removed object not named in response")
# should be implicitly moved to the user's Home project when removed.
specimen_uuid = api_fixture('specimens', 'in_asubproject')['uuid']
delete(:remove_item,
- { id: api_fixture('groups', 'asubproject')['uuid'],
+ params: { id: api_fixture('groups', 'asubproject')['uuid'],
item_uuid: specimen_uuid,
format: 'js' },
- session_for(:subproject_admin))
+ session: session_for(:subproject_admin))
assert_response :success
assert_match(/\b#{specimen_uuid}\b/, @response.body,
"removed object not named in response")
test "removing #{dm} from a subproject results in renaming it when there is another such object with same name in home project" do
object = api_fixture(dm, fixture)
delete(:remove_item,
- { id: api_fixture('groups', 'asubproject')['uuid'],
+ params: { id: api_fixture('groups', 'asubproject')['uuid'],
item_uuid: object['uuid'],
format: 'js' },
- session_for(:active))
+ session: session_for(:active))
assert_response :success
assert_match(/\b#{object['uuid']}\b/, @response.body,
"removed object not named in response")
encoded_params = Hash[params.map { |k,v|
[k, (v.is_a?(Array) || v.is_a?(Hash)) ? v.to_json : v]
}]
- get :show, encoded_params, session_for(:active)
+ get :show, params: encoded_params, session: session_for(:active)
end
test "visit non-public project as anonymous when anonymous browsing is enabled and expect page not found" do
Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
- get(:show, {id: api_fixture('groups')['aproject']['uuid']})
+ get(:show, params: {id: api_fixture('groups')['aproject']['uuid']})
assert_response 404
assert_match(/log ?in/i, @response.body)
end
Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
if user
- get :public, {}, session_for(user)
+ get :public, params: {}, session: session_for(user)
else
get :public
end
end
test "visit public projects page when anon config is not enabled as active user and expect 404" do
- get :public, {}, session_for(:active)
+ Rails.configuration.anonymous_user_token = nil
+ Rails.configuration.enable_public_projects_page = false
+ get :public, params: {}, session: session_for(:active)
assert_response 404
end
test "visit public projects page when anon config is enabled but public projects page is disabled as active user and expect 404" do
Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
Rails.configuration.enable_public_projects_page = false
- get :public, {}, session_for(:active)
+ get :public, params: {}, session: session_for(:active)
assert_response 404
end
test "visit public projects page when anon config is not enabled as anonymous and expect login page" do
+ Rails.configuration.anonymous_user_token = nil
+ Rails.configuration.enable_public_projects_page = false
get :public
assert_response :redirect
assert_match /\/users\/welcome/, @response.redirect_url
found = Group.find(project['uuid'])
found.description = 'test description update'
found.save!
- get(:show, {id: project['uuid']}, session_for(:active))
+ get(:show, params: {id: project['uuid']}, session: session_for(:active))
assert_includes @response.body, 'test description update'
end
found = Group.find(project['uuid'])
found.description = '*test bold description for textile formatting*'
found.save!
- get(:show, {id: project['uuid']}, session_for(:active))
+ get(:show, params: {id: project['uuid']}, session: session_for(:active))
assert_includes @response.body, '<strong>test bold description for textile formatting</strong>'
end
found = Group.find(project['uuid'])
found.description = '<b>Textile</b> description with link to home page <a href="/">take me home</a>.'
found.save!
- get(:show, {id: project['uuid']}, session_for(:active))
+ get(:show, params: {id: project['uuid']}, session: session_for(:active))
assert_includes @response.body, '<b>Textile</b> description with link to home page <a href="/">take me home</a>.'
end
found = Group.find(project['uuid'])
found.description = 'Textile description with unsafe script tag <script language="javascript">alert("Hello there")</script>.'
found.save!
- get(:show, {id: project['uuid']}, session_for(:active))
+ get(:show, params: {id: project['uuid']}, session: session_for(:active))
assert_includes @response.body, 'Textile description with unsafe script tag alert("Hello there").'
end
found = Group.find(project['uuid'])
found.description = textile_table
found.save!
- get(:show, {id: project['uuid']}, session_for(:active))
+ get(:show, params: {id: project['uuid']}, session: session_for(:active))
assert_includes @response.body, '<th>First Header'
assert_includes @response.body, '<td>Content Cell'
end
# uses 'Link to object' as a hyperlink for the object
found.description = '"Link to object":' + api_fixture('groups')['asubproject']['uuid']
found.save!
- get(:show, {id: project['uuid']}, session_for(:active))
+ get(:show, params: {id: project['uuid']}, session: session_for(:active))
# check that input was converted to textile, not staying as inputted
refute_includes @response.body,'"Link to object"'
test "project viewer can't see project sharing tab" do
project = api_fixture('groups')['aproject']
- get(:show, {id: project['uuid']}, session_for(:project_viewer))
+ get(:show, params: {id: project['uuid']}, session: session_for(:project_viewer))
refute_includes @response.body, '<div id="Sharing"'
assert_includes @response.body, '<div id="Data_collections"'
end
].each do |username|
test "#{username} can see project sharing tab" do
project = api_fixture('groups')['aproject']
- get(:show, {id: project['uuid']}, session_for(username))
+ get(:show, params: {id: project['uuid']}, session: session_for(username))
assert_includes @response.body, '<div id="Sharing"'
assert_includes @response.body, '<div id="Data_collections"'
end
['project_viewer',false],
].each do |user, can_move|
test "#{user} can move subproject from project #{can_move}" do
- get(:show, {id: api_fixture('groups')['aproject']['uuid']}, session_for(user))
+ get(:show, params: {id: api_fixture('groups')['aproject']['uuid']}, session: session_for(user))
if can_move
assert_includes @response.body, 'Move project...'
else
[:active, false],
].each do |user, expect_all_nodes|
test "in dashboard other index page links as #{user}" do
- get :index, {}, session_for(user)
+ get :index, params: {}, session: session_for(user)
[["processes", "/all_processes"],
["collections", "/collections"],
end
test "dashboard should show the correct status for processes" do
- get :index, {}, session_for(:active)
+ get :index, params: {}, session: session_for(:active)
assert_select 'div.panel-body.recent-processes' do
[
{
test "visit a public project and verify the public projects page link exists" do
Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
uuid = api_fixture('groups')['anonymously_accessible_project']['uuid']
- get :show, {id: uuid}
+ get :show, params: {id: uuid}
project = assigns(:object)
assert_equal uuid, project['uuid']
refute_empty css_select("[href=\"/projects/#{project['uuid']}\"]")
test 'all_projects unaffected by params after use by ProjectsController (#6640)' do
@controller = ProjectsController.new
project_uuid = api_fixture('groups')['aproject']['uuid']
- get :index, {
+ get :index, params: {
filters: [['uuid', '<', project_uuid]].to_json,
limit: 0,
offset: 1000,
- }, session_for(:active)
- assert_select "#projects-menu + ul li.divider ~ li a[href=/projects/#{project_uuid}]"
+ }, session: session_for(:active)
+ assert_select "#projects-menu + ul li.divider ~ li a[href=\"/projects/#{project_uuid}\"]"
end
[
# share it again
@controller = LinksController.new
- post :create, {
+ post :create, params: {
link: {
link_class: 'permission',
name: 'can_read',
tail_uuid: api_fixture('users')['project_viewer']['uuid'],
},
format: :json
- }, session_for(:system_user)
+ }, session: session_for(:system_user)
# verify that the project is again included in starred projects
use_token :project_viewer
test "#{user} shares repository with a user and group" do
uuid_list = [api_fixture("groups")["future_project_viewing_group"]["uuid"],
api_fixture("users")["future_project_user"]["uuid"]]
- post(:share_with, {
+ post(:share_with, params: {
id: api_fixture("repositories")["foo"]["uuid"],
uuids: uuid_list,
format: "json"},
- session_for(user))
+ session: session_for(user))
assert_response :success
assert_equal(uuid_list, json_response["success"])
end
test "user with repository read permission cannot add permissions" do
share_uuid = api_fixture("users")["project_viewer"]["uuid"]
- post(:share_with, {
+ post(:share_with, params: {
id: api_fixture("repositories")["arvados"]["uuid"],
uuids: [share_uuid],
format: "json"},
- session_for(:spectator))
+ session: session_for(:spectator))
assert_response 422
assert(json_response["errors"].andand.
any? { |msg| msg.start_with?("#{share_uuid}: ") },
[:admin, ['#Attributes', '#Sharing', '#Advanced']],
].each do |user, expected_panes|
test "#{user} sees panes #{expected_panes}" do
- get :show, {
+ get :show, params: {
id: api_fixture('repositories')['foo']['uuid']
- }, session_for(user)
+ }, session: session_for(user)
assert_response :success
panes = css_select('[data-toggle=tab]').each do |pane|
test "show tree to #{user}" do
reset_api_fixtures_after_test false
sha1, _, _ = stub_repo_content
- get :show_tree, {
+ get :show_tree, params: {
id: api_fixture('repositories')['foo']['uuid'],
commit: sha1,
- }, session_for(user)
+ }, session: session_for(user)
assert_response :success
assert_select 'tr td a', 'COPYING'
assert_select 'tr td', '625 bytes'
test "show commit to #{user}" do
reset_api_fixtures_after_test false
sha1, commit, _ = stub_repo_content
- get :show_commit, {
+ get :show_commit, params: {
id: api_fixture('repositories')['foo']['uuid'],
commit: sha1,
- }, session_for(user)
+ }, session: session_for(user)
assert_response :success
assert_select 'pre', commit
end
test "show blob to #{user}" do
reset_api_fixtures_after_test false
sha1, _, filedata = stub_repo_content filename: 'COPYING'
- get :show_blob, {
+ get :show_blob, params: {
id: api_fixture('repositories')['foo']['uuid'],
commit: sha1,
path: 'COPYING',
- }, session_for(user)
+ }, session: session_for(user)
assert_response :success
assert_select 'pre', filedata
end
test "show tree with path '#{path}'" do
reset_api_fixtures_after_test false
sha1, _, _ = stub_repo_content filename: 'COPYING'
- get :show_tree, {
+ get :show_tree, params: {
id: api_fixture('repositories')['foo']['uuid'],
commit: sha1,
path: path,
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
assert_select 'tr td', 'COPYING'
end
partial: :repositories_rows,
format: :json,
}
- get :index, params, session_for(:active)
+ get :index, params: params, session: session_for(:active)
assert_response :success
repos = assigns(:objects)
assert repos
include Rails.application.routes.url_helpers
test 'Get search dialog' do
- xhr :get, :choose, {
+ get :choose, params: {
format: :js,
title: 'Search',
action_name: 'Show',
action_href: url_for(host: 'localhost', controller: :actions, action: :show),
action_data: {}.to_json,
- }, session_for(:active)
+ }, session: session_for(:active), xhr: true
assert_response :success
end
test 'Get search results for all projects' do
- xhr :get, :choose, {
+ get :choose, params: {
format: :json,
partial: true,
- }, session_for(:active)
+ }, session: session_for(:active), xhr: true
assert_response :success
assert_not_empty(json_response['content'],
'search results for all projects should not be empty')
end
test 'Get search results for empty project' do
- xhr :get, :choose, {
+ get :choose, params: {
format: :json,
partial: true,
project_uuid: api_fixture('groups')['empty_project']['uuid'],
- }, session_for(:active)
+ }, session: session_for(:active), xhr: true
assert_response :success
assert_empty(json_response['content'],
'search results for empty project should be empty')
end
test 'search results for aproject and verify recursive contents' do
- xhr :get, :choose, {
+ get :choose, params: {
format: :json,
partial: true,
project_uuid: api_fixture('groups')['aproject']['uuid'],
- }, session_for(:active)
+ }, session: session_for(:active), xhr: true
assert_response :success
assert_not_empty(json_response['content'],
'search results for aproject should not be empty')
test "untrash collection with same name as another collection" do
collection = api_fixture('collections')['trashed_collection_to_test_name_conflict_on_untrash']
items = [collection['uuid']]
- post :untrash_items, {
+ post :untrash_items, params: {
selection: items,
format: :js
- }, session_for(:active)
+ }, session: session_for(:active)
assert_response :success
end
class UserAgreementsControllerTest < ActionController::TestCase
test 'User agreements page shows form if some user agreements are not signed' do
- get :index, {}, session_for(:inactive)
+ get :index, params: {}, session: session_for(:inactive)
assert_response 200
end
test 'User agreements page redirects if all user agreements signed' do
- get :index, {return_to: root_path}, session_for(:active)
+ get :index, params: {return_to: root_path}, session: session_for(:active)
assert_response :redirect
assert_equal(root_url,
@response.redirect_url,
class UsersControllerTest < ActionController::TestCase
test "valid token works in controller test" do
- get :index, {}, session_for(:active)
+ get :index, params: {}, session: session_for(:active)
assert_response :success
end
test "ignore previously valid token (for deleted user), don't crash" do
- get :activity, {}, session_for(:valid_token_deleted_user)
+ get :activity, params: {}, session: session_for(:valid_token_deleted_user)
assert_response :redirect
assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
assert_nil assigns(:my_jobs)
end
test "expired token redirects to api server login" do
- get :show, {
+ get :show, params: {
id: api_fixture('users')['active']['uuid']
- }, session_for(:expired_trustedclient)
+ }, session: session_for(:expired_trustedclient)
assert_response :redirect
assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
assert_nil assigns(:my_jobs)
end
test "show welcome page if no token provided" do
- get :index, {}
+ get :index, params: {}
assert_response :redirect
assert_match /\/users\/welcome/, @response.redirect_url
end
test "'log in as user' feature uses a v2 token" do
- post :sudo, {
+ post :sudo, params: {
id: api_fixture('users')['active']['uuid']
- }, session_for('admin_trustedclient')
+ }, session: session_for('admin_trustedclient')
assert_response :redirect
assert_match /api_token=v2%2F/, @response.redirect_url
end
ActionMailer::Base.deliveries = []
- post :request_shell_access, {
+ post :request_shell_access, params: {
id: user['uuid'],
format: 'js'
- }, session_for(:spectator)
+ }, session: session_for(:spectator)
assert_response :success
full_name = "#{user['first_name']} #{user['last_name']}"
test "access users page as #{username} and verify show button is available" do
admin_user = api_fixture('users','admin')
active_user = api_fixture('users','active')
- get :index, {}, session_for(username)
+ get :index, params: {}, session: session_for(username)
if username == 'admin'
assert_match /<a href="\/projects\/#{admin_user['uuid']}">Home<\/a>/, @response.body
assert_match /<a href="\/projects\/#{active_user['uuid']}">Home<\/a>/, @response.body
test "access settings drop down menu as #{username}" do
admin_user = api_fixture('users','admin')
active_user = api_fixture('users','active')
- get :show, {
+ get :show, params: {
id: api_fixture('users')[username]['uuid']
- }, session_for(username)
+ }, session: session_for(username)
if username == 'admin'
assert_includes @response.body, admin_user['email']
refute_empty css_select('[id="system-menu"]')
encoded_params = Hash[params.map { |k,v|
[k, (v.is_a?(Array) || v.is_a?(Hash)) ? v.to_json : v]
}]
- get :index, encoded_params, session_for(:active)
+ get :index, params: encoded_params, session: session_for(:active)
end
end
class WorkflowsControllerTest < ActionController::TestCase
test "index" do
- get :index, {}, session_for(:active)
+ get :index, params: {}, session: session_for(:active)
assert_response :success
assert_includes @response.body, 'Valid workflow with no definition yaml'
end
wf = api_fixture('workflows')['workflow_with_input_specifications']
- get :show, {id: wf['uuid']}, session_for(:active)
+ get :show, params: {id: wf['uuid']}, session: session_for(:active)
assert_response :success
assert_includes @response.body, "a short label for this parameter (optional)"
end
def user_can_manage(user_sym, fixture)
- get(:show, {id: fixture["uuid"]}, session_for(user_sym))
+ get(:show, params: {id: fixture["uuid"]}, session: session_for(user_sym))
is_manager = assigns(:user_is_manager)
assert_not_nil(is_manager, "user_is_manager flag not set")
if not is_manager
# SPDX-License-Identifier: AGPL-3.0
require 'integration_helper'
+require 'config_validators'
class ApplicationLayoutTest < ActionDispatch::IntegrationTest
# These tests don't do state-changing API calls. Save some time by
click_link 'API response'
api_response = JSON.parse(find('div#advanced_api_response pre').text)
input_params = api_response['components']['part-one']['script_parameters']['input']
- assert_equal input_params['value'], collection['portable_data_hash']
- assert_equal input_params['selection_name'], collection['name']
- assert_equal input_params['selection_uuid'], collection['uuid']
+ assert_equal collection['portable_data_hash'], input_params['value']
+ assert_equal collection['name'], input_params['selection_name']
+ assert_equal collection['uuid'], input_params['selection_uuid']
# "Run" button is now enabled
page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
api_response = JSON.parse(find('div#advanced_api_response pre').text)
input_params = api_response['components']['part-one']['script_parameters']['input']
- assert_equal(input_params['selection_uuid'], collection['uuid'], "Not found expected input param uuid")
+ assert_equal(collection['uuid'], input_params['selection_uuid'], "Not found expected input param uuid")
if choose_file
- assert_equal(input_params['value'], collection['portable_data_hash']+'/foo', "Not found expected input file param value")
- assert_equal(input_params['selection_name'], collection['name']+'/foo', "Not found expected input file param name")
+ assert_equal(collection['portable_data_hash']+'/foo', input_params['value'], "Not found expected input file param value")
+ assert_equal(collection['name']+'/foo', input_params['selection_name'], "Not found expected input file param name")
else
- assert_equal(input_params['value'], collection['portable_data_hash'], "Not found expected input param value")
- assert_equal(input_params['selection_name'], collection['name'], "Not found expected input selection name")
+ assert_equal(collection['portable_data_hash'], input_params['value'], "Not found expected input param value")
+ assert_equal(collection['name'], input_params['selection_name'], "Not found expected input selection name")
end
# "Run" button present and enabled
require File.expand_path('../../config/environment', __FILE__)
require 'rails/test_help'
-require 'mocha/mini_test'
+require 'mocha/minitest'
class ActiveSupport::TestCase
# Setup all fixtures in test/fixtures/*.(yml|csv) for all tests in
if label != nil
assert_equal(label, wu.label)
+ elsif obj.name.nil?
+ assert_nil(wu.label)
else
assert_equal(obj.name, wu.label)
end
assert_equal(obj['uuid'], wu.uuid)
assert_equal(state, wu.state_label)
- assert_equal(success, wu.success?)
+ if success.nil?
+ assert_nil(wu.success?)
+ else
+ assert_equal(success, wu.success?)
+ end
assert_equal(progress, wu.progress)
assert_equal(num_children, wu.children.size)
if walltime
assert_equal true, (wu.walltime >= walltime)
else
- assert_equal walltime, wu.walltime
+ if walltime.nil?
+ assert_nil wu.walltime
+ else
+ assert_equal walltime, wu.walltime
+ end
end
if cputime
if queuedtime
assert_equal true, (wu.queuedtime >= queuedtime)
+ elsif queuedtime.nil?
+ assert_nil wu.queuedtime
else
assert_equal queuedtime, wu.queuedtime
end
# initialize git_internal_dir
# usually /var/lib/arvados/internal.git (set in application.default.yml )
if [ "$APPLICATION_READY" = "1" ]; then
- GIT_INTERNAL_DIR=$($COMMAND_PREFIX bundle exec rake config:check 2>&1 | grep git_internal_dir | awk '{ print $2 }')
+ GIT_INTERNAL_DIR=$($COMMAND_PREFIX bundle exec rake config:dump 2>&1 | grep GitInternalDir | awk '{ print $2 }' |tr -d '"')
if [ ! -e "$GIT_INTERNAL_DIR" ]; then
run_and_report "Creating git_internal_dir '$GIT_INTERNAL_DIR'" \
mkdir -p "$GIT_INTERNAL_DIR"
LICENSE_PACKAGE_TS=20151208015500
if [[ -z "$ARVADOS_BUILDING_VERSION" ]]; then
- RAILS_PACKAGE_ITERATION=8
+ RAILS_PACKAGE_ITERATION=1
else
RAILS_PACKAGE_ITERATION="$ARVADOS_BUILDING_ITERATION"
fi
done
fi
- # the libpam module should place this file in the historically correct place
- # so as not to break backwards compatibility
- if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/python2.7/dist/libpam-arvados/lib/security/libpam_arvados.py" ]]; then
- COMMAND_ARR+=("usr/share/$python/dist/$PYTHON_PKG/data/lib/security/libpam_arvados.py=/usr/data/lib/security/")
+ # the libpam module should place a few files in the correct place for the pam
+ # subsystem
+ if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/$python/dist/$PYTHON_PKG/lib/security/libpam_arvados.py" ]]; then
+ COMMAND_ARR+=("usr/share/$python/dist/$PYTHON_PKG/lib/security/libpam_arvados.py=/usr/lib/security/")
+ fi
+ if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/$python/dist/$PYTHON_PKG/share/pam-configs/arvados" ]]; then
+ COMMAND_ARR+=("usr/share/$python/dist/$PYTHON_PKG/share/pam-configs/arvados=/usr/share/pam-configs/")
fi
# the python-arvados-cwl-runner package comes with cwltool, expose that version
services/ws
sdk/cli
sdk/pam
+sdk/pam:py3
sdk/python
sdk/python:py3
sdk/ruby
echo "${svc} pid ${pid} ok"
}
+checkhealth() {
+ svc="$1"
+ port="$(cat "$WORKSPACE/tmp/${svc}.port")"
+ scheme=http
+ if [[ ${svc} =~ -ssl$ || ${svc} = wss ]]; then
+ scheme=https
+ fi
+ url="$scheme://localhost:${port}/_health/ping"
+ if ! curl -Ss -H "Authorization: Bearer e687950a23c3a9bceec28c6223a06c79" "${url}" | tee -a /dev/stderr | grep '"OK"'; then
+ echo "${url} failed"
+ return 1
+ fi
+}
+
checkdiscoverydoc() {
dd="https://${1}/discovery/v1/apis/arvados/v1/rest"
if ! (set -o pipefail; curl -fsk "$dd" | grep -q ^{ ); then
&& checkdiscoverydoc $ARVADOS_API_HOST \
&& python sdk/python/tests/run_test_server.py start_controller \
&& checkpidfile controller \
+ && checkhealth controller \
&& python sdk/python/tests/run_test_server.py start_keep_proxy \
&& checkpidfile keepproxy \
&& python sdk/python/tests/run_test_server.py start_keep-web \
&& checkpidfile keep-web \
+ && checkhealth keep-web \
&& python sdk/python/tests/run_test_server.py start_arv-git-httpd \
&& checkpidfile arv-git-httpd \
+ && checkhealth arv-git-httpd \
&& python sdk/python/tests/run_test_server.py start_ws \
&& checkpidfile ws \
&& eval $(python sdk/python/tests/run_test_server.py start_nginx) \
lib/cloud
lib/cloud/azure
lib/cloud/ec2
+ lib/config
lib/dispatchcloud
lib/dispatchcloud/container
lib/dispatchcloud/scheduler
test_apps/workbench_units() {
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_units]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_units]}
}
test_apps/workbench_functionals() {
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_functionals]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_functionals]}
}
test_apps/workbench_integration() {
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_integration]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS='-v -d' ${testargs[apps/workbench]} ${testargs[apps/workbench_integration]}
}
test_apps/workbench_benchmark() {
"os"
"git.curoverse.com/arvados.git/lib/cmd"
+ "git.curoverse.com/arvados.git/lib/config"
"git.curoverse.com/arvados.git/lib/controller"
"git.curoverse.com/arvados.git/lib/dispatchcloud"
)
"-version": cmd.Version(version),
"--version": cmd.Version(version),
+ "config-check": config.CheckCommand,
+ "config-dump": config.DumpCommand,
"controller": controller.Command,
"dispatch-cloud": dispatchcloud.Command,
})
To solve the problem mentioned above, the API server offers the possibility to limit the amount of log information stored on the table:
+<pre>
+# Attributes to suppress in events and audit logs. Notably,
+# specifying ["manifest_text"] here typically makes the database
+# smaller and faster.
+#
+# Warning: Using any non-empty value here can have undesirable side
+# effects for any client or component that relies on event logs.
+# Use at your own risk.
+unlogged_attributes: []
+</pre>
+
+The above setting affects all logged events, regardless of how long they are kept in the database.
+
<pre>
# Time to keep audit logs (a row in the log table added each time an
# Arvados object is created, modified, or deleted) in the PostgreSQL
# database. Currently, websocket event notifications rely on audit
-# logs, so this should not be set lower than 600 (10 minutes).
+# logs, so this should not be set lower than 300 (5 minutes).
max_audit_log_age: 1209600
</pre>
---
layout: default
navsection: admin
-title: "Migrating a user to a federated account"
+title: "Migrating users to federated accounts"
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-When you use federation capabilities to connect two or more clusters that were already operating, some users might already have accounts on multiple clusters. Typically, they will want to choose a single account on one of the clusters and abandon the rest, transferring all data or permissions from their old “remote” accounts to a single “home” account.
+When using multiple Arvados clusters, prior to the federation capabilities described here, a user would have to create a separate account on each cluster. Unfortunately, because each account represents a separate "identity", in this system permissions granted to a user on one cluster do not transfer to another cluster, even if the accounts are associated with the same user.
-This effect can be achieved by changing the UUIDs of the user records on the remote clusters. This should be done before the user has ever used federation features to access cluster B with cluster A credentials. Otherwise, see "managing conflicting accounts" below.
+To address this, Arvados supports "federated user accounts". A federated user account is associated with a specific "home" cluster, and can be used to access other clusters in the federation that trust the home cluster. When a user arrives at another cluster's Workbench, they select and log in to their home cluster, and then are returned to the starting cluster logged in with the federated user account.
-For example, a user might have:
-* an account A on cluster A with uuid @aaaaa-tpzed-abcdefghijklmno@, and
-* an account B on cluster B with uuid @bbbbb-tpzed-lmnopqrstuvwxyz@
+When setting up federation capabilities on existing clusters, some users might already have accounts on multiple clusters. In order to have a single federated identity, users should be assigned a "home" cluster, and accounts associated with that user on the other (non-home) clusters should be migrated to the new federated user account. The @arv-federation-migrate@ tool assists with this.
-An administrator at cluster B can merge the two accounts by renaming account B to account A.
+h2. arv-federation-migrate
-<notextile>
-<pre><code>#!/usr/bin/env python
-import arvados
-arvados.api('v1').users().update_uuid(
- uuid="<span class="userinput">bbbbb-tpzed-lmnopqrstuvwxyz</span>",
- new_uuid="<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>").execute()
-</code></pre></notextile>
+The tool @arv-federation-migrate@ is part of the @arvados-python-client@ package.
-This should be done when the user is idle, i.e., not logged in and not running any jobs or containers.
+This tool is designed to help an administrator with access to all clusters in a federation migrate users who have multiple accounts to a single federated account.
-h2. Managing conflicting accounts
+As part of migrating a user, any data or permissions associated with old user accounts will be reassigned to the federated account.
-If the user has already used federation capabilities to access cluster B using account A before the above migration has been done, this will have already created a database entry for account A on cluster B, and the above program will error out. To fix this, the same "update_uuid API call":../api/methods/users.html#update_uuid can be used to move the conflicting account out of the way first.
+h2. Get user report
-<notextile>
-<pre><code>#!/usr/bin/env python
-import arvados
-import random
-import string
-random_chars = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(15))
-arvados.api('v1').users().update_uuid(
- uuid="<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>",
- new_uuid="bbbbb-tpzed-"+random_chars).execute()
-</code></pre></notextile>
+The first step is to create @tokens.csv@ and list each cluster and API token to access the cluster. API tokens must be trusted tokens with administrator access. This is a simple comma separated value file and can be created in a text editor. Example:
-After this is done and the migration is complete, the affected user should wait 5 minutes for the authorization cache to expire before using the remote cluster.
+_tokens.csv_
+
+<pre>
+x3982.arvadosapi.com,v2/x3982-gj3su-sb6meh2jf145s7x/98d40d70d8862e33d7398213435d1a71a96cf870
+x6b1s.arvadosapi.com,v2/x6b1s-gj3su-dxc87btfv5kg91z/5575d980d3ff6231bb0c692281c42a7541c59417
+</pre>
+
+Next, run @arv-federation-migrate@ with the @--tokens@ and @--report@ flags:
+
+<pre>
+$ arv-federation-migrate --tokens tokens.csv --report users.csv
+Reading tokens.csv
+Getting user list from x6b1s
+Getting user list from x3982
+Wrote users.csv
+</pre>
+
+This will produce a report of users across all clusters listed in @tokens.csv@, sorted by email address. This file can be loaded into a text editor or spreadsheet program for ease of viewing and editing.
+
+_users.csv_
+
+<pre>
+email,user uuid,primary cluster/user
+person_a@example.com,x6b1s-tpzed-hb5n7doogwhk6cf,x6b1s
+person_b@example.com,x3982-tpzed-1vl3k7knf7qihbe,
+person_b@example.com,x6b1s-tpzed-w4nhkx2rmrhlr54,
+</pre>
+
+The third column describes that user's home cluster. If a user only has one account (identified by email address), the column will be filled in and there is nothing to do. If the column is blank, that means there is more than one Arvados account associated with the user. Edit the file and provide the desired home cluster for each user. In this example, <code>person_b@example.com</code> is assigned the home cluster @x3982@.
+
+_users.csv_
+
+<pre>
+email,user uuid,primary cluster/user
+person_a@example.com,x6b1s-tpzed-hb5n7doogwhk6cf,x6b1s
+person_b@example.com,x3982-tpzed-1vl3k7knf7qihbe,x3982
+person_b@example.com,x6b1s-tpzed-w4nhkx2rmrhlr54,x3982
+</pre>
+
+h2. Migrate users
+
+To avoid disruption, advise users to log out and avoid running workflows while performing the migration.
+
+After updating @users.csv@, use the @--migrate@ option:
+
+<pre>
+$ arv-federation-migrate --tokens tokens.csv --migrate users.csv
+(person_b@example.com) Migrating x6b1s-tpzed-w4nhkx2rmrhlr54 to x3982-tpzed-1vl3k7knf7qihbe
+</pre>
+
+After migration, users should select their home cluster when logging into Arvados Workbench. If a user attempts to log into a migrated user account, they will be redirected to log in with their home cluster.
TODO: extract this information based on git commit messages and generate changelogs / release notes automatically.
{% endcomment %}
-h3. current master branch
+h3. v1.4.0 (2019-05-31)
+
+h4. Populating the new file_count and file_size_total columns on the collections table
+
+As part of story "#14484":https://dev.arvados.org/issues/14484, two new columns were added to the collections table in a database migration. If your installation has a large collections table, this migration may take some time. We've seen it take ~5 minutes on an installation with 250k collections, but your mileage may vary.
+
+The new columns are initialized with a zero value. In order to populate them, it is necessary to run a script called <code class="userinput">populate-file-info-columns-in-collections.rb</code> from the scripts directory of the API server. This can be done out of band, ideally directly after the API server has been upgraded to v1.4.0.
h4. Stricter collection manifest validation on the API server
<notextile>
<pre><code>Clusters:
<span class="userinput">uuid_prefix</span>:
- NodeProfiles:
- apiserver:
- arvados-controller:
- Listen: ":<span class="userinput">9004</span>" # must match the "upstream controller" section of your Nginx config
+ Services:
+ Controller:
+ InternalURLs:
+ "http://localhost:<span class="userinput">9004</span>": {} # must match the "upstream controller" section of your Nginx config
+ RailsAPI:
arvados-api-server:
- Listen: ":<span class="userinput">8000</span>" # must match the "upstream api" section of your Nginx config
+ "http://localhost:<span class="userinput">8000</span>": {} # must match the "upstream api" section of your Nginx config
PostgreSQL:
ConnectionPool: 128
Connection:
<span class="userinput">uuid_prefix</span>:
ManagementToken: xyzzy
SystemRootToken: <span class="userinput">zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz</span>
- NodeProfiles:
- # The key "apiserver" corresponds to ARVADOS_NODE_PROFILE in environment file (see below).
- apiserver:
- arvados-dispatch-cloud:
- Listen: ":9006"
Services:
Controller:
ExternalURL: "https://<span class="userinput">uuid_prefix.arvadosapi.com</span>"
- CloudVMs:
- # BootProbeCommand is a shell command that succeeds when an instance is ready for service
- BootProbeCommand: "sudo systemctl status docker"
+ DispatchCloud:
+ InternalURLs:
+ "http://localhost:9006": {}
+ Containers:
+ CloudVMs:
+ # BootProbeCommand is a shell command that succeeds when an instance is ready for service
+ BootProbeCommand: "sudo systemctl status docker"
- <b># --- driver-specific configuration goes here --- see Amazon and Azure examples below ---</b>
+ <b># --- driver-specific configuration goes here --- see Amazon and Azure examples below ---</b>
- Dispatch:
- PrivateKey: |
+ DispatchPrivateKey: |
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAqXoCzcOBkFQ7w4dvXf9B++1ctgZRqEbgRYL3SstuMV4oawks
ttUuxJycDdsPmeYcHsKo8vsEZpN6iYsX6ZZzhkO5nEayUTU8sBjmg1ZCTo4QqKXr
<notextile>
<pre><code>Clusters:
<span class="userinput">uuid_prefix</span>:
- CloudVMs:
- ImageID: ami-01234567890abcdef
- Driver: ec2
- DriverParameters:
- AccessKeyID: EALMF21BJC7MKNF9FVVR
- SecretAccessKey: yKJAPmoCQOMtYWzEUQ1tKTyrocTcbH60CRvGP3pM
- SecurityGroupIDs:
- - sg-0123abcd
- SubnetID: subnet-0123abcd
- Region: us-east-1
- EBSVolumeType: gp2
- AdminUsername: debian
+ Containers:
+ CloudVMs:
+ ImageID: ami-01234567890abcdef
+ Driver: ec2
+ DriverParameters:
+ AccessKeyID: EALMF21BJC7MKNF9FVVR
+ SecretAccessKey: yKJAPmoCQOMtYWzEUQ1tKTyrocTcbH60CRvGP3pM
+ SecurityGroupIDs:
+ - sg-0123abcd
+ SubnetID: subnet-0123abcd
+ Region: us-east-1
+ EBSVolumeType: gp2
+ AdminUsername: debian
</code></pre>
</notextile>
<notextile>
<pre><code>Clusters:
<span class="userinput">uuid_prefix</span>:
- CloudVMs:
- ImageID: "https://zzzzzzzz.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.55555555-5555-5555-5555-555555555555.vhd"
- Driver: azure
- DriverParameters:
- SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
- ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
- ClientSecret: 2WyXt0XFbEtutnf2hp528t6Wk9S5bOHWkRaaWwavKQo=
- TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
- CloudEnvironment: AzurePublicCloud
- ResourceGroup: zzzzz
- Location: centralus
- Network: zzzzz
- Subnet: zzzzz-subnet-private
- StorageAccount: example
- BlobContainer: vhds
- DeleteDanglingResourcesAfter: 20s
- AdminUsername: arvados
-</code></pre>
-</notextile>
-
-Create the host configuration file @/etc/arvados/environment@.
-
-<notextile>
-<pre><code>ARVADOS_NODE_PROFILE=apiserver
+ Containers:
+ CloudVMs:
+ ImageID: "https://zzzzzzzz.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.55555555-5555-5555-5555-555555555555.vhd"
+ Driver: azure
+ DriverParameters:
+ SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+ ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+ ClientSecret: 2WyXt0XFbEtutnf2hp528t6Wk9S5bOHWkRaaWwavKQo=
+ TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+ CloudEnvironment: AzurePublicCloud
+ ResourceGroup: zzzzz
+ Location: centralus
+ Network: zzzzz
+ Subnet: zzzzz-subnet-private
+ StorageAccount: example
+ BlobContainer: vhds
+ DeleteDanglingResourcesAfter: 20s
+ AdminUsername: arvados
</code></pre>
</notextile>
To use the FUSE driver elsewhere, you can install from a distribution package, PyPI, or source.
{% include 'notebox_begin' %}
-The Python SDK requires Python 2.7.
+The Arvados FUSE driver requires Python 2.7.
{% include 'notebox_end' %}
h4. Option 1: Install from distribution packages
h4. Option 2: Install with pip
-Run @pip-2.7 install arvados_fuse@ in an appropriate installation environment, such as a virtualenv.
+Run @pip install arvados_fuse@ in an appropriate installation environment, such as a virtualenv.
h4. Option 3: Install from source
<notextile>
<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
~$ <span class="userinput">cd arvados/services/fuse</span>
-~/arvados/services/fuse$ <span class="userinput">python2.7 setup.py install</span>
+~/arvados/services/fuse$ <span class="userinput">python setup.py install</span>
</code></pre>
</notextile>
To use the Python SDK elsewhere, you can install from PyPI or a distribution package.
-{% include 'notebox_begin' %}
-The Python SDK requires Python 2.7.
-{% include 'notebox_end' %}
+The Python SDK supports Python 2.7 and 3.4 or later.
h3. Option 1: Install with pip
This installation method is recommended to make the SDK available for use in your own Python programs. It can coexist with the system-wide installation method from a distribution package (option 2, below).
-Run @pip-2.7 install arvados-python-client@ in an appropriate installation environment, such as a virtualenv.
+Run @pip install arvados-python-client@ in an appropriate installation environment, such as a virtualenv.
-If your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: "Could not find a version that satisfies the requirement arvados-python-client". If this happens, try @pip-2.7 install --pre arvados-python-client@.
+If your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: "Could not find a version that satisfies the requirement arvados-python-client". If this happens, try @pip install --pre arvados-python-client@.
h3. Option 2: Install from a distribution package
AdminUsername string
}
-const tagKeyInstanceSecret = "InstanceSecret"
-
type containerWrapper interface {
GetBlobReference(name string) *storage.Blob
ListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error)
logger logrus.FieldLogger
}
-func newAzureInstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
+func newAzureInstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
azcfg := azureInstanceSetConfig{}
err = json.Unmarshal(config, &azcfg)
if err != nil {
name = az.namePrefix + name
- timestamp := time.Now().Format(time.RFC3339Nano)
-
- tags := make(map[string]*string)
- tags["created-at"] = ×tamp
+ tags := map[string]*string{}
for k, v := range newTags {
- newstr := v
- tags["dispatch-"+k] = &newstr
+ tags[k] = to.StringPtr(v)
}
+ tags["created-at"] = to.StringPtr(time.Now().Format(time.RFC3339Nano))
nicParameters := network.Interface{
Location: &az.azconfig.Location,
return nil, wrapAzureError(err)
}
- instances := make([]cloud.Instance, 0)
-
+ var instances []cloud.Instance
for ; result.NotDone(); err = result.Next() {
if err != nil {
return nil, wrapAzureError(err)
}
- if strings.HasPrefix(*result.Value().Name, az.namePrefix) {
- instances = append(instances, &azureInstance{
- provider: az,
- vm: result.Value(),
- nic: interfaces[*(*result.Value().NetworkProfile.NetworkInterfaces)[0].ID]})
- }
+ instances = append(instances, &azureInstance{
+ provider: az,
+ vm: result.Value(),
+ nic: interfaces[*(*result.Value().NetworkProfile.NetworkInterfaces)[0].ID],
+ })
}
return instances, nil
}
// ManageNics returns a list of Azure network interface resources.
-// Also performs garbage collection of NICs which have "namePrefix", are
-// not associated with a virtual machine and have a "create-at" time
-// more than DeleteDanglingResourcesAfter (to prevent racing and
+// Also performs garbage collection of NICs which have "namePrefix",
+// are not associated with a virtual machine and have a "created-at"
+// time more than DeleteDanglingResourcesAfter (to prevent racing and
// deleting newly created NICs) in the past are deleted.
func (az *azureInstanceSet) manageNics() (map[string]network.Interface, error) {
az.stopWg.Add(1)
ai.provider.stopWg.Add(1)
defer ai.provider.stopWg.Done()
- tags := make(map[string]*string)
-
+ tags := map[string]*string{}
for k, v := range ai.vm.Tags {
- if !strings.HasPrefix(k, "dispatch-") {
- tags[k] = v
- }
+ tags[k] = v
}
for k, v := range newTags {
- newstr := v
- tags["dispatch-"+k] = &newstr
+ tags[k] = to.StringPtr(v)
}
vmParameters := compute.VirtualMachine{
}
func (ai *azureInstance) Tags() cloud.InstanceTags {
- tags := make(map[string]string)
-
+ tags := cloud.InstanceTags{}
for k, v := range ai.vm.Tags {
- if strings.HasPrefix(k, "dispatch-") {
- tags[k[9:]] = *v
- }
+ tags[k] = *v
}
-
return tags
}
"net"
"net/http"
"os"
+ "strings"
"testing"
"time"
var _ = check.Suite(&AzureInstanceSetSuite{})
+const testNamePrefix = "compute-test123-"
+
type VirtualMachinesClientStub struct{}
func (*VirtualMachinesClientStub) createOrUpdate(ctx context.Context,
return nil, cloud.ImageID(""), cluster, err
}
- ap, err := newAzureInstanceSet(exampleCfg.DriverParameters, "test123", logrus.StandardLogger())
+ ap, err := newAzureInstanceSet(exampleCfg.DriverParameters, "test123", nil, logrus.StandardLogger())
return ap, cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
}
ap := azureInstanceSet{
BlobContainer: "vhds",
},
dispatcherID: "test123",
- namePrefix: "compute-test123-",
+ namePrefix: testNamePrefix,
logger: logrus.StandardLogger(),
deleteNIC: make(chan string),
deleteBlob: make(chan storage.Blob),
l, err := ap.Instances(nil)
c.Assert(err, check.IsNil)
- for _, i := range l {
+ for _, i := range filterInstances(c, l) {
c.Check(i.Destroy(), check.IsNil)
}
}
if err != nil {
c.Fatal("Error making provider", err)
}
+
l, err := ap.Instances(nil)
c.Assert(err, check.IsNil)
-
+ l = filterInstances(c, l)
if len(l) > 0 {
err = l[0].SetTags(map[string]string{"foo": "bar"})
if err != nil {
c.Fatal("Error setting tags", err)
}
}
+
l, err = ap.Instances(nil)
c.Assert(err, check.IsNil)
+ l = filterInstances(c, l)
if len(l) > 0 {
tg := l[0].Tags()
}
l, err := ap.Instances(nil)
c.Assert(err, check.IsNil)
+ l = filterInstances(c, l)
if len(l) > 0 {
sshclient, err := SetupSSHClient(c, l[0])
return client, nil
}
+
+func filterInstances(c *check.C, instances []cloud.Instance) []cloud.Instance {
+ var r []cloud.Instance
+ for _, i := range instances {
+ if !strings.HasPrefix(i.String(), testNamePrefix) {
+ c.Logf("ignoring instance %s", i)
+ continue
+ }
+ r = append(r, i)
+ }
+ return r
+}
"encoding/json"
"fmt"
"math/big"
- "strings"
"sync"
"git.curoverse.com/arvados.git/lib/cloud"
"golang.org/x/crypto/ssh"
)
-const arvadosDispatchID = "arvados-dispatch-id"
-const tagPrefix = "arvados-dispatch-tag-"
-
// Driver is the ec2 implementation of the cloud.Driver interface.
var Driver = cloud.DriverFunc(newEC2InstanceSet)
}
type ec2InstanceSet struct {
- ec2config ec2InstanceSetConfig
- dispatcherID cloud.InstanceSetID
- logger logrus.FieldLogger
- client ec2Interface
- keysMtx sync.Mutex
- keys map[string]string
+ ec2config ec2InstanceSetConfig
+ instanceSetID cloud.InstanceSetID
+ logger logrus.FieldLogger
+ client ec2Interface
+ keysMtx sync.Mutex
+ keys map[string]string
}
-func newEC2InstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
+func newEC2InstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
instanceSet := &ec2InstanceSet{
- dispatcherID: dispatcherID,
- logger: logger,
+ instanceSetID: instanceSetID,
+ logger: logger,
}
err = json.Unmarshal(config, &instanceSet.ec2config)
if err != nil {
}
instanceSet.keysMtx.Unlock()
- ec2tags := []*ec2.Tag{
- &ec2.Tag{
- Key: aws.String(arvadosDispatchID),
- Value: aws.String(string(instanceSet.dispatcherID)),
- },
- &ec2.Tag{
- Key: aws.String("arvados-class"),
- Value: aws.String("dynamic-compute"),
- },
- }
+ ec2tags := []*ec2.Tag{}
for k, v := range newTags {
ec2tags = append(ec2tags, &ec2.Tag{
- Key: aws.String(tagPrefix + k),
+ Key: aws.String(k),
Value: aws.String(v),
})
}
}},
DisableApiTermination: aws.Bool(false),
InstanceInitiatedShutdownBehavior: aws.String("terminate"),
- UserData: aws.String(base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))),
TagSpecifications: []*ec2.TagSpecification{
&ec2.TagSpecification{
ResourceType: aws.String("instance"),
Tags: ec2tags,
}},
+ UserData: aws.String(base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))),
}
if instanceType.AddedScratch > 0 {
}, nil
}
-func (instanceSet *ec2InstanceSet) Instances(cloud.InstanceTags) (instances []cloud.Instance, err error) {
- dii := &ec2.DescribeInstancesInput{
- Filters: []*ec2.Filter{&ec2.Filter{
- Name: aws.String("tag:" + arvadosDispatchID),
- Values: []*string{aws.String(string(instanceSet.dispatcherID))},
- }}}
-
+func (instanceSet *ec2InstanceSet) Instances(tags cloud.InstanceTags) (instances []cloud.Instance, err error) {
+ var filters []*ec2.Filter
+ for k, v := range tags {
+ filters = append(filters, &ec2.Filter{
+ Name: aws.String("tag:" + k),
+ Values: []*string{aws.String(v)},
+ })
+ }
+ dii := &ec2.DescribeInstancesInput{Filters: filters}
for {
dio, err := instanceSet.client.DescribeInstances(dii)
if err != nil {
}
func (inst *ec2Instance) SetTags(newTags cloud.InstanceTags) error {
- ec2tags := []*ec2.Tag{
- &ec2.Tag{
- Key: aws.String(arvadosDispatchID),
- Value: aws.String(string(inst.provider.dispatcherID)),
- },
- }
+ var ec2tags []*ec2.Tag
for k, v := range newTags {
ec2tags = append(ec2tags, &ec2.Tag{
- Key: aws.String(tagPrefix + k),
+ Key: aws.String(k),
Value: aws.String(v),
})
}
tags := make(map[string]string)
for _, t := range inst.instance.Tags {
- if strings.HasPrefix(*t.Key, tagPrefix) {
- tags[(*t.Key)[len(tagPrefix):]] = *t.Value
- }
+ tags[*t.Key] = *t.Value
}
return tags
return nil, cloud.ImageID(""), cluster, err
}
- ap, err := newEC2InstanceSet(exampleCfg.DriverParameters, "test123", logrus.StandardLogger())
+ ap, err := newEC2InstanceSet(exampleCfg.DriverParameters, "test123", nil, logrus.StandardLogger())
return ap, cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
}
ap := ec2InstanceSet{
- ec2config: ec2InstanceSetConfig{},
- dispatcherID: "test123",
- logger: logrus.StandardLogger(),
- client: &ec2stub{},
- keys: make(map[string]string),
+ ec2config: ec2InstanceSetConfig{},
+ instanceSetID: "test123",
+ logger: logrus.StandardLogger(),
+ client: &ec2stub{},
+ keys: make(map[string]string),
}
return &ap, cloud.ImageID("blob"), cluster, nil
}
error
}
+type SharedResourceTags map[string]string
type InstanceSetID string
type InstanceTags map[string]string
type InstanceID string
// A Driver returns an InstanceSet that uses the given InstanceSetID
// and driver-dependent configuration parameters.
//
+// If the driver creates cloud resources that aren't attached to a
+// single VM instance (like SSH key pairs on AWS) and support tagging,
+// they should be tagged with the provided SharedResourceTags.
+//
// The supplied id will be of the form "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
// where each z can be any alphanum. The returned InstanceSet must use
// this id to tag long-lived cloud resources that it creates, and must
// other mechanism. The tags must be visible to another instance of
// the same driver running on a different host.
//
-// The returned InstanceSet must ignore existing resources that are
-// visible but not tagged with the given id, except that it should log
-// a summary of such resources -- only once -- when it starts
-// up. Thus, two identically configured InstanceSets running on
-// different hosts with different ids should log about the existence
-// of each other's resources at startup, but will not interfere with
-// each other.
+// The returned InstanceSet must not modify or delete cloud resources
+// unless they are tagged with the given InstanceSetID or the caller
+// (dispatcher) calls Destroy() on them. It may log a summary of
+// untagged resources once at startup, though. Thus, two identically
+// configured InstanceSets running on different hosts with different
+// ids should log about the existence of each other's resources at
+// startup, but will not interfere with each other.
+//
+// The dispatcher always passes the InstanceSetID as a tag when
+// calling Create() and Instances(), so the driver does not need to
+// tag/filter VMs by InstanceSetID itself.
//
// Example:
//
//
// type exampleDriver struct {}
//
-// func (*exampleDriver) InstanceSet(config json.RawMessage, id InstanceSetID) (InstanceSet, error) {
+// func (*exampleDriver) InstanceSet(config json.RawMessage, id cloud.InstanceSetID, tags cloud.SharedResourceTags, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
// var is exampleInstanceSet
// if err := json.Unmarshal(config, &is); err != nil {
// return nil, err
//
// var _ = registerCloudDriver("example", &exampleDriver{})
type Driver interface {
- InstanceSet(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
+ InstanceSet(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error)
}
// DriverFunc makes a Driver using the provided function as its
// InstanceSet method. This is similar to http.HandlerFunc.
-func DriverFunc(fn func(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)) Driver {
+func DriverFunc(fn func(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error)) Driver {
return driverFunc(fn)
}
-type driverFunc func(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
+type driverFunc func(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error)
-func (df driverFunc) InstanceSet(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error) {
- return df(config, id, logger)
+func (df driverFunc) InstanceSet(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error) {
+ return df(config, id, tags, logger)
}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+
+ "git.curoverse.com/arvados.git/lib/cmd"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ "github.com/ghodss/yaml"
+)
+
+var DumpCommand cmd.Handler = dumpCommand{}
+
+type dumpCommand struct{}
+
+func (dumpCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+ var err error
+ defer func() {
+ if err != nil {
+ fmt.Fprintf(stderr, "%s\n", err)
+ }
+ }()
+ if len(args) != 0 {
+ err = fmt.Errorf("usage: %s <config-src.yaml >config-min.yaml", prog)
+ return 2
+ }
+ log := ctxlog.New(stderr, "text", "info")
+ cfg, err := Load(stdin, log)
+ if err != nil {
+ return 1
+ }
+ out, err := yaml.Marshal(cfg)
+ if err != nil {
+ return 1
+ }
+ _, err = stdout.Write(out)
+ if err != nil {
+ return 1
+ }
+ return 0
+}
+
+var CheckCommand cmd.Handler = checkCommand{}
+
+type checkCommand struct{}
+
+func (checkCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+ var err error
+ defer func() {
+ if err != nil {
+ fmt.Fprintf(stderr, "%s\n", err)
+ }
+ }()
+ if len(args) != 0 {
+ err = fmt.Errorf("usage: %s <config-src.yaml && echo 'no changes needed'", prog)
+ return 2
+ }
+ log := &plainLogger{w: stderr}
+ buf, err := ioutil.ReadAll(stdin)
+ if err != nil {
+ return 1
+ }
+ withoutDepr, err := load(bytes.NewBuffer(buf), log, false)
+ if err != nil {
+ return 1
+ }
+ withDepr, err := load(bytes.NewBuffer(buf), nil, true)
+ if err != nil {
+ return 1
+ }
+ cmd := exec.Command("diff", "-u", "--label", "without-deprecated-configs", "--label", "relying-on-deprecated-configs", "/dev/fd/3", "/dev/fd/4")
+ for _, obj := range []interface{}{withoutDepr, withDepr} {
+ y, _ := yaml.Marshal(obj)
+ pr, pw, err := os.Pipe()
+ if err != nil {
+ return 1
+ }
+ defer pr.Close()
+ go func() {
+ io.Copy(pw, bytes.NewBuffer(y))
+ pw.Close()
+ }()
+ cmd.ExtraFiles = append(cmd.ExtraFiles, pr)
+ }
+ diff, err := cmd.CombinedOutput()
+ if bytes.HasPrefix(diff, []byte("--- ")) {
+ fmt.Fprintln(stdout, "Your configuration is relying on deprecated entries. Suggest making the following changes.")
+ stdout.Write(diff)
+ return 1
+ } else if len(diff) > 0 {
+ fmt.Fprintf(stderr, "Unexpected diff output:\n%s", diff)
+ return 1
+ } else if err != nil {
+ return 1
+ }
+ if log.used {
+ return 1
+ }
+ return 0
+}
+
+type plainLogger struct {
+ w io.Writer
+ used bool
+}
+
+func (pl *plainLogger) Warnf(format string, args ...interface{}) {
+ pl.used = true
+ fmt.Fprintf(pl.w, format+"\n", args...)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+ "bytes"
+
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&CommandSuite{})
+
+type CommandSuite struct{}
+
+func (s *CommandSuite) TestBadArg(c *check.C) {
+ var stderr bytes.Buffer
+ code := DumpCommand.RunCommand("arvados config-dump", []string{"-badarg"}, bytes.NewBuffer(nil), bytes.NewBuffer(nil), &stderr)
+ c.Check(code, check.Equals, 2)
+ c.Check(stderr.String(), check.Matches, `(?ms)usage: .*`)
+}
+
+func (s *CommandSuite) TestEmptyInput(c *check.C) {
+ var stdout, stderr bytes.Buffer
+ code := DumpCommand.RunCommand("arvados config-dump", nil, &bytes.Buffer{}, &stdout, &stderr)
+ c.Check(code, check.Equals, 1)
+ c.Check(stderr.String(), check.Matches, `config does not define any clusters\n`)
+}
+
+func (s *CommandSuite) TestCheckNoDeprecatedKeys(c *check.C) {
+ var stdout, stderr bytes.Buffer
+ in := `
+Clusters:
+ z1234:
+ API:
+ MaxItemsPerResponse: 1234
+`
+ code := CheckCommand.RunCommand("arvados config-check", nil, bytes.NewBufferString(in), &stdout, &stderr)
+ c.Check(code, check.Equals, 0)
+ c.Check(stdout.String(), check.Equals, "")
+ c.Check(stderr.String(), check.Equals, "")
+}
+
+func (s *CommandSuite) TestCheckDeprecatedKeys(c *check.C) {
+ var stdout, stderr bytes.Buffer
+ in := `
+Clusters:
+ z1234:
+ RequestLimits:
+ MaxItemsPerResponse: 1234
+`
+ code := CheckCommand.RunCommand("arvados config-check", nil, bytes.NewBufferString(in), &stdout, &stderr)
+ c.Check(code, check.Equals, 1)
+ c.Check(stdout.String(), check.Matches, `(?ms).*API:\n\- +.*MaxItemsPerResponse: 1000\n\+ +MaxItemsPerResponse: 1234\n.*`)
+}
+
+func (s *CommandSuite) TestCheckUnknownKey(c *check.C) {
+ var stdout, stderr bytes.Buffer
+ in := `
+Clusters:
+ z1234:
+ Bogus1: foo
+ BogusSection:
+ Bogus2: foo
+ API:
+ Bogus3:
+ Bogus4: true
+ PostgreSQL:
+ ConnectionPool:
+ {Bogus5: true}
+`
+ code := CheckCommand.RunCommand("arvados config-check", nil, bytes.NewBufferString(in), &stdout, &stderr)
+ c.Log(stderr.String())
+ c.Check(code, check.Equals, 1)
+ c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.Bogus1\n.*`)
+ c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.BogusSection\n.*`)
+ c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.API.Bogus3\n.*`)
+ c.Check(stderr.String(), check.Matches, `(?ms).*unexpected object in config entry: Clusters.z1234.PostgreSQL.ConnectionPool\n.*`)
+}
+
+func (s *CommandSuite) TestDumpFormatting(c *check.C) {
+ var stdout, stderr bytes.Buffer
+ in := `
+Clusters:
+ z1234:
+ Containers:
+ CloudVMs:
+ TimeoutBooting: 600s
+ Services:
+ Controller:
+ InternalURLs:
+ http://localhost:12345: {}
+`
+ code := DumpCommand.RunCommand("arvados config-dump", nil, bytes.NewBufferString(in), &stdout, &stderr)
+ c.Check(code, check.Equals, 0)
+ c.Check(stdout.String(), check.Matches, `(?ms).*TimeoutBooting: 10m\n.*`)
+ c.Check(stdout.String(), check.Matches, `(?ms).*http://localhost:12345: {}\n.*`)
+}
+
+func (s *CommandSuite) TestDumpUnknownKey(c *check.C) {
+ var stdout, stderr bytes.Buffer
+ in := `
+Clusters:
+ z1234:
+ UnknownKey: foobar
+ ManagementToken: secret
+`
+ code := DumpCommand.RunCommand("arvados config-dump", nil, bytes.NewBufferString(in), &stdout, &stderr)
+ c.Check(code, check.Equals, 0)
+ c.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.UnknownKey.*`)
+ c.Check(stdout.String(), check.Matches, `(?ms)Clusters:\n z1234:\n.*`)
+ c.Check(stdout.String(), check.Matches, `(?ms).*\n *ManagementToken: secret\n.*`)
+ c.Check(stdout.String(), check.Not(check.Matches), `(?ms).*UnknownKey.*`)
+}
Services:
RailsAPI:
InternalURLs: {}
- GitHTTP:
- InternalURLs: {}
- ExternalURL: ""
- Keepstore:
- InternalURLs: {}
+ ExternalURL: "-"
Controller:
InternalURLs: {}
ExternalURL: ""
ExternalURL: ""
Keepbalance:
InternalURLs: {}
+ ExternalURL: "-"
GitHTTP:
InternalURLs: {}
ExternalURL: ""
ExternalURL: ""
DispatchCloud:
InternalURLs: {}
+ ExternalURL: "-"
SSO:
ExternalURL: ""
Keepproxy:
ExternalURL: ""
Keepstore:
InternalURLs: {}
+ ExternalURL: "-"
Composer:
ExternalURL: ""
WebShell:
ExternalURL: ""
Workbench2:
ExternalURL: ""
+ Nodemanager:
+ InternalURLs: {}
+ ExternalURL: "-"
+ Health:
+ InternalURLs: {}
+ ExternalURL: "-"
+
PostgreSQL:
# max concurrent connections per arvados server daemon
ConnectionPool: 32
Connection:
# All parameters here are passed to the PG client library in a connection string;
# see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
- Host: ""
- Port: 0
- User: ""
- Password: ""
- DBName: ""
+ host: ""
+ port: ""
+ user: ""
+ password: ""
+ dbname: ""
API:
# Maximum size (in bytes) allowed for a single API request. This
# limit is published in the discovery document for use by clients.
# update on the permission view in the future, if not already scheduled.
AsyncPermissionsUpdateInterval: 20
+ # Maximum number of concurrent outgoing requests to make while
+ # serving a single incoming multi-cluster (federated) request.
+ MaxRequestAmplification: 4
+
# RailsSessionSecretToken is a string of alphanumeric characters
# used by Rails to sign session tokens. IMPORTANT: This is a
# site secret. It should be at least 50 characters.
RailsSessionSecretToken: ""
+ # Maximum wall clock time to spend handling an incoming request.
+ RequestTimeout: 5m
+
Users:
# Config parameters to automatically setup new users. If enabled,
# this users will be able to self-activate. Enable this if you want
# Arvados object is created, modified, or deleted.)
#
# Currently, websocket event notifications rely on audit logs, so
- # this should not be set lower than 600 (5 minutes).
- MaxAge: 1209600
+ # this should not be set lower than 300 (5 minutes).
+ MaxAge: 336h
# Maximum number of log rows to delete in a single SQL transaction.
#
UnloggedAttributes: []
SystemLogs:
+
+ # Logging threshold: panic, fatal, error, warn, info, debug, or
+ # trace
+ LogLevel: info
+
+ # Logging format: json or text
+ Format: json
+
# Maximum characters of (JSON-encoded) query parameters to include
# in each request log entry. When params exceed this size, they will
# be JSON-encoded, truncated to this size, and logged as
# blob_signing_key note above.
#
# The default is 2 weeks.
- BlobSigningTTL: 1209600
+ BlobSigningTTL: 336h
# Default lifetime for ephemeral collections: 2 weeks. This must not
# be less than blob_signature_ttl.
- DefaultTrashLifetime: 1209600
+ DefaultTrashLifetime: 336h
# Interval (seconds) between trash sweeps. During a trash sweep,
# collections are marked as trash if their trash_at time has
Repositories: /var/lib/arvados/git/repositories
TLS:
+ Certificate: ""
+ Key: ""
Insecure: false
Containers:
# troubleshooting purposes.
LogReuseDecisions: false
+ # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
+ # (experimental) cloud dispatcher for executing containers on
+ # worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
+ # and ends with "\n-----END RSA PRIVATE KEY-----\n".
+ DispatchPrivateKey: none
+
+ # Maximum time to wait for workers to come up before abandoning
+ # stale locks from a previous dispatch process.
+ StaleLockTimeout: 1m
+
Logging:
# When you run the db:delete_old_container_logs task, it will find
# containers that have been finished for at least this many seconds,
# original job reuse behavior, and is still the default).
ReuseJobIfOutputsDiffer: false
+ CloudVMs:
+ # Enable the cloud scheduler (experimental).
+ Enable: false
+
+ # Name/number of port where workers' SSH services listen.
+ SSHPort: "22"
+
+ # Interval between queue polls.
+ PollInterval: 10s
+
+ # Shell command to execute on each worker to determine whether
+ # the worker is booted and ready to run containers. It should
+ # exit zero if the worker is ready.
+ BootProbeCommand: "docker ps"
+
+ # Minimum interval between consecutive probes to a single
+ # worker.
+ ProbeInterval: 10s
+
+ # Maximum probes per second, across all workers in a pool.
+ MaxProbesPerSecond: 10
+
+ # Time before repeating SIGTERM when killing a container.
+ TimeoutSignal: 5s
+
+ # Time to give up on SIGTERM and write off the worker.
+ TimeoutTERM: 2m
+
+ # Maximum create/destroy-instance operations per second (0 =
+ # unlimited).
+ MaxCloudOpsPerSecond: 0
+
+ # Interval between cloud provider syncs/updates ("list all
+ # instances").
+ SyncInterval: 1m
+
+ # Time to leave an idle worker running (in case new containers
+ # appear in the queue that it can run) before shutting it
+ # down.
+ TimeoutIdle: 1m
+
+ # Time to wait for a new worker to boot (i.e., pass
+ # BootProbeCommand) before giving up and shutting it down.
+ TimeoutBooting: 10m
+
+ # Maximum time a worker can stay alive with no successful
+ # probes before being automatically shut down.
+ TimeoutProbe: 10m
+
+ # Time after shutting down a worker to retry the
+ # shutdown/destroy operation.
+ TimeoutShutdown: 10s
+
+ # Worker VM image ID.
+ ImageID: ami-01234567890abcdef
+
+ # Tags to add on all resources (VMs, NICs, disks) created by
+ # the container dispatcher. (Arvados's own tags --
+ # InstanceType, IdleBehavior, and InstanceSecret -- will also
+ # be added.)
+ ResourceTags:
+ SAMPLE: "tag value"
+
+ # Prefix for predefined tags used by Arvados (InstanceSetID,
+ # InstanceType, InstanceSecret, IdleBehavior). With the
+ # default value "Arvados", tags are "ArvadosInstanceSetID",
+ # "ArvadosInstanceSecret", etc.
+ #
+ # This should only be changed while no cloud resources are in
+ # use and the cloud dispatcher is not running. Otherwise,
+ # VMs/resources that were added using the old tag prefix will
+ # need to be detected and cleaned up manually.
+ TagKeyPrefix: Arvados
+
+ # Cloud driver: "azure" (Microsoft Azure) or "ec2" (Amazon AWS).
+ Driver: ec2
+
+ # Cloud-specific driver parameters.
+ DriverParameters:
+
+ # (ec2) Credentials.
+ AccessKeyID: ""
+ SecretAccessKey: ""
+
+ # (ec2) Instance configuration.
+ SecurityGroupIDs:
+ - ""
+ SubnetID: ""
+ Region: ""
+ EBSVolumeType: gp2
+ AdminUsername: debian
+
+ # (azure) Credentials.
+ SubscriptionID: ""
+ ClientID: ""
+ ClientSecret: ""
+ TenantID: ""
+
+ # (azure) Instance configuration.
+ CloudEnvironment: AzurePublicCloud
+ ResourceGroup: ""
+ Location: centralus
+ Network: ""
+ Subnet: ""
+ StorageAccount: ""
+ BlobContainer: ""
+ DeleteDanglingResourcesAfter: 20s
+ AdminUsername: arvados
+
+ InstanceTypes:
+
+ # Use the instance type name as the key (in place of "SAMPLE" in
+ # this sample entry).
+ SAMPLE:
+ # Cloud provider's instance type. Defaults to the configured type name.
+ ProviderType: ""
+ VCPUs: 1
+ RAM: 128MiB
+ IncludedScratch: 16GB
+ AddedScratch: 0
+ Price: 0.1
+ Preemptible: false
+
Mail:
MailchimpAPIKey: ""
MailchimpListID: ""
EmailFrom: ""
RemoteClusters:
"*":
+ Host: ""
+ Proxy: false
+ Scheme: https
+ Insecure: false
+ ActivateUsers: false
+ SAMPLE:
+ Host: sample.arvadosapi.com
Proxy: false
+ Scheme: https
+ Insecure: false
ActivateUsers: false
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "github.com/ghodss/yaml"
+)
+
+type deprRequestLimits struct {
+ MaxItemsPerResponse *int
+ MultiClusterRequestConcurrency *int
+}
+
+type deprCluster struct {
+ RequestLimits deprRequestLimits
+ NodeProfiles map[string]nodeProfile
+}
+
+type deprecatedConfig struct {
+ Clusters map[string]deprCluster
+}
+
+type nodeProfile struct {
+ Controller systemServiceInstance `json:"arvados-controller"`
+ Health systemServiceInstance `json:"arvados-health"`
+ Keepbalance systemServiceInstance `json:"keep-balance"`
+ Keepproxy systemServiceInstance `json:"keepproxy"`
+ Keepstore systemServiceInstance `json:"keepstore"`
+ Keepweb systemServiceInstance `json:"keep-web"`
+ Nodemanager systemServiceInstance `json:"arvados-node-manager"`
+ DispatchCloud systemServiceInstance `json:"arvados-dispatch-cloud"`
+ RailsAPI systemServiceInstance `json:"arvados-api-server"`
+ Websocket systemServiceInstance `json:"arvados-ws"`
+ Workbench1 systemServiceInstance `json:"arvados-workbench"`
+}
+
+type systemServiceInstance struct {
+ Listen string
+ TLS bool
+ Insecure bool
+}
+
+func applyDeprecatedConfig(cfg *arvados.Config, configdata []byte, log logger) error {
+ var dc deprecatedConfig
+ err := yaml.Unmarshal(configdata, &dc)
+ if err != nil {
+ return err
+ }
+ hostname, err := os.Hostname()
+ if err != nil {
+ return err
+ }
+ for id, dcluster := range dc.Clusters {
+ cluster, ok := cfg.Clusters[id]
+ if !ok {
+ return fmt.Errorf("can't load legacy config %q that is not present in current config", id)
+ }
+ for name, np := range dcluster.NodeProfiles {
+ if name == "*" || name == os.Getenv("ARVADOS_NODE_PROFILE") || name == hostname {
+ name = "localhost"
+ } else if log != nil {
+ log.Warnf("overriding Clusters.%s.Services using Clusters.%s.NodeProfiles.%s (guessing %q is a hostname)", id, id, name, name)
+ }
+ applyDeprecatedNodeProfile(name, np.RailsAPI, &cluster.Services.RailsAPI)
+ applyDeprecatedNodeProfile(name, np.Controller, &cluster.Services.Controller)
+ applyDeprecatedNodeProfile(name, np.DispatchCloud, &cluster.Services.DispatchCloud)
+ }
+ if dst, n := &cluster.API.MaxItemsPerResponse, dcluster.RequestLimits.MaxItemsPerResponse; n != nil && *n != *dst {
+ *dst = *n
+ }
+ if dst, n := &cluster.API.MaxRequestAmplification, dcluster.RequestLimits.MultiClusterRequestConcurrency; n != nil && *n != *dst {
+ *dst = *n
+ }
+ cfg.Clusters[id] = cluster
+ }
+ return nil
+}
+
+func applyDeprecatedNodeProfile(hostname string, ssi systemServiceInstance, svc *arvados.Service) {
+ scheme := "https"
+ if !ssi.TLS {
+ scheme = "http"
+ }
+ if svc.InternalURLs == nil {
+ svc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{}
+ }
+ host := ssi.Listen
+ if host == "" {
+ return
+ }
+ if strings.HasPrefix(host, ":") {
+ host = hostname + host
+ }
+ svc.InternalURLs[arvados.URL{Scheme: scheme, Host: host}] = arvados.ServiceInstance{}
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+ "os"
+
+ check "gopkg.in/check.v1"
+)
+
+func (s *LoadSuite) TestDeprecatedNodeProfilesToServices(c *check.C) {
+ hostname, err := os.Hostname()
+ c.Assert(err, check.IsNil)
+ s.checkEquivalent(c, `
+Clusters:
+ z1111:
+ NodeProfiles:
+ "*":
+ arvados-controller:
+ listen: ":9004"
+ `+hostname+`:
+ arvados-api-server:
+ listen: ":8000"
+ dispatch-host:
+ arvados-dispatch-cloud:
+ listen: ":9006"
+`, `
+Clusters:
+ z1111:
+ Services:
+ RailsAPI:
+ InternalURLs:
+ "http://localhost:8000": {}
+ Controller:
+ InternalURLs:
+ "http://localhost:9004": {}
+ DispatchCloud:
+ InternalURLs:
+ "http://dispatch-host:9006": {}
+ NodeProfiles:
+ "*":
+ arvados-controller:
+ listen: ":9004"
+ `+hostname+`:
+ arvados-api-server:
+ listen: ":8000"
+ dispatch-host:
+ arvados-dispatch-cloud:
+ listen: ":9006"
+`)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+)
+
+func main() {
+ err := generate()
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func generate() error {
+ outfn := "generated_config.go"
+ tmpfile, err := ioutil.TempFile(".", "."+outfn+".")
+ if err != nil {
+ return err
+ }
+ defer os.Remove(tmpfile.Name())
+
+ gofmt := exec.Command("gofmt", "-s")
+ gofmt.Stdout = tmpfile
+ gofmt.Stderr = os.Stderr
+ w, err := gofmt.StdinPipe()
+ if err != nil {
+ return err
+ }
+ gofmt.Start()
+
+ // copyright header: same as this file
+ cmd := exec.Command("head", "-n", "4", "generate.go")
+ cmd.Stdout = w
+ cmd.Stderr = os.Stderr
+ err = cmd.Run()
+ if err != nil {
+ return err
+ }
+
+ data, err := ioutil.ReadFile("config.default.yml")
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintf(w, "package config\nvar DefaultYAML = []byte(`%s`)", bytes.Replace(data, []byte{'`'}, []byte("`+\"`\"+`"), -1))
+ if err != nil {
+ return err
+ }
+ err = w.Close()
+ if err != nil {
+ return err
+ }
+ err = gofmt.Wait()
+ if err != nil {
+ return err
+ }
+ err = tmpfile.Close()
+ if err != nil {
+ return err
+ }
+ return os.Rename(tmpfile.Name(), outfn)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+var DefaultYAML = []byte(`# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Do not use this file for site configuration. Create
+# /etc/arvados/config.yml instead.
+#
+# The order of precedence (highest to lowest):
+# 1. Legacy component-specific config files (deprecated)
+# 2. /etc/arvados/config.yml
+# 3. config.default.yml
+
+Clusters:
+ xxxxx:
+ SystemRootToken: ""
+
+ # Token to be included in all healthcheck requests. Disabled by default.
+ # Server expects request header of the format "Authorization: Bearer xxx"
+ ManagementToken: ""
+
+ Services:
+ RailsAPI:
+ InternalURLs: {}
+ ExternalURL: "-"
+ Controller:
+ InternalURLs: {}
+ ExternalURL: ""
+ Websocket:
+ InternalURLs: {}
+ ExternalURL: ""
+ Keepbalance:
+ InternalURLs: {}
+ ExternalURL: "-"
+ GitHTTP:
+ InternalURLs: {}
+ ExternalURL: ""
+ GitSSH:
+ ExternalURL: ""
+ DispatchCloud:
+ InternalURLs: {}
+ ExternalURL: "-"
+ SSO:
+ ExternalURL: ""
+ Keepproxy:
+ InternalURLs: {}
+ ExternalURL: ""
+ WebDAV:
+ InternalURLs: {}
+ ExternalURL: ""
+ WebDAVDownload:
+ InternalURLs: {}
+ ExternalURL: ""
+ Keepstore:
+ InternalURLs: {}
+ ExternalURL: "-"
+ Composer:
+ ExternalURL: ""
+ WebShell:
+ ExternalURL: ""
+ Workbench1:
+ InternalURLs: {}
+ ExternalURL: ""
+ Workbench2:
+ ExternalURL: ""
+ Nodemanager:
+ InternalURLs: {}
+ ExternalURL: "-"
+ Health:
+ InternalURLs: {}
+ ExternalURL: "-"
+
+ PostgreSQL:
+ # max concurrent connections per arvados server daemon
+ ConnectionPool: 32
+ Connection:
+ # All parameters here are passed to the PG client library in a connection string;
+ # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+ host: ""
+ port: ""
+ user: ""
+ password: ""
+ dbname: ""
+ API:
+ # Maximum size (in bytes) allowed for a single API request. This
+ # limit is published in the discovery document for use by clients.
+ # Note: You must separately configure the upstream web server or
+ # proxy to actually enforce the desired maximum request size on the
+ # server side.
+ MaxRequestSize: 134217728
+
+ # Limit the number of bytes read from the database during an index
+ # request (by retrieving and returning fewer rows than would
+ # normally be returned in a single response).
+ # Note 1: This setting never reduces the number of returned rows to
+ # zero, no matter how big the first data row is.
+ # Note 2: Currently, this is only checked against a specific set of
+ # columns that tend to get large (collections.manifest_text,
+ # containers.mounts, workflows.definition). Other fields (e.g.,
+ # "properties" hashes) are not counted against this limit.
+ MaxIndexDatabaseRead: 134217728
+
+ # Maximum number of items to return when responding to APIs that
+ # can return partial result sets using limit and offset parameters
+ # (e.g., *.index, groups.contents). If a request specifies a "limit"
+ # parameter higher than this value, this value is used instead.
+ MaxItemsPerResponse: 1000
+
+ # API methods to disable. Disabled methods are not listed in the
+ # discovery document, and respond 404 to all requests.
+ # Example: ["jobs.create", "pipeline_instances.create"]
+ DisabledAPIs: []
+
+ # Interval (seconds) between asynchronous permission view updates. Any
+ # permission-updating API called with the 'async' parameter schedules an
+ # update on the permission view in the future, if not already scheduled.
+ AsyncPermissionsUpdateInterval: 20
+
+ # Maximum number of concurrent outgoing requests to make while
+ # serving a single incoming multi-cluster (federated) request.
+ MaxRequestAmplification: 4
+
+ # RailsSessionSecretToken is a string of alphanumeric characters
+ # used by Rails to sign session tokens. IMPORTANT: This is a
+ # site secret. It should be at least 50 characters.
+ RailsSessionSecretToken: ""
+
+ # Maximum wall clock time to spend handling an incoming request.
+ RequestTimeout: 5m
+
+ Users:
+ # Config parameters to automatically setup new users. If enabled,
+ # these users will be able to self-activate. Enable this if you want
+ # to run an open instance where anyone can create an account and use
+ # the system without requiring manual approval.
+ #
+ # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
+ # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
+ AutoSetupNewUsers: false
+ AutoSetupNewUsersWithVmUUID: ""
+ AutoSetupNewUsersWithRepository: false
+ AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+
+ # When new_users_are_active is set to true, new users will be active
+ # immediately. This skips the "self-activate" step which enforces
+ # user agreements. Should only be enabled for development.
+ NewUsersAreActive: false
+
+ # The e-mail address of the user you would like to become marked as an admin
+ # user on their first login.
+ # In the default configuration, authentication happens through the Arvados SSO
+ # server, which uses OAuth2 against Google's servers, so in that case this
+ # should be an address associated with a Google account.
+ AutoAdminUserWithEmail: ""
+
+ # If auto_admin_first_user is set to true, the first user to log in when no
+ # other admin users exist will automatically become an admin user.
+ AutoAdminFirstUser: false
+
+ # Email address to notify whenever a user creates a profile for the
+ # first time
+ UserProfileNotificationAddress: ""
+ AdminNotifierEmailFrom: arvados@example.com
+ EmailSubjectPrefix: "[ARVADOS] "
+ UserNotifierEmailFrom: arvados@example.com
+ NewUserNotificationRecipients: []
+ NewInactiveUserNotificationRecipients: []
+
+ AuditLogs:
+ # Time to keep audit logs, in seconds. (An audit log is a row added
+ # to the "logs" table in the PostgreSQL database each time an
+ # Arvados object is created, modified, or deleted.)
+ #
+ # Currently, websocket event notifications rely on audit logs, so
+ # this should not be set lower than 300 (5 minutes).
+ MaxAge: 336h
+
+ # Maximum number of log rows to delete in a single SQL transaction.
+ #
+ # If max_audit_log_delete_batch is 0, log entries will never be
+ # deleted by Arvados. Cleanup can be done by an external process
+ # without affecting any Arvados system processes, as long as very
+ # recent (<5 minutes old) logs are not deleted.
+ #
+ # 100000 is a reasonable batch size for most sites.
+ MaxDeleteBatch: 0
+
+ # Attributes to suppress in events and audit logs. Notably,
+ # specifying ["manifest_text"] here typically makes the database
+ # smaller and faster.
+ #
+ # Warning: Using any non-empty value here can have undesirable side
+ # effects for any client or component that relies on event logs.
+ # Use at your own risk.
+ UnloggedAttributes: []
+
+ SystemLogs:
+
+ # Logging threshold: panic, fatal, error, warn, info, debug, or
+ # trace
+ LogLevel: info
+
+ # Logging format: json or text
+ Format: json
+
+ # Maximum characters of (JSON-encoded) query parameters to include
+ # in each request log entry. When params exceed this size, they will
+ # be JSON-encoded, truncated to this size, and logged as
+ # params_truncated.
+ MaxRequestLogParamsSize: 2000
+
+ Collections:
+ # Allow clients to create collections by providing a manifest with
+ # unsigned data blob locators. IMPORTANT: This effectively disables
+ # access controls for data stored in Keep: a client who knows a hash
+ # can write a manifest that references the hash, pass it to
+ # collections.create (which will create a permission link), use
+ # collections.get to obtain a signature for that data locator, and
+ # use that signed locator to retrieve the data from Keep. Therefore,
+ # do not turn this on if your users expect to keep data private from
+ # one another!
+ BlobSigning: true
+
+ # blob_signing_key is a string of alphanumeric characters used to
+ # generate permission signatures for Keep locators. It must be
+ # identical to the permission key given to Keep. IMPORTANT: This is
+ # a site secret. It should be at least 50 characters.
+ #
+ # Modifying blob_signing_key will invalidate all existing
+ # signatures, which can cause programs to fail (e.g., arv-put,
+ # arv-get, and Crunch jobs). To avoid errors, rotate keys only when
+ # no such processes are running.
+ BlobSigningKey: ""
+
+ # Default replication level for collections. This is used when a
+ # collection's replication_desired attribute is nil.
+ DefaultReplication: 2
+
+ # Lifetime (in seconds) of blob permission signatures generated by
+ # the API server. This determines how long a client can take (after
+ # retrieving a collection record) to retrieve the collection data
+ # from Keep. If the client needs more time than that (assuming the
+ # collection still has the same content and the relevant user/token
+ # still has permission) the client can retrieve the collection again
+ # to get fresh signatures.
+ #
+ # This must be exactly equal to the -blob-signature-ttl flag used by
+ # keepstore servers. Otherwise, reading data blocks and saving
+ # collections will fail with HTTP 403 permission errors.
+ #
+ # Modifying blob_signature_ttl invalidates existing signatures; see
+ # blob_signing_key note above.
+ #
+ # The default is 2 weeks.
+ BlobSigningTTL: 336h
+
+ # Default lifetime for ephemeral collections: 2 weeks. This must not
+ # be less than blob_signature_ttl.
+ DefaultTrashLifetime: 336h
+
+ # Interval (seconds) between trash sweeps. During a trash sweep,
+ # collections are marked as trash if their trash_at time has
+ # arrived, and deleted if their delete_at time has arrived.
+ TrashSweepInterval: 60
+
+ # If true, enable collection versioning.
+ # When a collection's preserve_version field is true or the current version
+ # is older than the amount of seconds defined on preserve_version_if_idle,
+ # a snapshot of the collection's previous state is created and linked to
+ # the current collection.
+ CollectionVersioning: false
+
+ # 0 = auto-create a new version on every update.
+ # -1 = never auto-create new versions.
+ # > 0 = auto-create a new version when older than the specified number of seconds.
+ PreserveVersionIfIdle: -1
+
+ Login:
+ # These settings are provided by your OAuth2 provider (e.g.,
+ # sso-provider).
+ ProviderAppSecret: ""
+ ProviderAppID: ""
+
+ Git:
+ # Git repositories must be readable by api server, or you won't be
+ # able to submit crunch jobs. To pass the test suites, put a clone
+ # of the arvados tree in {git_repositories_dir}/arvados.git or
+ # {git_repositories_dir}/arvados/.git
+ Repositories: /var/lib/arvados/git/repositories
+
+ TLS:
+ Certificate: ""
+ Key: ""
+ Insecure: false
+
+ Containers:
+ # List of supported Docker Registry image formats that compute nodes
+ # are able to use. ` + "`" + `arv keep docker` + "`" + ` will error out if a user tries
+ # to store an image with an unsupported format. Use an empty array
+ # to skip the compatibility check (and display a warning message to
+ # that effect).
+ #
+ # Example for sites running docker < 1.10: ["v1"]
+ # Example for sites running docker >= 1.10: ["v2"]
+ # Example for disabling check: []
+ SupportedDockerImageFormats: ["v2"]
+
+ # Include details about job reuse decisions in the server log. This
+ # causes additional database queries to run, so it should not be
+ # enabled unless you expect to examine the resulting logs for
+ # troubleshooting purposes.
+ LogReuseDecisions: false
+
+ # Default value for keep_cache_ram of a container's runtime_constraints.
+ DefaultKeepCacheRAM: 268435456
+
+ # Number of times a container can be unlocked before being
+ # automatically cancelled.
+ MaxDispatchAttempts: 5
+
+ # Default value for container_count_max for container requests. This is the
+ # number of times Arvados will create a new container to satisfy a container
+ # request. If a container is cancelled it will retry a new container if
+ # container_count < container_count_max on any container requests associated
+ # with the cancelled container.
+ MaxRetryAttempts: 3
+
+ # The maximum number of compute nodes that can be in use simultaneously
+ # If this limit is reduced, any existing nodes with slot number >= new limit
+ # will not be counted against the new limit. In other words, the new limit
+ # won't be strictly enforced until those nodes with higher slot numbers
+ # go down.
+ MaxComputeVMs: 64
+
+ # Preemptible instance support (e.g. AWS Spot Instances)
+ # When true, child containers will get created with the preemptible
+ # scheduling parameter set.
+ UsePreemptibleInstances: false
+
+ # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
+ # (experimental) cloud dispatcher for executing containers on
+ # worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
+ # and ends with "\n-----END RSA PRIVATE KEY-----\n".
+ DispatchPrivateKey: none
+
+ # Maximum time to wait for workers to come up before abandoning
+ # stale locks from a previous dispatch process.
+ StaleLockTimeout: 1m
+
+ Logging:
+ # When you run the db:delete_old_container_logs task, it will find
+ # containers that have been finished for at least this many seconds,
+ # and delete their stdout, stderr, arv-mount, crunch-run, and
+ # crunchstat logs from the logs table.
+ MaxAge: 720h
+
+ # These two settings control how frequently log events are flushed to the
+ # database. Log lines are buffered until either crunch_log_bytes_per_event
+ # has been reached or crunch_log_seconds_between_events has elapsed since
+ # the last flush.
+ LogBytesPerEvent: 4096
+ LogSecondsBetweenEvents: 1
+
+ # The sample period for throttling logs, in seconds.
+ LogThrottlePeriod: 60
+
+ # Maximum number of bytes that job can log over crunch_log_throttle_period
+ # before being silenced until the end of the period.
+ LogThrottleBytes: 65536
+
+ # Maximum number of lines that job can log over crunch_log_throttle_period
+ # before being silenced until the end of the period.
+ LogThrottleLines: 1024
+
+ # Maximum bytes that may be logged by a single job. Log bytes that are
+ # silenced by throttling are not counted against this total.
+ LimitLogBytesPerJob: 67108864
+
+ LogPartialLineThrottlePeriod: 5
+
+ # Container logs are written to Keep and saved in a collection,
+ # which is updated periodically while the container runs. This
+ # value sets the interval (given in seconds) between collection
+ # updates.
+ LogUpdatePeriod: 1800
+
+ # The log collection is also updated when the specified amount of
+ # log data (given in bytes) is produced in less than one update
+ # period.
+ LogUpdateSize: 33554432
+
+ SLURM:
+ Managed:
+ # Path to dns server configuration directory
+ # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+ # files or touch restart.txt (see below).
+ DNSServerConfDir: ""
+
+ # Template file for the dns server host snippets. See
+ # unbound.template in this directory for an example. If false, do
+ # not write any config files.
+ DNSServerConfTemplate: ""
+
+ # String to write to {dns_server_conf_dir}/restart.txt (with a
+ # trailing newline) after updating local data. If false, do not
+ # open or write the restart.txt file.
+ DNSServerReloadCommand: ""
+
+ # Command to run after each DNS update. Template variables will be
+ # substituted; see the "unbound" example below. If false, do not run
+ # a command.
+ DNSServerUpdateCommand: ""
+
+ ComputeNodeDomain: ""
+ ComputeNodeNameservers:
+ - 192.168.1.1
+
+ # Hostname to assign to a compute node when it sends a "ping" and the
+ # hostname in its Node record is nil.
+ # During bootstrapping, the "ping" script is expected to notice the
+ # hostname given in the ping response, and update its unix hostname
+ # accordingly.
+ # If false, leave the hostname alone (this is appropriate if your compute
+ # nodes' hostnames are already assigned by some other mechanism).
+ #
+ # One way or another, the hostnames of your node records should agree
+ # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
+ #
+ # Example for compute0000, compute0001, ....:
+ # assign_node_hostname: compute%<slot_number>04d
+ # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
+ AssignNodeHostname: "compute%<slot_number>d"
+
+ JobsAPI:
+ # Enable the legacy Jobs API. This value must be a string.
+ # 'auto' -- (default) enable the Jobs API only if it has been used before
+ # (i.e., there are job records in the database)
+ # 'true' -- enable the Jobs API despite lack of existing records.
+ # 'false' -- disable the Jobs API despite presence of existing records.
+ Enable: 'auto'
+
+ # Git repositories must be readable by api server, or you won't be
+ # able to submit crunch jobs. To pass the test suites, put a clone
+ # of the arvados tree in {git_repositories_dir}/arvados.git or
+ # {git_repositories_dir}/arvados/.git
+ GitInternalDir: /var/lib/arvados/internal.git
+
+ # Docker image to be used when none found in runtime_constraints of a job
+ DefaultDockerImage: ""
+
+ # none or slurm_immediate
+ CrunchJobWrapper: none
+
+ # username, or false = do not set uid when running jobs.
+ CrunchJobUser: crunch
+
+ # The web service must be able to create/write this file, and
+ # crunch-job must be able to stat() it.
+ CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
+
+ # Control job reuse behavior when two completed jobs match the
+ # search criteria and have different outputs.
+ #
+ # If true, in case of a conflict, reuse the earliest job (this is
+ # similar to container reuse behavior).
+ #
+ # If false, in case of a conflict, do not reuse any completed job,
+ # but do reuse an already-running job if available (this is the
+ # original job reuse behavior, and is still the default).
+ ReuseJobIfOutputsDiffer: false
+
+ CloudVMs:
+ # Enable the cloud scheduler (experimental).
+ Enable: false
+
+ # Name/number of port where workers' SSH services listen.
+ SSHPort: "22"
+
+ # Interval between queue polls.
+ PollInterval: 10s
+
+ # Shell command to execute on each worker to determine whether
+ # the worker is booted and ready to run containers. It should
+ # exit zero if the worker is ready.
+ BootProbeCommand: "docker ps"
+
+ # Minimum interval between consecutive probes to a single
+ # worker.
+ ProbeInterval: 10s
+
+ # Maximum probes per second, across all workers in a pool.
+ MaxProbesPerSecond: 10
+
+ # Time before repeating SIGTERM when killing a container.
+ TimeoutSignal: 5s
+
+ # Time to give up on SIGTERM and write off the worker.
+ TimeoutTERM: 2m
+
+ # Maximum create/destroy-instance operations per second (0 =
+ # unlimited).
+ MaxCloudOpsPerSecond: 0
+
+ # Interval between cloud provider syncs/updates ("list all
+ # instances").
+ SyncInterval: 1m
+
+ # Time to leave an idle worker running (in case new containers
+ # appear in the queue that it can run) before shutting it
+ # down.
+ TimeoutIdle: 1m
+
+ # Time to wait for a new worker to boot (i.e., pass
+ # BootProbeCommand) before giving up and shutting it down.
+ TimeoutBooting: 10m
+
+ # Maximum time a worker can stay alive with no successful
+ # probes before being automatically shut down.
+ TimeoutProbe: 10m
+
+ # Time after shutting down a worker to retry the
+ # shutdown/destroy operation.
+ TimeoutShutdown: 10s
+
+ # Worker VM image ID.
+ ImageID: ami-01234567890abcdef
+
+ # Tags to add on all resources (VMs, NICs, disks) created by
+ # the container dispatcher. (Arvados's own tags --
+ # InstanceType, IdleBehavior, and InstanceSecret -- will also
+ # be added.)
+ ResourceTags:
+ SAMPLE: "tag value"
+
+ # Prefix for predefined tags used by Arvados (InstanceSetID,
+ # InstanceType, InstanceSecret, IdleBehavior). With the
+ # default value "Arvados", tags are "ArvadosInstanceSetID",
+ # "ArvadosInstanceSecret", etc.
+ #
+ # This should only be changed while no cloud resources are in
+ # use and the cloud dispatcher is not running. Otherwise,
+ # VMs/resources that were added using the old tag prefix will
+ # need to be detected and cleaned up manually.
+ TagKeyPrefix: Arvados
+
+ # Cloud driver: "azure" (Microsoft Azure) or "ec2" (Amazon AWS).
+ Driver: ec2
+
+ # Cloud-specific driver parameters.
+ DriverParameters:
+
+ # (ec2) Credentials.
+ AccessKeyID: ""
+ SecretAccessKey: ""
+
+ # (ec2) Instance configuration.
+ SecurityGroupIDs:
+ - ""
+ SubnetID: ""
+ Region: ""
+ EBSVolumeType: gp2
+ AdminUsername: debian
+
+ # (azure) Credentials.
+ SubscriptionID: ""
+ ClientID: ""
+ ClientSecret: ""
+ TenantID: ""
+
+ # (azure) Instance configuration.
+ CloudEnvironment: AzurePublicCloud
+ ResourceGroup: ""
+ Location: centralus
+ Network: ""
+ Subnet: ""
+ StorageAccount: ""
+ BlobContainer: ""
+ DeleteDanglingResourcesAfter: 20s
+ AdminUsername: arvados
+
+ InstanceTypes:
+
+ # Use the instance type name as the key (in place of "SAMPLE" in
+ # this sample entry).
+ SAMPLE:
+ # Cloud provider's instance type. Defaults to the configured type name.
+ ProviderType: ""
+ VCPUs: 1
+ RAM: 128MiB
+ IncludedScratch: 16GB
+ AddedScratch: 0
+ Price: 0.1
+ Preemptible: false
+
+ Mail:
+ MailchimpAPIKey: ""
+ MailchimpListID: ""
+ SendUserSetupNotificationEmail: ""
+ IssueReporterEmailFrom: ""
+ IssueReporterEmailTo: ""
+ SupportEmailAddress: ""
+ EmailFrom: ""
+ RemoteClusters:
+ "*":
+ Host: ""
+ Proxy: false
+ Scheme: https
+ Insecure: false
+ ActivateUsers: false
+ SAMPLE:
+ Host: sample.arvadosapi.com
+ Proxy: false
+ Scheme: https
+ Insecure: false
+ ActivateUsers: false
+`)
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "github.com/ghodss/yaml"
+ "github.com/imdario/mergo"
+)
+
+type logger interface {
+ Warnf(string, ...interface{})
+}
+
+func LoadFile(path string, log logger) (*arvados.Config, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return Load(f, log)
+}
+
+func Load(rdr io.Reader, log logger) (*arvados.Config, error) {
+ return load(rdr, log, true)
+}
+
+func load(rdr io.Reader, log logger, useDeprecated bool) (*arvados.Config, error) {
+ buf, err := ioutil.ReadAll(rdr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Load the config into a dummy map to get the cluster ID
+ // keys, discarding the values; then set up defaults for each
+ // cluster ID; then load the real config on top of the
+ // defaults.
+ var dummy struct {
+ Clusters map[string]struct{}
+ }
+ err = yaml.Unmarshal(buf, &dummy)
+ if err != nil {
+ return nil, err
+ }
+ if len(dummy.Clusters) == 0 {
+ return nil, errors.New("config does not define any clusters")
+ }
+
+ // We can't merge deep structs here; instead, we unmarshal the
+ // default & loaded config files into generic maps, merge
+ // those, and then json-encode+decode the result into the
+ // config struct type.
+ var merged map[string]interface{}
+ for id := range dummy.Clusters {
+ var src map[string]interface{}
+ err = yaml.Unmarshal(bytes.Replace(DefaultYAML, []byte(" xxxxx:"), []byte(" "+id+":"), -1), &src)
+ if err != nil {
+ return nil, fmt.Errorf("loading defaults for %s: %s", id, err)
+ }
+ err = mergo.Merge(&merged, src, mergo.WithOverride)
+ if err != nil {
+ return nil, fmt.Errorf("merging defaults for %s: %s", id, err)
+ }
+ }
+ var src map[string]interface{}
+ err = yaml.Unmarshal(buf, &src)
+ if err != nil {
+ return nil, fmt.Errorf("loading config data: %s", err)
+ }
+ logExtraKeys(log, merged, src, "")
+ removeSampleKeys(merged)
+ err = mergo.Merge(&merged, src, mergo.WithOverride)
+ if err != nil {
+ return nil, fmt.Errorf("merging config data: %s", err)
+ }
+
+ // map[string]interface{} => json => arvados.Config
+ var cfg arvados.Config
+ var errEnc error
+ pr, pw := io.Pipe()
+ go func() {
+ errEnc = json.NewEncoder(pw).Encode(merged)
+ pw.Close()
+ }()
+ err = json.NewDecoder(pr).Decode(&cfg)
+ if errEnc != nil {
+ err = errEnc
+ }
+ if err != nil {
+ return nil, fmt.Errorf("transcoding config data: %s", err)
+ }
+
+ if useDeprecated {
+ err = applyDeprecatedConfig(&cfg, buf, log)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Check for known mistakes
+ for id, cc := range cfg.Clusters {
+ err = checkKeyConflict(fmt.Sprintf("Clusters.%s.PostgreSQL.Connection", id), cc.PostgreSQL.Connection)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &cfg, nil
+}
+
+func checkKeyConflict(label string, m map[string]string) error {
+ saw := map[string]bool{}
+ for k := range m {
+ k = strings.ToLower(k)
+ if saw[k] {
+ return fmt.Errorf("%s: multiple entries for %q (fix by using same capitalization as default/example file)", label, k)
+ }
+ saw[k] = true
+ }
+ return nil
+}
+
+func removeSampleKeys(m map[string]interface{}) {
+ delete(m, "SAMPLE")
+ for _, v := range m {
+ if v, _ := v.(map[string]interface{}); v != nil {
+ removeSampleKeys(v)
+ }
+ }
+}
+
+func logExtraKeys(log logger, expected, supplied map[string]interface{}, prefix string) {
+ if log == nil {
+ return
+ }
+ allowed := map[string]interface{}{}
+ for k, v := range expected {
+ allowed[strings.ToLower(k)] = v
+ }
+ for k, vsupp := range supplied {
+ vexp, ok := allowed[strings.ToLower(k)]
+ if !ok && expected["SAMPLE"] != nil {
+ vexp = expected["SAMPLE"]
+ } else if !ok {
+ log.Warnf("deprecated or unknown config entry: %s%s", prefix, k)
+ continue
+ }
+ if vsupp, ok := vsupp.(map[string]interface{}); !ok {
+ // vsupp isn't a map; if vexp is a map, the
+ // type mismatch will be caught elsewhere; see TestBadType.
+ continue
+ } else if vexp, ok := vexp.(map[string]interface{}); !ok {
+ log.Warnf("unexpected object in config entry: %s%s", prefix, k)
+ } else {
+ logExtraKeys(log, vexp, vsupp, prefix+k+".")
+ }
+ }
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ "github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
+ check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+var _ = check.Suite(&LoadSuite{})
+
+type LoadSuite struct{}
+
+func (s *LoadSuite) TestEmpty(c *check.C) {
+ cfg, err := Load(&bytes.Buffer{}, ctxlog.TestLogger(c))
+ c.Check(cfg, check.IsNil)
+ c.Assert(err, check.ErrorMatches, `config does not define any clusters`)
+}
+
+func (s *LoadSuite) TestNoConfigs(c *check.C) {
+ cfg, err := Load(bytes.NewBufferString(`Clusters: {"z1111": {}}`), ctxlog.TestLogger(c))
+ c.Assert(err, check.IsNil)
+ c.Assert(cfg.Clusters, check.HasLen, 1)
+ cc, err := cfg.GetCluster("z1111")
+ c.Assert(err, check.IsNil)
+ c.Check(cc.ClusterID, check.Equals, "z1111")
+ c.Check(cc.API.MaxRequestAmplification, check.Equals, 4)
+ c.Check(cc.API.MaxItemsPerResponse, check.Equals, 1000)
+}
+
+func (s *LoadSuite) TestSampleKeys(c *check.C) {
+ for _, yaml := range []string{
+ `{"Clusters":{"z1111":{}}}`,
+ `{"Clusters":{"z1111":{"InstanceTypes":{"Foo":{"RAM": "12345M"}}}}}`,
+ } {
+ cfg, err := Load(bytes.NewBufferString(yaml), ctxlog.TestLogger(c))
+ c.Assert(err, check.IsNil)
+ cc, err := cfg.GetCluster("z1111")
+ _, hasSample := cc.InstanceTypes["SAMPLE"]
+ c.Check(hasSample, check.Equals, false)
+ if strings.Contains(yaml, "Foo") {
+ c.Check(cc.InstanceTypes["Foo"].RAM, check.Equals, arvados.ByteSize(12345000000))
+ c.Check(cc.InstanceTypes["Foo"].Price, check.Equals, 0.0)
+ }
+ }
+}
+
+func (s *LoadSuite) TestMultipleClusters(c *check.C) {
+ cfg, err := Load(bytes.NewBufferString(`{"Clusters":{"z1111":{},"z2222":{}}}`), ctxlog.TestLogger(c))
+ c.Assert(err, check.IsNil)
+ c1, err := cfg.GetCluster("z1111")
+ c.Assert(err, check.IsNil)
+ c.Check(c1.ClusterID, check.Equals, "z1111")
+ c2, err := cfg.GetCluster("z2222")
+ c.Assert(err, check.IsNil)
+ c.Check(c2.ClusterID, check.Equals, "z2222")
+}
+
+func (s *LoadSuite) TestDeprecatedOrUnknownWarning(c *check.C) {
+ var logbuf bytes.Buffer
+ logger := logrus.New()
+ logger.Out = &logbuf
+ _, err := Load(bytes.NewBufferString(`
+Clusters:
+ zzzzz:
+ postgresql: {}
+ BadKey: {}
+ Containers: {}
+ RemoteClusters:
+ z2222:
+ Host: z2222.arvadosapi.com
+ Proxy: true
+ BadKey: badValue
+`), logger)
+ c.Assert(err, check.IsNil)
+ logs := strings.Split(strings.TrimSuffix(logbuf.String(), "\n"), "\n")
+ for _, log := range logs {
+ c.Check(log, check.Matches, `.*deprecated or unknown config entry:.*BadKey.*`)
+ }
+ c.Check(logs, check.HasLen, 2)
+}
+
+func (s *LoadSuite) TestNoWarningsForDumpedConfig(c *check.C) {
+ var logbuf bytes.Buffer
+ logger := logrus.New()
+ logger.Out = &logbuf
+ cfg, err := Load(bytes.NewBufferString(`{"Clusters":{"zzzzz":{}}}`), logger)
+ c.Assert(err, check.IsNil)
+ yaml, err := yaml.Marshal(cfg)
+ c.Assert(err, check.IsNil)
+ cfgDumped, err := Load(bytes.NewBuffer(yaml), logger)
+ c.Assert(err, check.IsNil)
+ c.Check(cfg, check.DeepEquals, cfgDumped)
+ c.Check(logbuf.String(), check.Equals, "")
+}
+
+func (s *LoadSuite) TestPostgreSQLKeyConflict(c *check.C) {
+ _, err := Load(bytes.NewBufferString(`
+Clusters:
+ zzzzz:
+ postgresql:
+ connection:
+ DBName: dbname
+ Host: host
+`), ctxlog.TestLogger(c))
+ c.Check(err, check.ErrorMatches, `Clusters.zzzzz.PostgreSQL.Connection: multiple entries for "(dbname|host)".*`)
+}
+
+func (s *LoadSuite) TestBadType(c *check.C) {
+ for _, data := range []string{`
+Clusters:
+ zzzzz:
+ PostgreSQL: true
+`, `
+Clusters:
+ zzzzz:
+ PostgreSQL:
+ ConnectionPool: true
+`, `
+Clusters:
+ zzzzz:
+ PostgreSQL:
+ ConnectionPool: "foo"
+`, `
+Clusters:
+ zzzzz:
+ PostgreSQL:
+ ConnectionPool: []
+`, `
+Clusters:
+ zzzzz:
+ PostgreSQL:
+ ConnectionPool: [] # {foo: bar} isn't caught here; we rely on config-check
+`,
+ } {
+ c.Log(data)
+ v, err := Load(bytes.NewBufferString(data), ctxlog.TestLogger(c))
+ if v != nil {
+ c.Logf("%#v", v.Clusters["zzzzz"].PostgreSQL.ConnectionPool)
+ }
+ c.Check(err, check.ErrorMatches, `.*cannot unmarshal .*PostgreSQL.*`)
+ }
+}
+
+func (s *LoadSuite) TestMovedKeys(c *check.C) {
+ s.checkEquivalent(c, `# config has old keys only
+Clusters:
+ zzzzz:
+ RequestLimits:
+ MultiClusterRequestConcurrency: 3
+ MaxItemsPerResponse: 999
+`, `
+Clusters:
+ zzzzz:
+ API:
+ MaxRequestAmplification: 3
+ MaxItemsPerResponse: 999
+`)
+ s.checkEquivalent(c, `# config has both old and new keys; old values win
+Clusters:
+ zzzzz:
+ RequestLimits:
+ MultiClusterRequestConcurrency: 0
+ MaxItemsPerResponse: 555
+ API:
+ MaxRequestAmplification: 3
+ MaxItemsPerResponse: 999
+`, `
+Clusters:
+ zzzzz:
+ API:
+ MaxRequestAmplification: 0
+ MaxItemsPerResponse: 555
+`)
+}
+
+func (s *LoadSuite) checkEquivalent(c *check.C, goty, expectedy string) {
+ got, err := Load(bytes.NewBufferString(goty), ctxlog.TestLogger(c))
+ c.Assert(err, check.IsNil)
+ expected, err := Load(bytes.NewBufferString(expectedy), ctxlog.TestLogger(c))
+ c.Assert(err, check.IsNil)
+ if !c.Check(got, check.DeepEquals, expected) {
+ cmd := exec.Command("diff", "-u", "--label", "expected", "--label", "got", "/dev/fd/3", "/dev/fd/4")
+ for _, obj := range []interface{}{expected, got} {
+ y, _ := yaml.Marshal(obj)
+ pr, pw, err := os.Pipe()
+ c.Assert(err, check.IsNil)
+ defer pr.Close()
+ go func() {
+ io.Copy(pw, bytes.NewBuffer(y))
+ pw.Close()
+ }()
+ cmd.ExtraFiles = append(cmd.ExtraFiles, pr)
+ }
+ diff, err := cmd.CombinedOutput()
+ c.Log(string(diff))
+ c.Check(err, check.IsNil)
+ }
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+//go:generate go run generate.go
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+ "bytes"
+ "io/ioutil"
+ "testing"
+)
+
+func TestUpToDate(t *testing.T) {
+ src := "config.default.yml"
+ srcdata, err := ioutil.ReadFile(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(srcdata, DefaultYAML) {
+ t.Fatalf("content of %s differs from DefaultYAML -- you need to run 'go generate' and commit", src)
+ }
+}
var Command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
-func newHandler(_ context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile, _ string) service.Handler {
- return &Handler{Cluster: cluster, NodeProfile: np}
+func newHandler(_ context.Context, cluster *arvados.Cluster, _ string) service.Handler {
+ return &Handler{Cluster: cluster}
}
// returned to the client. When that happens, all
// other outstanding requests are cancelled
sharedContext, cancelFunc := context.WithCancel(req.Context())
+ defer cancelFunc()
+
req = req.WithContext(sharedContext)
wg := sync.WaitGroup{}
pdh := m[1]
success := make(chan *http.Response)
errorChan := make(chan error, len(h.handler.Cluster.RemoteClusters))
- // use channel as a semaphore to limit the number of concurrent
- // requests at a time
- sem := make(chan bool, h.handler.Cluster.RequestLimits.GetMultiClusterRequestConcurrency())
-
- defer cancelFunc()
+ acquire, release := semaphore(h.handler.Cluster.API.MaxRequestAmplification)
for remoteID := range h.handler.Cluster.RemoteClusters {
if remoteID == h.handler.Cluster.ClusterID {
wg.Add(1)
go func(remote string) {
defer wg.Done()
- // blocks until it can put a value into the
- // channel (which has a max queue capacity)
- sem <- true
+ acquire()
+ defer release()
select {
case <-sharedContext.Done():
return
case success <- newResponse:
wasSuccess = true
}
- <-sem
}(remoteID)
}
go func() {
httpserver.Error(w, "Federated multi-object may not provide 'limit', 'offset' or 'order'.", http.StatusBadRequest)
return true
}
- if expectCount > h.handler.Cluster.RequestLimits.GetMaxItemsPerResponse() {
+ if max := h.handler.Cluster.API.MaxItemsPerResponse; expectCount > max {
httpserver.Error(w, fmt.Sprintf("Federated multi-object request for %v objects which is more than max page size %v.",
- expectCount, h.handler.Cluster.RequestLimits.GetMaxItemsPerResponse()), http.StatusBadRequest)
+ expectCount, max), http.StatusBadRequest)
return true
}
if req.Form.Get("select") != "" {
// Perform concurrent requests to each cluster
- // use channel as a semaphore to limit the number of concurrent
- // requests at a time
- sem := make(chan bool, h.handler.Cluster.RequestLimits.GetMultiClusterRequestConcurrency())
- defer close(sem)
+ acquire, release := semaphore(h.handler.Cluster.API.MaxRequestAmplification)
wg := sync.WaitGroup{}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// Nothing to query
continue
}
-
- // blocks until it can put a value into the
- // channel (which has a max queue capacity)
- sem <- true
+ acquire()
wg.Add(1)
go func(k string, v []string) {
+ defer release()
+ defer wg.Done()
rp, kn, err := h.remoteQueryUUIDs(w, req, k, v)
mtx.Lock()
+ defer mtx.Unlock()
if err == nil {
completeResponses = append(completeResponses, rp...)
kind = kn
} else {
errors = append(errors, err)
}
- mtx.Unlock()
- wg.Done()
- <-sem
}(k, v)
}
wg.Wait()
s.remoteMock.Server.Handler = http.HandlerFunc(s.remoteMockHandler)
c.Assert(s.remoteMock.Start(), check.IsNil)
- nodeProfile := arvados.NodeProfile{
- Controller: arvados.SystemServiceInstance{Listen: ":"},
- RailsAPI: arvados.SystemServiceInstance{Listen: ":1"}, // local reqs will error "connection refused"
- }
- s.testHandler = &Handler{Cluster: &arvados.Cluster{
+ cluster := &arvados.Cluster{
ClusterID: "zhome",
PostgreSQL: integrationTestCluster().PostgreSQL,
- NodeProfiles: map[string]arvados.NodeProfile{
- "*": nodeProfile,
- },
- RequestLimits: arvados.RequestLimits{
- MaxItemsPerResponse: 1000,
- MultiClusterRequestConcurrency: 4,
+ TLS: arvados.TLS{Insecure: true},
+ API: arvados.API{
+ MaxItemsPerResponse: 1000,
+ MaxRequestAmplification: 4,
},
- }, NodeProfile: &nodeProfile}
+ }
+ arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "http://localhost:1/")
+ arvadostest.SetServiceURL(&cluster.Services.Controller, "http://localhost:/")
+ s.testHandler = &Handler{Cluster: cluster}
s.testServer = newServerFromIntegrationTestEnv(c)
s.testServer.Server.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(s.log, s.testHandler))
- s.testHandler.Cluster.RemoteClusters = map[string]arvados.RemoteCluster{
+ cluster.RemoteClusters = map[string]arvados.RemoteCluster{
"zzzzz": {
Host: s.remoteServer.Addr,
Proxy: true,
Handler: h,
},
}
-
c.Assert(srv.Start(), check.IsNil)
-
- np := arvados.NodeProfile{
- Controller: arvados.SystemServiceInstance{Listen: ":"},
- RailsAPI: arvados.SystemServiceInstance{Listen: srv.Addr,
- TLS: false, Insecure: true}}
- s.testHandler.Cluster.NodeProfiles["*"] = np
- s.testHandler.NodeProfile = &np
-
+ arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "http://"+srv.Addr)
return srv
}
}
func (s *FederationSuite) TestGetLocalCollection(c *check.C) {
- np := arvados.NodeProfile{
- Controller: arvados.SystemServiceInstance{Listen: ":"},
- RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
- TLS: true, Insecure: true}}
s.testHandler.Cluster.ClusterID = "zzzzz"
- s.testHandler.Cluster.NodeProfiles["*"] = np
- s.testHandler.NodeProfile = &np
+ arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
// HTTP GET
}
func (s *FederationSuite) TestGetLocalCollectionByPDH(c *check.C) {
- np := arvados.NodeProfile{
- Controller: arvados.SystemServiceInstance{Listen: ":"},
- RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
- TLS: true, Insecure: true}}
- s.testHandler.Cluster.NodeProfiles["*"] = np
- s.testHandler.NodeProfile = &np
+ arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
}
func (s *FederationSuite) TestSaltedTokenGetCollectionByPDH(c *check.C) {
- np := arvados.NodeProfile{
- Controller: arvados.SystemServiceInstance{Listen: ":"},
- RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
- TLS: true, Insecure: true}}
- s.testHandler.Cluster.NodeProfiles["*"] = np
- s.testHandler.NodeProfile = &np
+ arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
req.Header.Set("Authorization", "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065")
}
func (s *FederationSuite) TestSaltedTokenGetCollectionByPDHError(c *check.C) {
- np := arvados.NodeProfile{
- Controller: arvados.SystemServiceInstance{Listen: ":"},
- RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
- TLS: true, Insecure: true}}
- s.testHandler.Cluster.NodeProfiles["*"] = np
- s.testHandler.NodeProfile = &np
+ arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
req.Header.Set("Authorization", "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065")
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
req.Header.Set("Content-type", "application/json")
- np := arvados.NodeProfile{
- Controller: arvados.SystemServiceInstance{Listen: ":"},
- RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
- TLS: true, Insecure: true}}
+ arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
s.testHandler.Cluster.ClusterID = "zzzzz"
- s.testHandler.Cluster.NodeProfiles["*"] = np
- s.testHandler.NodeProfile = &np
resp := s.testRequest(req)
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
}
func (s *FederationSuite) TestListMultiRemoteContainerPageSizeError(c *check.C) {
- s.testHandler.Cluster.RequestLimits.MaxItemsPerResponse = 1
+ s.testHandler.Cluster.API.MaxItemsPerResponse = 1
req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
arvadostest.QueuedContainerUUID))),
"context"
"database/sql"
"errors"
- "net"
+ "fmt"
"net/http"
"net/url"
"strings"
)
type Handler struct {
- Cluster *arvados.Cluster
- NodeProfile *arvados.NodeProfile
+ Cluster *arvados.Cluster
setupOnce sync.Once
handlerStack http.Handler
req.URL.Path = strings.Replace(req.URL.Path, "//", "/", -1)
}
}
- if h.Cluster.HTTPRequestTimeout > 0 {
- ctx, cancel := context.WithDeadline(req.Context(), time.Now().Add(time.Duration(h.Cluster.HTTPRequestTimeout)))
+ if h.Cluster.API.RequestTimeout > 0 {
+ ctx, cancel := context.WithDeadline(req.Context(), time.Now().Add(time.Duration(h.Cluster.API.RequestTimeout)))
req = req.WithContext(ctx)
defer cancel()
}
func (h *Handler) CheckHealth() error {
h.setupOnce.Do(h.setup)
- _, _, err := findRailsAPI(h.Cluster, h.NodeProfile)
+ _, _, err := findRailsAPI(h.Cluster)
return err
}
mux.Handle("/_health/", &health.Handler{
Token: h.Cluster.ManagementToken,
Prefix: "/_health/",
+ Routes: health.Routes{"ping": func() error { _, err := h.db(&http.Request{}); return err }},
})
hs := http.NotFoundHandler()
hs = prepend(hs, h.proxyRailsAPI)
}
func (h *Handler) localClusterRequest(req *http.Request) (*http.Response, error) {
- urlOut, insecure, err := findRailsAPI(h.Cluster, h.NodeProfile)
+ urlOut, insecure, err := findRailsAPI(h.Cluster)
if err != nil {
return nil, err
}
}
}
-// For now, findRailsAPI always uses the rails API running on this
-// node.
-func findRailsAPI(cluster *arvados.Cluster, np *arvados.NodeProfile) (*url.URL, bool, error) {
- hostport := np.RailsAPI.Listen
- if len(hostport) > 1 && hostport[0] == ':' && strings.TrimRight(hostport[1:], "0123456789") == "" {
- // ":12345" => connect to indicated port on localhost
- hostport = "localhost" + hostport
- } else if _, _, err := net.SplitHostPort(hostport); err == nil {
- // "[::1]:12345" => connect to indicated address & port
- } else {
- return nil, false, err
+// Use a localhost entry from Services.RailsAPI.InternalURLs if one is
+// present, otherwise choose an arbitrary entry.
+func findRailsAPI(cluster *arvados.Cluster) (*url.URL, bool, error) {
+ var best *url.URL
+ for target := range cluster.Services.RailsAPI.InternalURLs {
+ target := url.URL(target)
+ best = &target
+ if strings.HasPrefix(target.Host, "localhost:") || strings.HasPrefix(target.Host, "127.0.0.1:") || strings.HasPrefix(target.Host, "[::1]:") {
+ break
+ }
}
- proto := "http"
- if np.RailsAPI.TLS {
- proto = "https"
+ if best == nil {
+ return nil, false, fmt.Errorf("Services.RailsAPI.InternalURLs is empty")
}
- url, err := url.Parse(proto + "://" + hostport)
- return url, np.RailsAPI.Insecure, err
+ return best, cluster.TLS.Insecure, nil
}
s.cluster = &arvados.Cluster{
ClusterID: "zzzzz",
PostgreSQL: integrationTestCluster().PostgreSQL,
- NodeProfiles: map[string]arvados.NodeProfile{
- "*": {
- Controller: arvados.SystemServiceInstance{Listen: ":"},
- RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true, Insecure: true},
- },
- },
+ TLS: arvados.TLS{Insecure: true},
}
- node := s.cluster.NodeProfiles["*"]
- s.handler = newHandler(s.ctx, s.cluster, &node, "")
+ arvadostest.SetServiceURL(&s.cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
+ arvadostest.SetServiceURL(&s.cluster.Services.Controller, "http://localhost:/")
+ s.handler = newHandler(s.ctx, s.cluster, "")
}
func (s *HandlerSuite) TearDownTest(c *check.C) {
}
func (s *HandlerSuite) TestRequestTimeout(c *check.C) {
- s.cluster.HTTPRequestTimeout = arvados.Duration(time.Nanosecond)
+ s.cluster.API.RequestTimeout = arvados.Duration(time.Nanosecond)
req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil)
resp := httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+func semaphore(max int) (acquire, release func()) {
+ if max > 0 {
+ ch := make(chan bool, max)
+ return func() { ch <- true }, func() { <-ch }
+ } else {
+ return func() {}, func() {}
+ }
+}
"path/filepath"
"git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/ctxlog"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
check "gopkg.in/check.v1"
func newServerFromIntegrationTestEnv(c *check.C) *httpserver.Server {
log := ctxlog.TestLogger(c)
- nodeProfile := arvados.NodeProfile{
- Controller: arvados.SystemServiceInstance{Listen: ":"},
- RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true, Insecure: true},
- }
handler := &Handler{Cluster: &arvados.Cluster{
ClusterID: "zzzzz",
PostgreSQL: integrationTestCluster().PostgreSQL,
- NodeProfiles: map[string]arvados.NodeProfile{
- "*": nodeProfile,
- },
- }, NodeProfile: &nodeProfile}
+ TLS: arvados.TLS{Insecure: true},
+ }}
+ arvadostest.SetServiceURL(&handler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
+ arvadostest.SetServiceURL(&handler.Cluster.Services.Controller, "http://localhost:/")
srv := &httpserver.Server{
Server: http.Server{
Handler: httpserver.AddRequestIDs(httpserver.LogRequests(log, handler)),
},
- Addr: nodeProfile.Controller.Listen,
+ Addr: ":",
}
return srv
}
var Command cmd.Handler = service.Command(arvados.ServiceNameDispatchCloud, newHandler)
-func newHandler(ctx context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile, token string) service.Handler {
+func newHandler(ctx context.Context, cluster *arvados.Cluster, token string) service.Handler {
ac, err := arvados.NewClientFromConfig(cluster)
if err != nil {
- return service.ErrorHandler(ctx, cluster, np, fmt.Errorf("error initializing client from cluster config: %s", err))
+ return service.ErrorHandler(ctx, cluster, fmt.Errorf("error initializing client from cluster config: %s", err))
}
d := &dispatcher{
Cluster: cluster,
// Make a worker.Executor for the given instance.
func (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {
exr := ssh_executor.New(inst)
- exr.SetTargetPort(disp.Cluster.CloudVMs.SSHPort)
+ exr.SetTargetPort(disp.Cluster.Containers.CloudVMs.SSHPort)
exr.SetSigners(disp.sshKey)
return exr
}
disp.stop = make(chan struct{}, 1)
disp.stopped = make(chan struct{})
- if key, err := ssh.ParsePrivateKey([]byte(disp.Cluster.Dispatch.PrivateKey)); err != nil {
- disp.logger.Fatalf("error parsing configured Dispatch.PrivateKey: %s", err)
+ if key, err := ssh.ParsePrivateKey([]byte(disp.Cluster.Containers.DispatchPrivateKey)); err != nil {
+ disp.logger.Fatalf("error parsing configured Containers.DispatchPrivateKey: %s", err)
} else {
disp.sshKey = key
}
}
disp.instanceSet = instanceSet
disp.reg = prometheus.NewRegistry()
- disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.reg, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
+ disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.reg, disp.InstanceSetID, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
disp.queue = container.NewQueue(disp.logger, disp.reg, disp.typeChooser, disp.ArvClient)
if disp.Cluster.ManagementToken == "" {
defer disp.instanceSet.Stop()
defer disp.pool.Stop()
- staleLockTimeout := time.Duration(disp.Cluster.Dispatch.StaleLockTimeout)
+ staleLockTimeout := time.Duration(disp.Cluster.Containers.StaleLockTimeout)
if staleLockTimeout == 0 {
staleLockTimeout = defaultStaleLockTimeout
}
- pollInterval := time.Duration(disp.Cluster.Dispatch.PollInterval)
+ pollInterval := time.Duration(disp.Cluster.Containers.CloudVMs.PollInterval)
if pollInterval <= 0 {
pollInterval = defaultPollInterval
}
}
s.cluster = &arvados.Cluster{
- CloudVMs: arvados.CloudVMs{
- Driver: "test",
- SyncInterval: arvados.Duration(10 * time.Millisecond),
- TimeoutIdle: arvados.Duration(150 * time.Millisecond),
- TimeoutBooting: arvados.Duration(150 * time.Millisecond),
- TimeoutProbe: arvados.Duration(15 * time.Millisecond),
- TimeoutShutdown: arvados.Duration(5 * time.Millisecond),
- MaxCloudOpsPerSecond: 500,
- },
- Dispatch: arvados.Dispatch{
- PrivateKey: string(dispatchprivraw),
- PollInterval: arvados.Duration(5 * time.Millisecond),
- ProbeInterval: arvados.Duration(5 * time.Millisecond),
+ Containers: arvados.ContainersConfig{
+ DispatchPrivateKey: string(dispatchprivraw),
StaleLockTimeout: arvados.Duration(5 * time.Millisecond),
- MaxProbesPerSecond: 1000,
- TimeoutSignal: arvados.Duration(3 * time.Millisecond),
- TimeoutTERM: arvados.Duration(20 * time.Millisecond),
+ CloudVMs: arvados.CloudVMsConfig{
+ Driver: "test",
+ SyncInterval: arvados.Duration(10 * time.Millisecond),
+ TimeoutIdle: arvados.Duration(150 * time.Millisecond),
+ TimeoutBooting: arvados.Duration(150 * time.Millisecond),
+ TimeoutProbe: arvados.Duration(15 * time.Millisecond),
+ TimeoutShutdown: arvados.Duration(5 * time.Millisecond),
+ MaxCloudOpsPerSecond: 500,
+ PollInterval: arvados.Duration(5 * time.Millisecond),
+ ProbeInterval: arvados.Duration(5 * time.Millisecond),
+ MaxProbesPerSecond: 1000,
+ TimeoutSignal: arvados.Duration(3 * time.Millisecond),
+ TimeoutTERM: arvados.Duration(20 * time.Millisecond),
+ ResourceTags: map[string]string{"testtag": "test value"},
+ TagKeyPrefix: "test:",
+ },
},
InstanceTypes: arvados.InstanceTypeMap{
test.InstanceType(1).Name: test.InstanceType(1),
test.InstanceType(8).Name: test.InstanceType(8),
test.InstanceType(16).Name: test.InstanceType(16),
},
- NodeProfiles: map[string]arvados.NodeProfile{
- "*": {
- Controller: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_API_HOST")},
- DispatchCloud: arvados.SystemServiceInstance{Listen: ":"},
- },
- },
- Services: arvados.Services{
- Controller: arvados.Service{ExternalURL: arvados.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}},
- },
}
+ arvadostest.SetServiceURL(&s.cluster.Services.DispatchCloud, "http://localhost:/")
+ arvadostest.SetServiceURL(&s.cluster.Services.Controller, "https://"+os.Getenv("ARVADOS_API_HOST")+"/")
arvClient, err := arvados.NewClientFromConfig(s.cluster)
c.Check(err, check.IsNil)
func (s *DispatcherSuite) TestInstancesAPI(c *check.C) {
s.cluster.ManagementToken = "abcdefgh"
- s.cluster.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
+ s.cluster.Containers.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
s.disp.queue = &test.Queue{}
}
func newInstanceSet(cluster *arvados.Cluster, setID cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
- driver, ok := drivers[cluster.CloudVMs.Driver]
+ driver, ok := drivers[cluster.Containers.CloudVMs.Driver]
if !ok {
- return nil, fmt.Errorf("unsupported cloud driver %q", cluster.CloudVMs.Driver)
+ return nil, fmt.Errorf("unsupported cloud driver %q", cluster.Containers.CloudVMs.Driver)
}
- is, err := driver.InstanceSet(cluster.CloudVMs.DriverParameters, setID, logger)
- if maxops := cluster.CloudVMs.MaxCloudOpsPerSecond; maxops > 0 {
- is = &rateLimitedInstanceSet{
+ sharedResourceTags := cloud.SharedResourceTags(cluster.Containers.CloudVMs.ResourceTags)
+ is, err := driver.InstanceSet(cluster.Containers.CloudVMs.DriverParameters, setID, sharedResourceTags, logger)
+ if maxops := cluster.Containers.CloudVMs.MaxCloudOpsPerSecond; maxops > 0 {
+ is = rateLimitedInstanceSet{
InstanceSet: is,
ticker: time.NewTicker(time.Second / time.Duration(maxops)),
}
}
+ is = defaultTaggingInstanceSet{
+ InstanceSet: is,
+ defaultTags: cloud.InstanceTags(cluster.Containers.CloudVMs.ResourceTags),
+ }
+ is = filteringInstanceSet{
+ InstanceSet: is,
+ logger: logger,
+ }
return is, err
}
<-inst.ticker.C
return inst.Instance.Destroy()
}
+
+// Adds the specified defaultTags to every Create() call.
+type defaultTaggingInstanceSet struct {
+ cloud.InstanceSet
+ defaultTags cloud.InstanceTags
+}
+
+func (is defaultTaggingInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, init cloud.InitCommand, pk ssh.PublicKey) (cloud.Instance, error) {
+ allTags := cloud.InstanceTags{}
+ for k, v := range is.defaultTags {
+ allTags[k] = v
+ }
+ for k, v := range tags {
+ allTags[k] = v
+ }
+ return is.InstanceSet.Create(it, image, allTags, init, pk)
+}
+
+// Filters the instances returned by the wrapped InstanceSet's
+// Instances() method (in case the wrapped InstanceSet didn't do this
+// itself).
+type filteringInstanceSet struct {
+ cloud.InstanceSet
+ logger logrus.FieldLogger
+}
+
+func (is filteringInstanceSet) Instances(tags cloud.InstanceTags) ([]cloud.Instance, error) {
+ instances, err := is.InstanceSet.Instances(tags)
+
+ skipped := 0
+ var returning []cloud.Instance
+nextInstance:
+ for _, inst := range instances {
+ instTags := inst.Tags()
+ for k, v := range tags {
+ if instTags[k] != v {
+ skipped++
+ continue nextInstance
+ }
+ }
+ returning = append(returning, inst)
+ }
+ is.logger.WithFields(logrus.Fields{
+ "returning": len(returning),
+ "skipped": skipped,
+ }).WithError(err).Debugf("filteringInstanceSet returning instances")
+ return returning, err
+}
}
// InstanceSet returns a new *StubInstanceSet.
-func (sd *StubDriver) InstanceSet(params json.RawMessage, id cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
+func (sd *StubDriver) InstanceSet(params json.RawMessage, id cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
if sd.holdCloudOps == nil {
sd.holdCloudOps = make(chan bool)
}
tagKeyInstanceType = "InstanceType"
tagKeyIdleBehavior = "IdleBehavior"
tagKeyInstanceSecret = "InstanceSecret"
+ tagKeyInstanceSetID = "InstanceSetID"
)
// An InstanceView shows a worker's current state and recent activity.
//
// New instances are configured and set up according to the given
// cluster configuration.
-func NewPool(logger logrus.FieldLogger, arvClient *arvados.Client, reg *prometheus.Registry, instanceSet cloud.InstanceSet, newExecutor func(cloud.Instance) Executor, installPublicKey ssh.PublicKey, cluster *arvados.Cluster) *Pool {
+func NewPool(logger logrus.FieldLogger, arvClient *arvados.Client, reg *prometheus.Registry, instanceSetID cloud.InstanceSetID, instanceSet cloud.InstanceSet, newExecutor func(cloud.Instance) Executor, installPublicKey ssh.PublicKey, cluster *arvados.Cluster) *Pool {
wp := &Pool{
logger: logger,
arvClient: arvClient,
+ instanceSetID: instanceSetID,
instanceSet: &throttledInstanceSet{InstanceSet: instanceSet},
newExecutor: newExecutor,
- bootProbeCommand: cluster.CloudVMs.BootProbeCommand,
- imageID: cloud.ImageID(cluster.CloudVMs.ImageID),
+ bootProbeCommand: cluster.Containers.CloudVMs.BootProbeCommand,
+ imageID: cloud.ImageID(cluster.Containers.CloudVMs.ImageID),
instanceTypes: cluster.InstanceTypes,
- maxProbesPerSecond: cluster.Dispatch.MaxProbesPerSecond,
- probeInterval: duration(cluster.Dispatch.ProbeInterval, defaultProbeInterval),
- syncInterval: duration(cluster.CloudVMs.SyncInterval, defaultSyncInterval),
- timeoutIdle: duration(cluster.CloudVMs.TimeoutIdle, defaultTimeoutIdle),
- timeoutBooting: duration(cluster.CloudVMs.TimeoutBooting, defaultTimeoutBooting),
- timeoutProbe: duration(cluster.CloudVMs.TimeoutProbe, defaultTimeoutProbe),
- timeoutShutdown: duration(cluster.CloudVMs.TimeoutShutdown, defaultTimeoutShutdown),
- timeoutTERM: duration(cluster.Dispatch.TimeoutTERM, defaultTimeoutTERM),
- timeoutSignal: duration(cluster.Dispatch.TimeoutSignal, defaultTimeoutSignal),
+ maxProbesPerSecond: cluster.Containers.CloudVMs.MaxProbesPerSecond,
+ probeInterval: duration(cluster.Containers.CloudVMs.ProbeInterval, defaultProbeInterval),
+ syncInterval: duration(cluster.Containers.CloudVMs.SyncInterval, defaultSyncInterval),
+ timeoutIdle: duration(cluster.Containers.CloudVMs.TimeoutIdle, defaultTimeoutIdle),
+ timeoutBooting: duration(cluster.Containers.CloudVMs.TimeoutBooting, defaultTimeoutBooting),
+ timeoutProbe: duration(cluster.Containers.CloudVMs.TimeoutProbe, defaultTimeoutProbe),
+ timeoutShutdown: duration(cluster.Containers.CloudVMs.TimeoutShutdown, defaultTimeoutShutdown),
+ timeoutTERM: duration(cluster.Containers.CloudVMs.TimeoutTERM, defaultTimeoutTERM),
+ timeoutSignal: duration(cluster.Containers.CloudVMs.TimeoutSignal, defaultTimeoutSignal),
installPublicKey: installPublicKey,
+ tagKeyPrefix: cluster.Containers.CloudVMs.TagKeyPrefix,
stop: make(chan bool),
}
wp.registerMetrics(reg)
// configuration
logger logrus.FieldLogger
arvClient *arvados.Client
+ instanceSetID cloud.InstanceSetID
instanceSet *throttledInstanceSet
newExecutor func(cloud.Instance) Executor
bootProbeCommand string
timeoutTERM time.Duration
timeoutSignal time.Duration
installPublicKey ssh.PublicKey
+ tagKeyPrefix string
// private state
subscribers map[<-chan struct{}]chan<- struct{}
go func() {
defer wp.notify()
tags := cloud.InstanceTags{
- tagKeyInstanceType: it.Name,
- tagKeyIdleBehavior: string(IdleBehaviorRun),
- tagKeyInstanceSecret: secret,
+ wp.tagKeyPrefix + tagKeyInstanceSetID: string(wp.instanceSetID),
+ wp.tagKeyPrefix + tagKeyInstanceType: it.Name,
+ wp.tagKeyPrefix + tagKeyIdleBehavior: string(IdleBehaviorRun),
+ wp.tagKeyPrefix + tagKeyInstanceSecret: secret,
}
initCmd := cloud.InitCommand(fmt.Sprintf("umask 0177 && echo -n %q >%s", secret, instanceSecretFilename))
inst, err := wp.instanceSet.Create(it, wp.imageID, tags, initCmd, wp.installPublicKey)
//
// Caller must have lock.
func (wp *Pool) updateWorker(inst cloud.Instance, it arvados.InstanceType) (*worker, bool) {
- inst = tagVerifier{inst}
+ secret := inst.Tags()[wp.tagKeyPrefix+tagKeyInstanceSecret]
+ inst = tagVerifier{inst, secret}
id := inst.ID()
if wkr := wp.workers[id]; wkr != nil {
wkr.executor.SetTarget(inst)
}
state := StateUnknown
- if _, ok := wp.creating[inst.Tags()[tagKeyInstanceSecret]]; ok {
+ if _, ok := wp.creating[secret]; ok {
state = StateBooting
}
// process); otherwise, default to "run". After this,
// wkr.idleBehavior is the source of truth, and will only be
// changed via SetIdleBehavior().
- idleBehavior := IdleBehavior(inst.Tags()[tagKeyIdleBehavior])
+ idleBehavior := IdleBehavior(inst.Tags()[wp.tagKeyPrefix+tagKeyIdleBehavior])
if !validIdleBehavior[idleBehavior] {
idleBehavior = IdleBehaviorRun
}
}
wp.logger.Debug("getting instance list")
threshold := time.Now()
- instances, err := wp.instanceSet.Instances(cloud.InstanceTags{})
+ instances, err := wp.instanceSet.Instances(cloud.InstanceTags{wp.tagKeyPrefix + tagKeyInstanceSetID: string(wp.instanceSetID)})
if err != nil {
wp.instanceSet.throttleInstances.CheckRateLimitError(err, wp.logger, "list instances", wp.notify)
return err
notify := false
for _, inst := range instances {
- itTag := inst.Tags()[tagKeyInstanceType]
+ itTag := inst.Tags()[wp.tagKeyPrefix+tagKeyInstanceType]
it, ok := wp.instanceTypes[itTag]
if !ok {
wp.logger.WithField("Instance", inst).Errorf("unknown InstanceType tag %q --- ignoring", itTag)
logger := ctxlog.TestLogger(c)
driver := &test.StubDriver{}
- is, err := driver.InstanceSet(nil, "", logger)
+ instanceSetID := cloud.InstanceSetID("test-instance-set-id")
+ is, err := driver.InstanceSet(nil, instanceSetID, nil, logger)
c.Assert(err, check.IsNil)
newExecutor := func(cloud.Instance) Executor {
}
cluster := &arvados.Cluster{
- Dispatch: arvados.Dispatch{
- MaxProbesPerSecond: 1000,
- ProbeInterval: arvados.Duration(time.Millisecond * 10),
- },
- CloudVMs: arvados.CloudVMs{
- BootProbeCommand: "true",
- SyncInterval: arvados.Duration(time.Millisecond * 10),
+ Containers: arvados.ContainersConfig{
+ CloudVMs: arvados.CloudVMsConfig{
+ BootProbeCommand: "true",
+ MaxProbesPerSecond: 1000,
+ ProbeInterval: arvados.Duration(time.Millisecond * 10),
+ SyncInterval: arvados.Duration(time.Millisecond * 10),
+ TagKeyPrefix: "testprefix:",
+ },
},
InstanceTypes: arvados.InstanceTypeMap{
type1.Name: type1,
},
}
- pool := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), is, newExecutor, nil, cluster)
+ pool := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), instanceSetID, is, newExecutor, nil, cluster)
notify := pool.Subscribe()
defer pool.Unsubscribe(notify)
pool.Create(type1)
}
}
// Wait for the tags to save to the cloud provider
+ tagKey := cluster.Containers.CloudVMs.TagKeyPrefix + tagKeyIdleBehavior
deadline := time.Now().Add(time.Second)
for !func() bool {
pool.mtx.RLock()
defer pool.mtx.RUnlock()
for _, wkr := range pool.workers {
if wkr.instType == type2 {
- return wkr.instance.Tags()[tagKeyIdleBehavior] == string(IdleBehaviorHold)
+ return wkr.instance.Tags()[tagKey] == string(IdleBehaviorHold)
}
}
return false
c.Log("------- starting new pool, waiting to recover state")
- pool2 := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), is, newExecutor, nil, cluster)
+ pool2 := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), instanceSetID, is, newExecutor, nil, cluster)
notify2 := pool2.Subscribe()
defer pool2.Unsubscribe(notify2)
waitForIdle(pool2, notify2)
func (suite *PoolSuite) TestCreateUnallocShutdown(c *check.C) {
logger := ctxlog.TestLogger(c)
driver := test.StubDriver{HoldCloudOps: true}
- instanceSet, err := driver.InstanceSet(nil, "", logger)
+ instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, logger)
c.Assert(err, check.IsNil)
type1 := arvados.InstanceType{Name: "a1s", ProviderType: "a1.small", VCPUs: 1, RAM: 1 * GiB, Price: .01}
type tagVerifier struct {
cloud.Instance
+ secret string
}
func (tv tagVerifier) VerifyHostKey(pubKey ssh.PublicKey, client *ssh.Client) error {
- expectSecret := tv.Instance.Tags()[tagKeyInstanceSecret]
- if err := tv.Instance.VerifyHostKey(pubKey, client); err != cloud.ErrNotImplemented || expectSecret == "" {
+ if err := tv.Instance.VerifyHostKey(pubKey, client); err != cloud.ErrNotImplemented || tv.secret == "" {
// If the wrapped instance indicates it has a way to
// verify the key, return that decision.
return err
if err != nil {
return err
}
- if stdout.String() != expectSecret {
+ if stdout.String() != tv.secret {
return errBadInstanceSecret
}
return nil
instance := wkr.instance
tags := instance.Tags()
update := cloud.InstanceTags{
- tagKeyInstanceType: wkr.instType.Name,
- tagKeyIdleBehavior: string(wkr.idleBehavior),
+ wkr.wp.tagKeyPrefix + tagKeyInstanceType: wkr.instType.Name,
+ wkr.wp.tagKeyPrefix + tagKeyIdleBehavior: string(wkr.idleBehavior),
}
save := false
for k, v := range update {
bootTimeout := time.Minute
probeTimeout := time.Second
- is, err := (&test.StubDriver{}).InstanceSet(nil, "", logger)
+ is, err := (&test.StubDriver{}).InstanceSet(nil, "test-instance-set-id", nil, logger)
c.Assert(err, check.IsNil)
inst, err := is.Create(arvados.InstanceType{}, "", nil, "echo InitCommand", nil)
c.Assert(err, check.IsNil)
"flag"
"fmt"
"io"
+ "io/ioutil"
+ "net"
"net/http"
"net/url"
"os"
+ "strings"
"git.curoverse.com/arvados.git/lib/cmd"
+ "git.curoverse.com/arvados.git/lib/config"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/ctxlog"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
CheckHealth() error
}
-type NewHandlerFunc func(_ context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, token string) Handler
+type NewHandlerFunc func(_ context.Context, _ *arvados.Cluster, token string) Handler
type command struct {
newHandler NewHandlerFunc
flags := flag.NewFlagSet("", flag.ContinueOnError)
flags.SetOutput(stderr)
configFile := flags.String("config", arvados.DefaultConfigFile, "Site configuration `file`")
- nodeProfile := flags.String("node-profile", "", "`Name` of NodeProfiles config entry to use (if blank, use $ARVADOS_NODE_PROFILE or hostname reported by OS)")
err = flags.Parse(args)
if err == flag.ErrHelp {
err = nil
} else if err != nil {
return 2
}
- cfg, err := arvados.GetConfig(*configFile)
+ // Logged warnings are discarded for now: the config template
+ // is incomplete, which causes extra warnings about keys that
+ // are really OK.
+ cfg, err := config.LoadFile(*configFile, ctxlog.New(ioutil.Discard, "json", "error"))
if err != nil {
return 1
}
if err != nil {
return 1
}
- log = ctxlog.New(stderr, cluster.Logging.Format, cluster.Logging.Level).WithFields(logrus.Fields{
+ log = ctxlog.New(stderr, cluster.SystemLogs.Format, cluster.SystemLogs.LogLevel).WithFields(logrus.Fields{
"PID": os.Getpid(),
})
ctx := ctxlog.Context(c.ctx, log)
- profileName := *nodeProfile
- if profileName == "" {
- profileName = os.Getenv("ARVADOS_NODE_PROFILE")
- }
- profile, err := cluster.GetNodeProfile(profileName)
+ listen, err := getListenAddr(cluster.Services, c.svcName)
if err != nil {
return 1
}
- listen := profile.ServicePorts()[c.svcName]
- if listen == "" {
- err = fmt.Errorf("configuration does not enable the %s service on this host", c.svcName)
- return 1
- }
if cluster.SystemRootToken == "" {
log.Warn("SystemRootToken missing from cluster config, falling back to ARVADOS_API_TOKEN environment variable")
}
}
- handler := c.newHandler(ctx, cluster, profile, cluster.SystemRootToken)
+ handler := c.newHandler(ctx, cluster, cluster.SystemRootToken)
if err = handler.CheckHealth(); err != nil {
return 1
}
}
const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+func getListenAddr(svcs arvados.Services, prog arvados.ServiceName) (string, error) {
+ svc, ok := svcs.Map()[prog]
+ if !ok {
+ return "", fmt.Errorf("unknown service name %q", prog)
+ }
+ for url := range svc.InternalURLs {
+ if strings.HasPrefix(url.Host, "localhost:") {
+ return url.Host, nil
+ }
+ listener, err := net.Listen("tcp", url.Host)
+ if err == nil {
+ listener.Close()
+ return url.Host, nil
+ }
+ }
+ return "", fmt.Errorf("configuration does not enable the %s service on this host", prog)
+}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- cmd := Command(arvados.ServiceNameController, func(ctx context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, token string) Handler {
+ cmd := Command(arvados.ServiceNameController, func(ctx context.Context, _ *arvados.Cluster, token string) Handler {
c.Check(ctx.Value("foo"), check.Equals, "bar")
c.Check(token, check.Equals, "abcde")
return &testHandler{ctx: ctx, healthCheck: healthCheck}
// responds 500 to all requests. ErrorHandler itself logs the given
// error once, and the handler logs it again for each incoming
// request.
-func ErrorHandler(ctx context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, err error) Handler {
+func ErrorHandler(ctx context.Context, _ *arvados.Cluster, err error) Handler {
logger := ctxlog.FromContext(ctx)
logger.WithError(err).Error("unhealthy service")
return errorHandler{err, logger}
vwd.mkdirs(p.target)
else:
source, path = self.arvrunner.fs_access.get_collection(p.resolved)
- vwd.copy(path, p.target, source_collection=source)
+ vwd.copy(path or ".", p.target, source_collection=source)
elif p.type == "CreateFile":
if self.arvrunner.secret_store.has_secret(p.resolved):
secret_mounts["%s/%s" % (self.outdir, p.target)] = {
--- /dev/null
+{
+ "filesDir": {
+ "location": "keep:d7514270f356df848477718d58308cc4+94",
+ "class": "Directory"
+ }
+}
--- /dev/null
+cwlVersion: v1.0
+class: CommandLineTool
+
+requirements:
+ - class: InitialWorkDirRequirement
+ listing:
+ - entry: $(inputs.filesDir)
+ writable: true
+
+inputs:
+ filesDir:
+ type: Directory
+
+outputs:
+ results:
+ type: Directory
+ outputBinding:
+ glob: .
+
+arguments: [touch, $(inputs.filesDir.path)/blurg.txt]
}
tool: 13931-size.cwl
doc: Test that size is set for files in Keep
+
+- job: 15241-writable-dir-job.json
+ output: {
+ "results": {
+ "basename": "keep:6dd5fa20622d5a7a23c9147d0927da2a+180",
+ "class": "Directory",
+ "listing": [
+ {
+ "basename": "d7514270f356df848477718d58308cc4+94",
+ "class": "Directory",
+ "listing": [
+ {
+ "basename": "a",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "class": "File",
+ "location": "a",
+ "size": 0
+ },
+ {
+ "basename": "blurg.txt",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "class": "File",
+ "location": "blurg.txt",
+ "size": 0
+ },
+ {
+ "basename": "c",
+ "class": "Directory",
+ "listing": [
+ {
+ "basename": "d",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "class": "File",
+ "location": "d",
+ "size": 0
+ }
+ ],
+ "location": "c"
+ },
+ {
+ "basename": "b",
+ "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "class": "File",
+ "location": "b",
+ "size": 0
+ }
+ ],
+ "location": "d7514270f356df848477718d58308cc4+94"
+ }
+ ],
+ "location": "keep:6dd5fa20622d5a7a23c9147d0927da2a+180"
+ }
+ }
+ tool: 15241-writable-dir.cwl
+ doc: Test for writable collections
call_args, call_kwargs = runner.api.container_requests().create.call_args
vwdmock.copy.assert_has_calls([mock.call('bar', 'foo', source_collection=sourcemock)])
- vwdmock.copy.assert_has_calls([mock.call('', 'foo2', source_collection=sourcemock)])
+ vwdmock.copy.assert_has_calls([mock.call('.', 'foo2', source_collection=sourcemock)])
vwdmock.copy.assert_has_calls([mock.call('baz/filename', 'filename', source_collection=sourcemock)])
vwdmock.copy.assert_has_calls([mock.call('subdir', 'subdir', source_collection=sourcemock)])
@stubs
def test_submit_request_uuid(self, stubs):
+ stubs.api._rootDesc["remoteHosts"]["zzzzz"] = "123"
stubs.expect_container_request_uuid = "zzzzz-xvhdp-yyyyyyyyyyyyyyy"
stubs.api.container_requests().update().execute.return_value = {
"errors"
"fmt"
"net/url"
- "os"
"git.curoverse.com/arvados.git/sdk/go/config"
)
}
}
-type RequestLimits struct {
- MaxItemsPerResponse int
- MultiClusterRequestConcurrency int
+type API struct {
+ MaxItemsPerResponse int
+ MaxRequestAmplification int
+ RequestTimeout Duration
}
type Cluster struct {
- ClusterID string `json:"-"`
- ManagementToken string
- SystemRootToken string
- Services Services
- NodeProfiles map[string]NodeProfile
- InstanceTypes InstanceTypeMap
- CloudVMs CloudVMs
- Dispatch Dispatch
- HTTPRequestTimeout Duration
- RemoteClusters map[string]RemoteCluster
- PostgreSQL PostgreSQL
- RequestLimits RequestLimits
- Logging Logging
- TLS TLS
+ ClusterID string `json:"-"`
+ ManagementToken string
+ SystemRootToken string
+ Services Services
+ InstanceTypes InstanceTypeMap
+ Containers ContainersConfig
+ RemoteClusters map[string]RemoteCluster
+ PostgreSQL PostgreSQL
+ API API
+ SystemLogs SystemLogs
+ TLS TLS
}
type Services struct {
Keepbalance Service
Keepproxy Service
Keepstore Service
- Keepweb Service
Nodemanager Service
RailsAPI Service
+ WebDAV Service
Websocket Service
- Workbench Service
+ Workbench1 Service
+ Workbench2 Service
}
type Service struct {
- InternalURLs map[URL]ServiceInstance
+ InternalURLs map[URL]ServiceInstance `json:",omitempty"`
ExternalURL URL
}
return err
}
+func (su URL) MarshalText() ([]byte, error) {
+ return []byte(fmt.Sprintf("%s", (*url.URL)(&su).String())), nil
+}
+
type ServiceInstance struct{}
-type Logging struct {
- Level string
- Format string
+type SystemLogs struct {
+ LogLevel string
+ Format string
+ MaxRequestLogParamsSize int
}
type PostgreSQL struct {
Preemptible bool
}
-type Dispatch struct {
- // PEM encoded SSH key (RSA, DSA, or ECDSA) able to log in to
- // cloud VMs.
- PrivateKey string
-
- // Max time for workers to come up before abandoning stale
- // locks from previous run
- StaleLockTimeout Duration
-
- // Interval between queue polls
- PollInterval Duration
-
- // Interval between probes to each worker
- ProbeInterval Duration
-
- // Maximum total worker probes per second
- MaxProbesPerSecond int
-
- // Time before repeating SIGTERM when killing a container
- TimeoutSignal Duration
-
- // Time to give up on SIGTERM and write off the worker
- TimeoutTERM Duration
+type ContainersConfig struct {
+ CloudVMs CloudVMsConfig
+ DispatchPrivateKey string
+ StaleLockTimeout Duration
}
-type CloudVMs struct {
- // Shell command that exits zero IFF the VM is fully booted
- // and ready to run containers, e.g., "mount | grep
- // /encrypted-tmp"
- BootProbeCommand string
-
- // Listening port (name or number) of SSH servers on worker
- // VMs
- SSHPort string
+type CloudVMsConfig struct {
+ Enable bool
- SyncInterval Duration
-
- // Maximum idle time before automatic shutdown
- TimeoutIdle Duration
-
- // Maximum booting time before automatic shutdown
- TimeoutBooting Duration
-
- // Maximum time with no successful probes before automatic shutdown
- TimeoutProbe Duration
-
- // Time after shutdown to retry shutdown
- TimeoutShutdown Duration
-
- // Maximum create/destroy-instance operations per second
+ BootProbeCommand string
+ ImageID string
MaxCloudOpsPerSecond int
-
- ImageID string
+ MaxProbesPerSecond int
+ PollInterval Duration
+ ProbeInterval Duration
+ SSHPort string
+ SyncInterval Duration
+ TimeoutBooting Duration
+ TimeoutIdle Duration
+ TimeoutProbe Duration
+ TimeoutShutdown Duration
+ TimeoutSignal Duration
+ TimeoutTERM Duration
+ ResourceTags map[string]string
+ TagKeyPrefix string
Driver string
DriverParameters json.RawMessage
return nil
}
-// GetNodeProfile returns a NodeProfile for the given hostname. An
-// error is returned if the appropriate configuration can't be
-// determined (e.g., this does not appear to be a system node). If
-// node is empty, use the OS-reported hostname.
-func (cc *Cluster) GetNodeProfile(node string) (*NodeProfile, error) {
- if node == "" {
- hostname, err := os.Hostname()
- if err != nil {
- return nil, err
- }
- node = hostname
- }
- if cfg, ok := cc.NodeProfiles[node]; ok {
- return &cfg, nil
- }
- // If node is not listed, but "*" gives a default system node
- // config, use the default config.
- if cfg, ok := cc.NodeProfiles["*"]; ok {
- return &cfg, nil
- }
- return nil, fmt.Errorf("config does not provision host %q as a system node", node)
-}
-
-type NodeProfile struct {
- Controller SystemServiceInstance `json:"arvados-controller"`
- Health SystemServiceInstance `json:"arvados-health"`
- Keepbalance SystemServiceInstance `json:"keep-balance"`
- Keepproxy SystemServiceInstance `json:"keepproxy"`
- Keepstore SystemServiceInstance `json:"keepstore"`
- Keepweb SystemServiceInstance `json:"keep-web"`
- Nodemanager SystemServiceInstance `json:"arvados-node-manager"`
- DispatchCloud SystemServiceInstance `json:"arvados-dispatch-cloud"`
- RailsAPI SystemServiceInstance `json:"arvados-api-server"`
- Websocket SystemServiceInstance `json:"arvados-ws"`
- Workbench SystemServiceInstance `json:"arvados-workbench"`
-}
-
type ServiceName string
const (
ServiceNameRailsAPI ServiceName = "arvados-api-server"
ServiceNameController ServiceName = "arvados-controller"
ServiceNameDispatchCloud ServiceName = "arvados-dispatch-cloud"
+ ServiceNameHealth ServiceName = "arvados-health"
ServiceNameNodemanager ServiceName = "arvados-node-manager"
- ServiceNameWorkbench ServiceName = "arvados-workbench"
+ ServiceNameWorkbench1 ServiceName = "arvados-workbench1"
+ ServiceNameWorkbench2 ServiceName = "arvados-workbench2"
ServiceNameWebsocket ServiceName = "arvados-ws"
ServiceNameKeepbalance ServiceName = "keep-balance"
ServiceNameKeepweb ServiceName = "keep-web"
ServiceNameKeepstore ServiceName = "keepstore"
)
-// ServicePorts returns the configured listening address (or "" if
-// disabled) for each service on the node.
-func (np *NodeProfile) ServicePorts() map[ServiceName]string {
- return map[ServiceName]string{
- ServiceNameRailsAPI: np.RailsAPI.Listen,
- ServiceNameController: np.Controller.Listen,
- ServiceNameDispatchCloud: np.DispatchCloud.Listen,
- ServiceNameNodemanager: np.Nodemanager.Listen,
- ServiceNameWorkbench: np.Workbench.Listen,
- ServiceNameWebsocket: np.Websocket.Listen,
- ServiceNameKeepbalance: np.Keepbalance.Listen,
- ServiceNameKeepweb: np.Keepweb.Listen,
- ServiceNameKeepproxy: np.Keepproxy.Listen,
- ServiceNameKeepstore: np.Keepstore.Listen,
- }
-}
-
-func (h RequestLimits) GetMultiClusterRequestConcurrency() int {
- if h.MultiClusterRequestConcurrency == 0 {
- return 4
- }
- return h.MultiClusterRequestConcurrency
-}
-
-func (h RequestLimits) GetMaxItemsPerResponse() int {
- if h.MaxItemsPerResponse == 0 {
- return 1000
+// Map returns all services as a map, suitable for iterating over all
+// services or looking up a service by name.
+func (svcs Services) Map() map[ServiceName]Service {
+ return map[ServiceName]Service{
+ ServiceNameRailsAPI: svcs.RailsAPI,
+ ServiceNameController: svcs.Controller,
+ ServiceNameDispatchCloud: svcs.DispatchCloud,
+ ServiceNameHealth: svcs.Health,
+ ServiceNameNodemanager: svcs.Nodemanager,
+ ServiceNameWorkbench1: svcs.Workbench1,
+ ServiceNameWorkbench2: svcs.Workbench2,
+ ServiceNameWebsocket: svcs.Websocket,
+ ServiceNameKeepbalance: svcs.Keepbalance,
+ ServiceNameKeepweb: svcs.WebDAV,
+ ServiceNameKeepproxy: svcs.Keepproxy,
+ ServiceNameKeepstore: svcs.Keepstore,
}
- return h.MaxItemsPerResponse
-}
-
-type SystemServiceInstance struct {
- Listen string
- TLS bool
- Insecure bool
}
type TLS struct {
import (
"encoding/json"
"fmt"
+ "strings"
"time"
)
}
// MarshalJSON implements json.Marshaler.
-func (d *Duration) MarshalJSON() ([]byte, error) {
+func (d Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(d.String())
}
-// String implements fmt.Stringer.
+// String returns a format similar to (time.Duration)String() but with
+// "0m" and "0s" removed: e.g., "1h" instead of "1h0m0s".
func (d Duration) String() string {
- return time.Duration(d).String()
+ s := time.Duration(d).String()
+ s = strings.Replace(s, "m0s", "m", 1)
+ s = strings.Replace(s, "h0m", "h", 1)
+ return s
}
// Duration returns a time.Duration.
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "encoding/json"
+ "time"
+
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&DurationSuite{})
+
+type DurationSuite struct{}
+
+func (s *DurationSuite) TestMarshalJSON(c *check.C) {
+ var d struct {
+ D Duration
+ }
+ err := json.Unmarshal([]byte(`{"D":"1.234s"}`), &d)
+ c.Check(err, check.IsNil)
+ c.Check(d.D, check.Equals, Duration(time.Second+234*time.Millisecond))
+ buf, err := json.Marshal(d)
+ c.Check(string(buf), check.Equals, `{"D":"1.234s"}`)
+
+ for _, trial := range []struct {
+ seconds int
+ out string
+ }{
+ {30, "30s"},
+ {60, "1m"},
+ {120, "2m"},
+ {150, "2m30s"},
+ {3600, "1h"},
+ {7201, "2h1s"},
+ {360600, "100h10m"},
+ {360610, "100h10m10s"},
+ } {
+ buf, err := json.Marshal(Duration(time.Duration(trial.seconds) * time.Second))
+ c.Check(err, check.IsNil)
+ c.Check(string(buf), check.Equals, `"`+trial.out+`"`)
+ }
+}
"testing"
"time"
- "git.curoverse.com/arvados.git/sdk/go/arvadostest"
check "gopkg.in/check.v1"
)
func (s *CollectionFSSuite) SetUpTest(c *check.C) {
s.client = NewClientFromEnv()
- err := s.client.RequestAndDecode(&s.coll, "GET", "arvados/v1/collections/"+arvadostest.FooAndBarFilesInDirUUID, nil, nil)
+ err := s.client.RequestAndDecode(&s.coll, "GET", "arvados/v1/collections/"+fixtureFooAndBarFilesInDirUUID, nil, nil)
c.Assert(err, check.IsNil)
s.kc = &keepClientStub{
blocks: map[string][]byte{
"path/filepath"
"strings"
- "git.curoverse.com/arvados.git/sdk/go/arvadostest"
check "gopkg.in/check.v1"
)
func (s *SiteFSSuite) TestSlashInName(c *check.C) {
badCollection := Collection{
Name: "bad/collection",
- OwnerUUID: arvadostest.AProjectUUID,
+ OwnerUUID: fixtureAProjectUUID,
}
err := s.client.RequestAndDecode(&badCollection, "POST", "arvados/v1/collections", s.client.UpdateBody(&badCollection), nil)
c.Assert(err, check.IsNil)
badProject := Group{
Name: "bad/project",
GroupClass: "project",
- OwnerUUID: arvadostest.AProjectUUID,
+ OwnerUUID: fixtureAProjectUUID,
}
err = s.client.RequestAndDecode(&badProject, "POST", "arvados/v1/groups", s.client.UpdateBody(&badProject), nil)
c.Assert(err, check.IsNil)
oob := Collection{
Name: "oob",
- OwnerUUID: arvadostest.AProjectUUID,
+ OwnerUUID: fixtureAProjectUUID,
}
err = s.client.RequestAndDecode(&oob, "POST", "arvados/v1/collections", s.client.UpdateBody(&oob), nil)
c.Assert(err, check.IsNil)
"net/http"
"os"
- "git.curoverse.com/arvados.git/sdk/go/arvadostest"
check "gopkg.in/check.v1"
)
+const (
+ // Importing arvadostest would be an import cycle, so these
+ // fixtures are duplicated here [until fs moves to a separate
+ // package].
+ fixtureActiveToken = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
+ fixtureAProjectUUID = "zzzzz-j7d0g-v955i6s2oi1cbso"
+ fixtureFooAndBarFilesInDirUUID = "zzzzz-4zz18-foonbarfilesdir"
+ fixtureFooCollectionName = "zzzzz-4zz18-fy296fx3hot09f7 added sometime"
+ fixtureFooCollectionPDH = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45"
+ fixtureFooCollection = "zzzzz-4zz18-fy296fx3hot09f7"
+ fixtureNonexistentCollection = "zzzzz-4zz18-totallynotexist"
+)
+
var _ = check.Suite(&SiteFSSuite{})
type SiteFSSuite struct {
func (s *SiteFSSuite) SetUpTest(c *check.C) {
s.client = &Client{
APIHost: os.Getenv("ARVADOS_API_HOST"),
- AuthToken: arvadostest.ActiveToken,
+ AuthToken: fixtureActiveToken,
Insecure: true,
}
s.kc = &keepClientStub{
c.Check(err, check.IsNil)
c.Check(len(fis), check.Equals, 0)
- err = s.fs.Mkdir("/by_id/"+arvadostest.FooCollection, 0755)
+ err = s.fs.Mkdir("/by_id/"+fixtureFooCollection, 0755)
c.Check(err, check.Equals, os.ErrExist)
- f, err = s.fs.Open("/by_id/" + arvadostest.NonexistentCollection)
+ f, err = s.fs.Open("/by_id/" + fixtureNonexistentCollection)
c.Assert(err, check.Equals, os.ErrNotExist)
for _, path := range []string{
- arvadostest.FooCollection,
- arvadostest.FooPdh,
- arvadostest.AProjectUUID + "/" + arvadostest.FooCollectionName,
+ fixtureFooCollection,
+ fixtureFooCollectionPDH,
+ fixtureAProjectUUID + "/" + fixtureFooCollectionName,
} {
f, err = s.fs.Open("/by_id/" + path)
c.Assert(err, check.IsNil)
c.Check(names, check.DeepEquals, []string{"foo"})
}
- f, err = s.fs.Open("/by_id/" + arvadostest.AProjectUUID + "/A Subproject/baz_file")
+ f, err = s.fs.Open("/by_id/" + fixtureAProjectUUID + "/A Subproject/baz_file")
c.Assert(err, check.IsNil)
fis, err = f.Readdir(-1)
var names []string
}
c.Check(names, check.DeepEquals, []string{"baz"})
- _, err = s.fs.OpenFile("/by_id/"+arvadostest.NonexistentCollection, os.O_RDWR|os.O_CREATE, 0755)
+ _, err = s.fs.OpenFile("/by_id/"+fixtureNonexistentCollection, os.O_RDWR|os.O_CREATE, 0755)
c.Check(err, check.Equals, ErrInvalidOperation)
- err = s.fs.Rename("/by_id/"+arvadostest.FooCollection, "/by_id/beep")
+ err = s.fs.Rename("/by_id/"+fixtureFooCollection, "/by_id/beep")
c.Check(err, check.Equals, ErrInvalidArgument)
- err = s.fs.Rename("/by_id/"+arvadostest.FooCollection+"/foo", "/by_id/beep")
+ err = s.fs.Rename("/by_id/"+fixtureFooCollection+"/foo", "/by_id/beep")
c.Check(err, check.Equals, ErrInvalidArgument)
_, err = s.fs.Stat("/by_id/beep")
c.Check(err, check.Equals, os.ErrNotExist)
- err = s.fs.Rename("/by_id/"+arvadostest.FooCollection+"/foo", "/by_id/"+arvadostest.FooCollection+"/bar")
+ err = s.fs.Rename("/by_id/"+fixtureFooCollection+"/foo", "/by_id/"+fixtureFooCollection+"/bar")
c.Check(err, check.IsNil)
err = s.fs.Rename("/by_id", "/beep")
func (c PostgreSQLConnection) String() string {
s := ""
for k, v := range c {
+ if v == "" {
+ continue
+ }
s += strings.ToLower(k)
s += "='"
s += strings.Replace(
FooBarDirCollection = "zzzzz-4zz18-foonbarfilesdir"
WazVersion1Collection = "zzzzz-4zz18-25k12570yk1ver1"
UserAgreementPDH = "b519d9cb706a29fc7ea24dbea2f05851+93"
- FooPdh = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45"
HelloWorldPdh = "55713e6a34081eb03609e7ad5fcad129+62"
AProjectUUID = "zzzzz-j7d0g-v955i6s2oi1cbso"
import (
"net/http"
+ "net/url"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
)
// StubResponse struct with response status and body
resp.Write([]byte(``))
}
}
+
+// SetServiceURL overrides the given service config/discovery with the
+// given internalURLs.
+//
+// ExternalURL is set to the last internalURL; this is only intended
+// to be useful in the common case where exactly one URL is given.
+//
+// SetServiceURL panics on errors.
+func SetServiceURL(service *arvados.Service, internalURLs ...string) {
+ service.InternalURLs = map[arvados.URL]arvados.ServiceInstance{}
+ for _, u := range internalURLs {
+ u, err := url.Parse(u)
+ if err != nil {
+ panic(err)
+ }
+ service.InternalURLs[arvados.URL(*u)] = arvados.ServiceInstance{}
+ service.ExternalURL = arvados.URL(*u)
+ }
+}
"encoding/json"
"errors"
"fmt"
- "net"
"net/http"
+ "net/url"
"sync"
"time"
httpClient *http.Client
timeout arvados.Duration
- Config *arvados.Config
+ Cluster *arvados.Cluster
// If non-nil, Log is called after handling each request.
Log func(*http.Request, error)
}
}
+func (agg *Aggregator) CheckHealth() error {
+ return nil
+}
+
func (agg *Aggregator) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
agg.setupOnce.Do(agg.setup)
sendErr := func(statusCode int, err error) {
resp.Header().Set("Content-Type", "application/json")
- cluster, err := agg.Config.GetCluster("")
- if err != nil {
- err = fmt.Errorf("arvados.GetCluster(): %s", err)
- sendErr(http.StatusInternalServerError, err)
- return
- }
- if !agg.checkAuth(req, cluster) {
+ if !agg.checkAuth(req) {
sendErr(http.StatusUnauthorized, errUnauthorized)
return
}
sendErr(http.StatusNotFound, errNotFound)
return
}
- json.NewEncoder(resp).Encode(agg.ClusterHealth(cluster))
+ json.NewEncoder(resp).Encode(agg.ClusterHealth())
if agg.Log != nil {
agg.Log(req, nil)
}
N int `json:"n"`
}
-func (agg *Aggregator) ClusterHealth(cluster *arvados.Cluster) ClusterHealthResponse {
+func (agg *Aggregator) ClusterHealth() ClusterHealthResponse {
resp := ClusterHealthResponse{
Health: "OK",
Checks: make(map[string]CheckResult),
mtx := sync.Mutex{}
wg := sync.WaitGroup{}
- for profileName, profile := range cluster.NodeProfiles {
- for svc, addr := range profile.ServicePorts() {
- // Ensure svc is listed in resp.Services.
- mtx.Lock()
- if _, ok := resp.Services[svc]; !ok {
- resp.Services[svc] = ServiceHealth{Health: "ERROR"}
- }
- mtx.Unlock()
-
- if addr == "" {
- // svc is not expected on this node.
- continue
- }
+ for svcName, svc := range agg.Cluster.Services.Map() {
+ // Ensure svc is listed in resp.Services.
+ mtx.Lock()
+ if _, ok := resp.Services[svcName]; !ok {
+ resp.Services[svcName] = ServiceHealth{Health: "ERROR"}
+ }
+ mtx.Unlock()
+ for addr := range svc.InternalURLs {
wg.Add(1)
- go func(profileName string, svc arvados.ServiceName, addr string) {
+ go func(svcName arvados.ServiceName, addr arvados.URL) {
defer wg.Done()
var result CheckResult
- url, err := agg.pingURL(profileName, addr)
+ pingURL, err := agg.pingURL(addr)
if err != nil {
result = CheckResult{
Health: "ERROR",
Error: err.Error(),
}
} else {
- result = agg.ping(url, cluster)
+ result = agg.ping(pingURL)
}
mtx.Lock()
defer mtx.Unlock()
- resp.Checks[fmt.Sprintf("%s+%s", svc, url)] = result
+ resp.Checks[fmt.Sprintf("%s+%s", svcName, pingURL)] = result
if result.Health == "OK" {
- h := resp.Services[svc]
+ h := resp.Services[svcName]
h.N++
h.Health = "OK"
- resp.Services[svc] = h
+ resp.Services[svcName] = h
} else {
resp.Health = "ERROR"
}
- }(profileName, svc, addr)
+ }(svcName, addr)
}
}
wg.Wait()
return resp
}
-func (agg *Aggregator) pingURL(node, addr string) (string, error) {
- _, port, err := net.SplitHostPort(addr)
- return "http://" + node + ":" + port + "/_health/ping", err
+func (agg *Aggregator) pingURL(svcURL arvados.URL) (*url.URL, error) {
+ base := url.URL(svcURL)
+ return base.Parse("/_health/ping")
}
-func (agg *Aggregator) ping(url string, cluster *arvados.Cluster) (result CheckResult) {
+func (agg *Aggregator) ping(target *url.URL) (result CheckResult) {
t0 := time.Now()
var err error
}
}()
- req, err := http.NewRequest("GET", url, nil)
+ req, err := http.NewRequest("GET", target.String(), nil)
if err != nil {
return
}
- req.Header.Set("Authorization", "Bearer "+cluster.ManagementToken)
+ req.Header.Set("Authorization", "Bearer "+agg.Cluster.ManagementToken)
ctx, cancel := context.WithTimeout(req.Context(), time.Duration(agg.timeout))
defer cancel()
return
}
-func (agg *Aggregator) checkAuth(req *http.Request, cluster *arvados.Cluster) bool {
+func (agg *Aggregator) checkAuth(req *http.Request) bool {
creds := auth.CredentialsFromRequest(req)
for _, token := range creds.Tokens {
- if token != "" && token == cluster.ManagementToken {
+ if token != "" && token == agg.Cluster.ManagementToken {
return true
}
}
}
func (s *AggregatorSuite) SetUpTest(c *check.C) {
- s.handler = &Aggregator{Config: &arvados.Config{
- Clusters: map[string]arvados.Cluster{
- "zzzzz": {
- ManagementToken: arvadostest.ManagementToken,
- NodeProfiles: map[string]arvados.NodeProfile{},
- },
- },
+ s.handler = &Aggregator{Cluster: &arvados.Cluster{
+ ManagementToken: arvadostest.ManagementToken,
}}
s.req = httptest.NewRequest("GET", "/_health/all", nil)
s.req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
c.Check(s.resp.Code, check.Equals, http.StatusUnauthorized)
}
-func (s *AggregatorSuite) TestEmptyConfig(c *check.C) {
+func (s *AggregatorSuite) TestNoServicesConfigured(c *check.C) {
s.handler.ServeHTTP(s.resp, s.req)
- s.checkOK(c)
+ s.checkUnhealthy(c)
}
func (s *AggregatorSuite) stubServer(handler http.Handler) (*httptest.Server, string) {
return srv, ":" + port
}
-type unhealthyHandler struct{}
-
-func (*unhealthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
- if req.URL.Path == "/_health/ping" {
- resp.Write([]byte(`{"health":"ERROR","error":"the bends"}`))
- } else {
- http.Error(resp, "not found", http.StatusNotFound)
- }
-}
-
func (s *AggregatorSuite) TestUnhealthy(c *check.C) {
srv, listen := s.stubServer(&unhealthyHandler{})
defer srv.Close()
- s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
- Keepstore: arvados.SystemServiceInstance{Listen: listen},
- }
+ arvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepstore, "http://localhost"+listen+"/")
s.handler.ServeHTTP(s.resp, s.req)
s.checkUnhealthy(c)
}
-type healthyHandler struct{}
-
-func (*healthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
- if req.URL.Path == "/_health/ping" {
- resp.Write([]byte(`{"health":"OK"}`))
- } else {
- http.Error(resp, "not found", http.StatusNotFound)
- }
-}
-
func (s *AggregatorSuite) TestHealthy(c *check.C) {
srv, listen := s.stubServer(&healthyHandler{})
defer srv.Close()
- s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
- Controller: arvados.SystemServiceInstance{Listen: listen},
- DispatchCloud: arvados.SystemServiceInstance{Listen: listen},
- Keepbalance: arvados.SystemServiceInstance{Listen: listen},
- Keepproxy: arvados.SystemServiceInstance{Listen: listen},
- Keepstore: arvados.SystemServiceInstance{Listen: listen},
- Keepweb: arvados.SystemServiceInstance{Listen: listen},
- Nodemanager: arvados.SystemServiceInstance{Listen: listen},
- RailsAPI: arvados.SystemServiceInstance{Listen: listen},
- Websocket: arvados.SystemServiceInstance{Listen: listen},
- Workbench: arvados.SystemServiceInstance{Listen: listen},
- }
+ s.setAllServiceURLs(listen)
s.handler.ServeHTTP(s.resp, s.req)
resp := s.checkOK(c)
svc := "keepstore+http://localhost" + listen + "/_health/ping"
defer srvH.Close()
srvU, listenU := s.stubServer(&unhealthyHandler{})
defer srvU.Close()
- s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
- Controller: arvados.SystemServiceInstance{Listen: listenH},
- DispatchCloud: arvados.SystemServiceInstance{Listen: listenH},
- Keepbalance: arvados.SystemServiceInstance{Listen: listenH},
- Keepproxy: arvados.SystemServiceInstance{Listen: listenH},
- Keepstore: arvados.SystemServiceInstance{Listen: listenH},
- Keepweb: arvados.SystemServiceInstance{Listen: listenH},
- Nodemanager: arvados.SystemServiceInstance{Listen: listenH},
- RailsAPI: arvados.SystemServiceInstance{Listen: listenH},
- Websocket: arvados.SystemServiceInstance{Listen: listenH},
- Workbench: arvados.SystemServiceInstance{Listen: listenH},
- }
- s.handler.Config.Clusters["zzzzz"].NodeProfiles["127.0.0.1"] = arvados.NodeProfile{
- Keepstore: arvados.SystemServiceInstance{Listen: listenU},
- }
+ s.setAllServiceURLs(listenH)
+ arvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepstore, "http://localhost"+listenH+"/", "http://127.0.0.1"+listenU+"/")
s.handler.ServeHTTP(s.resp, s.req)
resp := s.checkUnhealthy(c)
ep := resp.Checks["keepstore+http://localhost"+listenH+"/_health/ping"]
c.Logf("%#v", ep)
}
+func (s *AggregatorSuite) TestPingTimeout(c *check.C) {
+ s.handler.timeout = arvados.Duration(100 * time.Millisecond)
+ srv, listen := s.stubServer(&slowHandler{})
+ defer srv.Close()
+ arvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepstore, "http://localhost"+listen+"/")
+ s.handler.ServeHTTP(s.resp, s.req)
+ resp := s.checkUnhealthy(c)
+ ep := resp.Checks["keepstore+http://localhost"+listen+"/_health/ping"]
+ c.Check(ep.Health, check.Equals, "ERROR")
+ c.Check(ep.HTTPStatusCode, check.Equals, 0)
+ rt, err := ep.ResponseTime.Float64()
+ c.Check(err, check.IsNil)
+ c.Check(rt > 0.005, check.Equals, true)
+}
+
func (s *AggregatorSuite) checkError(c *check.C) {
c.Check(s.resp.Code, check.Not(check.Equals), http.StatusOK)
var resp ClusterHealthResponse
- err := json.NewDecoder(s.resp.Body).Decode(&resp)
+ err := json.Unmarshal(s.resp.Body.Bytes(), &resp)
c.Check(err, check.IsNil)
c.Check(resp.Health, check.Not(check.Equals), "OK")
}
func (s *AggregatorSuite) checkResult(c *check.C, health string) ClusterHealthResponse {
c.Check(s.resp.Code, check.Equals, http.StatusOK)
var resp ClusterHealthResponse
- err := json.NewDecoder(s.resp.Body).Decode(&resp)
+ c.Log(s.resp.Body.String())
+ err := json.Unmarshal(s.resp.Body.Bytes(), &resp)
c.Check(err, check.IsNil)
c.Check(resp.Health, check.Equals, health)
return resp
}
-type slowHandler struct{}
+func (s *AggregatorSuite) setAllServiceURLs(listen string) {
+ svcs := &s.handler.Cluster.Services
+ for _, svc := range []*arvados.Service{
+ &svcs.Controller,
+ &svcs.DispatchCloud,
+ &svcs.Keepbalance,
+ &svcs.Keepproxy,
+ &svcs.Keepstore,
+ &svcs.Health,
+ &svcs.Nodemanager,
+ &svcs.RailsAPI,
+ &svcs.WebDAV,
+ &svcs.Websocket,
+ &svcs.Workbench1,
+ &svcs.Workbench2,
+ } {
+ arvadostest.SetServiceURL(svc, "http://localhost"+listen+"/")
+ }
+}
-func (*slowHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+type unhealthyHandler struct{}
+
+func (*unhealthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ if req.URL.Path == "/_health/ping" {
+ resp.Write([]byte(`{"health":"ERROR","error":"the bends"}`))
+ } else {
+ http.Error(resp, "not found", http.StatusNotFound)
+ }
+}
+
+type healthyHandler struct{}
+
+func (*healthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
if req.URL.Path == "/_health/ping" {
- time.Sleep(3 * time.Second)
resp.Write([]byte(`{"health":"OK"}`))
} else {
http.Error(resp, "not found", http.StatusNotFound)
}
}
-func (s *AggregatorSuite) TestPingTimeout(c *check.C) {
- s.handler.timeout = arvados.Duration(100 * time.Millisecond)
- srv, listen := s.stubServer(&slowHandler{})
- defer srv.Close()
- s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
- Keepstore: arvados.SystemServiceInstance{Listen: listen},
+type slowHandler struct{}
+
+func (*slowHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ if req.URL.Path == "/_health/ping" {
+ time.Sleep(3 * time.Second)
+ resp.Write([]byte(`{"health":"OK"}`))
+ } else {
+ http.Error(resp, "not found", http.StatusNotFound)
}
- s.handler.ServeHTTP(s.resp, s.req)
- resp := s.checkUnhealthy(c)
- ep := resp.Checks["keepstore+http://localhost"+listen+"/_health/ping"]
- c.Check(ep.Health, check.Equals, "ERROR")
- c.Check(ep.HTTPStatusCode, check.Equals, 0)
- rt, err := ep.ResponseTime.Float64()
- c.Check(err, check.IsNil)
- c.Check(rt > 0.005, check.Equals, true)
}
try:
username = pamh.get_user(None)
- except pamh.exception, e:
+ except pamh.exception as e:
return e.pam_result
if not username:
('share/pam-configs', ['pam-configs/arvados']),
('share/doc/arvados-pam', ['LICENSE-2.0.txt', 'README.rst']),
('share/doc/arvados-pam/examples', glob.glob('examples/*')),
-
- # The arvados build scripts used to install data files to
- # "/usr/data/*" but now install them to "/usr/*". Here, we
- # install an extra copy in the old location so existing pam
- # configs can still work. When old systems have had a chance
- # to update to the new paths, this line can be removed.
- ('data/lib/security', ['lib/libpam_arvados.py']),
],
install_requires=[
'arvados-python-client>=0.1.20150801000000',
--- /dev/null
+#!/usr/bin/env python3
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import arvados.util
+import arvados.errors
+import csv
+import sys
+import argparse
+import hmac
+import urllib.parse
+
+def main():
+
+ parser = argparse.ArgumentParser(description='Migrate users to federated identity, see https://doc.arvados.org/admin/merge-remote-account.html')
+ parser.add_argument('--tokens', type=str, required=True)
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--report', type=str, help="Generate report .csv file listing users by email address and their associated Arvados accounts")
+ group.add_argument('--migrate', type=str, help="Consume report .csv and migrate users to designated Arvados accounts")
+ group.add_argument('--check', action="store_true", help="Check that tokens are usable and the federation is well connected")
+ args = parser.parse_args()
+
+ clusters = {}
+ errors = []
+ print("Reading %s" % args.tokens)
+ with open(args.tokens, "rt") as f:
+ for r in csv.reader(f):
+ host = r[0]
+ token = r[1]
+ print("Contacting %s" % (host))
+ arv = arvados.api(host=host, token=token, cache=False)
+ try:
+ cur = arv.users().current().execute()
+ arv.api_client_authorizations().list(limit=1).execute()
+ except arvados.errors.ApiError as e:
+ errors.append("checking token for %s: %s" % (host, e))
+ errors.append(' This script requires a token issued to a trusted client in order to manipulate access tokens.')
+ errors.append(' See "Trusted client setting" in https://doc.arvados.org/install/install-workbench-app.html')
+ errors.append(' and https://doc.arvados.org/api/tokens.html')
+ continue
+
+ if not cur["is_admin"]:
+ errors.append("Not admin of %s" % host)
+ continue
+
+ clusters[arv._rootDesc["uuidPrefix"]] = arv
+
+
+ print("Checking that the federation is well connected")
+ for v in clusters.values():
+ for r in clusters:
+ if r != v._rootDesc["uuidPrefix"] and r not in v._rootDesc["remoteHosts"]:
+ errors.append("%s is missing from remoteHosts of %s" % (r, v._rootDesc["uuidPrefix"]))
+ for r in v._rootDesc["remoteHosts"]:
+ if r != "*" and r not in clusters:
+ print("WARNING: %s is federated with %s but %s is missing from the tokens file or the token is invalid" % (v._rootDesc["uuidPrefix"], r, r))
+
+ if errors:
+ for e in errors:
+ print("ERROR: "+str(e))
+ exit(1)
+
+ if args.check:
+ print("Tokens file passed checks")
+ exit(0)
+
+ if args.report:
+ users = []
+ for c, arv in clusters.items():
+ print("Getting user list from %s" % c)
+ ul = arvados.util.list_all(arv.users().list)
+ for l in ul:
+ if l["uuid"].startswith(c):
+ users.append(l)
+
+ out = csv.writer(open(args.report, "wt"))
+
+ out.writerow(("email", "user uuid", "primary cluster/user"))
+
+ users = sorted(users, key=lambda u: u["email"]+"::"+u["uuid"])
+
+ accum = []
+ lastemail = None
+ for u in users:
+ if u["uuid"].endswith("-anonymouspublic") or u["uuid"].endswith("-000000000000000"):
+ continue
+ if lastemail == None:
+ lastemail = u["email"]
+ if u["email"] == lastemail:
+ accum.append(u)
+ else:
+ homeuuid = None
+ for a in accum:
+ if homeuuid is None:
+ homeuuid = a["uuid"]
+ if a["uuid"] != homeuuid:
+ homeuuid = ""
+ for a in accum:
+ out.writerow((a["email"], a["uuid"], homeuuid[0:5]))
+ lastemail = u["email"]
+ accum = [u]
+
+ homeuuid = None
+ for a in accum:
+ if homeuuid is None:
+ homeuuid = a["uuid"]
+ if a["uuid"] != homeuuid:
+ homeuuid = ""
+ for a in accum:
+ out.writerow((a["email"], a["uuid"], homeuuid[0:5]))
+
+ print("Wrote %s" % args.report)
+
+ if args.migrate:
+ rows = []
+ by_email = {}
+ with open(args.migrate, "rt") as f:
+ for r in csv.reader(f):
+ if r[0] == "email":
+ continue
+ by_email.setdefault(r[0], [])
+ by_email[r[0]].append(r)
+ rows.append(r)
+ for r in rows:
+ email = r[0]
+ old_user_uuid = r[1]
+ userhome = r[2]
+
+ if userhome == "":
+ print("(%s) Skipping %s, no home cluster specified" % (email, old_user_uuid))
+ if old_user_uuid.startswith(userhome):
+ continue
+ candidates = []
+ for b in by_email[email]:
+ if b[1].startswith(userhome):
+ candidates.append(b)
+ if len(candidates) == 0:
+ if len(userhome) == 5 and userhome not in clusters:
+ print("(%s) Cannot migrate %s, unknown home cluster %s (typo?)" % (email, old_user_uuid, userhome))
+ else:
+ print("(%s) No user listed with same email to migrate %s to %s" % (email, old_user_uuid, userhome))
+ continue
+ if len(candidates) > 1:
+ print("(%s) Multiple users listed to migrate %s to %s, use full uuid" % (email, old_user_uuid, userhome))
+ continue
+ new_user_uuid = candidates[0][1]
+
+ # cluster where the migration is happening
+ migratecluster = old_user_uuid[0:5]
+ migratearv = clusters[migratecluster]
+
+ # the user's new home cluster
+ newhomecluster = userhome[0:5]
+ homearv = clusters[newhomecluster]
+
+ # create a token for the new user and salt it for the
+ # migration cluster, then use it to access the migration
+ # cluster as the new user once before merging to ensure
+ # the new user is known on that cluster.
+ try:
+ newtok = homearv.api_client_authorizations().create(body={
+ "api_client_authorization": {'owner_uuid': new_user_uuid}}).execute()
+ except arvados.errors.ApiError as e:
+ print("(%s) Could not create API token for %s: %s" % (email, new_user_uuid, e))
+ continue
+
+ salted = 'v2/' + newtok["uuid"] + '/' + hmac.new(newtok["api_token"].encode(),
+ msg=migratecluster.encode(),
+ digestmod='sha1').hexdigest()
+ try:
+ ru = urllib.parse.urlparse(migratearv._rootDesc["rootUrl"])
+ newuser = arvados.api(host=ru.netloc, token=salted).users().current().execute()
+ except arvados.errors.ApiError as e:
+ print("(%s) Error getting user info for %s from %s: %s" % (email, new_user_uuid, migratecluster, e))
+ continue
+
+ try:
+ olduser = migratearv.users().get(uuid=old_user_uuid).execute()
+ except arvados.errors.ApiError as e:
+ print("(%s) Could not retrieve user %s from %s, user may have already been migrated: %s" % (email, old_user_uuid, migratecluster, e))
+ continue
+
+ if not newuser["is_active"]:
+ print("(%s) Activating user %s on %s" % (email, new_user_uuid, migratecluster))
+ try:
+ migratearv.users().update(uuid=new_user_uuid, body={"is_active": True}).execute()
+ except arvados.errors.ApiError as e:
+ print("(%s) Could not activate user %s on %s: %s" % (email, new_user_uuid, migratecluster, e))
+ continue
+
+ if olduser["is_admin"] and not newuser["is_admin"]:
+ print("(%s) Not migrating %s because user is admin but target user %s is not admin on %s" % (email, old_user_uuid, new_user_uuid, migratecluster))
+ continue
+
+ print("(%s) Migrating %s to %s on %s" % (email, old_user_uuid, new_user_uuid, migratecluster))
+
+ try:
+ grp = migratearv.groups().create(body={
+ "owner_uuid": new_user_uuid,
+ "name": "Migrated from %s (%s)" % (email, old_user_uuid),
+ "group_class": "project"
+ }, ensure_unique_name=True).execute()
+ migratearv.users().merge(old_user_uuid=old_user_uuid,
+ new_user_uuid=new_user_uuid,
+ new_owner_uuid=grp["uuid"],
+ redirect_to_new_user=True).execute()
+ except arvados.errors.ApiError as e:
+ print("(%s) Error migrating user: %s" % (email, e))
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from arvados.commands.federation_migrate import main
+main()
'bin/arv-keepdocker',
'bin/arv-ls',
'bin/arv-migrate-docker19',
+ 'bin/arv-federation-migrate',
'bin/arv-normalize',
'bin/arv-put',
'bin/arv-run',
f.write("""
Clusters:
zzzzz:
- HTTPRequestTimeout: 30s
+ ManagementToken: e687950a23c3a9bceec28c6223a06c79
+ API:
+ RequestTimeout: 30s
PostgreSQL:
ConnectionPool: 32
Connection:
- host: {}
- dbname: {}
- user: {}
- password: {}
- NodeProfiles:
- "*":
- "arvados-controller":
- Listen: ":{}"
- "arvados-api-server":
- Listen: ":{}"
- TLS: true
- Insecure: true
+ host: {dbhost}
+ dbname: {dbname}
+ user: {dbuser}
+ password: {dbpass}
+ TLS:
+ Insecure: true
+ Services:
+ Controller:
+ InternalURLs:
+ "http://localhost:{controllerport}": {{}}
+ RailsAPI:
+ InternalURLs:
+ "https://localhost:{railsport}": {{}}
""".format(
- _dbconfig('host'),
- _dbconfig('database'),
- _dbconfig('username'),
- _dbconfig('password'),
- port,
- rails_api_port,
+ dbhost=_dbconfig('host'),
+ dbname=_dbconfig('database'),
+ dbuser=_dbconfig('username'),
+ dbpass=_dbconfig('password'),
+ controllerport=port,
+ railsport=rails_api_port,
))
logf = open(_logfilename('controller'), 'a')
controller = subprocess.Popen(
agh = subprocess.Popen(
['arv-git-httpd',
'-repo-root='+gitdir+'/test',
+ '-management-token=e687950a23c3a9bceec28c6223a06c79',
'-address=:'+str(gitport)],
env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
with open(_pidfile('arv-git-httpd'), 'w') as f:
['keep-web',
'-allow-anonymous',
'-attachment-only-host=download',
+ '-management-token=e687950a23c3a9bceec28c6223a06c79',
'-listen=:'+str(keepwebport)],
env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
with open(_pidfile('keep-web'), 'w') as f:
gem 'rails-perftest'
gem 'rails-controller-testing'
+gem 'sass-rails'
+
# Install any plugin gems
Dir.glob(File.join(File.dirname(__FILE__), 'lib', '**', "Gemfile")) do |f|
eval(IO.read(f), binding)
faye-websocket (0.10.7)
eventmachine (>= 0.12.0)
websocket-driver (>= 0.5.1)
+ ffi (1.9.25)
globalid (0.4.2)
activesupport (>= 4.2.0)
googleauth (0.8.0)
rake (>= 0.8.7)
thor (>= 0.18.1, < 2.0)
rake (12.3.2)
+ rb-fsevent (0.10.3)
+ rb-inotify (0.9.10)
+ ffi (>= 0.5.0, < 2)
ref (2.0.0)
request_store (1.4.1)
rack (>= 1.4)
rvm-capistrano (1.5.6)
capistrano (~> 2.15.4)
safe_yaml (1.0.5)
+ sass (3.5.5)
+ sass-listen (~> 4.0.0)
+ sass-listen (4.0.0)
+ rb-fsevent (~> 0.9, >= 0.9.4)
+ rb-inotify (~> 0.9, >= 0.9.7)
+ sass-rails (5.0.7)
+ railties (>= 4.0.0, < 6)
+ sass (~> 3.1)
+ sprockets (>= 2.8, < 4.0)
+ sprockets-rails (>= 2.0, < 4.0)
+ tilt (>= 1.1, < 3)
signet (0.11.0)
addressable (~> 2.3)
faraday (~> 0.9)
ref
thor (0.20.3)
thread_safe (0.3.6)
+ tilt (2.0.8)
tzinfo (1.2.5)
thread_safe (~> 0.1)
uglifier (2.7.2)
ruby-prof (~> 0.15.0)
rvm-capistrano
safe_yaml
+ sass-rails
simplecov (~> 0.7.1)
simplecov-rcov
sshkey
* and any sub-directories. You're free to add application-wide styles to this file and they'll appear at
* the top of the compiled file, but it's generally better to create a new file per style scope.
*= require_self
- *= require_tree .
+ *= require_tree .
*/
.contain-align-left {
font-size: 0.8em;
}
img.curoverse-logo {
- width: 221px;
- height: 44px;
+ height: 66px;
}
#intropage {
font-family: Verdana,Arial,sans-serif;
color: #000;
font-weight: bold;
}
-
end
def load_json_value(hash, key, must_be_class=nil)
- if hash[key].is_a? String
- hash[key] = SafeJSON.load(hash[key])
- if must_be_class and !hash[key].is_a? must_be_class
- raise TypeError.new("parameter #{key.to_s} must be a #{must_be_class.to_s}")
- end
+ return if hash[key].nil?
+
+ val = hash[key]
+ if val.is_a? ActionController::Parameters
+ val = val.to_unsafe_hash
+ elsif val.is_a? String
+ val = SafeJSON.load(val)
+ hash[key] = val
+ end
+ # When assigning a Hash to an ActionController::Parameters and then
+ # retrieving it, we get another ActionController::Parameters instead of
+ # a Hash. This doesn't happen with other types. This is why 'val' is
+ # being used to do type checking below.
+ if must_be_class and !val.is_a? must_be_class
+ raise TypeError.new("parameter #{key.to_s} must be a #{must_be_class.to_s}")
end
end
accept_attribute_as_json :properties, Hash
accept_attribute_as_json :info, Hash
def accept_attribute_as_json(attr, must_be_class)
- if params[resource_name] and resource_attrs.is_a? Hash
+ if params[resource_name] and [Hash, ActionController::Parameters].include?(resource_attrs.class)
if resource_attrs[attr].is_a? Hash
# Convert symbol keys to strings (in hashes provided by
# resource_attrs)
Rails.cache.fetch 'arvados_v1_rest_discovery' do
Rails.application.eager_load!
remoteHosts = {}
- Rails.configuration.RemoteClusters.each {|k,v| if k != "*" then remoteHosts[k] = v["Host"] end }
+ Rails.configuration.RemoteClusters.each {|k,v| if k != :"*" then remoteHosts[k] = v["Host"] end }
discovery = {
kind: "discovery#restDescription",
discoveryVersion: "v1",
remoteHostsViaDNS: Rails.configuration.RemoteClusters["*"].Proxy,
websocketUrl: Rails.configuration.Services.Websocket.ExternalURL.to_s,
workbenchUrl: Rails.configuration.Services.Workbench1.ExternalURL.to_s,
+ workbench2Url: Rails.configuration.Services.Workbench2.ExternalURL.to_s,
keepWebServiceUrl: Rails.configuration.Services.WebDAV.ExternalURL.to_s,
gitUrl: Rails.configuration.Services.GitHTTP.ExternalURL.to_s,
parameters: {
end
def merge
- if !Thread.current[:api_client].andand.is_trusted
- return send_error("supplied API token is not from a trusted client", status: 403)
- elsif Thread.current[:api_client_authorization].scopes != ['all']
- return send_error("cannot merge with a scoped token", status: 403)
- end
+ if (params[:old_user_uuid] || params[:new_user_uuid])
+ if !current_user.andand.is_admin
+ return send_error("Must be admin to use old_user_uuid/new_user_uuid", status: 403)
+ end
+ if !params[:old_user_uuid] || !params[:new_user_uuid]
+ return send_error("Must supply both old_user_uuid and new_user_uuid", status: 422)
+ end
+ new_user = User.find_by_uuid(params[:new_user_uuid])
+ if !new_user
+ return send_error("User in new_user_uuid not found", status: 422)
+ end
+ @object = User.find_by_uuid(params[:old_user_uuid])
+ if !@object
+ return send_error("User in old_user_uuid not found", status: 422)
+ end
+ else
+ if !Thread.current[:api_client].andand.is_trusted
+ return send_error("supplied API token is not from a trusted client", status: 403)
+ elsif Thread.current[:api_client_authorization].scopes != ['all']
+ return send_error("cannot merge with a scoped token", status: 403)
+ end
- new_auth = ApiClientAuthorization.validate(token: params[:new_user_token])
- if !new_auth
- return send_error("invalid new_user_token", status: 401)
- end
- if !new_auth.api_client.andand.is_trusted
- return send_error("supplied new_user_token is not from a trusted client", status: 403)
- elsif new_auth.scopes != ['all']
- return send_error("supplied new_user_token has restricted scope", status: 403)
+ new_auth = ApiClientAuthorization.validate(token: params[:new_user_token])
+ if !new_auth
+ return send_error("invalid new_user_token", status: 401)
+ end
+
+ if new_auth.user.uuid[0..4] == Rails.configuration.ClusterID
+ if !new_auth.api_client.andand.is_trusted
+ return send_error("supplied new_user_token is not from a trusted client", status: 403)
+ elsif new_auth.scopes != ['all']
+ return send_error("supplied new_user_token has restricted scope", status: 403)
+ end
+ end
+ new_user = new_auth.user
+ @object = current_user
end
- new_user = new_auth.user
- if current_user.uuid == new_user.uuid
+ if @object.uuid == new_user.uuid
return send_error("cannot merge user to self", status: 422)
end
+ if !params[:new_owner_uuid]
+ return send_error("missing new_owner_uuid", status: 422)
+ end
+
if !new_user.can?(write: params[:new_owner_uuid])
return send_error("cannot move objects into supplied new_owner_uuid: new user does not have write permission", status: 403)
end
redirect = params[:redirect_to_new_user]
+ if @object.uuid[0..4] != Rails.configuration.ClusterID && redirect
+ return send_error("cannot merge remote user to other with redirect_to_new_user=true", status: 422)
+ end
+
if !redirect
return send_error("merge with redirect_to_new_user=false is not yet supported", status: 422)
end
- @object = current_user
act_as_system_user do
@object.merge(new_owner_uuid: params[:new_owner_uuid], redirect_to_user_uuid: redirect && new_user.uuid)
end
type: 'string', required: true,
},
new_user_token: {
- type: 'string', required: true,
+ type: 'string', required: false,
},
redirect_to_new_user: {
type: 'boolean', required: false,
},
+ old_user_uuid: {
+ type: 'string', required: false,
+ },
+ new_user_uuid: {
+ type: 'string', required: false,
+ }
}
end
# For the benefit of functional and integration tests:
@user = user
+ if user.uuid[0..4] != Rails.configuration.ClusterID
+ # Actually a remote user
+ # Send them to their home cluster's login
+ rh = Rails.configuration.RemoteClusters[user.uuid[0..4]]
+ remote, return_to_url = params[:return_to].split(',', 2)
+ @remotehomeurl = "#{rh.Scheme || "https"}://#{rh.Host}/login?remote=#{Rails.configuration.ClusterID}&return_to=#{return_to_url}"
+ render
+ return
+ end
+
# prevent ArvadosModel#before_create and _update from throwing
# "unauthorized":
Thread.current[:user] = user
timestamp = opts[:expire]
else
timestamp = db_current_time.to_i +
- (opts[:ttl] || Rails.configuration.Collections.BlobSigningTTL)
+ (opts[:ttl] || Rails.configuration.Collections.BlobSigningTTL.to_i)
end
timestamp_hex = timestamp.to_s(16)
# => "53163cb4"
- blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_s(16)
+ blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_i.to_s(16)
# Generate a signature.
signature =
if timestamp.to_i(16) < (opts[:now] or db_current_time.to_i)
raise Blob::InvalidSignatureError.new 'Signature expiry time has passed.'
end
- blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_s(16)
+ blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_i.to_s(16)
my_signature =
generate_signature((opts[:key] or Rails.configuration.Collections.BlobSigningKey),
validate :past_versions_cannot_be_updated, on: :update
after_validation :set_file_count_and_total_size
before_save :set_file_names
- around_update :manage_versioning
+ around_update :manage_versioning, unless: :is_past_version?
api_accessible :user, extend: :common do |t|
t.add :name
sync_past_versions if syncable_updates.any?
if snapshot
snapshot.attributes = self.syncable_updates
- snapshot.manifest_text = snapshot.signed_manifest_text
- snapshot.save
+ leave_modified_by_user_alone do
+ act_as_system_user do
+ snapshot.save
+ end
+ end
end
end
end
updates = self.syncable_updates
Collection.where('current_version_uuid = ? AND uuid != ?', self.uuid_was, self.uuid_was).each do |c|
c.attributes = updates
- # Use a different validation context to skip the 'old_versions_cannot_be_updated'
+ # Use a different validation context to skip the 'past_versions_cannot_be_updated'
# validator, as on this case it is legal to update some fields.
leave_modified_by_user_alone do
leave_modified_at_alone do
['uuid', 'owner_uuid', 'delete_at', 'trash_at', 'is_trashed', 'replication_desired', 'storage_classes_desired']
end
+ def is_past_version?
+ # Check for the '_was' values just in case the update operation
+ # includes a change on current_version_uuid or uuid.
+ !(new_record? || self.current_version_uuid_was == self.uuid_was)
+ end
+
def should_preserve_version?
return false unless (Rails.configuration.Collections.CollectionVersioning && versionable_updates?(self.changes.keys))
+ return false if self.is_trashed
+
idle_threshold = Rails.configuration.Collections.PreserveVersionIfIdle
if !self.preserve_version_was &&
(idle_threshold < 0 ||
return manifest_text
else
token = Thread.current[:token]
- exp = [db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL,
+ exp = [db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL.to_i,
trash_at].compact.map(&:to_i).min
self.class.sign_manifest manifest_text, token, exp
end
def self.sign_manifest manifest, token, exp=nil
if exp.nil?
- exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL
+ exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL.to_i
end
signing_opts = {
api_token: token,
end
def past_versions_cannot_be_updated
- # We check for the '_was' values just in case the update operation
- # includes a change on current_version_uuid or uuid.
- if current_version_uuid_was != uuid_was
+ if is_past_version?
errors.add(:base, "past versions cannot be updated")
false
end
def versioning_metadata_updates
valid = true
- if (current_version_uuid_was == uuid_was) && current_version_uuid_changed?
+ if !is_past_version? && current_version_uuid_changed?
errors.add(:current_version_uuid, "cannot be updated")
valid = false
end
nil
end
+ def changed_in_place?(raw_old_value, value)
+ # Compare deserialized values for correctness, checking serialized values
+ # may include changes in ordering, inline whitespaces, etc.
+ deserialize(raw_old_value) != value
+ end
+
def deserialize(value)
if value.nil?
self.default_value
if self.prefs_changed?
if self.prefs_was.andand.empty? || !self.prefs_was.andand['profile']
profile_notification_address = Rails.configuration.Users.UserProfileNotificationAddress
- ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address
+ ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address and !profile_notification_address.empty?
end
end
end
<!DOCTYPE html>
<html>
<head>
- <title>Server</title>
+ <title>Arvados API Server (<%= Rails.configuration.ClusterID %>)</title>
<%= stylesheet_link_tag "application" %>
<%= javascript_include_tag "application" %>
<%= csrf_meta_tags %>
</head>
<body>
<div id="header">
- <div class="apptitle">ARVADOS <span class="beta"><span>BETA</span></span></div>
+ <div class="apptitle">ARVADOS</div>
+ <div>(<%= Rails.configuration.ClusterID %>)</div>
<div style="float:right">
<% if current_user %>
<%= current_user.full_name %>
•
<a class="logout" href="/logout">Log out</a>
<% else %>
- <a class="logout" href="/auth/joshid">Log in</a>
+ <!--<a class="logout" href="/auth/joshid">Log in</a>-->
<% end %>
<% if current_user and session[:real_uid] and session[:switch_back_to] and User.find(session[:real_uid].to_i).verify_userswitch_cookie(session[:switch_back_to]) %>
<p>Sorry, something went wrong logging you in. Please try again.</p>
- <p style="float:right;margin-top:1em">
- <a href="/auth/joshid">Log in here.</a>
- </p>
+ <!--<p style="float:right;margin-top:1em">
+ <a href="/login">Log in here.</a>
+ </p>-->
<div style="clear:both;height:8em"></div>
</div>
--- /dev/null
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div style="width:40em; margin:2em auto 0 auto">
+ <h1>Login redirect</h1>
+ <p>This login is linked to federated user <b><%= @user.email %></b> (<b><%= @user.uuid %></b>) on cluster <b><%= @user.uuid[0..4] %></b>. You need to log in again on that cluster.</p>
+ <p>After logging in, you will be returned to this cluster (<b><%=Rails.configuration.ClusterID%></b>).</p>
+ <div style="width: 100%">
+ <div style="float: left"><a href="<%=@remotehomeurl%>">Click here to log in on cluster <%= @user.uuid[0..4] %>.</a></div>
+ <div style="float: right"><a href="/logout">Cancel</a></div>
+ </div>
+</div>
raise "Missing #{::Rails.root.to_s}/config/config.default.yml"
end
+def remove_sample_entries(h)
+ return unless h.is_a? Hash
+ h.delete("SAMPLE")
+ h.each { |k, v| remove_sample_entries(v) }
+end
+remove_sample_entries($arvados_config_defaults)
+
clusterID, clusterConfig = $arvados_config_defaults["Clusters"].first
$arvados_config_defaults = clusterConfig
$arvados_config_defaults["ClusterID"] = clusterID
arvcfg.declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
arvcfg.declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
arvcfg.declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key
-arvcfg.declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl
-arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest
+arvcfg.declare_config "Collections.BlobSigningTTL", ActiveSupport::Duration, :blob_signature_ttl
+arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Collections.BlobSigning", !v }
arvcfg.declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
arvcfg.declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
arvcfg.declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
dbcfg = ConfigLoader.new
dbcfg.declare_config "PostgreSQL.ConnectionPool", Integer, :pool
-dbcfg.declare_config "PostgreSQL.Connection.Host", String, :host
-dbcfg.declare_config "PostgreSQL.Connection.Port", Integer, :port
-dbcfg.declare_config "PostgreSQL.Connection.User", String, :username
-dbcfg.declare_config "PostgreSQL.Connection.Password", String, :password
-dbcfg.declare_config "PostgreSQL.Connection.DBName", String, :database
-dbcfg.declare_config "PostgreSQL.Connection.Template", String, :template
-dbcfg.declare_config "PostgreSQL.Connection.Encoding", String, :encoding
+dbcfg.declare_config "PostgreSQL.Connection.host", String, :host
+dbcfg.declare_config "PostgreSQL.Connection.port", String, :port
+dbcfg.declare_config "PostgreSQL.Connection.user", String, :username
+dbcfg.declare_config "PostgreSQL.Connection.password", String, :password
+dbcfg.declare_config "PostgreSQL.Connection.dbname", String, :database
+dbcfg.declare_config "PostgreSQL.Connection.template", String, :template
+dbcfg.declare_config "PostgreSQL.Connection.encoding", String, :encoding
application_config = {}
%w(application.default application).each do |cfgfile|
# rails environments.
#
if ::Rails.env.to_s == "test" && db_config["test"].nil?
- $arvados_config["PostgreSQL"]["Connection"]["DBName"] = "arvados_test"
+ $arvados_config["PostgreSQL"]["Connection"]["dbname"] = "arvados_test"
end
-if $arvados_config["PostgreSQL"]["Connection"]["Password"].empty?
+if $arvados_config["PostgreSQL"]["Connection"]["password"].empty?
raise "Database password is empty, PostgreSQL section is: #{$arvados_config["PostgreSQL"]}"
end
-dbhost = $arvados_config["PostgreSQL"]["Connection"]["Host"]
-if $arvados_config["PostgreSQL"]["Connection"]["Post"] != 0
- dbhost += ":#{$arvados_config["PostgreSQL"]["Connection"]["Post"]}"
+dbhost = $arvados_config["PostgreSQL"]["Connection"]["host"]
+if $arvados_config["PostgreSQL"]["Connection"]["port"] != 0
+ dbhost += ":#{$arvados_config["PostgreSQL"]["Connection"]["port"]}"
end
#
# For config migration, we've previously populated the PostgreSQL
# section of the config from database.yml
#
-ENV["DATABASE_URL"] = "postgresql://#{$arvados_config["PostgreSQL"]["Connection"]["User"]}:"+
- "#{$arvados_config["PostgreSQL"]["Connection"]["Password"]}@"+
- "#{dbhost}/#{$arvados_config["PostgreSQL"]["Connection"]["DBName"]}?"+
- "template=#{$arvados_config["PostgreSQL"]["Connection"]["Template"]}&"+
+ENV["DATABASE_URL"] = "postgresql://#{$arvados_config["PostgreSQL"]["Connection"]["user"]}:"+
+ "#{$arvados_config["PostgreSQL"]["Connection"]["password"]}@"+
+ "#{dbhost}/#{$arvados_config["PostgreSQL"]["Connection"]["dbname"]}?"+
+ "template=#{$arvados_config["PostgreSQL"]["Connection"]["template"]}&"+
"encoding=#{$arvados_config["PostgreSQL"]["Connection"]["client_encoding"]}&"+
"pool=#{$arvados_config["PostgreSQL"]["ConnectionPool"]}"
#
# SPDX-License-Identifier: AGPL-3.0
-require "arvados/keep"
-require "group_pdhs"
-
class AddFileInfoToCollection < ActiveRecord::Migration[4.2]
- def do_batch(pdhs)
- pdhs_str = ''
- pdhs.each do |pdh|
- pdhs_str << "'" << pdh << "'" << ","
- end
-
- collections = ActiveRecord::Base.connection.exec_query(
- "SELECT DISTINCT portable_data_hash, manifest_text FROM collections "\
- "WHERE portable_data_hash IN (#{pdhs_str[0..-2]}) "
- )
-
- collections.rows.each do |row|
- manifest = Keep::Manifest.new(row[1])
- ActiveRecord::Base.connection.exec_query("BEGIN")
- ActiveRecord::Base.connection.exec_query("UPDATE collections SET file_count=#{manifest.files_count}, "\
- "file_size_total=#{manifest.files_size} "\
- "WHERE portable_data_hash='#{row[0]}'")
- ActiveRecord::Base.connection.exec_query("COMMIT")
- end
- end
-
def up
add_column :collections, :file_count, :integer, default: 0, null: false
add_column :collections, :file_size_total, :integer, limit: 8, default: 0, null: false
- distinct_pdh_count = ActiveRecord::Base.connection.exec_query(
- "SELECT DISTINCT portable_data_hash FROM collections"
- ).rows.count
-
- # Generator that queries for all the distinct pdhs greater than last_pdh
- ordered_pdh_query = lambda { |last_pdh, &block|
- pdhs = ActiveRecord::Base.connection.exec_query(
- "SELECT DISTINCT portable_data_hash FROM collections "\
- "WHERE portable_data_hash > '#{last_pdh}' "\
- "ORDER BY portable_data_hash LIMIT 1000"
- )
- pdhs.rows.each do |row|
- block.call(row[0])
- end
- }
-
- batch_size_max = 1 << 28 # 256 MiB
- GroupPdhs.group_pdhs_for_multiple_transactions(ordered_pdh_query,
- distinct_pdh_count,
- batch_size_max,
- "AddFileInfoToCollection") do |pdhs|
- do_batch(pdhs)
- end
+ puts "Collections now have two new columns, file_count and file_size_total."
+ puts "They were initialized with a zero value. If you are upgrading an Arvados"
+ puts "installation, please run the populate-file-info-columns-in-collections.rb"
+ puts "script to populate the columns. If this is a new installation, that is not"
+ puts "necessary."
end
def down
end
def self.tidy_in_background
- max_age = Rails.configuration.AuditLogs.MaxAge
+ max_age = Rails.configuration.AuditLogs.MaxAge.to_i
max_batch = Rails.configuration.AuditLogs.MaxDeleteBatch
return if max_age <= 0 || max_batch <= 0
desc "Remove old container log entries from the logs table"
task delete_old_container_logs: :environment do
- delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge} seconds')"
+ delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge.to_i} seconds')"
ActiveRecord::Base.connection.execute(delete_sql)
end
namespace :db do
desc "Remove old job stderr entries from the logs table"
task delete_old_job_logs: :environment do
- delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge} seconds')"
+ delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge.to_i} seconds')"
ActiveRecord::Base.connection.execute(delete_sql)
end
earliest_delete = [
@validation_timestamp,
trash_at_was,
- ].compact.min + Rails.configuration.Collections.BlobSigningTTL.seconds
+ ].compact.min + Rails.configuration.Collections.BlobSigningTTL
# The previous value of delete_at is also an upper bound on the
# longest-lived permission token. For example, if TTL=14,
@object.update_attributes!(trash_at: db_current_time)
end
earliest_delete = (@object.trash_at +
- Rails.configuration.Collections.BlobSigningTTL.seconds)
+ Rails.configuration.Collections.BlobSigningTTL)
if @object.delete_at > earliest_delete
@object.update_attributes!(delete_at: earliest_delete)
end
#
# If container priority=0 but there are committed container requests
# for it with priority>0, update priority.
- def self.update_priority
+ #
+ # Normally, update_priority is a no-op if another thread/process is
+ # already updating. Test cases that need to check priorities after
+ # updating can force a (possibly overlapping) update in the current
+ # thread/transaction by setting the "nolock" flag. See #14878.
+ def self.update_priority(nolock: false)
if !File.owned?(Rails.root.join('tmp'))
Rails.logger.warn("UpdatePriority: not owner of #{Rails.root}/tmp, skipping")
return
end
lockfile = Rails.root.join('tmp', 'update_priority.lock')
File.open(lockfile, File::RDWR|File::CREAT, 0600) do |f|
- return unless f.flock(File::LOCK_NB|File::LOCK_EX)
+ return unless nolock || f.flock(File::LOCK_NB|File::LOCK_EX)
# priority>0 but should be 0:
ActiveRecord::Base.connection.
--- /dev/null
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Arvados version 1.4.0 introduces two new columns on the collections table named
+# file_count
+# file_size_total
+#
+# The database migration that adds these columns does not populate them with data,
+# it initializes them set to zero.
+#
+# This script will populate the columns if file_count is zero. It will ignore
+# collections that have invalid manifests, but it will spit out details for those
+# collections.
+#
+# Run the script as
+#
+# cd scripts
+# RAILS_ENV=production bundle exec populate-file-info-columns-in-collections.rb
+#
+
+ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
+require File.dirname(__FILE__) + '/../config/boot'
+require File.dirname(__FILE__) + '/../config/environment'
+
+require "arvados/keep"
+require "group_pdhs"
+
+ def do_batch(pdhs)
+ pdhs_str = ''
+ pdhs.each do |pdh|
+ pdhs_str << "'" << pdh << "'" << ","
+ end
+
+ collections = ActiveRecord::Base.connection.exec_query(
+ "SELECT DISTINCT portable_data_hash, manifest_text FROM collections "\
+ "WHERE portable_data_hash IN (#{pdhs_str[0..-2]}) "
+ )
+ collections.rows.each do |row|
+ begin
+ manifest = Keep::Manifest.new(row[1])
+ ActiveRecord::Base.connection.exec_query("BEGIN")
+ ActiveRecord::Base.connection.exec_query("UPDATE collections SET file_count=#{manifest.files_count}, "\
+ "file_size_total=#{manifest.files_size} "\
+ "WHERE portable_data_hash='#{row[0]}'")
+ ActiveRecord::Base.connection.exec_query("COMMIT")
+ rescue ArgumentError => detail
+ require 'pp'
+ puts
+ puts "*************** Row detail ***************"
+ puts
+ pp row
+ puts
+ puts "************ Collection detail ***********"
+ puts
+ pp Collection.find_by_portable_data_hash(row[0])
+ puts
+ puts "************** Error detail **************"
+ puts
+ pp detail
+ puts
+ puts "Skipping this collection, continuing!"
+ next
+ end
+ end
+ end
+
+
+def main
+
+ distinct_pdh_count = ActiveRecord::Base.connection.exec_query(
+ "SELECT DISTINCT portable_data_hash FROM collections where file_count=0"
+ ).rows.count
+
+ # Generator that queries for all the distinct pdhs greater than last_pdh
+ ordered_pdh_query = lambda { |last_pdh, &block|
+ pdhs = ActiveRecord::Base.connection.exec_query(
+ "SELECT DISTINCT portable_data_hash FROM collections "\
+ "WHERE file_count=0 and portable_data_hash > '#{last_pdh}' "\
+ "ORDER BY portable_data_hash LIMIT 1000"
+ )
+ pdhs.rows.each do |row|
+ block.call(row[0])
+ end
+ }
+
+ batch_size_max = 1 << 28 # 256 MiB
+ GroupPdhs.group_pdhs_for_multiple_transactions(ordered_pdh_query,
+ distinct_pdh_count,
+ batch_size_max,
+ "AddFileInfoToCollection") do |pdhs|
+ do_batch(pdhs)
+ end
+end
+
+main
assert_equal 'value1', json_response['properties']['property1']
end
- test "create collection with properties" do
- authorize_with :active
- manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
- post :create, params: {
- collection: {
- manifest_text: manifest_text,
- portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47",
- properties: {'property_1' => 'value_1'}
+ [
+ {'property_1' => 'value_1'},
+ "{\"property_1\":\"value_1\"}",
+ ].each do |p|
+ test "create collection with valid properties param #{p.inspect}" do
+ authorize_with :active
+ manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+ post :create, params: {
+ collection: {
+ manifest_text: manifest_text,
+ portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47",
+ properties: p
+ }
}
- }
- assert_response :success
- assert_not_nil json_response['uuid']
- assert_equal 'value_1', json_response['properties']['property_1']
+ assert_response :success
+ assert_not_nil json_response['uuid']
+ assert_equal Hash, json_response['properties'].class, 'Collection properties attribute should be of type hash'
+ assert_equal 'value_1', json_response['properties']['property_1']
+ end
+ end
+
+ [
+ false,
+ [],
+ 42,
+ 'some string',
+ '["json", "encoded", "array"]',
+ ].each do |p|
+ test "create collection with non-valid properties param #{p.inspect}" do
+ authorize_with :active
+ post :create, params: {
+ collection: {
+ name: "test collection with non-valid properties param '#{p.inspect}'",
+ manifest_text: '',
+ properties: p
+ }
+ }
+ assert_response 422
+ response_errors = json_response['errors']
+ assert_not_nil response_errors, 'Expected error in response'
+ end
end
[
assert_response :success
assert_equal 3, json_response['version']
end
+
+ test "delete collection with versioning enabled" do
+ Rails.configuration.Collections.CollectionVersioning = true
+ Rails.configuration.Collections.PreserveVersionIfIdle = 1 # 1 second
+
+ col = collections(:collection_owned_by_active)
+ assert_equal 2, col.version
+ assert col.modified_at < Time.now - 1.second
+
+ authorize_with(:active)
+ post :trash, params: {
+ id: col.uuid,
+ }
+ assert_response :success
+ assert_equal col.version, json_response['version'], 'Trashing a collection should not create a new version'
+ end
end
redirect_to_new_user: true,
})
assert_response(:success)
- assert_equal(users(:project_viewer).redirect_to_user_uuid, users(:active).uuid)
+ assert_equal(users(:active).uuid, User.unscoped.find_by_uuid(users(:project_viewer).uuid).redirect_to_user_uuid)
auth = ApiClientAuthorization.validate(token: api_client_authorizations(:project_viewer).api_token)
assert_not_nil(auth)
assert_equal(users(:active).uuid, auth.user.uuid)
end
+
+ test "merge 'project_viewer' account into 'active' account using uuids" do
+ authorize_with(:admin)
+ post(:merge, params: {
+ old_user_uuid: users(:project_viewer).uuid,
+ new_user_uuid: users(:active).uuid,
+ new_owner_uuid: users(:active).uuid,
+ redirect_to_new_user: true,
+ })
+ assert_response(:success)
+ assert_equal(users(:active).uuid, User.unscoped.find_by_uuid(users(:project_viewer).uuid).redirect_to_user_uuid)
+
+ auth = ApiClientAuthorization.validate(token: api_client_authorizations(:project_viewer).api_token)
+ assert_not_nil(auth)
+ assert_not_nil(auth.user)
+ assert_equal(users(:active).uuid, auth.user.uuid)
+ end
+
+ test "merge 'project_viewer' account into 'active' account using uuids denied for non-admin" do
+ authorize_with(:active)
+ post(:merge, params: {
+ old_user_uuid: users(:project_viewer).uuid,
+ new_user_uuid: users(:active).uuid,
+ new_owner_uuid: users(:active).uuid,
+ redirect_to_new_user: true,
+ })
+ assert_response(403)
+ assert_nil(users(:project_viewer).redirect_to_user_uuid)
+ end
+
+ test "merge 'project_viewer' account into 'active' account using uuids denied missing old_user_uuid" do
+ authorize_with(:admin)
+ post(:merge, params: {
+ new_user_uuid: users(:active).uuid,
+ new_owner_uuid: users(:active).uuid,
+ redirect_to_new_user: true,
+ })
+ assert_response(422)
+ assert_nil(users(:project_viewer).redirect_to_user_uuid)
+ end
+
+ test "merge 'project_viewer' account into 'active' account using uuids denied missing new_user_uuid" do
+ authorize_with(:admin)
+ post(:merge, params: {
+ old_user_uuid: users(:project_viewer).uuid,
+ new_owner_uuid: users(:active).uuid,
+ redirect_to_new_user: true,
+ })
+ assert_response(422)
+ assert_nil(users(:project_viewer).redirect_to_user_uuid)
+ end
+
+ test "merge 'project_viewer' account into 'active' account using uuids denied bogus old_user_uuid" do
+ authorize_with(:admin)
+ post(:merge, params: {
+ old_user_uuid: "zzzzz-tpzed-bogusbogusbogus",
+ new_user_uuid: users(:active).uuid,
+ new_owner_uuid: users(:active).uuid,
+ redirect_to_new_user: true,
+ })
+ assert_response(422)
+ assert_nil(users(:project_viewer).redirect_to_user_uuid)
+ end
+
+ test "merge 'project_viewer' account into 'active' account using uuids denied bogus new_user_uuid" do
+ authorize_with(:admin)
+ post(:merge, params: {
+ old_user_uuid: users(:project_viewer).uuid,
+ new_user_uuid: "zzzzz-tpzed-bogusbogusbogus",
+ new_owner_uuid: users(:active).uuid,
+ redirect_to_new_user: true,
+ })
+ assert_response(422)
+ assert_nil(users(:project_viewer).redirect_to_user_uuid)
+ end
+
NON_ADMIN_USER_DATA = ["uuid", "kind", "is_active", "email", "first_name",
"last_name", "username"].sort
assert_not_nil json_response['properties']
assert_empty json_response['properties']
- # update collection's description
+ # update collection's properties
put "/arvados/v1/collections/#{json_response['uuid']}",
params: {
format: :json,
},
headers: auth(:active)
assert_response :success
+ assert_equal Hash, json_response['properties'].class, 'Collection properties attribute should be of type hash'
+ assert_equal 'value_1', json_response['properties']['property_1']
+ end
+
+ test "create collection and update it with json encoded hash properties" do
+ # create collection to be searched for
+ signed_manifest = Collection.sign_manifest(". bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_test_file.txt\n", api_token(:active))
+ post "/arvados/v1/collections",
+ params: {
+ format: :json,
+ collection: {manifest_text: signed_manifest}.to_json,
+ },
+ headers: auth(:active)
+ assert_response 200
+ assert_not_nil json_response['uuid']
+ assert_not_nil json_response['properties']
+ assert_empty json_response['properties']
+
+ # update collection's properties
+ put "/arvados/v1/collections/#{json_response['uuid']}",
+ params: {
+ format: :json,
+ collection: {
+ properties: "{\"property_1\":\"value_1\"}"
+ }
+ },
+ headers: auth(:active)
+ assert_response :success
+ assert_equal Hash, json_response['properties'].class, 'Collection properties attribute should be of type hash'
assert_equal 'value_1', json_response['properties']['property_1']
end
end
end
end
+ # This test exposes a bug related to JSONB attributes, see #15725.
+ # A record freshly loaded from the database must not report any dirty
+ # attributes; relies on fixtures providing at least one collection
+ # with non-empty properties.
+ test "recently loaded collection shouldn't list changed attributes" do
+ col = Collection.where("properties != '{}'::jsonb").limit(1).first
+ refute col.properties_changed?, 'Properties field should not be seen as changed'
+ end
+
+ # Each entry is [should_be_equal, value_1, value_2]. Assigning a hash
+ # that serializes to the same JSONB content (regardless of key order or
+ # symbol vs. string keys) must NOT mark the attribute as changed, while
+ # genuine differences (values, list order, added keys) must.
+ [
+ [
+ # Symbol keys/values serialize to the same JSON as their string forms.
+ true,
+ {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+ {:foo=>:bar, :lst=>[1, 3, 5, 7], :hsh=>{'baz'=>'qux', :foobar=>true, 'hsh'=>{:nested=>true}}, :delete_at=>nil},
+ ],
+ [
+ # Same pairs, different top-level key order.
+ true,
+ {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+ {'delete_at'=>nil, 'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}},
+ ],
+ [
+ # Different key order inside a nested hash.
+ true,
+ {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+ {'delete_at'=>nil, 'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'foobar'=>true, 'hsh'=>{'nested'=>true}, 'baz'=>'qux'}},
+ ],
+ [
+ # A changed list element is a real difference.
+ false,
+ {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+ {'foo'=>'bar', 'lst'=>[1, 3, 5, 42], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+ ],
+ [
+ # Reordered list elements are a real difference (lists are ordered).
+ false,
+ {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+ {'foo'=>'bar', 'lst'=>[1, 3, 7, 5], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+ ],
+ [
+ # A changed nested boolean is a real difference.
+ false,
+ {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+ {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>false}}, 'delete_at'=>nil},
+ ],
+ [
+ # nil replaced by a value is a real difference.
+ false,
+ {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},
+ {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>1234567890},
+ ],
+ ].each do |should_be_equal, value_1, value_2|
+ test "JSONB properties #{value_1} is#{should_be_equal ? '' : ' not'} equal to #{value_2}" do
+ act_as_user users(:active) do
+ # Set up initial collection
+ c = create_collection 'foo', Encoding::US_ASCII
+ assert c.valid?
+ c.update_attributes!({'properties' => value_1})
+ c.reload
+ assert c.changes.keys.empty?
+ # Re-assign and check whether dirty tracking reports a change.
+ c.properties = value_2
+ if should_be_equal
+ assert c.changes.keys.empty?, "Properties #{value_1.inspect} should be equal to #{value_2.inspect}"
+ else
+ refute c.changes.keys.empty?, "Properties #{value_1.inspect} should not be equal to #{value_2.inspect}"
+ end
+ end
+ end
+ end
+
test "older versions' modified_at indicate when they're created" do
Rails.configuration.Collections.CollectionVersioning = true
Rails.configuration.Collections.PreserveVersionIfIdle = 0
['owner_uuid', 'zzzzz-tpzed-d9tiejq69daie8f', 'zzzzz-tpzed-xurymjxw79nv3jz'],
['replication_desired', 2, 3],
['storage_classes_desired', ['hot'], ['archive']],
- ['is_trashed', true, false],
].each do |attr, first_val, second_val|
test "sync #{attr} with older versions" do
Rails.configuration.Collections.CollectionVersioning = true
name: 'foo',
trash_at: db_current_time + 1.years)
sig_exp = /\+A[0-9a-f]{40}\@([0-9]+)/.match(c.signed_manifest_text)[1].to_i
- expect_max_sig_exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL
+ expect_max_sig_exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL.to_i
assert_operator c.trash_at.to_i, :>, expect_max_sig_exp
assert_operator sig_exp.to_i, :<=, expect_max_sig_exp
end
test test_name do
act_as_user users(:active) do
min_exp = (db_current_time +
- Rails.configuration.Collections.BlobSigningTTL.seconds)
+ Rails.configuration.Collections.BlobSigningTTL)
if fixture_name == :expired_collection
# Fixture-finder shorthand doesn't find trashed collections
# because they're not in the default scope.
assert_not_nil(trash)
assert_not_nil(delete)
assert_in_delta(trash, now + 1.second, 10)
- assert_in_delta(delete, now + Rails.configuration.Collections.BlobSigningTTL.second, 10)
+ assert_in_delta(delete, now + Rails.configuration.Collections.BlobSigningTTL, 10)
end
def check_output_ttl_1y(now, trash, delete)
uuid = containers(:running).uuid
ActiveRecord::Base.connection.exec_query('UPDATE containers SET priority=0 WHERE uuid=$1', 'test-setup', [[nil, uuid]])
assert_equal 0, Container.find_by_uuid(uuid).priority
- UpdatePriority.update_priority
+ UpdatePriority.update_priority(nolock: true)
assert_operator 0, :<, Container.find_by_uuid(uuid).priority
uuid = containers(:queued).uuid
ActiveRecord::Base.connection.exec_query('UPDATE containers SET priority=0 WHERE uuid=$1', 'test-setup', [[nil, uuid]])
assert_equal 0, Container.find_by_uuid(uuid).priority
- UpdatePriority.update_priority
+ UpdatePriority.update_priority(nolock: true)
assert_operator 0, :<, Container.find_by_uuid(uuid).priority
end
uuid = containers(:running).uuid
ActiveRecord::Base.connection.exec_query('DELETE FROM container_requests WHERE container_uuid=$1', 'test-setup', [[nil, uuid]])
assert_operator 0, :<, Container.find_by_uuid(uuid).priority
- UpdatePriority.update_priority
+ UpdatePriority.update_priority(nolock: true)
assert_equal 0, Container.find_by_uuid(uuid).priority
end
end
// simulate mounted read-only collection
s.cp.mounts["/mnt"] = arvados.Mount{
Kind: "collection",
- PortableDataHash: arvadostest.FooPdh,
+ PortableDataHash: arvadostest.FooCollectionPDH,
}
// simulate mounted writable collection
c.Assert(f.Close(), check.IsNil)
s.cp.mounts["/mnt-w"] = arvados.Mount{
Kind: "collection",
- PortableDataHash: arvadostest.FooPdh,
+ PortableDataHash: arvadostest.FooCollectionPDH,
Writable: true,
}
s.cp.binds = append(s.cp.binds, bindtmp+":/mnt-w")
func (s *copierSuite) TestWritableMountBelow(c *check.C) {
s.cp.mounts["/ctr/outdir/mount"] = arvados.Mount{
Kind: "collection",
- PortableDataHash: arvadostest.FooPdh,
+ PortableDataHash: arvadostest.FooCollectionPDH,
Writable: true,
}
c.Assert(os.MkdirAll(s.cp.hostOutputDir+"/mount", 0755), check.IsNil)
package main
import (
- "flag"
- "fmt"
- "net/http"
+ "context"
+ "os"
+ "git.curoverse.com/arvados.git/lib/cmd"
+ "git.curoverse.com/arvados.git/lib/service"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/health"
- "git.curoverse.com/arvados.git/sdk/go/httpserver"
- log "github.com/sirupsen/logrus"
)
-var version = "dev"
-
-func main() {
- configFile := flag.String("config", arvados.DefaultConfigFile, "`path` to arvados configuration file")
- getVersion := flag.Bool("version", false, "Print version information and exit.")
- flag.Parse()
-
- // Print version information if requested
- if *getVersion {
- fmt.Printf("arvados-health %s\n", version)
- return
- }
-
- log.SetFormatter(&log.JSONFormatter{
- TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
- })
- log.Printf("arvados-health %s started", version)
+// version is overridden at build time; command wires this binary into
+// the shared service-command framework (config loading, logging, and
+// HTTP server lifecycle handled by lib/service).
+// NOTE(review): this is the health service, but it registers as
+// arvados.ServiceNameController — confirm whether ServiceNameHealth
+// was intended here.
+var (
+ version = "dev"
+ command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
+)
- cfg, err := arvados.GetConfig(*configFile)
- if err != nil {
- log.Fatal(err)
- }
- clusterCfg, err := cfg.GetCluster("")
- if err != nil {
- log.Fatal(err)
- }
- nodeCfg, err := clusterCfg.GetNodeProfile("")
- if err != nil {
- log.Fatal(err)
- }
+// newHandler returns the health aggregator for the given cluster; it is
+// invoked by service.Command after configuration has been loaded.
+func newHandler(ctx context.Context, cluster *arvados.Cluster, _ string) service.Handler {
+ return &health.Aggregator{Cluster: cluster}
+}
- log := log.WithField("Service", "Health")
- srv := &httpserver.Server{
- Addr: nodeCfg.Health.Listen,
- Server: http.Server{
- Handler: &health.Aggregator{
- Config: cfg,
- Log: func(req *http.Request, err error) {
- log.WithField("RemoteAddr", req.RemoteAddr).
- WithField("Path", req.URL.Path).
- WithError(err).
- Info("HTTP request")
- },
- },
- },
- }
- if err := srv.Start(); err != nil {
- log.Fatal(err)
- }
- log.WithField("Listen", srv.Addr).Info("listening")
- if err := srv.Wait(); err != nil {
- log.Fatal(err)
- }
+// main delegates all argument parsing and serving to the shared command
+// runner and exits with its status code.
+func main() {
+ os.Exit(command.RunCommand(os.Args[0], os.Args[1:], os.Stdin, os.Stdout, os.Stderr))
+}
coll, err = cache.Get(arv, arvadostest.FooCollection, false)
c.Check(err, check.Equals, nil)
c.Assert(coll, check.NotNil)
- c.Check(coll.PortableDataHash, check.Equals, arvadostest.FooPdh)
+ c.Check(coll.PortableDataHash, check.Equals, arvadostest.FooCollectionPDH)
c.Check(coll.ManifestText[:2], check.Equals, ". ")
}
s.checkCacheMetrics(c, cache.registry,
// lookup.
arv.ApiToken = arvadostest.ActiveToken
- coll2, err := cache.Get(arv, arvadostest.FooPdh, false)
+ coll2, err := cache.Get(arv, arvadostest.FooCollectionPDH, false)
c.Check(err, check.Equals, nil)
c.Assert(coll2, check.NotNil)
- c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooPdh)
+ c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooCollectionPDH)
c.Check(coll2.ManifestText[:2], check.Equals, ". ")
c.Check(coll2.ManifestText, check.Not(check.Equals), coll.ManifestText)
"pdh_hits 4",
"api_calls 2")
- coll2, err = cache.Get(arv, arvadostest.FooPdh, false)
+ coll2, err = cache.Get(arv, arvadostest.FooCollectionPDH, false)
c.Check(err, check.Equals, nil)
c.Assert(coll2, check.NotNil)
- c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooPdh)
+ c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooCollectionPDH)
c.Check(coll2.ManifestText[:2], check.Equals, ". ")
s.checkCacheMetrics(c, cache.registry,
cache.registry = prometheus.NewRegistry()
for _, forceReload := range []bool{false, true, false, true} {
- _, err := cache.Get(arv, arvadostest.FooPdh, forceReload)
+ _, err := cache.Get(arv, arvadostest.FooCollectionPDH, forceReload)
c.Check(err, check.Equals, nil)
}
c.Check(stdout, check.Matches, `(?ms).*collection is empty.*`)
}
for _, path := range []string{
- "/by_id/" + arvadostest.FooPdh,
- "/by_id/" + arvadostest.FooPdh + "/",
+ "/by_id/" + arvadostest.FooCollectionPDH,
+ "/by_id/" + arvadostest.FooCollectionPDH + "/",
"/by_id/" + arvadostest.FooCollection,
"/by_id/" + arvadostest.FooCollection + "/",
} {
}
func (s *UnitSuite) TestInvalidUUID(c *check.C) {
- bogusID := strings.Replace(arvadostest.FooPdh, "+", "-", 1) + "-"
+ bogusID := strings.Replace(arvadostest.FooCollectionPDH, "+", "-", 1) + "-"
token := arvadostest.ActiveToken
for _, trial := range []string{
"http://keep-web/c=" + bogusID + "/foo",
arvadostest.FooCollection + ".example.com/foo",
arvadostest.FooCollection + "--collections.example.com/foo",
arvadostest.FooCollection + "--collections.example.com/_/foo",
- arvadostest.FooPdh + ".example.com/foo",
- strings.Replace(arvadostest.FooPdh, "+", "-", -1) + "--collections.example.com/foo",
+ arvadostest.FooCollectionPDH + ".example.com/foo",
+ strings.Replace(arvadostest.FooCollectionPDH, "+", "-", -1) + "--collections.example.com/foo",
arvadostest.FooBarDirCollection + ".example.com/dir1/foo",
} {
c.Log("doRequests: ", hostPath)
dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
},
{
- host: strings.Replace(arvadostest.FooPdh, "+", "-", 1) + ".collections.example.com",
+ host: strings.Replace(arvadostest.FooCollectionPDH, "+", "-", 1) + ".collections.example.com",
path: "/t=" + arvadostest.ActiveToken + "/foo",
dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
},
{
- path: "/c=" + arvadostest.FooPdh + "/t=" + arvadostest.ActiveToken + "/foo",
+ path: "/c=" + arvadostest.FooCollectionPDH + "/t=" + arvadostest.ActiveToken + "/foo",
dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
},
{
- path: "/c=" + strings.Replace(arvadostest.FooPdh, "+", "-", 1) + "/t=" + arvadostest.ActiveToken + "/_/foo",
+ path: "/c=" + strings.Replace(arvadostest.FooCollectionPDH, "+", "-", 1) + "/t=" + arvadostest.ActiveToken + "/_/foo",
dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
},
{
fi, err := os.Stat(dev)
if err != nil {
- return giveup("stat %q: %s\n", dev, err)
+ return giveup("stat %q: %s", dev, err)
}
ino := fi.Sys().(*syscall.Stat_t).Ino
n, err := io.Copy(tmpfile, rdr)
v.os.stats.TickOutBytes(uint64(n))
if err != nil {
- log.Printf("%s: writing to %s: %s\n", v, bpath, err)
+ log.Printf("%s: writing to %s: %s", v, bpath, err)
tmpfile.Close()
v.os.Remove(tmpfile.Name())
return err
}
if err := tmpfile.Close(); err != nil {
- log.Printf("closing %s: %s\n", tmpfile.Name(), err)
+ log.Printf("closing %s: %s", tmpfile.Name(), err)
v.os.Remove(tmpfile.Name())
return err
}
if err := v.os.Rename(tmpfile.Name(), bpath); err != nil {
- log.Printf("rename %s %s: %s\n", tmpfile.Name(), bpath, err)
+ log.Printf("rename %s %s: %s", tmpfile.Name(), bpath, err)
return v.os.Remove(tmpfile.Name())
}
return nil
func (v *UnixVolume) Status() *VolumeStatus {
fi, err := v.os.Stat(v.Root)
if err != nil {
- log.Printf("%s: os.Stat: %s\n", v, err)
+ log.Printf("%s: os.Stat: %s", v, err)
return nil
}
devnum := fi.Sys().(*syscall.Stat_t).Dev
var fs syscall.Statfs_t
if err := syscall.Statfs(v.Root, &fs); err != nil {
- log.Printf("%s: statfs: %s\n", v, err)
+ log.Printf("%s: statfs: %s", v, err)
return nil
}
// These calculations match the way df calculates disk usage:
if avail, err := v.FreeDiskSpace(); err == nil {
isFull = avail < MinFreeKilobytes
} else {
- log.Printf("%s: FreeDiskSpace: %s\n", v, err)
+ log.Printf("%s: FreeDiskSpace: %s", v, err)
isFull = false
}
if v.locker == nil {
return nil
}
+ t0 := time.Now()
locked := make(chan struct{})
go func() {
v.locker.Lock()
}()
select {
case <-ctx.Done():
+ log.Printf("%s: client hung up while waiting for Serialize lock (%s)", v, time.Since(t0))
go func() {
<-locked
v.locker.Unlock()
# size class (since libcloud does not provide any consistent API for exposing
# this setting).
# You may also want to define the amount of scratch space (expressed
-# in GB) for Crunch jobs. You can also override Amazon's provided
+# in MB) for Crunch jobs. You can also override Amazon's provided
# data fields (such as price per hour) by setting them here.
#
# Additionally, you can ask for a preemptible instance (AWS's spot instance)
# both spot & reserved versions of the same size, you can do so by renaming
# the Size section and specifying the instance type inside it.
+# 100 GB scratch space
[Size m4.large]
cores = 2
price = 0.126
-scratch = 100
+scratch = 100000
+# 10 GB scratch space
[Size m4.large.spot]
instance_type = m4.large
preemptible = true
cores = 2
price = 0.126
-scratch = 100
+scratch = 10000
+# 200 GB scratch space
[Size m4.xlarge]
cores = 4
price = 0.252
-scratch = 100
+scratch = 200000
'apache-libcloud>=2.3.1.dev1',
'arvados-python-client>=0.1.20170731145219',
'future',
- 'pykka',
+ 'pykka < 2',
'python-daemon',
'setuptools',
'subprocess32>=3.5.1',
;;
root-cert)
- CERT=$PWD/${ARVBOX_CONTAINER}-root-cert.pem
+ CERT=$PWD/${ARVBOX_CONTAINER}-root-cert.crt
if test -n "$1" ; then
CERT="$1"
fi
. /usr/local/lib/arvbox/common.sh
+uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
+
if test ! -s /var/lib/arvados/root-cert.pem ; then
# req signing request sub-command
# -new new certificate request
-nodes \
-sha256 \
-x509 \
- -subj "/C=US/ST=MA/O=Arvados testing/OU=arvbox/CN=arvbox testing root CA for ${uuid_prefix}" \
+ -subj "/C=US/ST=MA/O=Arvados testing/OU=arvbox/CN=test root CA for ${uuid_prefix} generated $(date --rfc-3339=seconds)" \
-extensions x509_ext \
-config <(cat /etc/ssl/openssl.cnf \
<(printf "\n[x509_ext]\nbasicConstraints=critical,CA:true,pathlen:0\nkeyUsage=critical,keyCertSign,cRLSign")) \
-new \
-nodes \
-sha256 \
- -subj "/C=US/ST=MA/O=Arvados testing for ${uuid_prefix}/OU=arvbox/CN=localhost" \
+ -subj "/C=US/ST=MA/O=Arvados testing/OU=arvbox/CN=test server cert for ${uuid_prefix} generated $(date --rfc-3339=seconds)" \
-reqexts x509_ext \
-extensions x509_ext \
-config <(cat /etc/ssl/openssl.cnf \
fi
uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
+secret_token=$(cat /var/lib/arvados/api_secret_token)
+blob_signing_key=$(cat /var/lib/arvados/blob_signing_key)
+management_token=$(cat /var/lib/arvados/management_token)
+sso_app_secret=$(cat /var/lib/arvados/sso_app_secret)
+vm_uuid=$(cat /var/lib/arvados/vm-uuid)
database_pw=$(cat /var/lib/arvados/api_database_pw)
if test -s /var/lib/arvados/api_rails_env ; then
cat >/var/lib/arvados/cluster_config.yml <<EOF
Clusters:
${uuid_prefix}:
- NodeProfiles:
+ ManagementToken: $management_token
+ Services:
+ Workbench1:
+ ExternalURL: "https://$localip:${services[workbench]}"
+ Workbench2:
+ ExternalURL: "https://$localip:${services[workbench2-ssl]}"
+ SSO:
+ ExternalURL: "https://$localip:${services[sso]}"
+ Websocket:
+ ExternalURL: "wss://$localip:${services[websockets-ssl]}/websocket"
+ GitSSH:
+ ExternalURL: "ssh://git@$localip:"
+ GitHTTP:
+ ExternalURL: "http://$localip:${services[arv-git-httpd]}/"
+ WebDAV:
+ ExternalURL: "https://$localip:${services[keep-web-ssl]}/"
+ NodeProfiles: # to be deprecated in favor of "Services" section
"*":
arvados-controller:
Listen: ":${services[controller]}" # choose a port
Connection:
# All parameters here are passed to the PG client library in a connection string;
# see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
- Host: localhost
- User: arvados
- Password: ${database_pw}
- DBName: arvados_${database_env}
+ host: localhost
+ user: arvados
+ password: ${database_pw}
+ dbname: arvados_${database_env}
client_encoding: utf8
+ API:
+ RailsSessionSecretToken: $secret_token
+ Collections:
+ BlobSigningKey: $blob_signing_key
+ DefaultReplication: 1
+ Containers:
+ SupportedDockerImageFormats: ["v2"]
+ Login:
+ ProviderAppSecret: $sso_app_secret
+ ProviderAppID: arvados-server
+ Users:
+ NewUsersAreActive: true
+ AutoAdminFirstUser: true
+ AutoSetupNewUsers: true
+ AutoSetupNewUsersWithVmUUID: $vm_uuid
+ AutoSetupNewUsersWithRepository: true
EOF
/usr/local/lib/arvbox/yml_override.py /var/lib/arvados/cluster_config.yml
cat > /usr/local/bin/crunch-run.sh <<EOF
#!/bin/sh
-exec /usr/local/bin/crunch-run -container-enable-networking=always -container-network-mode=host \$@
+exec /usr/local/bin/crunch-run -container-enable-networking=default -container-network-mode=host \$@
EOF
chmod +x /usr/local/bin/crunch-run.sh
access_log off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
+ client_max_body_size 128M;
+
server {
listen ${services[doc]} default_server;
listen [::]:${services[doc]} default_server;
}
EOF
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
+
+url_prefix="https://$localip:${services[workbench2-ssl]}/"
+
+set +e
+read -rd $'\000' apiclient <<EOF
+{
+ "url_prefix": "$url_prefix",
+ "is_trusted": true
+}
+EOF
+set -e
+
+clientuuid=$(arv --format=uuid api_client list --filters '[["url_prefix", "=", "'$url_prefix'"]]')
+if [[ -n "$clientuuid" ]] ; then
+ arv api_client update --uuid $clientuuid --api-client "$apiclient"
+else
+ arv api_client create --api-client "$apiclient"
+fi
+
export HTTPS=false
# Can't use "yarn start", need to run the dev server script
# directly so that the TERM signal from "sv restart" gets to the
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+#
+
+from __future__ import print_function, absolute_import
+import argparse
+import arvados
+import arvados.util
+import csv
+import sys
+import logging
+
+lglvl = logging.INFO+1
+logging.basicConfig(level=lglvl, format='%(message)s')
+
+"""
+Given a list of collections missing blocks (as produced by
+keep-balance), produce a report listing affected collections and
+container requests.
+"""
+
+# Walk up from container ct to the top-level (user-submitted) container
+# request(s) and record them in container_requests_to_rerun, keyed by
+# request UUID so duplicates collapse.
+def rerun_request(arv, container_requests_to_rerun, ct):
+ requests = arvados.util.list_all(arv.container_requests().list, filters=[["container_uuid", "=", ct["uuid"]]])
+ for cr in requests:
+ if cr["requesting_container_uuid"]:
+ # Request was made by another container: recurse toward the root.
+ rerun_request(arv, container_requests_to_rerun, arv.containers().get(uuid=cr["requesting_container_uuid"]).execute())
+ else:
+ container_requests_to_rerun[cr["uuid"]] = cr
+
+# Resolve record's owner to a (name, root_owner_uuid) tuple, memoized in
+# the owners dict. A "tpzed" UUID infix means the owner is a user;
+# otherwise it is a group/project and we recurse up the ownership chain.
+def get_owner(arv, owners, record):
+ uuid = record["owner_uuid"]
+ if uuid not in owners:
+ if uuid[6:11] == "tpzed":
+ owners[uuid] = (arv.users().get(uuid=uuid).execute()["full_name"], uuid)
+ else:
+ grp = arv.groups().get(uuid=uuid).execute()
+ _, ou = get_owner(arv, owners, grp)
+ owners[uuid] = (grp["name"], ou)
+ return owners[uuid]
+
+def main():
+ # Entry point: read keep-balance's missing-block report, then write a
+ # CSV (to stdout) of affected collections and the container requests
+ # that would need re-running to regenerate them.
+ parser = argparse.ArgumentParser(description='Re-run containers associated with missing blocks')
+ parser.add_argument('inp')
+ args = parser.parse_args()
+
+ arv = arvados.api('v1')
+
+ busted_collections = set()
+
+ logging.log(lglvl, "Reading %s", args.inp)
+
+ # Get the list of bad collection PDHs
+ blocksfile = open(args.inp, "rt")
+ for line in blocksfile:
+ # Ignore the first item, that's the block id
+ collections = line.rstrip().split(" ")[1:]
+ for c in collections:
+ busted_collections.add(c)
+
+ out = csv.writer(sys.stdout)
+
+ out.writerow(("collection uuid", "container request uuid", "record name", "modified at", "owner uuid", "owner name", "root owner uuid", "root owner name", "notes"))
+
+ logging.log(lglvl, "Finding collections")
+
+ owners = {}
+ collections_to_delete = {}
+ container_requests_to_rerun = {}
+ # Get containers that produced these collections
+ i = 0
+ for b in busted_collections:
+ if (i % 100) == 0:
+ logging.log(lglvl, "%d/%d", i, len(busted_collections))
+ i += 1
+ # One PDH may be shared by several collection records.
+ collections_to_delete = arvados.util.list_all(arv.collections().list, filters=[["portable_data_hash", "=", b]])
+ for d in collections_to_delete:
+ t = ""
+ if d["properties"].get("type") not in ("output", "log"):
+ # Flag collections that don't look container-generated.
+ t = "\"type\" was '%s', expected one of 'output' or 'log'" % d["properties"].get("type")
+ ou = get_owner(arv, owners, d)
+ out.writerow((d["uuid"], "", d["name"], d["modified_at"], d["owner_uuid"], ou[0], ou[1], owners[ou[1]][0], t))
+
+ maybe_containers_to_rerun = arvados.util.list_all(arv.containers().list, filters=[["output", "=", b]])
+ for ct in maybe_containers_to_rerun:
+ rerun_request(arv, container_requests_to_rerun, ct)
+
+ logging.log(lglvl, "%d/%d", i, len(busted_collections))
+ logging.log(lglvl, "Finding container requests")
+
+ i = 0
+ for _, cr in container_requests_to_rerun.items():
+ if (i % 100) == 0:
+ logging.log(lglvl, "%d/%d", i, len(container_requests_to_rerun))
+ i += 1
+ ou = get_owner(arv, owners, cr)
+ out.writerow(("", cr["uuid"], cr["name"], cr["modified_at"], cr["owner_uuid"], ou[0], ou[1], owners[ou[1]][0], ""))
+
+ logging.log(lglvl, "%d/%d", i, len(container_requests_to_rerun))
+
+if __name__ == "__main__":
+ main()
"revision": "0a025b7e63adc15a622f29b0b2c4c3848243bbf6",
"revisionTime": "2016-08-13T22:13:03Z"
},
+ {
+ "checksumSHA1": "x7IEwuVYTztOJItr3jtePGyFDWA=",
+ "path": "github.com/imdario/mergo",
+ "revision": "5ef87b449ca75fbed1bc3765b749ca8f73f1fa69",
+ "revisionTime": "2019-04-15T13:31:43Z"
+ },
{
"checksumSHA1": "iCsyavJDnXC9OY//p52IWJWy7PY=",
"path": "github.com/jbenet/go-context/io",